diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000000000000000000000000000000000000..5f1e3f750d0fb2f1d937bc396fbe0575d63a9f69
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "submodules/dagflow"]
+	path = submodules/dagflow
+	url = https://git.jinr.ru/dag-computing/dag-flow.git
+	branch = feature/pars-gindex
diff --git a/dagflow b/dagflow
index 20a458f8c905689a5a42423e66222fb034c4697c..28abe65d5c75465f2029bc2c8f7fd46e7328bf73 120000
--- a/dagflow
+++ b/dagflow
@@ -1 +1 @@
-subtrees/dagflow/dagflow
\ No newline at end of file
+submodules/dagflow/dagflow
\ No newline at end of file
diff --git a/gindex b/gindex
index 7c8de7d3abcd8feea005ca5876d869d69a05fbe1..9da0751cc3c2893a9785006c44c3e07371c7d729 120000
--- a/gindex
+++ b/gindex
@@ -1 +1 @@
-subtrees/dagflow/gindex
\ No newline at end of file
+submodules/dagflow/gindex
\ No newline at end of file
diff --git a/multikeydict b/multikeydict
index 54670422014917d5c278d08a0b57800872868f79..e600afb94bf19b205ee2e520c9791c2663d60491 120000
--- a/multikeydict
+++ b/multikeydict
@@ -1 +1 @@
-subtrees/dagflow/multikeydict
\ No newline at end of file
+submodules/dagflow/multikeydict
\ No newline at end of file
diff --git a/submodules/dagflow b/submodules/dagflow
new file mode 160000
index 0000000000000000000000000000000000000000..f581f0fa1a9695c99f40a336b0fe5db53d7fc726
--- /dev/null
+++ b/submodules/dagflow
@@ -0,0 +1 @@
+Subproject commit f581f0fa1a9695c99f40a336b0fe5db53d7fc726
diff --git a/subtrees/dagflow/.gitignore b/subtrees/dagflow/.gitignore
deleted file mode 100644
index f11cd0888ffc8c5026b90c05182545a12c042b41..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/.gitignore
+++ /dev/null
@@ -1,54 +0,0 @@
-output
-build
-_build
-__pycache__
-
-# Local configuration files and folders
-config_local
-.local/
-.vscode
-.direnv/
-.envrc
-.fish_functions
-matplotlibrc
-.coverage
-cov.*
-
-# Transient files (vim, etc)
-*~
-*.swp
-\#*
-.\#*
-.cache
-.lark_cache*
-.lark-cache*
-*.bak
-*.backup
-
-# vim
-UltiSnips/*
-.viminfo
-.vimrc
-.nvimrc
-*.vim
-.ycm_extra_conf.py
-
-# Latex
-*.aux
-*.pda
-*.toc
-*.log
-*.fdb*
-*.out
-*.pdf
-*.png
-*.blg
-*.snm
-*.nav
-# code
-
-# Code
-tags
-*.pyc
-*.o
-coverage.json
diff --git a/subtrees/dagflow/.gitlab-ci.yml b/subtrees/dagflow/.gitlab-ci.yml
deleted file mode 100644
index b1e3e2e3c2c47b57ff59cda6ab74ee92639cb673..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/.gitlab-ci.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-stages:
-    - tests
-
-tests:
-    image: git.jinr.ru:5005/gna/gna-base-docker-image:latest
-    stage: tests
-
-    script:
-    - python3 -m pip install -r requirements.txt
-    - coverage run --source=dagflow --omit=subtrees/* -m pytest
-    - coverage report
-    - coverage xml
-    coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
-    artifacts:
-        paths:
-            - test/output
-        reports:
-            coverage_report:
-                coverage_format: cobertura
-                path: coverage.xml
-    only:
-        - master
-        - update-to-data-preservation
-        - merge_requests
diff --git a/subtrees/dagflow/README.md b/subtrees/dagflow/README.md
deleted file mode 100644
index d691822eb40f841818fa3a1b0abe16aeb134022b..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/README.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Summary
-
-[![python](https://img.shields.io/badge/python-3.10-purple.svg)](https://www.python.org/)
-[![pipeline](https://git.jinr.ru/dag-computing/dag-flow/badges/master/pipeline.svg)](https://git.jinr.ru/dag-computing/dag-flow/commits/master)
-[![coverage report](https://git.jinr.ru/dag-computing/dag-flow/badges/master/coverage.svg)](https://git.jinr.ru/dag-computing/dag-flow/-/commits/master)
-<!--- Uncomment here after adding docs!
-[![pages](https://img.shields.io/badge/pages-link-white.svg)](http://dag-computing.pages.jinr.ru/dag-flow)
--->
-
-DAGFlow is a Python implementation of dataflow programming with lazy graph evaluation.
-
-Main goals:
-*  Lazy evaluated directed acyclic graph
-*  Concise connection syntax
-*  Plotting with graphviz
-*  Flexibility. The goal of DAGFlow is not to be efficient, but rather flexible.
-
-Here is an animation showing the process of graph evaluation:
-
-![Image](example/graph_evaluation.gif)
-
-# Minimal example
-An example of a small graph calculating the formula (n1 + n2 + n3) * n4 may be
-found in the [example](example/example.py):
-```python
-#!/usr/bin/env python
-
-from dagflow.node_deco import NodeClass
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.input_extra import MissingInputAddOne
-import numpy as N
-
-# Node functions
-@NodeClass(output='array')
-def Array(node, inputs, output):
-    """Creates a note with single data output with predefined array"""
-    output.data = N.arange(5, dtype='d')
-
-@NodeClass(missing_input_handler=MissingInputAddOne(output_fmt='result'))
-def Adder(node, inputs, output):
-    """Adds all the inputs together"""
-    out = None
-    for input in inputs:
-        if out is None:
-            out = output.data = input.data.copy()
-        else:
-            out += input.data
-
-@NodeClass(missing_input_handler=MissingInputAddOne(output_fmt='result'))
-def Multiplier(node, inputs, output):
-    """Multiplies all the inputs together"""
-    out = None
-    for input in inputs:
-        if out is None:
-            out = output.data = input.data.copy()
-        else:
-            out *= input.data
-
-# The actual code
-with Graph() as graph:
-    (in1, in2, in3, in4) = [Array(name) for name in ['n1', 'n2', 'n3', 'n4']]
-    s = Adder('add')
-    m = Multiplier('mul')
-
-(in1, in2, in3) >> s
-(in4, s) >> m
-
-print('Result is:', m.outputs["result"].data)
-savegraph(graph, 'output/dagflow_example.png')
-```
-
-The code produces the following graph:
-
-![Image](example/dagflow_example.png)
-
-Since each of `n1`, `n2`, `n3` and `n4` is `[0, 1, 2, 3, 4]`, the output is:
-```
-Result is: [ 0.  3. 12. 27. 48.]
-```
diff --git a/subtrees/dagflow/TODO.md b/subtrees/dagflow/TODO.md
deleted file mode 100644
index 502b50a7ae594b309f53aa52e46c1efab793367a..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/TODO.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Update to Daya Bay data preservation
-
-## Common tasks
-
-- [x] Input renaming: `output -> input`, `corresponding_output -> output`
-- [x] Automatic creation of outputs is **restricted**
-- [x] Parentheses operator `()` as a getter like `[]` for inputs, but creating
-the input instead of raising `KeyError`
-- [x] Implement flexible shift operators `>>` and `<<`, or *use the current ones*?
-  - Now using the current implementation.
-- [x] Implement `hooks`:
-  - At an input connection
-  - At a function evaluation
-- [x] Two types of `Exceptions`:
-  - connection and type checking (`non-critical` exception)
-  - call function (`critical`)
-- [x] Recursive close of a graph
-- [x] Solve issues with connecting an input or output and with closure
-- [x] Implement 2 descriptors for the `Output`:
-  - `Shape` and `dtype`
-  - `Allocation` and `view`
-- [x] Move `handlers` to the `binding` stage
-- [x] Memory allocation:
-  - See `core/transformation/TransformationEntry.cc` method `updateTypes()`
-- [x] Datatype: `allocatable`, `non-alloc`
-- [x] Datadescr: `dtype`, `shape`
-- [x] Dict as `kwargs`:
-  - `ws = WeightedSum()`;
-  - `{'weight': data} >> ws` is the same as `data >> ws('weight')`
-- [x] Logging
-- [x] Inputs problem: there is a difference between node and output inputs
-- [x] Update naming for the second order `input` and `output`: `parent`, `child`
-- [x] `iinput` is metadata; do not use it in allocation and closure;
-use `Node` for that; do not use second-order `input` and `output`
-- [x] Loops scheme:
-  1) Close:
-      - Typing:
-        - Update types
-        - Update shapes
-      - Allocation
-  2) Graph:
-      - Node:
-        - Inputs
-        - Outputs
-  3) See <https://hackmd.io/mMNrlOp7Q7i9wkVFvP4W4Q>
-- [x] `Tainted`
-- [x] Fix decorators
-- [x] Move common checks in `typefunc` into standalone module
-- [ ] Update wrapping
-
-## Transformations
-
-- [x] Implement some simple transformations with only `args` in the function:
-`Sum`, `Product`, `Division`, ...
-- [x] Implement some simple transformations with `args` and `kwargs`:
-`WeightedSum` with `weight`, ...
-- [x] Check the style of the implementation
-- [x] Update the inputs checks before evaluation
-- [x] Concatenation
-- [x] Update `WeightedSum`
-- [ ] Implement `Integrator`
-
-## Tests
-
-- [x] Test the graph workflow with transformations
-- [x] Test opening and closing of several graphs
-
-## Questions and suggestions
-
-- [x] Should we use only `numpy.ndarray` or also `numpy.number` for a single element:
-  1) only `numpy.ndarray`!
-- [ ] Should we implement `zero`, `unity` objects with automatic dimension?
diff --git a/subtrees/dagflow/conftest.py b/subtrees/dagflow/conftest.py
deleted file mode 100644
index cb4f3e09a5847ac76d74ad2db570618dcdbf37ab..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/conftest.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from os import chdir, getcwd, mkdir, listdir
-from os.path import isdir
-
-from pytest import fixture
-
-
-def pytest_sessionstart(session):
-    """
-    Called after the Session object has been created and
-    before performing collection and entering the run test loop.
-
-    Automatically change the path to `dag-flow/test` and create the `test/output` dir
-    """
-    while(path := getcwd()):
-        if (lastdir := path.split("/")[-1]) == "test":
-            break
-        elif ".git" in listdir(path):
-            chdir("./test")
-            break
-        else:
-            chdir("..")
-    if not isdir("output"):
-        mkdir("output")
-
-
-def pytest_addoption(parser):
-    parser.addoption("--debug_graph", action="store_true", default=False)
-
-
-@fixture(scope="session")
-def debug_graph(request):
-    return request.config.option.debug_graph
diff --git a/subtrees/dagflow/dagflow/__init__.py b/subtrees/dagflow/dagflow/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/subtrees/dagflow/dagflow/bundles/__init__.py b/subtrees/dagflow/dagflow/bundles/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/subtrees/dagflow/dagflow/bundles/load_parameters.py b/subtrees/dagflow/dagflow/bundles/load_parameters.py
deleted file mode 100644
index b8fc01fed955e92847d434b6ba50307c2864ff1e..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/bundles/load_parameters.py
+++ /dev/null
@@ -1,252 +0,0 @@
-from multikeydict.nestedmkdict import NestedMKDict
-# from multikeydict.flatmkdict import FlatMKDict # To be used later
-from gindex import GNIndex
-
-from schema import Schema, Or, Optional, Use, And, Schema, SchemaError
-from pathlib import Path
-
-from ..tools.schema import NestedSchema, LoadFileWithExt, LoadYaml
-
-class ParsCfgHasProperFormat(object):
-    def validate(self, data: dict) -> dict:
-        format = data['format']
-        if isinstance(format, str):
-            nelements = 1
-        else:
-            nelements = len(format)
-
-        dtin = NestedMKDict(data)
-        for key, subdata in dtin['parameters'].walkitems():
-            if isinstance(subdata, tuple):
-                if len(subdata)==nelements: continue
-            else:
-                if nelements==1: continue
-
-            key = ".".join(str(k) for k in key)
-            raise SchemaError(f'Key "{key}" has value "{subdata}" inconsistent with format "{format}"')
-
-        return data
-
-IsNumber = Or(float, int, error='Invalid number "{}", expected int or float')
-IsNumberOrTuple = Or(IsNumber, (IsNumber,), And([IsNumber], Use(tuple)), error='Invalid number/tuple {}')
-IsLabel = Or({
-        'text': str,
-        Optional('latex'): str,
-        Optional('graph'): str,
-        Optional('mark'): str,
-        Optional('name'): str
-    },
-    And(str, Use(lambda s: {'text': s}), error='Invalid string: {}')
-)
-IsValuesDict = NestedSchema(IsNumberOrTuple)
-IsLabelsDict = NestedSchema(IsLabel, processdicts=True)
-def IsFormatOk(format):
-    if not isinstance(format, (tuple, list)):
-        return format=='value'
-
-    if len(format)==1:
-        f1,=format
-        return f1=='value'
-    else:
-        if len(format)==2:
-            f1, f3 = format
-        elif len(format)==3:
-            f1, f2, f3 = format
-
-            if f2 not in ('value', 'central') or f1==f2:
-                return False
-        else:
-            return False
-
-        if f3 not in ('sigma_absolute', 'sigma_relative', 'sigma_percent'):
-            return False
-
-        return f1 in ('value', 'central')
-
-IsFormat = Schema(IsFormatOk, error='Invalid parameter format "{}".')
-IsStrSeq = (str,)
-IsStrSeqOrStr = Or(IsStrSeq, And(str, Use(lambda s: (s,))))
-IsParsCfgDict = Schema({
-    'parameters': IsValuesDict,
-    'labels': IsLabelsDict,
-    'format': IsFormat,
-    'state': Or('fixed', 'variable', error='Invalid parameters state: {}'),
-    Optional('path', default=''): str,
-    Optional('replicate', default=((),)): (IsStrSeqOrStr,),
-    },
-    # error = 'Invalid parameters configuration: {}'
-)
-IsProperParsCfgDict = And(IsParsCfgDict, ParsCfgHasProperFormat())
-IsLoadableDict = And(
-            {
-                'load': Or(str, And(Path, Use(str))),
-                Optional(str): object
-            },
-            Use(LoadFileWithExt(yaml=LoadYaml, key='load', update=True), error='Failed to load {}'),
-            IsProperParsCfgDict
-        )
-def ValidateParsCfg(cfg):
-    if isinstance(cfg, dict) and 'load' in cfg:
-        return IsLoadableDict.validate(cfg)
-    else:
-        return IsProperParsCfgDict.validate(cfg)
-
-def process_var_fixed1(vcfg, _, __):
-    return {'central': vcfg, 'value': vcfg, 'sigma': None}
-
-def process_var_fixed2(vcfg, format, hascentral) -> dict:
-    ret = dict(zip(format, vcfg))
-    if hascentral:
-        ret.setdefault('value', ret['central'])
-    else:
-        ret.setdefault('central', ret['value'])
-    ret['sigma'] = None
-    return ret
-
-def process_var_absolute(vcfg, format, hascentral) -> dict:
-    ret = process_var_fixed2(vcfg, format, hascentral)
-    ret['sigma'] = ret['sigma_absolute']
-    return ret
-
-def process_var_relative(vcfg, format, hascentral) -> dict:
-    ret = process_var_fixed2(vcfg, format, hascentral)
-    ret['sigma'] = ret['sigma_relative']*ret['central']
-    return ret
-
-def process_var_percent(vcfg, format, hascentral) -> dict:
-    ret = process_var_fixed2(vcfg, format, hascentral)
-    ret['sigma'] = 0.01*ret['sigma_percent']*ret['central']
-    return ret
-
-def get_format_processor(format):
-    if isinstance(format, str):
-        return process_var_fixed1
-
-    errfmt = format[-1]
-    if not errfmt.startswith('sigma'):
-        return process_var_fixed2
-
-    if errfmt.endswith('_absolute'):
-        return process_var_absolute
-    elif errfmt.endswith('_relative'):
-        return process_var_relative
-    else:
-        return process_var_percent
-
-def get_label(key: tuple, labelscfg: dict) -> dict:
-    try:
-        return labelscfg[key]
-    except KeyError:
-        pass
-
-    for n in range(1, len(key)+1):
-        subkey = key[:-n]
-        try:
-            lcfg = labelscfg[subkey]
-        except KeyError:
-            continue
-
-        if not subkey and not 'text' in lcfg:
-            break
-
-        sidx = '.'.join(key[n-1:])
-        return {k: v.format(sidx) for k, v in lcfg.items()}
-
-    return {}
-
-def iterate_varcfgs(cfg: NestedMKDict):
-    parameterscfg = cfg['parameters']
-    labelscfg = cfg['labels']
-    format = cfg['format']
-
-    hascentral = 'central' in format
-    process = get_format_processor(format)
-
-    for key, varcfg in parameterscfg.walkitems():
-        varcfg = process(varcfg, format, hascentral)
-        varcfg['label'] = get_label(key, labelscfg)
-        yield key, varcfg
-
-from dagflow.parameters import Parameters
-from dagflow.lib.SumSq import SumSq
-
-def load_parameters(acfg):
-    cfg = ValidateParsCfg(acfg)
-    cfg = NestedMKDict(cfg)
-
-    pathstr = cfg['path']
-    if pathstr:
-        path = tuple(pathstr.split('.'))
-    else:
-        path = ()
-
-    state = cfg['state']
-
-    ret = NestedMKDict(
-        {
-            'parameter': {
-                'constant': {},
-                'free': {},
-                'constrained': {},
-                'normalized': {},
-                },
-            'stat': {
-                'nuisance_parts': {},
-                'nuisance': {},
-                },
-            'parameter_node': {
-                'constant': {},
-                'free': {},
-                'constrained': {}
-                }
-        },
-        sep='.'
-    )
-
-    subkeys = cfg['replicate']
-
-    normpars = {}
-    for key_general, varcfg in iterate_varcfgs(cfg):
-        key_general_str = '.'.join(key_general)
-        varcfg.setdefault(state, True)
-
-        label_general = varcfg['label']
-
-        normpars_i = normpars.setdefault(key_general[0], [])
-        for subkey in subkeys:
-            key = key_general + subkey
-            key_str = '.'.join(key)
-            subkey_str = '.'.join(subkey)
-
-            varcfg['label'] = (label := label_general.copy())
-            label['key'] = key_str
-            label.setdefault('text', key_str)
-
-            par = Parameters.from_numbers(**varcfg)
-            if par.is_constrained:
-                target = ('constrained', path)
-            elif par.is_fixed:
-                target = ('constant', path)
-            else:
-                target = ('free', path)
-
-            ret[('parameter_node',)+target+key] = par
-
-            ptarget = ('parameter', target)
-            for subpar in par.parameters:
-                ret[ptarget+key] = subpar
-
-            ntarget = ('parameter', 'normalized', path)
-            for subpar in par.norm_parameters:
-                ret[ntarget+key] = subpar
-
-                normpars_i.append(subpar)
-
-        for name, np in normpars.items():
-            if np:
-                ssq = SumSq(f'nuisance for {pathstr}.{name}')
-                (n.output for n in np) >> ssq
-                ssq.close()
-                ret[('stat', 'nuisance_parts', path, name)] = ssq
-
-    return ret
diff --git a/subtrees/dagflow/dagflow/datadescriptor.py b/subtrees/dagflow/dagflow/datadescriptor.py
deleted file mode 100755
index 582978b3e7e652b5b73706fcbc7f092fb43570bb..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/datadescriptor.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from typing import List, Optional
-
-from numpy.typing import DTypeLike
-
-from .types import EdgesLike, ShapeLike
-
-
-class DataDescriptor:
-    """
-    The data descriptor class stores `dtype`, `shape`,
-    `axes_edges` and `axes_nodes` information.
-    """
-
-    __slots__ = ("dtype", "shape", "axes_edges", "axes_nodes")
-    dtype: DTypeLike  # DTypeLike is already Optional
-    shape: Optional[ShapeLike]
-    axes_edges: Optional[List[EdgesLike]]
-    axes_nodes: Optional[List[EdgesLike]]
-
-    def __init__(
-        self,
-        dtype: DTypeLike,  # DTypeLike is already Optional
-        shape: Optional[ShapeLike],
-        axes_edges: Optional[List[EdgesLike]] = None,
-        axes_nodes: Optional[List[EdgesLike]] = None,
-    ) -> None:
-        """
-        Sets the attributes
-        """
-        self.dtype = dtype
-        self.shape = shape
-        self.axes_edges = axes_edges or []
-        self.axes_nodes = axes_nodes or []
-
-    @property
-    def dim(self) -> int:
-        """ Return the dimension of the data """
-        return len(self.shape)
diff --git a/subtrees/dagflow/dagflow/edges.py b/subtrees/dagflow/dagflow/edges.py
deleted file mode 100644
index 2a094fdc4e5a3774a6732d7abd1d3f84999c5a92..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/edges.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from collections.abc import Sequence
-
-from .exception import CriticalError
-from .iter import IsIterable
-
-from typing import List, Dict, Union
-
-class EdgeContainer:
-    _kw_edges: Dict
-    _pos_edges: List
-    _all_edges: Dict
-    _dtype = None
-
-    def __init__(self, iterable=None):
-        self._kw_edges = {}
-        self._pos_edges = []
-        self._all_edges = {}
-        if iterable:
-            self.add(iterable)
-
-    def add(self, value, *, positional: bool=True, keyword: bool=True):
-        if positional==keyword==False:
-            raise RuntimeError('Edge should be at least positional or a keyword')
-
-        if IsIterable(value):
-            for v in value:
-                self.add(v, positional=positional, keyword=keyword)
-            return self
-        if self._dtype and not isinstance(value, self._dtype):
-            raise RuntimeError(
-                f"The type {type(value)} of the data doesn't correpond "
-                f"to {self._dtype}!"
-            )
-        name = value.name
-        if not name:
-            raise RuntimeError("May not add objects with undefined name")
-        if name in self._all_edges:
-            raise RuntimeError("May not add duplicated items")
-
-        if positional:
-            self._pos_edges.append(value)
-        if keyword:
-            self._kw_edges[name] = value
-        self._all_edges[name]=value
-        return self
-
-    def allocate(self) -> bool:
-        return all(edge.allocate() for edge in self._all_edges.values())
-
-    def __getitem__(self, key):
-        if isinstance(key, str):
-            return self._kw_edges[key]
-        elif isinstance(key, (int, slice)):
-            return self._pos_edges[key]
-        elif isinstance(key, Sequence):
-            return tuple(self.__getitem__(k) for k in key)
-        raise TypeError(f"Unsupported key type: {type(key).__name__}")
-
-    def get(self, key, default = None):
-        try:
-            return self.__getitem__(key)
-        except Exception:
-            return default
-
-    def has_key(self, key: str) -> bool:
-        return key in self._kw_edges
-
-    def get_pos(self, idx: int):
-        """Get positional leg"""
-        return self._pos_edges[idx]
-    iat = get_pos
-
-    def index(self, arg):
-        return self._pos_edges.index(arg)
-
-    def get_kw(self, key: str):
-        """Return keyword leg"""
-        return self._kw_edges[key]
-    kat = get_kw
-
-    def len_pos(self) -> int:
-        """Returns a number of the positional legs"""
-        return len(self._pos_edges)
-    __len__ = len_pos
-
-    def len_kw(self) -> int:
-        """Returns a number of the keyword legs"""
-        return len(self._kw_edges)
-
-    def len_all(self) -> int:
-        """Returns a number of the all legs"""
-        return len(self._all_edges)
-
-    def __iter__(self):
-        return iter(self._pos_edges)
-
-    def iter_all(self):
-        return iter(self._all_edges.values())
-
-    def iter_data(self):
-        for edge in self._pos_edges:
-            yield edge.data
-
-    def iter(self, key: Union[int, str, slice, Sequence]):
-        if isinstance(key, int):
-            yield self._pos_edges[key]
-        elif isinstance(key, str):
-            yield self._kw_edges[key]
-        elif isinstance(key, slice):
-            yield from self._pos_edges[key]
-        elif isinstance(key, Sequence):
-            for subkey in key:
-                if isinstance(subkey, int):
-                    yield self._pos_edges[subkey]
-                elif isinstance(subkey, str):
-                    yield self._kw_edges[subkey]
-                elif isinstance(subkey, slice):
-                    yield from self._pos_edges[subkey]
-                else:
-                    raise CriticalError(f'Invalid subkey type {type(subkey).__name__}')
-        else:
-            raise CriticalError(f'Invalid key type {type(key).__name__}')
-
-    def __contains__(self, name):
-        return name in self._all_edges
-
-    def _replace(self, old, new):
-        replaced = False
-
-        for k, v in self._kw_edges.items():
-            if old is v:
-                self._kw_edges[k] = new
-                replaced = True
-
-        for i, v in enumerate(self._pos_edges):
-            if old is v:
-                self._pos_edges[i] = new
-                replaced = True
-
-        if not replaced:
-            raise CriticalError('Unable to replace an output/input (not found)')
diff --git a/subtrees/dagflow/dagflow/exception.py b/subtrees/dagflow/dagflow/exception.py
deleted file mode 100644
index b57aaf94616a82daa43c318f0667524d6d5beb1c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/exception.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from typing import Optional
-from .types import NodeT, InputT, OutputT
-
-
-class DagflowError(Exception):
-    node: Optional[NodeT]
-    input: Optional[InputT]
-    output: Optional[OutputT]
-
-    def __init__(
-        self,
-        message: str,
-        node: Optional[NodeT] = None,
-        *,
-        input: Optional[InputT] = None,
-        output: Optional[OutputT] = None,
-    ):
-        if node:
-            message = f"{message} [node={node.name if 'name' in dir(node) else node}]"
-        if input:
-            message = f"{message} [input={input.name if 'name' in dir(input) else input}]"
-        if output:
-            message = f"{message} [output={output.name if 'name' in dir(output) else output}]"
-        super().__init__(message)
-        self.node = node
-        self.input = input
-        self.output = output
-
-        if node is not None:
-            node._exception = message
-
-class CriticalError(DagflowError):
-    pass
-
-
-class NoncriticalError(DagflowError):
-    pass
-
-class InitializationError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "Wrong initialization!"
-        super().__init__(message, *args, **kwargs)
-
-
-class AllocationError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "Unable to allocate memory!"
-        super().__init__(message, *args, **kwargs)
-
-class ClosingError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "An exception occured during closing procedure!"
-        super().__init__(message, *args, **kwargs)
-
-class OpeningError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "An exception occured during opening procedure!"
-        super().__init__(message, *args, **kwargs)
-
-class ClosedGraphError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "Unable to modify a closed graph!"
-        super().__init__(message, *args, **kwargs)
-
-class UnclosedGraphError(CriticalError):
-    def __init__(self, message : Optional[str]=None, *args, **kwargs):
-        if not message:
-            message = "The graph is not closed!"
-        super().__init__(message, *args, **kwargs)
-
-
-class TypeFunctionError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "An exception occurred during type function processing!"
-        super().__init__(message, *args, **kwargs)
-
-class ReconnectionError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "The object is already connected!"
-        super().__init__(message, *args, **kwargs)
-
-class ConnectionError(CriticalError):
-    def __init__(self, message: Optional[str] = None, *args, **kwargs):
-        if not message:
-            message = "An exception occurred during connection!"
-        super().__init__(message, *args, **kwargs)
diff --git a/subtrees/dagflow/dagflow/graph.py b/subtrees/dagflow/dagflow/graph.py
deleted file mode 100644
index dd0b9f1c32dca1f8b5f608bd16c48e6b43db506e..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/graph.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from .exception import (
-    UnclosedGraphError,
-    ClosedGraphError,
-    InitializationError
-)
-from .logger import Logger, get_logger
-from .node_group import NodeGroup
-
-from typing import Optional
-
-class Graph(NodeGroup):
-    """
-    The graph class:
-    holds nodes as a list; has a name, label and logger; used as a context manager
-    """
-
-    _context_graph: Optional['Graph'] = None
-    _label: Optional[str] = None
-    _name = "graph"
-    _close: bool = False
-    _closed: bool = False
-    _debug: bool = False
-    _logger: Logger
-
-    def __init__(self, *args, close: bool = False, **kwargs):
-        super().__init__(*args)
-        self._label = kwargs.pop("label", None)
-        self._name = kwargs.pop("name", "graph")
-        self._debug = kwargs.pop("debug", False)
-        self._close = close
-        # init or get default logger
-        self._logger = get_logger(
-            filename=kwargs.pop("logfile", None),
-            debug=self.debug,
-            console=kwargs.pop("console", True),
-            formatstr=kwargs.pop("logformat", None),
-            name=kwargs.pop("loggername", None),
-        )
-        if kwargs:
-            raise InitializationError(f"Unparsed arguments: {kwargs}!")
-
-    @property
-    def debug(self) -> bool:
-        return self._debug
-
-    @property
-    def logger(self) -> Logger:
-        return self._logger
-
-    @property
-    def name(self) -> str:
-        return self._name
-
-    @property
-    def closed(self) -> bool:
-        return self._closed
-
-    def _add_output(self, *args, **kwargs):
-        """Dummy method"""
-        pass
-
-    def _add_input(self, *args, **kwargs):
-        """Dummy method"""
-        pass
-
-    def label(self):
-        """Returns formatted label"""
-        if self._label:
-            return self._label.format(self._label, nodes=len(self._nodes))
-
-    def add_node(self, name, **kwargs):
-        """
-        Adds a node, if the graph is opened.
-        It is possible to pass the node class via the `nodeclass` arg
-        (default: `FunctionNode`)
-        """
-        if not self.closed:
-            from .nodes import FunctionNode
-            return kwargs.pop("nodeclass", FunctionNode)(
-                name, graph=self, **kwargs
-            )
-        raise ClosedGraphError(node=name)
-
-    def add_nodes(self, nodes, **kwargs):
-        """Adds nodes"""
-        if not self.closed:
-            return (self.add_node(node, **kwargs) for node in nodes)
-        raise ClosedGraphError(node=nodes)
-
-    def print(self):
-        print(f"Graph with {len(self._nodes)} nodes")
-        for node in self._nodes:
-            node.print()
-
-    @classmethod
-    def current(cls):
-        return cls._context_graph
-
-    def __enter__(self):
-        Graph._context_graph = self
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        Graph._context_graph = None
-        if exc_val is not None:
-            raise exc_val
-
-        if self._close:
-            self.close()
-
-    def close(self, **kwargs) -> bool:
-        """Closes the graph"""
-        # TODO: implement cross-closure of several graphs
-        if self._closed:
-            return True
-        self.logger.debug(f"Graph '{self.name}': Closing...")
-        self.logger.debug(f"Graph '{self.name}': Update types...")
-        for node in self._nodes:
-            node.update_types()
-        self.logger.debug(f"Graph '{self.name}': Allocate memory...")
-        for node in self._nodes:
-            node.allocate(**kwargs)
-        self.logger.debug(f"Graph '{self.name}': Closing nodes...")
-        self._closed = all(node.close(**kwargs) for node in self._nodes)
-        if not self._closed:
-            raise UnclosedGraphError("The graph is still open!")
-        self.logger.debug(f"Graph '{self.name}': The graph is closed!")
-        return self._closed
-
-    def open(self, force: bool = False) -> bool:
-        """Opens the graph recursively"""
-        if not self._closed and not force:
-            return True
-        self.logger.debug(f"Graph '{self.name}': Opening...")
-        self._closed = not all(node.open(force) for node in self._nodes)
-        if self._closed:
-            raise UnclosedGraphError("The graph is still open!")
-        return not self._closed
diff --git a/subtrees/dagflow/dagflow/graphviz.py b/subtrees/dagflow/dagflow/graphviz.py
deleted file mode 100644
index a6a9cb74a9bb3f8de874838c7aeaba706456dc16..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/graphviz.py
+++ /dev/null
@@ -1,373 +0,0 @@
-from .input import Input
-from .output import Output
-from .printl import printl
-from .types import NodeT
-
-from numpy import square
-from collections.abc import Sequence
-from typing import Union, Set, Optional, Dict
-
-try:
-    import pygraphviz as G
-except ImportError:
-    GraphDot = None
-    savegraph = None
-else:
-
-    def savegraph(graph, *args, **kwargs):
-        gd = GraphDot(graph, **kwargs)
-        gd.savegraph(*args)
-
-    class EdgeDef:
-        __slots__ = ('nodein', 'nodemid', 'nodeout', 'edges')
-        def __init__(self, nodeout, nodemid, nodein, edge):
-            self.nodein = nodein
-            self.nodemid = nodemid
-            self.nodeout = nodeout
-            self.edges = [edge]
-
-        def append(self, edge):
-            self.edges.append(edge)
-
-    class GraphDot:
-        _graph = None
-        _node_id_map: dict
-
-        _show: Set[str]
-        def __init__(
-            self,
-            dag,
-            graphattr: dict={}, edgeattr: dict={}, nodeattr: dict={},
-            show: Union[Sequence,str] = ['type', 'mark', 'label'],
-            **kwargs
-        ):
-            if show=='all' or 'all' in show:
-                self._show = {'type', 'mark', 'label', 'status', 'data', 'data_summary'}
-            else:
-                self._show = set(show)
-
-            graphattr = dict(graphattr)
-            graphattr.setdefault("rankdir", "LR")
-            graphattr.setdefault("dpi", 300)
-
-            edgeattr = dict(edgeattr)
-            edgeattr.setdefault("fontsize", 10)
-            edgeattr.setdefault("labelfontsize", 9)
-            edgeattr.setdefault("labeldistance", 1.2)
-
-            nodeattr = dict(nodeattr)
-
-            self._node_id_map = {}
-            self._nodes = {}
-            self._nodes_open_input = {}
-            self._nodes_open_output = {}
-            self._edges: Dict[str, EdgeDef] = {}
-            self._graph = G.AGraph(directed=True, strict=False, **kwargs)
-
-            if graphattr:
-                self._graph.graph_attr.update(graphattr)
-            if edgeattr:
-                self._graph.edge_attr.update(edgeattr)
-            if nodeattr:
-                self._graph.node_attr.update(nodeattr)
-
-            if label := kwargs.pop("label", dag.label()):
-                self.set_label(label)
-            self._transform(dag)
-
-        def _transform(self, dag):
-            for nodedag in dag._nodes:
-                self._add_node(nodedag)
-            for nodedag in dag._nodes:
-                self._add_open_inputs(nodedag)
-                self._add_edges(nodedag)
-            self.update_style()
-
-        def get_id(self, object, suffix: str="") -> str:
-            name = type(object).__name__
-            omap = self._node_id_map.setdefault(name, {})
-            onum = omap.setdefault(object, len(omap))
-            return f"{name}_{onum}{suffix}"
-
-        def get_label(self, node: NodeT) -> str:
-            text = node.label('graph') or node.name
-            try:
-                out0 = node.outputs[0]
-            except IndexError:
-                shape0 = '?'
-                dtype0 = '?'
-            else:
-                shape0 = out0.dd.shape
-                if shape0 is None:
-                    shape0 = '?'
-                shape0="x".join(str(s) for s in shape0)
-
-                dtype0 = out0.dd.dtype
-                if dtype0 is None:
-                    dtype0 = '?'
-                else:
-                    dtype0 = dtype0.char
-
-            nout_pos = len(node.outputs)
-            nout_nonpos = node.outputs.len_all()-nout_pos
-            if nout_nonpos==0:
-                if nout_pos>1:
-                    nout = f'→{nout_pos}'
-                else:
-                    nout = ''
-            else:
-                nout=f'→{nout_pos}+{nout_nonpos}'
-
-            nin_pos = len(node.inputs)
-            nin_nonpos = node.inputs.len_all() - nin_pos
-            if nin_nonpos==0:
-                if nin_pos>1:
-                    nin = f'{nin_pos}→'
-                else:
-                    nin = ''
-            else:
-                nin=f'{nin_pos}+{nin_nonpos}→'
-
-            nlegs = f' {nin}{nout}'.replace('→→', '→')
-
-            left, right = [], []
-            info_type = f"[{shape0}]{dtype0}{nlegs}"
-            if 'type' in self._show:
-                left.append(info_type)
-            if 'mark' in self._show and node.mark is not None:
-                left.append(node.mark)
-            if 'label' in self._show:
-                right.append(text)
-            if 'status' in self._show:
-                status = []
-                if node.types_tainted: status.append('types_tainted')
-                if node.tainted: status.append('tainted')
-                if node.frozen: status.append('frozen')
-                if node.frozen_tainted: status.append('frozen_tainted')
-                if node.invalid: status.append('invalid')
-                if not node.closed: status.append('open')
-                if status:
-                    right.append(status)
-
-            show_data = 'data' in self._show
-            show_data_summary = 'data_summary' in self._show
-            if show_data or show_data_summary:
-                data = None
-                tainted = out0.tainted and 'tainted' or 'updated'
-                try:
-                    data = out0.data
-                except Exception:
-                    right.append('caught exception')
-                    data = out0._data
-
-                if show_data:
-                    right.append(str(data).replace('\n', '\\l')+'\\l')
-                if show_data_summary:
-                    sm = data.sum()
-                    sm2 = square(data).sum()
-                    mn = data.min()
-                    mx = data.max()
-                    right.append((f'Σ={sm:.2g}', f'Σ²={sm2:.2g}', f'min={mn:.2g}', f'max={mx:.2g}', f'{tainted}'))
-
-            if node.exception is not None:
-                right.append(node.exception)
-
-            return self._combine_labels((left, right))
-
-        def _combine_labels(self, labels: Union[Sequence,str]) -> str:
-            if isinstance(labels, str):
-                return labels
-
-            slabels = [self._combine_labels(l) for l in labels]
-            return f"{{{'|'.join(slabels)}}}"
-
-        def _add_node(self, nodedag):
-            styledict = {
-                "shape": "Mrecord",
-                "label": self.get_label(nodedag)
-            }
-            target = self.get_id(nodedag)
-            self._graph.add_node(target, **styledict)
-            nodedot = self._graph.get_node(target)
-            self._nodes[nodedag] = nodedot
-
-        def _add_open_inputs(self, nodedag):
-            for input in nodedag.inputs:
-                if not input.connected():
-                    self._add_open_input(input, nodedag)
-
-        def _add_open_input(self, input, nodedag):
-            styledict = {}
-            source = self.get_id(input, "_in")
-            target = self.get_id(nodedag)
-
-            self._graph.add_node(source, label="", shape="none", **styledict)
-            self._graph.add_edge(source, target, **styledict)
-
-            nodein = self._graph.get_node(source)
-            edge = self._graph.get_edge(source, target)
-            nodeout = self._graph.get_node(target)
-
-            self._nodes_open_input[input] = nodein
-            self._edges[input] = EdgeDef(nodein, None, nodeout, edge)
-
-        def _add_edges(self, nodedag):
-            for output in nodedag.outputs:
-                if output.connected():
-                    if len(output.child_inputs)>1:
-                        self._add_edges_multi(nodedag, output)
-                    else:
-                        self._add_edge(nodedag, output, output.child_inputs[0])
-                else:
-                    self._add_open_output(nodedag, output)
-
-        def _add_edges_multi(self, nodedag, output):
-            vnode = self.get_id(output, "_mid")
-            self._graph.add_node(vnode, label="", shape="none", width=0, height=0, penwidth=0, weight=10)
-            firstinput = output.child_inputs[0]
-            self._add_edge(nodedag, output, firstinput, vtarget=vnode)
-            for input in output.child_inputs:
-                self._add_edge(nodedag, output, input, vsource=vnode)
-
-        def _add_open_output(self, nodedag, output):
-            styledict = {}
-            source = self.get_id(nodedag)
-            target = self.get_id(output, "_out")
-            self._get_index(output, styledict, 'taillabel')
-
-            self._graph.add_node(target, label="", shape="none", **styledict)
-            self._graph.add_edge(
-                source, target, arrowhead="empty", **styledict
-            )
-            nodein = self._graph.get_node(source)
-            edge = self._graph.get_edge(source, target)
-            nodeout = self._graph.get_node(target)
-
-            self._nodes_open_output[output] = nodeout
-            self._edges[output] = EdgeDef(nodein, None, nodeout, edge)
-
-        def _get_index(self, leg, styledict: dict, target: str):
-            if isinstance(leg, Input):
-                container = leg.node.inputs
-            else:
-                container = leg.node.outputs
-            if container.len_all()<2:
-                return
-
-            try:
-                idx = container.index(leg)
-            except ValueError:
-                pass
-            else:
-                styledict[target] = str(idx)
-
-        def _add_edge(self, nodedag, output, input, *, vsource: Optional[str]=None, vtarget: Optional[str]=None) -> None:
-            styledict = {}
-
-            if vsource is not None:
-                source = vsource
-                styledict['arrowtail'] = 'none'
-            else:
-                source = self.get_id(nodedag)
-                self._get_index(output, styledict, 'taillabel')
-
-            if vtarget is not None:
-                target = vtarget
-                styledict['arrowhead'] = 'none'
-            else:
-                target = self.get_id(input.node)
-                self._get_index(input, styledict, 'headlabel')
-
-            self._graph.add_edge(source, target, **styledict)
-
-            nodein = self._graph.get_node(source)
-            edge = self._graph.get_edge(source, target)
-            nodeout = self._graph.get_node(target)
-
-            edgedef = self._edges.get(input, None)
-            if edgedef is None:
-                self._edges[input] = EdgeDef(nodein, None, nodeout, edge)
-            else:
-                edgedef.append(edge)
-
-        def _set_style_node(self, node, attr):
-            if node is None:
-                attr["color"] = "gray"
-            else:
-                if node.invalid:
-                    attr["color"] = "black"
-                elif node.being_evaluated:
-                    attr["color"] = "gold"
-                elif node.tainted:
-                    attr["color"] = "red"
-                elif node.frozen_tainted:
-                    attr["color"] = "blue"
-                elif node.frozen:
-                    attr["color"] = "cyan"
-                elif node.immediate:
-                    attr["color"] = "green"
-                else:
-                    attr["color"] = "forestgreen"
-
-                if node.exception is not None:
-                    attr["color"] = "magenta"
-
-        def _set_style_edge(self, obj, attrin, attr, attrout):
-            if isinstance(obj, Input):
-                if obj.connected():
-                    node = obj.parent_output.node
-                else:
-                    node = None
-                    self._set_style_node(node, attrin)
-            else:
-                node = obj.node
-                self._set_style_node(node, attrout)
-
-            self._set_style_node(node, attr)
-
-            if isinstance(obj, Input):
-                allocated_on_input = obj.owns_buffer
-                try:
-                    allocated_on_output = obj.parent_output.owns_buffer
-                except AttributeError:
-                    allocated_on_output = True
-            elif isinstance(obj, Output):
-                allocated_on_input = False
-                allocated_on_output = obj.owns_buffer
-            attr.update({
-                "dir": "both",
-                "arrowsize": 0.5
-                })
-            attr["arrowhead"] = attr["arrowhead"] or allocated_on_input  and 'dotopen' or 'odotopen'
-            attr["arrowtail"] = attr["arrowtail"] or allocated_on_output and 'dot' or 'odot'
-
-            if node:
-                if node.frozen:
-                    attrin["style"] = "dashed"
-                    attr["style"] = "dashed"
-                    # attr['arrowhead']='tee'
-                else:
-                    attr["style"] = ""
-
-        def update_style(self):
-            for nodedag, nodedot in self._nodes.items():
-                self._set_style_node(nodedag, nodedot.attr)
-
-            for object, edgedef in self._edges.items():
-                for edge in edgedef.edges:
-                    self._set_style_edge(
-                        object, edgedef.nodein.attr, edge.attr, edgedef.nodeout.attr
-                    )
-
-        def set_label(self, label):
-            self._graph.graph_attr["label"] = label
-
-        def savegraph(self, fname, verbose=True):
-            if verbose:
-                printl("Write output file:", fname)
-
-            if fname.endswith(".dot"):
-                self._graph.write(fname)
-            else:
-                self._graph.layout(prog="dot")
-                self._graph.draw(fname)
diff --git a/subtrees/dagflow/dagflow/input.py b/subtrees/dagflow/dagflow/input.py
deleted file mode 100644
index a632f05ccfb0271acfbe14d18b37d1ce3029be59..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/input.py
+++ /dev/null
@@ -1,301 +0,0 @@
-from typing import Iterator, Optional, Tuple, Union
-
-from numpy import zeros
-from numpy.typing import DTypeLike, NDArray
-
-from .datadescriptor import DataDescriptor
-from .edges import EdgeContainer
-from .exception import (
-    AllocationError,
-    ClosedGraphError,
-    InitializationError,
-    ReconnectionError,
-)
-from .iter import StopNesting
-from .output import Output
-from .shift import lshift
-from .types import EdgesLike, InputT, NodeT, ShapeLike
-
-
-class Input:
-    _own_data: Optional[NDArray] = None
-    _own_dd: DataDescriptor
-
-    _node: Optional[NodeT]
-    _name: Optional[str]
-
-    _parent_output: Optional[Output]
-    _child_output: Optional[Output]
-
-    _allocatable: bool = False
-    _owns_buffer: bool = False
-
-    _debug: bool = False
-
-    def __init__(
-        self,
-        name: Optional[str] = None,
-        node: Optional[NodeT] = None,
-        *,
-        child_output: Optional[Output] = None,
-        parent_output: Optional[Output] = None,
-        debug: Optional[bool] = None,
-        allocatable: bool = False,
-        data: Optional[NDArray] = None,
-        dtype: DTypeLike = None,
-        shape: Optional[ShapeLike] = None,
-        axes_edges: Optional[Tuple[EdgesLike]] = None,
-        axes_nodes: Optional[Tuple[EdgesLike]] = None,
-    ):
-        if data is not None and (
-            allocatable or dtype is not None or shape is not None
-        ):
-            raise InitializationError(input=self, node=node)
-
-        self._name = name
-        self._node = node
-        self._child_output = child_output
-        self._parent_output = parent_output
-        self._allocatable = allocatable
-        if debug is not None:
-            self._debug = debug
-        elif node:
-            self._debug = node.debug
-        else:
-            self._debug = False
-
-        self._own_dd = DataDescriptor(dtype, shape, axes_edges, axes_nodes)
-
-        if data is not None:
-            self.set_own_data(data, owns_buffer=True)
-
-    def __str__(self) -> str:
-        return (
-            f"→○ {self._name}"
-            if self._owns_buffer is None
-            else f"→● {self._name}"
-        )
-
-    def __repr__(self) -> str:
-        return self.__str__()
-
-    @property
-    def own_data(self) -> Optional[NDArray]:
-        return self._own_data
-
-    @property
-    def own_dd(self) -> DataDescriptor:
-        return self._own_dd
-
-    @property
-    def owns_buffer(self) -> bool:
-        return self._owns_buffer
-
-    def set_own_data(
-        self,
-        data,
-        *,
-        owns_buffer: bool,
-        axes_edges: EdgesLike = None,
-        axes_nodes: EdgesLike = None,
-    ):
-        if self.closed:
-            raise ClosedGraphError(
-                "Unable to set input data.", node=self._node, input=self
-            )
-        if self.own_data is not None:
-            raise AllocationError(
-                "Input already has data.", node=self._node, input=self
-            )
-
-        self._own_data = data
-        self._owns_buffer = owns_buffer
-        self.own_dd.dtype = data.dtype
-        self.own_dd.shape = data.shape
-        self.own_dd.axes_edges = axes_edges
-        self.own_dd.axes_nodes = axes_nodes
-
-    @property
-    def closed(self):
-        return self._node.closed if self.node else False
-
-    def set_child_output(
-        self, child_output: Output, force: bool = False
-    ) -> None:
-        if not self.closed:
-            return self._set_child_output(child_output, force)
-        raise ClosedGraphError(input=self, node=self.node, output=child_output)
-
-    def _set_child_output(
-        self, child_output: Output, force: bool = False
-    ) -> None:
-        if self.child_output and not force:
-            raise ReconnectionError(output=self.child_output, node=self.node)
-        self._child_output = child_output
-        child_output.parent_input = self
-
-    def set_parent_output(
-        self, parent_output: Output, force: bool = False
-    ) -> None:
-        if not self.closed:
-            return self._set_parent_output(parent_output, force)
-        raise ClosedGraphError(
-            input=self, node=self.node, output=parent_output
-        )
-
-    def _set_parent_output(
-        self, parent_output: Output, force: bool = False
-    ) -> None:
-        if self.connected() and not force:
-            raise ReconnectionError(output=self._parent_output, node=self.node)
-        self._parent_output = parent_output
-
-    @property
-    def name(self) -> str:
-        return self._name
-
-    @name.setter
-    def name(self, name) -> None:
-        self._name = name
-
-    @property
-    def node(self) -> NodeT:
-        return self._node
-
-    @property
-    def parent_node(self) -> NodeT:
-        return self._parent_output.node
-
-    @property
-    def logger(self):
-        return self._node.logger
-
-    @property
-    def child_output(self) -> InputT:
-        return self._child_output
-
-    @property
-    def invalid(self) -> bool:
-        """Checks validity of the parent output data"""
-        return self._parent_output.invalid
-
-    @property
-    def has_data(self) -> bool:
-        return self._own_data is not None
-
-    @property
-    def allocatable(self) -> bool:
-        return self._allocatable
-
-    @property
-    def debug(self) -> bool:
-        return self._debug
-
-    @invalid.setter
-    def invalid(self, invalid) -> None:
-        """Sets the validity of the current node"""
-        self._node.invalid = invalid
-
-    @property
-    def parent_output(self) -> Output:
-        return self._parent_output
-
-    @property
-    def data(self):
-        # NOTE: if the node is being evaluated, we must touch the node
-        #       (trigger deep evaluation), else we get the data directly
-        if self.node.being_evaluated:
-            return self._parent_output.data
-        return self._parent_output.get_data_unsafe()
-
-    def get_data_unsafe(self):
-        return self._parent_output.get_data_unsafe()
-
-    @property
-    def dd(self):
-        return self._parent_output.dd
-
-    @property
-    def tainted(self) -> bool:
-        return self._parent_output.tainted
-
-    def touch(self):
-        return self._parent_output.touch()
-
-    def taint(self, **kwargs) -> None:
-        self._node.taint(caller=self, **kwargs)
-
-    def taint_type(self, *args, **kwargs) -> None:
-        self._node.taint_type(*args, **kwargs)
-
-    def connected(self) -> bool:
-        return bool(self._parent_output)
-
-    def deep_iter_inputs(self, disconnected_only=False):
-        if disconnected_only and self.connected():
-            return iter(tuple())
-        raise StopNesting(self)
-
-    def deep_iter_child_outputs(self):
-        if self._child_output:
-            raise StopNesting(self._child_output)
-        return iter(tuple())
-
-    def __lshift__(self, other):
-        """
-        self << other
-        """
-        return lshift(self, other)
-
-    def __rrshift__(self, other):
-        """
-        other >> self
-        """
-        return lshift(self, other)
-
-    def allocate(self, **kwargs) -> bool:
-        if not self._allocatable or self.has_data:
-            return True
-
-        if self.own_dd.shape is None or self.own_dd.dtype is None:
-            raise AllocationError(
-                "No shape/type information provided for the Input",
-                node=self._node,
-                input=self,
-            )
-        try:
-            self._own_data = zeros(
-                self.own_dd.shape, self.own_dd.dtype, **kwargs
-            )
-        except Exception as exc:
-            raise AllocationError(
-                f"Input: {exc.args[0]}", node=self._node, input=self
-            ) from exc
-
-        return True
-
-
-class Inputs(EdgeContainer):
-    _dtype = Input
-
-    def __init__(self, iterable=None):
-        super().__init__(iterable)
-
-    def __str__(self):
-        return f"→[{tuple(obj.name for obj in self)}]○"
-
-    def deep_iter_inputs(
-        self, disconnected_only: bool = False
-    ) -> Iterator[Input]:
-        for input in self:
-            if disconnected_only and input.connected():
-                continue
-            yield input
-
-    def deep_iter_child_outputs(self) -> Iterator[Union[Input, Output]]:
-        for child_output in self:
-            yield child_output.child_output
-
-    def touch(self) -> None:
-        for input in self:
-            input.touch()
diff --git a/subtrees/dagflow/dagflow/input_extra.py b/subtrees/dagflow/dagflow/input_extra.py
deleted file mode 100644
index cb71207f024ed96ef50cec8121cb739173f9188e..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/input_extra.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from typing import Optional, Union
-
-class SimpleFormatter():
-    _base: str
-    _numfmt: str
-    def __init__(self, base: str, numfmt: str = '_{:02d}'):
-        self._base = base
-        self._numfmt = numfmt
-
-    @staticmethod
-    def from_string(string: str):
-        if '{' in string:
-             return string
-
-        return SimpleFormatter(string)
-
-    def format(self, num: int) -> str:
-        if num>0:
-            return self._base+self._numfmt.format(num)
-
-        return self._base
-
-
-class MissingInputHandler:
-    """
-    Handler to implement behaviour when an output
-    is connected to a missing input with >>/<<
-    """
-
-    _node = None
-
-    def __init__(self, node=None):
-        self.node = node
-
-    @property
-    def node(self):
-        return self._node
-
-    @node.setter
-    def node(self, node):
-        self._node = node
-
-    def __call__(self, idx=None, scope=None):
-        pass
-
-
-class MissingInputFail(MissingInputHandler):
-    """Default missing input handler: issues and exception"""
-
-    def __init__(self, node=None):
-        super().__init__(node)
-
-    def __call__(self, idx=None, scope=None):
-        raise RuntimeError(
-            "Unable to iterate inputs further. "
-            "No additional inputs may be created"
-        )
-
-
-class MissingInputAdd(MissingInputHandler):
-    """Adds an input for each output in >> operator"""
-
-    input_fmt: Union[str,SimpleFormatter] = SimpleFormatter("input", "_{:02d}")
-    input_kws: dict
-    output_fmt: Union[str,SimpleFormatter] = SimpleFormatter("output", "_{:02d}")
-    output_kws: dict
-
-    def __init__(
-        self,
-        node=None,
-        *,
-        input_fmt: Optional[Union[str,SimpleFormatter]] = None,
-        input_kws: Optional[dict] = None,
-        output_fmt: Optional[Union[str,SimpleFormatter]] = None,
-        output_kws: Optional[dict] = None,
-    ):
-        if input_kws is None:
-            input_kws = {}
-        if output_kws is None:
-            output_kws = {}
-        super().__init__(node)
-        self.input_kws = input_kws
-        self.output_kws = output_kws
-        if input_fmt is not None:
-            self.input_fmt = SimpleFormatter.from_string(input_fmt)
-        if output_fmt is not None:
-            self.output_fmt = SimpleFormatter.from_string(output_fmt)
-
-    def __call__(self, idx=None, scope=None, **kwargs):
-        kwargs_combined = dict(self.input_kws, **kwargs)
-        return self.node._add_input(
-            self.input_fmt.format(
-                idx if idx is not None else len(self.node.inputs)
-            ),
-            **kwargs_combined,
-        )
-
-
-class MissingInputAddPair(MissingInputAdd):
-    """
-    Adds an input for each output in >> operator.
-    Adds an output for each new input
-    """
-
-    def __init__(self, node=None, **kwargs):
-        super().__init__(node, **kwargs)
-
-    def __call__(self, idx=None, scope=None):
-        idx_out = len(self.node.outputs)
-        out = self.node._add_output(
-            self.output_fmt.format(idx_out), **self.output_kws
-        )
-        return super().__call__(idx, child_output=out, scope=scope)
-
-
-class MissingInputAddOne(MissingInputAdd):
-    """
-    Adds an input for each output in >> operator.
-    Adds only one output if needed
-    """
-
-    add_child_output = False
-
-    def __init__(self, node=None, *, add_child_output: bool = False, **kwargs):
-        super().__init__(node, **kwargs)
-        self.add_child_output = add_child_output
-
-    def __call__(self, idx=None, scope=None):
-        if (idx_out := len(self.node.outputs)) == 0:
-            out = self.node._add_output(
-                self.output_fmt.format(idx_out), **self.output_kws
-            )
-        else:
-            out = self.node.outputs[-1]
-        if self.add_child_output:
-            return super().__call__(idx, child_output=out, scope=scope)
-        return super().__call__(idx, scope=scope)
-
-
-class MissingInputAddEach(MissingInputAdd):
-    """
-    Adds an input for each output; adds a new output for each block (for each >> operation)
-    """
-
-    add_child_output = False
-    scope = 0
-
-    def __init__(self, node=None, *, add_child_output=False, **kwargs):
-        super().__init__(node, **kwargs)
-        self.add_child_output = add_child_output
-
-    def __call__(self, idx=None, scope=None):
-        if scope == self.scope != 0:
-            out = self.node.outputs[-1]
-        else:
-            out = self.node._add_output(
-                self.output_fmt.format(len(self.node.outputs)),
-                **self.output_kws,
-            )
-            self.scope = scope
-        if self.add_child_output:
-            return super().__call__(idx, child_output=out, scope=scope)
-        return super().__call__(idx, scope=scope)
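The handlers above decide what happens when `>>` needs an input that does not exist yet. A rough standalone sketch of the `MissingInputAddOne` behaviour, using toy classes instead of the real node API:

    # Toy sketch of MissingInputAddOne: every request creates a new input,
    # while the single shared "result" output is created only once.
    class ToyNode:
        def __init__(self):
            self.inputs = []
            self.outputs = []

        def _add_input(self, name):
            self.inputs.append(name)
            return name

        def _add_output(self, name):
            self.outputs.append(name)
            return name

    class ToyAddOne:
        def __init__(self, node):
            self.node = node

        def __call__(self):
            if not self.node.outputs:          # lazily create the only output
                self.node._add_output("result")
            return self.node._add_input(f"input_{len(self.node.inputs):02d}")

    node = ToyNode()
    handler = ToyAddOne(node)
    for _ in range(3):                         # three `>>` connections ask for inputs
        handler()
    print(node.inputs)                         # ['input_00', 'input_01', 'input_02']
    print(node.outputs)                        # ['result']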
diff --git a/subtrees/dagflow/dagflow/iter.py b/subtrees/dagflow/dagflow/iter.py
deleted file mode 100644
index 09eaef8d6ee13f497a4b1fe1a4d75fb32e0a9f1e..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/iter.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from collections.abc import Iterable
-from itertools import islice
-
-class StopNesting(Exception):
-    def __init__(self, object):
-        self.object = object
-
-def IsIterable(obj):
-    return isinstance(obj, Iterable) and not isinstance(obj, str)
-
-def nth(iterable, n):
-    "Returns the nth item or a default value"
-    return next(islice(iterable, n, None)) if n > -1 else tuple(iterable)[n]
-
diff --git a/subtrees/dagflow/dagflow/iterators.py b/subtrees/dagflow/dagflow/iterators.py
deleted file mode 100644
index 7a4b59c0d75f851d037269125447e23ebd1806ce..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/iterators.py
+++ /dev/null
@@ -1,42 +0,0 @@
-
-from .iter import IsIterable, StopNesting
-
-
-def get_proper_iterator(obj, methodname, onerror, **kwargs):
-    if methodname:
-        if method := getattr(obj, methodname, None):
-            return method(**kwargs)
-    if IsIterable(obj):
-        return obj
-    raise RuntimeError(
-        f"Do not know how to get an iterator for '{onerror}'! "
-        f"{obj=}, {type(obj)=}"
-    )
-
-
-def deep_iterate(obj, methodname, onerror, **kwargs):
-    try:
-        iterable = get_proper_iterator(obj, methodname, onerror, **kwargs)
-        if isinstance(iterable, dict):
-            raise StopNesting(iterable)
-        for element in iterable:
-            yield from deep_iterate(element, methodname, onerror, **kwargs)
-    except StopNesting as sn:
-        yield sn.object
-
-
-def iter_inputs(inputs, disconnected_only=False):
-    return deep_iterate(
-        inputs,
-        "deep_iter_inputs",
-        "inputs",
-        disconnected_only=disconnected_only,
-    )
-
-
-def iter_outputs(outputs):
-    return deep_iterate(outputs, "deep_iter_outputs", "outputs")
-
-
-def iter_child_outputs(inputs):
-    return deep_iterate(inputs, "deep_iter_child_outputs", "child_outputs")
diff --git a/subtrees/dagflow/dagflow/legs.py b/subtrees/dagflow/dagflow/legs.py
deleted file mode 100644
index 91c1e204e6d3000dd840af983e8d865ee78f99b0..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/legs.py
+++ /dev/null
@@ -1,107 +0,0 @@
-
-from . import input_extra
-from .input import Inputs
-from .output import Outputs
-from .shift import lshift, rshift
-from .iter import StopNesting
-
-class Legs:
-    inputs: Inputs
-    outputs: Outputs
-    def __init__(self, inputs=None, outputs=None, missing_input_handler=None):
-        self._missing_input_handler = missing_input_handler
-        self.inputs = Inputs(inputs)
-        self.outputs = Outputs(outputs)
-
-    @property
-    def _missing_input_handler(self):
-        return self.__missing_input_handler
-
-    @_missing_input_handler.setter
-    def _missing_input_handler(self, handler):
-        if handler:
-            if isinstance(handler, str):
-                sethandler = getattr(input_extra, handler)(self)
-            elif isinstance(handler, type):
-                sethandler = handler(self)
-            else:
-                sethandler = handler
-                sethandler.node = self
-        elif hasattr(self, 'missing_input_handler'):
-            sethandler = self.missing_input_handler
-        else:
-            sethandler = input_extra.MissingInputFail(self)
-        self.__missing_input_handler = sethandler
-
-    def __getitem__(self, key):
-        if isinstance(key, (int, slice, str)):
-            return self.outputs[key]
-        if (y := len(key)) != 2:
-            raise ValueError(f"Legs key should be of length 2, but given {y}!")
-        ikey, okey = key
-        if ikey and okey:
-            if isinstance(ikey, (int, str)):
-                ikey = (ikey,)
-            if isinstance(okey, (int, str)):
-                okey = (okey,)
-            return Legs(
-                self.inputs[ikey],
-                self.outputs[okey],
-                missing_input_handler=self.__missing_input_handler,
-            )
-        if ikey:
-            return self.inputs[ikey]
-        if okey:
-            return self.outputs[okey]
-        raise ValueError("Empty keys specified")
-
-    def get(self, key, default = None):
-        try:
-            return self.__getitem__(key)
-        except Exception:
-            return default
-
-    def __str__(self) -> str:
-        return f"→[{len(self.inputs)}],[{len(self.outputs)}]→"
-
-    def __repr__(self) -> str:
-        return self.__str__()
-
-    def deep_iter_outputs(self):
-        return iter(self.outputs)
-
-    def deep_iter_inputs(self, disconnected_only=False):
-        return iter(self.inputs)
-
-    def deep_iter_child_outputs(self):
-        raise StopNesting(self)
-
-    def print(self):
-        for i, input in enumerate(self.inputs):
-            print(i, input)
-        for i, output in enumerate(self.outputs):
-            print(i, output)
-
-    def __rshift__(self, other):
-        """
-        self >> other
-        """
-        return rshift(self, other)
-
-    def __rlshift__(self, other):
-        """
-        other << self
-        """
-        return rshift(self, other)
-
-    def __lshift__(self, other):
-        """
-        self << other
-        """
-        return lshift(self, other)
-
-    def __rrshift__(self, other):
-        """
-        other >> self
-        """
-        return lshift(self, other)
diff --git a/subtrees/dagflow/dagflow/lib/Array.py b/subtrees/dagflow/dagflow/lib/Array.py
deleted file mode 100644
index 4d597d3c40ac8ea18173ffeb8a87b983c9158a16..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Array.py
+++ /dev/null
@@ -1,95 +0,0 @@
-from typing import Optional, Sequence, Union
-
-from numpy import array
-from numpy.typing import ArrayLike, NDArray
-
-from ..exception import InitializationError
-from ..nodes import FunctionNode
-from ..output import Output
-from ..typefunctions import check_array_edges_consistency, check_edges_type
-
-
-class Array(FunctionNode):
-    """Creates a node with a single data output with predefined array"""
-
-    _mode: str
-    _data: NDArray
-    _output: Output
-
-    def __init__(
-        self,
-        name,
-        arr,
-        *,
-        mode: str = "store",
-        outname="array",
-        mark: Optional[str] = None,
-        edges: Optional[Union[Output, Sequence[Output]]] = None,
-        **kwargs,
-    ):
-        super().__init__(name, **kwargs)
-        self._mode = mode
-        if mark is not None:
-            self._mark = mark
-        self._data = array(arr, copy=True)
-
-        if mode == "store":
-            self._output = self._add_output(outname, data=self._data)
-        elif mode == "store_weak":
-            self._output = self._add_output(
-                outname, data=self._data, owns_buffer=False
-            )
-        elif mode == "fill":
-            self._output = self._add_output(
-                outname, dtype=self._data.dtype, shape=self._data.shape
-            )
-        else:
-            raise InitializationError(
-                f'Array: invalid mode "{mode}"', node=self
-            )
-
-        self._functions.update(
-            {
-                "store": self._fcn_store,
-                "store_weak": self._fcn_store,
-                "fill": self._fcn_fill,
-            }
-        )
-        self.fcn = self._functions[self._mode]
-
-        if edges is not None:
-            if isinstance(edges, Output):
-                self._output.dd.axes_edges.append(edges)
-            else:
-                # assume that the edges are Sequence[Output]
-                try:
-                    self._output.dd.axes_edges.extend(edges)
-                except Exception as exc:
-                    raise InitializationError(
-                        "Array: edges must be `Output` or `Sequence[Output]`, "
-                        f"but given {edges=}, {type(edges)=}"
-                    ) from exc
-
-        if mode == "store":
-            self.close()
-
-    def _fcn_store(self, *args):
-        return self._data
-
-    def _fcn_fill(self, *args):
-        data = self._output._data
-        data[:] = self._data
-        return data
-
-    def _typefunc(self) -> None:
-        check_edges_type(self, slice(None), "array") # checks List[Output]
-        check_array_edges_consistency(self, self._output) # checks dim and N+1 size
-
-    def _post_allocate(self) -> None:
-        if self._mode == "fill":
-            return
-
-        self._data = self._output._data
-
-    def set(self, data: ArrayLike, check_taint: bool = False) -> bool:
-        return self._output.set(data, check_taint)
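The three `mode` values above differ mainly in who owns the output buffer. A rough numpy-only analogy of the distinction (illustrative, no dagflow involved):

    import numpy as np

    arr = np.arange(4, dtype="d")

    # "store": the node keeps and exposes its own copy of the data
    store = arr.copy()

    # "store_weak": the output exposes the same buffer without owning it;
    # writes through the view are visible in the original array
    store_weak = store.view()
    store_weak[0] = -1.0
    assert store[0] == -1.0

    # "fill": only dtype/shape are fixed up front; the buffer is allocated
    # later and filled from the stored array on evaluation
    fill = np.zeros_like(arr)
    fill[:] = arr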
diff --git a/subtrees/dagflow/dagflow/lib/Cholesky.py b/subtrees/dagflow/dagflow/lib/Cholesky.py
deleted file mode 100644
index a4c3c12bd4121b529a3a7d4963c54501d7633596..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Cholesky.py
+++ /dev/null
@@ -1,60 +0,0 @@
-from ..input_extra import MissingInputAddPair
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    copy_input_to_output,
-    check_input_square_or_diag
-)
-from scipy.linalg import cholesky
-from numpy import sqrt
-
-class Cholesky(FunctionNode):
-    """Compute the Cholesky decomposition of a matrix V=LL̃ᵀ
-    1d input is considered to be a diagonal of square matrix"""
-    _mark: str = 'V→L'
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-                "missing_input_handler", MissingInputAddPair(input_fmt='matrix', output_fmt='L')
-        )
-        super().__init__(*args, **kwargs)
-
-        self._functions.update({
-                "square": self._fcn_square,
-                "diagonal": self._fcn_diagonal
-            })
-
-    def _fcn_square(self, _, inputs, outputs):
-        """Compute Cholesky decomposition using `scipy.linalg.cholesky`
-        NOTE: in-place computation (`overwrite_a=True`) works only for
-        Fortran-ordered arrays. Since C-ordered arrays are used by default,
-        the transposed view (which is F-ordered) is decomposed instead;
-        together with `lower=False` this leaves a lower-triangular matrix
-        in the output.
-        """
-        inputs.touch()
-
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            output[:] = input
-            cholesky(output.T, overwrite_a=True, lower=False) # produces L (!) inplace
-            # output[:]=cholesky(input, lower=True)
-
-    def _fcn_diagonal(self, _, inputs, outputs):
-        """Compute "Cholesky" decomposition using of a diagonal of a square matrix.
-        Elementwise sqrt is used.
-        """
-        inputs.touch()
-
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            sqrt(input, out=output)
-
-    def _typefunc(self) -> None:
-        check_has_inputs(self)
-        ndim = check_input_square_or_diag(self, slice(None))
-        copy_input_to_output(self, slice(None), slice(None))
-
-        if ndim==2:
-            self.fcn = self._functions["square"]
-            self._mark = 'V→L'
-        else:
-            self.fcn = self._functions["diagonal"]
-            self._mark = 'sqrt(Vᵢ)'
-
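The in-place trick described in `_fcn_square` can be checked directly with scipy: LAPACK overwrites Fortran-ordered data, and the transpose of a C-ordered buffer is exactly such a view, so decomposing `out.T` with `lower=False` leaves the lower-triangular factor in `out`. A small check, assuming numpy and scipy are available:

    import numpy as np
    from scipy.linalg import cholesky

    V = np.array([[4.0, 2.0], [2.0, 3.0]])   # symmetric positive-definite matrix
    out = np.empty_like(V)                    # C-ordered output buffer
    out[:] = V

    # out.T is an F-ordered view of the same memory, so LAPACK can work in place;
    # the upper factor written into out.T is the lower factor when read as out.
    cholesky(out.T, overwrite_a=True, lower=False)
    L = np.tril(out)
    assert np.allclose(L, np.linalg.cholesky(V))
    assert np.allclose(L @ L.T, V)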
diff --git a/subtrees/dagflow/dagflow/lib/Concatenation.py b/subtrees/dagflow/dagflow/lib/Concatenation.py
deleted file mode 100644
index ca9215db22da0e04bad8af40d43066715754c9c6..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Concatenation.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    combine_inputs_shape_to_output,
-    eval_output_dtype,
-)
-
-
-class Concatenation(FunctionNode):
-    """Creates a node with a single data output from all the inputs data"""
-
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        combine_inputs_shape_to_output(self, slice(None), "result")
-        eval_output_dtype(self, slice(None), "result")
-
-    def _fcn(self, _, inputs, outputs):
-        res = outputs["result"].data
-        res[:] = (inp.data for inp in inputs)
-        return res
diff --git a/subtrees/dagflow/dagflow/lib/CovmatrixFromCormatrix.py b/subtrees/dagflow/dagflow/lib/CovmatrixFromCormatrix.py
deleted file mode 100644
index 61d2834db8af4103e53313866485d490beed813f..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/CovmatrixFromCormatrix.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_input_square,
-    copy_input_to_output,
-    check_input_dimension,
-    check_inputs_multiplicable_mat
-)
-
-from numpy import multiply
-
-class CovmatrixFromCormatrix(FunctionNode):
-    """Compute covariance matrix from correlation matrix:
-        Vₖₘ=Cₖₘσₖσₘ
-    """
-
-    _mode: str
-    _mark: str = 'C→V'
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-        self._add_pair("matrix", "matrix", output_kws={'positional': True})
-        self._add_input("sigma", positional=False)
-
-    def _fcn(self, _, inputs, outputs):
-        inputs.touch()
-        C = inputs["matrix"].data
-        sigma = inputs["sigma"].data
-
-        V = outputs["matrix"].data
-
-        multiply(C, sigma[None,:], out=V)
-        multiply(V, sigma[:,None], out=V)
-
-    def _typefunc(self) -> None:
-        check_input_square(self, 'matrix')
-        check_input_dimension(self, 'sigma', 1)
-        check_inputs_multiplicable_mat(self, 'matrix', 'sigma')
-        copy_input_to_output(self, slice(None), slice(None))
-
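The two `multiply` calls above implement Vₖₘ = Cₖₘσₖσₘ by scaling columns and then rows. An equivalent check in plain numpy with illustrative values:

    import numpy as np

    C = np.array([[1.0, 0.5], [0.5, 1.0]])     # correlation matrix
    sigma = np.array([2.0, 3.0])                # standard deviations

    V = C * sigma[None, :]                       # scale columns by sigma_m
    V *= sigma[:, None]                          # scale rows by sigma_k
    assert np.allclose(V, np.outer(sigma, sigma) * C)   # V = [[4., 3.], [3., 9.]]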
diff --git a/subtrees/dagflow/dagflow/lib/Division.py b/subtrees/dagflow/dagflow/lib/Division.py
deleted file mode 100644
index 8fe9805f960772da6e42f366a1957d437a278ffc..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Division.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from numpy import copyto
-
-from .NodeManyToOne import NodeManyToOne
-
-class Division(NodeManyToOne):
-    """Division of all the inputs together"""
-
-    def _fcn(self, _, inputs, outputs):
-        out = outputs[0].data
-        copyto(out, inputs[0].data)
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                out /= input.data
-        return out
diff --git a/subtrees/dagflow/dagflow/lib/ElSumSq.py b/subtrees/dagflow/dagflow/lib/ElSumSq.py
deleted file mode 100644
index ef46e7f896bcf559781d809985fac5fe5d1b8749..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/ElSumSq.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from numpy import ndarray
-from numpy.typing import NDArray
-
-from numba import njit
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    check_inputs_same_dtype,
-    AllPositionals
-)
-
-@njit(cache=True)
-def _sumsq(data: NDArray, out: NDArray):
-    sm = 0.0
-    for v in data:
-        sm+=v*v
-    out[0]+=sm
-
-class ElSumSq(FunctionNode):
-    """Sum of the squared of all the inputs"""
-
-    _buffer: ndarray
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
-    def _fcn(self, _, inputs, outputs):
-        out = outputs["result"].data
-        out[0] = 0.0
-        for input in inputs:
-            _sumsq(input.data, out)
-        return out
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        check_inputs_same_dtype(self)
-        eval_output_dtype(self, AllPositionals, "result")
-        self.outputs[0].dd.shape=(1,)
diff --git a/subtrees/dagflow/dagflow/lib/Integrator.py b/subtrees/dagflow/dagflow/lib/Integrator.py
deleted file mode 100644
index 2a36a29606dbad2f785308400f7d57b28b2eda46..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Integrator.py
+++ /dev/null
@@ -1,162 +0,0 @@
-from numba import njit
-from numpy import empty, floating, integer, multiply
-from numpy.typing import NDArray
-
-from ..exception import TypeFunctionError
-from ..input_extra import MissingInputAddEach
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    check_input_dimension,
-    check_input_dtype,
-    check_input_edges_dim,
-    check_input_edges_equivalence,
-    check_input_shape,
-    check_input_subtype,
-    check_output_subtype,
-)
-from ..types import ShapeLike
-
-
-@njit(cache=True)
-def _integrate1d(result: NDArray, data: NDArray, ordersX: NDArray):
-    """
-    Sums up `data` blockwise according to `ordersX` and puts the result into `result`.
-    The 1-dimensional version of integration.
-    """
-    iprev = 0
-    for i, order in enumerate(ordersX):
-        inext = iprev + order
-        result[i] = data[iprev:inext].sum()
-        iprev = inext
-
-
-@njit(cache=True)
-def _integrate2d(
-    result: NDArray, data: NDArray, ordersX: NDArray, ordersY: NDArray
-):
-    """
-    Sums up `data` blockwise according to `ordersX` and `ordersY` and
-    puts the result into `result`. The 2-dimensional version of integration.
-    """
-    iprev = 0
-    for i, orderx in enumerate(ordersX):
-        inext = iprev + orderx
-        jprev = 0
-        for j, ordery in enumerate(ordersY):
-            jnext = jprev + ordery
-            result[i, j] = data[iprev:inext, jprev:jnext].sum()
-            jprev = jnext
-        iprev = inext
-
-
-class Integrator(FunctionNode):
-    """
-    The `Integrator` node performs integration (summation)
-    of every input using the `weights`, `ordersX` and `ordersY` (for 2d) inputs.
-
-    The `dim` and `precision=dtype` of the integration are chosen *automatically*
-    in the type function, based on the inputs.
-
-    For 2d integration the `ordersY` input must be connected.
-
-    Note that the `Integrator` preallocates a temporary buffer.
-    For the integration algorithm the `Numba`_ package is used.
-
-    .. _Numba: https://numba.pydata.org
-    """
-
-    __slots__ = ("__buffer",)
-
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault("missing_input_handler", MissingInputAddEach())
-        super().__init__(*args, **kwargs)
-        self._add_input("weights", positional=False)
-        self._add_input("ordersX", positional=False)
-        self._functions.update({1: self._fcn_1d, 2: self._fcn_2d})
-
-    def _typefunc(self) -> None:
-        """
-        The function to determine the dtype and shape.
-        Checks the inputs' dimensions, selects an integration algorithm,
-        and determines the dtype and shape of the outputs
-        """
-        check_has_inputs(self)
-        check_has_inputs(self, "weights")
-
-        input0 = self.inputs[0]
-        dim = 1 if self.inputs.get("ordersY", None) is None else 2
-        if (ndim := len(input0.dd.shape)) != dim:
-            raise TypeFunctionError(
-                f"The Integrator works only with {dim}d inputs, but the first is {ndim}d!",
-                node=self,
-            )
-        check_input_dimension(self, (slice(None), "weights"), dim)
-        check_input_shape(self, (slice(None), "weights"), input0.dd.shape)
-        check_input_subtype(self, input0, floating)
-        dtype = input0.dd.dtype
-        check_input_dtype(self, (slice(None), "weights"), dtype)
-
-        edgeslenX, edgesX = self.__check_orders("ordersX", input0.dd.shape[0])
-        shape = [edgeslenX]
-        edges = [edgesX]
-        if dim == 2:
-            edgeslenY, edgesY = self.__check_orders(
-                "ordersY", input0.dd.shape[1]
-            )
-            shape.append(edgeslenY)
-            edges.append(edgesY)
-        check_input_edges_equivalence(self, slice(None), edges)
-
-        shape = tuple(shape)
-        self.fcn = self._functions[dim]
-        for output in self.outputs:
-            output.dd.dtype = dtype
-            output.dd.shape = shape
-            output.dd.axes_edges = edges
-            # TODO: copy axes_nodes?
-
-    def __check_orders(self, name: str, shape: ShapeLike) -> tuple:
-        """
-        The method checks that the input `name` has dimension 1 and `integer` dtype,
-        and that `sum(orders) == len(input)`
-        """
-        check_input_dimension(self, name, 1)
-        orders = self.inputs[name]
-        check_output_subtype(self, orders, integer)
-        if (y := sum(orders.data)) != shape:
-            raise TypeFunctionError(
-                f"Orders '{name}' must be consistent with inputs len={shape}, "
-                f"but given '{y}'!",
-                node=self,
-                input=orders,
-            )
-        check_input_edges_dim(self, name, 1)
-        edges = orders.dd.axes_edges[0]
-        return edges.dd.shape[0] - 1, edges
-
-    def _post_allocate(self):
-        """Allocates the `buffer` within `weights`"""
-        weights = self.inputs["weights"].dd
-        self.__buffer = empty(shape=weights.shape, dtype=weights.dtype)
-
-    def _fcn_1d(self, _, inputs, outputs):
-        """1d version of integration function"""
-        weights = inputs["weights"].data
-        ordersX = inputs["ordersX"].data
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            multiply(input, weights, out=self.__buffer)
-            _integrate1d(output, self.__buffer, ordersX)
-        if self.debug:
-            return list(outputs.iter_data())
-
-    def _fcn_2d(self, _, inputs, outputs):
-        """2d version of integration function"""
-        weights = inputs["weights"].data  # (n, m)
-        ordersX = inputs["ordersX"].data  # (n, )
-        ordersY = inputs["ordersY"].data  # (m, )
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            multiply(input, weights, out=self.__buffer)
-            _integrate2d(output, self.__buffer, ordersX, ordersY)
-        if self.debug:
-            return list(outputs.iter_data())
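The 1d kernel above sums consecutive blocks of `data`, where block `i` has length `ordersX[i]`. The same result can be obtained with `numpy.add.reduceat` for a quick sanity check (illustrative numbers, all orders assumed positive):

    import numpy as np

    data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
    ordersX = np.array([2, 1, 3])              # block lengths, sum == len(data)

    # loop version, as in _integrate1d
    result = np.empty(len(ordersX))
    iprev = 0
    for i, order in enumerate(ordersX):
        result[i] = data[iprev:iprev + order].sum()
        iprev += order

    # numpy equivalent: reduce at the block start indices
    starts = np.concatenate(([0], np.cumsum(ordersX)[:-1]))
    assert np.allclose(result, np.add.reduceat(data, starts))   # [3., 3., 15.]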
diff --git a/subtrees/dagflow/dagflow/lib/IntegratorSampler.py b/subtrees/dagflow/dagflow/lib/IntegratorSampler.py
deleted file mode 100644
index e03427eabfb277a4e7de96c14357a2367e4c2cd1..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/IntegratorSampler.py
+++ /dev/null
@@ -1,262 +0,0 @@
-from typing import Literal, Optional
-
-from numpy import (
-    empty,
-    errstate,
-    integer,
-    linspace,
-    matmul,
-    meshgrid,
-    newaxis,
-)
-from numpy.polynomial.legendre import leggauss
-from numpy.typing import DTypeLike, NDArray
-
-from ..exception import InitializationError
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_input_dimension,
-    check_input_edges_dim,
-    check_inputs_number,
-    check_output_subtype,
-)
-
-
-def _gl_sampler(
-    orders: NDArray, sample: NDArray, weights: NDArray, edges: NDArray
-):
-    """
-    Uses `numpy.polynomial.legendre.leggauss` to sample points with weights
-    on the range [-1,1] and transforms to any range [a, b]
-    """
-    offset = 0
-    for i, n in enumerate(orders):
-        if n < 1:
-            continue
-        (
-            sample[offset : offset + n],
-            weights[offset : offset + n],
-        ) = leggauss(n)
-        # transforms to the original range [a, b]
-        sample[offset : offset + n] = 0.5 * (
-            sample[offset : offset + n] * (edges[i + 1] - edges[i])
-            + (edges[i + 1] + edges[i])
-        )
-        weights[offset : offset + n] *= 0.5 * (edges[i + 1] - edges[i])
-        # NOTE: the operations above may allocate additional memory in runtime!
-        offset += n
-
-
-class IntegratorSampler(FunctionNode):
-    """
-    The `IntegratorSampler` node creates a sample for the `Integrator` node.
-
-    There are several samplers for `1d` (`rect`, `trap`, `gl`) and only `2d`
-    for `2d` integrator, where `rect` is the rectangular, `trap` is the trapezoidal,
-    `gl` is the 1d Gauss-Legendre, and `2d` is the 2d Gauss-Legendre.
-
-    There is optional argument `offset` for the `rect` sampler,
-    taking the following values: `left`, `center`, or `right`.
-
-    There is no positional inputs. It is supposed that `orders` already have `edges`.
-    There are two outputs: 0 - `sample`, 1 - `weights`
-    """
-
-    __slots__ = ("__bufferX", "__bufferY")
-
-    def __init__(
-        self,
-        *args,
-        mode: Literal["rect", "trap", "gl", "2d"],
-        dtype: DTypeLike = "d",
-        align: Optional[Literal["left", "center", "right"]] = None,
-        **kwargs,
-    ) -> None:
-        super().__init__(*args, **kwargs)
-        if mode not in {"rect", "trap", "gl", "2d"}:
-            raise InitializationError(
-                f"Argument `mode` must be 'rect', 'trap', 'gl', or '2d', but given '{mode}'!",
-                node=self,
-            )
-        if align is not None and mode != "rect":
-            raise InitializationError(
-                "Argument 'align' is used only within 'rect' mode!", node=self
-            )
-        self._dtype = dtype
-        self._mode = mode
-        self._align = align if align is not None else "center"
-        self._add_input("ordersX", positional=False)
-        self._add_output("x")
-        if mode == "2d":
-            self._add_input("ordersY", positional=False)
-            self._add_output("y")
-        self._add_output("weights", positional=False)
-        self._functions.update(
-            {
-                "rect": self._fcn_rect,
-                "trap": self._fcn_trap,
-                "gl": self._fcn_gl1d,
-                "2d": self._fcn_gl2d,
-            }
-        )
-
-    @property
-    def mode(self) -> str:
-        return self._mode
-
-    @property
-    def dtype(self) -> DTypeLike:
-        return self._dtype
-
-    @property
-    def align(self) -> Optional[str]:
-        return self._align
-
-    def _typefunc(self) -> None:
-        """
-        The function to determine the dtype and shape.
-        Checks the inputs' dimensions, selects the sampling algorithm,
-        and determines the dtype and shape of the outputs
-        """
-        check_inputs_number(self, 0)
-        lenX, edgesX = self.__check_orders("ordersX")
-        if self.mode == "2d":
-            lenY, edgesY = self.__check_orders("ordersY")
-            shape = (lenX, lenY)
-            edges = [edgesX, edgesY]
-        else:
-            shape = (lenX,)
-            edges = [edgesX]
-        for output in (*self.outputs, self.outputs["weights"]):
-            output.dd.dtype = self.dtype
-            output.dd.shape = shape
-            output.dd.axes_edges = edges
-        self.fcn = self._functions[self.mode]
-
-    def __check_orders(self, name: str) -> tuple:
-        """
-        The method checks that the input `name` has dimension 1 and `integer` dtype,
-        and returns `sum(orders)` together with the corresponding edges
-        """
-        check_input_dimension(self, name, 1)
-        orders = self.inputs[name]
-        check_output_subtype(self, orders, integer)
-        check_input_edges_dim(self, name, 1)
-        return sum(orders.data), orders.dd.axes_edges[0]
-
-    def _post_allocate(self) -> None:
-        """Allocates the `buffer`"""
-        ordersX = self.inputs["ordersX"]
-        edgeshapeX = ordersX.dd.axes_edges[0].dd.shape[0] - 1
-        if self.mode == "rect":
-            shapeX = (4, edgeshapeX)
-        elif self.mode in {"trap", "gl"}:
-            shapeX = (edgeshapeX,)
-        else:
-            lenY = sum(self.inputs["ordersY"].data)
-            shapeY = (2, lenY)
-            self.__bufferY = empty(shape=shapeY, dtype=self.dtype)
-            lenX = sum(ordersX.data)
-            shapeX = (2, lenX)
-        self.__bufferX = empty(shape=shapeX, dtype=self.dtype)
-
-    def _fcn_rect(self, _, inputs, outputs) -> Optional[list]:
-        """The rectangular sampling"""
-        ordersX = inputs["ordersX"]
-        edges = ordersX.dd.axes_edges[0]._data  # n+1
-        orders = ordersX.data  # n
-        sample = outputs[0].data  # m = sum(orders)
-        weights = outputs["weights"].data
-        binwidths = self.__bufferX[0]  # n
-        samplewidths = self.__bufferX[1]  # n
-        low = self.__bufferX[2]  # n
-        high = self.__bufferX[3]  # n
-
-        binwidths[:] = edges[1:] - edges[:-1]
-        with errstate(invalid="ignore"):  # to ignore division by zero
-            samplewidths[:] = binwidths / orders
-        if self.align == "left":
-            low[:] = edges[:-1]
-            high[:] = edges[1:] - samplewidths
-        elif self.align == "center":
-            low[:] = edges[:-1] + samplewidths * 0.5
-            high[:] = edges[1:] - samplewidths * 0.5
-        else:
-            low[:] = edges[:-1] + samplewidths
-            high[:] = edges[1:]
-
-        offset = 0
-        for i, n in enumerate(orders):
-            if n > 1:
-                sample[offset : offset + n] = linspace(low[i], high[i], n)
-                weights[offset : offset + n] = samplewidths[i]
-            else:
-                sample[offset : offset + n] = low[i]
-                weights[offset : offset + n] = binwidths[i]
-            offset += n
-
-        if self.debug:
-            return list(outputs.iter_data())
-
-    def _fcn_trap(self, _, inputs, outputs) -> Optional[list]:
-        """The trapezoidal sampling"""
-        ordersX = inputs["ordersX"]
-        edges = ordersX.dd.axes_edges[0]._data  # n+1
-        orders = ordersX.data  # n
-        sample = outputs[0].data  # m = sum(orders)
-        weights = outputs["weights"].data
-        samplewidths = self.__bufferX  # n
-
-        samplewidths[:] = edges[1:] - edges[:-1]
-        with errstate(invalid="ignore"):  # to ignore division by zero
-            samplewidths[:] = samplewidths[:] / (orders - 2.0)
-
-        offset = 0
-        for i, n in enumerate(orders):
-            sample[offset : offset + n] = linspace(edges[i], edges[i + 1], n)
-            weights[offset] = samplewidths[i] * 0.5
-            if n > 2:
-                weights[offset + 1 : offset + n - 2] = samplewidths[i]
-            offset += n - 1
-        weights[-1] = samplewidths[-1] * 0.5
-
-        if self.debug:
-            return list(outputs.iter_data())
-
-    def _fcn_gl1d(self, _, inputs, outputs) -> Optional[list]:
-        """The 1d Gauss-Legendre sampling"""
-        ordersX = inputs["ordersX"]
-        edges = ordersX.dd.axes_edges[0]._data
-        orders = ordersX.data
-        sample = outputs[0].data
-        weights = outputs["weights"].data
-
-        _gl_sampler(orders, sample, weights, edges)
-
-        if self.debug:
-            return list(outputs.iter_data())
-
-    def _fcn_gl2d(self, _, inputs, outputs) -> Optional[list]:
-        """The 2d Gauss-Legendre sampling"""
-        ordersX = inputs["ordersX"]
-        ordersY = inputs["ordersY"]
-        edgesX = ordersX.dd.axes_edges[0]._data  # p + 1
-        edgesY = ordersY.dd.axes_edges[0]._data  # q + 1
-        ordersX = ordersX.data
-        ordersY = ordersY.data
-        weightsX = self.__bufferX[0]  # (n, )
-        weightsY = self.__bufferY[0]  # (m, )
-        sampleX = self.__bufferX[1]  # (n, )
-        sampleY = self.__bufferY[1]  # (m, )
-        X = outputs[0].data  # (n, m)
-        Y = outputs[1].data  # (n, m)
-        weights = outputs["weights"].data  # (n, m)
-
-        _gl_sampler(ordersX, sampleX, weightsX, edgesX)
-        _gl_sampler(ordersY, sampleY, weightsY, edgesY)
-
-        X[:], Y[:] = meshgrid(sampleX, sampleY, indexing="ij")
-        matmul(weightsX[newaxis].T, weightsY[newaxis], out=weights)
-
-        if self.debug:
-            return list(outputs.iter_data())
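The affine transform in `_gl_sampler` maps Gauss-Legendre nodes and weights from [-1, 1] onto each bin [a, b]; on a single bin the rule must then integrate polynomials up to degree 2n-1 exactly. A quick standalone check with an illustrative bin and order:

    import numpy as np
    from numpy.polynomial.legendre import leggauss

    a, b, n = 1.0, 3.0, 3                     # one bin [a, b] with order n
    x, w = leggauss(n)                         # nodes and weights on [-1, 1]
    x = 0.5 * (x * (b - a) + (b + a))          # map nodes onto [a, b]
    w = w * 0.5 * (b - a)                      # rescale the weights accordingly

    # exact for polynomials up to degree 2n-1 = 5: integral of x**5 over [1, 3]
    assert np.isclose(np.sum(w * x**5), (3.0**6 - 1.0**6) / 6.0)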
diff --git a/subtrees/dagflow/dagflow/lib/NodeManyToOne.py b/subtrees/dagflow/dagflow/lib/NodeManyToOne.py
deleted file mode 100644
index 1ef594b7ac2a1b9e8ce7fc52d07febf8a7496f56..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/NodeManyToOne.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    AllPositionals,
-    check_has_inputs,
-    check_inputs_equivalence,
-    copy_input_edges_to_output,
-    copy_input_shape_to_output,
-    eval_output_dtype,
-)
-
-
-class NodeManyToOne(FunctionNode):
-    """
-    The abstract node with only one output `result`,
-    which is the result of some function on all the positional inputs
-    """
-
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self) # at least one input
-        check_inputs_equivalence(self) # all the inputs have the same dd fields
-        copy_input_shape_to_output(self, 0, "result") # copy shape to result
-        copy_input_edges_to_output(self, 0, "result") # copy edges to result
-        eval_output_dtype(self, AllPositionals, "result") # eval dtype of result
diff --git a/subtrees/dagflow/dagflow/lib/NodeOneToOne.py b/subtrees/dagflow/dagflow/lib/NodeOneToOne.py
deleted file mode 100644
index 98b8e37ef0afca7270fbdabb7029800d81c765ee..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/NodeOneToOne.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from ..input_extra import MissingInputAddEach
-from ..nodes import FunctionNode
-from ..typefunctions import check_has_inputs
-
-
-class NodeOneToOne(FunctionNode):
-    """
-    The abstract node with an output for every positional input
-    """
-
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault("missing_input_handler", MissingInputAddEach())
-        super().__init__(*args, **kwargs)
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        for inp, out in zip(self.inputs, self.outputs):
-            out.dd.axes_edges = inp.dd.axes_edges
-            out.dd.axes_nodes = inp.dd.axes_nodes
-            out.dd.dtype = inp.dd.dtype
-            out.dd.shape = inp.dd.shape
diff --git a/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars.py b/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars.py
deleted file mode 100644
index 05c9126195d88421502391acb83780586eaa77ae..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars.py
+++ /dev/null
@@ -1,92 +0,0 @@
-from ..input_extra import MissingInputAddPair
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    check_input_square_or_diag,
-    copy_input_to_output,
-    check_input_dimension,
-    check_inputs_equivalence,
-    check_inputs_multiplicable_mat
-)
-from ..exception import InitializationError
-
-from scipy.linalg import solve_triangular
-from numpy import matmul, subtract, divide, multiply, add
-
-class NormalizeCorrelatedVars(FunctionNode):
-    """Normalize correlated variables or correlate normal variables with linear expression
-
-    If x is a vector of values, μ are the central values and L is the Cholesky decomposition
-    of the covariance matrix V=LLᵀ, then
-    z = L⁻¹(x - μ)
-    x = Lz + μ
-    """
-
-    _mode: str
-    def __init__(self, *args, mode='forward', **kwargs):
-        if mode=='forward':
-            self._mark = 'c→u'
-        elif mode=='backward':
-            self._mark = 'u→c'
-        else:
-            raise InitializationError(f'Invalid NormalizeCorrelatedVars mode={mode}. Expect "forward" or "backward"', node=self)
-
-        self._mode = mode
-
-        super().__init__(*args, missing_input_handler=MissingInputAddPair(), **kwargs)
-
-        self._add_input("matrix", positional=False)
-        self._add_input("central", positional=False)
-
-        self._functions.update({
-                "forward_2d":  self._fcn_forward_2d,
-                "backward_2d": self._fcn_backward_2d,
-                "forward_1d":  self._fcn_forward_1d,
-                "backward_1d": self._fcn_backward_1d
-                })
-
-    def _fcn_forward_2d(self, _, inputs, outputs):
-        inputs.touch()
-        L = inputs["matrix"].data
-        central = inputs["central"].data
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            subtract(input, central, out=output)
-            solve_triangular(L, output, lower=True, overwrite_b=True, check_finite=False)
-
-    def _fcn_backward_2d(self, _, inputs, outputs):
-        inputs.touch()
-        L = inputs["matrix"].data
-        central = inputs["central"].data
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            matmul(L, input, out=output)
-            add(output, central, out=output)
-
-    def _fcn_forward_1d(self, _, inputs, outputs):
-        inputs.touch()
-        Ldiag = inputs["matrix"].data
-        central = inputs["central"].data
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            subtract(input, central, out=output)
-            divide(output, Ldiag, out=output)
-
-    def _fcn_backward_1d(self, _, inputs, outputs):
-        inputs.touch()
-        Ldiag = inputs["matrix"].data
-        central = inputs["central"].data
-        for input, output in zip(inputs.iter_data(), outputs.iter_data()):
-            multiply(Ldiag, input, out=output)
-            add(output, central, out=output)
-
-    def _typefunc(self) -> None:
-        check_has_inputs(self)
-        ndim = check_input_square_or_diag(self, 'matrix')
-        check_input_dimension(self, 'central', 1)
-        check_inputs_equivalence(self, ('central', slice(None)))
-        check_inputs_multiplicable_mat(self, 'matrix', slice(None))
-        copy_input_to_output(self, slice(None), slice(None))
-
-        key = f"{self._mode}_{ndim}d"
-        try:
-            self.fcn = self._functions[key]
-        except KeyError:
-            raise InitializationError(f'Invalid mode "{key}". Expect: {self._functions.keys()}')
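The forward/backward pair above is just z = L⁻¹(x − μ) and x = Lz + μ, so the backward transform undoes the forward one. A small round-trip check with scipy and illustrative numbers:

    import numpy as np
    from scipy.linalg import cholesky, solve_triangular

    V = np.array([[4.0, 2.0], [2.0, 3.0]])      # covariance matrix
    mu = np.array([10.0, 20.0])                   # central values
    x = np.array([11.0, 21.5])                    # correlated values

    L = cholesky(V, lower=True)                   # V = L Lᵀ
    z = solve_triangular(L, x - mu, lower=True)   # forward:  z = L⁻¹(x - μ)
    x_back = L @ z + mu                           # backward: x = L z + μ
    assert np.allclose(x_back, x)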
diff --git a/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars2.py b/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars2.py
deleted file mode 100644
index 35837263e2c93b6590e2788fcf95d3b7b9e3e179..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/NormalizeCorrelatedVars2.py
+++ /dev/null
@@ -1,142 +0,0 @@
-from ..nodes import FunctionNode
-from ..node import Input, Output
-from ..typefunctions import (
-    check_has_inputs,
-    check_input_square_or_diag,
-    copy_input_to_output,
-    check_input_dimension,
-    check_inputs_equivalence,
-    check_inputs_multiplicable_mat
-)
-
-from scipy.linalg import solve_triangular
-from numpy import matmul, subtract, divide, multiply, add, zeros, copyto
-
-class NormalizeCorrelatedVars2(FunctionNode):
-    """Normalize correlated variables or correlate normal variables with linear expression
-
-    If x is a vector of values, μ are the central values and L is the Cholesky decomposition
-    of the covariance matrix V=LLᵀ, then
-    z = L⁻¹(x - μ)
-    x = Lz + μ
-    """
-
-    _mark: str = 'c↔u'
-
-    _input_value: Input
-    _input_normvalue: Input
-    _output_value: Output
-    _output_normvalue: Output
-
-    _ndim: str
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-        self._add_input("matrix", positional=False)
-        self._add_input("central", positional=False)
-
-        self._input_value, self._output_value = self._add_pair(
-            "value", "value",
-            input_kws={'allocatable': True},
-            output_kws={'forbid_reallocation': True, 'allocatable': False},
-        )
-        self._input_normvalue, self._output_normvalue = self._add_pair(
-            "normvalue", "normvalue",
-            input_kws={'allocatable': True},
-            output_kws={'forbid_reallocation': True, 'allocatable': False},
-        )
-
-        self._functions.update({
-                "forward_2d":  self._fcn_forward_2d,
-                "forward_1d":  self._fcn_forward_1d,
-                "backward_2d":  self._fcn_backward_2d,
-                "backward_1d":  self._fcn_backward_1d,
-                })
-
-    def _fcn_forward_2d(self, _, inputs, outputs):
-        inputs.touch()
-        L = inputs["matrix"].data
-        central = inputs["central"].data
-
-        input_value = inputs["value"].data
-        output_value = outputs["value"].data
-        output_normvalue = outputs["normvalue"].data
-
-        subtract(input_value, central, out=output_normvalue)
-        solve_triangular(L, output_normvalue, lower=True, overwrite_b=True, check_finite=False)
-        copyto(output_value, input_value)
-
-    def _fcn_backward_2d(self, _, inputs, outputs):
-        inputs.touch()
-        L = inputs["matrix"].data
-        central = inputs["central"].data
-
-        input_normvalue = inputs["normvalue"].data
-        output_normvalue = outputs["normvalue"].data
-        output_value = outputs["value"].data
-
-        matmul(L, input_normvalue, out=output_value)
-        add(output_value, central, out=output_value)
-        copyto(output_normvalue, input_normvalue)
-
-    def _fcn_forward_1d(self, _, inputs, outputs):
-        inputs.touch()
-        Ldiag = inputs["matrix"].data
-        central = inputs["central"].data
-
-        input_value = inputs["value"].data
-        output_value = outputs["value"].data
-        output_normvalue = outputs["normvalue"].data
-
-        subtract(input_value, central, out=output_normvalue)
-        divide(output_normvalue, Ldiag, out=output_normvalue)
-        copyto(output_value, input_value)
-
-    def _fcn_backward_1d(self, _, inputs, outputs):
-        inputs.touch()
-        Ldiag = inputs["matrix"].data
-        central = inputs["central"].data
-
-        input_normvalue = inputs["normvalue"].data
-        output_normvalue = outputs["normvalue"].data
-        output_value = outputs["value"].data
-
-        multiply(Ldiag, input_normvalue, out=output_value)
-        add(output_value, central, out=output_value)
-        copyto(output_normvalue, input_normvalue)
-
-    def _on_taint(self, caller: Input) -> None:
-        """Choose the function to call based on the modified input:
-            - if normvalue is modified, the value should be updated
-            - if value is modified, the normvalue should be updated
-            - if sigma or central is modified, the normvalue should be updated
-
-            TODO:
-                - implement partial taintflag propagation
-                - value should not be tainted on sigma/central modification
-        """
-        if caller is self._input_normvalue:
-            self.fcn = self._functions[f"backward_{self._ndim}"]
-        else:
-            self.fcn = self._functions[f"forward_{self._ndim}"]
-
-    def _typefunc(self) -> None:
-        check_has_inputs(self)
-        ndim = check_input_square_or_diag(self, 'matrix')
-        check_input_dimension(self, 'central', 1)
-        check_inputs_equivalence(self, ('central', slice(None)))
-        check_inputs_multiplicable_mat(self, 'matrix', slice(None))
-        copy_input_to_output(self, slice(None), slice(None))
-
-        self._inherit_labels(self._input_value.parent_node, fmt='Normalize {}')
-
-        self._ndim=f"{ndim}d"
-        self.fcn = self._functions[f"forward_{self._ndim}"]
-
-        self._valuedata = zeros(shape=self._input_value.dd.shape, dtype=self._input_value.dd.dtype)
-        self._normvaluedata = zeros(shape=self._input_normvalue.dd.shape, dtype=self._input_normvalue.dd.dtype)
-        self._input_value.set_own_data(self._valuedata, owns_buffer=False)
-        self._input_normvalue.set_own_data(self._normvaluedata, owns_buffer=False)
-        self._output_value._set_data(self._valuedata, owns_buffer=False, forbid_reallocation=True)
-        self._output_normvalue._set_data(self._normvaluedata, owns_buffer=False, forbid_reallocation=True)
diff --git a/subtrees/dagflow/dagflow/lib/Product.py b/subtrees/dagflow/dagflow/lib/Product.py
deleted file mode 100644
index 524831af867239acc36902caf9981fafae55d1a2..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Product.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from numpy import copyto
-
-from .NodeManyToOne import NodeManyToOne
-
-
-class Product(NodeManyToOne):
-    """Product of all the inputs together"""
-
-    def _fcn(self, _, inputs, outputs):
-        out = outputs["result"].data
-        copyto(out, inputs[0].data)
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                out *= input.data
-        return out
diff --git a/subtrees/dagflow/dagflow/lib/Sum.py b/subtrees/dagflow/dagflow/lib/Sum.py
deleted file mode 100644
index 1751902f37b47a2dc1894d2ed5775d8986bd497f..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/Sum.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from numpy import add, copyto
-
-from .NodeManyToOne import NodeManyToOne
-
-
-class Sum(NodeManyToOne):
-    """Sum of all the inputs together"""
-
-    _mark = 'Σ'
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-    def _fcn(self, _, inputs, outputs):
-        out = outputs["result"].data
-        copyto(out, inputs[0].data)
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                add(out, input.data, out=out)
-        return out
diff --git a/subtrees/dagflow/dagflow/lib/SumMatOrDiag.py b/subtrees/dagflow/dagflow/lib/SumMatOrDiag.py
deleted file mode 100644
index bd3ed648c87fb69615c96ff1391b3936e6449bee..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/SumMatOrDiag.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from numpy import copyto, add
-from numpy.typing import NDArray
-from numba import njit
-
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    copy_input_shape_to_output,
-    check_inputs_square_or_diag,
-    check_inputs_same_dtype,
-    AllPositionals
-)
-
-@njit(cache=True)
-def _settodiag1(inarray: NDArray, outmatrix: NDArray):
-    for i in range(inarray.size):
-        outmatrix[i, i] = inarray[i]
-
-@njit(cache=True)
-def _addtodiag(inarray: NDArray, outmatrix: NDArray):
-    for i in range(inarray.size):
-        outmatrix[i, i] += inarray[i]
-
-class SumMatOrDiag(FunctionNode):
-    """Sum of all the inputs together. Inputs are square matrices or diagonals of square matrices"""
-
-    _ndim: int = 0
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
-        self._functions.update({
-                "2d":  self._fcn2d,
-                "1d":  self._fcn1d,
-                })
-
-    def _fcn2d(self, _, inputs, outputs):
-        out = outputs["result"].data
-        inp = inputs[0].data
-        if len(inp.shape)==1:
-            _settodiag1(inp, out)
-        else:
-            out[:] = inp
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                if len(input.dd.shape)==1:
-                    _addtodiag(input.data, out)
-                else:
-                    add(input.data, out, out=out)
-        return out
-
-    def _fcn1d(self, _, inputs, outputs):
-        out = outputs["result"].data
-        copyto(out, inputs[0].data)
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                add(out, input.data, out=out)
-        return out
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        copy_input_shape_to_output(self, 0, "result")
-        self._ndim = check_inputs_square_or_diag(self)
-        check_inputs_same_dtype(self)
-        eval_output_dtype(self, AllPositionals, "result")
-
-        size = self.inputs[0].dd.shape[0]
-        output = self.outputs[0]
-        if self._ndim==2:
-            output.dd.shape = size, size
-        elif self._ndim==1:
-            output.dd.shape = size,
-        else:
-            assert False
-
-        self.fcn = self._functions[f"{self._ndim}d"]
diff --git a/subtrees/dagflow/dagflow/lib/SumSq.py b/subtrees/dagflow/dagflow/lib/SumSq.py
deleted file mode 100644
index 53b8939e23968f2a76832679a3d32a590a9cbf83..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/SumSq.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from numpy import add, square, ndarray, empty_like
-
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    copy_input_shape_to_output,
-    check_inputs_equivalence,
-    AllPositionals
-)
-
-class SumSq(FunctionNode):
-    """Sum of the squared of all the inputs"""
-
-    _buffer: ndarray
-    _mark = 'Σ()²'
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
-    def _fcn(self, _, inputs, outputs):
-        out = outputs["result"].data
-        square(inputs[0].data, out=out)
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                square(input.data, out=self._buffer)
-                add(self._buffer, out, out=out)
-        return out
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        copy_input_shape_to_output(self, 0, "result")
-        check_inputs_equivalence(self)
-        eval_output_dtype(self, AllPositionals, "result")
-
-    def _post_allocate(self) -> None:
-        self._buffer = empty_like(self.inputs[0].get_data_unsafe())
diff --git a/subtrees/dagflow/dagflow/lib/View.py b/subtrees/dagflow/dagflow/lib/View.py
deleted file mode 100644
index fbb8425a976aca7cd971fec210fa2f3bdd474227..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/View.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    copy_input_dtype_to_output,
-    copy_input_shape_to_output,
-)
-
-class View(FunctionNode):
-    """Creates a node with a single data output which is a view on the input"""
-
-    def __init__(self, name, outname="view", **kwargs):
-        super().__init__(name, **kwargs)
-        output = self._add_output(
-            outname, allocatable=False, forbid_reallocation=True
-        )
-        self._add_input("input", child_output=output)
-
-    def _fcn(self, _, inputs, outputs):
-        return self.inputs[0].data
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        copy_input_dtype_to_output(self, 0, 0)
-        copy_input_shape_to_output(self, 0, 0)
-
-    def _post_allocate(self) -> None:
-        input = self.inputs[0]
-        output = self.outputs[0]
-        output._set_data(
-            input.parent_output._data,
-            owns_buffer=False,
-            forbid_reallocation=True,
-        )
diff --git a/subtrees/dagflow/dagflow/lib/ViewConcat.py b/subtrees/dagflow/dagflow/lib/ViewConcat.py
deleted file mode 100644
index 9ec8326916ca4bcb9ca278d61b14054c0a18add6..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/ViewConcat.py
+++ /dev/null
@@ -1,57 +0,0 @@
-from typing import List, Optional
-
-from numpy import zeros
-
-from ..nodes import FunctionNode
-from ..output import Output
-from ..typefunctions import check_input_dimension, check_input_dtype
-
-
-class ViewConcat(FunctionNode):
-    """Creates a node with a single data output which is a concatenated memory of the inputs"""
-
-    _output: Output
-    _offsets: List[int]
-
-    def __init__(self, name, outname="concat", **kwargs):
-        super().__init__(name, **kwargs)
-        self._output = self._add_output(
-            outname, allocatable=False, forbid_reallocation=True
-        )
-        self._offsets = []
-
-    def missing_input_handler(
-        self, idx: Optional[int] = None, scope: Optional[int] = None
-    ):
-        icount = len(self.inputs)
-        idx = idx if idx is not None else icount
-        iname = "input_{:02d}".format(idx)
-
-        kwargs = {"child_output": self._output}
-        return self._add_input(iname, allocatable=True, **kwargs)
-
-    def _fcn(self, _, inputs, outputs):
-        self.inputs.touch()
-        return self._output._data
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        size = 0
-        self._offsets = []
-        cdtype = self.inputs[0].dd.dtype
-        check_input_dtype(self, slice(None), cdtype)
-        check_input_dimension(self, slice(None), 1)
-        for input in self.inputs:
-            self._offsets.append(size)
-            size += input.dd.shape[0]
-
-        output = self.outputs[0]
-        output.dd.dtype = cdtype
-        output.dd.shape = (size,)
-        data = zeros(shape=size, dtype=cdtype)
-        output._set_data(data, owns_buffer=True)
-
-        for offset, input in zip(self._offsets, self.inputs):
-            size = input.dd.shape[0]
-            idata = data[offset : offset + size]
-            input.set_own_data(idata, owns_buffer=False)
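
ViewConcat allocates one contiguous output buffer and hands every input a slice of it at a precomputed offset, so filling an input fills the matching segment of the output. A NumPy sketch of that layout, with arbitrary example sizes:

    import numpy as np

    sizes = [2, 3]                        # 1d input lengths
    offsets, total = [], 0
    for n in sizes:                       # same bookkeeping as _typefunc
        offsets.append(total)
        total += n

    data = np.zeros(total)                # the concatenated output buffer
    views = [data[o:o + n] for o, n in zip(offsets, sizes)]

    views[0][:] = [1.0, 2.0]              # writing through an input slice ...
    views[1][:] = [3.0, 4.0, 5.0]
    print(data)                           # ... shows up in the output: [1. 2. 3. 4. 5.]
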
diff --git a/subtrees/dagflow/dagflow/lib/WeightedSum.py b/subtrees/dagflow/dagflow/lib/WeightedSum.py
deleted file mode 100644
index dcf929a11c289f5e083965cffd79b28e975ba932..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/WeightedSum.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from numpy import copyto
-
-from ..exception import TypeFunctionError
-from ..input_extra import MissingInputAddOne
-from ..nodes import FunctionNode
-from ..typefunctions import (
-    check_has_inputs,
-    eval_output_dtype,
-    copy_input_shape_to_output,
-)
-
-
-class WeightedSum(FunctionNode):
-    """Weighted sum of all the inputs together"""
-
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-        self._add_input("weight", positional=False)
-        self._functions.update(
-            {"number": self._fcn_number, "iterable": self._fcn_iterable}
-        )
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        check_has_inputs(self)
-        weight = self.inputs["weight"]
-        shape = weight.dd.shape[0]
-        leninp = len(self.inputs)
-        if shape == 0:
-            raise TypeFunctionError(
-                "Cannot use WeightedSum with empty 'weight'!"
-            )
-        elif shape == 1:
-            self.fcn = self._functions["number"]
-        elif shape == leninp:
-            self.fcn = self._functions["iterable"]
-        else:
-            raise TypeFunctionError(
-                f"The number of weights (={shape}) must coincide "
-                f"with the number of inputs (={leninp})!"
-            )
-        copy_input_shape_to_output(self, 0, "result")
-        eval_output_dtype(self, slice(None), "result")
-
-    def _fcn_number(self, _, inputs, outputs):
-        """
-        The function for one weight for all inputs:
-        `len(weight) == 1`
-        """
-        out = outputs[0].data
-        weight = self.inputs["weight"].data
-        copyto(out, inputs[0].data)
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                out += input.data
-        out *= weight
-        return out
-
-    def _fcn_iterable(self, _, inputs, outputs):
-        """
-        The function for one weight for every input:
-        `len(weight) == len(inputs)`
-        """
-        out = outputs[0].data
-        weights = self.inputs["weight"].data
-        copyto(out, inputs[0].data * weights[0])
-        if len(inputs) > 1:
-            for input, weight in zip(inputs[1:], weights[1:]):
-                out += input.data * weight
-        return out
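
_typefunc selects one of two functions from the length of the weight input: a single weight applied to the plain sum, or one weight per input. A NumPy sketch of both modes with arbitrary values:

    import numpy as np

    inputs = [np.array([1.0, 2.0]), np.array([3.0, 4.0])]

    # "number" mode: one weight for all inputs -> w * (a + b)
    w = np.array([0.5])
    out = inputs[0].copy()
    for data in inputs[1:]:
        out += data
    out *= w
    print(out)                            # [2. 3.]

    # "iterable" mode: one weight per input -> w0*a + w1*b
    weights = np.array([0.5, 2.0])
    out = inputs[0] * weights[0]
    for data, wi in zip(inputs[1:], weights[1:]):
        out += data * wi
    print(out)                            # [6.5 9. ]
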
diff --git a/subtrees/dagflow/dagflow/lib/__init__.py b/subtrees/dagflow/dagflow/lib/__init__.py
deleted file mode 100644
index dd731c0c80051ce8b37281492f01cfcacbd61dcb..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .Array import Array
-from .Sum import Sum
-from .Product import Product
-from .Division import Division
-from .Concatenation import Concatenation
-from .WeightedSum import WeightedSum
diff --git a/subtrees/dagflow/dagflow/lib/trigonometry.py b/subtrees/dagflow/dagflow/lib/trigonometry.py
deleted file mode 100644
index 01f5b23a6327a027246041787ed4118a08295bcc..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/lib/trigonometry.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from numpy import arctan, cos, sin, tan, arccos, arcsin
-
-from .NodeOneToOne import NodeOneToOne
-
-
-class Cos(NodeOneToOne):
-    """Cos function"""
-
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            cos(inp.data, out=out.data)
-        return list(outputs.iter_data())
-
-
-class Sin(NodeOneToOne):
-    """Sin function"""
-
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            sin(inp.data, out=out.data)
-        return list(outputs.iter_data())
-
-class ArcCos(NodeOneToOne):
-    """ArcCos function"""
-
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            arccos(inp.data, out=out.data)
-        return list(outputs.iter_data())
-
-
-class ArcSin(NodeOneToOne):
-    """ArcSin function"""
-
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            arcsin(inp.data, out=out.data)
-        return list(outputs.iter_data())
-
-
-class Tan(NodeOneToOne):
-    """Tan function"""
-
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            tan(inp.data, out=out.data)
-        return list(outputs.iter_data())
-
-
-class Arctan(NodeOneToOne):
-    """Arctan function"""
-
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            arctan(inp.data, out=out.data)
-        return list(outputs.iter_data())
diff --git a/subtrees/dagflow/dagflow/logger.py b/subtrees/dagflow/dagflow/logger.py
deleted file mode 100644
index 9e594b5f1ab6486121f6e0ff35f0a37aab3d0ebb..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/logger.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from logging import (
-    DEBUG,
-    INFO,
-    FileHandler,
-    Formatter,
-    Logger,
-    StreamHandler,
-    getLogger,
-    addLevelName
-)
-from typing import Optional
-
-# To avoid creating duplicate loggers, cache the created instances
-_loggers = {}
-
-def get_logger(
-    name="dagflow",
-    *,
-    filename: Optional[str] = None,
-    debug: bool = False,
-    console: bool = True,
-    formatstr: Optional[str] = "%(asctime)s - %(levelname)s - %(message)s",
-) -> Logger:
-    if logger := _loggers.get(name):
-        return logger
-    logger = getLogger(name)
-
-    level = DEBUG if debug else INFO
-    logger.setLevel(level)
-    formatter = Formatter(formatstr)
-    if filename:
-        fh = FileHandler(filename)
-        fh.setLevel(level)
-        fh.setFormatter(formatter)
-        logger.addHandler(fh)
-    if console:
-        ch = StreamHandler()
-        ch.setLevel(level)
-        ch.setFormatter(formatter)
-        logger.addHandler(ch)
-    _loggers[name] = logger
-    return logger
-
-SUBINFO    = INFO-1
-SUBSUBINFO = INFO-2
-addLevelName(SUBINFO, "SUBINFO")
-addLevelName(SUBSUBINFO, "SUBSUBINFO")
-
-logger = get_logger()
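
A short usage sketch, assuming the package is importable as dagflow with the module path shown above. Loggers are cached by name, so a later call with different options returns the instance created first:

    from dagflow.logger import SUBINFO, get_logger

    log = get_logger("example", filename="example.log", debug=True)
    log.info("graph closed")
    log.log(SUBINFO, "per-node details")   # custom level just below INFO
    assert get_logger("example") is log    # cached: options of later calls are ignored
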
diff --git a/subtrees/dagflow/dagflow/membernode.py b/subtrees/dagflow/dagflow/membernode.py
deleted file mode 100644
index a0705fe3dd43f5ab352fb5acf54ce704dfed2c7b..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/membernode.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from .graph import Graph
-from .node import Node
-
-from typing import Optional
-
-class MemberNodesHolder:
-    _graph: Optional[Graph] = None
-
-    def __init__(self, graph: Graph=None):
-        self.graph = graph
-        for key in dir(self):
-            val = getattr(self, key)
-            if isinstance(val, Node):
-                val.obj = self
-                val.graph = self._graph
-
-    @property
-    def graph(self):
-        return self._graph
-
-    @graph.setter
-    def graph(self, graph, **kwargs):
-        if self._graph:
-            raise ValueError("Graph is already set")
-        if graph is True:
-            self._graph = Graph()
-        elif isinstance(graph, str):
-            self._graph = Graph(label=graph)
-        elif isinstance(graph, dict):
-            self._graph = Graph(**graph)
-        elif graph:
-            self._graph = graph
-
-
-class MemberNode(Node):
-    """Function signature: fcn(master, node, inputs, outputs)"""
-
-    _obj = None
-
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-
-    def _eval(self):
-        self._being_evaluated = True
-        ret = self._fcn(self._obj, self, self.inputs, self.outputs)
-        self._being_evaluated = False
-        return ret
-
-    @property
-    def obj(self):
-        return self._obj
-
-    @obj.setter
-    def obj(self, obj):
-        self._obj = obj
-
-    def _stash_fcn(self):
-        prev_fcn = self._fcn
-        self._fcn_chain.append(prev_fcn)
-        return lambda node, inputs, outputs: prev_fcn(
-            node._obj, node, inputs, outputs
-        )
-
-    def _make_wrap(self, prev_fcn, wrap_fcn):
-        def wrapped_fcn(master, node, inputs, outputs):
-            wrap_fcn(prev_fcn, node, inputs, outputs)
-
-        return wrapped_fcn
-
-
-class StaticMemberNode(Node):
-    """Function signature: fcn(self)"""
-
-    _obj = None
-    _touch_inputs = True
-
-    def __init__(self, *args, **kwargs):
-        self._touch_inputs = kwargs.pop("touch_inputs", True)
-        super().__init__(*args, **kwargs)
-
-    def _eval(self):
-        self._being_evaluated = True
-        if self._touch_inputs:
-            self.inputs.touch()
-        ret = self._fcn(self._obj)
-        self._being_evaluated = False
-        return ret
-
-    @property
-    def obj(self):
-        return self._obj
-
-    @obj.setter
-    def obj(self, obj):
-        self._obj = obj
-
-    def _stash_fcn(self):
-        prev_fcn = self._fcn
-        self._fcn_chain.append(prev_fcn)
-        return lambda node, inputs, outputs: prev_fcn(node._obj)
-
-    def _make_wrap(self, prev_fcn, wrap_fcn):
-        def wrapped_fcn(master):
-            wrap_fcn(prev_fcn, self, self.inputs, self.outputs)
-
-        return wrapped_fcn
diff --git a/subtrees/dagflow/dagflow/node.py b/subtrees/dagflow/dagflow/node.py
deleted file mode 100644
index bc684028a55e834457c147ae458047d08a9009da..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/node.py
+++ /dev/null
@@ -1,561 +0,0 @@
-from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
-
-from .exception import (
-    AllocationError,
-    ClosedGraphError,
-    ClosingError,
-    CriticalError,
-    DagflowError,
-    InitializationError,
-    OpeningError,
-    ReconnectionError,
-    UnclosedGraphError,
-)
-from .input import Input
-from .iter import IsIterable
-from .legs import Legs
-from .logger import Logger, get_logger
-from .output import Output
-from .types import GraphT
-
-
-class Node(Legs):
-    _name: str
-    _mark: Optional[str] = None
-    _labels: Dict[str, str]
-    _graph: Optional[GraphT] = None
-    _fcn: Optional[Callable] = None
-    _fcn_chain = None
-    _exception: Optional[str] = None
-
-    # Taintflag and status
-    _tainted: bool = True
-    _frozen: bool = False
-    _frozen_tainted: bool = False
-    _invalid: bool = False
-    _closed: bool = False
-    _allocated: bool = False
-    _being_evaluated: bool = False
-
-    _types_tainted: bool = True
-
-    # Options
-    _debug: bool = False
-    _auto_freeze: bool = False
-    _immediate: bool = False
-    # _always_tainted: bool = False
-
-    def __init__(
-        self,
-        name,
-        *,
-        label: Union[str, dict, None] = None,
-        graph: Optional[GraphT] = None,
-        fcn: Optional[Callable] = None,
-        typefunc: Optional[Callable] = None,
-        debug: Optional[bool] = None,
-        logger: Optional[Any] = None,
-        missing_input_handler: Optional[Callable] = None,
-        immediate: bool = False,
-        auto_freeze: bool = False,
-        frozen: bool = False,
-        **kwargs,
-    ):
-        super().__init__(missing_input_handler=missing_input_handler)
-        self._name = name
-        if fcn is not None:
-            self._fcn = fcn
-        if typefunc is not None:
-            self._typefunc = typefunc
-        elif typefunc is False:
-            self._typefunc = lambda: None
-
-        self._fcn_chain = []
-        if graph is None:
-            from .graph import Graph
-
-            self.graph = Graph.current()
-        else:
-            self.graph = graph
-
-        if debug is None and self.graph is not None:
-            self._debug = self.graph.debug
-        else:
-            self._debug = bool(debug)
-
-        if isinstance(label, str):
-            self._labels = {'text': label}
-        elif isinstance(label, dict):
-            self._labels = label
-        else:
-            self._labels = {'text': name}
-
-        if logger is not None:
-            self._logger = logger
-        elif self.graph is not None:
-            self._logger = self.graph.logger
-        else:
-            self._logger = get_logger()
-
-        self._immediate = immediate
-        self._auto_freeze = auto_freeze
-        self._frozen = frozen
-
-        if kwargs:
-            raise InitializationError(f"Unparsed arguments: {kwargs}!")
-
-    def __str__(self):
-        return f"{{{self.name}}} {super().__str__()}"
-
-    #
-    # Properties
-    #
-    @property
-    def name(self):
-        return self._name
-
-    @name.setter
-    def name(self, name):
-        self._name = name
-
-    @property
-    def mark(self):
-        return self._mark
-
-    @property
-    def exception(self):
-        return self._exception
-
-    @property
-    def logger(self) -> Logger:
-        return self._logger
-
-    @property
-    def tainted(self) -> bool:
-        return self._tainted
-
-    @property
-    def types_tainted(self) -> bool:
-        return self._types_tainted
-
-    @property
-    def frozen_tainted(self) -> bool:
-        return self._frozen_tainted
-
-    @property
-    def frozen(self) -> bool:
-        return self._frozen
-
-    @property
-    def auto_freeze(self) -> bool:
-        return self._auto_freeze
-
-    # @property
-    # def always_tainted(self) -> bool:
-    # return self._always_tainted
-
-    @property
-    def closed(self) -> bool:
-        return self._closed
-
-    @property
-    def debug(self) -> bool:
-        return self._debug
-
-    @property
-    def being_evaluated(self) -> bool:
-        return self._being_evaluated
-
-    @property
-    def allocated(self) -> bool:
-        return self._allocated
-
-    @property
-    def immediate(self) -> bool:
-        return self._immediate
-
-    @property
-    def invalid(self) -> bool:
-        return self._invalid
-
-    @invalid.setter
-    def invalid(self, invalid) -> None:
-        if invalid:
-            self.invalidate_self()
-        elif any(input.invalid for input in self.inputs.iter_all()):
-            return
-        else:
-            self.invalidate_self(False)
-        for output in self.outputs:
-            output.invalid = invalid
-
-    def invalidate_self(self, invalid=True) -> None:
-        self._invalid = bool(invalid)
-        self._frozen_tainted = False
-        self._frozen = False
-        self._tainted = True
-
-    def invalidate_children(self) -> None:
-        for output in self.outputs:
-            output.invalid = True
-
-    def invalidate_parents(self) -> None:
-        for input in self.inputs.iter_all():
-            node = input.parent_node
-            node.invalidate_self()
-            node.invalidate_parents()
-
-    @property
-    def graph(self):
-        return self._graph
-
-    @graph.setter
-    def graph(self, graph):
-        if graph is None:
-            return
-        if self._graph is not None:
-            raise DagflowError("Graph is already defined")
-        self._graph = graph
-        self._graph.register_node(self)
-
-    @property
-    def labels(self) -> Generator[Tuple[str,str], None, None]:
-        yield from self._labels.items()
-
-    #
-    # Methods
-    #
-    def __call__(self, name, child_output: Optional[Output] = None, **kwargs):
-        self.logger.debug(f"Node '{self.name}': Get input '{name}'")
-        kwargs.setdefault("positional", False)
-        inp = self.inputs.get(name, None)
-        if inp is None:
-            if self.closed:
-                raise ClosedGraphError(node=self)
-            return self._add_input(name, child_output=child_output, **kwargs)
-        elif inp.connected and (output := inp.parent_output):
-            raise ReconnectionError(input=inp, node=self, output=output)
-        return inp
-
-    def label(self, source='text'):
-        # if self._labels:
-        #     kwargs.setdefault("name", self._name)
-        #     return self._labels.format(*args, **kwargs)
-        label = self._labels.get(source, None)
-        if label is None:
-            return self._labels['text']
-
-        return label
-
-    def _inherit_labels(self, source: 'Node', fmt: Union[str, Callable]) -> None:
-        if isinstance(fmt, str):
-            formatter = fmt.format
-        elif isinstance(fmt, dict):
-            formatter = lambda s: fmt.get(s, s)
-        else:
-            formatter = fmt
-
-        for k, v in source.labels:
-            if k in ('key',):
-                continue
-            newv = formatter(v)
-            if newv is not None:
-                self._labels[k] = newv
-
-    def add_input(self, name, **kwargs) -> Union[Input, Tuple[Input]]:
-        if not self.closed:
-            return self._add_input(name, **kwargs)
-        raise ClosedGraphError(node=self)
-
-    def _add_input(self, name, **kwargs) -> Union[Input, Tuple[Input]]:
-        if IsIterable(name):
-            return tuple(self._add_input(n, **kwargs) for n in name)
-        self.logger.debug(f"Node '{self.name}': Add input '{name}'")
-        if name in self.inputs:
-            raise ReconnectionError(input=name, node=self)
-        positional = kwargs.pop("positional", True)
-        keyword = kwargs.pop("keyword", True)
-        inp = Input(name, self, **kwargs)
-        self.inputs.add(inp, positional=positional, keyword=keyword)
-
-        if self._graph:
-            self._graph._add_input(inp)
-        return inp
-
-    def add_output(self, name, **kwargs) -> Union[Output, Tuple[Output]]:
-        if not self.closed:
-            return self._add_output(name, **kwargs)
-        raise ClosedGraphError(node=self)
-
-    def _add_output(
-        self, name, *, keyword: bool = True, positional: bool = True, **kwargs
-    ) -> Union[Output, Tuple[Output]]:
-        if IsIterable(name):
-            return tuple(self._add_output(n, **kwargs) for n in name)
-        self.logger.debug(f"Node '{self.name}': Add output '{name}'")
-        if isinstance(name, Output):
-            if name.name in self.outputs or name.node:
-                raise ReconnectionError(output=name, node=self)
-            name._node = self
-            return self.__add_output(
-                name, positional=positional, keyword=keyword
-            )
-        if name in self.outputs:
-            raise ReconnectionError(output=name, node=self)
-
-        return self.__add_output(
-            Output(name, self, **kwargs),
-            positional=positional,
-            keyword=keyword,
-        )
-
-    def __add_output(
-        self, out, positional: bool = True, keyword: bool = True
-    ) -> Union[Output, Tuple[Output]]:
-        self.outputs.add(out, positional=positional, keyword=keyword)
-        if self._graph:
-            self._graph._add_output(out)
-        return out
-
-    def add_pair(
-        self, iname: str, oname: str, **kwargs
-    ) -> Tuple[Input, Output]:
-        if not self.closed:
-            return self._add_pair(iname, oname, **kwargs)
-        raise ClosedGraphError(node=self)
-
-    def _add_pair(
-        self,
-        iname: str,
-        oname: str,
-        input_kws: Optional[dict] = None,
-        output_kws: Optional[dict] = None,
-    ) -> Tuple[Input, Output]:
-        input_kws = input_kws or {}
-        output_kws = output_kws or {}
-        output = self._add_output(oname, **output_kws)
-        input = self._add_input(iname, child_output=output, **input_kws)
-        return input, output
-
-    def _wrap_fcn(self, wrap_fcn, *other_fcns):
-        prev_fcn = self._stash_fcn()
-        self._fcn = self._make_wrap(prev_fcn, wrap_fcn)
-        if other_fcns:
-            self._wrap_fcn(*other_fcns)
-
-    def _unwrap_fcn(self):
-        if not self._fcn_chain:
-            raise DagflowError("Unable to unwrap bare function")
-        self._fcn = self._fcn_chain.pop()
-
-    def _stash_fcn(self):
-        raise DagflowError(
-            "Unimplemented method: use FunctionNode, StaticNode or MemberNode"
-        )
-
-    def _make_wrap(self, prev_fcn, wrap_fcn):
-        raise DagflowError(
-            "Unimplemented method: use FunctionNode, StaticNode or MemberNode"
-        )
-
-    def touch(self, force=False):
-        if self._frozen:
-            return
-        if not self._tainted and not force:
-            return
-        self.logger.debug(f"Node '{self.name}': Touch")
-        ret = self.eval()
-        self._tainted = False  # self._always_tainted
-        if self._auto_freeze:
-            self._frozen = True
-        return ret
-
-    def _eval(self):
-        raise CriticalError(
-            "Unimplemented method: use FunctionNode, StaticNode or MemberNode"
-        )
-
-    def eval(self):
-        if not self._closed:
-            raise UnclosedGraphError("Cannot evaluate the node!", node=self)
-        self._being_evaluated = True
-        try:
-            ret = self._eval()
-            self.logger.debug(f"Node '{self.name}': Evaluated return={ret}")
-        except Exception as exc:
-            raise exc
-        self._being_evaluated = False
-        return ret
-
-    def freeze(self):
-        if self._frozen:
-            return
-        self.logger.debug(f"Node '{self.name}': Freeze")
-        if self._tainted:
-            raise CriticalError("Unable to freeze tainted node!", node=self)
-        self._frozen = True
-        self._frozen_tainted = False
-
-    def unfreeze(self, force: bool = False):
-        if not self._frozen and not force:
-            return
-        self.logger.debug(f"Node '{self.name}': Unfreeze")
-        self._frozen = False
-        if self._frozen_tainted:
-            self._frozen_tainted = False
-            self.taint(force=True)
-
-    def taint(self, *, caller: Optional[Input] = None, force: bool = False):
-        self.logger.debug(f"Node '{self.name}': Taint...")
-        if self._tainted and not force:
-            return
-        if self._frozen:
-            self._frozen_tainted = True
-            return
-        self._tainted = True
-        self._on_taint(caller)
-        ret = self.touch() if self._immediate else None
-        self.taint_children(force=force)
-        return ret
-
-    def taint_children(self, **kwargs):
-        for output in self.outputs:
-            output.taint_children(**kwargs)
-
-    def taint_type(self, force: bool = False):
-        self.logger.debug(f"Node '{self.name}': Taint types...")
-        if self._closed:
-            raise ClosedGraphError("Unable to taint type", node=self)
-        if self._types_tainted and not force:
-            return
-        self._types_tainted = True
-        self._tainted = True
-        self._frozen = False
-        for output in self.outputs:
-            output.taint_children_type(force)
-
-    def print(self):
-        print(
-            f"Node {self._name}: →[{len(self.inputs)}],[{len(self.outputs)}]→"
-        )
-        for i, input in enumerate(self.inputs):
-            print("  ", i, input)
-        for i, output in enumerate(self.outputs):
-            print("  ", i, output)
-
-    def _typefunc(self) -> None:
-        """An output uses this function to determine the dtype and shape"""
-        raise DagflowError(
-            "Unimplemented method: the method must be overridden!"
-        )
-
-    def _fcn(self, _, inputs, outputs):
-        pass
-
-    def _on_taint(self, caller: Input):
-        """A node method to be called on taint"""
-        pass
-
-    def _post_allocate(self):
-        pass
-
-    def update_types(self, recursive: bool = True) -> bool:
-        if not self._types_tainted:
-            return True
-        if recursive:
-            self.logger.debug(
-                f"Node '{self.name}': Trigger recursive update types..."
-            )
-            for input in self.inputs.iter_all():
-                input.parent_node.update_types(recursive)
-        self.logger.debug(f"Node '{self.name}': Update types...")
-        self._typefunc()
-        self._types_tainted = False
-
-    def allocate(self, recursive: bool = True):
-        if self._allocated:
-            return True
-        if recursive:
-            self.logger.debug(
-                f"Node '{self.name}': Trigger recursive memory allocation..."
-            )
-            if not all(
-                input.parent_node.allocate(recursive)
-                for input in self.inputs.iter_all()
-            ):
-                return False
-        self.logger.debug(f"Node '{self.name}': Allocate memory on inputs")
-        if not self.inputs.allocate():
-            raise AllocationError(
-                "Cannot allocate memory for inputs!", node=self
-            )
-        self.logger.debug(f"Node '{self.name}': Allocate memory on outputs")
-        if not self.outputs.allocate():
-            raise AllocationError(
-                "Cannot allocate memory for outputs!", node=self
-            )
-        self.logger.debug(f"Node '{self.name}': Post allocate")
-        self._post_allocate()
-        self._allocated = True
-        return True
-
-    def close(
-        self, recursive: bool = True, together: List["Node"] = []
-    ) -> bool:
-        # Caution: `together` list should not be written in!
-
-        if self._closed:
-            return True
-        if self.invalid:
-            raise ClosingError("Cannot close an invalid node!", node=self)
-        self.logger.debug(f"Node '{self.name}': Trigger recursive close")
-        for node in [self] + together:
-            node.update_types(recursive=recursive)
-        for node in [self] + together:
-            node.allocate(recursive=recursive)
-        if recursive and not all(
-            input.parent_node.close(recursive)
-            for input in self.inputs.iter_all()
-        ):
-            return False
-        for node in together:
-            if not node.close(recursive=recursive):
-                return False
-        self._closed = self._allocated
-        if not self._closed:
-            raise ClosingError(node=self)
-        self.logger.debug(f"Node '{self.name}': Closed")
-        return self._closed
-
-    def open(self, force: bool = False) -> bool:
-        if not self._closed and not force:
-            return True
-        self.logger.debug(f"Node '{self.name}': Open")
-        if not all(
-            input.node.open(force)
-            for output in self.outputs
-            for input in output.child_inputs
-        ):
-            raise OpeningError(node=self)
-        self.unfreeze()
-        self.taint()
-        self._closed = False
-        return not self._closed
-
-    #
-    # Accessors
-    #
-    def get_data(self, key=0):
-        return self.outputs[key].data
-
-    def get_input_data(self, key):
-        return self.inputs[key].data()
-
-    def to_dict(self, *, label_from: str='text') -> dict:
-        data = self.get_data()
-        if data.size>1:
-            raise AttributeError('to_dict')
-        return {
-                'value': data[0],
-                'label': self.label(label_from)
-                }
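
Most of the machinery above implements lazy, taint-driven evaluation: taint() marks the node and, through its outputs, its children as dirty, and touch() recomputes only while the taint flag is set. A stripped-down pure-Python sketch of that pattern (the LazyCell class is purely illustrative and omits graphs, allocation and freezing):

    class LazyCell:
        def __init__(self, fcn):
            self._fcn = fcn
            self._tainted = True
            self._value = None
            self.children = []              # downstream cells, cf. taint_children()

        def taint(self):
            if self._tainted:
                return
            self._tainted = True
            for child in self.children:
                child.taint()

        def touch(self):
            if self._tainted:               # recompute only when dirty
                self._value = self._fcn()
                self._tainted = False
            return self._value

    source = [1.0, 2.0]
    total = LazyCell(lambda: sum(source))
    print(total.touch())    # 3.0, computed
    print(total.touch())    # 3.0, cached
    source.append(3.0)
    total.taint()           # an upstream change propagates a taint
    print(total.touch())    # 6.0, recomputed
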
diff --git a/subtrees/dagflow/dagflow/node_group.py b/subtrees/dagflow/dagflow/node_group.py
deleted file mode 100644
index 1993bf983865db6a59d50f8ed3ce9530ce950f7e..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/node_group.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from .shift import lshift
-
-
-class NodeGroup:
-    _nodes: list = None
-
-    def __init__(self, *args):
-        self._nodes = list(args)
-
-    def register_node(self, node):
-        self._nodes.append(node)
-
-    def _wrap_fcns(self, *args):
-        for node in self._nodes:
-            node._wrap_fcn(*args)
-
-    def _unwrap_fcns(self):
-        for node in self._nodes:
-            node._unwrap_fcn()
-
-    def print(self):
-        print(f"Group of {len(self._nodes)} nodes:")
-        for node in self._nodes:
-            node.print()
-
-    def __lshift__(self, other):
-        """
-        self << other
-        """
-        return lshift(self, other)
-
-    def __rrshift__(self, other):
-        """
-        other >> self
-        """
-        return lshift(self, other)
-
-    def __iter__(self):
-        """
-        iterate inputs
-
-        To be used with >>/<< operators which take only disconnected inputs
-        """
-        return iter(self._nodes)
diff --git a/subtrees/dagflow/dagflow/nodes.py b/subtrees/dagflow/dagflow/nodes.py
deleted file mode 100644
index 42b4dce1a6489691c1057aec5bb3903c8ea20643..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/nodes.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from .node import Node
-
-
-class FunctionNode(Node):
-    """Function signature: fcn(node, inputs, outputs)
-
-    Note: _fcn should be a static function with signature (node, inputs, outputs)
-
-    - Function defined as an instance attribute behaves like a static method:
-        class Node(...):
-            def __init__(self):
-                self._fcn = ...
-        node = Node()
-        node.fcn() # will have NO self provided as first argument
-
-    - Function defined as a staticmethod in a subclass:
-        class Other(Node):
-            @staticmethod
-            def _fcn():
-                ...
-
-        node = Other()
-        node.fcn() # will have NO self provided as first argument
-
-    - [deprecated] Function defined as a class attribute will become a bound method:
-        class Node(...):
-            _fcn = ...
-        node = Node()
-        node.fcn() # will have self provided as first argument
-
-    - [deprecated] Function defined via the staticmethod decorator as a class attribute will become a static method:
-        class Node(...):
-            _fcn = staticmethod(...)
-        node = Node()
-        node.fcn() # will have NO self provided as first argument
-    """
-
-    fcn = None
-
-    def __init__(self, name, **kwargs):
-        super().__init__(name, **kwargs)
-        if self.fcn is None:
-            self._functions = {"default": self._fcn}
-            self.fcn = self._functions["default"]
-        else:
-            self._functions = {"default": self.fcn}
-
-    def _stash_fcn(self):
-        self._fcn_chain.append(self.fcn)
-        return self.fcn
-
-    def _make_wrap(self, prev_fcn, wrap_fcn):
-        def wrapped_fcn(node, inputs, outputs):
-            wrap_fcn(prev_fcn, node, inputs, outputs)
-
-        return wrapped_fcn
-
-    def _eval(self):
-        return self.fcn(self, self.inputs, self.outputs)
-
-
-class StaticNode(Node):
-    """Function signature: fcn()"""
-
-    _touch_inputs = True
-
-    def __init__(self, *args, **kwargs):
-        self._touch_inputs = kwargs.pop("touch_inputs", True)
-        super().__init__(*args, **kwargs)
-
-    def _eval(self):
-        self._being_evaluated = True
-        if self._touch_inputs:
-            self.inputs.touch()
-        ret = self._fcn()
-        self._being_evaluated = False
-        return ret
-
-    def _stash_fcn(self):
-        prev_fcn = self._fcn
-        self._fcn_chain.append(prev_fcn)
-        return lambda node, inputs, outputs: prev_fcn()
-
-    def _make_wrap(self, prev_fcn, wrap_fcn):
-        def wrapped_fcn():
-            wrap_fcn(prev_fcn, self, self.inputs, self.outputs)
-
-        return wrapped_fcn
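
The binding rules spelled out in the FunctionNode docstring are plain Python behaviour: a function stored on the instance is called as-is, while one stored on the class becomes a bound method and receives the instance implicitly. A minimal demonstration, independent of dagflow:

    def fcn(*args):
        return args

    class InstanceAttr:
        def __init__(self):
            self.fcn = fcn              # instance attribute: no binding

    class ClassAttr:
        fcn = fcn                       # class attribute: bound on access

    print(InstanceAttr().fcn())         # () -- no self passed
    print(ClassAttr().fcn())            # (<ClassAttr object ...>,) -- self passed
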
diff --git a/subtrees/dagflow/dagflow/output.py b/subtrees/dagflow/dagflow/output.py
deleted file mode 100644
index 5f98ed7e2fce9add4388a08138880171de3c9bb6..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/output.py
+++ /dev/null
@@ -1,379 +0,0 @@
-from itertools import cycle
-from typing import List, Optional, Tuple
-
-from numpy import zeros
-from numpy.typing import ArrayLike, DTypeLike, NDArray
-
-from .edges import EdgeContainer
-from .exception import (
-    ClosedGraphError,
-    CriticalError,
-    InitializationError,
-    AllocationError,
-    ConnectionError,
-    UnclosedGraphError,
-)
-from .shift import lshift, rshift
-from .iter import StopNesting
-from .types import EdgesLike, InputT, NodeT, ShapeLike
-from .datadescriptor import DataDescriptor
-
-
-class Output:
-    _data: Optional[NDArray] = None
-    _dd: DataDescriptor
-
-    _node: Optional[NodeT]
-    _name: Optional[str]
-
-    _child_inputs: List[InputT]
-    _parent_input: Optional[InputT] = None
-    _allocating_input: Optional[InputT] = None
-
-    _allocatable: bool = True
-    _owns_buffer: bool = False
-    _forbid_reallocation: bool = False
-
-    _debug: bool = False
-
-    def __init__(
-        self,
-        name: Optional[str],
-        node: Optional[NodeT],
-        *,
-        debug: Optional[bool] = None,
-        allocatable: Optional[bool] = None,
-        data: Optional[NDArray] = None,
-        owns_buffer: Optional[bool] = None,
-        dtype: DTypeLike = None,
-        shape: Optional[ShapeLike] = None,
-        axes_edges: Optional[Tuple[EdgesLike]] = None,
-        axes_nodes: Optional[Tuple[EdgesLike]] = None,
-        forbid_reallocation: bool = False,
-    ):
-        self._name = name
-        self._node = node
-        self._child_inputs = []
-        self._debug = (
-            debug if debug is not None else node.debug if node else False
-        )
-        self._forbid_reallocation = forbid_reallocation
-
-        self._dd = DataDescriptor(dtype, shape, axes_edges, axes_nodes)
-
-        if data is None:
-            self._allocatable = True if allocatable is None else allocatable
-        else:
-            if owns_buffer is None:
-                owns_buffer = True
-            self._allocatable = not owns_buffer
-            self._set_data(data, owns_buffer=owns_buffer)
-
-            if allocatable or dtype is not None or shape is not None:
-                raise InitializationError(output=self, node=node)
-
-    def __str__(self):
-        return f"●→ {self._name}" if self.owns_buffer else f"○→ {self._name}"
-
-    def __repr__(self):
-        return self.__str__()
-
-    @property
-    def name(self):
-        return self._name
-
-    @name.setter
-    def name(self, name):
-        self._name = name
-
-    @property
-    def allocatable(self):
-        return self._allocatable
-
-    @property
-    def has_data(self) -> bool:
-        return self._data is not None
-
-    @property
-    def node(self):
-        return self._node
-
-    @property
-    def child_inputs(self):
-        return self._child_inputs
-
-    @property
-    def parent_input(self):
-        return self._parent_input
-
-    @parent_input.setter
-    def parent_input(self, input):
-        self._parent_input = input
-
-    @property
-    def logger(self):
-        return self._node.logger
-
-    @property
-    def invalid(self):
-        """Checks the validity of the current node"""
-        return self._node.invalid
-
-    @invalid.setter
-    def invalid(self, invalid):
-        """Sets the validity of the following nodes"""
-        for input in self.child_inputs:
-            input.invalid = invalid
-
-    @property
-    def data(self):
-        if self.node.being_evaluated:
-            return self._data
-        if not self.closed:
-            raise UnclosedGraphError(
-                "Unable to get the output data from unclosed graph!",
-                node=self._node,
-                output=self,
-            )
-        try:
-            self.touch()
-            return self.get_data_unsafe()
-        except Exception as exc:
-            raise CriticalError(
-                "An exception occured during touching of the parent node!",
-                node=self._node,
-                output=self,
-            ) from exc
-
-    def _set_data(
-        self,
-        data,
-        *,
-        owns_buffer: bool,
-        override: bool = False,
-        forbid_reallocation: Optional[bool] = None,
-    ):
-        if self.closed:
-            raise ClosedGraphError(
-                "Unable to set output data.", node=self._node, output=self
-            )
-        if self._data is not None and not override:
-            # TODO: this will fail during reallocation
-            raise AllocationError(
-                "Output already has data.", node=self._node, output=self
-            )
-        if owns_buffer:
-            forbid_reallocation = True
-        elif forbid_reallocation is None:
-            forbid_reallocation = owns_buffer
-
-        forbid_reallocation |= self._forbid_reallocation
-        if forbid_reallocation and self._allocating_input:
-            raise AllocationError(
-                "Output is connected to allocating input, but reallocation is forbidden",
-                node=self._node,
-                output=self,
-            )
-
-        self._data = data
-        self.dd.dtype = data.dtype
-        self.dd.shape = data.shape
-        self._owns_buffer = owns_buffer
-        self._forbid_reallocation = forbid_reallocation
-
-    @property
-    def dd(self) -> Optional[DataDescriptor]:
-        return self._dd
-
-    @property
-    def owns_buffer(self):
-        return self._owns_buffer
-
-    @property
-    def forbid_reallocation(self):
-        return self._forbid_reallocation
-
-    @property
-    def closed(self):
-        return self.node.closed if self.node else False
-
-    @property
-    def tainted(self) -> bool:
-        return self._node.tainted
-
-    @property
-    def debug(self) -> bool:
-        return self._debug
-
-    def get_data_unsafe(self):
-        return self._data
-
-    def connect_to(self, input) -> InputT:
-        if not self.closed and input.closed:
-            raise ConnectionError(
-                "Cannot connect an output to a closed input!",
-                node=self.node,
-                output=self,
-                input=input,
-            )
-        if self.closed and input.allocatable:
-            raise ConnectionError(
-                "Cannot connect a closed output to an allocatable input!",
-                node=self.node,
-                output=self,
-                input=input,
-            )
-        return self._connect_to(input)
-
-    def _connect_to(self, input) -> InputT:
-        if input.allocatable:
-            if self._allocating_input:
-                raise ConnectionError(
-                    "Output has multiple allocatable/allocated child inputs",
-                    node=self._node,
-                    output=self,
-                )
-            if self._forbid_reallocation:
-                raise ConnectionError(
-                    "Output forbids reallocation and may not connect to allocating inputs",
-                    node=self._node,
-                    output=self,
-                )
-            self._allocating_input = input
-        self._child_inputs.append(input)
-        input._set_parent_output(self)
-        return input
-
-    def __rshift__(self, other):
-        return rshift(self, other)
-
-    def __rlshift__(self, other):
-        return lshift(self, other)
-
-    def taint_children(self, **kwargs) -> None:
-        for input in self._child_inputs:
-            input.taint(**kwargs)
-
-    def taint_children_type(self, **kwargs) -> None:
-        for input in self._child_inputs:
-            input.taint_type(**kwargs)
-
-    def touch(self):
-        return self._node.touch()
-
-    def connected(self):
-        return bool(self._child_inputs)
-
-    def deep_iter_outputs(self, disconnected_only=False):
-        if disconnected_only and self.connected():
-            return iter(tuple())
-        raise StopNesting(self)
-
-    def deep_iter_child_outputs(self):
-        raise StopNesting(self)
-
-    def repeat(self):
-        return RepeatedOutput(self)
-
-    def allocate(self, **kwargs):
-        if not self._allocatable:
-            return True
-
-        if self._allocating_input:
-            input = self._allocating_input
-            input.allocate(recursive=False)
-            if input.has_data:
-                idata = input._own_data
-                if idata.shape != self.dd.shape or idata.dtype != self.dd.dtype:
-                    raise AllocationError(
-                        "Input's data shape/type is inconsistent",
-                        node=self._node,
-                        output=self,
-                        input=input,
-                    )
-
-                if self._data is not idata:
-                    if self._data is not None:
-                        idata[:] = self._data
-                    self._set_data(idata, owns_buffer=False, override=True)
-                return True
-
-        if self.has_data:
-            return True
-
-        if self.dd.shape is None or self.dd.dtype is None:
-            raise AllocationError(
-                "No shape/type information provided for the Output",
-                node=self._node,
-                output=self,
-            )
-        try:
-            data = zeros(self.dd.shape, self.dd.dtype, **kwargs)
-            self._set_data(data, owns_buffer=True)
-        except Exception as exc:
-            raise AllocationError(
-                f"Output: {exc.args[0]}", node=self._node, output=self
-            ) from exc
-
-        return True
-
-    def seti(self, idx: int, value: float, check_taint: bool = False, force: bool = False) -> bool:
-        if self.node._frozen and not force:
-            return False
-
-        tainted = True
-        if check_taint:
-            tainted = self._data[idx] != value
-
-        if tainted:
-            self._data[idx] = value
-            self.taint_children()
-            self.node.invalidate_parents()
-            self.node._tainted = False
-
-        return tainted
-
-    def set(
-        self, data: ArrayLike, check_taint: bool = False, force: bool = False
-    ) -> bool:
-        if self.node._frozen and not force:
-            return False
-
-        tainted = True
-        if check_taint:
-            tainted = (self._data != data).any()
-
-        if tainted:
-            self._data[:] = data
-            self.taint_children()
-            self.node.invalidate_parents()
-            self.node._tainted = False
-
-        return tainted
-
-
-class RepeatedOutput:
-    def __init__(self, output):
-        self._output = output
-
-    def __iter__(self):
-        return cycle((self._output,))
-
-    def __rshift__(self, other):
-        return rshift(self, other)
-
-    def __rlshift__(self, other):
-        return lshift(self, other)
-
-
-class Outputs(EdgeContainer):
-    _dtype = Output
-
-    def __init__(self, iterable=None) -> None:
-        super().__init__(iterable)
-
-    def __str__(self) -> str:
-        return f"○[{tuple(obj.name for obj in self)}]→"
-
-    def __repr__(self) -> str:
-        return self.__str__()
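
Output.set and Output.seti rewrite the buffer, taint the children and invalidate the parents only when the data actually changes (if check_taint is requested). A NumPy sketch of that comparison logic; the set_data helper is illustrative and reduces the taint propagation to a comment:

    import numpy as np

    data = np.array([1.0, 2.0, 3.0])

    def set_data(new, check_taint=False):
        tainted = True
        if check_taint:
            tainted = bool((data != new).any())
        if tainted:
            data[:] = new
            # the real Output would now call taint_children()
            # and node.invalidate_parents()
        return tainted

    print(set_data([1.0, 2.0, 3.0], check_taint=True))   # False -- nothing changed
    print(set_data([1.0, 2.0, 4.0], check_taint=True))   # True  -- children would be tainted
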
diff --git a/subtrees/dagflow/dagflow/parameters.py b/subtrees/dagflow/dagflow/parameters.py
deleted file mode 100644
index e7581d4c4ad04cb996d41f18748b418628eb6a16..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/parameters.py
+++ /dev/null
@@ -1,462 +0,0 @@
-from .node import Node, Output
-from .exception import InitializationError
-from .lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
-from .lib.Cholesky import Cholesky
-from .lib.Array import Array
-from .lib.CovmatrixFromCormatrix import CovmatrixFromCormatrix
-
-from numpy import zeros_like, array
-from numpy.typing import DTypeLike
-from typing import Optional, Dict, List
-
-class Parameter:
-    __slots__ = ('_idx','_parent', '_value_output', '_labelfmt')
-    _parent: Optional['Parameters']
-    _idx: int
-    _value_output: Output
-    _labelfmt: str
-
-    def __init__(
-        self,
-        value_output: Output,
-        idx: int=0,
-        *,
-        parent: 'Parameters',
-        labelfmt: str='{}'
-    ):
-        self._idx = idx
-        self._parent = parent
-        self._value_output = value_output
-        self._labelfmt = labelfmt
-
-    @property
-    def value(self) -> float:
-        return self._value_output.data[self._idx]
-
-    @value.setter
-    def value(self, value: float):
-        return self._value_output.seti(self._idx, value)
-
-    @property
-    def output(self) -> Output:
-        return self._value_output
-
-    def label(self, source: str='text') -> str:
-        return self._labelfmt.format(self._value_output.node.label(source))
-
-    def to_dict(self, *, label_from: str='text') -> dict:
-        return {
-                'value': self.value,
-                'label': self.label(label_from)
-                }
-
-class GaussianParameter(Parameter):
-    __slots__ = ( '_central_output', '_sigma_output', '_normvalue_output')
-    _central_output: Output
-    _sigma_output: Output
-    _normvalue_output: Output
-
-    def __init__(
-        self,
-        value_output: Output,
-        central_output: Output,
-        sigma_output: Output,
-        idx: int=0,
-        *,
-        normvalue_output: Output,
-        **kwargs
-    ):
-        super().__init__(value_output, idx, **kwargs)
-        self._central_output = central_output
-        self._sigma_output = sigma_output
-        self._normvalue_output = normvalue_output
-
-    @property
-    def central(self) -> float:
-        return self._central_output.data[self._idx]
-
-    @central.setter
-    def central(self, central: float):
-        self._central_output.seti(self._idx, central)
-
-    @property
-    def sigma(self) -> float:
-        return self._sigma_output.data[self._idx]
-
-    @sigma.setter
-    def sigma(self, sigma: float):
-        self._sigma_output.seti(self._idx, sigma)
-
-    @property
-    def sigma_relative(self) -> float:
-        return self.sigma/self.value
-
-    @sigma_relative.setter
-    def sigma_relative(self, sigma_relative: float):
-        self.sigma = sigma_relative * self.value
-
-    @property
-    def sigma_percent(self) -> float:
-        return 100.0 * (self.sigma/self.value)
-
-    @sigma_percent.setter
-    def sigma_percent(self, sigma_percent: float):
-        self.sigma = (0.01*sigma_percent) * self.value
-
-    @property
-    def normvalue(self) -> float:
-        return self._normvalue_output.data[self._idx]
-
-    @normvalue.setter
-    def normvalue(self, normvalue: float):
-        self._normvalue_output.seti(self._idx, normvalue)
-
-    def to_dict(self, **kwargs) -> dict:
-        dct = super().to_dict(**kwargs)
-        dct.update({
-            'central': self.central,
-            'sigma': self.sigma,
-            # 'normvalue': self.normvalue,
-            })
-        return dct
-
-class NormalizedGaussianParameter(Parameter):
-    @property
-    def central(self) -> float:
-        return 0.0
-
-    @property
-    def sigma(self) -> float:
-        return 1.0
-
-    def to_dict(self, **kwargs) -> dict:
-        dct = super().to_dict(**kwargs)
-        dct.update({
-            'central': 0.0,
-            'sigma': 1.0,
-            # 'normvalue': self.value,
-            })
-        return dct
-
-class Constraint:
-    __slots__ = ('_pars',)
-    _pars: "Parameters"
-
-    def __init__(self, parameters: "Parameters"):
-        self._pars = parameters
-
-class Parameters:
-    __slots__ = (
-        'value',
-        '_value_node',
-        '_pars',
-        '_norm_pars',
-        '_is_variable',
-        '_constraint'
-    )
-    value: Output
-    _value_node: Node
-    _pars: List[Parameter]
-    _norm_pars: List[Parameter]
-
-    _is_variable: bool
-
-    _constraint: Optional[Constraint]
-
-    def __init__(
-        self,
-        value: Node,
-        *,
-        variable: Optional[bool]=None,
-        fixed: Optional[bool]=None,
-        close: bool=True
-    ):
-        self._value_node = value
-        self.value = value.outputs[0]
-
-        if all(f is not None for f in (variable, fixed)):
-            raise RuntimeError("Parameter may not be set to variable and fixed at the same time")
-        if variable is not None:
-            self._is_variable = variable
-        elif fixed is not None:
-            self._is_variable = not fixed
-        else:
-            self._is_variable = True
-
-        self._constraint = None
-
-        self._pars = []
-        self._norm_pars = []
-        if close:
-            self._close()
-
-            for i in range(self.value._data.size):
-                self._pars.append(Parameter(self.value, i, parent=self))
-
-    def _close(self) -> None:
-        self._value_node.close(recursive=True)
-
-    @property
-    def is_variable(self) -> bool:
-        return self._is_variable
-
-    @property
-    def is_fixed(self) -> bool:
-        return not self._is_variable
-
-    @property
-    def is_constrained(self) -> bool:
-        return self._constraint is not None
-
-    @property
-    def is_free(self) -> bool:
-        return self._constraint is None
-
-    @property
-    def parameters(self) -> List:
-        return self._pars
-
-    @property
-    def norm_parameters(self) -> List:
-        return self._norm_pars
-
-    @property
-    def constraint(self) -> Optional[Constraint]:
-        return self._constraint
-
-    def to_dict(self, *, label_from: str='text') -> dict:
-        return {
-                'value': self.value.data[0],
-                'label': self._value_node.label(label_from)
-                }
-
-    def set_constraint(self, constraint: Constraint) -> None:
-        if self._constraint is not None:
-            raise InitializationError("Constraint already set")
-        self._constraint = constraint
-        # constraint._pars = self
-
-    @staticmethod
-    def from_numbers(
-        value: float,
-        *,
-        dtype: DTypeLike='d',
-        variable: Optional[bool]=None,
-        fixed: Optional[bool]=None,
-        label: Optional[Dict[str, str]]=None,
-        **kwargs
-    ) -> 'Parameters':
-        if label is None:
-            label = {'text': 'parameter'}
-        else:
-            label = dict(label)
-        name: str = label.setdefault('name', 'parameter')
-        has_constraint = kwargs.get('sigma', None) is not None
-        pars = Parameters(
-            Array(
-                name,
-                array((value,), dtype=dtype),
-                label = label,
-                mode='store_weak',
-            ),
-            fixed=fixed,
-            variable=variable,
-            close=not has_constraint
-        )
-
-        if has_constraint:
-            pars.set_constraint(
-                GaussianConstraint.from_numbers(
-                    parameters=pars,
-                    dtype=dtype,
-                    **kwargs
-                )
-            )
-            pars._close()
-
-        return pars
-
-class GaussianConstraint(Constraint):
-    __slots__ = (
-        'central', 'sigma', 'normvalue',
-        '_central_node', '_sigma_node', '_normvalue_node',
-        '_cholesky_node', '_covariance_node', '_correlation_node',
-        '_sigma_total_node',
-        '_norm_node',
-        '_is_constrained'
-    )
-    central: Output
-    sigma: Output
-    normvalue: Output
-
-    _central_node: Node
-    _sigma_node: Node
-    _normvalue_node: Node
-
-    _cholesky_node: Optional[Node]
-    _covariance_node: Optional[Node]
-    _correlation_node: Optional[Node]
-    _sigma_total_node: Optional[Node]
-
-    _norm_node: Node
-
-    _is_constrained: bool
-
-    def __init__(
-        self,
-        central: Node,
-        *,
-        parameters: Parameters,
-        sigma: Node=None,
-        covariance: Node=None,
-        correlation: Node=None,
-        constrained: Optional[bool]=None,
-        free: Optional[bool]=None,
-        **_
-    ):
-        super().__init__(parameters=parameters)
-        self._central_node = central
-
-        self._cholesky_node = None
-        self._covariance_node = None
-        self._correlation_node = None
-        self._sigma_total_node = None
-
-        if all(f is not None for f in (constrained, free)):
-            raise RuntimeError("GaussianConstraint may not be set to constrained and free at the same time")
-        if constrained is not None:
-            self._is_constrained = constrained
-        elif free is not None:
-            self._is_constrained = not free
-        else:
-            self._is_constrained = True
-
-        if sigma is not None and covariance is not None:
-            raise InitializationError('GaussianConstraint: got both "sigma" and "covariance" as arguments')
-        if correlation is not None and sigma is None:
-            raise InitializationError('GaussianConstraint: got "correlation", but no "sigma" as arguments')
-
-        value_node = parameters._value_node
-        if correlation is not None:
-            self._correlation_node = correlation
-            self._covariance_node = CovmatrixFromCormatrix(f"V({value_node.name})")
-            self._cholesky_node = Cholesky(f"L({value_node.name})")
-            self._sigma_total_node = sigma
-            self._sigma_node = self._cholesky_node
-
-            self._sigma_total_node >> self._covariance_node.inputs['sigma']
-            correlation >> self._covariance_node
-            self._covariance_node >> self._cholesky_node
-        elif sigma is not None:
-            self._sigma_node = sigma
-        elif covariance is not None:
-            self._cholesky_node = Cholesky(f"L({value_node.name})")
-            self._sigma_node = self._cholesky_node
-            self._covariance_node = covariance
-
-            covariance >> self._cholesky_node
-        else:
-            # TODO: no sigma/covariance AND central means normalized=value?
-            raise InitializationError('GaussianConstraint: got no "sigma" and no "covariance" arguments')
-
-        self.central = self._central_node.outputs[0]
-        self.sigma = self._sigma_node.outputs[0]
-
-        self._normvalue_node = Array(
-            f'Normalized {value_node.name}',
-            zeros_like(self.central._data),
-            mark = f'norm({value_node.mark})',
-            mode='store_weak'
-        )
-        self._normvalue_node._inherit_labels(self._pars._value_node, fmt='Normalized {}')
-        self.normvalue = self._normvalue_node.outputs[0]
-
-        self._norm_node = NormalizeCorrelatedVars2(f"Normalize {value_node.name}", immediate=True)
-        self.central >> self._norm_node.inputs['central']
-        self.sigma >> self._norm_node.inputs['matrix']
-
-        (parameters.value, self.normvalue) >> self._norm_node
-
-        self._norm_node.close(recursive=True)
-        self._norm_node.touch()
-
-        value_output = self._pars.value
-        for i in range(value_output._data.size):
-            self._pars._pars.append(
-                GaussianParameter(
-                    value_output,
-                    self.central,
-                    self.sigma,
-                    i,
-                    normvalue_output=self.normvalue,
-                    parent=self
-                )
-            )
-            self._pars._norm_pars.append(
-                NormalizedGaussianParameter(
-                    self.normvalue,
-                    i,
-                    parent=self,
-                    labelfmt='[norm] {}'
-                )
-            )
-
-    @property
-    def is_constrained(self) -> bool:
-        return self._is_constrained
-
-    @property
-    def is_free(self) -> bool:
-        return not self._is_constrained
-
-    @property
-    def is_correlated(self) -> bool:
-        return self._covariance_node is not None
-
-    @staticmethod
-    def from_numbers(
-        *,
-        central: float,
-        sigma: float,
-        label: Optional[Dict[str,str]]=None,
-        dtype: DTypeLike='d',
-        **kwargs
-    ) -> 'GaussianConstraint':
-        if label is None:
-            label = {'text': 'gaussian parameter'}
-        else:
-            label = dict(label)
-        name = label.setdefault('name', 'parameter')
-
-        node_central = Array(
-            f'{name}_central',
-            array((central,), dtype=dtype),
-            label = {k: f'central: {v}' for k,v in label.items()},
-            mode='store_weak'
-        )
-
-        node_sigma = Array(
-            f'{name}_sigma',
-            array((sigma,), dtype=dtype),
-            label = {k: f'sigma: {v}' for k,v in label.items()},
-            mode='store_weak'
-        )
-
-        return GaussianConstraint(central=node_central, sigma=node_sigma, **kwargs)
-
-    def to_dict(self, **kwargs) -> dict:
-        dct = super().to_dict(**kwargs)
-        dct.update({
-            'central': self.central.data[0],
-            'sigma': self.sigma.data[0],
-            # 'normvalue': self.normvalue.data[0],
-            })
-        return dct
-
-def GaussianParameters(value: Node, *args, **kwargs) -> Parameters:
-    pars = Parameters(value, close=False)
-    pars.set_constraint(GaussianConstraint(*args, parameters=pars, **kwargs))
-    pars._close()
-
-    return pars
-
diff --git a/subtrees/dagflow/dagflow/printl.py b/subtrees/dagflow/dagflow/printl.py
deleted file mode 100644
index 459eb59e9c4b045facb4735b9f2eb451681292a3..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/printl.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# -*- coding: utf-8 -*-
-
-
-printlevel = 0
-singlemargin = "    "
-marginflag = False
-prefix_function = lambda: ""
-
-
-def set_prefix_function(f):
-    global prefix_function
-    prefix_function = f
-
-
-class next_level:
-    def __enter__(self):
-        global printlevel
-        printlevel += 1
-
-    def __exit__(self, *args, **kwargs):
-        global printlevel
-        printlevel -= 1
-
-
-def current_level():
-    return printlevel
-
-
-def print_margin(kwargs):
-    global marginflag
-    prefix = kwargs.pop("prefix", prefix_function())
-    postfix = kwargs.pop("postfix", None)
-    prefixopts = kwargs.pop("prefixopts", dict(end=""))
-    postfixopts = kwargs.pop("postfixopts", dict(end=" "))
-    if marginflag:
-        return
-
-    if prefix:
-        print(*prefix, **prefixopts)
-
-    print(singlemargin * printlevel, sep="", end="")
-
-    if postfix:
-        print(*postfix, **postfixopts)
-
-    marginflag = True
-
-
-def reset_margin_flag(*args, **kwargs):
-    global marginflag
-
-    for arg in args + (kwargs.pop("sep", ""), kwargs.pop("end", "\n")):
-        if "\n" in str(arg):
-            marginflag = False
-            return
-
-
-def printl(*args, **kwargs):
-    print_margin(kwargs)
-    print(*args, **kwargs)
-    reset_margin_flag(*args, **kwargs)
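Editor's note (not part of the original diff): the removed printl.py implements level-indented printing via a module-global counter. A minimal self-contained sketch of that idea, with illustrative names rather than the dagflow API, is:

```python
# Sketch of level-indented printing in the spirit of the removed printl.py.
printlevel = 0

class next_level:
    """Context manager that indents everything printed inside it by one level."""
    def __enter__(self):
        global printlevel
        printlevel += 1
    def __exit__(self, *exc):
        global printlevel
        printlevel -= 1

def printl(*args):
    print("    " * printlevel, end="")
    print(*args)

printl("Evaluate sum")           # no indentation
with next_level():
    printl("touch input n1")     # indented by one level
printl("... done with sum")      # back to the outer level
```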
diff --git a/subtrees/dagflow/dagflow/shift.py b/subtrees/dagflow/dagflow/shift.py
deleted file mode 100644
index 693c422063d1bc2ff4447d428316a454edf2aeba..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/shift.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from itertools import zip_longest
-
-from .exception import ConnectionError
-from .iterators import iter_child_outputs, iter_inputs, iter_outputs
-
-_rshift_scope_id = 0
-
-
-def rshift_scope_id():
-    global _rshift_scope_id
-    ret = _rshift_scope_id
-    _rshift_scope_id += 1
-    return ret
-
-
-def rshift(outputs, inputs):
-    """`>>` operator"""
-    scope_id = rshift_scope_id()
-
-    for output, inp in zip_longest(
-        iter_outputs(outputs),
-        iter_inputs(inputs, True),
-        fillvalue=None,
-    ):
-        if not output:
-            raise ConnectionError("Unable to connect mismatching lists!")
-        if isinstance(output, dict):
-            if inp:
-                raise ConnectionError(
-                    f"Cannot perform a binding from dict={output} due to "
-                    f"non-empty input={inp}!"
-                )
-            for key, val in output.items():
-                val >> inputs(key)
-            continue
-        if not inp:
-            missing_input_handler = getattr(
-                inputs, "_missing_input_handler", lambda *args, **kwargs: None
-            )
-            if not (inp := missing_input_handler(scope=scope_id)):
-                break
-        output.connect_to(inp)
-
-    child_outputs = tuple(iter_child_outputs(inputs))
-    return child_outputs[0] if len(child_outputs) == 1 else child_outputs
-
-
-def lshift(inputs, outputs):
-    """`<<` operator"""
-    return rshift(outputs, inputs)
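Editor's note (not part of the original diff): `rshift` pairs node outputs with inputs positionally and falls back to the node's missing-input handler when the inputs run out. A reduced, dagflow-free sketch of that pairing logic:

```python
# Illustrative pairing logic behind the `>>` operator in the removed shift.py;
# plain lists stand in for dagflow outputs/inputs.
from itertools import zip_longest

def connect(outputs, inputs, missing_input_handler=None):
    pairs = []
    for out, inp in zip_longest(outputs, inputs, fillvalue=None):
        if out is None:
            raise ValueError("Unable to connect mismatching lists!")
        if inp is None:
            # dagflow would create a new input here via the node's handler
            if missing_input_handler is None:
                break
            inp = missing_input_handler()
        pairs.append((out, inp))
    return pairs

print(connect(["o1", "o2"], ["i1", "i2"]))  # [('o1', 'i1'), ('o2', 'i2')]
```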
diff --git a/subtrees/dagflow/dagflow/tools/__init__.py b/subtrees/dagflow/dagflow/tools/__init__.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/subtrees/dagflow/dagflow/tools/schema.py b/subtrees/dagflow/dagflow/tools/schema.py
deleted file mode 100644
index 598f586333b198088e8f4f7d2bfc228e1ba2dffb..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/tools/schema.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from ..logger import logger, SUBINFO
-from typing import Any, Union
-from schema import Schema, SchemaError
-from contextlib import suppress
-
-from os import access, R_OK
-from typing import Callable
-
-def IsReadable(filename: str):
-    """Returns True if the file is readable"""
-    return access(filename, R_OK)
-
-def IsFilewithExt(*exts: str):
-    """Returns a function that retunts True if the file extension is consistent"""
-    def checkfilename(filename: str):
-        return any(filename.endswith(f'.{ext}') for ext in exts)
-    return checkfilename
-
-def LoadFileWithExt(*, key: Union[str, dict]=None, update: bool=False, **kwargs: Callable):
-    """Returns a function that retunts True if the file extension is consistent"""
-    def checkfilename(filename: Union[str, dict]):
-        if key is not None:
-            dct = filename.copy()
-            filename = dct.pop(key)
-        else:
-            dct = None
-        for ext, loader in kwargs.items():
-            if filename.endswith(f'.{ext}'):
-                logger.log(SUBINFO, f'Read: {filename}')
-                ret = loader(filename)
-
-                if update and dct is not None:
-                    ret.update(dct)
-
-                return ret
-
-        return False
-    return checkfilename
-
-from yaml import load, Loader
-def LoadYaml(fname: str):
-    with open(fname, 'r') as file:
-        return load(file, Loader)
-
-from multikeydict.nestedmkdict import NestedMKDict
-class NestedSchema(object):
-    __slots__ = ('_schema', '_processdicts')
-    _schema: Union[Schema,object]
-    _processdicts: bool
-
-    def __init__(self, /, schema: Union[Schema,object], *, processdicts: bool=False):
-        self._schema = schema
-        self._processdicts = processdicts
-
-    def validate(self, data: Any) -> Any:
-        if not isinstance(data, dict):
-            return self._schema.validate(data)
-
-        if self._processdicts:
-            return {
-                key: self._process_dict((key,), subdata) for key, subdata in data.items()
-            }
-
-        dtin = NestedMKDict(data)
-        dtout = NestedMKDict({})
-        for key, subdata in dtin.walkitems():
-            dtout[key] = self._process_element(key, subdata)
-
-        return dtout.object
-
-    def _process_element(self, key, subdata: Any) -> Any:
-        try:
-            return self._schema.validate(subdata, _is_event_schema=False)
-        except SchemaError as err:
-            key = ".".join(str(k) for k in key)
-            raise SchemaError(f'Key "{key}" has invalid value "{subdata}":\n{err.args[0]}') from err
-
-    def _process_dict(self, key, data: Any) -> Any:
-        if not isinstance(data, dict):
-            return self._schema.validate(data)
-
-        with suppress(SchemaError):
-            return self._schema.validate(data, _is_event_schema=False)
-
-        return {
-            subkey: self._process_dict(key+(subkey,), subdata) for subkey, subdata in data.items()
-        }
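Editor's note (not part of the original diff): `NestedSchema` applies a leaf schema to every entry of a nested mapping and reports the failing key path. A self-contained approximation using only the `schema` package (listed in the project's requirements.txt); names here are illustrative:

```python
# Approximate per-leaf validation of a nested dict with key-path error reporting,
# mimicking NestedSchema.validate from the removed tools/schema.py.
from schema import Schema, SchemaError

def validate_leaves(data, leaf_schema: Schema, path=()):
    out = {}
    for key, value in data.items():
        if isinstance(value, dict):
            out[key] = validate_leaves(value, leaf_schema, path + (key,))
        else:
            try:
                out[key] = leaf_schema.validate(value)
            except SchemaError as err:
                full = ".".join(str(k) for k in path + (key,))
                raise SchemaError(f'Key "{full}" has invalid value "{value}"') from err
    return out

print(validate_leaves({"a": {"b": 1}, "c": 2}, Schema(int)))  # {'a': {'b': 1}, 'c': 2}
```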
diff --git a/subtrees/dagflow/dagflow/typefunctions.py b/subtrees/dagflow/dagflow/typefunctions.py
deleted file mode 100644
index 89a08e3de8d50390b0f8bc250a08b32428f82239..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/typefunctions.py
+++ /dev/null
@@ -1,503 +0,0 @@
-from collections.abc import Sequence
-from itertools import repeat
-from typing import Optional, Tuple, Union
-
-from numpy import issubdtype, result_type
-from numpy.typing import DTypeLike
-
-from .exception import TypeFunctionError
-from .input import Input
-from .output import Output
-from .types import NodeT
-
-AllPositionals = slice(None)
-
-try:
-    zip((), (), strict=True)
-except TypeError:
-    # provide a replacement for the strict zip from Python 3.10
-    # to be deprecated at some point
-    from itertools import zip_longest
-
-    def zip(*iterables, strict: bool = False):
-        sentinel = object()
-        for combo in zip_longest(*iterables, fillvalue=sentinel):
-            if strict and sentinel in combo:
-                raise ValueError("Iterables have different lengths")
-            yield combo
-
-
-class MethodSequenceCaller:
-    """Class to call a sequence of methods"""
-
-    methods: list
-
-    def __init__(self) -> None:
-        self.methods = []
-
-    def __call__(self, inputs, outputs):
-        for method in self.methods:
-            method(inputs, outputs)
-
-
-def cpy_dtype(input, output):
-    output.dd.dtype = input.dd.dtype
-
-
-def cpy_shape(input, output):
-    output.dd.shape = input.dd.shape
-
-
-def cpy_edges(input, output):
-    output.dd.axes_edges = input.dd.axes_edges
-
-
-def cpy_nodes(input, output):
-    output.dd.axes_nodes = input.dd.axes_nodes
-
-
-def check_has_inputs(
-    node: NodeT, inputkey: Union[str, int, slice, Sequence, None] = None
-) -> None:
-    """Checking if the node has inputs"""
-    if inputkey is None or inputkey == AllPositionals:
-        try:
-            node.inputs[0]
-        except Exception as exc:
-            raise TypeFunctionError(
-                "The node must have at lease one input!", node=node
-            ) from exc
-    else:
-        try:
-            node.inputs[inputkey]
-        except Exception as exc:
-            raise TypeFunctionError(
-                f"The node must have the input '{inputkey}'!", node=node
-            ) from exc
-
-
-def check_inputs_number(node: NodeT, n: int) -> None:
-    """Checking if the node has only `n` inputs"""
-    if (ninp := len(node.inputs)) != n:
-        raise TypeFunctionError(
-            f"The node must have only {n} inputs, but given {ninp}!", node=node
-        )
-
-
-def eval_output_dtype(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-) -> None:
-    """Automatic calculation and setting dtype for the output"""
-    inputs = node.inputs.iter(inputkey)
-    outputs = node.outputs.iter(outputkey)
-
-    dtype = result_type(*(inp.dd.dtype for inp in inputs))
-    for output in outputs:
-        output.dd.dtype = dtype
-
-
-def copy_input_to_output(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = 0,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-    dtype: bool = True,
-    shape: bool = True,
-    edges: bool = True,
-    nodes: bool = True,
-) -> None:
-    """Coping input dtype and setting for the output"""
-    inputs = tuple(node.inputs.iter(inputkey))
-    outputs = tuple(node.outputs.iter(outputkey))
-
-    if not any((dtype, shape, edges, nodes)):
-        return
-
-    caller = MethodSequenceCaller()
-    if dtype:
-        caller.methods.append(cpy_dtype)
-    if shape:
-        caller.methods.append(cpy_shape)
-    if edges:
-        caller.methods.append(cpy_edges)
-    if nodes:
-        caller.methods.append(cpy_nodes)
-
-    if len(inputs) == 1:
-        inputs = repeat(inputs[0], len(outputs))
-
-    for input, output in zip(inputs, outputs, strict=True):
-        caller(input, output)
-
-
-def copy_input_dtype_to_output(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = 0,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-) -> None:
-    """Coping input dtype and setting for the output"""
-    inputs = tuple(node.inputs.iter(inputkey))
-    outputs = tuple(node.outputs.iter(outputkey))
-
-    if len(inputs) == 1:
-        inputs = repeat(inputs[0], len(outputs))
-
-    for input, output in zip(inputs, outputs, strict=True):
-        output.dd.dtype = input.dd.dtype
-
-
-def copy_input_shape_to_output(
-    node: NodeT,
-    inputkey: Union[str, int] = 0,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-) -> None:
-    """Coping input shape and setting for the output"""
-    inputs = tuple(node.inputs.iter(inputkey))
-    outputs = tuple(node.outputs.iter(outputkey))
-
-    if len(inputs) == 1:
-        inputs = repeat(inputs[0], len(outputs))
-
-    for input, output in zip(inputs, outputs, strict=True):
-        output.dd.shape = input.dd.shape
-
-
-def copy_input_edges_to_output(
-    node: NodeT,
-    inputkey: Union[str, int] = 0,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-) -> None:
-    """Coping input edges and setting for the output"""
-    inputs = tuple(node.inputs.iter(inputkey))
-    outputs = tuple(node.outputs.iter(outputkey))
-
-    if len(inputs) == 1:
-        inputs = repeat(inputs[0], len(outputs))
-
-    for input, output in zip(inputs, outputs, strict=True):
-        output.dd.axes_edges = input.dd.axes_edges
-
-
-def combine_inputs_shape_to_output(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-) -> None:
-    """Combine all the inputs shape and setting for the output"""
-    inputs = node.inputs.iter(inputkey)
-    shape = tuple(inp.dd.shape for inp in inputs)
-    for output in node.outputs.iter(outputkey):
-        output.dd.shape = shape
-
-
-def check_input_dimension(
-    node: NodeT, inputkey: Union[str, int, slice, Sequence], ndim: int
-):
-    """Checking the dimension of the input"""
-    for input in node.inputs.iter(inputkey):
-        dim = len(input.dd.shape)
-        if ndim != dim:
-            raise TypeFunctionError(
-                f"The node supports only {ndim}d inputs. Got {dim}d!",
-                node=node,
-                input=input,
-            )
-
-
-def check_input_square(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence],
-):
-    """Checking input is a square matrix"""
-    for input in node.inputs.iter(inputkey):
-        shape = input.dd.shape
-        dim = len(shape)
-        if dim != 2 or shape[0] != shape[1]:
-            raise TypeFunctionError(
-                f"The node supports only square inputs. Got {shape}!",
-                node=node,
-                input=input,
-            )
-
-
-def check_input_square_or_diag(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence],
-) -> int:
-    """Check if input is a square matrix or diagonal (1d) of a square matrix.
-    Returns the maximal dimension."""
-    dim_max = 0
-    for input in node.inputs.iter(inputkey):
-        shape = input.dd.shape
-        dim = len(shape)
-        dim_max = max(dim, dim_max)
-        if (dim == 2 and shape[0] != shape[1]) and dim != 1:
-            raise TypeFunctionError(
-                f"The node supports only square inputs (or 1d as diagonal). Got {shape}!",
-                node=node,
-                input=input,
-            )
-    return dim_max
-
-
-def check_input_shape(
-    node: NodeT, inputkey: Union[str, int, slice, Sequence], shape: tuple
-):
-    """Checking the shape equivalence for inputs"""
-    for input in node.inputs.iter(inputkey):
-        sshape = input.dd.shape
-        if sshape != shape:
-            raise TypeFunctionError(
-                f"The node supports only inputs with shape={shape}. Got {sshape}!",
-                node=node,
-                input=input,
-            )
-
-
-def check_input_dtype(
-    node: NodeT, inputkey: Union[str, int, slice, Sequence], dtype
-):
-    """Checking the dtype equivalence for inputs"""
-    for input in node.inputs.iter(inputkey):
-        dtt = input.dd.dtype
-        if dtt != dtype:
-            raise TypeFunctionError(
-                f"The node supports only input types {dtype}. Got {dtt}!",
-                node=node,
-                input=input,
-            )
-
-
-def check_inputs_equivalence(
-    node: NodeT, inputkey: Union[str, int, slice, Sequence] = AllPositionals
-):
-    """Checking the equivalence of the dtype, shape, axes_edges and axes_nodes of all the inputs"""
-    inputs = tuple(node.inputs.iter(inputkey))
-    input0, inputs = inputs[0], inputs[1:]
-
-    dtype, shape, edges, nodes = (
-        input0.dd.dtype,
-        input0.dd.shape,
-        input0.dd.axes_edges,
-        input0.dd.axes_nodes,
-    )
-    for input in inputs:
-        if (
-            input.dd.dtype != dtype
-            or input.dd.shape != shape
-            or input.dd.axes_edges != edges
-            or input.dd.axes_nodes != nodes
-        ):
-            raise TypeFunctionError(
-                f"Input data [{input.dtype=}, {input.shape=}, {input.axes_edges=}, {input.axes_nodes=}]"
-                f" is inconsistent with [{dtype=}, {shape=}, {edges=}, {nodes=}]",
-                node=node,
-                input=input,
-            )
-
-
-def check_inputs_square_or_diag(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
-) -> int:
-    """Check if inputs are square matrices or diagonals (1d) of a square matrices of the same size.
-    Returns the maximal dimension."""
-    inputs = tuple(node.inputs.iter(inputkey))
-
-    dim_max = 0
-    shape0 = inputs[0].dd.shape[0]
-
-    for input in inputs:
-        shape = input.dd.shape
-        dim = len(shape)
-        dim_max = max(dim, dim_max)
-        if shape0 != shape[0] or (
-            (dim == 2 and shape[0] != shape[1]) and dim != 1
-        ):
-            raise TypeFunctionError(
-                f"The node supports only square inputs (or 1d as diagonal) of size {shape0}x{shape0}. Got {shape}!",
-                node=node,
-                input=input,
-            )
-    return dim_max
-
-
-def check_inputs_same_dtype(
-    node: NodeT, inputkey: Union[str, int, slice, Sequence] = AllPositionals
-):
-    """Checking dtypes of all the inputs are same"""
-    inputs = tuple(node.inputs.iter(inputkey))
-    input0, inputs = inputs[0], inputs[1:]
-
-    dtype = input0.dd.dtype
-    for input in inputs:
-        if input.dd.dtype != dtype:
-            raise TypeFunctionError(
-                f"Input data {input.dd.dtype} is inconsistent with {dtype}",
-                node=node,
-                input=input,
-            )
-
-
-def check_input_subtype(node: NodeT, input: Input, dtype: DTypeLike):
-    """Checks if the input dtype is some subtype of `dtype`."""
-    if not issubdtype(input.dd.dtype, dtype):
-        raise TypeFunctionError(
-            f"The input must be an array of {dtype}, but given '{input.dd.dtype}'!",
-            node=node,
-            input=input,
-        )
-
-
-def check_output_subtype(node: NodeT, output: Output, dtype: DTypeLike):
-    """Checks if the output dtype is some subtype of `dtype`."""
-    if not issubdtype(output.dd.dtype, dtype):
-        raise TypeFunctionError(
-            f"The output must be an array of {dtype}, but given '{output.dd.dtype}'!",
-            node=node,
-            output=output,
-        )
-
-
-def check_inputs_multiplicable_mat(
-    node: NodeT,
-    inputkey1: Union[str, int, slice, Sequence],
-    inputkey2: Union[str, int, slice, Sequence],
-):
-    """Checking that inputs from key1 and key2 may be multiplied (matrix)"""
-    inputs1 = tuple(node.inputs.iter(inputkey1))
-    inputs2 = tuple(node.inputs.iter(inputkey2))
-
-    len1, len2 = len(inputs1), len(inputs2)
-    if len1 == len2:
-        pass
-    elif len1 == 1:
-        inputs1 = repeat(inputs1[0], len2)
-    elif len2 == 1:
-        inputs2 = repeat(inputs2[0], len1)
-
-    for input1, input2 in zip(inputs1, inputs2, strict=True):
-        shape1 = input1.dd.shape
-        shape2 = input2.dd.shape
-        if shape1[-1] != shape2[0]:
-            raise TypeFunctionError(
-                f"Inputs {shape1} and {shape2} are not multiplicable",
-                node=node,
-                input=input1,
-            )
-
-
-def check_input_edges_dim(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
-    dim: int = 1,
-):
-    """Checking the existence and dim of the edges of the inputs"""
-    for input in node.inputs.iter(inputkey):
-        edges = input.dd.axes_edges
-        if len(edges) == 0:
-            raise TypeFunctionError(
-                f"The input must have edges, but given {edges=}!",
-                node=node,
-                input=input,
-            )
-        for edge in edges:
-            if not isinstance(edge, Output):
-                raise TypeFunctionError(
-                    f"The input edge must be an `Output`, but given {edge=}!",
-                    node=node,
-                    input=input,
-                )
-            if edge.dd.dim != dim:
-                raise TypeFunctionError(
-                    f"The input edge must be a {dim}d array, but given {edge.dd.dim=}!",
-                    node=node,
-                    input=input,
-                )
-
-
-def check_input_edges_equivalence(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
-    reference: Optional[Tuple[Output]] = None,
-):
-    """Checking the equivalence of the edges of the inputs."""
-    inputs = tuple(node.inputs.iter(inputkey))
-    if reference is None:
-        input0, inputs = inputs[0], inputs[1:]
-        reference = input0.dd.axes_edges
-    for input in inputs:
-        edges = input.dd.axes_edges
-        if edges != reference:
-            raise TypeFunctionError(
-                f"The input edge must be {reference}, but given {edges=}!",
-                node=node,
-                input=input,
-            )
-
-
-def check_edges_type(
-    node: NodeT,
-    inputkey: Union[str, int, slice, Sequence] = AllPositionals,
-    outputkey: Union[str, int, slice, Sequence] = AllPositionals,
-):
-    """Checking of the edges type (must be `List[Output]`) of the inputs and outputs."""
-    # check inputs
-    for input in node.inputs.iter(inputkey):
-        edges = input.dd.axes_edges
-        if not isinstance(edges, list):
-            raise TypeFunctionError(
-                f"The `input.dd.axes_edges` must be `List[Output]`, but given {edges=}!",
-                node=node,
-                input=input,
-            )
-        for edge in edges:
-            if not isinstance(edge, Output):
-                raise TypeFunctionError(
-                    f"The edge must be `Output`, but given {edge=}!",
-                    node=node,
-                    input=input,
-                )
-    # check outputs
-    for output in node.outputs.iter(outputkey):
-        edges = output.dd.axes_edges
-        if not isinstance(edges, list):
-            raise TypeFunctionError(
-                f"The `output.dd.axes_edges` must be `List[Output]`, but given {edges=}!",
-                node=node,
-                output=output,
-            )
-        for edge in edges:
-            if not isinstance(edge, Output):
-                raise TypeFunctionError(
-                    f"The edge must be `Output`, but given {edge=}!",
-                    node=node,
-                    output=output,
-                )
-
-
-def check_array_edges_consistency(node: NodeT, output: Output):
-    """
-    Checks the dimension equivalence of edges and the output, then checks that
-    `len(output) = N` and `len(edges) = N+1` for each dimension.
-    The type function passes if the edges are empty.
-    """
-    dd = output.dd
-    edges = dd.axes_edges
-    if (y := len(edges)) > 0:
-        if y != dd.dim:
-            raise TypeFunctionError(
-                f"Array: the data ({dd.dim}d) and edges "
-                f"({len(edges)}d) must have the same dimension!",
-                node=node,
-                output=output,
-            )
-        for i, edge in enumerate(edges):
-            if edge.dd.shape[0] != dd.shape[i] + 1:
-                raise TypeFunctionError(
-                    f"Array: the data lenght (={dd.shape[i]} + 1) must be "
-                    f"consistent with edges (={edge.dd.shape[0]})!",
-                    node=node,
-                    output=output,
-                )
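Editor's note (not part of the original diff): the strict-zip fallback at the top of typefunctions.py backports Python 3.10's `zip(..., strict=True)` so that mismatched input/output lists fail loudly. Its observable behaviour, as a stand-alone sketch:

```python
# Behaviour of the strict-zip fallback defined in the removed typefunctions.py.
from itertools import zip_longest

def zip_strict(*iterables):
    sentinel = object()
    for combo in zip_longest(*iterables, fillvalue=sentinel):
        if sentinel in combo:
            raise ValueError("Iterables have different lengths")
        yield combo

print(list(zip_strict([1, 2], ["a", "b"])))   # [(1, 'a'), (2, 'b')]
try:
    list(zip_strict([1, 2], ["a"]))
except ValueError as err:
    print(err)                                 # Iterables have different lengths
```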
diff --git a/subtrees/dagflow/dagflow/types.py b/subtrees/dagflow/dagflow/types.py
deleted file mode 100644
index 8ee0295bd890c7554446ffc0eba3ecd2b10e2382..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/types.py
+++ /dev/null
@@ -1,11 +0,0 @@
-from typing import Tuple, TypeVar
-
-GraphT = TypeVar("GraphT", bound="Graph")
-NodeT = TypeVar("NodeT", bound="Node")
-InputT = TypeVar("InputT", bound="Input")
-InputsT = TypeVar("InputsT", bound="Inputs")
-OutputT = TypeVar("OutputT", bound="Output")
-OutputsT = TypeVar("OutputsT", bound="Outputs")
-
-ShapeLike = Tuple[int, ...]
-EdgesLike = Tuple[OutputT]
diff --git a/subtrees/dagflow/dagflow/variable.py b/subtrees/dagflow/dagflow/variable.py
deleted file mode 100644
index a826df1fe7df530307d9631c7a42907e27b73b9c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/variable.py
+++ /dev/null
@@ -1,450 +0,0 @@
-from .node import Node, Output
-from .exception import InitializationError
-from .lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
-from .lib.Cholesky import Cholesky
-from .lib.Array import Array
-from .lib.CovmatrixFromCormatrix import CovmatrixFromCormatrix
-
-from numpy import zeros_like, array
-from numpy.typing import DTypeLike
-from typing import Optional, Dict, List, Generator
-
-class Parameter:
-    __slots__ = ('_idx','_parent', '_value_output', '_labelfmt')
-    _parent: Optional['Parameters']
-    _idx: int
-    _value_output: Output
-    _labelfmt: str
-
-    def __init__(
-        self,
-        value_output: Output,
-        idx: int=0,
-        *,
-        parent: 'Parameters',
-        labelfmt: str='{}'
-    ):
-        self._idx = idx
-        self._parent = parent
-        self._value_output = value_output
-        self._labelfmt = labelfmt
-
-    @property
-    def value(self) -> float:
-        return self._value_output.data[self._idx]
-
-    @value.setter
-    def value(self, value: float):
-        return self._value_output.seti(self._idx, value)
-
-    @property
-    def output(self) -> Output:
-        return self._value_output
-
-    def label(self, source: str='text') -> str:
-        return self._labelfmt.format(self._value_output.node.label(source))
-
-    def to_dict(self, *, label_from: str='text') -> dict:
-        return {
-                'value': self.value,
-                'label': self.label(label_from)
-                }
-
-class GaussianParameter(Parameter):
-    __slots__ = ( '_central_output', '_sigma_output', '_normvalue_output')
-    _central_output: Output
-    _sigma_output: Output
-    _normvalue_output: Output
-
-    def __init__(
-        self,
-        value_output: Output,
-        central_output: Output,
-        sigma_output: Output,
-        idx: int=0,
-        *,
-        normvalue_output: Output,
-        **kwargs
-    ):
-        super().__init__(value_output, idx, **kwargs)
-        self._central_output = central_output
-        self._sigma_output = sigma_output
-        self._normvalue_output = normvalue_output
-
-    @property
-    def central(self) -> float:
-        return self._central_output.data[self._idx]
-
-    @central.setter
-    def central(self, central: float):
-        self._central_output.seti(self._idx, central)
-
-    @property
-    def sigma(self) -> float:
-        return self._sigma_output.data[self._idx]
-
-    @sigma.setter
-    def sigma(self, sigma: float):
-        self._sigma_output.seti(self._idx, sigma)
-
-    @property
-    def sigma_relative(self) -> float:
-        return self.sigma/self.value
-
-    @sigma_relative.setter
-    def sigma_relative(self, sigma_relative: float):
-        self.sigma = sigma_relative * self.value
-
-    @property
-    def sigma_percent(self) -> float:
-        return 100.0 * (self.sigma/self.value)
-
-    @sigma_percent.setter
-    def sigma_percent(self, sigma_percent: float):
-        self.sigma = (0.01*sigma_percent) * self.value
-
-    @property
-    def normvalue(self) -> float:
-        return self._normvalue_output.data[self._idx]
-
-    @normvalue.setter
-    def normvalue(self, normvalue: float):
-        self._normvalue_output.seti(self._idx, normvalue)
-
-    def to_dict(self, **kwargs) -> dict:
-        dct = super().to_dict(**kwargs)
-        dct.update({
-            'central': self.central,
-            'sigma': self.sigma,
-            # 'normvalue': self.normvalue,
-            })
-        return dct
-
-class NormalizedGaussianParameter(Parameter):
-    @property
-    def central(self) -> float:
-        return 0.0
-
-    @property
-    def sigma(self) -> float:
-        return 1.0
-
-    def to_dict(self, **kwargs) -> dict:
-        dct = super().to_dict(**kwargs)
-        dct.update({
-            'central': 0.0,
-            'sigma': 1.0,
-            # 'normvalue': self.value,
-            })
-        return dct
-
-class Constraint:
-    __slots__ = ('_pars')
-    _pars: "Parameters"
-
-    def __init__(self, parameters: "Parameters"):
-        self._pars = parameters
-
-class Parameters:
-    __slots__ = (
-        'value',
-        '_value_node',
-        '_pars',
-        '_norm_pars',
-        '_is_variable',
-        '_constraint'
-    )
-    value: Output
-    _value_node: Node
-    _pars: List[Parameter]
-    _norm_pars: List[Parameter]
-
-    _is_variable: bool
-
-    _constraint: Optional[Constraint]
-
-    def __init__(
-        self,
-        value: Node,
-        *,
-        variable: Optional[bool]=None,
-        fixed: Optional[bool]=None,
-        close: bool=True
-    ):
-        self._value_node = value
-        self.value = value.outputs[0]
-
-        if all(f is not None for f in (variable, fixed)):
-            raise RuntimeError("Parameter may not be set to variable and fixed at the same time")
-        if variable is not None:
-            self._is_variable = variable
-        elif fixed is not None:
-            self._is_variable = not fixed
-        else:
-            self._is_variable = True
-
-        self._constraint = None
-
-        self._pars = []
-        self._norm_pars = []
-        if close:
-            self._close()
-
-            for i in range(self.value._data.size):
-                self._pars.append(Parameter(self.value, i, parent=self))
-
-    def _close(self) -> None:
-        self._value_node.close(recursive=True)
-
-    @property
-    def is_variable(self) -> bool:
-        return self._is_variable
-
-    @property
-    def is_fixed(self) -> bool:
-        return not self._is_variable
-
-    @property
-    def is_constrained(self) -> bool:
-        return self._constraint is not None
-
-    @property
-    def is_free(self) -> bool:
-        return self._constraint is None
-
-    @property
-    def parameters(self) -> List:
-        return self._pars
-
-    @property
-    def norm_parameters(self) -> List:
-        return self._norm_pars
-
-    def to_dict(self, *, label_from: str='text') -> dict:
-        return {
-                'value': self.value.data[0],
-                'label': self._value_node.label(label_from)
-                }
-
-    def set_constraint(self, constraint: Constraint) -> None:
-        if self._constraint is not None:
-            raise InitializationError("Constraint already set")
-        self._constraint = constraint
-        # constraint._pars = self
-
-    @staticmethod
-    def from_numbers(
-        value: float,
-        *,
-        dtype: DTypeLike='d',
-        variable: Optional[bool]=None,
-        fixed: Optional[bool]=None,
-        label: Optional[Dict[str, str]]=None,
-        **kwargs
-    ) -> 'Parameters':
-        if label is None:
-            label = {'text': 'parameter'}
-        else:
-            label = dict(label)
-        name: str = label.setdefault('name', 'parameter')
-        has_constraint = kwargs.get('sigma', None) is not None
-        pars = Parameters(
-            Array(
-                name,
-                array((value,), dtype=dtype),
-                label = label,
-                mode='store_weak',
-            ),
-            fixed=fixed,
-            variable=variable,
-            close=not has_constraint
-        )
-
-        if has_constraint:
-            pars.set_constraint(
-                GaussianConstraint.from_numbers(
-                    parameters=pars,
-                    dtype=dtype,
-                    **kwargs
-                )
-            )
-            pars._close()
-
-        return pars
-
-class GaussianConstraint(Constraint):
-    __slots__ = (
-        'central', 'sigma', 'normvalue',
-        '_central_node', '_sigma_node', '_normvalue_node',
-        '_cholesky_node', '_covariance_node', '_correlation_node',
-        '_sigma_total_node',
-        '_norm_node',
-        '_is_constrained'
-    )
-    central: Output
-    sigma: Output
-    normvalue: Output
-
-    _central_node: Node
-    _sigma_node: Node
-    _normvalue_node: Node
-
-    _cholesky_node: Optional[Node]
-    _covariance_node: Optional[Node]
-    _correlation_node: Optional[Node]
-    _sigma_total_node: Optional[Node]
-
-    _norm_node: Node
-
-    _is_constrained: bool
-
-    def __init__(
-        self,
-        central: Node,
-        *,
-        parameters: Parameters,
-        sigma: Node=None,
-        covariance: Node=None,
-        correlation: Node=None,
-        constrained: Optional[bool]=None,
-        free: Optional[bool]=None,
-        **_
-    ):
-        super().__init__(parameters=parameters)
-        self._central_node = central
-
-        self._cholesky_node = None
-        self._covariance_node = None
-        self._correlation_node = None
-        self._sigma_total_node = None
-
-        if all(f is not None for f in (constrained, free)):
-            raise RuntimeError("GaussianConstraint may not be set to constrained and free at the same time")
-        if constrained is not None:
-            self._is_constrained = constrained
-        elif free is not None:
-            self._is_constrained = not free
-        else:
-            self._is_constrained = True
-
-        if sigma is not None and covariance is not None:
-            raise InitializationError('GaussianConstraint: got both "sigma" and "covariance" as arguments')
-        if correlation is not None and sigma is None:
-            raise InitializationError('GaussianConstraint: got "correlation", but no "sigma" as arguments')
-
-        value_node = parameters._value_node
-        if correlation is not None:
-            self._correlation_node = correlation
-            self._covariance_node = CovmatrixFromCormatrix(f"V({value_node.name})")
-            self._cholesky_node = Cholesky(f"L({value_node.name})")
-            self._sigma_total_node = sigma
-            self._sigma_node = self._cholesky_node
-
-            self._sigma_total_node >> self._covariance_node.inputs['sigma']
-            correlation >> self._covariance_node
-            self._covariance_node >> self._cholesky_node
-        elif sigma is not None:
-            self._sigma_node = sigma
-        elif covariance is not None:
-            self._cholesky_node = Cholesky(f"L({value_node.name})")
-            self._sigma_node = self._cholesky_node
-            self._covariance_node = covariance
-
-            covariance >> self._cholesky_node
-        else:
-            # TODO: no sigma/covariance AND central means normalized=value?
-            raise InitializationError('GaussianConstraint: got no "sigma" and no "covariance" arguments')
-
-        self.central = self._central_node.outputs[0]
-        self.sigma = self._sigma_node.outputs[0]
-
-        self._normvalue_node = Array(
-            f'Normalized {value_node.name}',
-            zeros_like(self.central._data),
-            mark = f'norm({value_node.mark})',
-            mode='store_weak'
-        )
-        self._normvalue_node._inherit_labels(self._pars._value_node, fmt='Normalized {}')
-        self.normvalue = self._normvalue_node.outputs[0]
-
-        self._norm_node = NormalizeCorrelatedVars2(f"Normalize {value_node.name}", immediate=True)
-        self.central >> self._norm_node.inputs['central']
-        self.sigma >> self._norm_node.inputs['matrix']
-
-        (parameters.value, self.normvalue) >> self._norm_node
-
-        self._norm_node.close(recursive=True)
-        self._norm_node.touch()
-
-        value_output = self._pars.value
-        for i in range(value_output._data.size):
-            self._pars._pars.append(
-                GaussianParameter(
-                    value_output,
-                    self.central,
-                    self.sigma,
-                    i,
-                    normvalue_output=self.normvalue,
-                    parent=self
-                )
-            )
-            self._pars._norm_pars.append(
-                NormalizedGaussianParameter(
-                    self.normvalue,
-                    i,
-                    parent=self,
-                    labelfmt='[norm] {}'
-                )
-            )
-
-    @property
-    def is_constrained(self) -> bool:
-        return self._is_constrained
-
-    @property
-    def is_free(self) -> bool:
-        return not self._is_constrained
-
-    @property
-    def is_correlated(self) -> bool:
-        return self._covariance_node is not None
-
-    @staticmethod
-    def from_numbers(
-        *,
-        central: float,
-        sigma: float,
-        label: Optional[Dict[str,str]]=None,
-        dtype: DTypeLike='d',
-        **kwargs
-    ) -> 'GaussianConstraint':
-        if label is None:
-            label = {'text': 'gaussian parameter'}
-        else:
-            label = dict(label)
-        name = label.setdefault('name', 'parameter')
-
-        node_central = Array(
-            f'{name}_central',
-            array((central,), dtype=dtype),
-            label = {k: f'central: {v}' for k,v in label.items()},
-            mode='store_weak'
-        )
-
-        node_sigma = Array(
-            f'{name}_sigma',
-            array((sigma,), dtype=dtype),
-            label = {k: f'sigma: {v}' for k,v in label.items()},
-            mode='store_weak'
-        )
-
-        return GaussianConstraint(central=node_central, sigma=node_sigma, **kwargs)
-
-    def to_dict(self, **kwargs) -> dict:
-        dct = super().to_dict(**kwargs)
-        dct.update({
-            'central': self.central.data[0],
-            'sigma': self.sigma.data[0],
-            # 'normvalue': self.normvalue.data[0],
-            })
-        return dct
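Editor's note (not part of the original diff): `GaussianConstraint` wires a normalization node so that parameter values and normalized values stay consistent with the central values and sigmas. In the uncorrelated scalar case this reduces to the familiar relation sketched below; with a covariance matrix the division is replaced by a solve against its Cholesky factor.

```python
# Scalar, uncorrelated form of the relation maintained by the normalization node
# in GaussianConstraint (illustrative only; the real node handles correlations).
import numpy as np

central, sigma = 1.0, 0.5
value = np.array([1.75])

normvalue = (value - central) / sigma   # forward map: value -> normalized value
print(normvalue)                        # [1.5]
print(normvalue * sigma + central)      # [1.75], the inverse map
```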
diff --git a/subtrees/dagflow/dagflow/wrappers.py b/subtrees/dagflow/dagflow/wrappers.py
deleted file mode 100644
index 18dec84a76b0475df33406825e6f20827b4177a5..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/dagflow/wrappers.py
+++ /dev/null
@@ -1,35 +0,0 @@
-
-from .printl import next_level, printl
-
-
-def printer(fcn, node, inputs, outputs):
-    printl(f"Evaluate {node.name}")
-    with next_level():
-        fcn(node, inputs, outputs)
-    printl(f"... done with {node.name}")
-
-
-def before_printer(fcn, node, inputs, outputs):
-    printl(f"Evaluate {node.name}: {node.label()}")
-    with next_level():
-        fcn(node, inputs, outputs)
-
-
-def after_printer(fcn, node, inputs, outputs):
-    with next_level():
-        fcn(node, inputs, outputs)
-    printl(f"Evaluate {node.name}: {node.label()}")
-
-
-def dataprinter(fcn, node, inputs, outputs):
-    fcn(node, inputs, outputs)
-    for i, output in enumerate(outputs):
-        printl("{: 2d} {}: {!s}".format(i, output.name, output._data))
-
-
-def toucher(fcn, node, inputs, outputs):
-    for i, input in enumerate(inputs):
-        printl("touch input {: 2d} {}.{}".format(i, node.name, input.name))
-        with next_level():
-            input.touch()
-    fcn(node, inputs, outputs)
diff --git a/subtrees/dagflow/example/dagflow_example.png b/subtrees/dagflow/example/dagflow_example.png
deleted file mode 100644
index a1b7b06a4a56ff7129eba31bb1690a6923f87d09..0000000000000000000000000000000000000000
Binary files a/subtrees/dagflow/example/dagflow_example.png and /dev/null differ
diff --git a/subtrees/dagflow/example/example.py b/subtrees/dagflow/example/example.py
deleted file mode 100755
index be2288fc6c9538e6d0c565ab25d05529d215870c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/example/example.py
+++ /dev/null
@@ -1,128 +0,0 @@
-from numpy import arange, copyto, result_type
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.input_extra import MissingInputAddEach
-from dagflow.lib import Array, Product, Sum, WeightedSum
-from dagflow.nodes import FunctionNode
-
-array = arange(3, dtype="d")
-debug = False
-
-
-class ThreeInputsOneOutput(FunctionNode):
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault("missing_input_handler", MissingInputAddEach())
-        super().__init__(*args, **kwargs)
-
-    def _fcn(self, _, inputs, outputs):
-        for i, output in enumerate(outputs):
-            out = output.data
-            copyto(out, inputs[3 * i].data)
-            for input in inputs[3 * i + 1 : (i + 1) * 3]:
-                out += input.data
-        return out
-
-    @property
-    def result(self):
-        return [out.data for out in self.outputs]
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        for i, output in enumerate(self.outputs):
-            inputs = self.inputs[3*i:3*(1+i)]
-            output.dd.shape = inputs[0].dd.shape
-            output.dd.dtype = result_type(*(inp.dd.dtype for inp in inputs))
-        self.logger.debug(
-            f"Node '{self.name}': dtype={tuple(out.dd.dtype for out in self.outputs)}, "
-            f"shape={tuple(out.dd.shape for out in self.outputs)}"
-        )
-
-
-# Check predefined Array, Sum and Product
-with Graph(debug=debug) as graph:
-    (in1, in2, in3, in4) = (
-        Array(name, array) for name in ("n1", "n2", "n3", "n4")
-    )
-    s = Sum("sum")
-    m = Product("product")
-
-    (in1, in2, in3) >> s
-    (in4, s) >> m
-    graph.close()
-
-    print("Result:", m.outputs["result"].data)
-    savegraph(graph, "dagflow_example_1a.png")
-
-# Check random generated Array, Sum and Product
-with Graph(debug=debug) as graph:
-    (in1, in2, in3, in4) = (
-        Array(name, array) for name in ("n1", "n2", "n3", "n4")
-    )
-    s = Sum("sum")
-    m = Product("product")
-
-    (in1, in2, in3) >> s
-    (in4, s) >> m
-    graph.close()
-
-    print("Result:", m.outputs["result"].data)
-    savegraph(graph, "dagflow_example_1b.png")
-
-# Check predefined Array, two Sum's and Product
-with Graph(debug=debug) as graph:
-    (in1, in2, in3, in4) = (
-        Array(name, array) for name in ("n1", "n2", "n3", "n4")
-    )
-    s = Sum("sum")
-    s2 = Sum("sum")
-    m = Product("product")
-
-    (in1, in2) >> s
-    (in3, in4) >> s2
-    (s, s2) >> m
-    graph.close()
-
-    print("Result:", m.outputs["result"].data)
-    savegraph(graph, "dagflow_example_2.png")
-
-# Check predefined Array, Sum, WeightedSum and Product
-with Graph(debug=debug) as graph:
-    (in1, in2, in3, in4) = (
-        Array(name, array) for name in ("n1", "n2", "n3", "n4")
-    )
-    weight = Array("weight", (2, 3))
-    # The same result with other weight
-    # weight = makeArray(5)("weight")
-    s = Sum("sum")
-    ws = WeightedSum("weightedsum")
-    m = Product("product")
-
-    (in1, in2) >> s  # [0,2,4]
-    (in3, in4) >> ws
-    {"weight": weight} >> ws  # [0,1,2] * 2 + [0,1,2] * 3 = [0,5,10]
-    # NOTE: also it is possible to use the old style binding:
-    #weight >> ws("weight")
-    (s, ws) >> m  # [0,2,4] * [0,5,10] = [0,10,40]
-    graph.close()
-
-    print("Result:", m.outputs["result"].data)
-    savegraph(graph, "dagflow_example_3.png")
-
-
-with Graph(debug=debug) as graph:
-    (in1, in2, in3) = (Array(name, array) for name in ("n1", "n2", "n3"))
-    (in4, in5, in6) = (
-        Array(name, (1, 0, 0)) for name in ("n4", "n5", "n6")
-    )
-    (in7, in8, in9) = (
-        Array(name, (3, 3, 3)) for name in ("n7", "n8", "n9")
-    )
-    s = ThreeInputsOneOutput("3to1")
-    (in1, in2, in3) >> s
-    (in4, in5, in6) >> s
-    (in7, in8, in9) >> s
-    graph.close()
-
-    print("Result:", s.result)
-    savegraph(graph, "dagflow_example_4.png")
diff --git a/subtrees/dagflow/example/graph_evaluation.gif b/subtrees/dagflow/example/graph_evaluation.gif
deleted file mode 100644
index 36b4b7d3f7a3d4a685531c0dc3f5f5287a853992..0000000000000000000000000000000000000000
Binary files a/subtrees/dagflow/example/graph_evaluation.gif and /dev/null differ
diff --git a/subtrees/dagflow/gindex b/subtrees/dagflow/gindex
deleted file mode 120000
index 6039cdfd830502e36edb75f37610a341d192b681..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/gindex
+++ /dev/null
@@ -1 +0,0 @@
-subtrees/gindex/gindex
\ No newline at end of file
diff --git a/subtrees/dagflow/multikeydict b/subtrees/dagflow/multikeydict
deleted file mode 120000
index 6745c387b78efd09cef70914118f4a36ec4cf335..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/multikeydict
+++ /dev/null
@@ -1 +0,0 @@
-subtrees/dictwrapper/multikeydict
\ No newline at end of file
diff --git a/subtrees/dagflow/pytest.ini b/subtrees/dagflow/pytest.ini
deleted file mode 100755
index 0f91ae46aaea5c6922d3a8f583bc520c4224ede9..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/pytest.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[pytest]
-testpaths=test/
-; addopts= --cov-report term --cov=./ --cov-report xml:cov.xml
diff --git a/subtrees/dagflow/requirements.txt b/subtrees/dagflow/requirements.txt
deleted file mode 100644
index 5e2a13d7898d88e0827c45802a0ca6833ca4616a..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/requirements.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-contextlib2
-coverage
-numba
-numpy==1.23.5
-pygraphviz
-pytest
-pytest-cov
-schema
diff --git a/subtrees/dagflow/subtrees/dictwrapper/.gitignore b/subtrees/dagflow/subtrees/dictwrapper/.gitignore
deleted file mode 100644
index 4906f4c4df53b47c9f99b611f1d4fcf3a85e209a..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/.gitignore
+++ /dev/null
@@ -1,145 +0,0 @@
-
-# Created by https://www.toptal.com/developers/gitignore/api/python
-# Edit at https://www.toptal.com/developers/gitignore?templates=python
-
-### Python ###
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-#  Usually these files are written by a python script from a template
-#  before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-#   For a library or package, you might want to ignore these files since the code is
-#   intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-#   However, in case of collaboration, if having platform-specific dependencies or dependencies
-#   having no cross-platform support, pipenv may install dependencies that don't work, or not
-#   install all needed dependencies.
-#Pipfile.lock
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
-venv/
-ENV/
-env.bak/
-venv.bak/
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# End of https://www.toptal.com/developers/gitignore/api/python
diff --git a/subtrees/dagflow/subtrees/dictwrapper/.gitlab-ci.yml b/subtrees/dagflow/subtrees/dictwrapper/.gitlab-ci.yml
deleted file mode 100644
index 05a935a95d9c71b73f11b9832bdfcfe5c68de3ca..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/.gitlab-ci.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-stages:
-    - tests
-
-tests:
-    image: git.jinr.ru:5005/gna/gna-base-docker-image:latest
-    stage: tests
-
-    script:
-    - python3 -m pip install -r requirements.txt
-    - coverage run --source=. -m pytest
-    - coverage report
-    - coverage xml
-    coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
-    artifacts:
-        reports:
-            coverage_report:
-                coverage_format: cobertura
-                path: coverage.xml
-    only:
-        - master
-        - merge_requests
diff --git a/subtrees/dagflow/subtrees/dictwrapper/README.md b/subtrees/dagflow/subtrees/dictwrapper/README.md
deleted file mode 100644
index 57de37f2a6ea179a93414ee511135da9610f8f34..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Summary
-
-[![python](https://img.shields.io/badge/python-3.10-purple.svg)](https://www.python.org/)
-[![pipeline](https://git.jinr.ru/dag-computing/nestedmkdict.py/badges/master/pipeline.svg)](https://git.jinr.ru/dag-computing/nestedmkdict.py/commits/master)
-[![coverage report](https://git.jinr.ru/dag-computing/nestedmkdict.py/badges/master/coverage.svg)](https://git.jinr.ru/dag-computing/nestedmkdict.py/-/commits/master)
-<!--- Uncomment here after adding docs!
-[![pages](https://img.shields.io/badge/pages-link-white.svg)](http://dag-computing.pages.jinr.ru/nestedmkdict.py)
--->
-
-* `NestedMKDict` is a tool to work with nested dictionaries
-* `FlatMKDict` is a map-like class that supports a list/set as a key and does not distinguish the order of the key elements
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/__init__.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/__init__.py
deleted file mode 100644
index 8664bffb1282cadff7c8669be3de94eb890971b5..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .nestedmkdict import NestedMKDict, NestedMKDictAccess
-from .flatmkdict import FlatMKDict
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/classwrapper.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/classwrapper.py
deleted file mode 100644
index 10e08fd5471db87c33721c011be35825a5466d28..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/classwrapper.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from typing import Any
-
-class ClassWrapper(object):
-    __slots__ = ('_object', '_types', '_wrapper_class')
-    _object: Any
-    _types: Any
-    _wrapper_class: Any
-    def __init__(self, obj, *, types=None):
-        self._object = obj
-        self._types = type(obj) if types is None else types
-        self._wrapper_class = type(self)
-
-    @property
-    def object(self) -> Any:
-        return self._object
-
-    def __str__(self):
-        return str(self._object)
-
-    def __repr__(self):
-        return repr(self._object)
-
-    def __dir__(self):
-        return dir(self._object)
-
-    def __len__(self):
-        return len(self._object)
-
-    def __bool__(self):
-        return bool(self._object)
-
-    def __contains__(self, v):
-        return v in self._object
-
-    def __eq__(self, other):
-        if isinstance(other, ClassWrapper):
-            return self._object==other._object
-
-        return self._object==other
-
-    def _wrap(self, obj, **kwargs):
-        if isinstance(obj, ClassWrapper):
-            return obj
-
-        if isinstance(obj, self._types):
-            return self._wrapper_class(obj, **kwargs)
-
-        return obj
-
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py
deleted file mode 100644
index bf01147b082f51c3c510308e999e42753fb9d888..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatmkdict.py
+++ /dev/null
@@ -1,94 +0,0 @@
-from __future__ import annotations
-
-from collections import UserDict
-from collections.abc import Sequence
-from typing import Any, Callable, Generator, Optional
-
-class FlatMKDict(UserDict):
-    __slots__ = ('_protect', '_merge_flatdicts')
-    _protect: bool
-
-    def __init__(*args, protect: bool = False, **kwargs) -> None:
-        self = args[0]
-        self._protect = protect
-        self._merge_flatdicts = True
-        UserDict.__init__(*args, **kwargs)
-
-    def _process_key(self, key: Any) -> tuple:
-        # if isinstance(key, Sequence):
-        return tuple(sorted(key))
-        # else:
-        #     return frozenset((key,))
-
-    def __getitem__(self, key: Any) -> Any:
-        key = self._process_key(key)
-        return super().__getitem__(key)
-
-    def __setitem__(self, key: Any, val: Any) -> None:
-        key = self._process_key(key)
-        if self._protect and key in self:
-            raise AttributeError(
-                f"Reassigning of the existed key '{key}' is restricted, "
-                "due to the protection!"
-            )
-
-        if self._merge_flatdicts and isinstance(val, FlatMKDict):
-            for subkey, subval in val.items():
-                newkey = key+subkey
-                self[newkey] = subval
-
-            return
-
-        super().__setitem__(key, val)
-
-    def __contains__(self, key: Any) -> bool:
-        key = self._process_key(key)
-        return super().__contains__(key)
-
-    def values(self, *, keys: tuple = (), **kwargs) -> Generator:
-        for _, val in self.items(*keys, **kwargs):
-            yield val
-
-    def keys(self, *args, **kwargs) -> Generator:
-        for key, _ in self.items(*args, **kwargs):
-            yield key
-
-    def items(
-        self,
-        *args,
-        filterkey: Optional[Callable[[Any], bool]] = None,
-        filterkeyelem: Optional[Callable[[Any], bool]] = None,
-    ) -> Generator:
-        """
-        Returns items from the slice by `args`.
-        If `args` are empty returns all items.
-        """
-        res = super().items()
-        if args:
-            args = set(args)
-            res = (elem for elem in res if args.issubset(elem[0]))
-        if filterkey:
-            res = (elem for elem in res if filterkey(elem[0]))
-        if filterkeyelem:
-            res = (
-                elem
-                for elem in res
-                if all(filterkeyelem(key) for key in elem[0])
-            )
-
-        yield from res
-
-    def slice(self, *args, **kwargs) -> FlatMKDict:
-        """
-        Returns new `FlatMKDict` with keys containing `args`.
-        It is possible to filter elements by `filterkey` and `filterkeyelem`.
-        """
-        return FlatMKDict(
-            self.items(
-                *args,
-                filterkey=kwargs.pop("filterkey", None),
-                filterkeyelem=kwargs.pop("filterkeyelem", None),
-            ),  # type: ignore
-            **kwargs,
-        )
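For orientation while reviewing this removal: a minimal usage sketch of the deleted `FlatMKDict` (an order-insensitive multi-key dictionary). This is illustrative only and assumes the `multikeydict` package is importable, e.g. from the new submodule location.

```python
from multikeydict.flatmkdict import FlatMKDict

d = FlatMKDict()
d["a", "b"] = 1            # stored under the sorted key ('a', 'b')
assert d["b", "a"] == 1    # key order does not matter
d["a", "b", "c"] = 2

# slice() returns a new FlatMKDict whose keys contain all of the given elements
sub = d.slice("a", "b")
assert set(sub.keys()) == {("a", "b"), ("a", "b", "c")}

# protect=True forbids reassigning an existing (order-insensitive) key
safe = FlatMKDict(protect=True)
safe["x", "y"] = 1
try:
    safe["y", "x"] = 2
except AttributeError:
    pass
```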
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatten.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatten.py
deleted file mode 100644
index c03832857e390cb719ddade6bad5060848d04915..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/flatten.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from .nestedmkdict import NestedMKDict
-from .flatmkdict import FlatMKDict
-
-from typing import Sequence, Tuple
-
-def _select(seq: Sequence, elems_mask: set) -> Tuple[Tuple, Tuple]:
-	selected = []
-	rest = ()
-	for i, el in enumerate(reversed(seq), 0):
-		if el in elems_mask:
-			selected.append(el)
-		else:
-			rest = tuple(seq[:len(seq)-i])
-			break
-	return tuple(rest), tuple(reversed(selected))
-
-def flatten(mkdict, selkeys: Sequence=()) -> NestedMKDict:
-	selkeys_set = set(selkeys)
-	newdict = mkdict._wrap({}, parent=mkdict)
-	newdict._parent = None
-
-	for key, v in mkdict.walkitems():
-		keys_nested, keys_flat = _select(key, selkeys_set)
-		if keys_flat:
-			flatdict = newdict.get(keys_nested, None)
-			if flatdict is None:
-				newdict[keys_nested] = (flatdict:=FlatMKDict(((keys_flat, v),),))
-			elif isinstance(flatdict, FlatMKDict):
-				flatdict[keys_flat] = v
-			else:
-				raise KeyError(f'Unable to flatten: {".".join(key)}')
-		else:
-			newdict[key] = v
-
-	return newdict
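An illustrative sketch of the deleted `flatten` helper: trailing key parts listed in `selkeys` are collected into a `FlatMKDict` stored under the remaining head key, so their order stops mattering. This assumes the `multikeydict` package is importable; it is not part of the patch itself.

```python
from multikeydict.nestedmkdict import NestedMKDict
from multikeydict.flatten import flatten

nested = NestedMKDict(
    {"root": {"det": {"A": {"z1": 1}, "B": {"z2": 2}}}},
    recursive_to_others=True,
)
flat = flatten(nested, selkeys=("A", "B", "z1", "z2"))

# 'A'/'z1' and 'B'/'z2' now live in FlatMKDicts, so their order is free
assert flat["root", "det", "A", "z1"] == 1
assert flat["root", "det", "z1", "A"] == 1
assert flat["root", "det", "z2", "B"] == 2
```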
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py
deleted file mode 100644
index 711351c13fb886178ca0906172ba5854fd9fd191..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdict.py
+++ /dev/null
@@ -1,319 +0,0 @@
-from .classwrapper import ClassWrapper
-from .visitor import MakeNestedMKDictVisitor
-from .nestedmkdictaccess import NestedMKDictAccess
-
-from collections.abc import Sequence, MutableMapping
-from typing import Any, Optional
-
-class NestedMKDict(ClassWrapper):
-    """Dictionary wrapper managing nested dictionaries.
-        The following functionality is implemented:
-        - Tuple keys are used to access nested dictionaries: ('key1', 'key2', 'key3')
-        - Optionally, a sep symbol may be set to automatically split string keys into tuple keys:
-          'key1.key2.key3' is treated as a nested key if '.' is set as the sep symbol
-        - self._ may be used to access nested dictionaries via attributes: dw._.key1.key2.key3
-    """
-    __slots__ = ('_sep', '_parent', '_types', '_not_recursive_to_others')
-    _sep: str
-    _parent: Any
-    _not_recursive_to_others: bool
-    def __new__(cls, dic, *args, parent=None, sep=None, recursive_to_others=None):
-        if not isinstance(dic, (MutableMapping, NestedMKDict)):
-            return dic
-        return ClassWrapper.__new__(cls)
-
-    def __init__(self, dic, *, sep: Optional[str]=None, parent: Optional[Any]=None, recursive_to_others: bool=False):
-        if isinstance(dic, NestedMKDict):
-            if sep is None:
-                sep = dic._sep
-            recursive_to_others = not dic._not_recursive_to_others
-            dic = dic._object
-        super().__init__(dic, types=type(dic))
-
-        self._sep = sep
-        self._not_recursive_to_others = not recursive_to_others
-        self._parent = parent
-        if parent:
-            if sep and sep!=parent._sep:
-                raise ValueError(f'Inconsistent separators: {sep} (self) and {parent._sep} (parent)')
-
-            self._sep = parent._sep
-            self._types = parent._types
-            self._not_recursive_to_others = parent._not_recursive_to_others
-
-    @property
-    def _(self):
-        return NestedMKDictAccess(self)
-
-    def parent(self):
-        return self._parent
-
-    def child(self, key):
-        try:
-            ret = self[key]
-        except KeyError:
-            ret = self[key]=self._types()
-            return self._wrap(ret, parent=self)
-
-        if not isinstance(ret, self._wrapper_class):
-            raise KeyError('Child {!s} is not NestedMKDict'.format(key))
-
-        return ret
-
-    def keys(self):
-        return self._object.keys()
-
-    def iterkey(self, key):
-        if isinstance(key, str):
-            if self._sep:
-                yield from key.split(self._sep)
-            else:
-                yield key
-        elif isinstance(key, Sequence):
-            for sk in key:
-                yield from self.iterkey(sk)
-        else:
-            yield key
-
-    def splitkey(self, key):
-        it = self.iterkey(key)
-        try:
-            return next(it), tuple(it)
-        except StopIteration:
-            return None, None
-
-    def get(self, key, *args, **kwargs):
-        if key==():
-            return self
-        key, rest=self.splitkey(key)
-
-        if not rest:
-            ret = self._object.get(key, *args, **kwargs)
-            return self._wrap(ret, parent=self)
-
-        sub = self._wrap(self._object.get(key), parent=self)
-        if sub is None:
-            if args:
-                return args[0]
-            raise KeyError(f"No nested key '{key}'")
-
-        if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
-            raise TypeError(f"Nested value for '{key}' has wrong type")
-
-        return sub.get(rest, *args, **kwargs)
-
-    def __getitem__(self, key):
-        if key==():
-            return self
-        head, rest=self.splitkey(key)
-
-        sub = self._object.__getitem__(head)
-        if not rest:
-            sub = self._wrap(sub, parent=self)
-            return sub
-
-        if sub is None:
-            raise KeyError( f"No nested key '{key}'" )
-
-        sub = self._wrap(sub, parent=self)
-        if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
-            raise TypeError(f"Nested value for '{key}' has wrong type")
-
-        try:
-            return sub[rest]
-        except KeyError as e:
-            raise KeyError(key) from e
-
-
-    def __delitem__(self, key):
-        if key==():
-            raise ValueError('May not delete itself')
-        key, rest=self.splitkey(key)
-
-        sub = self._wrap(self._object.__getitem__(key), parent=self)
-        if not rest:
-            del self._object[key]
-            return
-
-        if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
-            raise TypeError(f"Nested value for '{key}' has wrong type")
-
-        del sub[rest]
-
-    def setdefault(self, key, value):
-        key, rest=self.splitkey(key)
-
-        if not rest:
-            ret=self._object.setdefault(key, value)
-            return self._wrap(ret, parent=self)
-
-        if key in self:
-            sub = self._wrap(self._object.get(key), parent=self)
-        else:
-            sub = self._object[key] = self._types()
-            sub = self._wrap(sub, parent=self)
-            # # cfg._set_parent( self )
-
-        if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
-            raise TypeError(f"Nested value for '{key}' has wrong type")
-
-        return sub.setdefault(rest, value)
-
-    def set(self, key, value):
-        key, rest=self.splitkey(key)
-
-        if not rest:
-            self._object[key] = value
-            return value
-
-        if key in self:
-            sub = self._wrap(self._object.get(key), parent=self)
-        else:
-            sub = self._object[key] = self._types()
-            sub = self._wrap(sub, parent=self)
-            # # cfg._set_parent( self )
-
-        if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
-            raise TypeError(f"Nested value for '{key}' has wrong type")
-
-        return sub.__setitem__(rest, value)
-
-    __setitem__= set
-
-    def __contains__(self, key):
-        if key==():
-            return True
-        key, rest=self.splitkey(key)
-
-        if key not in self._object:
-            return False
-
-        if rest:
-            sub = self._wrap(self._object.get(key), parent=self)
-
-            if self._not_recursive_to_others and not isinstance(sub, NestedMKDict):
-                raise TypeError(f"Nested value for '{key}' has wrong type")
-
-            return rest in sub
-
-        return True
-
-    def items(self):
-        for k, v in self._object.items():
-            yield k, self._wrap(v, parent=self)
-
-    def values(self):
-        for v in self._object.values():
-            yield self._wrap(v, parent=self)
-
-    def copy(self) -> 'NestedMKDict':
-        return NestedMKDict(self.object.copy(), parent=self._parent, sep=self._sep, recursive_to_others=not self._not_recursive_to_others)
-
-    def deepcopy(self) -> 'NestedMKDict':
-        new = NestedMKDict(self._types(), parent=self._parent, sep=self._sep, recursive_to_others=not self._not_recursive_to_others)
-        for k, v in self.items():
-            k = k,
-            if isinstance(v, self._wrapper_class):
-                new[k] = v.deepcopy()._object
-            else:
-                new[k] = v
-
-        new._sep = self._sep
-
-        return new
-
-    def walkitems(self, startfromkey=(), *, appendstartkey=False, maxdepth=None):
-        v0 = self[startfromkey]
-        k0 = tuple(self.iterkey(startfromkey))
-
-        if maxdepth is None:
-            nextdepth=None
-        else:
-            nextdepth=max(maxdepth-len(k0)-1, 0)
-
-        if maxdepth==0 or not isinstance(v0, self._wrapper_class):
-            if appendstartkey:
-                yield k0, v0
-            else:
-                yield (), v0
-            return
-
-        if not appendstartkey:
-            k0 = ()
-
-        for k, v in v0.items():
-            k = k0+(k,)
-            if isinstance(v, self._wrapper_class):
-                for k1, v1 in v.walkitems(maxdepth=nextdepth):
-                    yield k+k1, v1
-            elif not self._not_recursive_to_others and isinstance(v, MutableMapping):
-                for k1, v1 in v.items():
-                    if isinstance(k1, tuple):
-                        yield k+k1, v1
-                    else:
-                        yield k+(k1,), v1
-            else:
-                yield k, v
-
-    def walkdicts(self):
-        yieldself=True
-        for k, v in self.items():
-            k = k,
-            if isinstance(v, self._wrapper_class):
-                yieldself=False
-                for k1, v1 in v.walkdicts():
-                    yield k+k1, v1
-        if yieldself:
-            yield (), self
-
-    def walkkeys(self, *args, **kwargs):
-        for k, _ in self.walkitems(*args, **kwargs):
-            yield k
-
-    def walkvalues(self, *args, **kwargs):
-        for _, v in self.walkitems(*args, **kwargs):
-            yield v
-
-    def visit(self, visitor, parentkey=()):
-        visitor = MakeNestedMKDictVisitor(visitor)
-
-        if not parentkey:
-            visitor.start(self)
-
-        visitor.enterdict(parentkey, self)
-        for k, v in self.items():
-            key = parentkey + (k,)
-            if isinstance(v, self._wrapper_class):
-                v.visit(visitor, parentkey=key)
-            else:
-                visitor.visit(key, v)
-
-        visitor.exitdict(parentkey, self)
-
-        if not parentkey:
-            visitor.stop(self)
-
-    def update(self, other) -> 'NestedMKDict':
-        for k, v in other.walkitems():
-            self[k] = v
-        return self
-
-    __ior__ = update
-
-    def update_missing(self, other) -> 'NestedMKDict':
-        for k, v in other.walkitems():
-            try:
-                key_already_present = k in self
-            except TypeError:
-                raise TypeError(f'Value for part ({k}) is not nestable')
-            else:
-                if key_already_present:
-                    raise TypeError(f'Key {k} already present')
-            self[k] = v
-        return self
-
-    __ixor__ = update_missing
-
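For context, a short sketch of the three access styles offered by the deleted `NestedMKDict` wrapper (tuple keys, `sep`-split string keys, attribute access). Illustrative only; assumes the `multikeydict` package is importable.

```python
from multikeydict.nestedmkdict import NestedMKDict

dw = NestedMKDict({"a": {"b": {"c": 1}}}, sep=".")

assert dw["a", "b", "c"] == 1    # tuple keys address nested dictionaries
assert dw["a.b.c"] == 1          # string keys are split on the sep symbol
assert dw._.a.b.c == 1           # attribute access through the `_` property

dw["a", "b", "d"] = 2            # intermediate dictionaries are created on demand
assert list(dw.walkkeys()) == [("a", "b", "c"), ("a", "b", "d")]
```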
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdictaccess.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdictaccess.py
deleted file mode 100644
index b3b3845d32b42dd6d8f098a9c42ce637c94d5b8f..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/nestedmkdictaccess.py
+++ /dev/null
@@ -1,25 +0,0 @@
-class NestedMKDictAccess(object):
-    '''NestedMKDict wrapper. Enables attribute-based access to nested dictionaries'''
-    _ = None
-    def __init__(self, dct):
-        self.__dict__['_'] = dct
-
-    def __call__(self, key):
-        return self._.child(key)._
-
-    def __getattr__(self, key):
-        ret = self._[key]
-
-        if isinstance(ret, self._._wrapper_class):
-            return ret._
-
-        return ret
-
-    def __setattr__(self, key, value):
-        self._[key]=value
-
-    def __delattr__(self, key):
-        del self._[key]
-
-    def __dir__(self):
-        return list(self._.keys())
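A small sketch of the deleted attribute-access helper in action; `NestedMKDictAccess` objects are normally obtained through `NestedMKDict._` rather than constructed directly. Illustrative only; assumes `multikeydict` is importable.

```python
from multikeydict.nestedmkdict import NestedMKDict

dw = NestedMKDict({}, sep=".")

dw._("group.sub").value = 1       # __call__ creates child dicts, __setattr__ assigns
assert dw["group", "sub", "value"] == 1
assert dw._.group.sub.value == 1  # __getattr__ walks into nested dictionaries

del dw._.group.sub.value          # __delattr__ removes the key
assert ("group", "sub", "value") not in dw
```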
diff --git a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/visitor.py b/subtrees/dagflow/subtrees/dictwrapper/multikeydict/visitor.py
deleted file mode 100644
index 4453be3b63661e6cf4fbf7816ca448f4b433b14a..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/multikeydict/visitor.py
+++ /dev/null
@@ -1,59 +0,0 @@
-class NestedMKDictVisitor(object):
-    def start(self, dct):
-        pass
-
-    def enterdict(self, k, v):
-        pass
-
-    def visit(self, k, v):
-        pass
-
-    def exitdict(self, k, v):
-        pass
-
-    def stop(self, dct):
-        pass
-
-def MakeNestedMKDictVisitor(fcn):
-    if isinstance(fcn, NestedMKDictVisitor):
-        return fcn
-
-    if not callable(fcn):
-        raise TypeError(f'Expected a callable, got {type(fcn).__name__}')
-
-    ret=NestedMKDictVisitor()
-    ret.visit = fcn
-    return ret
-
-class NestedMKDictVisitorDemostrator(NestedMKDictVisitor):
-    fmt = '{action:7s} {depth!s:>5s} {key!s:<{keylen}s} {vtype!s:<{typelen}s} {value}'
-    opts = dict(keylen=20, typelen=15)
-    def typestring(self, v):
-        return type(v).__name__
-
-    def start(self, d):
-        v = object.__repr__(d.object)
-        print('Start printing dictionary:', v)
-        self._print('Action', 'Key', 'Value', 'Type', depth='Depth')
-
-    def stop(self, _):
-        print('Done printing dictionary')
-
-    def enterdict(self, k, d):
-        d = d.object
-        v = object.__repr__(d)
-        self._print('Enter', k, v, self.typestring(d))
-
-    def exitdict(self, k, d):
-        d = d.object
-        v = object.__repr__(d)
-        self._print('Exit', k, v, self.typestring(d))
-
-    def visit(self, k, v):
-        self._print('Visit', k, v, self.typestring(v))
-
-    def _print(self, action, k, v, vtype, *, depth=None):
-        if depth is None:
-            depth = len(k)
-        print(self.fmt.format(action=action, depth=depth, key=k, vtype=vtype, value=v, **self.opts))
-
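A sketch of how the deleted visitor helpers were used: `NestedMKDict.visit` accepts either a `NestedMKDictVisitor` subclass or a plain callable, which `MakeNestedMKDictVisitor` wraps so it is invoked for every leaf. Illustrative only; assumes `multikeydict` is importable.

```python
from multikeydict.nestedmkdict import NestedMKDict

dw = NestedMKDict({"a": 1, "d": {"e": 2}})

collected = []
# A plain callable is wrapped by MakeNestedMKDictVisitor and called as
# visit(key, value) for every leaf with its full key tuple.
dw.visit(lambda key, value: collected.append((key, value)))
assert collected == [(("a",), 1), (("d", "e"), 2)]
```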
diff --git a/subtrees/dagflow/subtrees/dictwrapper/requirements.txt b/subtrees/dagflow/subtrees/dictwrapper/requirements.txt
deleted file mode 100644
index 755c43c771ca6569f968a4c78c66331e92ca2496..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-pytest
-pytest-cov
-coverage
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py
deleted file mode 100644
index f30ef435ebe52d7834f3b5525e557e450804ee8c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_flatmkdict.py
+++ /dev/null
@@ -1,78 +0,0 @@
-from itertools import permutations
-
-from multikeydict.flatmkdict import FlatMKDict
-from pytest import raises
-
-
-def test_getset():
-    flatmkdict = FlatMKDict()
-    safeflatmkdict = FlatMKDict(protect=True)
-    val = "val"
-    val2 = ["val", "lav"]
-    flatmkdict["a", "b", "c"] = val
-    safeflatmkdict["a", "b", "c"] = val
-    for key in permutations(("a", "b", "c")):
-        assert flatmkdict[tuple(key)] == val
-        assert safeflatmkdict[tuple(key)] == val
-    flatmkdict["c", "b", "a"] = val2
-    for key in permutations(("a", "b", "c")):
-        assert flatmkdict[tuple(key)] == val2
-        with raises(AttributeError):
-            safeflatmkdict[tuple(key)] = val2
-    safeflatmkdict._protect = False
-    for key in permutations(("a", "b", "c")):
-        safeflatmkdict[tuple(key)] = val
-
-
-def test_slice_filter():
-    flatmkdict = FlatMKDict()
-    flatmkdict["a", "b"] = 1
-    flatmkdict["a", "b", "c"] = 2
-    flatmkdict["a", "c", "d", "b"] = 3
-    assert all(
-        len(tuple(x)) == 3
-        for x in (flatmkdict.items(), flatmkdict.items("a"), flatmkdict.items("a", "b"))
-    )
-    assert len(tuple(flatmkdict.items("a", "b", "c"))) == 2
-    assert len(tuple(flatmkdict.items("a", "b", "d", "c"))) == 1
-    assert isinstance(flatmkdict.slice("a"), FlatMKDict)
-    assert all(
-        x == flatmkdict
-        for x in (
-            flatmkdict.slice("a"),
-            flatmkdict.slice("a", "b"),
-            flatmkdict.slice(
-                filterkey=lambda key: all(elem in "abcd" for elem in key)
-            ),
-            flatmkdict.slice(filterkeyelem=lambda key: key in "abcd")
-        )
-    )
-    assert flatmkdict.slice("a", "b", "c") == {
-        ("a", "b", "c"): 2,
-        ("a", "b", "c", "d"): 3,
-    }
-    assert flatmkdict.slice("a", "b", "c", "d") == {
-        ("a", "b", "c", "d"): 3,
-    }
-    assert flatmkdict.slice(
-        filterkey=lambda key: all(elem != "d" for elem in key)
-    ) == {
-        ("a", "b", "c"): 2,
-        ("a", "b"): 1,
-    }
-
-def test_merge():
-    fd = FlatMKDict()
-    fdsub = FlatMKDict()
-    fdsub['d', 'e', 'f'] = 3
-    fdsub['d', 'e', 'g'] = 4
-
-    fd['a', 'b', 'c1'] = 1
-    fd['a', 'b', 'c2'] = 2
-    fd['a', 'b', 'c4'] = fdsub
-
-    assert fd['a', 'b', 'c1'] == 1
-    assert fd['a', 'b', 'c2'] == 2
-    fd['a', 'b', 'c4', 'd', 'e', 'f'] = 3
-    fd['a', 'b', 'c4', 'd', 'e', 'g'] = 4
-
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py
deleted file mode 100644
index e0f8eae4f682615b5a5aae799fbc873c44fb4b98..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict.py
+++ /dev/null
@@ -1,336 +0,0 @@
-from multikeydict.nestedmkdict import NestedMKDict
-import pytest
-
-def test_nestedmkdict_01():
-    dw = NestedMKDict({})
-
-    assert not dw
-    assert len(dw)==0
-
-def test_nestedmkdict_02():
-    dw = NestedMKDict(dict(a=1))
-
-    assert dw
-    assert len(dw)==1
-
-def test_nestedmkdict_03():
-    d = dict(a=1, b=2, c=3)
-    dw = NestedMKDict(d)
-
-    assert dw.get('a')==1
-    assert dw.get('b')==2
-    assert dw.get('c')==3
-    assert dw.get('d')==None
-    assert dw.get('d.e')==None
-
-    assert tuple(dw.keys())==('a','b','c')
-
-@pytest.mark.parametrize('sep', [None, '.'])
-def test_nestedmkdict_04(sep):
-    dct = dict(a=1, b=2, c=3, d=dict(e=4), f=dict(g=dict(h=5)))
-    dct['z.z.z'] = 0
-    print(dct)
-    dw = NestedMKDict(dct, sep=sep)
-
-    #
-    # Test self access
-    #
-    assert dw.get(()).object is dct
-    assert dw[()].object is dct
-
-    #
-    # Test wrapping
-    #
-    assert isinstance(dw.get('d'), NestedMKDict)
-    assert isinstance(dw.get(('f', 'g')), NestedMKDict)
-
-    #
-    # Test get tuple
-    #
-    assert dw.get(('d', 'e'))==4
-    assert dw.get(('d', 'e1')) is None
-    assert dw.get(('f', 'g', 'h'))==5
-    try:
-        dw.get(('z', 'z', 'z'))
-        assert False
-    except KeyError:
-        pass
-
-    #
-    # Test getitem tuple
-    #
-    assert dw[('d', 'e')]==4
-    try:
-        dw[('d', 'e1')]
-        assert False
-    except KeyError:
-        pass
-    assert dw[('f', 'g', 'h')]==5
-
-    try:
-        dw[('z', 'z', 'z')]
-        assert False
-    except KeyError:
-        pass
-
-    #
-    # Test get sep
-    #
-    if sep:
-        assert dw.get('d.e')==4
-    else:
-        assert dw.get('d.e') is None
-
-    if sep:
-        try:
-            dw.get('z.z.z')
-            assert False
-        except KeyError:
-            pass
-    else:
-        assert dw.get('z.z.z')==0
-
-    #
-    # Test getitem sep
-    #
-    try:
-        assert dw['d.e']==4
-        assert sep is not None
-    except KeyError:
-        pass
-
-    try:
-        assert dw['f.g.h']==5
-        assert dw[('f.g', 'h')]==5
-        assert sep is not None
-    except KeyError:
-        pass
-
-    if sep:
-        try:
-            dw['z.z.z']
-            assert False
-        except KeyError:
-            pass
-    else:
-        assert dw['z.z.z']==0
-
-    #
-    # Test contains
-    #
-    assert 'a' in dw
-    assert not 'a1' in dw
-    assert 'd' in dw
-
-    #
-    # Test contains tuple
-    #
-    assert ('d', 'e') in dw
-    assert not ('k', 'e') in dw
-    assert ('f', 'g', 'h') in dw
-    assert ('f.g.h' in dw) == bool(sep)
-    assert ('z.z.z' in dw) == bool(not sep)
-
-    #
-    # Test parents
-    #
-    g = dw.get(('f', 'g'))
-    assert g.parent().parent() is dw
-
-    #
-    # Test children
-    #
-    m=dw.child(('k', 'l', 'm'))
-    assert dw.get(('k', 'l', 'm')).object is m.object
-
-    #
-    # Test recursive setitem
-    #
-    dw[('k', 'l', 'm', 'n')] = 5
-    try:
-        dw.child(tuple('klmn'))
-        assert False
-    except KeyError:
-        pass
-    assert dw.get(('k', 'l', 'm', 'n')) == 5
-
-    dw[('o.l.m.n')] = 6
-    assert dw['o.l.m.n'] == 6
-    if not sep:
-        assert dw.object['o.l.m.n'] == 6
-
-    #
-    # Test attribute access
-    #
-    assert dw._.a==1
-    assert dw._.b==2
-    assert dw._.c==3
-    assert dw._.d.e==4
-    assert dw._.f.g.h==5
-
-    dw._.f.g.h=6
-    assert dw._.f.g.h==6
-    assert dw._._ is dw
-
-def test_nestedmkdict_06_inheritance():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5, i=6)))])
-    dct['z.z.z'] = 0
-
-    class NestedMKDictA(NestedMKDict):
-        def count(self):
-            return len(tuple(self.walkitems()))
-
-        def depth(self):
-            return max([len(k) for k in self.walkkeys()])
-
-    dw = NestedMKDictA(dct, sep='.')
-    assert dw.count()==7
-    assert dw['d'].count()==1
-    assert dw['f'].count()==2
-    assert dw['f.g'].count()==2
-    assert dw._.f._.count()==2
-
-    assert dw.depth()==3
-    assert dw['d'].depth()==1
-    assert dw['f'].depth()==2
-
-def test_nestedmkdict_07_delete():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
-    dct['z.z.z'] = 0
-    dw = NestedMKDict(dct)
-
-    assert 'a' in dw
-    del dw['a']
-    assert 'a' not in dw
-
-    assert ('d', 'e') in dw
-    del dw[('d', 'e')]
-    assert ('d', 'e') not in dw
-
-    assert ('f', 'g', 'h') in dw
-    del dw._.f.g.h
-    assert ('f', 'g', 'h') not in dw
-    assert ('f', 'g') in dw
-
-def test_nestedmkdict_08_create():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
-    dct['z.z.z'] = 0
-    dw = NestedMKDict(dct, sep='.')
-
-    dw._('i.k').l=3
-    assert dw._.i.k.l==3
-
-    child = dw.child('child')
-    assert dw['child'].object=={}
-
-def test_nestedmkdict_09_dictcopy():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
-    dct['z'] = {}
-    dw = NestedMKDict(dct, sep='.')
-
-    dw1 = dw.deepcopy()
-    for i, (k, v) in enumerate(dw1.walkdicts()):
-        # print(i, k)
-        assert k in dw
-        assert v._object==dw[k]._object
-        assert v._object is not dw[k]._object
-        assert type(v._object) is type(dw[k]._object)
-    assert i==2
-
-def test_nestedmkdict_09_walkitems():
-    dct = {
-        'a': 1,
-        'b': 2,
-        'c': 3,
-        'c1': {
-            'i': {
-                'j': {
-                    'k': {
-                        'l': 6
-                    }
-                }
-            }
-        },
-        'd': {
-            'e': 4
-        },
-        'f': {
-            'g': {
-                'h': 5
-            }
-        }
-    }
-    dct['z'] = {}
-    dw = NestedMKDict(dct, sep='.')
-
-    imaxlist=[5, 0, 6, 5, 5, 5, 5, 5, 5]
-    for imax, maxdepth in zip(imaxlist, [None]+list(range(len(imaxlist)))):
-        i=0
-        print(f'{imax=}, {maxdepth=}')
-        maxk = -1
-        for i, (k, v) in enumerate(dw.walkitems(maxdepth=maxdepth)):
-            print(i, k, v)
-            assert maxdepth is None or len(k)<=maxdepth
-            maxk=max(maxk, len(k))
-        print(f'{maxk=}')
-        print()
-        assert i==imax
-
-def test_nestedmkdict_09_walk():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
-    dw = NestedMKDict(dct)
-
-    keys0 = [ ('a',), ('b', ), ('c',), ('d', 'e'), ('f', 'g', 'h') ]
-    keys = [k for k, v in dw.walkitems()]
-    assert keys==keys0
-
-    assert [(k,v) for k, v in dw.walkitems('a', appendstartkey=True)] == [(('a',), 1)]
-    assert [(k,v) for k, v in dw.walkitems('a', appendstartkey=False)] == [((), 1)]
-    assert [(k,v) for k, v in dw.walkitems('d', appendstartkey=True)] == [(('d','e'), 4)]
-    assert [(k,v) for k, v in dw.walkitems('d', appendstartkey=False)] == [(('e',), 4)]
-    assert [(k,v) for k, v in dw.walkitems(('f','g'), appendstartkey=True)] == [(('f','g', 'h'), 5)]
-    assert [(k,v) for k, v in dw.walkitems(('f','g'), appendstartkey=False)] == [(('h',), 5)]
-
-def test_nestedmkdict_10_iterkey():
-    d = dict(a=1, b=2, c=3)
-    dw = NestedMKDict(d)
-
-    assert ['a']==list(dw.iterkey('a'))
-    assert ['a.b']==list(dw.iterkey('a.b'))
-    assert ['a', 'b']==list(dw.iterkey(('a', 'b')))
-    assert [1]==list(dw.iterkey(1))
-    assert [1.0]==list(dw.iterkey(1.0))
-
-def test_nestedmkdict_11_iterkey():
-    d = dict(a=1, b=2, c=3)
-    dw = NestedMKDict(d,  sep='.')
-
-    assert ['a']==list(dw.iterkey('a'))
-    assert ['a', 'b']==list(dw.iterkey('a.b'))
-    assert ['a', 'b']==list(dw.iterkey(('a', 'b')))
-    assert [1]==list(dw.iterkey(1))
-    assert [1.0]==list(dw.iterkey(1.0))
-
-def test_nestedmkdict_setdefault_01():
-    d = dict(a=dict(b=dict(key='value')))
-    dw = NestedMKDict(d)
-
-    newdict = dict(newkey='newvalue')
-
-    sd1 = dw.setdefault(('a','b'), newdict)
-    assert isinstance(sd1, NestedMKDict)
-    assert sd1._object==d['a']['b']
-
-    sd2 = dw.setdefault(('a','c'), newdict)
-    assert isinstance(sd2, NestedMKDict)
-    assert sd2._object==newdict
-
-def test_nestedmkdict_eq_01():
-    d = dict(a=dict(b=dict(key='value')))
-    dw = NestedMKDict(d)
-
-    assert dw['a']==d['a']
-    assert d['a']==dw['a']
-    assert dw['a']!=d
-    assert dw['a']==dw['a']
-    assert dw['a'] is not dw['a']
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatmkdict.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatmkdict.py
deleted file mode 100644
index f919c529e32abd0f4a2074dbd3b9892b19d4a192..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatmkdict.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from multikeydict.nestedmkdict import NestedMKDict
-from multikeydict.flatmkdict import FlatMKDict
-from pytest import raises
-
-def test_nestedmkdict_flatmkdict():
-    flatmkdict = FlatMKDict({
-        ('a1', 'b1', 'c1'): 'v1',
-        ('a2', 'b2', 'c2'): 'v2',
-        })
-    dct = {'root': {
-        'subfolder1': {
-            'key1': 'value1',
-            'key2': 'value2'
-        },
-        'subfolder2': {
-            'key1': 'value1',
-            'key2': 'value2',
-            'st': flatmkdict
-        },
-        'key0': 'value0'
-    }}
-    dw = NestedMKDict(dct, recursive_to_others=True)
-    dws = NestedMKDict(dct, sep='.', recursive_to_others=True)
-    dwerror = NestedMKDict(dct, recursive_to_others=False)
-
-    objects = (dw, dws, dwerror)
-    objectsok = (dw, dws)
-
-    assert flatmkdict['a1', 'b1', 'c1']=='v1'
-    assert flatmkdict['b1', 'a1', 'c1']=='v1'
-    assert flatmkdict['c1', 'a1', 'b1']=='v1'
-
-    for obj in objects:
-        st1 = obj['root', 'subfolder2', 'st']
-        assert st1 is flatmkdict
-
-    for obj in objectsok:
-        assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
-
-        assert ('root', 'subfolder2', 'st', 'a1', 'b1', 'c1') in obj
-        assert ('root', 'subfolder2', 'st', 'b1', 'a1', 'c1') in obj
-        assert ('root', 'subfolder2', 'st', 'c1', 'a1', 'b1') in obj
-        assert ('root', 'subfolder2', 'st', 'a2', 'b2', 'c2') in obj
-        assert ('root', 'subfolder2', 'st', 'b2', 'a2', 'c2') in obj
-        assert ('root', 'subfolder2', 'st', 'c2', 'a2', 'b2') in obj
-
-    assert dws['root.subfolder2.st.a1.b1.c1']=='v1'
-    assert dws['root.subfolder2.st.b1.a1.c1']=='v1'
-    assert dws['root.subfolder2.st.c1.a1.b1']=='v1'
-    assert dws['root.subfolder2.st.a2.b2.c2']=='v2'
-    assert dws['root.subfolder2.st.b2.a2.c2']=='v2'
-    assert dws['root.subfolder2.st.c2.a2.b2']=='v2'
-
-    assert 'root.subfolder2.st.a1.b1.c1' in dws
-    assert 'root.subfolder2.st.b1.a1.c1' in dws
-    assert 'root.subfolder2.st.c1.a1.b1' in dws
-    assert 'root.subfolder2.st.a2.b2.c2' in dws
-    assert 'root.subfolder2.st.b2.a2.c2' in dws
-    assert 'root.subfolder2.st.c2.a2.b2' in dws
-
-    assert 'root.subfolder3.st.c2.a2.b2' not in dws
-    assert 'root.subfolder2.st.c3.a2.b2' not in dws
-
-    with raises(KeyError):
-        dws['root.subfolder2.st.a1.b2.c1']
-
-    with raises(KeyError):
-        dws['root.subfolder1.st.a1.b1.c1']
-
-    with raises(TypeError):
-        dwerror['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']
-
-    with raises(TypeError):
-        dwerror.get(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'))
-
-    with raises(TypeError):
-        dwerror.get(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'), 'default')
-
-    with raises(TypeError):
-        del dwerror['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']
-
-    with raises(TypeError):
-        dwerror.setdefault(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'), 'default')
-
-    with raises(TypeError):
-        dwerror.set(('root', 'subfolder2', 'st', 'a1', 'b1', 'c1'), 'default')
-
-    with raises(TypeError):
-        ('root', 'subfolder2', 'st', 'a1', 'b1', 'c1') in dwerror
-
-    # Walks
-    for k, v in dw.walkitems():
-        print(k, v)
-
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatten.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatten.py
deleted file mode 100644
index d9a6ded7afa6ae9a1efe87a94a876b68af6785f6..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_flatten.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from multikeydict.nestedmkdict import NestedMKDict
-from multikeydict.flatten import flatten, _select
-from pytest import raises
-
-from pprint import pprint
-
-def test__select():
-    a, b = _select(tuple('abcd'), set('cd'))
-    assert a==tuple('ab')
-    assert b==tuple('cd')
-
-    a, b = _select(tuple('abcd'), set('bd'))
-    assert a==tuple('abc')
-    assert b==tuple('d')
-
-    a, b = _select(tuple('abcd'), set('ab'))
-    assert a==tuple('abcd')
-    assert b==tuple()
-
-    a, b = _select(tuple('abcd'), set('ef'))
-    assert a==tuple('abcd')
-    assert b==tuple()
-
-
-def test_nestedmkdict_flatten_v01():
-    dct = {'root': {
-        'subfolder1': {
-            'key1': 'value1',
-            'key2': 'value2'
-        },
-        'subfolder2': {
-            'key1': 'value1',
-            'key2': 'value2',
-            'st': {
-                'a1': {
-                       'b1': {
-                           'c1': 'v1'
-                           }
-                       },
-                'a2': {
-                       'b2': {
-                           'c2': 'v2'
-                           }
-                       },
-                }
-        },
-        'key0': 'value0'
-    }}
-    dw = NestedMKDict(dct, recursive_to_others=True)
-    dws = NestedMKDict(dct, sep='.', recursive_to_others=True)
-
-    dwf = flatten(dw, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
-    dwsf = flatten(dws, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
-
-    for obj in (dwf, dwsf):
-        assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
-
-def test_nestedmkdict_flatten_v02():
-    dct = {'root': {
-        'subfolder1': {
-            'key1': 'value1',
-            'key2': 'value2'
-        },
-        'subfolder2': {
-            'key1': 'value1',
-            'key2': 'value2',
-            'st': {
-                'a1': {
-                       'b1': {
-                           'd1': 'extra',
-                           'c1': 'v1'
-                           }
-                       },
-                'a2': {
-                       'b2': {
-                           'c2': 'v2'
-                           }
-                       },
-                }
-        },
-        'key0': 'value0'
-    }}
-    dw = NestedMKDict(dct, recursive_to_others=True)
-
-    with raises(KeyError):
-        dwf = flatten(dw, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
-    # import IPython; IPython.embed(colors='neutral')
-    # for obj in (dwf,):
-    #     assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
-    #     assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
-    #     assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
-    #     assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
-    #     assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
-    #     assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
-    #     # FlatDict is unable to pass keys
-    #     # assert obj['root', 'subfolder2', 'st', 'd1', 'a2', 'b2']=='extra'
-
-def test_nestedmkdict_flatten_v03():
-    dct = {'root': {
-        'subfolder1': {
-            'key1': 'value1',
-            'key2': 'value2'
-        },
-        'subfolder2': {
-            'key1': 'value1',
-            'key2': 'value2',
-            'st': {
-                'a1': {
-                       'b1': {
-                           'c1': 'v1'
-                           }
-                       },
-                'a2': {
-                       'b2': {
-                           'c2': 'v2',
-                           'd1': 'extra'
-                           }
-                       },
-                }
-        },
-        'key0': 'value0'
-    }}
-    dw = NestedMKDict(dct, recursive_to_others=True)
-    dwf = flatten(dw, ('a1', 'b1', 'c1', 'a2', 'b2', 'c2'))
-    # TODO: this test is inconsistent with test_nestedmkdict_flatten_v02.
-    # It does the same, but in a different order.
-    pprint(dwf.object)
-    for obj in (dwf,):
-        assert obj['root', 'subfolder2', 'st', 'a1', 'b1', 'c1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'b1', 'a1', 'c1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'c1', 'a1', 'b1']=='v1'
-        assert obj['root', 'subfolder2', 'st', 'a2', 'b2', 'c2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'b2', 'a2', 'c2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'c2', 'a2', 'b2']=='v2'
-        assert obj['root', 'subfolder2', 'st', 'd1', 'a2', 'b2']=='extra'
-
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_update.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_update.py
deleted file mode 100644
index 2886fc8cd54112ff312a79489b2eefb6c7e67f1c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_nestedmkdict_update.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from multikeydict.nestedmkdict import NestedMKDict
-
-from pytest import raises
-
-def test_nestedmkdict_update_01():
-    dct1 = {
-        'a': 1,
-        'b': 2,
-        'c': 3,
-        'd': {
-            'da': 4
-        },
-        'e': {
-            'ea': {
-                'eaa': 5
-            }
-        }
-    }
-    dct2 = {
-        'c': 4,
-        'd': {
-            'da': 5,
-            'db': 6
-        },
-        'i': {
-            'ia': 7
-            }
-    }
-    dct3 = {
-        'd': {
-            'da': {
-                'daa': 1
-                }
-        }
-    }
-    dct4 = {
-        'd': 6
-    }
-    dct1_u2 = {
-            'a': 1,
-            'b': 2,
-            'c': 4,
-            'd': {
-                'da': 5,
-                'db': 6
-                },
-            'e': {
-                'ea': {
-                    'eaa': 5
-                    }
-                },
-            'i': {
-                'ia': 7
-                }
-            }
-    dw1a = NestedMKDict(dct1)
-    dw2 = NestedMKDict(dct2)
-    dw3 = NestedMKDict(dct3)
-    dw4 = NestedMKDict(dct3)
-
-    dw1 = dw1a.deepcopy()
-    dw1.update(dw2)
-    assert dw1==dct1_u2
-
-    dw1 = dw1a.deepcopy()
-    dw1|=dw2
-    assert dw1==dct1_u2
-
-    dw1 = dw1a.deepcopy()
-    with raises(TypeError):
-        dw1|=dw3
-
-    dw1 = dw1a.deepcopy()
-    with raises(TypeError):
-        dw1^=dw3
-
-    dw1 = dw1a.deepcopy()
-    with raises(TypeError):
-        dw1^=dw2
-
-    dw1 = dw1a.deepcopy()
-    with raises(TypeError):
-        dw1|=dw4
-
-    dw1 = dw1a.deepcopy()
-    with raises(TypeError):
-        dw1^=dw4
diff --git a/subtrees/dagflow/subtrees/dictwrapper/test/test_visitor.py b/subtrees/dagflow/subtrees/dictwrapper/test/test_visitor.py
deleted file mode 100644
index d135679fb003bfeeff57d548a94b16edf8243ae1..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/dictwrapper/test/test_visitor.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from multikeydict.nestedmkdict import NestedMKDict
-from multikeydict.visitor import NestedMKDictVisitorDemostrator
-
-def test_nestedmkdict_04_visitor():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
-    dct['z.z.z'] = 0
-    dw = NestedMKDict(dct)
-
-    keys0 = (('a',) , ('b',) , ('c',) , ('d', 'e'), ('f', 'g', 'h'), ('z.z.z', ))
-    values0 = (1, 2, 3, 4, 5, 0)
-
-    keys = tuple(dw.walkkeys())
-    values = tuple(dw.walkvalues())
-    assert keys==keys0
-    assert values==values0
-
-    class Visitor(object):
-        keys, values = (), ()
-        def __call__(self, k, v):
-            self.keys+=k,
-            self.values+=v,
-    v = Visitor()
-    dw.visit(v)
-    assert v.keys==keys0
-    assert v.values==values0
-
-def test_nestedmkdict_05_visitor():
-    dct = dict([('a', 1), ('b', 2), ('c', 3), ('d', dict(e=4)), ('f', dict(g=dict(h=5)))])
-    dct['z.z.z'] = 0
-    dw = NestedMKDict(dct)
-
-    dw.visit(NestedMKDictVisitorDemostrator())
diff --git a/subtrees/dagflow/subtrees/gindex/.coveragerc b/subtrees/dagflow/subtrees/gindex/.coveragerc
deleted file mode 100644
index 5aa4368367438010357e7a4f75d0bfacbf8bd6d3..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/.coveragerc
+++ /dev/null
@@ -1,6 +0,0 @@
-[run]
-source = .
-omit = ./src/*,*__init__.py
-
-[report]
-omit = ./src/*,*__init__.py
diff --git a/subtrees/dagflow/subtrees/gindex/.gitignore b/subtrees/dagflow/subtrees/gindex/.gitignore
deleted file mode 100755
index 2d8d7bb48dcc2284f5630b17b0a8f59de5b853d3..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/.gitignore
+++ /dev/null
@@ -1,51 +0,0 @@
-build
-__pycache__
-
-# Local configuration files and folders
-config_local
-.local/
-.vscode
-.direnv/
-.envrc
-.fish_functions
-matplotlibrc
-.coverage
-cov.*
-.hypothesis
-
-# Transient files (vim, etc)
-*~
-*.swp
-\#*
-.\#*
-.cache
-.lark_cache*
-.lark-cache*
-*.bak
-*.backup
-
-# vim
-UltiSnips/*
-.viminfo
-.vimrc
-.nvimrc
-*.vim
-.ycm_extra_conf.py
-
-# Latex
-*.aux
-*.pda
-*.toc
-*.log
-*.fdb*
-*.out
-*.pdf
-*.blg
-*.snm
-*.nav
-# code
-
-# Code
-tags
-*.pyc
-*.o
diff --git a/subtrees/dagflow/subtrees/gindex/.gitlab-ci.yml b/subtrees/dagflow/subtrees/gindex/.gitlab-ci.yml
deleted file mode 100755
index 9aa970619ee2e9d67144eca4ce7190ca049d59d9..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/.gitlab-ci.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-stages:
-    - tests
-
-tests:
-    image: git.jinr.ru:5005/gna/gna-base-docker-image:gparser
-    stage: tests
-
-    script:
-    - export PYTHONPATH="$PYTHONPATH:/builds/gna:/builds/gna/gassembler:/builds/gna/gassembler/src"
-    - python3 -m pip install -r requirements.txt
-    - coverage run --source=gindex -m pytest
-    - coverage report
-    - coverage xml
-    coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
-    artifacts:
-        reports:
-            coverage_report:
-                coverage_format: cobertura
-                path: coverage.xml
-    only:
-        - main
-        - merge_requests
diff --git a/subtrees/dagflow/subtrees/gindex/README.md b/subtrees/dagflow/subtrees/gindex/README.md
deleted file mode 100644
index e960e60197946d56c17a71898a850f9233a3077e..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# GAssembler
-
-[![python](https://img.shields.io/badge/python-3.8.5-blue.svg)](https://www.python.org/)
-[![pipeline](https://git.jinr.ru/gna/gassembler/badges/main/pipeline.svg)](https://git.jinr.ru/gna/gassembler/commits/main)
-[![coverage report](https://git.jinr.ru/gna/gassembler/badges/main/coverage.svg)](https://git.jinr.ru/gna/gassembler/-/commits/main)
-<!--- Uncomment here after adding docs!
-[![pages](https://img.shields.io/badge/pages-link-white.svg)](http://gna.pages.jinr.ru/gassembler)
--->
-
-The GNA parser and backend integration project
diff --git a/subtrees/dagflow/subtrees/gindex/conftest.py b/subtrees/dagflow/subtrees/gindex/conftest.py
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/subtrees/dagflow/subtrees/gindex/gindex/__init__.py b/subtrees/dagflow/subtrees/gindex/gindex/__init__.py
deleted file mode 100644
index 751de5f1dd65c7ba956521fb8888a88a80bd6755..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/gindex/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from .gindex import GIndex, GIndexInstance, GIndexName
-from .gnindex import GNIndex, GNIndexInstance
diff --git a/subtrees/dagflow/subtrees/gindex/gindex/gindex.py b/subtrees/dagflow/subtrees/gindex/gindex/gindex.py
deleted file mode 100644
index af2f44d12366871bf22c57877af9439af76bac0a..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/gindex/gindex.py
+++ /dev/null
@@ -1,425 +0,0 @@
-from __future__ import annotations
-
-from typing import Iterator, Literal, Optional, Sequence, Tuple
-from warnings import warn
-
-from attr import define, field
-from attr.validators import instance_of
-
-
-@define(hash=True, slots=True)
-class GIndexName:
-    """
-    The index name class.
-    Contains two fields: `short` (alias `s`) and `full` (alias `f`).
-    If `full` is not given, sets `full=short`.
-    The fields are validated by `attr.validators` to be of type `str`.
-    """
-
-    short: str = field(validator=instance_of(str))
-    full: str = field(validator=instance_of(str), default="")
-
-    @property
-    def s(self):
-        return self.short
-
-    @s.setter
-    def s(self, val):
-        self.short = val
-
-    @s.deleter
-    def s(self):
-        del self.short
-
-    @property
-    def f(self):
-        return self.full
-
-    @f.setter
-    def f(self, val):
-        self.full = val
-
-    @f.deleter
-    def f(self):
-        del self.full
-
-    def tuple(self) -> Tuple[str, str]:
-        return (self.short, self.full)
-
-    def __attrs_post_init__(self) -> None:
-        if not self.full:
-            self.full = self.short
-
-    def copy(self, deep: bool = False) -> GIndexName:
-        return (
-            GIndexName(
-                str(self.short),
-                str(self.full),
-            )
-            if deep
-            else GIndexName(
-                self.short,
-                self.full,
-            )
-        )
-
-    def copywith(self, **kwargs) -> GIndexName:
-        """Returns a copy of the object with updated fields from `kwargs`"""
-        return (
-            GIndexName(
-                short=kwargs.pop("short", self.short),
-                full=kwargs.pop("full", self.full),
-            )
-            if kwargs.pop("deep", False)
-            else GIndexName(
-                short=kwargs.pop("short", str(self.short)),
-                full=kwargs.pop("full", str(self.full)),
-            )
-        )
-
-    def __iter__(self) -> Iterator[str]:
-        yield from self.tuple()
-
-    def __getitem__(self, key: str) -> str:
-        if key in {"s", "short"}:
-            return self.short
-        elif key in {"f", "full"}:
-            return self.full
-        else:
-            raise ValueError(
-                "'key' must be in ('s', 'f', 'short', 'full'), "
-                f"but given {key}!"
-            )
-
-    def dict(self) -> dict:
-        return {"short": self.short, "full": self.full}
-
-    def __str__(self) -> str:
-        return "{" + f"short: {self.short}, full: {self.full}" + "}"
-
-    def __repr__(self) -> str:
-        return self.__str__()
-
-
-def namemode_validator(instance, attribute, value):
-    if value not in {"s", "f", "short", "full"}:
-        raise ValueError(
-            "'namemode' must be in ('s', 'f', 'short', 'full'), "
-            f"but given {value}!"
-        )
-
-
-@define(hash=True, slots=True)
-class GIndexInstance:
-    """
-    The index instance class, storing a single `value` (`type=str`)
-    and `name` (`type=GIndexName`).
-    Contains a `format` method, which substitutes the formatted `value` for the
-    `name.short` and `name.full` placeholders.
-    """
-
-    name: GIndexName = field(validator=instance_of(GIndexName))
-    value: str = field(validator=instance_of(str))
-    sep: str = field(validator=instance_of(str), default="_")
-    withname: bool = field(validator=instance_of(bool), default=False)
-    namemode: Literal["s", "f", "short", "full"] = field(
-        validator=namemode_validator, default="s"
-    )
-    namesep: str = field(validator=instance_of(str), default="")
-    _fmtstr: str = field(init=False, default="{sep}{indexname}{namesep}")
-
-    def __attrs_post_init__(self) -> None:
-        self._fmtstr += self.value
-        if not self.withname and self.namesep:
-            warn(
-                "'namesep' is not used without 'withname=True'",
-                RuntimeWarning,
-            )
-
-    def format(
-        self,
-        string: str,
-        place: Optional[str] = None,
-    ) -> str:
-        """
-        Formats `string` with the index `value`,
-        substituted for the `name.short` and `name.full` placeholders
-        """
-        if not isinstance(string, str):
-            raise TypeError(
-                f"'string' must be 'str', but given {type(string)}!"
-            )
-        elif not string:
-            return string
-        return self._format(string=string, place=place)
-
-    def _format(
-        self,
-        string: str,
-        place: Optional[str],
-    ) -> str:
-        formatted = self.formatted()
-        fmtdict = (
-            {place: formatted}
-            if place
-            else {self.name.s: formatted, self.name.f: formatted}
-        )
-        return string.format(**fmtdict)
-
-    def formatwith(
-        self,
-        string: str,
-        withname: bool = False,
-        namemode: Literal["s", "f", "short", "full"] = "s",
-        sep: Optional[str] = None,
-        namesep: Optional[str] = None,
-        place: Optional[str] = None,
-    ) -> str:
-        """
-        Formats `string` with the index `value`,
-        substituted for the `name.short` and `name.full` placeholders
-        """
-        if not isinstance(string, str):
-            raise TypeError(
-                f"'string' must be 'str', but given {type(string)}!"
-            )
-        elif not string:
-            return string
-        return self._formatwith(
-            string=string,
-            withname=withname,
-            namemode=namemode,
-            sep=sep,
-            namesep=namesep,
-            place=place,
-        )
-
-    def _formatwith(
-        self,
-        string: str,
-        withname: bool,
-        namemode: Literal["s", "f", "short", "full"],
-        sep: Optional[str],
-        namesep: Optional[str],
-        place: Optional[str],
-    ) -> str:
-        formatted = self.formattedwith(
-            withname=withname, namemode=namemode, sep=sep, namesep=namesep
-        )
-        fmtdict = (
-            {place: formatted}
-            if place
-            else {self.name.s: formatted, self.name.f: formatted}
-        )
-        return string.format(**fmtdict)
-
-    def formatted(self) -> str:
-        """
-        Formatted index with default options
-        """
-        if self.withname:
-            indexname = (
-                self.name.s if self.namemode in ("s", "short") else self.name.f
-            )
-            namesep = self.namesep
-        else:
-            indexname = ""
-            namesep = ""
-        return self._fmtstr.format(
-            sep=self.sep, indexname=indexname, namesep=namesep
-        )
-
-    def formattedwith(
-        self,
-        withname: bool = False,
-        namemode: Literal["s", "f", "short", "full"] = "s",
-        sep: Optional[str] = None,
-        namesep: Optional[str] = None,
-    ) -> str:
-        """
-        Formatted index with custom options
-        """
-        if sep is None:
-            sep = self.sep
-        elif not isinstance(sep, str):
-            raise TypeError(f"'sep' must be 'str', but given {type(sep)}!")
-        if withname:
-            if namemode in {"s", "short"}:
-                indexname = self.name.s
-            elif namemode in {"f", "full"}:
-                indexname = self.name.f
-            else:
-                raise ValueError(
-                    "'namemode' must be in ('s', 'f', 'short', 'full'), "
-                    f"but given '{namemode}'!"
-                )
-            if namesep is None:
-                namesep = sep
-            elif not isinstance(namesep, str):
-                raise TypeError(
-                    f"'namesep' must be 'str', but given {type(sep)}!"
-                )
-        else:
-            indexname = ""
-            if namesep:
-                warn(
-                    "'namesep' is not used without 'withname=True'",
-                    RuntimeWarning,
-                )
-            namesep = ""
-        return self._fmtstr.format(
-            sep=sep, indexname=indexname, namesep=namesep
-        )
-
-    def copy(self, deep: bool = False) -> GIndexInstance:
-        """Returns a copy of the object"""
-        return (
-            GIndexInstance(
-                name=self.name.copy(),
-                value=str(self.value),
-                sep=str(self.sep),
-                withname=bool(self.withname),
-                namemode=str(self.namemode),  # type:ignore
-                namesep=str(self.namesep),
-            )
-            if deep
-            else GIndexInstance(
-                name=self.name,
-                value=self.value,
-                sep=self.sep,
-                withname=self.withname,
-                namemode=self.namemode,
-                namesep=self.namesep,
-            )
-        )
-
-    def copywith(self, **kwargs) -> GIndexInstance:
-        """Returns a copy of the object with updated fields from `kwargs`"""
-        return (
-            GIndexInstance(
-                name=kwargs.pop("name", self.name.copy()),
-                value=kwargs.pop("value", str(self.value)),
-                sep=kwargs.pop("sep", str(self.sep)),
-                withname=kwargs.pop("withname", bool(self.withname)),
-                namemode=kwargs.pop("namemode", str(self.namemode)),
-                namesep=kwargs.pop("namesep", str(self.namesep)),
-            )
-            if kwargs.pop("deep", True)
-            else GIndexInstance(
-                name=kwargs.pop("name", self.name),
-                value=kwargs.pop("value", self.value),
-                sep=kwargs.pop("sep", self.sep),
-                withname=kwargs.pop("withname", self.withname),
-                namemode=kwargs.pop("namemode", self.namemode),
-                namesep=kwargs.pop("namesep", self.namesep),
-            )
-        )
-
-
-@define(hash=True, slots=True)
-class GIndex:
-    """
-    The index class, storing the `values` and `name` and providing useful helper methods
-    """
-
-    name: GIndexName = field(validator=instance_of(GIndexName))
-    values: tuple = field(default=tuple())
-    sep: str = field(validator=instance_of(str), default="_")
-    withname: bool = field(validator=instance_of(bool), default=False)
-    namemode: Literal["s", "f", "short", "full"] = field(
-        validator=namemode_validator, default="s"
-    )
-    namesep: str = field(validator=instance_of(str), default="")
-
-    def __attrs_post_init__(self) -> None:
-        if isinstance(self.values, tuple) and self.is_unique_values():
-            return
-        if isinstance(self.values, set):
-            self.values = tuple(self.values)
-        elif not self.is_unique_values():
-            raise ValueError(f"'values' contains duplicates: {self.values}!")
-        elif isinstance(self.values, list):
-            self.values = tuple(self.values)
-        else:
-            raise TypeError(
-                f"'values' must be a `list`, `tuple` or `set` (got {self.values}, "
-                f"which is a {type(self.values)})!"
-            )
-
-    def __iter__(self) -> Iterator[GIndexInstance]:
-        for val in self.values:
-            yield GIndexInstance(
-                name=self.name,
-                value=val,
-                sep=self.sep,
-                withname=self.withname,
-                namemode=self.namemode,
-                namesep=self.namesep,
-            )
-
-    def instances(self) -> Sequence[GIndexInstance]:
-        return tuple(self.__iter__())
-
-    def __getitem__(self, key: int) -> GIndexInstance:
-        if not isinstance(key, int):
-            raise TypeError(f"'key' must be 'int', but given '{type(key)}'!")
-        return GIndexInstance(
-            name=self.name,
-            value=self.values[key],
-            sep=self.sep,
-            withname=self.withname,
-            namemode=self.namemode,
-            namesep=self.namesep,
-        )
-
-    def size(self) -> int:
-        """Returns the size of the list with values (number of variants)"""
-        return len(self.values)
-
-    def is_unique_values(self) -> bool:
-        """Checks if the `values` contain only unique elements"""
-        return len(self.values) == len(set(self.values))
-
-    def copy(self, deep: bool = False) -> GIndex:
-        """Returns a copy of the object"""
-        return (
-            GIndex(
-                name=self.name.copy(),
-                values=tuple(self.values),
-                sep=str(self.sep),
-                withname=bool(self.withname),
-                namemode=str(self.namemode),  # type: ignore
-                namesep=str(self.namesep),
-            )
-            if deep
-            else GIndex(
-                name=self.name,
-                values=self.values,
-                sep=self.sep,
-                withname=self.withname,
-                namemode=self.namemode,
-                namesep=self.namesep,
-            )
-        )
-
-    def copywith(self, **kwargs) -> GIndex:
-        """Returns a copy of the object with updated fields from `kwargs`"""
-        return (
-            GIndex(
-                name=kwargs.pop("name", self.name.copy()),
-                values=kwargs.pop("values", tuple(self.values)),
-                sep=kwargs.pop("sep", str(self.sep)),
-                withname=kwargs.pop("withname", bool(self.withname)),
-                namemode=kwargs.pop("namemode", str(self.namemode)),
-                namesep=kwargs.pop("namesep", str(self.namesep)),
-            )
-            if kwargs.pop("deep", False)
-            else GIndex(
-                name=kwargs.pop("name", self.name),
-                values=kwargs.pop("values", self.values),
-                sep=kwargs.pop("sep", self.sep),
-                withname=kwargs.pop("withname", self.withname),
-                namemode=kwargs.pop("namemode", self.namemode),
-                namesep=kwargs.pop("namesep", self.namesep),
-            )
-        )
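-
-
-# Usage sketch (illustrative only; the fixture values mirror
-# subtrees/gindex/tests/test_gindex.py and are not part of this module):
-#
-#   >>> detector = GIndex(GIndexName("det", "detector"), ("01", "02", "03"))
-#   >>> detector.size()
-#   3
-#   >>> detector[0].formatted()     # a GIndexInstance, default sep="_"
-#   '_01'
-#   >>> detector.copywith(values=("11", "12")).values
-#   ('11', '12')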
diff --git a/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py b/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py
deleted file mode 100644
index adf7e6689946e56f279ca97ae6badad542a09917..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/gindex/gnindex.py
+++ /dev/null
@@ -1,588 +0,0 @@
-from __future__ import annotations
-
-from collections import UserDict
-from itertools import product
-from typing import Any, Iterator, Literal, Optional, Sequence, Tuple, Union
-
-from attr import Factory, define, field
-from attr.validators import instance_of
-
-from .gindex import GIndex, GIndexInstance, GIndexName, namemode_validator
-
-
-class GIndexNameDict(UserDict):
-    """
-    The same as a usual dict, but the keys are `GIndexName` objects.
-    It is possible to use `GIndexName.short` or `GIndexName.full`
-    instead of the `GIndexName` object itself.
-    When an item is set with a `str` key, a new `GIndexName(key, key)`
-    is created if no existing `GIndexName` object matches the key:
-    `obj["det"] = val` is the same as `obj[GIndexName("det", "det")] = val`
-    """
-
-    def __getitem__(self, key: Any) -> Any:
-        if isinstance(key, str):
-            for elem in self:
-                if key in elem:
-                    return self.data[elem]
-        return super().__getitem__(key)
-
-    def __setitem__(self, key: Any, val: Any) -> None:
-        if isinstance(key, str):
-            for elem in self:
-                if key in elem:
-                    self.data[elem] = val
-                    return
-            self[GIndexName(key, key)] = val
-        else:
-            super().__setitem__(key, val)
-
-    def __delitem__(self, key: Any) -> None:
-        if isinstance(key, str):
-            for elem in self:
-                if key in elem:
-                    del self.data[elem]
-                    return
-            raise KeyError(key)
-        super().__delitem__(key)
-
-    def __contains__(self, key: object) -> bool:
-        if isinstance(key, str):
-            return key in (name for names in self for name in names)
-        return super().__contains__(key)
-
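-# Behaviour sketch for the string-key lookup described above (hypothetical
-# values, patterned after the fixtures in tests/test_gindex.py):
-#
-#   >>> names = GIndexNameDict()
-#   >>> names[GIndexName("det", "detector")] = ("01", "02")
-#   >>> names["det"] is names["detector"]     # short and full name hit the same entry
-#   True
-#   >>> names["subdet"] = ("03",)             # auto-creates GIndexName("subdet", "subdet")
-#   >>> "subdetector" in names
-#   False
-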
-
-@define(hash=True, slots=True)
-class GNIndexInstance:
-    """
-    The n-dimensional index instance class, storing `indices`
-    (`type=list[GIndexInstance]`) and `names` (`type=dict[GIndexName, ]`).
-    Contains the `format` method, which substitutes `value` for `name.short`
-    and `name.full`.
-
-    :param order: The tuple of the `GIndexInstance` names
-        (`type=str`, use `name.short` or `name.full`) and any `int`,
-        which marks the place of the formatted `string`
-    """
-
-    _indices: Tuple[GIndexInstance, ...] = field(default=tuple(), alias='indices')
-    order: tuple = field(default=tuple())
-    sep: str = field(validator=instance_of(str), default="_")
-    withname: bool = field(validator=instance_of(bool), default=False)
-    namemode: Literal["s", "f", "short", "full"] = field(
-        validator=namemode_validator, default="s"
-    )
-    namesep: str = field(validator=instance_of(str), default="")
-    dict: GIndexNameDict = field(
-        default=Factory(lambda self: self._create_dict(), takes_self=True),
-        repr=False,
-    )
-
-    @property
-    def values(self) -> Tuple[str, ...]:
-        return tuple(instance.value for instance in self._indices)
-
-    def __attrs_post_init__(self) -> None:
-        self._check_indices()
-        if not self.order:
-            self.order = self._auto_order()
-        else:
-            self._check_order(self.order)
-        self.sort()
-
-    def sort(
-        self, order: Optional[tuple] = None, rest2end: bool = True
-    ) -> None:
-        if not order:
-            order = self.order
-        indices = [self.dict[name] for name in order if name in self.dict]
-        if len(self._indices) != len(indices):
-            names = set(self.dict.keys()) - set(order)
-            for name in names:
-                if rest2end:
-                    indices.append(self.dict[name])
-                else:
-                    indices.insert(0, self.dict[name])
-        self._indices = tuple(indices)
-
-    def _create_dict(self) -> GIndexNameDict:
-        return GIndexNameDict({val.name: val for val in self._indices})
-
-    def _auto_order(self) -> tuple:
-        return (True,) + tuple(val.name.s for val in self._indices)
-
-    def _check_order(self, order: Sequence) -> None:
-        if not isinstance(order, Sequence):
-            raise TypeError(
-                f"'order' must be `Sequence`, but given '{type(order)}'!"
-            )
-        elif not isinstance(order, tuple):
-            order = tuple(order)
-
-    def _check_indices(self) -> None:
-        if not isinstance(self._indices, (Sequence, set)):
-            raise TypeError(
-                f"'indices' must be `Sequence`, but given '{type(self._indices)}'!"
-            )
-        elif not all(isinstance(x, GIndexInstance) for x in self._indices):
-            raise ValueError(
-                "'indices' must be `Sequence[GIndexInstance]`, "
-                f"but given '{self._indices}'!"
-            )
-        elif not isinstance(self._indices, tuple):
-            self._indices = tuple(self._indices)
-
-    def formatwith(
-        self,
-        string: str,
-        withname: bool = False,
-        namemode: Literal["s", "f", "short", "full"] = "s",
-        sep: Optional[str] = None,
-        namesep: Optional[str] = None,
-        order: Optional[tuple] = None,
-    ) -> str:
-        if not isinstance(string, str):
-            raise TypeError(
-                f"'string' must be 'str', but given {type(string)}!"
-            )
-        if not order:
-            order = self.order
-        else:
-            self._check_order(order)
-        if not sep:
-            sep = self.sep
-        if not namesep:
-            namesep = self.namesep
-        ordered = []
-        for name in order:
-            if name in self.dict:
-                ordered.append(
-                    self.dict[name].formattedwith(
-                        sep=sep,
-                        withname=withname,
-                        namemode=namemode,
-                        namesep=namesep,
-                    )
-                )
-            elif isinstance(name, int):
-                ordered.append(string)
-        return "".join(ordered)
-
-    def formattedwith(
-        self,
-        withname: bool = False,
-        namemode: Literal["s", "f", "short", "full"] = "s",
-        sep: Optional[str] = None,
-        namesep: Optional[str] = None,
-        order: Optional[tuple] = None,
-    ) -> str:
-        if not order:
-            order = self.order
-        else:
-            self._check_order(order)
-        if not sep:
-            sep = self.sep
-        if not namesep:
-            namesep = self.namesep
-        return "".join(
-            self.dict[name].formattedwith(
-                withname=withname, namemode=namemode, sep=sep, namesep=namesep
-            )
-            for name in order
-            if name in self.dict
-        )
-
-    def format(self, string: str) -> str:
-        if not isinstance(string, str):
-            raise TypeError(
-                f"'string' must be 'str', but given {type(string)}!"
-            )
-        ordered = []
-        for name in self.order:
-            if name in self.dict:
-                ordered.append(self.dict[name].formatted())
-            elif name == True:
-                ordered.append(string)
-        return "".join(ordered)
-
-    def formatted(self) -> str:
-        return "".join(
-            self.dict[name].formatted()
-            for name in self.order
-            if name in self.dict
-        )
-
-    def size(self) -> int:
-        """Returns the size of the list with indices (number of variants)"""
-        return len(self._indices)
-
-    def copy(self, deep: bool = False) -> GNIndexInstance:
-        """Returns a copy of the object"""
-
-        if deep:
-            ret = GNIndexInstance(
-                indices=tuple(self._indices),
-                order=tuple(self.order),
-                sep=str(self.sep),
-                withname=bool(self.withname),
-                namemode=str(self.namemode),  # type: ignore
-                namesep=str(self.namesep),
-            )
-        else:
-            ret = GNIndexInstance(
-                indices=self._indices,
-                order=self.order,
-                sep=self.sep,
-                withname=self.withname,
-                namemode=self.namemode,
-                namesep=self.namesep,
-            )
-
-        return ret
-
-    def copywith(self, **kwargs) -> GNIndexInstance:
-        """Returns a copy of the object with updated fields from `kwargs`"""
-        if kwargs.pop("deep", True):
-            ret = GNIndexInstance(
-                indices=kwargs.pop("indices", tuple(self._indices)),
-                order=kwargs.pop("order", tuple(self.order)),
-                sep=kwargs.pop("sep", str(self.sep)),
-                withname=kwargs.pop("withname", bool(self.withname)),
-                namemode=kwargs.pop("namemode", str(self.namemode)),
-                namesep=kwargs.pop("namesep", str(self.namesep)),
-            )
-        else:
-            ret = GNIndexInstance(
-                indices=kwargs.pop("indices", self._indices),
-                order=kwargs.pop("order", self.order),
-                sep=kwargs.pop("sep", self.sep),
-                withname=kwargs.pop("withname", self.withname),
-                namemode=kwargs.pop("namemode", self.namemode),
-                namesep=kwargs.pop("namesep", self.namesep),
-            )
-
-        if kwargs:
-            raise RuntimeError(f"GNIndexInstance.copywith() unparsed arguments: {kwargs}")
-
-        return ret
-
-    def __iter__(self) -> Iterator[GIndexInstance]:
-        yield from self._indices
-
-    def __getitem__(self, key: int) -> GIndexInstance:
-        if not isinstance(key, int):
-            raise TypeError(f"'key' must be 'int', but given '{type(key)}'!")
-        return self._indices[key]
-
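-# Formatting sketch for GNIndexInstance (illustrative; the expected strings
-# follow test_gnindex in tests/test_gindex.py):
-#
-#   >>> det = GIndex(GIndexName("det", "detector"), ("01", "02"))
-#   >>> sub = GIndex(GIndexName("subdet", "subdetector"), ("01", "02"))
-#   >>> inst = GNIndexInstance(indices=(det[0], sub[0]))
-#   >>> inst.format("Spectrum")         # the string goes to the `True` slot of `order`
-#   'Spectrum_01_01'
-#   >>> inst.formatwith("Spectrum", order=("det", -1, "subdet"))
-#   '_01Spectrum_01'
-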
-
-@define(hash=True, slots=True)
-class GNIndex:
-    """
-    The n-dimensional index class, storing the `indices`
-    (a set of the 1-dim indices), the indices `order`, and useful methods
-    """
-
-    _indices: Tuple[GIndex, ...] = field(default=tuple(), alias='indices')
-    order: tuple = field(default=tuple())
-    sep: str = field(validator=instance_of(str), default="_")
-    withname: bool = field(validator=instance_of(bool), default=False)
-    namemode: Literal["s", "f", "short", "full"] = field(
-        validator=namemode_validator, default="s"
-    )
-    namesep: str = field(validator=instance_of(str), default="")
-    dict: GIndexNameDict = field(
-        default=Factory(lambda self: self._create_dict(), takes_self=True),
-        repr=False,
-    )
-
-    @staticmethod
-    def from_dict(data: dict) -> "GNIndex":
-        return GNIndex(
-            tuple(
-                GIndex(GIndexName(name, name), values)
-                if isinstance(name, str)
-                else GIndex(GIndexName(*name), values)
-                for name, values in data.items()
-            )
-        )
-
-    def __attrs_post_init__(self) -> None:
-        self._check_indices()
-        if not self.order:
-            self.order = self._auto_order()
-        else:
-            self._check_order(self.order)
-        self.sort()
-
-    def _auto_order(self) -> tuple:
-        return (True,) + tuple(val.name.s for val in self._indices)
-
-    def _check_order(self, order: Sequence) -> None:
-        if not isinstance(order, Sequence):
-            raise TypeError(
-                f"'order' must be `Sequence`, but given '{type(order)}'!"
-            )
-        elif not isinstance(order, tuple):
-            order = tuple(order)
-
-    def _check_indices(self) -> None:
-        if not isinstance(self._indices, (Sequence, set)):
-            raise TypeError(
-                f"'indices' must be `Sequence`, but given '{type(self._indices)}'!"
-            )
-        elif not all(isinstance(x, GIndex) for x in self._indices):
-            raise ValueError(
-                "'indices' must be `Sequence[GIndex]`, "
-                f"but given '{self._indices}'!"
-            )
-        elif not isinstance(self._indices, tuple):
-            self._indices = tuple(self._indices)
-
-    def _create_dict(self) -> GIndexNameDict:
-        return GIndexNameDict({val.name: val for val in self._indices})
-
-    def rest(
-        self,
-        names: Union[
-            str,
-            GIndexName,
-            Sequence[Union[str, GIndexName, Sequence[Union[str, GIndexName]]]],
-        ],
-    ) -> Optional[GNIndex]:
-        """
-        Returns the indices that are not listed in `names`
-        (a usage sketch follows this method).
-
-        :param names: A `str`, `Sequence[str]`,
-            or `Sequence[Sequence[str]]` of index names,
-            which will be used to find the rest of the indices.
-            It is possible to use `GIndexName` instead of `str`.
-
-        :return: A `GNIndex` with a tuple of the remaining indices
-        """
-        if len(self._indices) == 0:
-            return None
-        if isinstance(names, (list, tuple, set)):
-            return (
-                self.copywith(indices=indices)
-                if len(names) != 0
-                and (
-                    indices := tuple(
-                        self._rest(self.dict.copy(), names).values()
-                    )
-                )
-                else None
-            )
-        elif isinstance(names, (str, GIndexName)):
-            tmpdict = self.dict.copy()
-            tmpdict.pop(names)
-            return self.copywith(indices=tuple(tmpdict.values()))
-        raise TypeError(
-            f"'names' must be `Sequence[str]`, but given '{type(names)}'!"
-        )
-
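-    # Sketch of the `rest`/`split` behaviour (mirrors test_gnindex_rest_split;
-    # `nind` stands for a GNIndex over indices named det, subdet and i):
-    #
-    #   nind.rest("det")              -> GNIndex over (subdet, i)
-    #   nind.rest(("det", "subdet"))  -> GNIndex over (i,)
-    #   nind.rest(nind.names1d())     -> None (nothing is left)
-    #   nind.split(("det",))          -> (GNIndex over (det,), GNIndex over (subdet, i))
-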
-    def _rest(
-        self,
-        tmpdict: GIndexNameDict,
-        names: Sequence[
-            Union[str, GIndexName, Sequence[Union[str, GIndexName]]]
-        ],
-    ) -> GIndexNameDict:
-        for name in names:
-            if isinstance(name, (str, GIndexName)):
-                tmpdict.pop(name)
-            elif isinstance(name, (list, tuple, set)):
-                self._rest(tmpdict, name)
-        return tmpdict
-
-    def split(
-        self,
-        names: Sequence[
-            Union[str, GIndexName, Sequence[Union[str, GIndexName]]]
-        ],
-        rest: bool = True,
-    ) -> tuple:
-        if not isinstance(names, (list, tuple, set)):
-            raise TypeError(
-                "'names' must be Sequence[str, GIndexName], "
-                f"but given {type(names)}!"
-            )
-        return (
-            self._split(names),
-            (self.rest(names) if rest else None),
-        )
-
-    # TODO: do we need an empty GNIndex or None?
-    #       if it is empty, there is no error while iterating
-    def _split(
-        self,
-        names: Sequence[
-            Union[Sequence[Union[str, GIndexName]], str, GIndexName]
-        ],
-    ) -> GNIndex:
-        res = []
-        if isinstance(names, (list, tuple, set)):
-            res.extend(self.__split(name) for name in names)
-        else:
-            res.append(self.__split(names))
-        return self.copywith(indices=tuple(res))
-
-    def __split(self, name: Any) -> GIndex:
-        if not isinstance(name, (str, GIndexName)):
-            raise TypeError(
-                "It is possible split only by 2D Sequence[str, GIndexName]!"
-            )
-        if elem := self.dict.get(name, False):
-            return elem
-        raise ValueError(f"There is no index with name '{name}'!")
-
-    def sub(self, names: tuple) -> GNIndex:
-        return self.copywith(
-            indices=tuple(
-                val
-                for val in self._indices
-                if (val.name.s in names or val.name.f in names)
-            )
-        )
-
-    subindex = sub
-
-    def union(self, *args, **kwargs) -> GNIndex:
-        indices = [*self._indices]
-        for arg in args:
-            if not isinstance(arg, GNIndex):
-                raise TypeError(
-                    f"'args' must be `GNIndex`, but given '{type(arg)}'"
-                )
-
-            indices.extend(index for index in arg._indices if index not in indices)
-        return self.copywith(indices=indices, **kwargs)
-
-    def __add__(self, right: GNIndex) -> GNIndex:
-        if not isinstance(right, GNIndex):
-            raise TypeError(
-                f"'right' must be `GNIndex`, but given '{type(right)}'"
-            )
-        elif self.order != right.order:
-            raise AttributeError(
-                "'right' must have the same `order` as the left,"
-                f"but given '{self.order=}', '{right.order=}'"
-            )
-        return self.copywith(indices=set(self._indices + right._indices))
-
-    def __or__(self, right: GNIndex) -> GNIndex:
-        return self.__add__(right)
-
-    def __sub__(self, right: GNIndex) -> GNIndex:
-        if not isinstance(right, GNIndex):
-            raise TypeError(
-                f"'right' must be `GNIndex`, but given '{type(right)}'"
-            )
-        elif self.order != right.order:
-            raise AttributeError(
-                "'right' must have the same `order` as the left,"
-                f"but given '{self.order=}', '{right.order=}'"
-            )
-        return self.copywith(indices=set(self._indices) - set(right._indices))
-
-    def __xor__(self, right: GNIndex) -> GNIndex:
-        return self.__sub__(right)
-
-    def sort(self, order: Optional[tuple] = None) -> None:
-        if not order:
-            order = self.order
-        tmpdict = self.dict.copy()
-        indices = [tmpdict.pop(name) for name in order if name in tmpdict]
-        if idxs := tmpdict.values():
-            indices.extend(idxs)
-        self._indices = tuple(indices)
-
-    def dim(self) -> int:
-        """Returns the dimension of the index (size of the indices list)"""
-        return len(self._indices)
-
-    @property
-    def values(self) -> Tuple[Tuple[str, ...], ...]:
-        return tuple(n.values for n in self)
-
-    def instances(self) -> Tuple[Tuple[GNIndexInstance, ...], ...]:
-        """Returns a tuple of the indices instances tuples (2D version)"""
-        return tuple(ind.instances() for ind in self._indices)
-
-    def instances1d(self) -> Tuple[GNIndexInstance, ...]:
-        """Returns a tuple of the indices instances (1D version)"""
-        return tuple(inst for ind in self._indices for inst in ind.instances())
-
-    def names(self) -> tuple:
-        return tuple(val.name for val in self._indices)
-
-    def names1d(
-        self, namemode: Literal["s", "f", "short", "full"] = "s"
-    ) -> tuple:
-        return tuple(val.name[namemode] for val in self._indices)
-
-    def __iter__(self) -> Iterator[GNIndexInstance]:
-        for val in product(*self.instances()):
-            yield GNIndexInstance(
-                indices=val,  # type:ignore
-                order=self.order,
-                sep=self.sep,
-                withname=self.withname,
-                namemode=self.namemode,
-                namesep=self.namesep,
-            )
-
-    def copy(self, deep: bool = False) -> GNIndex:
-        """Returns a copy of the object"""
-        if deep:
-            ret = GNIndex(
-                indices=tuple(self._indices),
-                order=tuple(self.order),
-                sep=str(self.sep),
-                withname=bool(self.withname),
-                namemode=str(self.namemode),  # type:ignore
-                namesep=str(self.namesep),
-            )
-        else:
-            ret = GNIndex(
-                indices=self._indices,
-                order=self.order,
-                sep=self.sep,
-                withname=self.withname,
-                namemode=self.namemode,
-                namesep=self.namesep,
-            )
-
-        return ret
-
-    def copywith(self, **kwargs) -> GNIndex:
-        """Returns a copy of the object with updated fields from `kwargs`"""
-
-        if kwargs.pop("deep", True):
-            ret = GNIndex(
-                indices=kwargs.pop("indices", tuple(self._indices)),
-                order=kwargs.pop("order", tuple(self.order)),
-                sep=kwargs.pop("sep", str(self.sep)),
-                withname=kwargs.pop("withname", bool(self.withname)),
-                namemode=kwargs.pop("namemode", str(self.namemode)),
-                namesep=kwargs.pop("namesep", str(self.namesep)),
-            )
-        else:
-            ret = GNIndex(
-                indices=kwargs.pop("indices", self._indices),
-                order=kwargs.pop("order", self.order),
-                sep=kwargs.pop("sep", self.sep),
-                withname=kwargs.pop("withname", self.withname),
-                namemode=kwargs.pop("namemode", self.namemode),
-                namesep=kwargs.pop("namesep", self.namesep),
-            )
-
-        if kwargs:
-            raise RuntimeError(f"GNIndex.copywith() unparsed arguments: {kwargs}")
-
-        return ret
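-
-
-# Construction sketch for GNIndex.from_dict (illustrative; follows
-# tests/test_init.py, the values below are hypothetical):
-#
-#   >>> idx = GNIndex.from_dict({
-#   ...     ("a", "alpha"): ("a1", "a2"),
-#   ...     "b": ("b1", "b2"),
-#   ... })
-#   >>> [inst.values for inst in idx][:2]
-#   [('a1', 'b1'), ('a1', 'b2')]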
diff --git a/subtrees/dagflow/subtrees/gindex/pytest.ini b/subtrees/dagflow/subtrees/gindex/pytest.ini
deleted file mode 100755
index 306a4bec26a6aec2df5fedbaab4ead38a2df9256..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/pytest.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[pytest]
-testpaths=tests/
-;addopts= --cov-report term --cov=./ --cov-report xml:cov.xml
diff --git a/subtrees/dagflow/subtrees/gindex/requirements.txt b/subtrees/dagflow/subtrees/gindex/requirements.txt
deleted file mode 100644
index 70bf9a12eea95805252de0d4a6a671da7280e1dd..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-pytest
-pytest-benchmark
-pytest-cov
-sphinx
-pydata_sphinx_theme
-m2r2
-schema
-attrs==23.1.0
--e git+https://git.jinr.ru/gna/gparser.git#egg=gparser
diff --git a/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py b/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py
deleted file mode 100644
index f0e24bdf4d353d2fc31e6cb48871740b611f6095..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/tests/test_gindex.py
+++ /dev/null
@@ -1,245 +0,0 @@
-from itertools import product
-
-import pytest
-from gindex.gindex import GIndex, GIndexInstance, GIndexName
-from gindex.gnindex import GNIndex, GNIndexInstance
-
-@pytest.fixture
-def detector_name() -> GIndexName:
-    return GIndexName("det", "detector")
-
-
-@pytest.fixture
-def subdetector_name() -> GIndexName:
-    return GIndexName("subdet", "subdetector")
-
-
-@pytest.fixture
-def index_values() -> tuple:
-    return ("01", "02", "03")
-
-
-@pytest.fixture
-def detector(detector_name, index_values) -> GIndex:
-    return GIndex(detector_name, index_values)
-
-
-@pytest.fixture
-def subdetector(subdetector_name, index_values) -> GIndex:
-    return GIndex(subdetector_name, index_values)
-
-
-@pytest.fixture
-def detector01(detector) -> GIndexInstance:
-    return detector[0]
-
-
-def test_gindex(detector, subdetector, detector01):
-    assert detector[0] == detector01
-    assert detector01.copywith(value="02") == detector[1]
-    assert id(detector01.name) == id(detector[0].name)
-    assert all(detector.name == x.name for x in detector)
-    assert all(subdetector.name == x.name for x in subdetector)
-    assert detector01.copy() == detector01
-    assert (
-        detector.copywith(name=subdetector.name, values=subdetector.values)
-        == subdetector
-    )
-    assert (
-        subdetector.copywith(name=detector.name, values=detector.values)
-        == detector
-    )
-
-
-def test_gindex_format(detector01, detector_name):
-    assert all(
-        x == "" for x in (detector01.formatwith(""), detector01.format(""))
-    )
-    assert all(
-        x == "_01"
-        for x in (
-            detector01.formattedwith(),
-            detector01.formatted(),
-            detector01.formatwith("{here}", place="here"),
-        )
-    )
-    assert (
-        detector01.formatwith(
-            "back{nind}",
-            sep="|",
-            withname=True,
-            namemode="f",
-            namesep=".",
-            place="nind",
-        )
-        == "back|detector.01"
-    )
-    for name in detector_name:
-        assert detector01.format("{" f"{name}" "}") == "_01"
-        assert detector01.format("back{" f"{name}" "}") == "back_01"
-        assert (
-            detector01.formatwith("back{" f"{name}" "}", sep=".") == "back.01"
-        )
-        assert (
-            detector01.formatwith("back{" f"{name}" "}", withname=True)
-            == "back_det_01"
-        )
-        assert (
-            detector01.formatwith(
-                "back{" f"{name}" "}", withname=True, namesep="."
-            )
-            == "back_det.01"
-        )
-        assert (
-            detector01.formatwith(
-                "back{" f"{name}" "}", withname=True, namemode="f"
-            )
-            == "back_detector_01"
-        )
-        assert (
-            detector01.formatwith(
-                "back{" f"{name}" "}",
-                sep="|",
-                withname=True,
-                namemode="f",
-                namesep=".",
-            )
-            == "back|detector.01"
-        )
-
-
-def test_gnindex(detector, subdetector):
-    nind = GNIndexInstance(indices=(detector[0], subdetector[0]))
-    assert nind
-    assert nind.format(string="Spectrum") == (
-        f"Spectrum{detector[0].sep}{detector[0].value}"
-        f"{subdetector[0].sep}{subdetector[0].value}"
-    )
-    assert nind.formatwith(string="Spectrum", order=("det", -1, "subdet")) == (
-        f"{detector[0].sep}{detector[0].value}Spectrum"
-        f"{subdetector[0].sep}{subdetector[0].value}"
-    )
-    assert nind.formatwith(string="Spectrum", order=("subdet", "det", -1)) == (
-        f"{subdetector[0].sep}{subdetector[0].value}"
-        f"{detector[0].sep}{detector[0].value}Spectrum"
-    )
-    assert nind.formatwith(string="Spectrum", order=("det", -1)) == (
-        f"{detector[0].sep}{detector[0].value}Spectrum"
-    )
-
-
-def test_gnindex_iter(detector, subdetector, index_values):
-    sep = "_"
-    nind = GNIndex(indices=(detector, subdetector), sep=sep)
-    nvals = tuple(
-        sep.join(pair) for pair in product(index_values, index_values)
-    )
-    for i, inst in enumerate(nind):
-        assert isinstance(inst, GNIndexInstance)
-        assert inst.formattedwith(sep=sep) == f"{sep}{nvals[i]}"
-
-
-def test_gnindex_arithmetic(detector, subdetector):
-    gorder = ("det", "subdet", "i")
-    nind = GNIndex(indices=(detector, subdetector), order=gorder)
-    ind = GIndex(GIndexName("i", "index"), ("1", "2"))
-    nind2 = GNIndex(indices=(detector, ind), order=gorder)
-    nind3 = GNIndex(indices=(ind,), order=gorder)
-    # `sub` and `-`
-    assert all(x - x == x.copywith(indices=tuple()) for x in (nind, nind2))
-    assert all(
-        x.sub(("new",)) == x.copywith(indices=tuple()) for x in (nind, nind2)
-    )
-    assert all(x.sub(x.names1d()) == x for x in (nind, nind2))
-    assert nind2.sub(("i",)) == nind.copywith(indices=(ind,))
-    # `merge` and  `+`
-    assert all(
-        len(x._indices) == len(nind._indices)
-        and set(x._indices) == set(nind._indices)
-        and x.order == gorder
-        for x in (nind + nind, nind | nind, nind.union(nind))
-    )
-    assert all(
-        (y := nind + nind2) and y == x and y.order == gorder
-        for x in (
-            nind.copywith(indices={detector, subdetector, ind}),
-            nind2.copywith(indices={detector, subdetector, ind}),
-            nind.union(nind3),
-            nind | nind2,
-        )
-    )
-
-
-def test_gnindex_rest_split(
-    detector, subdetector, detector_name, subdetector_name
-):
-    gorder = ("det", "subdet", "i")
-    iname = GIndexName("i", "index")
-    ind = GIndex(iname, ("1", "2"))
-    nind = GNIndex(indices=(detector, subdetector, ind), order=gorder)
-    # test `dict`
-    assert all(
-        x in nind.dict
-        for x in (
-            iname,
-            detector_name,
-            subdetector_name,
-            "i",
-            "index",
-            *detector_name,
-            *subdetector_name,
-        )
-    )
-    # test `rest`
-    for elem in (
-        nind.rest(val)
-        for val in ("det", "detector", ("det",), ("detector",), detector_name)
-    ):
-        assert isinstance(elem, GNIndex)
-        assert elem.order == nind.order
-        assert elem._indices == (subdetector, ind)
-    for elem in (
-        nind.rest(val) for val in (iname, "i", "index", ("i",), ("index",))
-    ):
-        assert isinstance(elem, GNIndex)
-        assert elem.order == nind.order
-        assert elem._indices == (detector, subdetector)
-    # test `split`
-    assert (nind, None) == nind.split(nind.names1d())
-    assert (nind.copywith(indices=tuple()), None) == nind.split(tuple())
-    for elem, rest in (
-        nind.split(val) for val in (("det",), ("detector",), (detector_name,))
-    ):
-        assert isinstance(elem, GNIndex) and isinstance(rest, GNIndex)
-        assert elem.order == nind.order and rest.order == nind.order
-        assert elem._indices == (detector,) and rest._indices == (subdetector, ind)
-    for elem, rest in (
-        nind.split(val)
-        for val in (
-            ("subdet",),
-            ("subdetector",),
-            (subdetector_name,),
-        )
-    ):
-        assert isinstance(elem, GNIndex) and isinstance(rest, GNIndex)
-        assert elem.order == nind.order and rest.order == nind.order
-        assert elem._indices == (subdetector,) and rest._indices == (detector, ind)
-    for elem, rest in (
-        nind.split(val)
-        for val in (
-            ("detector", "subdet"),
-            ("det", "subdetector"),
-            (detector_name, subdetector_name),
-        )
-    ):
-        assert isinstance(elem, GNIndex) and isinstance(rest, GNIndex)
-        assert elem.order == nind.order and rest.order == nind.order
-        assert elem._indices == (detector, subdetector) and rest._indices == (ind,)
-
-
-def test_gnindex_order_exception(detector, subdetector, detector_name):
-    orders = (object, 12, {4, 3, 2}, detector_name, detector)
-    for order in orders:
-        with pytest.raises(TypeError):
-            GNIndexInstance(indices=(detector[0], subdetector[0]), order=order)  # type: ignore
-        with pytest.raises(TypeError):
-            GNIndex(indices=(detector, subdetector), order=order)  # type: ignore
diff --git a/subtrees/dagflow/subtrees/gindex/tests/test_init.py b/subtrees/dagflow/subtrees/gindex/tests/test_init.py
deleted file mode 100644
index 09f4ac623733d01c17ad41cf290d66e8a071149f..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/subtrees/gindex/tests/test_init.py
+++ /dev/null
@@ -1,23 +0,0 @@
-
-from gindex import GNIndex
-
-def test_init():
-	gi1 = GNIndex.from_dict({
-		'a': ('a1', 'a2', 'a3'),
-		'b': ('b1', 'b2', 'b3'),
-		'c': ('c1', 'c2', 'b3'),
-		})
-
-	gi2 = GNIndex.from_dict({
-		('a', 'alpha'): ('a1', 'a2', 'a3'),
-		('b', 'beta'): ('b1', 'b2', 'b3'),
-		'c': ('c1', 'c2', 'b3'),
-		})
-
-
-	check = [
-			('a1', 'b1', 'c1'),
-			('a1', 'b1', 'c2'),
-			]
-	for idx, cmpto in zip(gi2, check):
-		assert idx.values==cmpto
diff --git a/subtrees/dagflow/test/core/allocation.py b/subtrees/dagflow/test/core/allocation.py
deleted file mode 100644
index ccf8baf0f098447cca22abd7d663dc65f81fb95f..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/core/allocation.py
+++ /dev/null
@@ -1,34 +0,0 @@
-import numpy as np
-
-from dagflow.graph import Graph
-
-def test_output_allocation_1():
-	data = np.arange(12, dtype='d').reshape(3,4)
-	with Graph(close=True) as graph:
-		n1 = graph.add_node("node1", typefunc=False)
-		n2 = graph.add_node("node2", typefunc=False)
-
-		out1 = n1._add_output("o1", data=data, allocatable=False)
-		in1 = n2._add_input("i1")
-
-		out1 >> in1
-
-	assert (data==out1.data).all()
-
-def test_output_allocation_2():
-	data = np.arange(12, dtype='d').reshape(3,4)
-	with Graph(close=True) as graph:
-		n1 = graph.add_node("node1", typefunc=False)
-		n2 = graph.add_node("node2", typefunc=False)
-
-		out1 = n1._add_output("o1", dtype=data.dtype, shape=data.shape)
-		in1 = n2._add_input("i1", data=data)
-
-		out1 >> in1
-
-	assert (data==out1.data).all()
-	assert (data==in1.data).all()
-	assert (data==in1._own_data).all()
-	assert data.dtype==out1.data.dtype
-	assert data.dtype==in1.data.dtype
-	assert data.dtype==in1._own_data.dtype
diff --git a/subtrees/dagflow/test/core/outputs.py b/subtrees/dagflow/test/core/outputs.py
deleted file mode 100644
index 37263af43e94d38742de00c88f09fd8627187607..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/core/outputs.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from dagflow.lib.Array import Array
-from dagflow.lib.Sum import Sum
-from dagflow.graph import Graph
-from dagflow.output import SettableOutput
-from dagflow.exception import CriticalError
-import pytest
-
-def test_SettableOutput_01():
-	value = 123.
-	array_in = (value, )
-	array_alt = (value+1, )
-	with Graph() as g:
-		va = Array('test', array_in)
-		s = Sum('add')
-		va >> s
-	g.close()
-
-	va.taint()
-	newout = SettableOutput.take_over(va.outputs.array)
-	newout.set(array_alt)
-
-	assert va.outputs.array is newout
-	assert s.inputs[0].parent_output is newout
-	assert s.outputs.result.data==array_alt
-
-def test_SettableOutput_02():
-	"""Test SettableOutput, Node.invalidate_parents()"""
-	value = 123.
-	array_in = (value, )
-	array_alt = (value+1, )
-	with Graph() as g:
-		va = Array('test', array_in)
-		sm1 = Sum('add 1')
-		sm2 = Sum('add 2')
-		va >> sm1 >> sm2
-	g.close()
-
-	output1 = va.outputs[0]
-	output2 = sm1.outputs[0]
-	output3 = sm2.outputs[0]
-
-	assert va.tainted==True
-	assert sm1.tainted==True
-	assert sm2.tainted==True
-	assert va.invalid==False
-	assert sm1.invalid==False
-	assert sm2.invalid==False
-	assert output3.data==array_in
-	assert va.tainted==False
-	assert sm1.tainted==False
-	assert sm2.tainted==False
-
-	newout = SettableOutput.take_over(sm1.outputs[0])
-	assert va.tainted==False
-	assert sm1.tainted==False
-	assert sm2.tainted==False
-	assert va.invalid==False
-	assert sm1.invalid==False
-	assert sm2.invalid==False
-	assert output3.data==array_in
-
-	newout.set(array_alt)
-	assert va.tainted==True
-	assert sm1.tainted==False
-	assert sm2.tainted==True
-	assert va.invalid==True
-	assert sm1.invalid==False
-	assert sm2.invalid==False
-	assert output2.data==array_alt
-	assert output3.data==array_alt
-	with pytest.raises(CriticalError):
-		output1.data==array_alt
-
-	va.invalid = False
-	assert va.tainted==True
-	assert sm1.tainted==True
-	assert sm2.tainted==True
-	assert va.invalid==False
-	assert sm1.invalid==False
-	assert sm2.invalid==False
-	assert output3.data==array_in
-	assert output2.data==array_in
-	assert output1.data==array_in
-
diff --git a/subtrees/dagflow/test/nodes/test_Array.py b/subtrees/dagflow/test/nodes/test_Array.py
deleted file mode 100644
index 46f86032c2cb92b9628687d1b4b45e57fc6781b5..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_Array.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.Sum import Sum
-from dagflow.graphviz import savegraph
-
-from numpy import arange
-import pytest
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_Array_00(dtype):
-    array = arange(12.0, dtype=dtype).reshape(3,4)
-    with Graph(close=True) as graph:
-        arr1 = Array('array: store', array, mode='store')
-        arr2 = Array('array: store (weak)', array, mode='store_weak')
-        arr3 = Array('array: fill', array, mode='fill')
-
-    assert arr1.tainted==True
-    assert arr2.tainted==True
-    assert arr3.tainted==True
-
-    out1 = arr1.outputs['array']
-    out2 = arr2.outputs['array']
-    out3 = arr3.outputs['array']
-
-    assert out1.owns_buffer == True
-    assert out2.owns_buffer == False
-    assert out3.owns_buffer == True
-
-    assert out1.allocatable == False
-    assert out2.allocatable == True
-    assert out3.allocatable == True
-
-    assert (out1._data==array).all()
-    assert (out2._data==array).all()
-    assert (out3._data==0.0).all()
-
-    result1 = arr1.get_data(0)
-    result2 = arr2.get_data(0)
-    result3 = arr3.get_data(0)
-
-    assert (result1==array).all()
-    assert (result2==array).all()
-    assert (result3==array).all()
-    assert arr1.tainted==False
-    assert arr2.tainted==False
-    assert arr3.tainted==False
-
-    savegraph(graph, f"output/test_array_00_{dtype}.png")
-
-def test_Array_01_set():
-    value = 123.
-    array_in = (value, )
-    array_alt = (value+1, )
-    va = Array('test', array_in)
-    sm = Sum('sum')
-    va >> sm
-    va.close()
-    sm.close()
-
-    output = va.outputs[0]
-    output2 = sm.outputs[0]
-
-    assert va.tainted==True
-    assert sm.tainted==True
-    assert output.data[0]==value
-    assert output2.data[0]==value
-    assert va.tainted==False
-    assert sm.tainted==False
-
-    assert va.set(array_in, check_taint=True)==False
-    assert va.tainted==False
-    assert sm.tainted==False
-    assert (output.data==array_in).all()
-    assert (output2.data==array_in).all()
-
-    assert va.set(array_in)==True
-    assert va.tainted==False
-    assert sm.tainted==True
-    assert (output.data==array_in).all()
-    assert (output2.data==array_in).all()
-    assert va.tainted==False
-    assert sm.tainted==False
-
-    assert va.set(array_alt, check_taint=True)==True
-    assert va.tainted==False
-    assert sm.tainted==True
-    assert (output.data==array_alt).all()
-    assert (output2.data==array_alt).all()
-    assert va.tainted==False
-    assert sm.tainted==False
diff --git a/subtrees/dagflow/test/nodes/test_Cholesky.py b/subtrees/dagflow/test/nodes/test_Cholesky.py
deleted file mode 100644
index 328f5985e9d3a3ac72e58be9f1033d65d955031f..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_Cholesky.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.exception import TypeFunctionError
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.Cholesky import Cholesky
-import numpy as np
-import scipy
-from pytest import raises
-from dagflow.graphviz import savegraph
-
-import pytest
-
-@pytest.mark.parametrize("dtype", ('d', 'f'))
-def test_Cholesky_00(dtype):
-    inV = np.array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
-    inV2 = inV@inV
-    inD = np.diag(inV)
-    inL2d1 = scipy.linalg.cholesky(inV, lower=True)
-    inL2d2 = scipy.linalg.cholesky(inV2, lower=True)
-    inL1d = np.sqrt(inD)
-
-    with Graph(close=True) as graph:
-        V1 = Array('V1', inV, mode='store')
-        V2 = Array('V2', inV2, mode='store')
-        D = Array('D', (inD), mode='store')
-        chol2d = Cholesky('Cholesky 2d')
-        chol1d = Cholesky('Cholesky 1d')
-        (V1, V2) >> chol2d
-        D >> chol1d
-
-    assert V1.tainted==True
-    assert V2.tainted==True
-    assert chol2d.tainted==True
-    assert chol1d.tainted==True
-
-    result2d1 = chol2d.get_data(0)
-    result2d2 = chol2d.get_data(1)
-    result1d = chol1d.get_data(0)
-    assert V1.tainted==False
-    assert V2.tainted==False
-    assert D.tainted==False
-    assert chol2d.tainted==False
-    assert chol1d.tainted==False
-
-    assert np.allclose(inL2d1, result2d1, atol=0, rtol=0)
-    assert np.allclose(inL2d2, result2d2, atol=0, rtol=0)
-    assert np.allclose(inL1d, result1d, atol=0, rtol=0)
-
-    savegraph(graph, f"output/test_Cholesky_00_{dtype}.png")
-
-def test_Cholesky_01_typefunctions():
-    inV = np.array([
-        [10, 2,   1],
-        [ 2, 12,  3],
-        ], dtype='d')
-
-    with Graph() as g1:
-        V1 = Array('V1', inV, mode='store')
-        chol1 = Cholesky('Cholesky')
-        V1 >> chol1
-
-    with Graph() as g2:
-        V2 = Array('V2', inV[0], mode='store')
-        chol2 = Cholesky('Cholesky')
-        V2 >> chol1
-
-    with raises(TypeFunctionError):
-        g1.close()
-
-    with raises(TypeFunctionError):
-        g2.close()
diff --git a/subtrees/dagflow/test/nodes/test_CovmatrixFromCormatrix.py b/subtrees/dagflow/test/nodes/test_CovmatrixFromCormatrix.py
deleted file mode 100644
index 67478f1745a8a24c308c68456ee929b7e9a87262..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_CovmatrixFromCormatrix.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-from numpy import arange
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.lib.Array import Array
-from dagflow.lib.CovmatrixFromCormatrix import CovmatrixFromCormatrix
-
-from numpy import array, allclose, tril
-import pytest
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_CovmatrixFromCormatrix_00(dtype):
-    inSigma = arange(1.0, 4.0, dtype=dtype)
-    inC = array([
-        [1.0, 0.5, 0.0],
-        [0.5, 1.0, 0.9],
-        [0.0, 0.9, 1.0],
-        ],
-        dtype=dtype)
-    with Graph(close=True) as graph:
-        matrix = Array('matrix', inC)
-        sigma = Array('sigma', inSigma)
-        cov = CovmatrixFromCormatrix('covariance')
-
-        sigma >> cov.inputs['sigma']
-        matrix >> cov
-
-    inV = inC * inSigma[:,None] * inSigma[None,:]
-    V = cov.get_data()
-
-    assert allclose(inV, V, atol=0, rtol=0)
-    assert allclose(tril(V), tril(V.T), atol=0, rtol=0)
-
-    savegraph(graph, f"output/test_CovmatrixFromCormatrix_00_{dtype}.png", show=['all'])
-
diff --git a/subtrees/dagflow/test/nodes/test_ElSumSq.py b/subtrees/dagflow/test/nodes/test_ElSumSq.py
deleted file mode 100644
index a3e0f3c30906226a9d6f94078ffeed0a300f5fe8..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_ElSumSq.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.ElSumSq import ElSumSq
-from dagflow.graphviz import savegraph
-
-from numpy import arange, sum
-import pytest
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_ElSumSq_01(dtype):
-    arrays_in = tuple(arange(12, dtype=dtype)*i for i in (1, 2, 3))
-    arrays2_in = tuple(a**2 for a in arrays_in)
-
-    with Graph(close=True) as graph:
-        arrays = tuple(Array('test', array_in) for array_in in arrays_in)
-        sm = ElSumSq('sumsq')
-        arrays >> sm
-
-    output = sm.outputs[0]
-
-    res = sum(arrays2_in)
-    assert sm.tainted==True
-    assert all(output.data==res)
-    assert sm.tainted==False
-
-    arrays2_in = (arrays2_in[1],) + arrays2_in[1:]
-    res = sum(arrays2_in)
-    assert arrays[0].set(arrays[1].get_data())
-    assert sm.tainted==True
-    assert all(output.data==res)
-    assert sm.tainted==False
-
-    savegraph(graph, f"output/test_SumSq_00_{dtype}.png", show='all')
diff --git a/subtrees/dagflow/test/nodes/test_Integrator.py b/subtrees/dagflow/test/nodes/test_Integrator.py
deleted file mode 100644
index 23459b5a0ced4755b8996d3454d62743bb809216..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_Integrator.py
+++ /dev/null
@@ -1,242 +0,0 @@
-#!/usr/bin/env python
-from dagflow.exception import TypeFunctionError
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.Integrator import Integrator
-from dagflow.lib.IntegratorSampler import IntegratorSampler
-from dagflow.lib.NodeManyToOne import NodeManyToOne
-from dagflow.lib.NodeOneToOne import NodeOneToOne
-from dagflow.lib.trigonometry import Cos, Sin
-from numpy import allclose, linspace, meshgrid, pi, vectorize
-from pytest import mark, raises
-
-
-@mark.parametrize("align", ("left", "center", "right"))
-def test_Integrator_rect_center(align, debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        npoints = 10
-        edges = Array("edges", linspace(0, pi, npoints + 1))
-        ordersX = Array("ordersX", [1000] * npoints, edges=edges["array"])
-        A = Array("A", edges._data[:-1])
-        B = Array("B", edges._data[1:])
-        sampler = IntegratorSampler("sampler", mode="rect", align=align)
-        integrator = Integrator("integrator")
-        cosf = Cos("cos")
-        sinf = Sin("sin")
-        ordersX >> sampler("ordersX")
-        sampler.outputs["x"] >> cosf
-        A >> sinf
-        B >> sinf
-        sampler.outputs["weights"] >> integrator("weights")
-        cosf.outputs[0] >> integrator
-        ordersX >> integrator("ordersX")
-    res = sinf.outputs[1].data - sinf.outputs[0].data
-    assert allclose(integrator.outputs[0].data, res, atol=1e-4)
-    assert integrator.outputs[0].dd.axes_edges == [edges["array"]]
-
-
-def test_Integrator_trap(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        npoints = 10
-        edges = Array("edges", linspace(0, pi, npoints + 1))
-        ordersX = Array("ordersX", [1000] * npoints, edges=edges["array"])
-        A = Array("A", edges._data[:-1])
-        B = Array("B", edges._data[1:])
-        sampler = IntegratorSampler("sampler", mode="trap")
-        integrator = Integrator("integrator")
-        cosf = Cos("cos")
-        sinf = Sin("sin")
-        ordersX >> sampler("ordersX")
-        sampler.outputs["x"] >> cosf
-        A >> sinf
-        B >> sinf
-        sampler.outputs["weights"] >> integrator("weights")
-        cosf.outputs[0] >> integrator
-        ordersX >> integrator("ordersX")
-    res = sinf.outputs[1].data - sinf.outputs[0].data
-    assert allclose(integrator.outputs[0].data, res, atol=1e-2)
-    assert integrator.outputs[0].dd.axes_edges == [edges["array"]]
-
-
-def f0(x: float) -> float:
-    return 4 * x**3 + 3 * x**2 + 2 * x - 1
-
-
-def fres(x: float) -> float:
-    return x**4 + x**3 + x**2 - x
-
-
-vecF0 = vectorize(f0)
-vecFres = vectorize(fres)
-
-
-class Polynomial0(NodeOneToOne):
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            out.data[:] = vecF0(inp.data)
-        return list(outputs.iter_data())
-
-
-class PolynomialRes(NodeOneToOne):
-    def _fcn(self, _, inputs, outputs):
-        for inp, out in zip(inputs, outputs):
-            out.data[:] = vecFres(inp.data)
-        return list(outputs.iter_data())
-
-
-def test_Integrator_gl1d(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        npoints = 10
-        edges = Array("edges", linspace(0, 10, npoints + 1))
-        ordersX = Array("ordersX", [2] * npoints, edges=edges["array"])
-        A = Array("A", edges._data[:-1])
-        B = Array("B", edges._data[1:])
-        sampler = IntegratorSampler("sampler", mode="gl")
-        integrator = Integrator("integrator")
-        poly0 = Polynomial0("poly0")
-        polyres = PolynomialRes("polyres")
-        ordersX >> sampler("ordersX")
-        sampler.outputs["x"] >> poly0
-        A >> polyres
-        B >> polyres
-        sampler.outputs["weights"] >> integrator("weights")
-        poly0.outputs[0] >> integrator
-        ordersX >> integrator("ordersX")
-    res = polyres.outputs[1].data - polyres.outputs[0].data
-    assert allclose(integrator.outputs[0].data, res, atol=1e-10)
-    assert integrator.outputs[0].dd.axes_edges == [edges["array"]]
-
-
-def test_Integrator_gl2d(debug_graph):
-    class Polynomial1(NodeManyToOne):
-        def _fcn(self, _, inputs, outputs):
-            outputs["result"].data[:] = vecF0(inputs[1].data) * vecF0(
-                inputs[0].data
-            )
-            return list(outputs.iter_data())
-
-    with Graph(debug=debug_graph, close=True):
-        npointsX, npointsY = 10, 20
-        edgesX = Array("edgesX", linspace(0, 10, npointsX + 1))
-        edgesY = Array("edgesY", linspace(0, 10, npointsY + 1))
-        ordersX = Array("ordersX", [2] * npointsX, edges=edgesX["array"])
-        ordersY = Array("ordersY", [2] * npointsY, edges=edgesY["array"])
-        x0, y0 = meshgrid(edgesX._data[:-1], edgesY._data[:-1], indexing="ij")
-        x1, y1 = meshgrid(edgesX._data[1:], edgesY._data[1:], indexing="ij")
-        X0, X1 = Array("X0", x0), Array("X1", x1)
-        Y0, Y1 = Array("Y0", y0), Array("Y1", y1)
-        sampler = IntegratorSampler("sampler", mode="2d")
-        integrator = Integrator("integrator")
-        poly0 = Polynomial1("poly0")
-        polyres = PolynomialRes("polyres")
-        ordersX >> sampler("ordersX")
-        ordersY >> sampler("ordersY")
-        sampler.outputs["x"] >> poly0
-        sampler.outputs["y"] >> poly0
-        X0 >> polyres
-        X1 >> polyres
-        Y0 >> polyres
-        Y1 >> polyres
-        sampler.outputs["weights"] >> integrator("weights")
-        poly0.outputs[0] >> integrator
-        ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    res = (polyres.outputs[1].data - polyres.outputs[0].data) * (
-        polyres.outputs[3].data - polyres.outputs[2].data
-    )
-    assert allclose(integrator.outputs[0].data, res, atol=1e-10)
-    assert integrator.outputs[0].dd.axes_edges == [
-        edgesX["array"],
-        edgesY["array"],
-    ]
-
-
-# test wrong ordersX: edges not given
-def test_Integrator_edges_0(debug_graph):
-    arr = [1.0, 2.0, 3.0]
-    with Graph(debug=debug_graph):
-        arr1 = Array("array", arr)
-        weights = Array("weights", arr)
-        ordersX = Array("ordersX", [1, 2, 3])
-        integrator = Integrator("integrator")
-        arr1 >> integrator
-        weights >> integrator("weights")
-        ordersX >> integrator("ordersX")
-    with raises(TypeFunctionError):
-        integrator.close()
-
-
-# test wrong ordersX: edges is wrong
-def test_Integrator_edges_1(debug_graph):
-    arr = [1.0, 2.0, 3.0]
-    with Graph(debug=debug_graph, close=False):
-        edges = Array("edges", [0.0, 1.0, 2.0])
-        with raises(TypeFunctionError):
-            arr1 = Array("array", arr, edges=edges["array"])
-        edges = Array("edges", [0.0, 1.0, 2.0, 3.0])
-        arr1 = Array("array", arr, edges=edges["array"])
-        weights = Array("weights", arr)
-        ordersX = Array("ordersX", [1, 2, 3])
-        integrator = Integrator("integrator")
-        arr1 >> integrator
-        weights >> integrator("weights")
-        ordersX >> integrator("ordersX")
-    with raises(TypeFunctionError):
-        integrator.close()
-
-
-# test wrong ordersX: sum(ordersX) != shape
-def test_Integrator_02(debug_graph):
-    arr = [1.0, 2.0, 3.0]
-    with Graph(debug=debug_graph):
-        edges = Array("edges", [0.0, 1.0, 2.0, 3.0])
-        arr1 = Array("array", arr, edges=edges["array"])
-        weights = Array("weights", arr, edges=edges["array"])
-        ordersX = Array("ordersX", [1, 2, 3], edges=edges["array"])
-        integrator = Integrator("integrator")
-        arr1 >> integrator
-        weights >> integrator("weights")
-        ordersX >> integrator("ordersX")
-    with raises(TypeFunctionError):
-        integrator.close()
-
-
-# test wrong ordersX: sum(ordersX[i]) != shape[i]
-def test_Integrator_03(debug_graph):
-    arr = [1.0, 2.0, 3.0]
-    with Graph(debug=debug_graph, close=False):
-        edgesX = Array("edgesX", [-1.0, 0.0, 1.0])
-        edgesY = Array("edgesY", [-2.0, -1, 0.0, 1.0])
-        arr1 = Array(
-            "array", [arr, arr], edges=[edgesX["array"], edgesY["array"]]
-        )
-        weights = Array(
-            "weights", [arr, arr], edges=[edgesX["array"], edgesY["array"]]
-        )
-        ordersX = Array("ordersX", [1, 3], edges=edgesX["array"])
-        ordersY = Array("ordersY", [1, 0, 0], edges=edgesY["array"])
-        integrator = Integrator("integrator")
-        arr1 >> integrator
-        weights >> integrator("weights")
-        ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    with raises(TypeFunctionError):
-        integrator.close()
-
-
-# test wrong shape
-def test_Integrator_04(debug_graph):
-    with Graph(debug=debug_graph, close=False):
-        arr1 = Array("array", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
-        arr2 = Array("array", [[1.0, 2.0, 3.0, 4.0], [1.0, 2.0, 3.0, 4.0]])
-        weights = Array("weights", [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
-        ordersX = Array("ordersX", [0, 2])
-        ordersY = Array("ordersY", [1, 1, 1, 3])
-        integrator = Integrator("integrator")
-        arr1 >> integrator
-        arr2 >> integrator
-        weights >> integrator("weights")
-        ordersX >> integrator("ordersX")
-        ordersY >> integrator("ordersY")
-    with raises(TypeFunctionError):
-        integrator.close()
diff --git a/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars.py b/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars.py
deleted file mode 100644
index 234b3e233d4d7b7d22b88d43349753bfc96d4f98..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars.py
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/usr/bin/env python
-
-from numpy import arange
-from dagflow.exception import TypeFunctionError
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.lib.Array import Array
-from dagflow.lib.NormalizeCorrelatedVars import NormalizeCorrelatedVars
-from dagflow.lib.Cholesky import Cholesky
-
-from numpy import array, arange, allclose, sqrt
-from scipy.linalg import solve_triangular, cholesky
-
-import pytest
-from pytest import raises
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_NormalizeCorrelatedVars_00(dtype):
-    inCentral = arange(3.0, dtype=dtype)*100.0
-    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
-    inD = inV.diagonal()
-    inL = cholesky(inV, lower=True)
-    inLd = sqrt(inD)
-    inOffset = array((-10.0, 20.0, 30.0), dtype=dtype)
-    inVec = inCentral + inOffset
-    with Graph(close=True) as graph:
-        matrix = Array('matrix', inV)
-        diag = Array('diag', inD)
-        Lmatrix = Cholesky('cholesky 2d')
-        Ldiag = Cholesky('cholesky 1d')
-        central = Array('central', inCentral)
-        vec = Array('vec', inVec)
-        norm1d_fwd = NormalizeCorrelatedVars('norm1d fwd')
-        norm2d_fwd = NormalizeCorrelatedVars('norm2d fwd')
-
-        norm1d_bwd = NormalizeCorrelatedVars('norm1d bwd', mode='backward')
-        norm2d_bwd = NormalizeCorrelatedVars('norm2d bwd', mode='backward')
-
-        central >> norm1d_fwd.inputs['central']
-        central >> norm2d_fwd.inputs['central']
-        central >> norm1d_bwd.inputs['central']
-        central >> norm2d_bwd.inputs['central']
-
-        matrix >> Lmatrix
-        diag   >> Ldiag
-
-        Lmatrix >> norm2d_fwd.inputs['matrix']
-        Ldiag   >> norm1d_fwd.inputs['matrix']
-        Lmatrix >> norm2d_bwd.inputs['matrix']
-        Ldiag   >> norm1d_bwd.inputs['matrix']
-
-        vec >> norm1d_fwd >> norm1d_bwd
-        vec >> norm2d_fwd >> norm2d_bwd
-
-    nodes = (
-        matrix, diag,
-        Lmatrix, Ldiag,
-        central, vec,
-        norm1d_fwd, norm2d_fwd,
-        norm1d_bwd, norm2d_bwd,
-    )
-
-    assert all(node.tainted==True for node in nodes)
-    back_matrix = norm2d_bwd.get_data(0)
-    back_diag = norm1d_bwd.get_data(0)
-
-    assert all(node.tainted==False for node in nodes)
-
-    result_matrix = norm2d_fwd.get_data(0)
-    result_diag = norm1d_fwd.get_data(0)
-
-    norm1 = solve_triangular(inL, inOffset, lower=True)
-    norm2 = inOffset/inLd
-
-    if debug:
-        print('V:', inV)
-        print('Vdiag:', inD)
-        print('L:', inL)
-        print('Ldiag:', inLd)
-        print('Central:', inCentral)
-        print('In:', inVec)
-        print('Offset:', inOffset)
-        print('Norm 1:', norm1)
-        print('Norm 2:', norm2)
-        print('Rec 1:', back_matrix)
-        print('Rec 2:', back_diag)
-        print('Diff 1:', inVec-back_matrix)
-        print('Diff 2:', inVec-back_diag)
-
-    assert allclose(norm1, result_matrix, atol=0, rtol=0)
-    assert allclose(norm2, result_diag, atol=0, rtol=0)
-    assert allclose(inVec, back_matrix, atol=1.e-14, rtol=0)
-    assert allclose(inVec, back_diag, atol=0, rtol=0)
-
-    savegraph(graph, f"output/test_NormalizeCorrelatedVars_00_{dtype}.png")
-
-def test_NormalizeCorrelatedVars_01(dtype='d'):
-    inVec = arange(4.0, dtype=dtype)*100.0
-    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
-    inD = inV.diagonal()
-    with Graph() as graph1:
-        diag = Array('diag', inD)
-        vec = Array('vec', inVec)
-        norm1d_fwd = NormalizeCorrelatedVars('norm1d fwd')
-
-        vec  >> norm1d_fwd.inputs['central']
-        diag >> norm1d_fwd.inputs['matrix']
-
-    with Graph() as graph2:
-        matrix = Array('matrix', inV)
-        vec = Array('vec', inVec)
-        norm2d_fwd = NormalizeCorrelatedVars('norm2d fwd')
-
-        vec >> norm2d_fwd.inputs['central']
-        matrix >> norm2d_fwd.inputs['matrix']
-
-    with raises(TypeFunctionError):
-        graph1.close()
-
-    with raises(TypeFunctionError):
-        graph2.close()
-
diff --git a/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars2.py b/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars2.py
deleted file mode 100644
index e5ee5172762873563c125d991fb7d4c53eec3aed..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_NormalizeCorrelatedVars2.py
+++ /dev/null
@@ -1,254 +0,0 @@
-#!/usr/bin/env python
-
-from numpy import arange
-from dagflow.exception import TypeFunctionError
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.lib.Array import Array
-from dagflow.lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
-from dagflow.lib.Cholesky import Cholesky
-
-from numpy import array, arange, allclose, sqrt, full_like, zeros_like, ones_like, finfo
-from scipy.linalg import solve_triangular, cholesky
-
-import pytest
-from pytest import raises
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_NormalizeCorrelatedVars2_00(dtype):
-    fp_tolerance = finfo(dtype).resolution*2
-
-    inCentral = arange(3.0, dtype=dtype)*100.0
-    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
-    inD = inV.diagonal()
-    inL = cholesky(inV, lower=True)
-    inLd = sqrt(inD)
-    inOffset = array((-10.0, 20.0, 30.0), dtype=dtype)
-    inVec = inCentral + inOffset
-    inNorm = full_like(inVec, -100)
-    with Graph(close=True) as graph:
-        var_matrix = Array('var_matrix', inV)
-        var_diag = Array('var_diag', inD)
-        Lmatrix = Cholesky('cholesky 2d')
-        Ldiag = Cholesky('cholesky 1d')
-        central = Array('central', inCentral)
-        value1d = Array('vec 1d', inVec, mode='store_weak')
-        normvalue1d = Array('normvalue 1d', inNorm, mode='store_weak')
-        value2d = Array('vec 2d', inVec, mode='store_weak')
-        normvalue2d = Array('normvalue 2d', inNorm, mode='store_weak')
-        norm1d = NormalizeCorrelatedVars2('norm1d')
-        norm2d = NormalizeCorrelatedVars2('norm2d')
-
-        central >> norm1d.inputs['central']
-        central >> norm2d.inputs['central']
-
-        var_matrix >> Lmatrix
-        var_diag   >> Ldiag
-
-        Lmatrix >> norm2d.inputs['matrix']
-        Ldiag   >> norm1d.inputs['matrix']
-
-        (value1d, normvalue1d) >> norm1d
-        (value2d, normvalue2d) >> norm2d
-
-
-    nodes = (
-        var_matrix, var_diag,
-        Lmatrix, Ldiag,
-        central,
-        value1d, normvalue1d,
-        value2d, normvalue2d,
-        norm1d, norm2d,
-    )
-
-    assert all(node.tainted==True for node in nodes)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-
-    assert all(node.tainted==False for node in nodes)
-    assert all(inNorm!=back_matrix)
-    assert all(inNorm!=back_diag)
-
-    norm1 = solve_triangular(inL, inOffset, lower=True)
-    norm2 = inOffset/inLd
-
-    if debug:
-        print('V:', inV)
-        print('Vdiag:', inD)
-        print('L:', inL)
-        print('Ldiag:', inLd)
-        print('Central:', inCentral)
-        print('In:', inVec)
-        print('Offset:', inOffset)
-        print('Norm 1:', norm1)
-        print('Norm 2:', norm2)
-        print('Rec 1:', back_matrix)
-        print('Rec 2:', back_diag)
-        print('Diff 1:', inVec-back_matrix)
-        print('Diff 2:', inVec-back_diag)
-
-    assert allclose(norm1, norm_matrix, atol=0, rtol=0)
-    assert allclose(norm2, norm_diag, atol=0, rtol=0)
-    assert allclose(inVec, back_matrix, atol=0, rtol=0)
-    assert allclose(inVec, back_diag, atol=0, rtol=0)
-
-    #
-    # Set norm value
-    #
-    inZeros = zeros_like(inVec)
-    normvalue1d.set(inZeros)
-    normvalue2d.set(inZeros)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    assert allclose(inZeros, norm_matrix, atol=0, rtol=0)
-    assert allclose(inZeros, norm_diag, atol=0, rtol=0)
-    assert allclose(inCentral, back_matrix, atol=0, rtol=0)
-    assert allclose(inCentral, back_diag, atol=0, rtol=0)
-
-    #
-    # Set normvalue
-    #
-    inOnes = ones_like(inVec)
-    normvalue1d.set(inOnes)
-    normvalue2d.set(inOnes)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    checkDiagOnes = inCentral + inLd
-    checkMatrixOnes = inCentral + inL@inOnes
-    assert allclose(inOnes, norm_matrix, atol=0, rtol=0)
-    assert allclose(inOnes, norm_diag, atol=0, rtol=0)
-    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
-    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
-
-    #
-    # Set value (with immediate flag)
-    #
-    norm2d._immediate = True
-    norm1d._immediate = True
-    value1d.set(inCentral)
-    value2d.set(inCentral)
-    norm_matrix = norm2d.outputs[1]._data
-    norm_diag = norm1d.outputs[1]._data
-    back_matrix = norm2d.outputs[0]._data
-    back_diag = norm1d.outputs[0]._data
-    assert allclose(inZeros, norm_matrix, atol=0, rtol=0)
-    assert allclose(inZeros, norm_diag, atol=0, rtol=0)
-    assert allclose(inCentral, back_matrix, atol=0, rtol=0)
-    assert allclose(inCentral, back_diag, atol=0, rtol=0)
-
-    #
-    # Set normvalue (with immediate flag)
-    #
-    norm2d._immediate = True
-    norm1d._immediate = True
-    normvalue1d.set(inOnes)
-    normvalue2d.set(inOnes)
-    norm_matrix = norm2d.outputs[1]._data
-    norm_diag = norm1d.outputs[1]._data
-    back_matrix = norm2d.outputs[0]._data
-    back_diag = norm1d.outputs[0]._data
-    assert allclose(inOnes, norm_matrix, atol=0, rtol=0)
-    assert allclose(inOnes, norm_diag, atol=0, rtol=0)
-    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
-    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
-
-    #
-    # Set central
-    #
-    norm2d._immediate = False
-    norm1d._immediate = False
-    central.set(-inOnes)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    assert all(norm_matrix!=inOnes)
-    assert all(norm_diag!=inOnes)
-    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
-    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
-
-    #
-    # Revert central
-    #
-    central.set(inCentral)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    assert allclose(inOnes, norm_matrix, atol=fp_tolerance, rtol=0)
-    assert allclose(inOnes, norm_diag, atol=fp_tolerance, rtol=0)
-    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
-    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
-
-    #
-    # Set sigma
-    #
-    var_matrix.set(inV*2)
-    var_diag.set(inD*2)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    assert all(norm_matrix!=inOnes)
-    assert all(norm_diag!=inOnes)
-    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
-    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
-
-    #
-    # Revert sigma
-    #
-    var_matrix.set(inV)
-    var_diag.set(inD)
-    back_matrix = norm2d.get_data(0)
-    back_diag = norm1d.get_data(0)
-    norm_matrix = norm2d.get_data(1)
-    norm_diag = norm1d.get_data(1)
-    assert allclose(inOnes, norm_matrix, atol=fp_tolerance, rtol=0)
-    assert allclose(inOnes, norm_diag, atol=fp_tolerance, rtol=0)
-    assert allclose(checkMatrixOnes, back_matrix, atol=0, rtol=0)
-    assert allclose(checkDiagOnes, back_diag, atol=0, rtol=0)
-
-    savegraph(graph, f"output/test_NormalizeCorrelatedVars2_00_{dtype}.png", show=['all'])
-
-def test_NormalizeCorrelatedVars2_01(dtype='d'):
-    inVec = arange(4.0, dtype=dtype)*100.0
-    inNorm = full_like(inVec, -100)
-    inV = array([[10, 2,   1], [ 2, 12,  3], [ 1,  3, 13]], dtype=dtype)
-    inD = inV.diagonal()
-    with Graph() as graph1:
-        var_diag = Array('var_diag', inD)
-        vec = Array('vec', inVec, mode='store_weak')
-        nvec = Array('vec', inNorm, mode='store_weak')
-        norm1d = NormalizeCorrelatedVars2('norm1d')
-
-        vec  >> norm1d.inputs['central']
-        var_diag >> norm1d.inputs['matrix']
-
-        (vec, nvec) >> norm1d
-
-    with Graph() as graph2:
-        var_matrix = Array('var_matrix', inV)
-        vec = Array('vec', inVec, mode='store_weak')
-        nvec = Array('vec', inNorm, mode='store_weak')
-        norm2d = NormalizeCorrelatedVars2('norm2d')
-
-        vec >> norm2d.inputs['central']
-        var_matrix >> norm2d.inputs['matrix']
-
-        (vec, nvec) >> norm2d
-
-    with raises(TypeFunctionError):
-        graph1.close()
-
-    with raises(TypeFunctionError):
-        graph2.close()
-
diff --git a/subtrees/dagflow/test/nodes/test_Sum.py b/subtrees/dagflow/test/nodes/test_Sum.py
deleted file mode 100644
index 1ab8897d58c6beda0f9679dc2f6a9279ac022ab4..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_Sum.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.Sum import Sum
-from dagflow.graphviz import savegraph
-
-from numpy import arange, sum
-import pytest
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_Sum_01(dtype):
-    arrays_in = tuple(arange(12, dtype=dtype)*i for i in (1, 2, 3))
-
-    with Graph(close=True) as graph:
-        arrays = tuple(Array('test', array_in) for array_in in arrays_in)
-        sm = Sum('sum')
-        arrays >> sm
-
-    output = sm.outputs[0]
-
-    res = sum(arrays_in, axis=0)
-
-    assert sm.tainted==True
-    assert all(output.data==res)
-    assert sm.tainted==False
-
-    arrays_in = (arrays_in[1],) + arrays_in[1:]
-    res = sum(arrays_in, axis=0)
-    assert arrays[0].set(arrays[1].get_data())
-    assert sm.tainted==True
-    assert all(output.data==res)
-    assert sm.tainted==False
-
-    savegraph(graph, f"output/test_sum_00_{dtype}.png")
diff --git a/subtrees/dagflow/test/nodes/test_SumMatOrDiag.py b/subtrees/dagflow/test/nodes/test_SumMatOrDiag.py
deleted file mode 100644
index 5b8bacfe276436e5d3730ab936987c00117b7543..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_SumMatOrDiag.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.SumMatOrDiag import SumMatOrDiag
-from dagflow.graphviz import savegraph
-from dagflow.exception import TypeFunctionError
-
-from numpy import arange, diag, allclose
-import pytest
-from pytest import raises
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_SumMatOrDiag_01(dtype):
-    for size in (5, 4):
-        array1  = arange(size, dtype=dtype)+1.0
-        array2  = arange(size, dtype=dtype)*3
-        matrix1 = arange(size*size, dtype=dtype).reshape(size, size)+1.0
-        matrix2 = arange(size*size, dtype=dtype).reshape(size, size)*2.5
-        arrays_in = (array1, array2, matrix1, matrix2)
-
-        combinations = ((0,), (2,), (0, 1), (0, 2), (2, 0), (0, 1, 2), (2, 3), (0, 1, 2, 3))
-
-        sms = []
-
-        with Graph(close=True) as graph:
-            arrays = tuple(Array(f'test {i}', array_in) for i, array_in in enumerate(arrays_in))
-
-            for cmb in combinations:
-                sm = SumMatOrDiag(f'sum {cmb}')
-                tuple(arrays[i] for i in cmb) >> sm
-                sms.append(sm)
-
-        for cmb, sm in zip(combinations, sms):
-            res = 0.0
-            all1d = True
-            for i in cmb:
-                array_in = arrays_in[i]
-                if len(array_in.shape)==1:
-                    array_in = diag(array_in)
-                else:
-                    all1d = False
-                res += array_in
-
-            if all1d:
-                res = diag(res)
-
-            assert sm.tainted==True
-            output = sm.outputs[0]
-            assert allclose(output.data, res, rtol=0, atol=0)
-            assert sm.tainted==False
-
-        savegraph(graph, f"output/test_SumMatOrDiag_00_{dtype}_{size}.png", show='all')
-
-def test_SumMatOrDiag_02(dtype='d'):
-    size = 5
-    in_array1  = arange(size, dtype=dtype)                                      # 0
-    in_array2  = arange(size+1, dtype=dtype)                                    # 1
-    in_matrix1 = arange(size*size, dtype=dtype).reshape(size, size)             # 2
-    in_matrix2 = arange(size*(size+1), dtype=dtype).reshape(size, size+1)       # 3
-    in_matrix3 = arange((size+1)*(size+1), dtype=dtype).reshape(size+1, size+1) # 4
-    arrays_in = (in_array1, in_array2, in_matrix1, in_matrix2, in_matrix3)
-
-    combinations = (
-            (0, 1), (0, 3), (0, 4),
-            (3, 0), (4, 0),
-            (2, 3), (2, 4)
-            )
-    with Graph(close=False):
-        arrays = tuple(Array(f'test {i}', array_in) for i, array_in in enumerate(arrays_in))
-
-        for i1, i2 in combinations:
-            sm = SumMatOrDiag('sum')
-            (arrays[i1], arrays[i2]) >> sm
-
-            with raises(TypeFunctionError):
-                sm.close()
-
diff --git a/subtrees/dagflow/test/nodes/test_SumSq.py b/subtrees/dagflow/test/nodes/test_SumSq.py
deleted file mode 100644
index d70a48113e2f57d0ceca9f028c669c36f1cfd8b0..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_SumSq.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.lib.Array import Array
-from dagflow.lib.SumSq import SumSq
-from dagflow.graphviz import savegraph
-
-from numpy import arange, sum
-import pytest
-
-debug = False
-
-@pytest.mark.parametrize('dtype', ('d', 'f'))
-def test_SumSq_01(dtype):
-    arrays_in = tuple(arange(12, dtype=dtype)*i for i in (1, 2, 3))
-    arrays2_in = tuple(a**2 for a in arrays_in)
-
-    with Graph(close=True) as graph:
-        arrays = tuple(Array('test', array_in) for array_in in arrays_in)
-        sm = SumSq('sumsq')
-        arrays >> sm
-
-    output = sm.outputs[0]
-
-    res = sum(arrays2_in, axis=0)
-
-    assert sm.tainted==True
-    assert all(output.data==res)
-    assert sm.tainted==False
-
-    arrays2_in = (arrays2_in[1],) + arrays2_in[1:]
-    res = sum(arrays2_in, axis=0)
-    assert arrays[0].set(arrays[1].get_data())
-    assert sm.tainted==True
-    assert all(output.data==res)
-    assert sm.tainted==False
-
-    savegraph(graph, f"output/test_SumSq_00_{dtype}.png")
diff --git a/subtrees/dagflow/test/nodes/test_View.py b/subtrees/dagflow/test/nodes/test_View.py
deleted file mode 100644
index 6150da820ac9fe1271e50bab0b91c47c6fca3b10..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_View.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-
-from numpy import arange
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.lib.View import View
-from dagflow.lib.Array import Array
-
-debug = False
-
-def test_View_00():
-    """Create four nodes: sum up three of them, multiply the result by the fourth
-    Use graph context to create the graph.
-    Use one-line code for connecting the nodes
-    """
-    array = arange(5.0)
-    with Graph(close=True) as graph:
-        initial = Array('array', array)
-        view = View("view")
-        view2 = View("view2")
-
-        initial >> view >> view2
-
-    assert initial.tainted==True
-    assert view.tainted==True
-    assert view2.tainted==True
-
-    result = view.get_data()
-    result2 = view2.get_data()
-    assert (result==array).all()
-    assert (result2==array).all()
-    assert view.tainted==False
-    assert view2.tainted==False
-    assert initial.tainted==False
-
-    d1=initial.outputs[0]._data
-    d2=view.outputs[0]._data
-    d3=view2.outputs[0]._data
-    assert (d1==d2).all()
-    assert (d1==d3).all()
-    d1[:]=-1
-    assert (d2==-1).all()
-    assert (d3==-1).all()
-
-    initial.taint()
-    assert initial.tainted==True
-    assert view.tainted==True
-    assert view2.tainted==True
-
-    view2.touch()
-    savegraph(graph, "output/test_View_00.png")
diff --git a/subtrees/dagflow/test/nodes/test_ViewConcat.py b/subtrees/dagflow/test/nodes/test_ViewConcat.py
deleted file mode 100644
index f9703cb99dae4f80ee9169687e4d722ba3de7472..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/nodes/test_ViewConcat.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-
-from pytest import raises
-import numpy as np
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.lib.ViewConcat import ViewConcat
-from dagflow.lib.View import View
-from dagflow.lib.NormalizeCorrelatedVars2 import NormalizeCorrelatedVars2
-from dagflow.lib.Array import Array
-from dagflow.exception import ConnectionError
-
-import pytest
-
-debug = False
-
-@pytest.mark.parametrize('closemode', ['graph', 'recursive'])
-def test_ViewConcat_00(closemode):
-    """Create four nodes: sum up three of them, multiply the result by the fourth
-    Use graph context to create the graph.
-    Use one-line code for connecting the nodes
-    """
-    closegraph = closemode=='graph'
-
-    array1 = np.arange(5.0)
-    array2 = np.ones(shape=10, dtype='d')
-    array3 = np.zeros(shape=12, dtype='d')-1
-    array = np.concatenate((array1, array2, array3))
-    arrays = (array1, array2, array3)
-    n1, n2, _ = (a.size for a in arrays)
-    with Graph(debug=debug, close=closegraph) as graph:
-        inputs = [Array('array', array, mode='fill') for array in arrays]
-        concat = ViewConcat("concat")
-        view = View("view")
-
-        inputs >> concat >> view
-
-    if not closegraph:
-        view.close()
-
-    graph.print()
-
-    assert all(initial.tainted==True for initial in inputs)
-    assert concat.tainted==True
-    assert view.tainted==True
-
-    result = concat.get_data()
-    result_view = view.get_data()
-    assert (result==array).all()
-    assert (result_view==array).all()
-    assert concat.tainted==False
-    assert view.tainted==False
-    assert all(i.tainted==False for i in inputs)
-
-    data1, data2, data3 = (i.get_data(0) for i in inputs)
-    datac = concat.get_data(0)
-    datav = view.get_data(0)
-    assert all(data1==datac[:data1.size])
-    assert all(data2==datac[n1:n1+data2.size])
-    assert all(data3==datac[n1+n2:n1+n2+data3.size])
-
-    data1[2]=-1
-    data2[:]=-1
-    data3[::2]=-2
-    assert all(data1==datac[:data1.size])
-    assert all(data2==datac[n1:n1+data2.size])
-    assert all(data3==datac[n1+n2:n1+n2+data3.size])
-    assert all(data1==datav[:data1.size])
-    assert all(data2==datav[n1:n1+data2.size])
-    assert all(data3==datav[n1+n2:n1+n2+data3.size])
-
-    inputs[1].taint()
-    assert concat.tainted==True
-    assert view.tainted==True
-
-    view.touch()
-    savegraph(graph, "output/test_ViewConcat_00.png")
-
-def test_ViewConcat_01():
-    with Graph() as graph:
-        concat = ViewConcat("concat")
-        concat2 = ViewConcat("concat 2")
-        view = View('view')
-        normnode = NormalizeCorrelatedVars2('normvars')
-
-        with raises(ConnectionError):
-            view >> concat
-
-        with raises(ConnectionError):
-            normnode.outputs[0] >> concat
-
-        with raises(ConnectionError):
-            concat >> normnode.inputs[0]
-
-        with raises(ConnectionError):
-            concat >> concat2
-
-    savegraph(graph, "output/test_ViewConcat_01.png")
diff --git a/subtrees/dagflow/test/parameters/test_load_parameters.py b/subtrees/dagflow/test/parameters/test_load_parameters.py
deleted file mode 100644
index 9152828bc44660a82f84f739f424b88ae42647ce..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/parameters/test_load_parameters.py
+++ /dev/null
@@ -1,133 +0,0 @@
-from dagflow.graphviz import savegraph
-from dagflow.graph import Graph
-from dagflow.bundles.load_parameters import load_parameters
-
-cfg1 = {
-        'parameters': {
-            'var1': 1.0,
-            'var2': 1.0,
-            'sub1': {
-                'var3': 2.0
-                }
-            },
-        'format': 'value',
-        'state': 'variable',
-        'labels': {
-            'var1': {
-                'text': 'text label 1',
-                'latex': r'\LaTeX label 1',
-                'name': 'v1-1'
-                },
-            'var2': 'simple label 2',
-            },
-        }
-cfg1a = {
-        'parameters': {
-            'var1': 1.0,
-            'var2': 1.0,
-            'sub1': {
-                'var3': 2.0
-                }
-            },
-        'format': 'value',
-        'state': 'fixed',
-        'labels': {
-            'var1': {
-                'text': 'text label 1',
-                'latex': r'\LaTeX label 1',
-                'name': 'v1-1'
-                },
-            'var2': 'simple label 2',
-            },
-        }
-
-cfg2 = {
-        'parameters': {
-            'var1': (1.0, 1.0, 0.1),
-            'var2': (1.0, 2.0, 0.1),
-            'sub1': {
-                'var3': (2.0, 1.0, 0.1)
-                }
-            },
-        'path': 'sub.folder',
-        'format': ('value', 'central', 'sigma_absolute'),
-        'state': 'fixed',
-        'labels': {
-            'var1': {
-                'text': 'text label 1',
-                'latex': r'\LaTeX label 1',
-                'name': 'v1-2'
-                },
-            'var2': 'simple label 2'
-            },
-        }
-
-cfg3 = {
-        'parameters': {
-            'var1': [1.0, 1.0, 0.1],
-            'var2': (1.0, 2.0, 0.1),
-            'sub1': {
-                'var3': (2.0, 3.0, 0.1)
-                }
-            },
-        'labels': {
-            'var1': {
-                'text': 'text label 1',
-                'latex': r'\LaTeX label 1',
-                'name': 'v1-3'
-                },
-            'var2': 'simple label 2'
-            },
-        'format': ['value', 'central', 'sigma_relative'],
-        'state': 'fixed',
-        }
-
-cfg4 = {
-        'parameters': {
-            'var1': (1.0, 1.0, 10),
-            'var2': (1.0, 2.0, 10),
-            'sub1': {
-                'var3': (2.0, 3.0, 10)
-                }
-            },
-        'labels': {
-            'var1': {
-                'text': 'text label 1',
-                'latex': r'\LaTeX label 1',
-                },
-            'var2': 'simple label 2'
-            },
-        'format': ('value', 'central', 'sigma_percent'),
-        'state': 'variable',
-        }
-
-cfg5 = {
-        'parameters': {
-            'var1': (1.0, 10),
-            'var2': (2.0, 10),
-            'sub1': {
-                'var3': (3.0, 10)
-                }
-            },
-        'labels': {
-            'var1': {
-                'text': 'text label 1',
-                'latex': r'\LaTeX label 1',
-                },
-            'var2': 'simple label 2'
-            },
-        'format': ('central', 'sigma_percent'),
-        'state': 'variable',
-        }
-
-from pprint import pprint
-def test_load_parameters_v01():
-    cfgs = (cfg1, cfg1a, cfg2, cfg3, cfg4, cfg5)
-    with Graph(close=True) as g:
-        for i, cfg in enumerate(cfgs):
-            vars = load_parameters(cfg)
-            print(cfg['state'])
-            print(i, end=' ')
-            pprint(vars.object)
-
-    savegraph(g, 'output/test_load_parameters.pdf', show='all')
diff --git a/subtrees/dagflow/test/parameters/test_parameters.py b/subtrees/dagflow/test/parameters/test_parameters.py
deleted file mode 100644
index 0e17d6bc93b8764b11a480c655cd2cb1a950f6ff..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/parameters/test_parameters.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.lib import Array
-from dagflow.parameters import GaussianParameters
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.exception import CriticalError
-
-from numpy import square, allclose
-import pytest
-
-@pytest.mark.parametrize('mode', ('single', 'uncorr', 'cov', 'cov1d'))
-def test_variables_00_variable(mode) -> None:
-    value_in    = [1.1, 1.8, 5.0]
-    central_in  = [1.0, 2.0, 3.0]
-    sigma_in    = [1.0, 0.5, 2.0]
-    corrs_in    = [-0.1, 0.5, -0.9] # 01, 02, 12
-    variance_in = square(sigma_in)
-    zeros_in    = [0.0, 0.0, 0.0]
-
-    if mode=='single':
-        value_in = value_in[:1]
-        central_in = central_in[:1]
-        sigma_in = sigma_in[:1]
-        zeros_in = zeros_in[:1]
-
-    with Graph(debug=False, close=False) as graph:
-        value   = Array("variable", value_in, mode='store_weak', mark='v')
-        central = Array("central",  central_in, mark='vâ‚€')
-
-        if mode in ('single', 'uncorr', 'cor'):
-            sigma = Array("sigma", sigma_in, mark='σ')
-
-        if mode in ('single', 'uncorr'):
-            gp = GaussianParameters(value, central, sigma=sigma)
-        elif mode=='cov':
-            covariance = Array("covariance", [
-                    [variance_in[0],                      corrs_in[0]*sigma_in[0]*sigma_in[1], corrs_in[1]*sigma_in[0]*sigma_in[2]],
-                    [corrs_in[0]*sigma_in[0]*sigma_in[1], variance_in[1],                      corrs_in[2]*sigma_in[1]*sigma_in[2]],
-                    [corrs_in[1]*sigma_in[0]*sigma_in[2], corrs_in[2]*sigma_in[1]*sigma_in[2], variance_in[2]]
-                                ],
-                               mark='V')
-            gp = GaussianParameters(value, central, covariance=covariance)
-        elif mode=='cov1d':
-            covariance = Array("covariance", variance_in, mark='diag(V)')
-            gp = GaussianParameters(value, central, covariance=covariance)
-        elif mode=='cor':
-            correlation = Array("correlation", [
-                [1.0,         corrs_in[0], corrs_in[1]],
-                [corrs_in[0], 1.0,         corrs_in[2]],
-                [corrs_in[1], corrs_in[2], 1.0],
-                ], mark='C')
-            gp = GaussianParameters(value, central, sigma=sigma, correlation=correlation)
-        else:
-            raise RuntimeError(f"Invalid mode {mode}")
-
-    try:
-        graph.close()
-    except CriticalError as error:
-        savegraph(graph, f"output/test_variables_00_{mode}.png")
-        raise error
-
-    value_out0 = gp.value.data.copy()
-    normvalue_out0 = gp.constraint.normvalue.data
-    assert allclose(value_in, value_out0, atol=0, rtol=0)
-    assert all(normvalue_out0!=0)
-
-    gp.constraint.normvalue.set(zeros_in)
-    value_out1 = gp.value.data
-    normvalue_out1 = gp.constraint.normvalue.data
-    assert allclose(central_in, value_out1, atol=0, rtol=0)
-    assert allclose(normvalue_out1, 0.0, atol=0, rtol=0)
-
-    gp.value.set(value_out0)
-    value_out2 = gp.value.data
-    normvalue_out2 = gp.constraint.normvalue.data
-    assert allclose(value_in, value_out2, atol=0, rtol=0)
-    assert allclose(normvalue_out2, normvalue_out0, atol=0, rtol=0)
-
-    savegraph(graph, f"output/test_variables_00_{mode}.png", show=['all'])
-    savegraph(graph, f"output/test_variables_00_{mode}.pdf", show=['all'])
-
diff --git a/subtrees/dagflow/test/test_class.py b/subtrees/dagflow/test/test_class.py
deleted file mode 100755
index 041591dba0fe942c362eaf1ec40e0d91ab105936..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_class.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-
-
-from numpy import arange
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.lib.Array import Array
-from dagflow.lib.Product import Product
-from dagflow.lib.Sum import Sum
-from dagflow.printl import current_level, printl, set_prefix_function
-from dagflow.wrappers import *
-
-set_prefix_function(lambda: "{:<2d} ".format(current_level()))
-debug = False
-
-
-def test_00():
-    """Create four nodes: sum up three of them, multiply the result by the fourth
-    Use graph context to create the graph.
-    Use one-line code for connecting the nodes
-    """
-    array = arange(5)
-    names = "n1", "n2", "n3", "n4"
-    with Graph(debug=debug) as graph:
-        initials = [Array(name, array) for name in names]
-        s = Sum("add")
-        m = Product("mul")
-
-    (initials[3], (initials[:3] >> s)) >> m
-
-    graph._wrap_fcns(dataprinter, printer)
-    graph.close()
-
-    s.print()
-    m.print()
-
-    result = m.outputs["result"].data
-    printl(result)
-
-    savegraph(graph, "output/class_00.pdf")
diff --git a/subtrees/dagflow/test/test_close_open.py b/subtrees/dagflow/test/test_close_open.py
deleted file mode 100644
index dcb936ff95f05659d6ee8370e06426fdc11d78f0..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_close_open.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.lib import Array, Product, Sum, WeightedSum
-from numpy import arange, array
-
-
-def test_00(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr = Array("arr", arange(3, dtype="d"))  # [0, 1, 2]
-        ws = WeightedSum("weightedsum")
-        (arr, arr) >> ws
-        Array("weight", (2, 3)) >> ws("weight")
-    assert ws.closed
-    assert (ws.outputs["result"].data == [0, 5, 10]).all()
-    assert arr.open()
-    assert not ws.inputs["weight"].closed
-    assert not arr.closed
-
-
-def test_01(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr1 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
-        arr2 = Array("arr2", array((3, 2, 1), dtype="d"))
-        sum = Sum("sum")
-        (arr1, arr2) >> sum
-    assert sum.closed
-    assert (sum.outputs["result"].data == [3, 3, 3]).all()
-    assert sum.open()
-    assert all((not sum.closed, arr1.closed, arr2.closed))
-    assert arr1.open()
-    assert all((not sum.closed, not arr1.closed, arr2.closed))
-    assert arr2.open()
-    assert all((not sum.closed, not arr1.closed, not arr2.closed))
-
-
-def test_02(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr1 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
-        arr2 = Array("arr2", array((3, 2, 1), dtype="d"))
-        arr3 = Array("unity", array((1, 1, 1), dtype="d"))
-        sum1 = Sum("sum1")
-        sum2 = Sum("sum2")
-        prod = Product("product")
-        (arr1, arr2, arr3) >> sum1  # [4, 4, 4]
-        (arr3, sum1) >> prod  # [4, 4, 4]
-        (arr1, prod) >> sum2  # [4, 5, 6]
-    assert sum2.closed
-    assert (sum2.outputs["result"].data == [4, 5, 6]).all()
-    assert arr1.open()
-    assert arr2.closed
-    assert arr3.closed
-    assert not arr1.closed
-    assert not prod.closed
-    assert not sum1.closed
-
-
-def test_03(debug_graph):
-    with Graph(debug=debug_graph, close=False):
-        arr1 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
-        arr2 = Array("arr2", array((3, 2, 1), dtype="d"))
-        arr3 = Array("unity", array((1, 1, 1), dtype="d"))
-        sum1 = Sum("sum1")
-        sum2 = Sum("sum2")
-        prod = Product("product")
-        (arr1, arr2, arr3) >> sum1  # [4, 4, 4]
-        (arr3, sum1) >> prod  # [4, 4, 4]
-        (arr1, prod) >> sum2  # [4, 5, 6]
-
-    with Graph(debug=debug_graph, close=True):
-        arr4 = Array("arr1", arange(3, dtype="d"))  # [0, 1, 2]
-        sum3 = Sum("sum3")
-        (sum2, arr4) >> sum3  # [4, 6, 8]
-    assert arr1.closed
-    assert arr2.closed
-    assert arr3.closed
-    assert arr4.closed
-    assert sum2.closed
-    assert sum3.closed
-    assert (sum3.outputs["result"].data == [4, 6, 8]).all()
-    assert arr1.open()
-    assert arr2.closed
-    assert arr3.closed
-    assert arr4.closed
-    assert not arr1.closed
-    assert not prod.closed
-    assert not sum1.closed
-    assert not sum2.closed
diff --git a/subtrees/dagflow/test/test_connection.py b/subtrees/dagflow/test/test_connection.py
deleted file mode 100755
index d23dfb49b91776e67f9c4fafa859727cf79ac39c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_connection.py
+++ /dev/null
@@ -1,166 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.exception import ClosedGraphError, UnclosedGraphError
-from dagflow.graph import Graph
-from dagflow.input import Input
-from dagflow.nodes import FunctionNode
-from dagflow.output import Output
-from dagflow.wrappers import *
-from pytest import raises
-
-nodeargs = {"typefunc": lambda: True}
-
-
-def test_01():
-    i = Input("input", None)
-    o = Output("output", None)
-
-    o >> i
-
-
-def test_02():
-    n1 = FunctionNode("node1")
-    n2 = FunctionNode("node2")
-
-    n1.add_output("o1")
-    n1.add_output("o2")
-
-    n2.add_input("i1")
-    n2.add_input("i2")
-    n2.add_output("o1")
-
-    n1 >> n2
-
-
-def test_03():
-    n1 = FunctionNode("node1")
-    n2 = FunctionNode("node2")
-
-    out = n1.add_output("o1")
-
-    n2.add_input("i1")
-    n2.add_output("o1")
-
-    out >> n2
-
-
-def test_04():
-    n1 = FunctionNode("node1")
-    n2 = FunctionNode("node2")
-
-    out = n1.add_output("o1")
-
-    n2.add_pair("i1", "o1")
-
-    final = out >> n2
-
-
-def test_05():
-    n1 = FunctionNode("node1", **nodeargs)
-    n2 = FunctionNode("node2", **nodeargs)
-
-    out1 = n1.add_output("o1", allocatable=False)
-    out2 = n1.add_output("o2", allocatable=False)
-
-    _, final = n2.add_pair("i1", "o1", output_kws={"allocatable": False})
-    n2.add_input("i2")
-
-    (out1, out2) >> n2
-
-    n2.close()
-    assert n2.closed
-    assert n1.closed
-    with raises(ClosedGraphError):
-        n2.add_input("i3")
-    with raises(ClosedGraphError):
-        n1.add_output("o3")
-    final.data
-
-
-def test_06():
-    n1 = FunctionNode("node1", **nodeargs)
-    n2 = FunctionNode("node2", **nodeargs)
-
-    out1 = n1._add_output("o1", allocatable=False)
-    out2 = n1._add_output("o2", allocatable=False)
-
-    _, final = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
-    n2._add_input("i2")
-
-    (out1, out2) >> n2
-
-    n1.close(recursive=False)
-    assert n1.closed
-    assert not n2.closed
-    n2.close(recursive=False)
-    assert n2.closed
-    with raises(ClosedGraphError):
-        n2.add_input("i3")
-    with raises(ClosedGraphError):
-        n1.add_output("o3")
-    final.data
-
-
-def test_07():
-    g = Graph()
-    n1 = g.add_node("node1", **nodeargs)
-    n2 = g.add_node("node2", **nodeargs)
-    g._wrap_fcns(toucher, printer)
-
-    out1 = n1._add_output("o1", allocatable=False)
-    out2 = n1._add_output("o2", allocatable=False)
-
-    _, final = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
-    n2._add_input("i2")
-
-    (out1, out2) >> n2
-
-    with raises(UnclosedGraphError):
-        final.data
-    g.close()
-    with raises(ClosedGraphError):
-        n2.add_input("i3")
-    with raises(ClosedGraphError):
-        n1.add_output("o3")
-    final.data
-
-
-def test_08():
-    g = Graph()
-    n1 = g.add_node("node1", **nodeargs)
-    n2 = g.add_node("node2", **nodeargs)
-    n3 = g.add_node("node3", **nodeargs)
-    g._wrap_fcns(toucher, printer)
-
-    out1 = n1._add_output("o1", allocatable=False)
-    out2 = n1._add_output("o2", allocatable=False)
-
-    _, out3 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
-    n2._add_input("i2")
-
-    _, final = n3._add_pair("i1", "o1", output_kws={"allocatable": False})
-
-    (out1, out2) >> n2
-    out3 >> n3
-
-    with raises(UnclosedGraphError):
-        final.data
-    g.close()
-    with raises(ClosedGraphError):
-        n2.add_input("i3")
-    with raises(ClosedGraphError):
-        n1.add_output("o3")
-    with raises(ClosedGraphError):
-        n3.add_pair("i3", "o3")
-    final.data
-
-    print()
-    final.data
-
-    print("Taint n2")
-    n2.taint()
-    final.data
-
-    print("Taint n3")
-    n3.taint()
-    final.data
diff --git a/subtrees/dagflow/test/test_containers.py b/subtrees/dagflow/test/test_containers.py
deleted file mode 100755
index 1e19a94a0bafd98c6d983f251266c49c22475585..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_containers.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-
-import contextlib
-
-from dagflow.input import Input, Inputs
-from dagflow.legs import Legs
-from dagflow.output import Output
-
-
-def test_01():
-    inputs = Inputs()
-
-    input1 = Input("i1", None)
-    input2 = Input("i2", None)
-    input3 = Input("i3", None)
-
-    inputs.add( (input1, input2) )
-    inputs.add( input3 )
-
-    print(inputs)
-
-    print(inputs[0])
-    print(inputs[1])
-    print(inputs[2])
-
-    try:
-        print(inputs[3])
-    except IndexError:
-        pass
-    else:
-        raise RuntimeError("fail")
-
-    print(inputs["i1"])
-    print(inputs["i2"])
-    print(inputs[("i1", "i3")])
-
-    print(inputs["i1"])
-    print(inputs["i2"])
-    print(inputs["i3"])
-    with contextlib.suppress(KeyError):
-        print(inputs["i4"])
-
-
-def test_02():
-    inputs = Inputs()
-    print(inputs)
-
-    output1 = Output("o1", None)
-
-    try:
-        inputs.add( output1 )
-    except Exception:
-        pass
-    else:
-        raise RuntimeError("fail")
-
-
-def test_03():
-    print("test3")
-    input1 = Input("i1", None)
-    input2 = Input("i2", None)
-    input3 = Input("i3", None)
-
-    output1 = Output("o1", None)
-    output2 = Output("o2", None)
-
-    legs = Legs((input1, input2, input3), (output1, output2))
-    print(legs)
-    legs.print()
-    print()
-
-    legs1 = legs[None, "o1"]
-    print(legs1)
-    # legs1.print()
-    print()
-
-    legs2 = legs[:, "o1"]
-    print(legs2)
-    legs2.print()
-    print()
-
-    legs3 = legs[("i1", "i3"), "o1"]
-    print(legs3)
-    legs3.print()
-    print()
diff --git a/subtrees/dagflow/test/test_function_switch.py b/subtrees/dagflow/test/test_function_switch.py
deleted file mode 100644
index 2d700799e736fbcfeb99c77bec9a172375902dfa..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_function_switch.py
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/usr/bin/env python
-
-from dagflow.graph import Graph
-from dagflow.input_extra import MissingInputAddOne
-from dagflow.lib import Array
-from dagflow.nodes import FunctionNode
-from numpy import arange, array, copyto, result_type
-
-
-class SumIntProductFloatElseNothing(FunctionNode):
-    def __init__(self, name, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(name, **kwargs)
-        self._functions.update(
-            {"int": self._fcn_int, "float": self._fcn_float}
-        )
-
-    def _fcn(self, _, inputs, outputs):
-        return outputs[0].data
-
-    def _fcn_int(self, _, inputs, outputs):
-        out = outputs[0].data
-        copyto(out, inputs[0].data.copy())
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                out += input.data
-        return out
-
-    def _fcn_float(self, _, inputs, outputs):
-        out = outputs[0].data
-        copyto(out, inputs[0].data.copy())
-        if len(inputs) > 1:
-            for input in inputs[1:]:
-                out *= input.data
-        return out
-
-    def _typefunc(self) -> bool:
-        if self.inputs[0].dd.dtype == "i":
-            self.fcn = self._functions.get("int")
-        elif self.inputs[0].dd.dtype == "d":
-            self.fcn = self._functions.get("float")
-        self.outputs["result"].dd.shape = self.inputs[0].dd.shape
-        self.outputs["result"].dd.dtype = result_type(
-            *tuple(inp.dd.dtype for inp in self.inputs)
-        )
-        self.logger.debug(
-            f"Node '{self.name}': dtype={self.outputs['result'].dd.dtype}, "
-            f"shape={self.outputs['result'].dd.shape}, function={self.fcn.__name__}"
-        )
-        return True
-
-
-def test_00(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr = Array("arr", array(("1", "2", "3")))
-        node = SumIntProductFloatElseNothing("node")
-        (arr, arr) >> node
-    assert (node.outputs["result"].data == ["", "", ""]).all()
-
-
-def test_01(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr = Array("arr", arange(3, dtype="i"))  # [0, 1, 2]
-        node = SumIntProductFloatElseNothing("node")
-        (arr, arr) >> node
-    assert (node.outputs["result"].data == [0, 2, 4]).all()
-
-
-def test_02(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr = Array("arr", arange(3, dtype="d"))  # [0, 1, 2]
-        node = SumIntProductFloatElseNothing("node")
-        (arr, arr) >> node
-    assert (node.outputs["result"].data == [0, 1, 4]).all()
diff --git a/subtrees/dagflow/test/test_graph.py b/subtrees/dagflow/test/test_graph.py
deleted file mode 100755
index eb070b2fde9fb230c6e78a37cd35fd20468ac5ed..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_graph.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#!/usr/bin/env python
-from dagflow.graph import Graph
-from dagflow.graphviz import GraphDot
-from dagflow.printl import current_level, set_prefix_function
-from dagflow.wrappers import *
-
-set_prefix_function(
-    lambda: "{:<2d} ".format(current_level()),
-)
-nodeargs = dict(typefunc=lambda: True)
-
-
-def test_01():
-    """Simple test of the graph plotter"""
-    g = Graph()
-    n1 = g.add_node("node1", **nodeargs)
-    n2 = g.add_node("node2", **nodeargs)
-    n3 = g.add_node("node3", **nodeargs)
-    g._wrap_fcns(toucher, printer)
-
-    out1 = n1._add_output("o1", allocatable=False)
-    out2 = n1._add_output("o2", allocatable=False)
-
-    _, out3 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
-    n2._add_input("i2")
-    n3._add_pair("i1", "o1", output_kws={"allocatable": False})
-
-    print(f"{out1=}, {out2=}")
-    (out1, out2) >> n2
-    out3 >> n3
-    g.close()
-
-    d = GraphDot(g)
-    d.savegraph("output/test1_00.png")
-
-
-def test_02():
-    """Simple test of the graph plotter"""
-    g = Graph()
-    n1 = g.add_node("node1", **nodeargs)
-    n2 = g.add_node("node2", **nodeargs)
-    n3 = g.add_node("node3", **nodeargs)
-    g._wrap_fcns(toucher, printer)
-
-    out1 = n1._add_output("o1", allocatable=False)
-    out2 = n1._add_output("o2", allocatable=False)
-
-    _, out3 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
-    n2._add_input("i2")
-
-    _, final = n3._add_pair("i1", "o1", output_kws={"allocatable": False})
-
-    (out1, out2) >> n2
-    out3 >> n3
-    g.close()
-
-    d = GraphDot(g)
-    d.savegraph("output/test2_00.png")
-
-    final.data
-    d = GraphDot(g)
-    d.savegraph("output/test2_01.png")
-
-
-def test_02a():
-    """Simple test of the graph plotter"""
-    g = Graph()
-    n1 = g.add_node("node1", **nodeargs)
-    n2 = g.add_node("node2", **nodeargs)
-    n3 = g.add_node("node3", **nodeargs)
-    n4 = g.add_node("node4", **nodeargs)
-    g._wrap_fcns(toucher, printer)
-
-    out1 = n1._add_output("o1", allocatable=False)
-
-    in2, out2 = n2._add_pair("i1", "o1", output_kws={"allocatable": False})
-    in3, out3 = n3._add_pair("i1", "o1", output_kws={"allocatable": False})
-    in4, out4 = n4._add_pair("i1", "o1", output_kws={"allocatable": False})
-
-    out1.repeat() >> (in2, in3, in4)
-    g.close()
-
-    d = GraphDot(g)
-    d.savegraph("output/test2a_00.png")
-
-    print(out4.data)
-    d = GraphDot(g)
-    d.savegraph("output/test2a_01.png")
diff --git a/subtrees/dagflow/test/test_graph_big.py b/subtrees/dagflow/test/test_graph_big.py
deleted file mode 100755
index 71a71ef43a02d28270d00a0e80a06908bd0af77c..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_graph_big.py
+++ /dev/null
@@ -1,164 +0,0 @@
-#!/usr/bin/env python
-from dagflow.graph import Graph
-from dagflow.graphviz import GraphDot
-from dagflow.printl import current_level, set_prefix_function
-from dagflow.wrappers import *
-
-set_prefix_function(lambda: "{:<2d} ".format(current_level()))
-
-counter = 0
-nodeargs = dict(typefunc=lambda: True)
-
-
-def test_graph_big_01():
-    """Create a graph of nodes and test evaluation features"""
-    g = Graph()
-    label = None
-
-    def plot(suffix=""):
-        global counter
-        d = GraphDot(g)
-        newlabel = label and label + suffix or suffix
-        if newlabel is not None:
-            d.set_label(newlabel)
-        d.savegraph("output/test_graph_big_{:03d}.png".format(counter))
-        counter += 1
-
-    def plotter(fcn, node, inputs, outputs):
-        plot(f"[start evaluating {node.name}]")
-        fcn(node, inputs, outputs)
-        plot(f"[done evaluating {node.name}]")
-
-    A1 = g.add_node("A1", **nodeargs)
-    A2 = g.add_node("A2", auto_freeze=True, label="{name}|frozen", **nodeargs)
-    A3 = g.add_node("A3", immediate=True, label="{name}|immediate", **nodeargs)
-    B = g.add_node("B", **nodeargs)
-    C1 = g.add_node("C1", **nodeargs)
-    C2 = g.add_node("C2", **nodeargs)
-    D = g.add_node("D", **nodeargs)
-    E = g.add_node("E", **nodeargs)
-    F = g.add_node("F", **nodeargs)
-    H = g.add_node("H", **nodeargs)
-    P = g.add_node("P", immediate=True, label="{name}|immediate", **nodeargs)
-
-    g._wrap_fcns(toucher, printer, plotter)
-
-    A1._add_output("o1", allocatable=False)
-    A2._add_output("o1", allocatable=False)
-    P._add_output("o1", allocatable=False)
-    A3._add_pair("i1", "o1", output_kws={"allocatable": False})
-    B._add_pair(
-        ("i1", "i2", "i3", "i4"),
-        ("o1", "o2"),
-        output_kws={"allocatable": False},
-    )
-    C1._add_output("o1", allocatable=False)
-    C2._add_output("o1", allocatable=False)
-    D._add_pair("i1", "o1", output_kws={"allocatable": False})
-    D._add_pair("i2", "o2", output_kws={"allocatable": False})
-    H._add_pair("i1", "o1", output_kws={"allocatable": False})
-    _, other = F._add_pair("i1", "o1", output_kws={"allocatable": False})
-    _, final = E._add_pair("i1", "o1", output_kws={"allocatable": False})
-
-    (A1, A2, (P >> A3), D[:1]) >> B >> (E, H)
-    ((C1, C2) >> D[:, 1]) >> F
-
-    g.print()
-    g.close()
-
-    label = "Initial graph state."
-    plot()
-
-    label = "Read E..."
-    plot()
-    plot()
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
-
-    label = "Taint D."
-    plot()
-    plot()
-    plot()
-    D.taint()
-    plot()
-    label = "Read F..."
-    other.data
-    label = "Done reading F."
-    plot()
-
-    label = "Read E..."
-    plot()
-    plot()
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
-
-    label = "Taint A2."
-    plot()
-    plot()
-    plot()
-    A2.taint()
-    plot()
-    label = "Read E..."
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
-
-    label = "Unfreeze A2 (tainted)."
-    plot()
-    plot()
-    plot()
-    A2.unfreeze()
-    plot()
-    label = "Read E..."
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
-
-    label = "Unfreeze A2 (not tainted)."
-    plot()
-    plot()
-    plot()
-    A2.unfreeze()
-    plot()
-    label = "Read E..."
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
-
-    label = "Taint P"
-    plot()
-    plot()
-    plot()
-    P.taint()
-    plot()
-    label = "Read E..."
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
-
-    label = "Invalidate P"
-    plot()
-    plot()
-    plot()
-    P.invalid = True
-    plot()
-
-    label = "Validate P"
-    plot()
-    plot()
-    plot()
-    P.invalid = False
-    plot()
-    label = "Read E..."
-    plot()
-    final.data
-    label = "Done reading E."
-    plot()
diff --git a/subtrees/dagflow/test/test_hooks.py b/subtrees/dagflow/test/test_hooks.py
deleted file mode 100755
index 5678da9ecfb42add4173f9db008b85f064b5c378..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_hooks.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-from numpy import arange, copyto, result_type
-from pytest import raises
-
-from dagflow.exception import (
-    CriticalError,
-    ReconnectionError,
-    UnclosedGraphError,
-)
-from dagflow.graph import Graph
-from dagflow.input_extra import MissingInputAddOne
-from dagflow.lib.Array import Array
-from dagflow.lib.WeightedSum import WeightedSum
-from dagflow.nodes import FunctionNode
-
-
-class ThreeInputsSum(FunctionNode):
-    def __init__(self, *args, **kwargs):
-        kwargs.setdefault(
-            "missing_input_handler", MissingInputAddOne(output_fmt="result")
-        )
-        super().__init__(*args, **kwargs)
-
-    def _fcn(self, _, inputs, outputs):
-        out = outputs["result"].data
-        copyto(out, inputs[0].data.copy())
-        for input in inputs[1:3]:
-            out += input.data
-        return out
-
-    def _typefunc(self) -> None:
-        """A output takes this function to determine the dtype and shape"""
-        if (y := len(self.inputs)) != 3:
-            raise CriticalError(
-                f"The node must have only 3 inputs, but given {y}: {self.inputs}!"
-            )
-        self.outputs["result"].dd.shape = self.inputs[0].dd.shape
-        self.outputs["result"].dd.dtype = result_type(
-            *tuple(inp.dd.dtype for inp in self.inputs)
-        )
-        self.logger.debug(
-            f"Node '{self.name}': dtype={self.outputs['result'].dd.dtype}, "
-            f"shape={self.outputs['result'].dd.shape}"
-        )
-
-
-def test_00(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr = Array("arr", arange(3, dtype="i"))  # [0, 1, 2]
-        node = ThreeInputsSum("threesum")
-        for _ in range(3):
-            # Error while evaluating before len(inputs) == 3
-            with raises(UnclosedGraphError):
-                node.eval()
-            arr >> node
-    assert (node.outputs["result"].data == [0, 3, 6]).all()
-
-
-def test_01(debug_graph):
-    with Graph(debug=debug_graph, close=True):
-        arr = Array("arr", arange(3, dtype="i"))  # [0, 1, 2]
-        ws = WeightedSum("weightedsum")
-        (arr, arr) >> ws
-        # Error while eval before setting the weight input
-        with raises(UnclosedGraphError):
-            ws.eval()
-        # multiply the first input by 2 and the second one by 3
-        Array("weight", (2, 3)) >> ws("weight")
-    with raises(ReconnectionError):
-        Array("weight", (2, 3)) >> ws("weight")
-    assert (ws.outputs["result"].data == [0, 5, 10]).all()
diff --git a/subtrees/dagflow/test/test_input_handler.py b/subtrees/dagflow/test/test_input_handler.py
deleted file mode 100755
index d8a763aa96e2e858fcb7ddea637861dc027c8b31..0000000000000000000000000000000000000000
--- a/subtrees/dagflow/test/test_input_handler.py
+++ /dev/null
@@ -1,266 +0,0 @@
-#!/usr/bin/env python
-"""Test missing input handlers"""
-
-from contextlib import suppress
-
-from dagflow.graph import Graph
-from dagflow.graphviz import savegraph
-from dagflow.input_extra import *
-from dagflow.wrappers import *
-
-nodeargs = dict(typefunc=lambda: True)
-
-
-def test_00():
-    """Test default handler: fail on connect"""
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add", missing_input_handler=MissingInputFail, **nodeargs
-    )
-    graph.close()
-
-    with suppress(Exception):
-        (in1, in2, in3) >> s
-    savegraph(
-        graph, "output/missing_input_handler_00.pdf", label="Fail on connect"
-    )
-
-
-def test_01():
-    """Test InputAdd handler: add new input on each new connect"""
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add",
-        missing_input_handler=MissingInputAdd(
-            output_kws={"allocatable": False}
-        ),
-        **nodeargs
-    )
-
-    (in1, in2, in3) >> s
-    in4 >> s
-
-    print()
-    print("test 01")
-    s.print()
-    graph.close()
-
-    savegraph(
-        graph, "output/missing_input_handler_01.pdf", label="Add only inputs"
-    )
-
-
-def test_02():
-    """
-    Test InputAddPair handler: add a new input on each new connect
-    and add a paired child output for each input
-    """
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add",
-        missing_input_handler=MissingInputAddPair(
-            output_kws={"allocatable": False}
-        ),
-        **nodeargs
-    )
-
-    (in1, in2, in3) >> s
-    in4 >> s
-
-    print()
-    print("test 02")
-    s.print()
-
-    for input, output in zip(s.inputs, s.outputs):
-        assert input.child_output is output
-    graph.close()
-
-    savegraph(
-        graph,
-        "output/missing_input_handler_02.pdf",
-        label="Add inputs and an output for each input",
-    )
-
-
-def test_03():
-    """
-    Test InputAddOne handler: add new input on each new connect and
-    add an output if needed
-    """
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add",
-        missing_input_handler=MissingInputAddOne(
-            output_kws={"allocatable": False}
-        ),
-        **nodeargs
-    )
-
-    (in1, in2, in3) >> s
-    in4 >> s
-
-    print()
-    print("test 03")
-    s.print()
-    graph.close()
-
-    savegraph(
-        graph,
-        "output/missing_input_handler_03.pdf",
-        label="Add only inputs and only one output",
-    )
-
-
-def test_04():
-    """
-    Test InputAddOne handler: add new input on each new connect and
-    add an output if needed.
-    This version also sets the child_output for each input
-    """
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add",
-        missing_input_handler=MissingInputAddOne(
-            add_child_output=True, output_kws={"allocatable": False}
-        ),
-        **nodeargs
-    )
-
-    (in1, in2, in3) >> s
-    in4 >> s
-
-    print()
-    print("test 04")
-    s.print()
-
-    output = s.outputs[0]
-    for input in s.inputs:
-        assert input.child_output is output
-    graph.close()
-
-    savegraph(
-        graph,
-        "output/missing_input_handler_04.pdf",
-        label="Add inputs and only one output",
-    )
-
-
-def test_05():
-    """
-    Test InputAddEach handler: add new input on each new connect and
-    add an output for each >> group
-    """
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add",
-        missing_input_handler=MissingInputAddEach(
-            add_child_output=False, output_kws={"allocatable": False}
-        ),
-        **nodeargs
-    )
-
-    (in1, in2, in3) >> s
-    in4 >> s
-
-    print()
-    print("test 05")
-    s.print()
-    graph.close()
-
-    savegraph(
-        graph,
-        "output/missing_input_handler_05.pdf",
-        label="Add inputs and an output for each block",
-    )
-
-
-def test_06():
-    """
-    Test InputAddEach handler: add new input on each new connect and
-    add an output for each >> group.
-    This version also sets the child_output for each input
-    """
-    graph = Graph()
-
-    in1 = graph.add_node("n1", **nodeargs)
-    in2 = graph.add_node("n2", **nodeargs)
-    in3 = graph.add_node("n3", **nodeargs)
-    in4 = graph.add_node("n4", **nodeargs)
-    for node in (in1, in2, in3, in4):
-        node.add_output("o1", allocatable=False)
-
-    s = graph.add_node(
-        "add",
-        missing_input_handler=MissingInputAddEach(
-            add_child_output=True, output_kws={"allocatable": False}
-        ),
-        **nodeargs
-    )
-
-    (in1, in2, in3) >> s
-    in4 >> s
-
-    print()
-    print("test 06")
-    s.print()
-
-    o1, o2 = s.outputs
-    for input in s.inputs[:3]:
-        assert input.child_output is o1
-    for input in s.inputs[3:]:
-        assert input.child_output is o2
-    graph.close()
-
-    savegraph(
-        graph,
-        "output/missing_input_handler_06.pdf",
-        label="Add inputs and an output for each block",
-    )