diff --git a/DDDigi/include/DDDigi/noise/DigiRandomNoise.h b/DDDigi/include/DDDigi/noise/DigiRandomNoise.h
index 3e20a878f93c0713f8b26594dedbba3815d468d2..51a1e3b39a99e95257a3284b017acb5c0570cb74 100644
--- a/DDDigi/include/DDDigi/noise/DigiRandomNoise.h
+++ b/DDDigi/include/DDDigi/noise/DigiRandomNoise.h
@@ -15,7 +15,7 @@
 
 /// Framework include files
 #include <DDDigi/DigiSignalProcessor.h>
-#include <DDDigi/FalphaNoise.h>
+#include <DDDigi/noise/FalphaNoise.h>
 
 /// Namespace for the AIDA detector description toolkit
 namespace dd4hep {
diff --git a/DDDigi/src/DigiData.cpp b/DDDigi/src/DigiData.cpp
index cf7bff4f63f3c30b341666f058280dc97d7477be..68b7a933f6a74c70d6e356af768fbece05423f9a 100644
--- a/DDDigi/src/DigiData.cpp
+++ b/DDDigi/src/DigiData.cpp
@@ -92,12 +92,12 @@ std::size_t DepositMapping::merge(DepositMapping&& updates) {
 
 /// Merge new deposit map onto existing map
 std::size_t ParticleMapping::merge(ParticleMapping&& updates) {
   std::size_t update_size = updates.size();
+#if defined(__GNUC__) && (__GNUC__ >= 10)
   for( ParticleMapping::value_type& c : updates ) {
     Particle part(std::move(c.second));
-#if defined(__GNUC__) && (__GNUC__ >= 10)
     this->push(c.first, std::move(part));
-#endif
   }
+#endif
   return update_size;
 }
@@ -105,13 +105,12 @@ void ParticleMapping::push(Key key, Particle&& part) {
 #if defined(__GNUC__) && (__GNUC__ < 10)
   /// Lower compiler version have a bad implementation of std::any
   bool ret = false;
-  if ( part.history.has_value() ) {}
 #else
   bool ret = this->emplace(key.key, std::move(part)).second;
 #endif
   if ( !ret ) {
-    except("ParticleMapping","Error in particle map. Duplicate ID: mask:%04X Number:%d",
-           key.values.mask, key.values.item);
+    except("ParticleMapping","Error in particle map. Duplicate ID: mask:%04X Number:%d History:%s",
+           key.values.mask, key.values.item, yes_no(part.history.has_value()));
   }
 }
 
@@ -126,14 +125,13 @@ bool DataSegment::emplace(Key key, std::any&& item) {
 #if defined(__GNUC__) && (__GNUC__ < 10)
   /// Lower compiler version have a bad implementation of std::any
   bool ret = false;
-  if ( item.has_value() ) {}
 #else
   bool ret = data.emplace(key.key, std::move(item)).second;
 #endif
   if ( !ret ) {
     Key k(key);
-    except("DataSegment","Error in DataSegment map. Duplicate ID: mask:%04X Number:%d",
-           k.values.mask, k.values.item);
+    except("DataSegment","Error in DataSegment map. Duplicate ID: mask:%04X Number:%d Value:%s",
+           k.values.mask, k.values.item, yes_no(item.has_value()));
   }
   return ret;
 }
diff --git a/DDDigi/src/DigiKernel.cpp b/DDDigi/src/DigiKernel.cpp
index 1c41cba4f35c62f706f0f7ce56a9dd1fa07e4f1e..32c5f044e08bb7e16b2d3e19a89919619f96ca6a 100644
--- a/DDDigi/src/DigiKernel.cpp
+++ b/DDDigi/src/DigiKernel.cpp
@@ -319,8 +319,8 @@ DigiActionSequence& DigiKernel::outputAction() const {
 
 /// Submit a bunch of actions to be executed in parallel
 void DigiKernel::submit (const std::vector<CallWrapper*>& actions) const {
-  bool parallel = 0 != internals->tbbInit && internals->numThreads>0;
 #ifdef DD4HEP_USE_TBB
+  bool parallel = 0 != internals->tbbInit && internals->numThreads>0;
   if ( parallel ) {
     tbb::task_group que;
     for ( auto* algo : actions )
diff --git a/DDDigi/src/DigiMultiContainerProcessor.cpp b/DDDigi/src/DigiMultiContainerProcessor.cpp
index 9e1f648cc03afbc2129826872f90b4b4f51bed33..00680217903e36dac88bb9cbf5bb32c1583377dc 100644
--- a/DDDigi/src/DigiMultiContainerProcessor.cpp
+++ b/DDDigi/src/DigiMultiContainerProcessor.cpp
@@ -112,5 +112,5 @@ DigiContainerProcessor::DigiContainerProcessor(const DigiKernel& kernel, const s
 
 /// Main functional callback if specific work is known
 void DigiContainerProcessor::execute(DigiContext& context, WorkItems& data) const {
-  info("Hello there %p", (void*)&data);
+  info("Hello there [Context:%p] %p", (void*)&context, (void*)&data);
 }
diff --git a/examples/DDDigi/scripts/DigiTest.py b/examples/DDDigi/scripts/DigiTest.py
index 84cf2d952feca4ab101c1642999d50bb09eef57f..f47175518e0cf281c9b76b205fa52a31da164461 100644
--- a/examples/DDDigi/scripts/DigiTest.py
+++ b/examples/DDDigi/scripts/DigiTest.py
@@ -82,9 +82,18 @@
   def data_containers(self):
     return list(self.attenuation.keys())
 
-  def containers(self, first, last):
+  def containers(self, count):
     keys = list(self.attenuation.keys())
-    return keys[first:last]
+    result = []
+    cont = []
+    for i in range(len(keys)):
+      cont.append(keys[i])
+      if len(cont) == count:
+        result.append(cont)
+        cont = []
+    if cont:
+      result.append(cont)
+    return result
 
   def check_creation(self, objs):
     for o in objs:
diff --git a/examples/DDDigi/scripts/TestMultiContainerParallel.py b/examples/DDDigi/scripts/TestMultiContainerParallel.py
index 150143cd31d4bb9ef2c6c4957fef92190fb53abb..c491555b02f035ae4d879df0fa2f5f8c387ca2d1 100644
--- a/examples/DDDigi/scripts/TestMultiContainerParallel.py
+++ b/examples/DDDigi/scripts/TestMultiContainerParallel.py
@@ -24,13 +24,10 @@ def run():
   event = digi.event_action('DigiSequentialActionSequence/EventAction')
   proc = event.adopt_action('DigiMultiContainerProcessor/ContainerProc',
                             input_masks=[0x0, 0x1, 0x2, 0x3])
-  cont = digi.data_containers()
-  num = int((len(cont)+2)/3)
-  for i in range(num):
-    #merge = digi.event_action('DigiSegmentDepositPrint/SegmentPrint_%03d'%(i,), register=None)
+  conts = digi.containers(3)
+  for i in range(len(conts)):
     merge = dddigi.Action(digi.kernel(), 'DigiContainerProcessor/SegmentPrint_%03d'%(i,));
-    conts = digi.containers(i*3,(i+1)*3)
-    proc.adopt_processor(merge, conts)
+    proc.adopt_processor(merge, conts[i])
   #dump = event.adopt_action('DigiStoreDump/StoreDump')
   #digi.check_creation([combine, dump, splitter])
 