CoreNEURON
setup_fornetcon.cpp
1 /*
2 # =============================================================================
3 # Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
4 #
5 # See top-level LICENSE file for details.
6 # =============================================================================
7 */
8 
9 #include "coreneuron/network/setup_fornetcon.hpp"
10 #include "coreneuron/coreneuron.hpp"
11 #include "coreneuron/network/netcon.hpp"
12 #include "coreneuron/nrniv/nrniv_decl.h"
13 #include <map>
14 #include <utility>
15 
16 namespace coreneuron {
17 
18 /**
19  If FOR_NETCONS is in use, set up the NrnThread fornetcon-related info.
20 
21  i.e. NrnThread._fornetcon_perm_indices, NrnThread._fornetcon_weight_perm,
22  and the relevant dparam element of each mechanism instance that uses
23  a FOR_NETCONS statement.
24 
25  Makes use of nrn_fornetcon_cnt_, nrn_fornetcon_type_,
26  and nrn_fornetcon_index_ that were specified during registration of
27  mechanisms that use FOR_NETCONS.
28 
29  nrn_fornetcon_cnt_ is the number of mechanisms that use FOR_NETCONS,
30  nrn_fornetcon_type_ is an int array of size nrn_fornetcon_cnt_ that specifies
31  the mechanism type.
32  nrn_fornetcon_index_ is an int array of size nrn_fornetcon_cnt_ that
33  specifies the index into an instance's dparam int array having the
34  fornetcon semantics.
35 
36  FOR_NETCONS (args) loops over all NetCons connecting to this
37  target instance; args are the names of the items of each NetCon's
38  weight vector (the same items as in the enclosing NET_RECEIVE, but with
39  possibly different local names).
40 
41  NrnThread._weights is a vector of weight groups where the number of groups
42  is the number of NetCons in this thread and each group has a size
43  equal to the number of args in the target's NET_RECEIVE block. The order
44  of these groups is the NetCon object order in HOC (the construction order).
45  So the weight vector indices for the NetCons in the FOR_NETCONS loop
46  are not adjacent.
47 
48  NrnThread._fornetcon_weight_perm is an index vector into the
49  NrnThread._weights vector such that the indices that target a given
50  mechanism instance are adjacent.
51  NrnThread._fornetcon_perm_indices is an index vector into
52  NrnThread._fornetcon_weight_perm giving, for each target instance, the first
53  of its list of NetCon weight indices. The index of _fornetcon_perm_indices
54  holding this first-in-the-list entry is stored in the mechanism instance's
55  dparam at the slot with fornetcon semantics. (Note that the next element of
56  _fornetcon_perm_indices points to the first entry of the next target instance.)
57 
58 **/
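For orientation, a minimal sketch of how a FOR_NETCONS loop could walk these structures once they are built. The function and parameter names (perm_indices, weight_perm, weights, slot) are illustrative stand-ins for NrnThread._fornetcon_perm_indices, NrnThread._fornetcon_weight_perm, the NetCon weight storage, and the value held in the instance's fornetcon dparam slot; this is not the generated mechanism code.

#include <cstddef>

// Visit every NetCon weight group that targets one mechanism instance.
// perm_indices[slot] .. perm_indices[slot + 1] delimit this instance's
// adjacent entries in weight_perm; each entry is an offset into weights.
void for_netcons_of_instance(const std::size_t* perm_indices,
                             const std::size_t* weight_perm,
                             double* weights,
                             int slot) {
    for (std::size_t i = perm_indices[slot]; i < perm_indices[slot + 1]; ++i) {
        double* w = weights + weight_perm[i];  // this NetCon's weight group
        w[0] += 0.0;  // ... the FOR_NETCONS body would use w[0], w[1], ...
    }
}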
59 
60 static int* fornetcon_slot(const int mtype,
61  const int instance,
62  const int fnslot,
63  const NrnThread& nt) {
64  int layout = corenrn.get_mech_data_layout()[mtype];
65  int sz = corenrn.get_prop_dparam_size()[mtype];
66  Memb_list* ml = nt._ml_list[mtype];
67  int* fn = nullptr;
68  if (layout == Layout::AoS) {
69  fn = ml->pdata + (instance * sz + fnslot);
70  } else if (layout == Layout::SoA) {
71  int padded_cnt = nrn_soa_padded_size(ml->nodecount, layout);
72  fn = ml->pdata + (fnslot * padded_cnt + instance);
73  }
74  return fn;
75 }
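The two branches above are the usual AoS versus SoA addressing of one dparam element. A standalone illustration of the same arithmetic follows; sz, fnslot, instance and padded_cnt are made-up numbers, with padded_cnt standing in for whatever nrn_soa_padded_size returns.

#include <cassert>

int main() {
    const int sz = 4;           // ints of dparam per instance (assumed)
    const int fnslot = 2;       // position of the fornetcon slot in dparam (assumed)
    const int instance = 3;     // which mechanism instance (assumed)
    const int padded_cnt = 16;  // stand-in for nrn_soa_padded_size(nodecount, SoA)

    // AoS: the dparam records of the instances are contiguous, so step over
    // whole records of size sz.
    const int aos = instance * sz + fnslot;          // 3 * 4 + 2 = 14
    // SoA: a given slot of all instances is contiguous (and padded), so step
    // over whole padded columns.
    const int soa = fnslot * padded_cnt + instance;  // 2 * 16 + 3 = 35
    assert(aos == 14 && soa == 35);
    return 0;
}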
76 
77 void setup_fornetcon_info(NrnThread& nt) {
78  if (nrn_fornetcon_cnt_ == 0) {
79  return;
80  }
81 
82  // Mechanism types in use that have FOR_NETCONS statements.
83  // It is convenient to also have the dparam fornetcon slot, so use a map
84  // instead of a set.
85  std::map<int, int> type_to_slot;
86  for (int i = 0; i < nrn_fornetcon_cnt_; ++i) {
87  int type = nrn_fornetcon_type_[i];
88  Memb_list* ml = nt._ml_list[type];
89  if (ml && ml->nodecount) {
90  type_to_slot[type] = nrn_fornetcon_index_[i];
91  }
92  }
93  if (type_to_slot.empty()) {
94  return;
95  }
96 
97  // How many NetCons (weight groups) are involved.
98  // Also count how many weight groups for each target instance.
99  // For the latter we can count in the dparam fornetcon slot.
100 
101  // Zero the dparam fornetcon slot of each instance for counting, and count the number of slots.
102  size_t n_perm_indices = 0;
103  for (const auto& kv: type_to_slot) {
104  int mtype = kv.first;
105  int fnslot = kv.second;
106  int nodecount = nt._ml_list[mtype]->nodecount;
107  for (int i = 0; i < nodecount; ++i) {
108  int* fn = fornetcon_slot(mtype, i, fnslot, nt);
109  *fn = 0;
110  n_perm_indices += 1;
111  }
112  }
113 
114  // Count how many weight groups target each slot and the total number of weight groups
115  size_t n_weight_perm = 0;
116  for (int i = 0; i < nt.n_netcon; ++i) {
117  NetCon& nc = nt.netcons[i];
118  int mtype = nc.target_->_type;
119  auto search = type_to_slot.find(mtype);
120  if (search != type_to_slot.end()) {
121  int i_instance = nc.target_->_i_instance;
122  int* fn = fornetcon_slot(mtype, i_instance, search->second, nt);
123  *fn += 1;
124  n_weight_perm += 1;
125  }
126  }
127 
128  // The displacement vector has an extra element: with x[0] = 0, the count for
129  // item i is x[i+1] - x[i], so the count for the last item (n-1) requires x[n].
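 // For example: if three target instances receive 2, 0 and 3 NetCons
 // respectively, the per-instance counts {2, 0, 3} become the displacement
 // vector _fornetcon_perm_indices = {0, 2, 2, 5}, and _fornetcon_weight_perm
 // gets 5 entries.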
130  delete[] std::exchange(nt._fornetcon_perm_indices, nullptr);
131  delete[] std::exchange(nt._fornetcon_weight_perm, nullptr);
132  // Manual memory management is needed because NrnThread is copied to the GPU
133  // and its device-side pointers are updated there. Note that the {} ensures
134  // the allocated arrays are zero-initialised.
135  nt._fornetcon_perm_indices_size = n_perm_indices + 1;
136  nt._fornetcon_perm_indices = new size_t[nt._fornetcon_perm_indices_size]{};
137  nt._fornetcon_weight_perm_size = n_weight_perm;
138  nt._fornetcon_weight_perm = new size_t[nt._fornetcon_weight_perm_size]{};
139 
140  // From the dparam fornetcon slots, compute the displacement vector, and
141  // set each dparam fornetcon slot to its instance's starting index into
142  // _fornetcon_weight_perm so that _fornetcon_weight_perm can be filled later.
143  size_t i_perm_indices = 0;
144  nt._fornetcon_perm_indices[0] = 0;
145  for (const auto& kv: type_to_slot) {
146  int mtype = kv.first;
147  int fnslot = kv.second;
148  int nodecount = nt._ml_list[mtype]->nodecount;
149  for (int i = 0; i < nodecount; ++i) {
150  int* fn = fornetcon_slot(mtype, i, fnslot, nt);
151  nt._fornetcon_perm_indices[i_perm_indices + 1] =
152  nt._fornetcon_perm_indices[i_perm_indices] + size_t(*fn);
153  *fn = int(nt._fornetcon_perm_indices[i_perm_indices]);
154  i_perm_indices += 1;
155  }
156  }
157 
158  // One more iteration over the NetCons to fill in the weight indices of
159  // nt._fornetcon_weight_perm. To help with this we increment the
160  // dparam fornetcon slot on each use.
161  for (int i = 0; i < nt.n_netcon; ++i) {
162  NetCon& nc = nt.netcons[i];
163  int mtype = nc.target_->_type;
164  auto search = type_to_slot.find(mtype);
165  if (search != type_to_slot.end()) {
166  int i_instance = nc.target_->_i_instance;
167  int* fn = fornetcon_slot(mtype, i_instance, search->second, nt);
168  size_t nc_w_index = size_t(nc.u.weight_index_);
169  nt._fornetcon_weight_perm[size_t(*fn)] = nc_w_index;
170  *fn += 1; // next item conceptually adjacent
171  }
172  }
173 
174  // Put back the proper values (indices into _fornetcon_perm_indices) into the dparam fornetcon slots
175  i_perm_indices = 0;
176  for (const auto& kv: type_to_slot) {
177  int mtype = kv.first;
178  int fnslot = kv.second;
179  int nodecount = nt._ml_list[mtype]->nodecount;
180  for (int i = 0; i < nodecount; ++i) {
181  int* fn = fornetcon_slot(mtype, i, fnslot, nt);
182  *fn = int(i_perm_indices);
183  i_perm_indices += 1;
184  }
185  }
186 }
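To make the end result concrete, here is a small self-contained example (not CoreNEURON code; all names and numbers are made up) that mimics the counting, displacement and fill passes above for a toy thread: two target instances and three NetCons whose weight groups start at offsets 0, 4 and 8 of the weight vector, with NetCons 0 and 2 targeting instance 0 and NetCon 1 targeting instance 1.

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // Toy inputs: target instance and weight index of each NetCon,
    // in NetCon construction order (as in nt.netcons / nc.u.weight_index_).
    const std::vector<int> nc_instance = {0, 1, 0};
    const std::vector<std::size_t> nc_weight_index = {0, 4, 8};
    const int n_instances = 2;

    // Pass 1: count NetCons per target instance (done in the dparam slot above).
    std::vector<std::size_t> count(n_instances, 0);
    for (int inst : nc_instance) {
        ++count[inst];
    }

    // Pass 2: displacement vector, with one extra element.
    std::vector<std::size_t> perm_indices(n_instances + 1, 0);
    for (int i = 0; i < n_instances; ++i) {
        perm_indices[i + 1] = perm_indices[i] + count[i];
    }

    // Pass 3: fill the weight permutation so each instance's entries are adjacent.
    std::vector<std::size_t> fill = perm_indices;  // next free position per instance
    std::vector<std::size_t> weight_perm(perm_indices.back());
    for (std::size_t i = 0; i < nc_instance.size(); ++i) {
        weight_perm[fill[nc_instance[i]]++] = nc_weight_index[i];
    }

    // Expected output: perm_indices = {0, 2, 3}, weight_perm = {0, 8, 4}.
    for (std::size_t v : perm_indices) std::printf("%zu ", v);
    std::printf("| ");
    for (std::size_t v : weight_perm) std::printf("%zu ", v);
    std::printf("\n");
    return 0;
}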
187 
188 } // namespace coreneuron