CoreNEURON
phase2.cpp
Go to the documentation of this file.
1 /*
2 # =============================================================================
3 # Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
4 #
5 # See top-level LICENSE file for details.
6 # =============================================================================
7 */
8 
21 
22 #if defined(_OPENMP)
23 #include <omp.h>
24 #endif
25 
26 int (*nrn2core_get_dat2_1_)(int tid,
27  int& n_real_cell,
28  int& ngid,
29  int& n_real_gid,
30  int& nnode,
31  int& ndiam,
32  int& nmech,
33  int*& tml_index,
34  int*& ml_nodecount,
35  int& nidata,
36  int& nvdata,
37  int& nweight);
38 
39 int (*nrn2core_get_dat2_2_)(int tid,
40  int*& v_parent_index,
41  double*& a,
42  double*& b,
43  double*& area,
44  double*& v,
45  double*& diamvec);
46 
47 int (*nrn2core_get_dat2_mech_)(int tid,
48  size_t i,
49  int dsz_inst,
50  int*& nodeindices,
51  double*& data,
52  int*& pdata,
53  std::vector<int>& pointer2type);
54 
55 int (*nrn2core_get_dat2_3_)(int tid,
56  int nweight,
57  int*& output_vindex,
58  double*& output_threshold,
59  int*& netcon_pnttype,
60  int*& netcon_pntindex,
61  double*& weights,
62  double*& delays);
63 
64 int (*nrn2core_get_dat2_corepointer_)(int tid, int& n);
65 
67  int type,
68  int& icnt,
69  int& dcnt,
70  int*& iarray,
71  double*& darray);
72 
73 int (*nrn2core_get_dat2_vecplay_)(int tid, std::vector<int>& indices);
74 
76  int i,
77  int& vptype,
78  int& mtype,
79  int& ix,
80  int& sz,
81  double*& yvec,
82  double*& tvec,
83  int& last_index,
84  int& discon_index,
85  int& ubound_index);
86 
87 namespace coreneuron {
88 template <typename T>
89 inline void mech_data_layout_transform(T* data, int cnt, int sz, int layout) {
90  if (layout == Layout::AoS) {
91  return;
92  }
93  // layout is equal to Layout::SoA
94  int align_cnt = nrn_soa_padded_size(cnt, layout);
95  std::vector<T> d(cnt * sz);
96  // copy matrix
97  for (int i = 0; i < cnt; ++i) {
98  for (int j = 0; j < sz; ++j) {
99  d[i * sz + j] = data[i * sz + j];
100  }
101  }
102  // transform memory layout
103  for (int i = 0; i < cnt; ++i) {
104  for (int j = 0; j < sz; ++j) {
105  data[i + j * align_cnt] = d[i * sz + j];
106  }
107  }
108 }
109 
111  n_real_cell = F.read_int();
112  n_output = F.read_int();
113  n_real_output = F.read_int();
114  n_node = F.read_int();
115  n_diam = F.read_int();
116  n_mech = F.read_int();
117  mech_types = std::vector<int>(n_mech, 0);
118  nodecounts = std::vector<int>(n_mech, 0);
119  for (int i = 0; i < n_mech; ++i) {
120  mech_types[i] = F.read_int();
121  nodecounts[i] = F.read_int();
122  }
123 
124  // check mechanism compatibility before reading data
125  check_mechanism();
126 
127  n_idata = F.read_int();
128  n_vdata = F.read_int();
129  int n_weight = F.read_int();
130  v_parent_index = (int*) ecalloc_align(n_node, sizeof(int));
132 
133  int n_data_padded = nrn_soa_padded_size(n_node, SOA_LAYOUT);
134  {
135  { // Compute size of _data and allocate
136  int n_data = 6 * n_data_padded;
137  if (n_diam > 0) {
138  n_data += n_data_padded;
139  }
140  for (int i = 0; i < n_mech; ++i) {
141  int layout = corenrn.get_mech_data_layout()[mech_types[i]];
142  int n = nodecounts[i];
144  n_data = nrn_soa_byte_align(n_data);
145  n_data += nrn_soa_padded_size(n, layout) * sz;
146  }
147  _data = (double*) ecalloc_align(n_data, sizeof(double));
148  }
149  F.read_array<double>(_data + 2 * n_data_padded, n_node);
150  F.read_array<double>(_data + 3 * n_data_padded, n_node);
151  F.read_array<double>(_data + 5 * n_data_padded, n_node);
152  F.read_array<double>(_data + 4 * n_data_padded, n_node);
153  if (n_diam > 0) {
154  F.read_array<double>(_data + 6 * n_data_padded, n_node);
155  }
156  }
157 
158  size_t offset = 6 * n_data_padded;
159  if (n_diam > 0) {
160  offset += n_data_padded;
161  }
162  for (int i = 0; i < n_mech; ++i) {
163  int layout = corenrn.get_mech_data_layout()[mech_types[i]];
164  int n = nodecounts[i];
167  offset = nrn_soa_byte_align(offset);
168  std::vector<int> nodeindices;
170  nodeindices = F.read_vector<int>(n);
171  }
172  F.read_array<double>(_data + offset, sz * n);
173  offset += nrn_soa_padded_size(n, layout) * sz;
174  std::vector<int> pdata;
175  if (dsz > 0) {
176  pdata = F.read_vector<int>(dsz * n);
177  }
178  tmls.emplace_back(TML{nodeindices, pdata, mech_types[i], {}, {}});
179  if (dsz > 0) {
180  int sz = F.read_int();
181  if (sz) {
182  auto& p2t = tmls.back().pointer2type;
183  p2t = F.read_vector<int>(sz);
184  }
185  }
186  }
187  output_vindex = F.read_vector<int>(nt.n_presyn);
189  pnttype = F.read_vector<int>(nt.n_netcon);
190  pntindex = F.read_vector<int>(nt.n_netcon);
191  weights = F.read_vector<double>(n_weight);
192  delay = F.read_vector<double>(nt.n_netcon);
194 
195  for (int i = 0; i < n_mech; ++i) {
196  if (!corenrn.get_bbcore_read()[mech_types[i]]) {
197  continue;
198  }
199  tmls[i].type = F.read_int();
200  int icnt = F.read_int();
201  int dcnt = F.read_int();
202  if (icnt > 0) {
203  tmls[i].iArray = F.read_vector<int>(icnt);
204  }
205  if (dcnt > 0) {
206  tmls[i].dArray = F.read_vector<double>(dcnt);
207  }
208  }
209 
210  int n_vec_play_continuous = F.read_int();
211  vec_play_continuous.reserve(n_vec_play_continuous);
212  for (int i = 0; i < n_vec_play_continuous; ++i) {
213  VecPlayContinuous_ item;
214  item.vtype = F.read_int();
215  item.mtype = F.read_int();
216  item.ix = F.read_int();
217  int sz = F.read_int();
218  item.yvec = IvocVect(sz);
219  item.tvec = IvocVect(sz);
220  F.read_array<double>(item.yvec.data(), sz);
221  F.read_array<double>(item.tvec.data(), sz);
222  vec_play_continuous.push_back(std::move(item));
223  }
224 
225  // store current checkpoint state to continue reading mapping
226  // The checkpoint numbering in phase 3 is a continuing of phase 2, and so will be restored
227  F.record_checkpoint();
228 
229  if (F.eof())
230  return;
231 
232  nrn_assert(F.read_int() == n_vec_play_continuous);
233 
234  for (int i = 0; i < n_vec_play_continuous; ++i) {
235  auto& vecPlay = vec_play_continuous[i];
236  vecPlay.last_index = F.read_int();
237  vecPlay.discon_index = F.read_int();
238  vecPlay.ubound_index = F.read_int();
239  }
240 
241  patstim_index = F.read_int();
242 
243  nrn_assert(F.read_int() == -1);
244 
245  for (int i = 0; i < nt.n_presyn; ++i) {
246  preSynConditionEventFlags.push_back(F.read_int());
247  }
248 
249  nrn_assert(F.read_int() == -1);
250  restore_events(F);
251 
252  nrn_assert(F.read_int() == -1);
253  restore_events(F);
254 }
255 
256 void Phase2::read_direct(int thread_id, const NrnThread& nt) {
257  int* types_ = nullptr;
258  int* nodecounts_ = nullptr;
259  int n_weight;
260  (*nrn2core_get_dat2_1_)(thread_id,
261  n_real_cell,
262  n_output,
264  n_node,
265  n_diam,
266  n_mech,
267  types_,
268  nodecounts_,
269  n_idata,
270  n_vdata,
271  n_weight);
272  mech_types = std::vector<int>(types_, types_ + n_mech);
273  delete[] types_;
274 
275  nodecounts = std::vector<int>(nodecounts_, nodecounts_ + n_mech);
276  delete[] nodecounts_;
277 
278  check_mechanism();
279 
280  // TODO: fix it in the future
281  int n_data_padded = nrn_soa_padded_size(n_node, SOA_LAYOUT);
282  int n_data = 6 * n_data_padded;
283  if (n_diam > 0) {
284  n_data += n_data_padded;
285  }
286  for (int i = 0; i < n_mech; ++i) {
287  int layout = corenrn.get_mech_data_layout()[mech_types[i]];
288  int n = nodecounts[i];
290  n_data = nrn_soa_byte_align(n_data);
291  n_data += nrn_soa_padded_size(n, layout) * sz;
292  }
293  _data = (double*) ecalloc_align(n_data, sizeof(double));
294 
295  v_parent_index = (int*) ecalloc_align(n_node, sizeof(int));
296  double* actual_a = _data + 2 * n_data_padded;
297  double* actual_b = _data + 3 * n_data_padded;
298  double* actual_v = _data + 4 * n_data_padded;
299  double* actual_area = _data + 5 * n_data_padded;
300  double* actual_diam = n_diam > 0 ? _data + 6 * n_data_padded : nullptr;
301  (*nrn2core_get_dat2_2_)(
302  thread_id, v_parent_index, actual_a, actual_b, actual_area, actual_v, actual_diam);
303 
304  tmls.resize(n_mech);
305 
306  auto& param_sizes = corenrn.get_prop_param_size();
307  auto& dparam_sizes = corenrn.get_prop_dparam_size();
308  int dsz_inst = 0;
309  size_t offset = 6 * n_data_padded;
310  if (n_diam > 0)
311  offset += n_data_padded;
312  for (int i = 0; i < n_mech; ++i) {
313  auto& tml = tmls[i];
314  int type = mech_types[i];
315  int layout = corenrn.get_mech_data_layout()[type];
316  offset = nrn_soa_byte_align(offset);
317 
318  tml.type = type;
319  // artificial cell don't use nodeindices
320  if (!corenrn.get_is_artificial()[type]) {
321  tml.nodeindices.resize(nodecounts[i]);
322  }
323  tml.pdata.resize(nodecounts[i] * dparam_sizes[type]);
324 
325  int* nodeindices_ = nullptr;
326  double* data_ = _data + offset;
327  int* pdata_ = const_cast<int*>(tml.pdata.data());
328  (*nrn2core_get_dat2_mech_)(thread_id,
329  i,
330  dparam_sizes[type] > 0 ? dsz_inst : 0,
331  nodeindices_,
332  data_,
333  pdata_,
334  tml.pointer2type);
335  if (dparam_sizes[type] > 0)
336  dsz_inst++;
337  offset += nrn_soa_padded_size(nodecounts[i], layout) * param_sizes[type];
338  if (nodeindices_) {
339  std::copy(nodeindices_, nodeindices_ + nodecounts[i], tml.nodeindices.data());
340  free(nodeindices_); // not free_memory because this is allocated by NEURON?
341  }
342  if (corenrn.get_is_artificial()[type]) {
343  assert(nodeindices_ == nullptr);
344  }
345  }
346 
347  int* output_vindex_ = nullptr;
348  double* output_threshold_ = nullptr;
349  int* pnttype_ = nullptr;
350  int* pntindex_ = nullptr;
351  double* weight_ = nullptr;
352  double* delay_ = nullptr;
353  (*nrn2core_get_dat2_3_)(thread_id,
354  n_weight,
355  output_vindex_,
356  output_threshold_,
357  pnttype_,
358  pntindex_,
359  weight_,
360  delay_);
361 
362  output_vindex = std::vector<int>(output_vindex_, output_vindex_ + nt.n_presyn);
363  delete[] output_vindex_;
364 
365  output_threshold = std::vector<double>(output_threshold_, output_threshold_ + n_real_output);
366  delete[] output_threshold_;
367 
368  int n_netcon = nt.n_netcon;
369  pnttype = std::vector<int>(pnttype_, pnttype_ + n_netcon);
370  delete[] pnttype_;
371 
372  pntindex = std::vector<int>(pntindex_, pntindex_ + n_netcon);
373  delete[] pntindex_;
374 
375  weights = std::vector<double>(weight_, weight_ + n_weight);
376  delete[] weight_;
377 
378  delay = std::vector<double>(delay_, delay_ + n_netcon);
379  delete[] delay_;
380 
381  (*nrn2core_get_dat2_corepointer_)(nt.id, num_point_process);
382 
383  for (int i = 0; i < n_mech; ++i) {
384  // not all mod files have BBCOREPOINTER data to read
385  if (!corenrn.get_bbcore_read()[mech_types[i]]) {
386  continue;
387  }
388  int icnt;
389  int* iArray_ = nullptr;
390  int dcnt;
391  double* dArray_ = nullptr;
392  (*nrn2core_get_dat2_corepointer_mech_)(nt.id, tmls[i].type, icnt, dcnt, iArray_, dArray_);
393  tmls[i].iArray.resize(icnt);
394  std::copy(iArray_, iArray_ + icnt, tmls[i].iArray.begin());
395  delete[] iArray_;
396 
397  tmls[i].dArray.resize(dcnt);
398  std::copy(dArray_, dArray_ + dcnt, tmls[i].dArray.begin());
399  delete[] dArray_;
400  }
401 
402  // Get from NEURON, the VecPlayContinuous indices in
403  // NetCvode::fixed_play_ for this thread.
404  std::vector<int> indices_vec_play_continuous;
405  (*nrn2core_get_dat2_vecplay_)(thread_id, indices_vec_play_continuous);
406 
407  // i is an index into NEURON's NetCvode::fixed_play_ for this thread.
408  for (auto i: indices_vec_play_continuous) {
409  VecPlayContinuous_ item;
410  // yvec_ and tvec_ are not deleted as that space is within
411  // NEURON Vector
412  double *yvec_, *tvec_;
413  int sz;
414  (*nrn2core_get_dat2_vecplay_inst_)(thread_id,
415  i,
416  item.vtype,
417  item.mtype,
418  item.ix,
419  sz,
420  yvec_,
421  tvec_,
422  item.last_index,
423  item.discon_index,
424  item.ubound_index);
425  item.yvec = IvocVect(sz);
426  item.tvec = IvocVect(sz);
427  std::copy(yvec_, yvec_ + sz, item.yvec.data());
428  std::copy(tvec_, tvec_ + sz, item.tvec.data());
429  vec_play_continuous.push_back(std::move(item));
430  }
431 }
432 
433 /// Check if MOD file used between NEURON and CoreNEURON is same
435  int diff_mech_count = 0;
436  for (int i = 0; i < n_mech; ++i) {
437  if (std::any_of(corenrn.get_different_mechanism_type().begin(),
439  [&](int e) { return e == mech_types[i]; })) {
440  if (nrnmpi_myid == 0) {
441  printf("Error: %s is a different MOD file than used by NEURON!\n",
443  }
444  diff_mech_count++;
445  }
446  }
447 
448  if (diff_mech_count > 0) {
449  if (nrnmpi_myid == 0) {
450  printf(
451  "Error : NEURON and CoreNEURON must use same mod files for compatibility, %d "
452  "different mod file(s) found. Re-compile special and special-core!\n",
453  diff_mech_count);
454  nrn_abort(1);
455  }
456  }
457 }
458 
459 /// Perform in memory transformation between AoS<>SoA for integer data
461  int nodecount,
462  int* pdata,
463  int i,
464  int dparam_size,
465  int layout,
466  int n_node_) {
467  for (int iml = 0; iml < nodecount; ++iml) {
468  int* pd = pdata + nrn_i_layout(iml, nodecount, i, dparam_size, layout);
469  int ix = *pd; // relative to beginning of _actual_*
470  nrn_assert((ix >= 0) && (ix < n_node_));
471  *pd = elem0 + ix; // relative to nt._data
472  }
473 }
474 
475 void Phase2::set_net_send_buffer(Memb_list** ml_list, const std::vector<int>& pnt_offset) {
476  // NetReceiveBuffering
477  for (auto& net_buf_receive: corenrn.get_net_buf_receive()) {
478  int type = net_buf_receive.second;
479  // Does this thread have this type.
480  Memb_list* ml = ml_list[type];
481  if (ml) { // needs a NetReceiveBuffer
482  NetReceiveBuffer_t* nrb =
484  assert(!ml->_net_receive_buffer);
485  ml->_net_receive_buffer = nrb;
486  nrb->_pnt_offset = pnt_offset[type];
487 
488  // begin with a size equal to the number of instances, or at least 8
489  nrb->_size = std::max(8, ml->nodecount);
490  nrb->_pnt_index = (int*) ecalloc_align(nrb->_size, sizeof(int));
491  nrb->_displ = (int*) ecalloc_align(nrb->_size + 1, sizeof(int));
492  nrb->_nrb_index = (int*) ecalloc_align(nrb->_size, sizeof(int));
493  nrb->_weight_index = (int*) ecalloc_align(nrb->_size, sizeof(int));
494  nrb->_nrb_t = (double*) ecalloc_align(nrb->_size, sizeof(double));
495  nrb->_nrb_flag = (double*) ecalloc_align(nrb->_size, sizeof(double));
496  }
497  }
498 
499  // NetSendBuffering
500  for (int type: corenrn.get_net_buf_send_type()) {
501  // Does this thread have this type.
502  Memb_list* ml = ml_list[type];
503  if (ml) { // needs a NetSendBuffer
504  assert(!ml->_net_send_buffer);
505  // begin with a size equal to twice number of instances
506  NetSendBuffer_t* nsb = new NetSendBuffer_t(ml->nodecount * 2);
507  ml->_net_send_buffer = nsb;
508  }
509  }
510 }
511 
513  int type;
514  while ((type = F.read_int()) != 0) {
515  double time;
516  F.read_array(&time, 1);
517  switch (type) {
518  case NetConType: {
519  auto event = std::make_shared<NetConType_>();
520  event->time = time;
521  event->netcon_index = F.read_int();
522  events.emplace_back(type, event);
523  break;
524  }
525  case SelfEventType: {
526  auto event = std::make_shared<SelfEventType_>();
527  event->time = time;
528  event->target_type = F.read_int();
529  event->point_proc_instance = F.read_int();
530  event->target_instance = F.read_int();
531  F.read_array(&event->flag, 1);
532  event->movable = F.read_int();
533  event->weight_index = F.read_int();
534  events.emplace_back(type, event);
535  break;
536  }
537  case PreSynType: {
538  auto event = std::make_shared<PreSynType_>();
539  event->time = time;
540  event->presyn_index = F.read_int();
541  events.emplace_back(type, event);
542  break;
543  }
544  case NetParEventType: {
545  auto event = std::make_shared<NetParEvent_>();
546  event->time = time;
547  events.emplace_back(type, event);
548  break;
549  }
550  case PlayRecordEventType: {
551  auto event = std::make_shared<PlayRecordEventType_>();
552  event->time = time;
553  event->play_record_type = F.read_int();
554  if (event->play_record_type == VecPlayContinuousType) {
555  event->vecplay_index = F.read_int();
556  events.emplace_back(type, event);
557  } else {
558  nrn_assert(0);
559  }
560  break;
561  }
562  default: {
563  nrn_assert(0);
564  break;
565  }
566  }
567  }
568 }
569 
570 void Phase2::fill_before_after_lists(NrnThread& nt, const std::vector<Memb_func>& memb_func) {
571  /// Fill the BA lists
572  std::vector<BAMech*> before_after_map(memb_func.size());
573  for (int i = 0; i < BEFORE_AFTER_SIZE; ++i) {
574  for (size_t ii = 0; ii < memb_func.size(); ++ii) {
575  before_after_map[ii] = nullptr;
576  }
577  // Save first before-after block only. In case of multiple before-after blocks with the
578  // same mech type, we will get subsequent ones using linked list below.
579  for (auto bam = corenrn.get_bamech()[i]; bam; bam = bam->next) {
580  if (!before_after_map[bam->type]) {
581  before_after_map[bam->type] = bam;
582  }
583  }
584  // necessary to keep in order wrt multiple BAMech with same mech type
585  NrnThreadBAList** ptbl = nt.tbl + i;
586  for (auto tml = nt.tml; tml; tml = tml->next) {
587  if (before_after_map[tml->index]) {
588  int mtype = tml->index;
589  for (auto bam = before_after_map[mtype]; bam && bam->type == mtype;
590  bam = bam->next) {
591  auto tbl = (NrnThreadBAList*) emalloc(sizeof(NrnThreadBAList));
592  *ptbl = tbl;
593  tbl->next = nullptr;
594  tbl->bam = bam;
595  tbl->ml = tml->ml;
596  ptbl = &(tbl->next);
597  }
598  }
599  }
600  }
601 }
602 
603 void Phase2::pdata_relocation(const NrnThread& nt, const std::vector<Memb_func>& memb_func) {
604  // Some pdata may index into data which has been reordered from AoS to
605  // SoA. The four possibilities are if semantics is -1 (area), -5 (pointer),
606  // -9 (diam), // or 0-999 (ion variables).
607  // Note that pdata has a layout and the // type block in nt.data into which
608  // it indexes, has a layout.
609 
610  // For faster search of tmls[i].type == type, use a map.
611  // (perhaps would be better to replace tmls so that we can use tmls[type].
612  std::map<int, size_t> type2itml;
613  for (size_t i = 0; i < tmls.size(); ++i) {
614  if (tmls[i].pointer2type.size()) {
615  type2itml[tmls[i].type] = i;
616  }
617  }
618 
619  for (auto tml = nt.tml; tml; tml = tml->next) {
620  int type = tml->index;
621  int layout = corenrn.get_mech_data_layout()[type];
622  int* pdata = tml->ml->pdata;
623  int cnt = tml->ml->nodecount;
624  int szdp = corenrn.get_prop_dparam_size()[type];
625  int* semantics = memb_func[type].dparam_semantics;
626 
627  // compute only for ARTIFICIAL_CELL (has useful area pointer with semantics=-1)
628  if (!corenrn.get_is_artificial()[type]) {
629  if (szdp) {
630  if (!semantics)
631  continue; // temporary for HDFReport, Binreport which will be skipped in
632  // bbcore_write of HBPNeuron
633  nrn_assert(semantics);
634  }
635 
636  for (int i = 0; i < szdp; ++i) {
637  int s = semantics[i];
638  switch (s) {
639  case -1: // area
641  nt._actual_area - nt._data, cnt, pdata, i, szdp, layout, nt.end);
642  break;
643  case -9: // diam
645  nt._actual_diam - nt._data, cnt, pdata, i, szdp, layout, nt.end);
646  break;
647  case -5: // pointer assumes a pointer to membrane voltage
648  // or mechanism data in this thread. The value of the
649  // pointer on the NEURON side was analyzed by
650  // nrn_dblpntr2nrncore which returned the
651  // mechanism index and type. At this moment the index
652  // is in pdata and the type is in tmls[type].pointer2type.
653  // However the latter order is according to the nested
654  // iteration for nodecount { for szdp {}}
655  // Also the nodecount POINTER instances of mechanism
656  // might possibly point to differnt range variables.
657  // Therefore it is not possible to use transform_int_data
658  // and the transform must be done one at a time.
659  // So we do nothing here and separately iterate
660  // after this loop instead of the former voltage only
661  /**
662  transform_int_data(
663  nt._actual_v - nt._data, cnt, pdata, i, szdp, layout, nt.end);
664  **/
665  break;
666  default:
667  if (s >= 0 && s < 1000) { // ion
668  int etype = s;
669  /* if ion is SoA, must recalculate pdata values */
670  /* if ion is AoS, have to deal with offset */
671  Memb_list* eml = nt._ml_list[etype];
672  int edata0 = eml->data - nt._data;
673  int ecnt = eml->nodecount;
674  int esz = corenrn.get_prop_param_size()[etype];
675  for (int iml = 0; iml < cnt; ++iml) {
676  int* pd = pdata + nrn_i_layout(iml, cnt, i, szdp, layout);
677  int ix = *pd; // relative to the ion data
678  nrn_assert((ix >= 0) && (ix < ecnt * esz));
679  /* Original pd order assumed ecnt groups of esz */
680  *pd = edata0 + nrn_param_layout(ix, etype, eml);
681  }
682  }
683  }
684  }
685  // Handle case -5 POINTER transformation (see comment above)
686  auto search = type2itml.find(type);
687  if (search != type2itml.end()) {
688  auto& ptypes = tmls[type2itml[type]].pointer2type;
689  assert(ptypes.size());
690  size_t iptype = 0;
691  for (int iml = 0; iml < cnt; ++iml) {
692  for (int i = 0; i < szdp; ++i) {
693  if (semantics[i] == -5) { // POINTER
694  int* pd = pdata + nrn_i_layout(iml, cnt, i, szdp, layout);
695  int ix = *pd; // relative to elem0
696  int ptype = ptypes[iptype++];
697  if (ptype == voltage) {
698  nrn_assert((ix >= 0) && (ix < nt.end));
699  int elem0 = nt._actual_v - nt._data;
700  *pd = elem0 + ix;
701  } else {
702  Memb_list* pml = nt._ml_list[ptype];
703  int pcnt = pml->nodecount;
704  int psz = corenrn.get_prop_param_size()[ptype];
705  nrn_assert((ix >= 0) && (ix < pcnt * psz));
706  int elem0 = pml->data - nt._data;
707  *pd = elem0 + nrn_param_layout(ix, ptype, pml);
708  }
709  }
710  }
711  }
712  ptypes.clear();
713  }
714  }
715  }
716 }
717 
718 void Phase2::set_dependencies(const NrnThread& nt, const std::vector<Memb_func>& memb_func) {
719  /* here we setup the mechanism dependencies. if there is a mechanism dependency
720  * then we allocate an array for tml->dependencies otherwise set it to nullptr.
721  * In order to find out the "real" dependencies i.e. dependent mechanism
722  * exist at the same compartment, we compare the nodeindices of mechanisms
723  * returned by nrn_mech_depend.
724  */
725 
726  /* temporary array for dependencies */
727  int* mech_deps = (int*) ecalloc(memb_func.size(), sizeof(int));
728 
729  for (auto tml = nt.tml; tml; tml = tml->next) {
730  /* initialize to null */
731  tml->dependencies = nullptr;
732  tml->ndependencies = 0;
733 
734  /* get dependencies from the models */
735  int deps_cnt = nrn_mech_depend(tml->index, mech_deps);
736 
737  /* if dependencies, setup dependency array */
738  if (deps_cnt) {
739  /* store "real" dependencies in the vector */
740  std::vector<int> actual_mech_deps;
741 
742  Memb_list* ml = tml->ml;
743  int* nodeindices = ml->nodeindices;
744 
745  /* iterate over dependencies */
746  for (int j = 0; j < deps_cnt; j++) {
747  /* memb_list of dependency mechanism */
748  Memb_list* dml = nt._ml_list[mech_deps[j]];
749 
750  /* dependency mechanism may not exist in the model */
751  if (!dml)
752  continue;
753 
754  /* take nodeindices for comparison */
755  int* dnodeindices = dml->nodeindices;
756 
757  /* set_intersection function needs temp vector to push the common values */
758  std::vector<int> node_intersection;
759 
760  /* make sure they have non-zero nodes and find their intersection */
761  if ((ml->nodecount > 0) && (dml->nodecount > 0)) {
762  std::set_intersection(nodeindices,
763  nodeindices + ml->nodecount,
764  dnodeindices,
765  dnodeindices + dml->nodecount,
766  std::back_inserter(node_intersection));
767  }
768 
769  /* if they intersect in the nodeindices, it's real dependency */
770  if (!node_intersection.empty()) {
771  actual_mech_deps.push_back(mech_deps[j]);
772  }
773  }
774 
775  /* copy actual_mech_deps to dependencies */
776  if (!actual_mech_deps.empty()) {
777  tml->ndependencies = actual_mech_deps.size();
778  tml->dependencies = (int*) ecalloc(actual_mech_deps.size(), sizeof(int));
779  std::copy(actual_mech_deps.begin(), actual_mech_deps.end(), tml->dependencies);
780  }
781  }
782  }
783 
784  /* free temp dependency array */
785  free(mech_deps);
786 }
787 
788 void Phase2::handle_weights(NrnThread& nt, int n_netcon, NrnThreadChkpnt& ntc) {
789  nt.n_weight = weights.size();
790  // weights in netcons order in groups defined by Point_process target type.
791  nt.weights = (double*) ecalloc_align(nt.n_weight, sizeof(double));
792  std::copy(weights.begin(), weights.end(), nt.weights);
793 
794  int iw = 0;
795  for (int i = 0; i < n_netcon; ++i) {
796  NetCon& nc = nt.netcons[i];
797  nc.u.weight_index_ = iw;
798  if (pnttype[i] != 0) {
800  } else {
801  iw += 1;
802  }
803  }
804  assert(iw == nt.n_weight);
805 
806  // Nontrivial if FOR_NETCON in use by some mechanisms
808 
809 
810 #if CHKPNTDEBUG
811  ntc.delay = new double[n_netcon];
812  memcpy(ntc.delay, delay.data(), n_netcon * sizeof(double));
813 #endif
814  for (int i = 0; i < n_netcon; ++i) {
815  NetCon& nc = nt.netcons[i];
816  nc.delay_ = delay[i];
817  }
818 }
819 
821  const std::vector<Memb_func>& memb_func,
822  NrnThreadChkpnt& ntc) {
823  // BBCOREPOINTER information
824 #if CHKPNTDEBUG
825  ntc.nbcp = num_point_process;
826  ntc.bcpicnt = new int[n_mech];
827  ntc.bcpdcnt = new int[n_mech];
828  ntc.bcptype = new int[n_mech];
829  size_t point_proc_id = 0;
830 #endif
831  for (int i = 0; i < n_mech; ++i) {
832  int type = mech_types[i];
833  if (!corenrn.get_bbcore_read()[type]) {
834  continue;
835  }
836  type = tmls[i].type; // This is not an error, but it has to be fixed I think
837 #if CHKPNTDEBUG
838  ntc.bcptype[point_proc_id] = type;
839  ntc.bcpicnt[point_proc_id] = tmls[i].iArray.size();
840  ntc.bcpdcnt[point_proc_id] = tmls[i].dArray.size();
841  point_proc_id++;
842 #endif
843  int ik = 0;
844  int dk = 0;
845  Memb_list* ml = nt._ml_list[type];
846  int dsz = corenrn.get_prop_param_size()[type];
847  int pdsz = corenrn.get_prop_dparam_size()[type];
848  int cntml = ml->nodecount;
849  int layout = corenrn.get_mech_data_layout()[type];
850  for (int j = 0; j < cntml; ++j) {
851  int jp = j;
852  if (ml->_permute) {
853  jp = ml->_permute[j];
854  }
855  double* d = ml->data;
856  Datum* pd = ml->pdata;
857  d += nrn_i_layout(jp, cntml, 0, dsz, layout);
858  pd += nrn_i_layout(jp, cntml, 0, pdsz, layout);
859  int aln_cntml = nrn_soa_padded_size(cntml, layout);
860  (*corenrn.get_bbcore_read()[type])(tmls[i].dArray.data(),
861  tmls[i].iArray.data(),
862  &dk,
863  &ik,
864  0,
865  aln_cntml,
866  d,
867  pd,
868  ml->_thread,
869  &nt,
870  ml,
871  0.0);
872  }
873  assert(dk == static_cast<int>(tmls[i].dArray.size()));
874  assert(ik == static_cast<int>(tmls[i].iArray.size()));
875  }
876 }
877 
879  // VecPlayContinuous instances
880  // No attempt at memory efficiency
881  nt.n_vecplay = vec_play_continuous.size();
882  if (nt.n_vecplay) {
883  nt._vecplay = new void*[nt.n_vecplay];
884  } else {
885  nt._vecplay = nullptr;
886  }
887 #if CHKPNTDEBUG
888  ntc.vecplay_ix = new int[nt.n_vecplay];
889  ntc.vtype = new int[nt.n_vecplay];
890  ntc.mtype = new int[nt.n_vecplay];
891 #endif
892  for (int i = 0; i < nt.n_vecplay; ++i) {
893  auto& vecPlay = vec_play_continuous[i];
894  nrn_assert(vecPlay.vtype == VecPlayContinuousType);
895 #if CHKPNTDEBUG
896  ntc.vtype[i] = vecPlay.vtype;
897 #endif
898 #if CHKPNTDEBUG
899  ntc.mtype[i] = vecPlay.mtype;
900 #endif
901  Memb_list* ml = nt._ml_list[vecPlay.mtype];
902 #if CHKPNTDEBUG
903  ntc.vecplay_ix[i] = vecPlay.ix;
904 #endif
905 
906  vecPlay.ix = nrn_param_layout(vecPlay.ix, vecPlay.mtype, ml);
907  if (ml->_permute) {
908  vecPlay.ix = nrn_index_permute(vecPlay.ix, vecPlay.mtype, ml);
909  }
910  nt._vecplay[i] = new VecPlayContinuous(ml->data + vecPlay.ix,
911  std::move(vecPlay.yvec),
912  std::move(vecPlay.tvec),
913  nullptr,
914  nt.id);
915  }
916 }
917 
918 void Phase2::populate(NrnThread& nt, const UserParams& userParams) {
920  ntc.file_id = userParams.gidgroups[nt.id];
921 
922  nt.ncell = n_real_cell;
923  nt.end = n_node;
925 
926 #if CHKPNTDEBUG
927  ntc.n_outputgids = n_output;
928  ntc.nmech = n_mech;
929 #endif
930 
931  /// Checkpoint in coreneuron is defined for both phase 1 and phase 2 since they are written
932  /// together
933  nt._ml_list = (Memb_list**) ecalloc_align(corenrn.get_memb_funcs().size(), sizeof(Memb_list*));
934 
935  auto& memb_func = corenrn.get_memb_funcs();
936 #if CHKPNTDEBUG
937  ntc.mlmap = new Memb_list_chkpnt*[memb_func.size()];
938  for (int i = 0; i < memb_func.size(); ++i) {
939  ntc.mlmap[i] = nullptr;
940  }
941 #endif
942 
943  nt.stream_id = 0;
944  nt.compute_gpu = 0;
945  auto& nrn_prop_param_size_ = corenrn.get_prop_param_size();
946  auto& nrn_prop_dparam_size_ = corenrn.get_prop_dparam_size();
947 
948 /* read_phase2 is being called from openmp region
949  * and hence we can set the stream equal to current thread id.
950  * In fact we could set gid as stream_id when we will have nrn threads
951  * greater than number of omp threads.
952  */
953 #if defined(_OPENMP)
954  nt.stream_id = omp_get_thread_num();
955 #endif
956 
957  int shadow_rhs_cnt = 0;
958  nt.shadow_rhs_cnt = 0;
959 
960  NrnThreadMembList* tml_last = nullptr;
961  for (int i = 0; i < n_mech; ++i) {
962  auto tml =
963  create_tml(nt, i, memb_func[mech_types[i]], shadow_rhs_cnt, mech_types, nodecounts);
964 
965  nt._ml_list[tml->index] = tml->ml;
966 
967 #if CHKPNTDEBUG
968  Memb_list_chkpnt* mlc = new Memb_list_chkpnt;
969  ntc.mlmap[tml->index] = mlc;
970 #endif
971 
972  if (nt.tml) {
973  tml_last->next = tml;
974  } else {
975  nt.tml = tml;
976  }
977  tml_last = tml;
978  }
979 
980  if (shadow_rhs_cnt) {
981  nt._shadow_rhs = (double*) ecalloc_align(nrn_soa_padded_size(shadow_rhs_cnt, 0),
982  sizeof(double));
983  nt._shadow_d = (double*) ecalloc_align(nrn_soa_padded_size(shadow_rhs_cnt, 0),
984  sizeof(double));
985  nt.shadow_rhs_cnt = shadow_rhs_cnt;
986  }
987 
988  nt.mapping = nullptr; // section segment mapping
989 
990  nt._nidata = n_idata;
991  if (nt._nidata)
992  nt._idata = (int*) ecalloc(nt._nidata, sizeof(int));
993  else
994  nt._idata = nullptr;
995  // see patternstim.cpp
996  int extra_nv = (&nt == nrn_threads) ? nrn_extra_thread0_vdata : 0;
997  nt._nvdata = n_vdata;
998  if (nt._nvdata + extra_nv)
999  nt._vdata = (void**) ecalloc_align(nt._nvdata + extra_nv, sizeof(void*));
1000  else
1001  nt._vdata = nullptr;
1002 
1003  // The data format begins with the matrix data
1004  int n_data_padded = nrn_soa_padded_size(nt.end, SOA_LAYOUT);
1005  nt._data = _data;
1006  nt._actual_rhs = nt._data + 0 * n_data_padded;
1007  nt._actual_d = nt._data + 1 * n_data_padded;
1008  nt._actual_a = nt._data + 2 * n_data_padded;
1009  nt._actual_b = nt._data + 3 * n_data_padded;
1010  nt._actual_v = nt._data + 4 * n_data_padded;
1011  nt._actual_area = nt._data + 5 * n_data_padded;
1012  nt._actual_diam = n_diam ? nt._data + 6 * n_data_padded : nullptr;
1013 
1014  size_t offset = 6 * n_data_padded;
1015  if (n_diam) {
1016  // in the rare case that a mechanism has dparam with diam semantics
1017  // then actual_diam array added after matrix in nt._data
1018  // Generally wasteful since only a few diam are pointed to.
1019  // Probably better to move the diam semantics to the p array of the mechanism
1020  offset += n_data_padded;
1021  }
1022 
1023  // Memb_list.data points into the nt._data array.
1024  // Also count the number of Point_process
1025  int num_point_process = 0;
1026  for (auto tml = nt.tml; tml; tml = tml->next) {
1027  Memb_list* ml = tml->ml;
1028  int type = tml->index;
1029  int layout = corenrn.get_mech_data_layout()[type];
1030  int n = ml->nodecount;
1031  int sz = nrn_prop_param_size_[type];
1032  offset = nrn_soa_byte_align(offset);
1033  ml->data = nt._data + offset;
1034  offset += nrn_soa_padded_size(n, layout) * sz;
1035  if (corenrn.get_pnt_map()[type] > 0) {
1036  num_point_process += n;
1037  }
1038  }
1040  sizeof(Point_process)); // includes acell with and
1041  // without gid
1043  nt._ndata = offset;
1044 
1045 
1046  // matrix info
1048 
1049 #if CHKPNTDEBUG
1050  ntc.parent = new int[nt.end];
1051  memcpy(ntc.parent, nt._v_parent_index, nt.end * sizeof(int));
1052  ntc.area = new double[nt.end];
1053  memcpy(ntc.area, nt._actual_area, nt.end * sizeof(double));
1054 #endif
1055 
1056  int synoffset = 0;
1057  std::vector<int> pnt_offset(memb_func.size());
1058 
1059  // All the mechanism data and pdata.
1060  // Also fill in the pnt_offset
1061  // Complete spec of Point_process except for the acell presyn_ field.
1062  int itml = 0;
1063  for (auto tml = nt.tml; tml; tml = tml->next, ++itml) {
1064  int type = tml->index;
1065  Memb_list* ml = tml->ml;
1066  int n = ml->nodecount;
1067  int szp = nrn_prop_param_size_[type];
1068  int szdp = nrn_prop_dparam_size_[type];
1069  int layout = corenrn.get_mech_data_layout()[type];
1070 
1071  ml->nodeindices = (int*) ecalloc_align(ml->nodecount, sizeof(int));
1072  std::copy(tmls[itml].nodeindices.begin(), tmls[itml].nodeindices.end(), ml->nodeindices);
1073 
1074  mech_data_layout_transform<double>(ml->data, n, szp, layout);
1075 
1076  if (szdp) {
1077  ml->pdata = (int*) ecalloc_align(nrn_soa_padded_size(n, layout) * szdp, sizeof(int));
1078  std::copy(tmls[itml].pdata.begin(), tmls[itml].pdata.end(), ml->pdata);
1079  mech_data_layout_transform<int>(ml->pdata, n, szdp, layout);
1080 
1081 #if CHKPNTDEBUG // Not substantive. Only for debugging.
1082  Memb_list_chkpnt* mlc = ntc.mlmap[type];
1083  mlc->pdata_not_permuted = (int*) coreneuron::ecalloc_align(n * szdp, sizeof(int));
1084  if (layout == Layout::AoS) { // only copy
1085  for (int i = 0; i < n; ++i) {
1086  for (int j = 0; j < szdp; ++j) {
1087  mlc->pdata_not_permuted[i * szdp + j] = ml->pdata[i * szdp + j];
1088  }
1089  }
1090  } else if (layout == Layout::SoA) { // transpose and unpad
1091  int align_cnt = nrn_soa_padded_size(n, layout);
1092  for (int i = 0; i < n; ++i) {
1093  for (int j = 0; j < szdp; ++j) {
1094  mlc->pdata_not_permuted[i * szdp + j] = ml->pdata[i + j * align_cnt];
1095  }
1096  }
1097  }
1098 #endif
1099  } else {
1100  ml->pdata = nullptr;
1101  }
1102  if (corenrn.get_pnt_map()[type] > 0) { // POINT_PROCESS mechanism including acell
1103  int cnt = ml->nodecount;
1104  Point_process* pnt = nullptr;
1105  pnt = nt.pntprocs + synoffset;
1106  pnt_offset[type] = synoffset;
1107  synoffset += cnt;
1108  for (int i = 0; i < cnt; ++i) {
1109  Point_process* pp = pnt + i;
1110  pp->_type = type;
1111  pp->_i_instance = i;
1112  nt._vdata[ml->pdata[nrn_i_layout(i, cnt, 1, szdp, layout)]] = pp;
1113  pp->_tid = nt.id;
1114  }
1115  }
1116  }
1117 
1118  // pnt_offset needed for SelfEvent transfer from NEURON. Not needed on GPU.
1119  // Ugh. Related but not same as NetReceiveBuffer._pnt_offset
1120  nt._pnt_offset = pnt_offset;
1121 
1122  pdata_relocation(nt, memb_func);
1123 
1124  /* if desired, apply the node permutation. This involves permuting
1125  at least the node parameter arrays for a, b, and area (and diam) and all
1126  integer vector values that index into nodes. This could have been done
1127  when originally filling the arrays with AoS ordered data, but can also
1128  be done now, after the SoA transformation. The latter has the advantage
1129  that the present order is consistent with all the layout values. Note
1130  that after this portion of the permutation, a number of other node index
1131  vectors will be read and will need to be permuted as well in subsequent
1132  sections of this function.
1133  */
1135  nt._permute = interleave_order(nt.id, nt.ncell, nt.end, nt._v_parent_index);
1136  }
1137  if (nt._permute) {
1138  int* p = nt._permute;
1139  permute_data(nt._actual_a, nt.end, p);
1140  permute_data(nt._actual_b, nt.end, p);
1141  permute_data(nt._actual_area, nt.end, p);
1143  nt.end,
1144  p); // need if restore or finitialize does not initialize voltage
1145  if (nt._actual_diam) {
1146  permute_data(nt._actual_diam, nt.end, p);
1147  }
1148  // index values change as well as ordering
1149  permute_ptr(nt._v_parent_index, nt.end, p);
1150  node_permute(nt._v_parent_index, nt.end, p);
1151 
1152 #if CORENRN_DEBUG
1153  for (int i = 0; i < nt.end; ++i) {
1154  printf("parent[%d] = %d\n", i, nt._v_parent_index[i]);
1155  }
1156 #endif
1157 
1158  // specify the ml->_permute and sort the nodeindices
1159  // Have to calculate all the permute before updating pdata in case
1160  // POINTER to data of other mechanisms exist.
1161  for (auto tml = nt.tml; tml; tml = tml->next) {
1162  if (tml->ml->nodeindices) { // not artificial
1163  permute_nodeindices(tml->ml, p);
1164  }
1165  }
1166  for (auto tml = nt.tml; tml; tml = tml->next) {
1167  if (tml->ml->nodeindices) { // not artificial
1168  permute_ml(tml->ml, tml->index, nt);
1169  }
1170  }
1171 
1172  // permute the Point_process._i_instance
1173  for (int i = 0; i < nt.n_pntproc; ++i) {
1174  Point_process& pp = nt.pntprocs[i];
1175  Memb_list* ml = nt._ml_list[pp._type];
1176  if (ml->_permute) {
1177  pp._i_instance = ml->_permute[pp._i_instance];
1178  }
1179  }
1180  }
1181 
1182  set_dependencies(nt, memb_func);
1183 
1184  fill_before_after_lists(nt, memb_func);
1185 
1186  // for fast watch statement checking
1187  // setup a list of types that have WATCH statement
1188  {
1189  int sz = 0; // count the types with WATCH
1190  for (auto tml = nt.tml; tml; tml = tml->next) {
1191  if (corenrn.get_watch_check()[tml->index]) {
1192  ++sz;
1193  }
1194  }
1195  if (sz) {
1196  nt._watch_types = (int*) ecalloc(sz + 1, sizeof(int)); // nullptr terminated
1197  sz = 0;
1198  for (auto tml = nt.tml; tml; tml = tml->next) {
1199  if (corenrn.get_watch_check()[tml->index]) {
1200  nt._watch_types[sz++] = tml->index;
1201  }
1202  }
1203  }
1204  }
1205  auto& pnttype2presyn = corenrn.get_pnttype2presyn();
1206  auto& nrn_has_net_event_ = corenrn.get_has_net_event();
1207  // create the nt.pnt2presyn_ix array of arrays.
1208  nt.pnt2presyn_ix = (int**) ecalloc(nrn_has_net_event_.size(), sizeof(int*));
1209  for (size_t i = 0; i < nrn_has_net_event_.size(); ++i) {
1210  Memb_list* ml = nt._ml_list[nrn_has_net_event_[i]];
1211  if (ml && ml->nodecount > 0) {
1212  nt.pnt2presyn_ix[i] = (int*) ecalloc(ml->nodecount, sizeof(int));
1213  }
1214  }
1215 
1216  // Real cells are at the beginning of the nt.presyns followed by
1217  // acells (with and without gids mixed together)
1218  // Here we associate the real cells with voltage pointers and
1219  // acell PreSyn with the Point_process.
1220  // nt.presyns order same as output_vindex order
1221 #if CHKPNTDEBUG
1222  ntc.output_vindex = new int[nt.n_presyn];
1223  memcpy(ntc.output_vindex, output_vindex.data(), nt.n_presyn * sizeof(int));
1224 #endif
1225  if (nt._permute) {
1226  // only indices >= 0 (i.e. _actual_v indices) will be changed.
1227  node_permute(output_vindex.data(), nt.n_presyn, nt._permute);
1228  }
1229 #if CHKPNTDEBUG
1230  ntc.output_threshold = new double[n_real_output];
1231  memcpy(ntc.output_threshold, output_threshold.data(), n_real_output * sizeof(double));
1232 #endif
1233 
1234  for (int i = 0; i < nt.n_presyn; ++i) { // real cells
1235  PreSyn* ps = nt.presyns + i;
1236 
1237  int ix = output_vindex[i];
1238  if (ix == -1 && i < n_real_output) { // real cell without a presyn
1239  continue;
1240  }
1241  if (ix < 0) {
1242  ix = -ix;
1243  int index = ix / 1000;
1244  int type = ix % 1000;
1245  Point_process* pnt = nt.pntprocs + (pnt_offset[type] + index);
1246  ps->pntsrc_ = pnt;
1247  // pnt->_presyn = ps;
1248  int ip2ps = pnttype2presyn[pnt->_type];
1249  if (ip2ps >= 0) {
1250  nt.pnt2presyn_ix[ip2ps][pnt->_i_instance] = i;
1251  }
1252  if (ps->gid_ < 0) {
1253  ps->gid_ = -1;
1254  }
1255  } else {
1256  assert(ps->gid_ > -1);
1257  ps->thvar_index_ = ix; // index into _actual_v
1258  assert(ix < nt.end);
1259  ps->threshold_ = output_threshold[i];
1260  }
1261  }
1262 
1263  // initial net_send_buffer size about 1% of number of presyns
1264  // nt._net_send_buffer_size = nt.ncell/100 + 1;
1265  // but, to avoid reallocation complexity on GPU ...
1267  nt._net_send_buffer = (int*) ecalloc_align(nt._net_send_buffer_size, sizeof(int));
1268 
1269  int nnetcon = nt.n_netcon;
1270 
1271  // it may happen that Point_process structures will be made unnecessary
1272  // by factoring into NetCon.
1273 
1274 #if CHKPNTDEBUG
1275  ntc.pnttype = new int[nnetcon];
1276  ntc.pntindex = new int[nnetcon];
1277  memcpy(ntc.pnttype, pnttype.data(), nnetcon * sizeof(int));
1278  memcpy(ntc.pntindex, pntindex.data(), nnetcon * sizeof(int));
1279 #endif
1280  for (int i = 0; i < nnetcon; ++i) {
1281  int type = pnttype[i];
1282  if (type > 0) {
1283  int index = pnt_offset[type] + pntindex[i]; /// Potentially uninitialized pnt_offset[],
1284  /// check for previous assignments
1285  NetCon& nc = nt.netcons[i];
1286  nc.target_ = nt.pntprocs + index;
1287  nc.active_ = true;
1288  }
1289  }
1290 
1291  handle_weights(nt, nnetcon, ntc);
1292 
1293  get_info_from_bbcore(nt, memb_func, ntc);
1294 
1295  set_vec_play(nt, ntc);
1296 
1297  if (!events.empty()) {
1298  userParams.checkPoints.restore_tqueue(nt, *this);
1299  }
1300 
1301  set_net_send_buffer(nt._ml_list, pnt_offset);
1302 }
1303 } // namespace coreneuron
coreneuron::CoreNeuron::get_mech_data_layout
auto & get_mech_data_layout()
Definition: coreneuron.hpp:174
nrn2core_get_dat2_vecplay_
int(* nrn2core_get_dat2_vecplay_)(int tid, std::vector< int > &indices)
Definition: phase2.cpp:73
coreneuron::NrnThread::netcons
NetCon * netcons
Definition: multicore.hpp:87
coreneuron::NrnThread::n_real_output
int n_real_output
Definition: multicore.hpp:95
coreneuron::CheckPoints::restore_tqueue
void restore_tqueue(NrnThread &, const Phase2 &p2)
Definition: nrn_checkpoint.cpp:857
coreneuron::NetCon::delay_
double delay_
Definition: netcon.hpp:50
coreneuron::Phase2::patstim_index
int patstim_index
Definition: phase2.hpp:68
coreneuron::interleave_order
int * interleave_order(int ith, int ncell, int nnode, int *parent)
Function that performs the permutation of the cells such that the execution threads access coalesced ...
Definition: cellorder.cpp:290
nrn2core_get_dat2_vecplay_inst_
int(* nrn2core_get_dat2_vecplay_inst_)(int tid, int i, int &vptype, int &mtype, int &ix, int &sz, double *&yvec, double *&tvec, int &last_index, int &discon_index, int &ubound_index)
Definition: phase2.cpp:75
coreneuron::NrnThread::_vdata
void ** _vdata
Definition: multicore.hpp:108
nrn2core_get_dat2_mech_
int(* nrn2core_get_dat2_mech_)(int tid, size_t i, int dsz_inst, int *&nodeindices, double *&data, int *&pdata, std::vector< int > &pointer2type)
Definition: phase2.cpp:47
coreneuron::nrn_soa_byte_align
size_t nrn_soa_byte_align(size_t size)
return the new offset considering the byte alignment settings
Definition: mem_layout_util.cpp:20
coreneuron::voltage
@ voltage
Definition: nrniv_decl.h:19
coreneuron::NrnThread::_shadow_d
double * _shadow_d
Definition: multicore.hpp:120
coreneuron::FileHandler::record_checkpoint
void record_checkpoint()
Record current chkpnt state.
Definition: nrn_filehandler.hpp:80
coreneuron::nrnthread_chkpnt
NrnThreadChkpnt * nrnthread_chkpnt
Definition: nrn_checkpoint.cpp:651
coreneuron::ecalloc
void * ecalloc(size_t n, size_t size)
Definition: nrnoc_aux.cpp:85
utils.hpp
coreneuron::Phase2::delay
std::vector< double > delay
Definition: phase2.hpp:125
coreneuron::Phase2::n_vdata
int n_vdata
Definition: phase2.hpp:101
coreneuron::NrnThreadChkpnt::file_id
int file_id
Definition: nrn_checkpoint.hpp:88
coreneuron::Phase2::read_direct
void read_direct(int thread_id, const NrnThread &nt)
Definition: phase2.cpp:256
coreneuron::Point_process
Definition: mechanism.hpp:35
coreneuron::Datum
int Datum
Definition: nrnconf.h:23
nrn2core_get_dat2_1_
int(* nrn2core_get_dat2_1_)(int tid, int &n_real_cell, int &ngid, int &n_real_gid, int &nnode, int &ndiam, int &nmech, int *&tml_index, int *&ml_nodecount, int &nidata, int &nvdata, int &nweight)
Definition: phase2.cpp:26
coreneuron::CoreNeuron::get_net_buf_send_type
auto & get_net_buf_send_type()
Definition: coreneuron.hpp:158
coreneuron::Phase2::output_vindex
std::vector< int > output_vindex
Definition: phase2.hpp:120
NetConType
#define NetConType
Definition: netcon.hpp:27
coreneuron::Phase2::n_real_cell
int n_real_cell
Definition: phase2.hpp:92
SOA_LAYOUT
#define SOA_LAYOUT
Definition: data_layout.hpp:11
nrnoc_aux.hpp
data
Definition: alignment.cpp:18
coreneuron::CoreNeuron::get_is_artificial
auto & get_is_artificial()
Definition: coreneuron.hpp:178
coreneuron::UserParams::checkPoints
CheckPoints & checkPoints
Definition: user_params.hpp:41
coreneuron::Phase2::n_mech
int n_mech
Definition: phase2.hpp:97
coreneuron::NrnThread::_net_send_buffer_size
int _net_send_buffer_size
Definition: multicore.hpp:138
mem_layout_util.hpp
coreneuron::NrnThread::presyns
PreSyn * presyns
Definition: multicore.hpp:83
coreneuron::FileHandler::read_int
int read_int()
Parse a single integer entry.
Definition: nrn_filehandler.cpp:57
coreneuron::PreSyn::threshold_
double threshold_
Definition: netcon.hpp:113
coreneuron::Phase2::mech_types
std::vector< int > mech_types
Definition: phase2.hpp:98
coreneuron::NrnThread::_watch_types
int * _watch_types
Definition: multicore.hpp:142
coreneuron::NrnThread::id
int id
Definition: multicore.hpp:99
coreneuron::Phase2::set_dependencies
void set_dependencies(const NrnThread &nt, const std::vector< Memb_func > &memb_func)
Definition: phase2.cpp:718
coreneuron::Phase2::tmls
std::vector< TML > tmls
Definition: phase2.hpp:119
coreneuron::NetCon::u
union coreneuron::NetCon::@0 u
nrn2core_get_dat2_3_
int(* nrn2core_get_dat2_3_)(int tid, int nweight, int *&output_vindex, double *&output_threshold, int *&netcon_pnttype, int *&netcon_pntindex, double *&weights, double *&delays)
Definition: phase2.cpp:55
coreneuron::NetSendBuffer_t
Definition: mechanism.hpp:62
coreneuron::ii
int ii
Definition: cellorder.cpp:486
coreneuron::permute_data
void permute_data(double *vec, int n, int *p)
Definition: node_permute.cpp:349
nrn2core_get_dat2_corepointer_
int(* nrn2core_get_dat2_corepointer_)(int tid, int &n)
Definition: phase2.cpp:64
coreneuron::Memb_list
Definition: mechanism.hpp:131
coreneuron::Phase2::weights
std::vector< double > weights
Definition: phase2.hpp:124
coreneuron::Phase2::VecPlayContinuous_::discon_index
int discon_index
Definition: phase2.hpp:64
coreneuron::NrnThread::tml
NrnThreadMembList * tml
Definition: multicore.hpp:80
coreneuron::Phase2::n_diam
int n_diam
Definition: phase2.hpp:96
coreneuron::nrn_soa_padded_size
int nrn_soa_padded_size(int cnt, int layout)
calculate size after padding for specific memory layout
Definition: mem_layout_util.cpp:15
coreneuron::interleave_permute_type
int interleave_permute_type
Definition: cellorder.cpp:28
coreneuron::NrnThread::_actual_rhs
double * _actual_rhs
Definition: multicore.hpp:111
coreneuron::Phase2::pdata_relocation
void pdata_relocation(const NrnThread &nt, const std::vector< Memb_func > &memb_func)
Definition: phase2.cpp:603
coreneuron::NrnThread::compute_gpu
int compute_gpu
Definition: multicore.hpp:136
coreneuron::NrnThread::_pnt_offset
std::vector< int > _pnt_offset
Definition: multicore.hpp:154
pdata
#define pdata
Definition: md1redef.h:37
coreneuron::NrnThreadBAList
Definition: multicore.hpp:46
coreneuron.hpp
coreneuron::NetReceiveBuffer_t::_pnt_index
int * _pnt_index
Definition: mechanism.hpp:45
coreneuron::NrnThread::_actual_a
double * _actual_a
Definition: multicore.hpp:113
coreneuron::NrnThread::_actual_diam
double * _actual_diam
Definition: multicore.hpp:117
coreneuron::NrnThread::_nvdata
size_t _nvdata
Definition: multicore.hpp:104
coreneuron::Phase2::_data
double * _data
Definition: phase2.hpp:110
coreneuron
THIS FILE IS AUTO GENERATED DONT MODIFY IT.
Definition: corenrn_parameters.cpp:12
coreneuron::UserParams
This structure is data needed is several part of nrn_setup, phase1 and phase2.
Definition: user_params.hpp:18
coreneuron::NetReceiveBuffer_t::_nrb_index
int * _nrb_index
Definition: mechanism.hpp:43
coreneuron::NrnThread::n_weight
int n_weight
Definition: multicore.hpp:91
coreneuron::NrnThread::n_pntproc
int n_pntproc
Definition: multicore.hpp:90
coreneuron::Phase2::v_parent_index
int * v_parent_index
Definition: phase2.hpp:102
coreneuron::NrnThread::tbl
NrnThreadBAList * tbl[BEFORE_AFTER_SIZE]
Definition: multicore.hpp:133
coreneuron::Phase2::handle_weights
void handle_weights(NrnThread &nt, int n_netcon, NrnThreadChkpnt &ntc)
Definition: phase2.cpp:788
coreneuron::Phase2::VecPlayContinuous_::vtype
int vtype
Definition: phase2.hpp:57
coreneuron::NrnThread::_v_parent_index
int * _v_parent_index
Definition: multicore.hpp:126
coreneuron::i
int i
Definition: cellorder.cpp:485
coreneuron::Phase2::get_info_from_bbcore
void get_info_from_bbcore(NrnThread &nt, const std::vector< Memb_func > &memb_func, NrnThreadChkpnt &ntc)
Definition: phase2.cpp:820
coreneuron::Phase2::n_node
int n_node
Definition: phase2.hpp:95
coreneuron::NrnThread::_ml_list
Memb_list ** _ml_list
Definition: multicore.hpp:81
coreneuron::PreSyn
Definition: netcon.hpp:104
VecPlayContinuousType
#define VecPlayContinuousType
Definition: vrecitem.h:17
coreneuron::Phase2::pnttype
std::vector< int > pnttype
Definition: phase2.hpp:122
coreneuron::Phase2::VecPlayContinuous_::ubound_index
int ubound_index
Definition: phase2.hpp:65
coreneuron::Phase2::n_real_output
int n_real_output
Definition: phase2.hpp:94
coreneuron::Phase2::vec_play_continuous
std::vector< VecPlayContinuous_ > vec_play_continuous
Definition: phase2.hpp:67
coreneuron::setup_fornetcon_info
void setup_fornetcon_info(NrnThread &nt)
If FOR_NETCON in use, setup NrnThread fornetcon related info.
Definition: setup_fornetcon.cpp:77
i
#define i
Definition: md1redef.h:19
coreneuron::NrnThread::_idata
int * _idata
Definition: multicore.hpp:107
coreneuron::FileHandler
Definition: nrn_filehandler.hpp:32
coreneuron::nrn_param_layout
int nrn_param_layout(int i, int mtype, Memb_list *ml)
Definition: mem_layout_util.cpp:52
coreneuron::Phase2::transform_int_data
void transform_int_data(int elem0, int nodecount, int *pdata, int i, int dparam_size, int layout, int n_node_)
Perform in memory transformation between AoS<>SoA for integer data.
Definition: phase2.cpp:460
coreneuron::CoreNeuron::get_prop_dparam_size
auto & get_prop_dparam_size()
Definition: coreneuron.hpp:170
coreneuron::FileHandler::eof
bool eof()
nothing more to read
Definition: nrn_filehandler.cpp:45
coreneuron::NrnThread::shadow_rhs_cnt
int shadow_rhs_cnt
Definition: multicore.hpp:135
setup_fornetcon.hpp
coreneuron::Phase2::n_idata
int n_idata
Definition: phase2.hpp:100
coreneuron::CoreNeuron::get_memb_funcs
auto & get_memb_funcs()
Definition: coreneuron.hpp:134
coreneuron::NetCon
Definition: netcon.hpp:47
coreneuron::CoreNeuron::get_has_net_event
auto & get_has_net_event()
Definition: coreneuron.hpp:202
coreneuron::CoreNeuron::get_bbcore_read
auto & get_bbcore_read()
Definition: coreneuron.hpp:210
coreneuron::UserParams::gidgroups
const int *const gidgroups
Array of cell group numbers (indices)
Definition: user_params.hpp:35
coreneuron::NetReceiveBuffer_t
Definition: mechanism.hpp:41
coreneuron::NrnThread::_net_send_buffer
int * _net_send_buffer
Definition: multicore.hpp:140
coreneuron::CoreNeuron::get_watch_check
auto & get_watch_check()
Definition: coreneuron.hpp:198
coreneuron::Phase2::set_vec_play
void set_vec_play(NrnThread &nt, NrnThreadChkpnt &ntc)
Definition: phase2.cpp:878
coreneuron::NrnThread::n_presyn
int n_presyn
Definition: multicore.hpp:94
coreneuron::Phase2::set_net_send_buffer
void set_net_send_buffer(Memb_list **ml_list, const std::vector< int > &pnt_offset)
Definition: phase2.cpp:475
coreneuron::NrnThread::_ndata
size_t _ndata
Definition: multicore.hpp:103
node_permute.h
coreneuron::nrn_index_permute
int nrn_index_permute(int ix, int type, Memb_list *ml)
Definition: node_permute.cpp:363
coreneuron::Phase2::output_threshold
std::vector< double > output_threshold
Definition: phase2.hpp:121
SelfEventType
#define SelfEventType
Definition: netcon.hpp:28
coreneuron::NrnThread
Definition: multicore.hpp:75
coreneuron::NrnThreadMembList
Definition: multicore.hpp:32
coreneuron::permute_ptr
void permute_ptr(int *vec, int n, int *p)
Definition: node_permute.cpp:345
coreneuron::NrnThreadBAList::next
NrnThreadBAList * next
Definition: multicore.hpp:49
coreneuron::Phase2::VecPlayContinuous_::mtype
int mtype
Definition: phase2.hpp:58
cnt
#define cnt
Definition: tqueue.hpp:44
coreneuron::create_tml
NrnThreadMembList * create_tml(NrnThread &nt, int mech_id, Memb_func &memb_func, int &shadow_rhs_cnt, const std::vector< int > &mech_types, const std::vector< int > &nodecounts)
Definition: multicore.cpp:64
coreneuron::NetReceiveBuffer_t::_weight_index
int * _weight_index
Definition: mechanism.hpp:46
coreneuron::NrnThread::_shadow_rhs
double * _shadow_rhs
Definition: multicore.hpp:118
PreSynType
#define PreSynType
Definition: netcon.hpp:29
coreneuron::node_permute
void node_permute(int *vec, int n, int *permute)
Definition: node_permute.cpp:337
coreneuron::nrn_mech_depend
int nrn_mech_depend(int type, int *dependencies)
Definition: register_mech.cpp:294
coreneuron::NrnThread::stream_id
int stream_id
Definition: multicore.hpp:137
coreneuron::Phase2::pntindex
std::vector< int > pntindex
Definition: phase2.hpp:123
coreneuron::NrnThread::_data
double * _data
Definition: multicore.hpp:106
coreneuron::AoS
@ AoS
Definition: nrniv_decl.h:69
coreneuron::NetCon::weight_index_
int weight_index_
Definition: netcon.hpp:53
coreneuron::permute_ml
void permute_ml(Memb_list *ml, int type, NrnThread &nt)
Definition: node_permute.cpp:353
nodecount
#define nodecount
Definition: md1redef.h:39
coreneuron::nrn_threads
NrnThread * nrn_threads
Definition: multicore.cpp:56
coreneuron::corenrn
CoreNeuron corenrn
Definition: multicore.cpp:53
coreneuron::nrn_i_layout
int nrn_i_layout(int icnt, int cnt, int isz, int sz, int layout)
This function return the index in a flat array of a matrix coordinate (icnt, isz).
Definition: mem_layout_util.cpp:32
coreneuron::PreSyn::thvar_index_
int thvar_index_
Definition: netcon.hpp:114
coreneuron::Phase2::restore_events
void restore_events(FileHandler &F)
Definition: phase2.cpp:512
coreneuron::Phase2::n_output
int n_output
Definition: phase2.hpp:93
coreneuron::Memb_list::_net_send_buffer
NetSendBuffer_t * _net_send_buffer
Definition: mechanism.hpp:143
coreneuron::NrnThread::pnt2presyn_ix
int ** pnt2presyn_ix
Definition: multicore.hpp:85
coreneuron::IvocVect
fixed_vector< double > IvocVect
Definition: ivocvect.hpp:72
coreneuron::Phase2::preSynConditionEventFlags
std::vector< int > preSynConditionEventFlags
Definition: phase2.hpp:30
coreneuron::nrn_abort
void nrn_abort(int errcode)
Definition: utils.cpp:13
coreneuron::NrnThread::_actual_d
double * _actual_d
Definition: multicore.hpp:112
coreneuron::NrnThread::_nidata
size_t _nidata
Definition: multicore.hpp:105
coreneuron::FileHandler::read_array
T * read_array(T *p, size_t count)
Read an integer array of fixed length.
Definition: nrn_filehandler.hpp:181
coreneuron::Memb_list::nodecount
int nodecount
Definition: mechanism.hpp:144
coreneuron::Phase2::num_point_process
int num_point_process
Definition: phase2.hpp:126
coreneuron::Phase2::TML
Definition: phase2.hpp:111
coreneuron::CoreNeuron::get_net_buf_receive
auto & get_net_buf_receive()
Definition: coreneuron.hpp:154
coreneuron::CoreNeuron::get_different_mechanism_type
auto & get_different_mechanism_type()
Definition: coreneuron.hpp:142
coreneuron::Memb_list::_net_receive_buffer
NetReceiveBuffer_t * _net_receive_buffer
Definition: mechanism.hpp:142
coreneuron::Memb_list::pdata
Datum * pdata
Definition: mechanism.hpp:140
coreneuron::Phase2::check_mechanism
void check_mechanism()
Check if MOD file used between NEURON and CoreNEURON is same.
Definition: phase2.cpp:434
coreneuron::Phase2::VecPlayContinuous_::ix
int ix
Definition: phase2.hpp:59
PlayRecordEventType
#define PlayRecordEventType
Definition: vrecitem.h:18
coreneuron::Phase2::populate
void populate(NrnThread &nt, const UserParams &userParams)
Definition: phase2.cpp:918
coreneuron::NrnThread::_vecplay
void ** _vecplay
Definition: multicore.hpp:109
coreneuron::Phase2::VecPlayContinuous_::tvec
IvocVect tvec
Definition: phase2.hpp:61
coreneuron::NrnThreadMembList::next
NrnThreadMembList * next
Definition: multicore.hpp:33
coreneuron::Point_process::_type
short _type
Definition: mechanism.hpp:37
nrn2core_get_dat2_corepointer_mech_
int(* nrn2core_get_dat2_corepointer_mech_)(int tid, int type, int &icnt, int &dcnt, int *&iarray, double *&darray)
Definition: phase2.cpp:66
coreneuron::nrn_get_mechname
const char * nrn_get_mechname(int type)
Definition: mk_mech.cpp:145
weights
#define weights
Definition: md1redef.h:42
coreneuron::PreSyn::pntsrc_
Point_process * pntsrc_
Definition: netcon.hpp:115
coreneuron::NrnThread::weights
double * weights
Definition: multicore.hpp:88
coreneuron::CoreNeuron::get_pnt_map
auto & get_pnt_map()
Definition: coreneuron.hpp:146
cellorder.hpp
area
#define area
Definition: md1redef.h:12
BEFORE_AFTER_SIZE
#define BEFORE_AFTER_SIZE
Definition: membfunc.hpp:72
nrn_checkpoint.hpp
data
#define data
Definition: md1redef.h:36
coreneuron::Phase2::VecPlayContinuous_::yvec
IvocVect yvec
Definition: phase2.hpp:60
coreneuron::nrn_extra_thread0_vdata
int nrn_extra_thread0_vdata
Definition: patternstim.cpp:46
coreneuron::NrnThreadChkpnt
Definition: nrn_checkpoint.hpp:87
coreneuron::Phase2::fill_before_after_lists
void fill_before_after_lists(NrnThread &nt, const std::vector< Memb_func > &memb_func)
Definition: phase2.cpp:570
coreneuron::emalloc
static void * emalloc(size_t size)
Definition: mpispike.cpp:30
vrecitem.h
multicore.hpp
coreneuron::NrnThread::pntprocs
Point_process * pntprocs
Definition: multicore.hpp:82
data_layout.hpp
coreneuron::NetReceiveBuffer_t::_nrb_flag
double * _nrb_flag
Definition: mechanism.hpp:48
coreneuron::CoreNeuron::get_prop_param_size
auto & get_prop_param_size()
Definition: coreneuron.hpp:166
coreneuron::Phase2::events
std::vector< std::pair< int, std::shared_ptr< EventTypeBase > > > events
Definition: phase2.hpp:70
coreneuron::mech_data_layout_transform
void mech_data_layout_transform(T *data, int cnt, int sz, int layout)
Definition: phase2.cpp:89
coreneuron::NrnThread::_actual_v
double * _actual_v
Definition: multicore.hpp:115
v
#define v
Definition: md1redef.h:11
coreneuron::Point_process::_tid
short _tid
Definition: mechanism.hpp:38
coreneuron::NetCon::active_
bool active_
Definition: netcon.hpp:49
coreneuron::NrnThread::mapping
void * mapping
Definition: multicore.hpp:143
phase2.hpp
coreneuron::Memb_list::_thread
ThreadDatum * _thread
Definition: mechanism.hpp:141
coreneuron::NrnThread::n_netcon
int n_netcon
Definition: multicore.hpp:92
coreneuron::Phase2::read_file
void read_file(FileHandler &F, const NrnThread &nt)
Definition: phase2.cpp:110
nrn2core_get_dat2_2_
int(* nrn2core_get_dat2_2_)(int tid, int *&v_parent_index, double *&a, double *&b, double *&area, double *&v, double *&diamvec)
Definition: phase2.cpp:39
coreneuron::permute_nodeindices
void permute_nodeindices(Memb_list *ml, int *p)
Definition: node_permute.cpp:431
coreneuron::CoreNeuron::get_bamech
auto & get_bamech()
Definition: coreneuron.hpp:162
nodeindices
#define nodeindices
Definition: md1redef.h:35
coreneuron::NrnThread::_actual_b
double * _actual_b
Definition: multicore.hpp:114
coreneuron::nrnmpi_myid
int nrnmpi_myid
Definition: nrnmpi_def_cinc.cpp:11
coreneuron::NrnThreadBAList::bam
BAMech * bam
Definition: multicore.hpp:48
coreneuron::PreSyn::gid_
int gid_
Definition: netcon.hpp:112
coreneuron::Memb_list::data
double * data
Definition: mechanism.hpp:139
coreneuron::NrnThread::end
int end
Definition: multicore.hpp:98
coreneuron::NetReceiveBuffer_t::_pnt_offset
int _pnt_offset
Definition: mechanism.hpp:52
coreneuron::NetReceiveBuffer_t::_displ
int * _displ
Definition: mechanism.hpp:42
coreneuron::FileHandler::read_vector
std::vector< T > read_vector(size_t count)
Definition: nrn_filehandler.hpp:192
NetParEventType
#define NetParEventType
Definition: netcon.hpp:30
coreneuron::NrnThread::_actual_area
double * _actual_area
Definition: multicore.hpp:116
coreneuron::CoreNeuron::get_pnttype2presyn
auto & get_pnttype2presyn()
Definition: coreneuron.hpp:206
coreneuron::ecalloc_align
void * ecalloc_align(size_t n, size_t size, size_t alignment)
coreneuron::Phase2::nodecounts
std::vector< int > nodecounts
Definition: phase2.hpp:99
coreneuron::NetReceiveBuffer_t::_size
int _size
Definition: mechanism.hpp:51
nrn_assert
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
Definition: nrn_assert.h:33
coreneuron::SoA
@ SoA
Definition: nrniv_decl.h:69
coreneuron::Phase2::VecPlayContinuous_
Definition: phase2.hpp:56
coreneuron::NrnThread::ncell
int ncell
Definition: multicore.hpp:97
coreneuron::NrnThread::_permute
int * _permute
Definition: multicore.hpp:127
coreneuron::CoreNeuron::get_pnt_receive_size
auto & get_pnt_receive_size()
Definition: coreneuron.hpp:194
coreneuron::VecPlayContinuous
Definition: vrecitem.h:57
coreneuron::NetCon::target_
Point_process * target_
Definition: netcon.hpp:51
coreneuron::NetReceiveBuffer_t::_nrb_t
double * _nrb_t
Definition: mechanism.hpp:47
coreneuron::Memb_list::nodeindices
int * nodeindices
Definition: mechanism.hpp:137
coreneuron::Phase2::VecPlayContinuous_::last_index
int last_index
Definition: phase2.hpp:63
coreneuron::Memb_list::_permute
int * _permute
Definition: mechanism.hpp:138
coreneuron::Point_process::_i_instance
int _i_instance
Definition: mechanism.hpp:36
coreneuron::NrnThread::n_vecplay
int n_vecplay
Definition: multicore.hpp:101