CoreNEURON
nrn_checkpoint.cpp
/*
# =============================================================================
# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/
#include <iostream>
#include <sstream>
#include <cassert>
#include <memory>

// Project headers (the original listing elided this block; these are the
// headers its symbols resolve to, with project-relative paths abbreviated)
#include "coreneuron.hpp"
#include "corenrn_parameters.hpp"
#include "multicore.hpp"
#include "nrniv_decl.h"
#include "nrn_checkpoint.hpp"
#include "nrn_filehandler.hpp"
#include "nrn_setup.hpp"
#include "nrnoc_aux.hpp"
#include "netcvode.hpp"
#include "netpar.hpp"
#include "node_permute.h"
#include "vrecitem.h"
#include "data_layout.hpp"
#include "file_utils.hpp"
#include "mod2c_core_thread.hpp"
namespace coreneuron {
// These functions come directly from the (PatternStim) mod file
extern int checkpoint_save_patternstim(_threadargsproto_);
extern void checkpoint_restore_patternstim(int, double, _threadargsproto_);

CheckPoints::CheckPoints(const std::string& save, const std::string& restore)
    : save_(save)
    , restore_(restore)
    , restored(false) {
    if (!save.empty()) {
        if (nrnmpi_myid == 0) {
            mkdir_p(save.c_str());
        }
    }
}

/// todo: broadcast this from rank 0 rather than having every rank read a double
double CheckPoints::restore_time() const {
    if (!should_restore()) {
        return 0.;
    }

    double rtime = 0.;
    FileHandler f;
    std::string filename = restore_ + "/time.dat";
    f.open(filename, std::ios::in);
    f.read_array(&rtime, 1);
    f.close();
    return rtime;
}

void CheckPoints::write_checkpoint(NrnThread* nt, int nb_threads) const {
    if (!should_save()) {
        return;
    }

#if NRNMPI
    if (corenrn_param.mpi_enable) {
        nrnmpi_barrier();
    }
#endif

    /**
     * if openmp threading needed:
     * #pragma omp parallel for private(i) shared(nt, nb_threads) schedule(runtime)
     */
    for (int i = 0; i < nb_threads; i++) {
        if (nt[i].ncell || nt[i].tml) {
            write_phase2(nt[i]);
        }
    }

    if (nrnmpi_myid == 0) {
        write_time();
    }
#if NRNMPI
    if (corenrn_param.mpi_enable) {
        nrnmpi_barrier();
    }
#endif
}

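// Background for the helper below: with SoA layout a mechanism value lives at
// data[var * padded_nodecount + permuted_instance], while the checkpoint file
// expects the original AoS ordering data[instance * sz + var].
// Illustrative example: with nodecount = 3 padded to 4 and sz = 2, the SoA
// offset of (instance 2, variable 1) is 1 * 4 + 2 = 6; nrn_inverse_i_layout
// recovers (instance, variable) = (2, 1), and after undoing any node
// permutation the AoS index is 2 * 2 + 1 = 5.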
// Factor out the body of ion handling below as the same code
// handles POINTER
static int nrn_original_aos_index(int etype, int ix, NrnThread& nt, int** ml_pinv) {
    // Determine ei_instance and ei from etype and ix.
    // Deal with existing permutation and SoA.
    Memb_list* eml = nt._ml_list[etype];
    int ecnt = eml->nodecount;
    int esz = corenrn.get_prop_param_size()[etype];
    int elayout = corenrn.get_mech_data_layout()[etype];
    // The current index into eml->data is a function
    // of elayout, eml._permute, ei_instance, ei, and
    // eml padding.
    int p = ix - (eml->data - nt._data);
    assert(p >= 0 && p < eml->_nodecount_padded * esz);
    int ei_instance, ei;
    nrn_inverse_i_layout(p, ei_instance, ecnt, ei, esz, elayout);
    if (elayout == Layout::SoA) {
        if (eml->_permute) {
            if (!ml_pinv[etype]) {
                ml_pinv[etype] = inverse_permute(eml->_permute, eml->nodecount);
            }
            ei_instance = ml_pinv[etype][ei_instance];
        }
    }
    return ei_instance * esz + ei;
}
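// Note on the ml_pinv parameter above: entries are created lazily (one inverse
// permutation per mechanism type) and cached in the caller-owned array, so
// repeated conversions for the same type reuse them; write_phase2 frees the
// entries once the thread has been written.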

void CheckPoints::write_phase2(NrnThread& nt) const {
    FileHandler fh;
    NrnThreadChkpnt& ntc = nrnthread_chkpnt[nt.id];
    auto filename = get_save_path() + "/" + std::to_string(ntc.file_id) + "_2.dat";

    fh.open(filename, std::ios::out);
    fh.checkpoint(2);

    int n_outputgid = 0;  // count PreSyn instances with gid >= 0
    for (int i = 0; i < nt.n_presyn; ++i) {
        if (nt.presyns[i].gid_ >= 0) {
            ++n_outputgid;
        }
    }

    fh << nt.ncell << " ncell\n";
    fh << n_outputgid << " ngid\n";
#if CHKPNTDEBUG
    assert(ntc.n_outputgids == n_outputgid);
#endif

    fh << nt.n_real_output << " n_real_output\n";
    fh << nt.end << " nnode\n";
    fh << ((nt._actual_diam == nullptr) ? 0 : nt.end) << " ndiam\n";
    int nmech = 0;
    for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
        if (tml->index != patstimtype) {  // skip PatternStim
            ++nmech;
        }
    }

    fh << nmech << " nmech\n";
#if CHKPNTDEBUG
    assert(nmech == ntc.nmech);
#endif

    for (NrnThreadMembList* current_tml = nt.tml; current_tml; current_tml = current_tml->next) {
        if (current_tml->index == patstimtype) {
            continue;
        }
        fh << current_tml->index << "\n";
        fh << current_tml->ml->nodecount << "\n";
    }

    fh << nt._nidata << " nidata\n";
    fh << nt._nvdata << " nvdata\n";
    fh << nt.n_weight << " nweight\n";

    // see comment about parent in node_permute.cpp
    int* pinv_nt = nullptr;
    if (nt._permute) {
        int* d = new int[nt.end];
        pinv_nt = inverse_permute(nt._permute, nt.end);
        for (int i = 0; i < nt.end; ++i) {
            int x = nt._v_parent_index[nt._permute[i]];
            if (x >= 0) {
                d[i] = pinv_nt[x];
            } else {
                d[i] = 0;  // really should be -1
            }
        }
#if CHKPNTDEBUG
        for (int i = 0; i < nt.end; ++i) {
            assert(d[i] == ntc.parent[i]);
        }
#endif
        fh.write_array<int>(d, nt.end);
        delete[] d;
    } else {
#if CHKPNTDEBUG
        for (int i = 0; i < nt.end; ++i) {
            assert(nt._v_parent_index[i] == ntc.parent[i]);
        }
#endif
        fh.write_array<int>(nt._v_parent_index, nt.end);
        pinv_nt = new int[nt.end];
        for (int i = 0; i < nt.end; ++i) {
            pinv_nt[i] = i;
        }
    }
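    // From here on, pinv_nt maps a (possibly permuted) node index back to the
    // original file-order node index; it is the identity when nt._permute is null.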

    data_write(fh, nt._actual_a, nt.end, 1, 0, nt._permute);
    data_write(fh, nt._actual_b, nt.end, 1, 0, nt._permute);

#if CHKPNTDEBUG
    for (int i = 0; i < nt.end; ++i) {
        assert(nt._actual_area[i] == ntc.area[pinv_nt[i]]);
    }
#endif

    data_write(fh, nt._actual_area, nt.end, 1, 0, nt._permute);
    data_write(fh, nt._actual_v, nt.end, 1, 0, nt._permute);

    if (nt._actual_diam) {
        data_write(fh, nt._actual_diam, nt.end, 1, 0, nt._permute);
    }

    auto& memb_func = corenrn.get_memb_funcs();
    // will need ml_pinv, the inverse permutation of ml._permute, for ions and POINTER
    int** ml_pinv = (int**) ecalloc(memb_func.size(), sizeof(int*));

    for (NrnThreadMembList* current_tml = nt.tml; current_tml; current_tml = current_tml->next) {
        Memb_list* ml = current_tml->ml;
        int type = current_tml->index;
        if (type == patstimtype) {
            continue;
        }
        int cnt = ml->nodecount;
        auto& nrn_prop_param_size_ = corenrn.get_prop_param_size();
        auto& nrn_prop_dparam_size_ = corenrn.get_prop_dparam_size();
        auto& nrn_is_artificial_ = corenrn.get_is_artificial();

        int sz = nrn_prop_param_size_[type];
        int layout = corenrn.get_mech_data_layout()[type];
        int* semantics = memb_func[type].dparam_semantics;

        if (!nrn_is_artificial_[type]) {
            // ml->nodeindices values are permuted according to nt._permute
            // and locations according to ml._permute,
            // i.e. according to the comment in node_permute.cpp:
            //   nodelist[p_m[i]] = p[nodelist_original[i]]
            // so pinv[nodelist[p_m[i]]] = nodelist_original[i]
            int* nd_ix = new int[cnt];
            for (int i = 0; i < cnt; ++i) {
                int ip = ml->_permute ? ml->_permute[i] : i;
                int ipval = ml->nodeindices[ip];
                nd_ix[i] = pinv_nt[ipval];
            }
            fh.write_array<int>(nd_ix, cnt);
            delete[] nd_ix;
        }

        data_write(fh, ml->data, cnt, sz, layout, ml->_permute);

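        // Datum semantics codes handled below: -1 is an index into
        // _actual_area, -9 into _actual_diam, -5 is a POINTER (to the voltage
        // array or to another mechanism's data), 0..999 is an ion mechanism
        // type, and -8 (WATCH) is skipped in the debug comparison because its
        // values legitimately change during simulation.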
        sz = nrn_prop_dparam_size_[type];
        if (sz) {
            // need to update some values according to Datum semantics.
            int* d = soa2aos(ml->pdata, cnt, sz, layout, ml->_permute);
            std::vector<int> pointer2type;  // voltage or mechanism type (starts empty)
            if (!nrn_is_artificial_[type]) {
                for (int i_instance = 0; i_instance < cnt; ++i_instance) {
                    for (int i = 0; i < sz; ++i) {
                        int ix = i_instance * sz + i;
                        int s = semantics[i];
                        if (s == -1) {  // area
                            int p = pinv_nt[d[ix] - (nt._actual_area - nt._data)];
                            d[ix] = p;  // relative to _actual_area
                        } else if (s == -9) {  // diam
                            int p = pinv_nt[d[ix] - (nt._actual_diam - nt._data)];
                            d[ix] = p;  // relative to _actual_diam
                        } else if (s == -5) {  // POINTER
                            // Looping over instances, then sz, means we visit
                            // in an order consistent with the natural order of
                            // pointer2type.

                            // The relevant code that this has to invert is
                            // permute/node_permute.cpp :: update_pdata_values with
                            // respect to permutation, and
                            // io/phase2.cpp :: Phase2::pdata_relocation
                            // with respect to the AoS -> SoA transformation.

                            // Step 1: what mechanism is d[ix] pointing to
                            int ptype = type_of_ntdata(nt, d[ix], i_instance == 0);
                            pointer2type.push_back(ptype);

                            // Step 2: replace d[ix] with the AoS index relative to type
                            if (ptype == voltage) {
                                int p = pinv_nt[d[ix] - (nt._actual_v - nt._data)];
                                d[ix] = p;  // relative to _actual_v
                            } else {
                                // Since we know ptype, the situation is identical
                                // to the ion case below (which was factored out
                                // into nrn_original_aos_index above).
                                d[ix] = nrn_original_aos_index(ptype, d[ix], nt, ml_pinv);
                            }
                        } else if (s >= 0 && s < 1000) {  // ion
                            d[ix] = nrn_original_aos_index(s, d[ix], nt, ml_pinv);
                        }
#if CHKPNTDEBUG
                        if (s != -8) {  // WATCH values change
                            assert(d[ix] ==
                                   ntc.mlmap[type]->pdata_not_permuted[i_instance * sz + i]);
                        }
#endif
                    }
                }
            }
            fh.write_array<int>(d, cnt * sz);
            delete[] d;
            size_t s = pointer2type.size();
            fh << s << " npointer\n";
            if (s) {
                fh.write_array<int>(pointer2type.data(), s);
            }
        }
    }

    int nnetcon = nt.n_netcon;

    int* output_vindex = new int[nt.n_presyn];
    double* output_threshold = new double[nt.n_real_output];
    for (int i = 0; i < nt.n_presyn; ++i) {
        PreSyn* ps = nt.presyns + i;
        if (ps->thvar_index_ >= 0) {
            // real cell and index into (permuted) actual_v;
            // if any assert fails in this loop then we have a faulty understanding
            // of the for (int i = 0; i < nt.n_presyn; ++i) loop in nrn_setup.cpp
            assert(ps->thvar_index_ < nt.end);
            assert(ps->pntsrc_ == nullptr);
            output_threshold[i] = ps->threshold_;
            output_vindex[i] = pinv_nt[ps->thvar_index_];
        } else if (i < nt.n_real_output) {  // real cell without a presyn
            output_threshold[i] = 0.0;  // the way it was set in nrnbbcore_write.cpp
            output_vindex[i] = -1;
        } else {
            Point_process* pnt = ps->pntsrc_;
            assert(pnt);
            int type = pnt->_type;
            int ix = pnt->_i_instance;
            if (nt._ml_list[type]->_permute) {
                // pnt->_i_instance is the permuted index into pnt->_type
                if (!ml_pinv[type]) {
                    Memb_list* ml = nt._ml_list[type];
                    ml_pinv[type] = inverse_permute(ml->_permute, ml->nodecount);
                }
                ix = ml_pinv[type][ix];
            }
            output_vindex[i] = -(ix * 1000 + type);
        }
    }
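    // Encoding summary (mirrors nrn_setup.cpp): output_vindex[i] >= 0 is a
    // de-permuted voltage index for a real cell's threshold detector, -1 marks
    // a real cell without one, and other negative values pack an
    // artificial-cell source as -(instance * 1000 + type), which relies on
    // mechanism type ids staying below 1000.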
    fh.write_array<int>(output_vindex, nt.n_presyn);
    fh.write_array<double>(output_threshold, nt.n_real_output);
#if CHKPNTDEBUG
    for (int i = 0; i < nt.n_presyn; ++i) {
        assert(ntc.output_vindex[i] == output_vindex[i]);
    }
    for (int i = 0; i < nt.n_real_output; ++i) {
        assert(ntc.output_threshold[i] == output_threshold[i]);
    }
#endif
    delete[] output_vindex;
    delete[] output_threshold;
    delete[] pinv_nt;

    int synoffset = 0;
    std::vector<int> pnt_offset(memb_func.size(), -1);
    for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
        int type = tml->index;
        if (corenrn.get_pnt_map()[type] > 0) {
            pnt_offset[type] = synoffset;
            synoffset += tml->ml->nodecount;
        }
    }
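    // pnt_offset[type] now holds the index of the first Point_process of that
    // type within nt.pntprocs (types are laid out contiguously there), so
    // (pnt - nt.pntprocs) - pnt_offset[type] below recovers the file-order
    // instance index that nrn_setup used.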

    int* pnttype = new int[nnetcon];
    int* pntindex = new int[nnetcon];
    double* delay = new double[nnetcon];
    for (int i = 0; i < nnetcon; ++i) {
        NetCon& nc = nt.netcons[i];
        Point_process* pnt = nc.target_;
        if (pnt == nullptr) {
            // nrn_setup.cpp allows type <= 0, which generates a nullptr target.
            pnttype[i] = 0;
            pntindex[i] = -1;
        } else {
            pnttype[i] = pnt->_type;

            // todo: this seems most natural, but does not work. Perhaps we should look
            // into how pntindex is determined in nrnbbcore_write.cpp and change it there.
            // int ix = pnt->_i_instance;
            // if (ml_pinv[pnt->_type]) {
            //     ix = ml_pinv[pnt->_type][ix];
            // }

            // follow the inverse of nrn_setup.cpp using pnt_offset computed above.
            int ix = (pnt - nt.pntprocs) - pnt_offset[pnt->_type];
            pntindex[i] = ix;
        }
        delay[i] = nc.delay_;
    }
    fh.write_array<int>(pnttype, nnetcon);
    fh.write_array<int>(pntindex, nnetcon);
    fh.write_array<double>(nt.weights, nt.n_weight);
    fh.write_array<double>(delay, nnetcon);
#if CHKPNTDEBUG
    for (int i = 0; i < nnetcon; ++i) {
        assert(ntc.pnttype[i] == pnttype[i]);
        assert(ntc.pntindex[i] == pntindex[i]);
        assert(ntc.delay[i] == delay[i]);
    }
#endif
    delete[] pnttype;
    delete[] pntindex;
    delete[] delay;

    // BBCOREPOINTER
    int nbcp = 0;
    for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
        if (corenrn.get_bbcore_read()[tml->index] && tml->index != patstimtype) {
            ++nbcp;
        }
    }

    fh << nbcp << " bbcorepointer\n";
#if CHKPNTDEBUG
    assert(nbcp == ntc.nbcp);
#endif
    nbcp = 0;
    for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
        if (corenrn.get_bbcore_read()[tml->index] && tml->index != patstimtype) {
            int i = nbcp++;
            int type = tml->index;
            assert(corenrn.get_bbcore_write()[type]);
            Memb_list* ml = tml->ml;
            double* d = nullptr;
            Datum* pd = nullptr;
            int layout = corenrn.get_mech_data_layout()[type];
            int dsz = corenrn.get_prop_param_size()[type];
            int pdsz = corenrn.get_prop_dparam_size()[type];
            int aln_cntml = nrn_soa_padded_size(ml->nodecount, layout);
            fh << type << "\n";
            int icnt = 0;
            int dcnt = 0;
            // data size and allocate
            for (int j = 0; j < ml->nodecount; ++j) {
                int jp = j;
                if (ml->_permute) {
                    jp = ml->_permute[j];
                }
                d = ml->data + nrn_i_layout(jp, ml->nodecount, 0, dsz, layout);
                pd = ml->pdata + nrn_i_layout(jp, ml->nodecount, 0, pdsz, layout);
                (*corenrn.get_bbcore_write()[type])(
                    nullptr, nullptr, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->_thread, &nt, ml, 0.0);
            }
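            // The pass above only sizes the output: bbcore_write is invoked
            // with null arrays so it merely advances icnt/dcnt. The second
            // pass below repeats the calls with allocated arrays to emit the
            // actual values.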
            fh << icnt << "\n";
            fh << dcnt << "\n";
#if CHKPNTDEBUG
            assert(ntc.bcptype[i] == type);
            assert(ntc.bcpicnt[i] == icnt);
            assert(ntc.bcpdcnt[i] == dcnt);
#endif
            int* iArray = nullptr;
            double* dArray = nullptr;
            if (icnt) {
                iArray = new int[icnt];
            }
            if (dcnt) {
                dArray = new double[dcnt];
            }
            icnt = dcnt = 0;
            for (int j = 0; j < ml->nodecount; j++) {
                int jp = j;

                if (ml->_permute) {
                    jp = ml->_permute[j];
                }

                d = ml->data + nrn_i_layout(jp, ml->nodecount, 0, dsz, layout);
                pd = ml->pdata + nrn_i_layout(jp, ml->nodecount, 0, pdsz, layout);

                (*corenrn.get_bbcore_write()[type])(
                    dArray, iArray, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->_thread, &nt, ml, 0.0);
            }

            if (icnt) {
                fh.write_array<int>(iArray, icnt);
                delete[] iArray;
            }

            if (dcnt) {
                fh.write_array<double>(dArray, dcnt);
                delete[] dArray;
            }
            ++i;
        }
    }

    fh << nt.n_vecplay << " VecPlay instances\n";
    for (int i = 0; i < nt.n_vecplay; i++) {
        PlayRecord* pr = (PlayRecord*) nt._vecplay[i];
        int vtype = pr->type();
        int mtype = -1;
        int ix = -1;

        // not as efficient as possible but there should not be too many
        Memb_list* ml = nullptr;
        for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) {
            ml = tml->ml;
            int nn = corenrn.get_prop_param_size()[tml->index] * ml->nodecount;
            if (nn && pr->pd_ >= ml->data && pr->pd_ < (ml->data + nn)) {
                mtype = tml->index;
                ix = (pr->pd_ - ml->data);
                break;
            }
        }
        assert(mtype >= 0);
        int icnt, isz;
        nrn_inverse_i_layout(ix,
                             icnt,
                             ml->nodecount,
                             isz,
                             corenrn.get_prop_param_size()[mtype],
                             corenrn.get_mech_data_layout()[mtype]);
        if (ml_pinv[mtype]) {
            icnt = ml_pinv[mtype][icnt];
        }
        ix = nrn_i_layout(
            icnt, ml->nodecount, isz, corenrn.get_prop_param_size()[mtype], AOS_LAYOUT);

        fh << vtype << "\n";
        fh << mtype << "\n";
        fh << ix << "\n";
#if CHKPNTDEBUG
        assert(ntc.vtype[i] == vtype);
        assert(ntc.mtype[i] == mtype);
        assert(ntc.vecplay_ix[i] == ix);
#endif
        if (vtype == VecPlayContinuousType) {
            VecPlayContinuous* vpc = (VecPlayContinuous*) pr;
            int sz = vpc->y_.size();
            fh << sz << "\n";
            fh.write_array<double>(vpc->y_.data(), sz);
            fh.write_array<double>(vpc->t_.data(), sz);
        } else {
            std::cerr << "Error checkpointing vecplay type" << std::endl;
            assert(0);
        }
    }

    for (size_t i = 0; i < memb_func.size(); ++i) {
        if (ml_pinv[i]) {
            delete[] ml_pinv[i];
        }
    }
    free(ml_pinv);

    write_tqueue(nt, fh);
    fh.close();
}

void CheckPoints::write_time() const {
    FileHandler f;
    auto filename = get_save_path() + "/time.dat";
    f.open(filename, std::ios::out);
    f.write_array(&t, 1);
    f.close();
}

// A call to finitialize must be avoided after restoring the checkpoint
// as that would change all states to a voltage clamp initialization.
// Nevertheless, t and some spike exchange and other computed state need to
// be initialized.
// Also, it is occasionally the case that nrn_init allocates data, so we
// need to call it but avoid the internal call to initmodel.
// Consult finitialize.c to help decide what should be here.
bool CheckPoints::initialize() {
    dt2thread(-1.);
    nrn_thread_table_check();
    allocate_data_in_mechanism_nrn_init();

    nrn_spike_exchange_init();

    // if PatternStim exists, needs initialization
    for (NrnThreadMembList* tml = nrn_threads[0].tml; tml; tml = tml->next) {
        if (tml->index == patstimtype && patstim_index >= 0 && patstim_te > 0.0) {
            Memb_list* ml = tml->ml;
            checkpoint_restore_patternstim(patstim_index,
                                           patstim_te,
                                           /* below correct only for AoS */
                                           0,
                                           ml->nodecount,
                                           ml->data,
                                           ml->pdata,
                                           ml->_thread,
                                           nrn_threads,
                                           ml,
                                           0.0);
            break;
        }
    }

    // Check that bbcore_write is defined if we want to use checkpoint
    for (NrnThreadMembList* tml = nrn_threads[0].tml; tml; tml = tml->next) {
        auto type = tml->index;
        if (corenrn.get_bbcore_read()[type] && !corenrn.get_bbcore_write()[type]) {
            auto memb_func = corenrn.get_memb_func(type);
            fprintf(stderr,
                    "Checkpoint is requested involving BBCOREPOINTER but there is no bbcore_write"
                    " function for %s\n",
                    memb_func.sym);
            assert(corenrn.get_bbcore_write()[type]);
        }
    }

    return restored;
}

template <typename T>
T* CheckPoints::soa2aos(T* data, int cnt, int sz, int layout, int* permute) const {
    // inverse of F -> data. Just a copy if layout is AoS. If SoA,
    // the original file order depends on padding and permutation.
    // Good for a, b, area, v, diam, Memb_list.data, or anywhere values do not change.
    T* d = new T[cnt * sz];
    if (layout == Layout::AoS) {
        for (int i = 0; i < cnt * sz; ++i) {
            d[i] = data[i];
        }
    } else if (layout == Layout::SoA) {
        int align_cnt = nrn_soa_padded_size(cnt, layout);
        for (int i = 0; i < cnt; ++i) {
            int ip = i;
            if (permute) {
                ip = permute[i];
            }
            for (int j = 0; j < sz; ++j) {
                d[i * sz + j] = data[ip + j * align_cnt];
            }
        }
    }
    return d;
}

template <typename T>
void CheckPoints::data_write(FileHandler& F, T* data, int cnt, int sz, int layout, int* permute)
    const {
    T* d = soa2aos(data, cnt, sz, layout, permute);
    F.write_array<T>(d, cnt * sz);
    delete[] d;
}

NrnThreadChkpnt* nrnthread_chkpnt;

int patstimtype;

void CheckPoints::write_tqueue(TQItem* q, NrnThread& nt, FileHandler& fh) const {
    DiscreteEvent* d = (DiscreteEvent*) q->data_;

    // printf(" p %.20g %d\n", q->t_, d->type());
    // d->pr("", q->t_, net_cvode_instance);

    if (!d->require_checkpoint()) {
        return;
    }

    fh << d->type() << "\n";
    fh.write_array(&q->t_, 1);

    switch (d->type()) {
        case NetConType: {
            NetCon* nc = (NetCon*) d;
            assert(nc >= nt.netcons && (nc < (nt.netcons + nt.n_netcon)));
            fh << (nc - nt.netcons) << "\n";
            break;
        }
        case SelfEventType: {
            SelfEvent* se = (SelfEvent*) d;
            fh << int(se->target_->_type) << "\n";
            fh << se->target_ - nt.pntprocs << "\n";  // index into nrnthread.pntprocs
            fh << se->target_->_i_instance << "\n";   // not needed except for assert check
            fh.write_array(&se->flag_, 1);
            fh << (se->movable_ - nt._vdata) << "\n";  // DANGEROUS?
            fh << se->weight_index_ << "\n";
            // printf(" %d %ld %d %g %ld %d\n", se->target_->_type, se->target_ - nt.pntprocs,
            // se->target_->_i_instance, se->flag_, se->movable_ - nt._vdata, se->weight_index_);
            break;
        }
        case PreSynType: {
            PreSyn* ps = (PreSyn*) d;
            assert(ps >= nt.presyns && (ps < (nt.presyns + nt.n_presyn)));
            fh << (ps - nt.presyns) << "\n";
            break;
        }
        case NetParEventType: {
            // nothing extra to write
            break;
        }
        case PlayRecordEventType: {
            PlayRecord* pr = ((PlayRecordEvent*) d)->plr_;
            fh << pr->type() << "\n";
            if (pr->type() == VecPlayContinuousType) {
                VecPlayContinuous* vpc = (VecPlayContinuous*) pr;
                int ix = -1;
                for (int i = 0; i < nt.n_vecplay; ++i) {
                    // if too many for fast search, put ix in the instance
                    if (nt._vecplay[i] == (void*) vpc) {
                        ix = i;
                        break;
                    }
                }
                assert(ix >= 0);
                fh << ix << "\n";
            } else {
                assert(0);
            }
            break;
        }
        default: {
            // In particular, InputPreSyn does not appear in the tqueue as it
            // immediately fans out to NetCon.
            assert(0);
            break;
        }
    }
}

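// Counterpart of write_tqueue(TQItem*, ...) above: instead of deserializing
// queue items directly, each recorded event is re-issued through the normal
// send/net_send paths so the queue is rebuilt consistently.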
void CheckPoints::restore_tqitem(int type,
                                 std::shared_ptr<Phase2::EventTypeBase> event,
                                 NrnThread& nt) {
    // printf("restore tqitem type=%d time=%.20g\n", type, time);

    switch (type) {
        case NetConType: {
            auto e = static_cast<Phase2::NetConType_*>(event.get());
            // printf(" NetCon %d\n", netcon_index);
            NetCon* nc = nt.netcons + e->netcon_index;
            nc->send(e->time, net_cvode_instance, &nt);
            break;
        }
        case SelfEventType: {
            auto e = static_cast<Phase2::SelfEventType_*>(event.get());
            if (e->target_type == patstimtype) {
                if (nt.id == 0) {
                    patstim_te = e->time;
                }
                break;
            }
            Point_process* pnt = nt.pntprocs + e->point_proc_instance;
            // printf(" SelfEvent %d %d %d %g %d %d\n", target_type, point_proc_instance,
            // target_instance, flag, movable, weight_index);
            nrn_assert(e->target_instance == pnt->_i_instance);
            nrn_assert(e->target_type == pnt->_type);
            net_send(nt._vdata + e->movable, e->weight_index, pnt, e->time, e->flag);
            break;
        }
        case PreSynType: {
            auto e = static_cast<Phase2::PreSynType_*>(event.get());
            // printf(" PreSyn %d\n", presyn_index);
            PreSyn* ps = nt.presyns + e->presyn_index;
            int gid = ps->output_index_;
            ps->output_index_ = -1;
            ps->send(e->time, net_cvode_instance, &nt);
            ps->output_index_ = gid;
            break;
        }
        case NetParEventType: {
            // nothing extra to read
            // printf(" NetParEvent\n");
            break;
        }
        case PlayRecordEventType: {
            auto e = static_cast<Phase2::PlayRecordEventType_*>(event.get());
            VecPlayContinuous* vpc = (VecPlayContinuous*) (nt._vecplay[e->vecplay_index]);
            vpc->e_->send(e->time, net_cvode_instance, &nt);
            break;
        }
        default: {
            assert(0);
            break;
        }
    }
}

void CheckPoints::write_tqueue(NrnThread& nt, FileHandler& fh) const {
    // VecPlayContinuous
    fh << nt.n_vecplay << " VecPlayContinuous state\n";
    for (int i = 0; i < nt.n_vecplay; ++i) {
        VecPlayContinuous* vpc = (VecPlayContinuous*) nt._vecplay[i];
        fh << vpc->last_index_ << "\n";
        fh << vpc->discon_index_ << "\n";
        fh << vpc->ubound_index_ << "\n";
    }

    // PatternStim
    int patstim_index = -1;
    for (NrnThreadMembList* tml = nrn_threads[0].tml; tml; tml = tml->next) {
        if (tml->index == patstimtype) {
            Memb_list* ml = tml->ml;
            patstim_index = checkpoint_save_patternstim(
                /* below correct only for AoS */
                0,
                ml->nodecount,
                ml->data,
                ml->pdata,
                ml->_thread,
                nrn_threads,
                ml,
                0.0);
            break;
        }
    }
    fh << patstim_index << " PatternStim\n";

    // Avoid extra spikes due to some presyn voltages above threshold
    fh << -1 << " Presyn ConditionEvent flags\n";
    for (int i = 0; i < nt.n_presyn; ++i) {
        // PreSyn.flag_ not used; HPC memory instead utilizes the PreSynHelper.flag_ array
        fh << nt.presyns_helper[i].flag_ << "\n";
    }

    NetCvodeThreadData& ntd = net_cvode_instance->p[nt.id];
    // printf("write_tqueue %d %p\n", nt.id, ntd.tqe_);
    TQueue<QTYPE>* tqe = ntd.tqe_;
    TQItem* q;

    fh << -1 << " TQItems from atomic_dq\n";
    while ((q = tqe->atomic_dq(1e20)) != nullptr) {
        write_tqueue(q, nt, fh);
    }
    fh << 0 << "\n";
    fh << -1 << " TQItems from binq_\n";
    for (q = tqe->binq_->first(); q; q = tqe->binq_->next(q)) {
        write_tqueue(q, nt, fh);
    }
    fh << 0 << "\n";
}

// Read a tqueue/checkpoint
// int :: should be equal to the previous n_vecplay
// n_vecplay:
//   int: last_index
//   int: discon_index
//   int: ubound_index
// int: patstim_index
// int: should be -1
// n_presyn:
//   int: flags of presyn_helper
// int: should be -1
// null terminated:
//   int: type
//   array of size 1:
//     double: time
//   ... depends on the type
// int: should be -1
// null terminated:
//   int: TO BE DEFINED
//   ... depends on the type
void CheckPoints::restore_tqueue(NrnThread& nt, const Phase2& p2) {
    restored = true;

    for (int i = 0; i < nt.n_vecplay; ++i) {
        VecPlayContinuous* vpc = (VecPlayContinuous*) nt._vecplay[i];
        auto& vec = p2.vec_play_continuous[i];
        vpc->last_index_ = vec.last_index;
        vpc->discon_index_ = vec.discon_index;
        vpc->ubound_index_ = vec.ubound_index;
    }

    // PatternStim
    patstim_index = p2.patstim_index;
    if (nt.id == 0) {
        patstim_te = -1.0;  // changed if a relevant SelfEvent is restored
    }

    for (int i = 0; i < nt.n_presyn; ++i) {
        nt.presyns_helper[i].flag_ = p2.preSynConditionEventFlags[i];
    }

    for (const auto& event: p2.events) {
        restore_tqitem(event.first, event.second, nt);
    }
}

}  // namespace coreneuron