CoreNEURON
netcvode.cpp
/*
# =============================================================================
# Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/

#include <float.h>
#include <map>
#include <mutex>

#include "coreneuron/nrnconf.h"

namespace coreneuron {
#define PP2NT(pp) (nrn_threads + (pp)->_tid)
#define PP2t(pp) (PP2NT(pp)->_t)
//#define POINT_RECEIVE(type, tar, w, f) (*pnt_receive[type])(tar, w, f)

double NetCvode::eps_;
NetCvode* net_cvode_instance;
bool cvode_active_;

/// Flag to use the bin queue
bool nrn_use_bin_queue_;

void mk_netcvode() {
    if (!net_cvode_instance) {
        net_cvode_instance = new NetCvode();
    }
}

#ifdef DEBUG
// temporary
static int nrn_errno_check(int type) {
    printf("nrn_errno_check() was called on pid %d: errno=%d type=%d\n", nrnmpi_myid, errno, type);
    // assert(0);
    type = 0;
    return 1;
}
#endif

// for _OPENACC and/or NET_RECEIVE_BUFFERING
// sendtype: 0 net_send, 2 net_move, otherwise net_event
void net_sem_from_gpu(int sendtype,
                      int i_vdata,
                      int weight_index_,
                      int ith,
                      int ipnt,
                      double td,
                      double flag) {
    NrnThread& nt = nrn_threads[ith];
    Point_process* pnt = (Point_process*) nt._vdata[ipnt];
    if (sendtype == 0) {
        net_send(nt._vdata + i_vdata, weight_index_, pnt, td, flag);
    } else if (sendtype == 2) {
        net_move(nt._vdata + i_vdata, pnt, td);
    } else {
        net_event(pnt, td);
    }
}

void net_send(void** v, int weight_index_, Point_process* pnt, double td, double flag) {
    NrnThread* nt = PP2NT(pnt);
    SelfEvent* se = new SelfEvent;
    se->flag_ = flag;
    se->target_ = pnt;
    se->weight_index_ = weight_index_;
    if (v >= nt->_vdata) {
        se->movable_ = v;  // needed for SaveState
    }
    assert(net_cvode_instance);
    ++net_cvode_instance->p[nt->id].unreffed_event_cnt_;
    if (td < nt->_t) {
        char buf[100];
        sprintf(buf, "net_send td-t = %g", td - nt->_t);
        se->pr(buf, td, net_cvode_instance);
        abort();
        hoc_execerror("net_send delay < 0", 0);
    }
    TQItem* q = net_cvode_instance->event(td, se, nt);
    if (flag == 1.0 && v >= nt->_vdata) {
        *v = (void*) q;
    }
    // printf("net_send %g %s %g %p\n", td, pnt_name(pnt), flag, *v);
}

void artcell_net_send(void** v, int weight_index_, Point_process* pnt, double td, double flag) {
    net_send(v, weight_index_, pnt, td, flag);
}

void net_event(Point_process* pnt, double time) {
    NrnThread* nt = PP2NT(pnt);
    PreSyn* ps = nt->presyns + nt->pnt2presyn_ix[corenrn.get_pnttype2presyn()[pnt->_type]][pnt->_i_instance];
    if (ps) {
        if (time < nt->_t) {
            char buf[100];
            sprintf(buf, "net_event time-t = %g", time - nt->_t);
            ps->pr(buf, time, net_cvode_instance);
            hoc_execerror("net_event time < t", 0);
        }
        ps->send(time, net_cvode_instance, nt);
    }
}

NetCvodeThreadData::NetCvodeThreadData()
    : tqe_{new TQueue<QTYPE>()} {
    inter_thread_events_.reserve(1000);
}

NetCvodeThreadData::~NetCvodeThreadData() {
    delete tqe_;
}

/// If the PreSyn is on a different thread than the target,
/// we have to lock the buffer
void NetCvodeThreadData::interthread_send(double td, DiscreteEvent* db, NrnThread* /* nt */) {
    std::lock_guard<OMP_Mutex> lock(mut);
    inter_thread_events_.emplace_back(InterThreadEvent{db, td});
}

void interthread_enqueue(NrnThread* nt) {
    net_cvode_instance->p[nt->id].enqueue(net_cvode_instance, nt);
}

void NetCvodeThreadData::enqueue(NetCvode* nc, NrnThread* nt) {
    std::lock_guard<OMP_Mutex> lock(mut);
    for (const auto& ite: inter_thread_events_) {
        nc->bin_event(ite.t_, ite.de_, nt);
    }
    inter_thread_events_.clear();
}

NetCvode::NetCvode(void) {
    eps_ = 100. * DBL_EPSILON;
#if PRINT_EVENT
    print_event_ = 1;
#else
    print_event_ = 0;
#endif
    pcnt_ = 0;
    p = nullptr;
    p_construct(1);
    // eventually these should not have to be thread safe
    // for parallel network simulations hardly any presyns have
    // a threshold and it can be very inefficient to check the entire
    // presyn list for thresholds during the fixed step method.
    // So keep a threshold list.
}

NetCvode::~NetCvode() {
    if (net_cvode_instance == this) {
        net_cvode_instance = nullptr;
    }

    p_construct(0);
}

void nrn_p_construct() {
    net_cvode_instance->p_construct(nrn_nthread);
}

void NetCvode::p_construct(int n) {
    if (pcnt_ != n) {
        if (p) {
            delete[] p;
            p = nullptr;
        }

        if (n > 0)
            p = new NetCvodeThreadData[n];
        else
            p = nullptr;

        pcnt_ = n;
    }

    for (int i = 0; i < n; ++i)
        p[i].unreffed_event_cnt_ = 0;
}

TQItem* NetCvode::bin_event(double td, DiscreteEvent* db, NrnThread* nt) {
    if (nrn_use_bin_queue_) {
#if PRINT_EVENT
        if (print_event_) {
            db->pr("binq send", td, this);
        }
#endif
        return p[nt->id].tqe_->enqueue_bin(td, db);
    } else {
#if PRINT_EVENT
        if (print_event_) {
            db->pr("send", td, this);
        }
#endif
        return p[nt->id].tqe_->insert(td, db);
    }
}

TQItem* NetCvode::event(double td, DiscreteEvent* db, NrnThread* nt) {
#if PRINT_EVENT
    if (print_event_) {
        db->pr("send", td, this);
    }
#endif
    return p[nt->id].tqe_->insert(td, db);
}

void NetCvode::clear_events() {
    // DiscreteEvents may already have gone out of existence so the tqe_
    // may contain many invalid item data pointers
    enqueueing_ = 0;
    for (int i = 0; i < nrn_nthread; ++i) {
        NetCvodeThreadData& d = p[i];
        delete d.tqe_;
        d.tqe_ = new TQueue<QTYPE>();
        d.unreffed_event_cnt_ = 0;
        d.inter_thread_events_.clear();
        d.tqe_->nshift_ = -1;
        d.tqe_->shift_bin(nrn_threads->_t - 0.5 * nrn_threads->_dt);
    }
}

void NetCvode::init_events() {
    for (int i = 0; i < nrn_nthread; ++i) {
        p[i].tqe_->nshift_ = -1;
        p[i].tqe_->shift_bin(nrn_threads->_t - 0.5 * nrn_threads->_dt);
    }

    for (int tid = 0; tid < nrn_nthread; ++tid) {  // can be done in parallel
        NrnThread* nt = nrn_threads + tid;

        for (int ipre = 0; ipre < nt->n_presyn; ++ipre) {
            PreSyn* ps = nt->presyns + ipre;
            ps->flag_ = false;
        }

        for (int inetc = 0; inetc < nt->n_netcon; ++inetc) {
            NetCon* d = nt->netcons + inetc;
            if (d->target_) {
                int type = d->target_->_type;
                if (corenrn.get_pnt_receive_init()[type]) {
                    (*corenrn.get_pnt_receive_init()[type])(d->target_, d->u.weight_index_, 0);
                } else {
                    int cnt = corenrn.get_pnt_receive_size()[type];
                    double* wt = nt->weights + d->u.weight_index_;
                    // not the first
                    for (int j = 1; j < cnt; ++j) {
                        wt[j] = 0.;
                    }
                }
            }
        }
    }
}

bool NetCvode::deliver_event(double til, NrnThread* nt) {
    TQItem* q = p[nt->id].tqe_->atomic_dq(til);
    if (q == nullptr) {
        return false;
    }

    DiscreteEvent* de = q->data_;
    double tt = q->t_;
    delete q;
#if PRINT_EVENT
    if (print_event_) {
        de->pr("deliver", tt, this);
    }
#endif
    de->deliver(tt, this, nt);

    /// In case of a self event we need to delete the self event
    if (de->type() == SelfEventType) {
        delete static_cast<SelfEvent*>(de);
    }
    return true;
}

void net_move(void** v, Point_process* pnt, double tt) {
    // assert, if possible that *v == pnt->movable.
    if (!(*v))
        hoc_execerror("No event with flag=1 for net_move in ",
                      corenrn.get_memb_func(pnt->_type).sym);

    TQItem* q = (TQItem*) (*v);
    // printf("net_move tt=%g %s *v=%p\n", tt, memb_func[pnt->_type].sym, *v);
    if (tt < PP2t(pnt))
        nrn_assert(0);

    net_cvode_instance->move_event(q, tt, PP2NT(pnt));
}

void artcell_net_move(void** v, Point_process* pnt, double tt) {
    net_move(v, pnt, tt);
}

void NetCvode::move_event(TQItem* q, double tnew, NrnThread* nt) {
    int tid = nt->id;

#if PRINT_EVENT
    if (print_event_) {
        SelfEvent* se = (SelfEvent*) q->data_;
        printf("NetCvode::move_event self event target %s t=%g, old=%g new=%g\n",
               pnt_name(se->target_),
               nt->_t,
               q->t_,
               tnew);
    }
#endif

    p[tid].tqe_->move(q, tnew);
}

void NetCvode::deliver_events(double til, NrnThread* nt) {
    // printf("deliver_events til %20.15g\n", til);
    /// Enqueue any outstanding events in the interthread event buffer
    p[nt->id].enqueue(this, nt);

    /// Deliver events. When the map is used, the loop is explicit
    while (deliver_event(til, nt))
        ;
}

void PreSyn::record(double tt) {
    spikevec_lock();
    if (gid_ > -1) {
        spikevec_gid.push_back(gid_);
        spikevec_time.push_back(tt);
    }
    spikevec_unlock();
}

bool ConditionEvent::check(NrnThread* nt) {
    if (value(nt) > 0.0) {
        if (flag_ == false) {
            flag_ = true;
            return true;
        }
    } else {
        flag_ = false;
    }
    return false;
}

void DiscreteEvent::send(double tt, NetCvode* ns, NrnThread* nt) {
    ns->event(tt, this, nt);
}

void DiscreteEvent::deliver(double /* tt */, NetCvode* /* ns */, NrnThread* /* nt */) {}

void DiscreteEvent::pr(const char* s, double tt, NetCvode* /* ns */) {
    printf("%s DiscreteEvent %.15g\n", s, tt);
}

void NetCon::send(double tt, NetCvode* ns, NrnThread* nt) {
    if (active_ && target_) {
        nrn_assert(PP2NT(target_) == nt);
        ns->bin_event(tt, this, PP2NT(target_));
    }
}

void NetCon::deliver(double tt, NetCvode* /* ns */, NrnThread* nt) {
    nrn_assert(target_);

    if (PP2NT(target_) != nt)
        printf("NetCon::deliver nt=%d target=%d\n", nt->id, PP2NT(target_)->id);

    nrn_assert(PP2NT(target_) == nt);
    int typ = target_->_type;
    nt->_t = tt;

    // printf("NetCon::deliver t=%g tt=%g %s\n", t, tt, pnt_name(target_));
    std::string ss("net-receive-");
    ss += nrn_get_mechname(typ);
    Instrumentor::phase p_get_pnt_receive(ss.c_str());
    (*corenrn.get_pnt_receive()[typ])(target_, u.weight_index_, 0);
#ifdef DEBUG
    if (errno && nrn_errno_check(typ))
        hoc_warning("errno set during NetCon deliver to NET_RECEIVE", (char*) 0);
#endif
}

void NetCon::pr(const char* s, double tt, NetCvode* /* ns */) {
    Point_process* pp = target_;
    printf("%s NetCon target=%s[%d] %.15g\n",
           s,
           corenrn.get_memb_func(pp->_type).sym,
           pp->_i_instance,
           tt);
}

void PreSyn::send(double tt, NetCvode* ns, NrnThread* nt) {
    record(tt);
    for (int i = nc_cnt_ - 1; i >= 0; --i) {
        NetCon* d = netcon_in_presyn_order_[nc_index_ + i];
        if (d->active_ && d->target_) {
            NrnThread* n = PP2NT(d->target_);

            if (nt == n)
                ns->bin_event(tt + d->delay_, d, n);
            else
                ns->p[n->id].interthread_send(tt + d->delay_, d, n);
        }
    }

#if NRNMPI
    if (output_index_ >= 0) {
#if NRN_MULTISEND
        if (use_multisend_) {
            nrn_multisend_send(this, tt, nt);
        } else {
#else
        {
#endif
            if (nrn_use_localgid_) {
                nrn_outputevent(localgid_, tt);
            } else {
                nrn2ncs_outputevent(output_index_, tt);
            }
        }
    }
#endif  // NRNMPI
}

441 
442 void InputPreSyn::send(double tt, NetCvode* ns, NrnThread* nt) {
443  for (int i = nc_cnt_ - 1; i >= 0; --i) {
445  if (d->active_ && d->target_) {
446  NrnThread* n = PP2NT(d->target_);
447 
448  if (nt == n)
449  ns->bin_event(tt + d->delay_, d, n);
450  else
451  ns->p[n->id].interthread_send(tt + d->delay_, d, n);
452  }
453  }
454 }
455 
void PreSyn::deliver(double, NetCvode*, NrnThread*) {
    assert(0);  // no PreSyn delay.
}

void InputPreSyn::deliver(double, NetCvode*, NrnThread*) {
    assert(0);  // no InputPreSyn delay.
}

void SelfEvent::deliver(double tt, NetCvode* ns, NrnThread* nt) {
    nrn_assert(nt == PP2NT(target_));
    PP2t(target_) = tt;
    // printf("SelfEvent::deliver t=%g tt=%g %s\n", PP2t(target_), tt, pnt_name(target_));
    call_net_receive(ns);
}

void SelfEvent::call_net_receive(NetCvode* ns) {
    (*corenrn.get_pnt_receive()[target_->_type])(target_, weight_index_, flag_);

#ifdef DEBUG
    if (errno && nrn_errno_check(target_->_type))
        hoc_warning("errno set during SelfEvent deliver to NET_RECEIVE", (char*) 0);
#endif

    NetCvodeThreadData& nctd = ns->p[PP2NT(target_)->id];
    --nctd.unreffed_event_cnt_;
}

void SelfEvent::pr(const char* s, double tt, NetCvode*) {
    printf("%s", s);
    printf(" SelfEvent target=%s %.15g flag=%g\n", pnt_name(target_), tt, flag_);
}

void ncs2nrn_integrate(double tstop) {
    int total_sim_steps = static_cast<int>((tstop - nrn_threads->_t) / dt + 1e-9);

    if (total_sim_steps > 3 && !nrn_have_gaps) {
        nrn_fixed_step_group_minimal(total_sim_steps);
    } else {
        nrn_fixed_single_steps_minimal(total_sim_steps, tstop);
    }

    // handle all the pending flag=1 self events
    for (int i = 0; i < nrn_nthread; ++i)
        nrn_assert(nrn_threads[i]._t == tstop);
}

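// Illustrative note on the step-count arithmetic above: with nrn_threads->_t = 0,
// tstop = 10 and dt = 0.025, (tstop - t)/dt is exactly 400, but floating-point
// division may yield something like 399.999999...; the +1e-9 nudge makes the
// truncation to int give 400 instead of silently dropping the last step.
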
// factored this out from deliver_net_events so we can
// stay in the cache
// net_send_buffer added so checking can be done on gpu
// while event queueing is on cpu.
// Remember: passing reference variable causes cray
// compiler bug

static bool pscheck(double var, double thresh, int* flag) {
    if (var > thresh) {
        if (*flag == false) {
            *flag = true;
            return true;
        }
    } else {
        *flag = false;
    }
    return false;
}

double PreSyn::value(NrnThread* nt) {
    return nt->_actual_v[thvar_index_] - threshold_;
}

void NetCvode::check_thresh(NrnThread* nt) {  // for default method
    Instrumentor::phase p("check-threshold");
    double teps = 1e-10;

    nt->_net_send_buffer_cnt = 0;
    int net_send_buf_count = 0;
    PreSyn* presyns = nt->presyns;
    PreSynHelper* presyns_helper = nt->presyns_helper;
    double* actual_v = nt->_actual_v;

    if (nt->ncell == 0)
        return;

    nrn_pragma_acc(parallel loop present(
        nt [0:1], presyns_helper [0:nt->n_presyn], presyns [0:nt->n_presyn], actual_v [0:nt->end])
                       copy(net_send_buf_count) if (nt->compute_gpu) async(nt->stream_id))
    nrn_pragma_omp(target teams distribute parallel for map(tofrom: net_send_buf_count) if(nt->compute_gpu))
    for (int i = 0; i < nt->n_real_output; ++i) {
        PreSyn* ps = presyns + i;
        PreSynHelper* psh = presyns_helper + i;
        int idx = 0;
        int thidx = ps->thvar_index_;
        double v = actual_v[thidx];
        double threshold = ps->threshold_;
        int* flag = &(psh->flag_);

        if (pscheck(v, threshold, flag)) {
#ifndef CORENEURON_ENABLE_GPU
            nt->_net_send_buffer_cnt = net_send_buf_count;
            if (nt->_net_send_buffer_cnt >= nt->_net_send_buffer_size) {
                nt->_net_send_buffer_size *= 2;
                nt->_net_send_buffer = (int*) erealloc(nt->_net_send_buffer,
                                                       nt->_net_send_buffer_size * sizeof(int));
            }
#endif

            nrn_pragma_acc(atomic capture)
            nrn_pragma_omp(atomic capture)
            idx = net_send_buf_count++;

            nt->_net_send_buffer[idx] = i;
        }
    }
    nrn_pragma_acc(wait(nt->stream_id))
    nt->_net_send_buffer_cnt = net_send_buf_count;

    if (nt->compute_gpu && nt->_net_send_buffer_cnt) {
#ifdef CORENEURON_ENABLE_GPU
        int* nsbuffer = nt->_net_send_buffer;
#endif
        nrn_pragma_acc(update host(nsbuffer [0:nt->_net_send_buffer_cnt]) async(nt->stream_id))
        nrn_pragma_acc(wait(nt->stream_id))
        nrn_pragma_omp(target update from(nsbuffer [0:nt->_net_send_buffer_cnt]))
    }

    // on CPU...
    for (int i = 0; i < nt->_net_send_buffer_cnt; ++i) {
        PreSyn* ps = nt->presyns + nt->_net_send_buffer[i];
        ps->send(nt->_t + teps, net_cvode_instance, nt);
    }

    // Types that have WATCH statements. If the list exists, its last element is 0.
    if (nt->_watch_types) {
        for (int i = 0; nt->_watch_types[i] != 0; ++i) {
            int type = nt->_watch_types[i];
            (*corenrn.get_watch_check()[type])(nt, nt->_ml_list[type]);
            // may generate net_send events (with 0 (teps) delay)
        }
    }
}

// WATCH statements are rare. Conceptually they are very similar to
// PreSyn thresholds as above, but an optimal performance implementation for GPU is
// not obvious. Each WATCH statement threshold test could make use of
// pscheck. Note that it is possible that there are several active WATCH
// statements for a given POINT_PROCESS instance as well as none active.
// Also WATCH statements switch between active and inactive state.
//
// In NEURON,
// both PreSyn and WatchCondition were subclasses of ConditionEvent. When
// a WatchCondition fired in the fixed step method, it was placed on the queue
// with a delivery time of t+teps. WatchCondition::deliver called the NET_RECEIVE
// block with the proper flag (but nullptr weight vector). WatchConditions
// were created, added/removed, and destroyed from a list as necessary.
// Perhaps the most commonly used WATCH statement is in the context of a
// ThresholdDetect Point_process which watches voltage and compares to
// an instance specific threshold parameter. A firing ThresholdDetect instance
// would call net_event(tdeliver) which then feeds into the standard
// artcell PreSyn sequence (using pntsrc_ instead of thvar_index_).
//
// So... the PreSyns have the same order as they are checked (although PreSyn
// data is AoS instead of SoA and the nested 'if' means a failure of SIMD.)
// But if there are multiple WATCH, there is (from one kind of implementation viewpoint)
// yet another 'if' with regard to whether a WATCH is active. And if there
// are multiple WATCH, the size of the list is dynamic.
//
// An experimental implementation is to check all WATCH of all instances
// of a type with the proviso that there is an active flag for each WATCH.
// i.e. active, below, var1, var2 are all SoA (except one of the var may
// be voltage). Can use 'if (active && pscheck(var1, var2, &below))'.
// The mod file net_send_buffering fragments can be used which
// ultimately call net_send using a transient SelfEvent, i.e. all
// checking computation takes place in the context of the mod file without
// using explicit WatchCondition instances.

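// An illustrative sketch (not part of this file) of the SoA checking pattern described
// above, assuming hypothetical per-instance arrays active[], below[], var1[] and var2[]
// of length n for one WATCH statement of a mechanism type:
//
//     for (int i = 0; i < n; ++i) {
//         if (active[i] && pscheck(var1[i], var2[i], &below[i])) {
//             // fired: the translated mod file would enqueue a net_send-style
//             // event here via its net_send_buffering fragment
//         }
//     }
//
// Keeping active[] as plain data instead of maintaining a dynamic WatchCondition list
// lets every instance of the type be checked in one flat, SIMD/GPU-friendly loop.
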
// events including binqueue events up to t+dt/2
void NetCvode::deliver_net_events(NrnThread* nt) {  // for default method
#if NRN_MULTISEND
    if (use_multisend_ && nt->id == 0) {
        nrn_multisend_advance();
    }
#endif
    int tid = nt->id;
    double tsav = nt->_t;
    double tm = nt->_t + 0.5 * nt->_dt;
tryagain:
    // one of the events on the main queue may be a NetParEvent
    // which due to dt round off error can result in an event
    // placed on the bin queue to be delivered now, which
    // can put 0 delay events on to the main queue. So loop until
    // no events. The alternative would be to deliver an idt=0 event
    // immediately but that would very much change the sequence
    // with respect to what is being done here and it is unclear
    // how to fix the value of t there. This can be a do while loop
    // but I do not want to affect the case of not using a bin queue.

    if (nrn_use_bin_queue_) {
        TQItem* q;
        while ((q = p[tid].tqe_->dequeue_bin()) != 0) {
            DiscreteEvent* db = q->data_;

#if PRINT_EVENT
            if (print_event_) {
                db->pr("binq deliver", nrn_threads->_t, this);
            }
#endif

            delete q;
            db->deliver(nt->_t, this, nt);
        }
        // assert(int(tm/nt->_dt)%1000 == p[tid].tqe_->nshift_);
    }

    deliver_events(tm, nt);

    if (nrn_use_bin_queue_) {
        if (p[tid].tqe_->top()) {
            goto tryagain;
        }
        p[tid].tqe_->shift_bin(tm);
    }

    nt->_t = tsav;

    /* before executing on gpu, we have to update the NetReceiveBuffer_t on GPU */
    update_net_receive_buffer(nt);

    for (auto& net_buf_receive: corenrn.get_net_buf_receive()) {
        std::string ss("net-buf-receive-");
        ss += nrn_get_mechname(net_buf_receive.second);
        Instrumentor::phase p_net_buf_receive(ss.c_str());
        (*net_buf_receive.first)(nt);
    }
}
}  // namespace coreneuron