CoreNEURON
sparse_thread.hpp
/*
# =============================================================================
# Originally sparse.c from SCoP library, Copyright (c) 1989-90 Duke University
# =============================================================================
# Subsequent extensive prototype and memory layout changes for CoreNEURON
#
# Copyright (c) 2016 - 2022 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/
#pragma once
#include "coreneuron/mechanism/mech/mod2c_core_thread.hpp"
#include "coreneuron/sim/scopmath/errcodes.h"

namespace coreneuron {
namespace scopmath {
namespace sparse {
// Methods that may be called from offloaded regions are declared inline.
inline void delete_item(Item* item) {
    item->next->prev = item->prev;
    item->prev->next = item->next;
    item->prev = nullptr;
    item->next = nullptr;
}

/* link ii before item */
inline void linkitem(Item* item, Item* ii) {
    ii->prev = item->prev;
    ii->next = item;
    item->prev = ii;
    ii->prev->next = ii;
}

inline void insert(SparseObj* so, Item* item) {
    Item* ii{};
    for (ii = so->orderlist->next; ii != so->orderlist; ii = ii->next) {
        if (ii->norder >= item->norder) {
            break;
        }
    }
    linkitem(ii, item);
}

/* note: solution order refers to the following
   diag[varord[row]]->row = row = diag[varord[row]]->col
   rowst[varord[row]]->row = row
   varord[el->col] < varord[el->c_right->col]
   varord[el->row] < varord[el->r_down->row]
*/
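/* Illustrative example (not part of the original header): for a 3-equation
   system in which row 2 is eliminated first, row 1 second and row 3 last, the
   ordering data would satisfy
       varord[2] == 1, varord[1] == 2, varord[3] == 3
       diag[1]->row == diag[1]->col == 2      // first pivot is row 2's diagonal
       rowst[1]->row == 2                     // leftmost stored element of row 2
   and walking rowst[1] via c_right visits columns with increasing varord,
   while walking diag[1] via r_down visits rows with increasing varord. */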
inline void increase_order(SparseObj* so, unsigned row) {
    /* order of row increases by 1. Maintain the orderlist. */
    if (!so->do_flag)
        return;
    Item* order = so->roworder[row];
    delete_item(order);
    order->norder++;
    insert(so, order);
}

/**
 * Return a pointer to the (row, col) element, maintaining order in rows.
 *
 * See check_assert in minorder for how this matrix is supposed to look. If
 * new_elem is non-null and an element would otherwise be created, new_elem is
 * used instead of allocating a fresh one, because linking an element is highly
 * nontrivial. The biggest difference from the original SCoP code is that
 * elements are no longer removed, which saves considerable time allocating
 * and freeing during the solve phase.
 */
template <enabled_code code_to_enable = enabled_code::all>
Elm* getelm(SparseObj* so, unsigned row, unsigned col, Elm* new_elem) {
    Elm *el, *elnext;

    unsigned vrow = so->varord[row];
    unsigned vcol = so->varord[col];

    if (vrow == vcol) {
        return so->diag[vrow]; /* a common case */
    }
    if (vrow > vcol) { /* in the lower triangle */
        /* search downward from diag[vcol] */
        for (el = so->diag[vcol];; el = elnext) {
            elnext = el->r_down;
            if (!elnext) {
                break;
            } else if (elnext->row == row) { /* found it */
                return elnext;
            } else if (so->varord[elnext->row] > vrow) {
                break;
            }
        }
        /* insert below el */
        if (!new_elem) {
            if constexpr (code_to_enable == enabled_code::compute_only) {
                // Dynamic allocation should not happen during the compute phase.
                assert(false);
            } else {
                new_elem = new Elm{};
                new_elem->value = new double[so->_cntml_padded];
                increase_order(so, row);
            }
        }
        new_elem->r_down = el->r_down;
        el->r_down = new_elem;
        new_elem->r_up = el;
        if (new_elem->r_down) {
            new_elem->r_down->r_up = new_elem;
        }
        /* search leftward from diag[vrow] */
        for (el = so->diag[vrow];; el = elnext) {
            elnext = el->c_left;
            if (!elnext) {
                break;
            } else if (so->varord[elnext->col] < vcol) {
                break;
            }
        }
        /* insert to left of el */
        new_elem->c_left = el->c_left;
        el->c_left = new_elem;
        new_elem->c_right = el;
        if (new_elem->c_left) {
            new_elem->c_left->c_right = new_elem;
        } else {
            so->rowst[vrow] = new_elem;
        }
    } else { /* in the upper triangle */
        /* search upward from diag[vcol] */
        for (el = so->diag[vcol];; el = elnext) {
            elnext = el->r_up;
            if (!elnext) {
                break;
            } else if (elnext->row == row) { /* found it */
                return elnext;
            } else if (so->varord[elnext->row] < vrow) {
                break;
            }
        }
        /* insert above el */
        if (!new_elem) {
            if constexpr (code_to_enable == enabled_code::compute_only) {
                assert(false);
            } else {
                new_elem = new Elm{};
                new_elem->value = new double[so->_cntml_padded];
                increase_order(so, row);
            }
        }
        new_elem->r_up = el->r_up;
        el->r_up = new_elem;
        new_elem->r_down = el;
        if (new_elem->r_up) {
            new_elem->r_up->r_down = new_elem;
        }
        /* search right from diag[vrow] */
        for (el = so->diag[vrow];; el = elnext) {
            elnext = el->c_right;
            if (!elnext) {
                break;
            } else if (so->varord[elnext->col] > vcol) {
                break;
            }
        }
        /* insert to right of el */
        new_elem->c_right = el->c_right;
        el->c_right = new_elem;
        new_elem->c_left = el;
        if (new_elem->c_right) {
            new_elem->c_right->c_left = new_elem;
        }
    }
    new_elem->row = row;
    new_elem->col = col;
    return new_elem;
}
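/* Illustrative usage (not part of the original header): during the setup phase
   (code_to_enable == enabled_code::all), passing new_elem == nullptr lets
   getelm allocate a missing element and bump the row order, e.g.
       Elm* e = getelm(so, 2, 3, nullptr);   // creates element (2,3) if absent
   whereas the compute_only instantiation asserts instead of allocating.
   re_link() below passes an existing element as new_elem so that it is merely
   re-threaded into the row/column lists without any allocation. */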

/**
 * The following routines support the concept of a list. Modified from modl.
 * The list is a doubly linked list. A special item with element 0 is always at
 * the tail of the list and is denoted as the List pointer itself. list->next
 * points to the first item in the list and list->prev points to the last item
 * in the list, i.e. the list is circular. Note that in an empty list next and
 * prev point to the list itself.
 *
 * It is intended that this implementation be hidden from the user via the
 * following function calls.
 */
inline List* newlist() {
    auto* ii = new Item{};
    ii->prev = ii;
    ii->next = ii;
    return ii;
}

/* free the list but not the elements */
inline void freelist(List* list) {
    Item* i2;
    for (Item* i1 = list->next; i1 != list; i1 = i2) {
        i2 = i1->next;
        delete i1;
    }
    delete list;
}
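/* Illustrative sketch (not part of the original header) of the invariants the
   list helpers above maintain; `head` and `it` are placeholder names:
       List* head = newlist();        // empty: head->next == head->prev == head
       auto* it = new Item{};
       linkitem(head, it);            // link `it` just before the sentinel (tail)
       // now head->prev == it, it->next == head, it->prev == head
       delete_item(it);               // unlink and null out it->next / it->prev
       delete it;
       freelist(head);                // delete remaining items, then the sentinel */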

inline void check_assert(SparseObj* so) {
    /* check that all links are consistent */
    for (unsigned i = 1; i <= so->neqn; i++) {
        assert(so->diag[i]);
        assert(so->diag[i]->row == so->diag[i]->col);
        assert(so->varord[so->diag[i]->row] == i);
        assert(so->rowst[i]->row == so->diag[i]->row);
        for (Elm* el = so->rowst[i]; el; el = el->c_right) {
            if (el == so->rowst[i]) {
                assert(el->c_left == nullptr);
            } else {
                assert(el->c_left->c_right == el);
                assert(so->varord[el->c_left->col] < so->varord[el->col]);
            }
        }
        for (Elm* el = so->diag[i]->r_down; el; el = el->r_down) {
            assert(el->r_up->r_down == el);
            assert(so->varord[el->r_up->row] < so->varord[el->row]);
        }
        for (Elm* el = so->diag[i]->r_up; el; el = el->r_up) {
            assert(el->r_down->r_up == el);
            assert(so->varord[el->r_down->row] > so->varord[el->row]);
        }
    }
}

/* at this point row links are out of order for diag[i]->col
   and col links are out of order for diag[i]->row */
inline void re_link(SparseObj* so, unsigned i) {
    for (Elm* el = so->rowst[i]; el; el = el->c_right) {
        /* repair hole */
        if (el->r_up)
            el->r_up->r_down = el->r_down;
        if (el->r_down)
            el->r_down->r_up = el->r_up;
    }

    for (Elm* el = so->diag[i]->r_down; el; el = el->r_down) {
        /* repair hole */
        if (el->c_right)
            el->c_right->c_left = el->c_left;
        if (el->c_left)
            el->c_left->c_right = el->c_right;
        else
            so->rowst[so->varord[el->row]] = el->c_right;
    }

    for (Elm* el = so->diag[i]->r_up; el; el = el->r_up) {
        /* repair hole */
        if (el->c_right)
            el->c_right->c_left = el->c_left;
        if (el->c_left)
            el->c_left->c_right = el->c_right;
        else
            so->rowst[so->varord[el->row]] = el->c_right;
    }

    /* matrix is consistent except that diagonal row elements are unlinked from
       their columns and the diagonal column elements are unlinked from their
       rows.
       For simplicity discard all knowledge of links and use getelm to relink */
    Elm *dright, *dleft, *dup, *ddown, *elnext;

    so->rowst[i] = so->diag[i];
    dright = so->diag[i]->c_right;
    dleft = so->diag[i]->c_left;
    dup = so->diag[i]->r_up;
    ddown = so->diag[i]->r_down;
    so->diag[i]->c_right = so->diag[i]->c_left = nullptr;
    so->diag[i]->r_up = so->diag[i]->r_down = nullptr;
    for (Elm* el = dright; el; el = elnext) {
        elnext = el->c_right;
        getelm(so, el->row, el->col, el);
    }
    for (Elm* el = dleft; el; el = elnext) {
        elnext = el->c_left;
        getelm(so, el->row, el->col, el);
    }
    for (Elm* el = dup; el; el = elnext) {
        elnext = el->r_up;
        getelm(so, el->row, el->col, el);
    }
    for (Elm* el = ddown; el; el = elnext) {
        elnext = el->r_down;
        getelm(so, el->row, el->col, el);
    }
}

inline void free_elm(SparseObj* so) {
    /* free all elements */
    for (unsigned i = 1; i <= so->neqn; i++) {
        so->rowst[i] = nullptr;
        so->diag[i] = nullptr;
    }
}

inline void init_minorder(SparseObj* so) {
    /* matrix has been set up. Construct the orderlist and orderfind vector. */

    so->do_flag = 1;
    if (so->roworder) {
        for (unsigned i = 1; i <= so->nroworder; ++i) {
            delete so->roworder[i];
        }
        delete[] so->roworder;
    }
    so->roworder = new Item*[so->neqn + 1]{};
    so->nroworder = so->neqn;
    if (so->orderlist) {
        freelist(so->orderlist);
    }
    so->orderlist = newlist();
    for (unsigned i = 1; i <= so->neqn; i++) {
        so->roworder[i] = new Item{};
    }
    for (unsigned i = 1; i <= so->neqn; i++) {
        unsigned j = 0;
        for (auto el = so->rowst[i]; el; el = el->c_right) {
            j++;
        }
        so->roworder[so->diag[i]->row]->elm = so->diag[i];
        so->roworder[so->diag[i]->row]->norder = j;
        insert(so, so->roworder[so->diag[i]->row]);
    }
}

inline void reduce_order(SparseObj* so, unsigned row) {
    /* order of row decreases by 1. Maintain the orderlist. */

    if (!so->do_flag)
        return;
    Item* order = so->roworder[row];
    delete_item(order);
    order->norder--;
    insert(so, order);
}

inline void get_next_pivot(SparseObj* so, unsigned i) {
    /* get varord[i], etc. from the head of the orderlist. */
    Item* order = so->orderlist->next;
    assert(order != so->orderlist);

    unsigned j;
    if ((j = so->varord[order->elm->row]) != i) {
        /* push order lists down by 1 and put new diag in empty slot */
        assert(j > i);
        Elm* el = so->rowst[j];
        for (; j > i; j--) {
            so->diag[j] = so->diag[j - 1];
            so->rowst[j] = so->rowst[j - 1];
            so->varord[so->diag[j]->row] = j;
        }
        so->diag[i] = order->elm;
        so->rowst[i] = el;
        so->varord[so->diag[i]->row] = i;
        /* at this point row links are out of order for diag[i]->col
           and col links are out of order for diag[i]->row */
        re_link(so, i);
    }

    /* now make sure all needed elements exist */
    for (Elm* el = so->diag[i]->r_down; el; el = el->r_down) {
        for (Elm* pivot = so->diag[i]->c_right; pivot; pivot = pivot->c_right) {
            getelm(so, el->row, pivot->col, nullptr);
        }
        reduce_order(so, el->row);
    }
    delete_item(order);
}

/* reallocate space for matrix */
inline void initeqn(SparseObj* so, unsigned maxeqn) {
    if (maxeqn == so->neqn)
        return;
    free_elm(so);
    so->neqn = maxeqn;
    delete[] so->rowst;
    delete[] so->diag;
    delete[] so->varord;
    delete[] so->rhs;
    delete[] so->ngetcall;
    so->elmpool = nullptr;
    so->rowst = new Elm*[maxeqn + 1];
    so->diag = new Elm*[maxeqn + 1];
    so->varord = new unsigned[maxeqn + 1];
    so->rhs = new double[(maxeqn + 1) * so->_cntml_padded];
    so->ngetcall = new unsigned[so->_cntml_padded];
    for (unsigned i = 1; i <= maxeqn; i++) {
        so->varord[i] = i;
        so->diag[i] = new Elm{};
        so->diag[i]->value = new double[so->_cntml_padded];
        so->rowst[i] = so->diag[i];
        so->diag[i]->row = i;
        so->diag[i]->col = i;
        so->diag[i]->r_down = so->diag[i]->r_up = nullptr;
        so->diag[i]->c_right = so->diag[i]->c_left = nullptr;
    }
    unsigned nn = so->neqn * so->_cntml_padded;
    for (unsigned i = 0; i < nn; ++i) {
        so->rhs[i] = 0.;
    }
}

/**
 * Minimum ordering algorithm to determine the order that the matrix should be
 * solved. Also make sure all needed elements are present. This does not mess
 * up the matrix.
 */
inline void spar_minorder(SparseObj* so) {
    check_assert(so);
    init_minorder(so);
    for (unsigned i = 1; i <= so->neqn; i++) {
        get_next_pivot(so, i);
    }
    so->do_flag = 0;
    check_assert(so);
}

inline void init_coef_list(SparseObj* so, int _iml) {
    so->ngetcall[_iml] = 0;
    for (unsigned i = 1; i <= so->neqn; i++) {
        for (Elm* el = so->rowst[i]; el; el = el->c_right) {
            el->value[_iml] = 0.;
        }
    }
}
434 
435 #if defined(scopmath_sparse_d) || defined(scopmath_sparse_ix) || defined(scopmath_sparse_s) || \
436  defined(scopmath_sparse_x)
437 #error "naming clash on sparse_thread.hpp-internal macros"
438 #endif
439 #define scopmath_sparse_ix(arg) ((arg) *_STRIDE)
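/* Illustrative note (an assumption, not stated in this header): _STRIDE comes
   from mod2c_core_thread.hpp and, for the padded structure-of-arrays layout,
   is expected to expand so that
       so->rhs[scopmath_sparse_ix(row)]
   addresses the right-hand-side entry of equation `row` belonging to mechanism
   instance _iml, i.e. roughly rhs[row * _cntml_padded + _iml]. */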
inline void subrow(SparseObj* so, Elm* pivot, Elm* rowsub, int _iml) {
    unsigned int const _cntml_padded{so->_cntml_padded};
    double const r{rowsub->value[_iml] / pivot->value[_iml]};
    so->rhs[scopmath_sparse_ix(rowsub->row)] -= so->rhs[scopmath_sparse_ix(pivot->row)] * r;
    so->numop++;
    for (auto el = pivot->c_right; el; el = el->c_right) {
        for (rowsub = rowsub->c_right; rowsub->col != el->col; rowsub = rowsub->c_right) {
        }
        rowsub->value[_iml] -= el->value[_iml] * r;
        so->numop++;
    }
}

inline void bksub(SparseObj* so, int _iml) {
    int _cntml_padded = so->_cntml_padded;
    for (unsigned i = so->neqn; i >= 1; i--) {
        for (Elm* el = so->diag[i]->c_right; el; el = el->c_right) {
            so->rhs[scopmath_sparse_ix(el->row)] -= el->value[_iml] *
                                                    so->rhs[scopmath_sparse_ix(el->col)];
            so->numop++;
        }
        so->rhs[scopmath_sparse_ix(so->diag[i]->row)] /= so->diag[i]->value[_iml];
        so->numop++;
    }
}

inline int matsol(SparseObj* so, int _iml) {
    /* Upper triangularization */
    so->numop = 0;
    for (unsigned i = 1; i <= so->neqn; i++) {
        Elm* pivot{so->diag[i]};
        if (fabs(pivot->value[_iml]) <= ROUNDOFF) {
            return SINGULAR;
        }
        // Eliminate all elements in pivot column. The OpenACC annotation here
        // is to avoid problems with nvc++'s automatic parallelisation; see:
        // https://forums.developer.nvidia.com/t/device-kernel-hangs-at-o-and-above/212733
        nrn_pragma_acc(loop seq)
        for (auto el = pivot->r_down; el; el = el->r_down) {
            subrow(so, pivot, el, _iml);
        }
    }
    bksub(so, _iml);
    return SUCCESS;
}
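/* Worked example (illustrative only): for one mechanism instance, solving
       [ 2  1 ] [x1]   [3]
       [ 4  5 ] [x2] = [6]
   matsol first eliminates below the pivot (r = 4/2 = 2, so row 2 becomes
   [0 3] with rhs 6 - 2*3 = 0), then bksub back-substitutes:
       x2 = 0/3 = 0,   x1 = (3 - 1*0)/2 = 1.5
   leaving the solution in so->rhs. */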

template <typename SPFUN>
void create_coef_list(SparseObj* so, int n, SPFUN fun, _threadargsproto_) {
    initeqn(so, (unsigned) n);
    so->phase = 1;
    so->ngetcall[0] = 0;
    fun(so, so->rhs, _threadargs_);  // std::invoke in C++17
    if (so->coef_list) {
        delete[] so->coef_list;
    }
    so->coef_list_size = so->ngetcall[0];
    so->coef_list = new double*[so->coef_list_size];
    spar_minorder(so);
    so->phase = 2;
    so->ngetcall[0] = 0;
    fun(so, so->rhs, _threadargs_);  // std::invoke in C++17
    so->phase = 0;
}

template <enabled_code code_to_enable = enabled_code::all>
double* thread_getelm(SparseObj* so, int row, int col, int _iml) {
    if (!so->phase) {
        return so->coef_list[so->ngetcall[_iml]++];
    }
    Elm* el = scopmath::sparse::getelm<code_to_enable>(so, (unsigned) row, (unsigned) col, nullptr);
    if (so->phase == 1) {
        so->ngetcall[_iml]++;
    } else {
        so->coef_list[so->ngetcall[_iml]++] = el->value;
    }
    return el->value;
}
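/* Illustrative sketch (not from the original header) of the three-phase
   protocol used by create_coef_list() and thread_getelm(). The SPFUN callable
   generated from a MOD file's KINETIC block deposits each matrix coefficient
   through thread_getelm(); the lambda below is a hypothetical stand-in, with
   a11/a12 standing for whatever rate expressions the block computes:
       auto fun = [](SparseObj* so, double* rhs, _threadargsproto_) {
           *thread_getelm(so, 1, 1, _iml) += a11;  // phase 1: calls are counted
           *thread_getelm(so, 1, 2, _iml) += a12;  // phase 2: value pointers recorded
       };                                          // phase 0: replayed from coef_list */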
} // namespace sparse
} // namespace scopmath

// Methods that may be called from translated MOD files are kept outside the
// scopmath::sparse namespace.
#define scopmath_sparse_s(arg) _p[scopmath_sparse_ix(s[arg])]
#define scopmath_sparse_d(arg) _p[scopmath_sparse_ix(d[arg])]

/**
 * Sparse matrix dynamic allocation: create_coef_list makes a list for fast
 * setup, does minimum ordering and ensures all elements needed are present.
 * This could easily be made recursive but it isn't right now.
 */
template <typename SPFUN>
void* nrn_cons_sparseobj(SPFUN fun, int n, Memb_list* ml, _threadargsproto_) {
    // fill in the unset _threadargsproto_ assuming _iml = 0;
    _iml = 0; /* from _threadargsproto_ */
    _p = ml->data;
    _ppvar = ml->pdata;
    _v = _nt->_actual_v[ml->nodeindices[_iml]];
    SparseObj* so{new SparseObj};
    so->_cntml_padded = _cntml_padded;
    scopmath::sparse::create_coef_list(so, n, fun, _threadargs_);
    nrn_sparseobj_copyto_device(so);
    return so;
}

/**
 * This is an experimental numerical method for SCoP-3 which integrates kinetic
 * rate equations. It is intended to be used only by models generated by MODL,
 * and its identity is meant to be concealed from the user.
 *
 * @param n number of state variables
 * @param s array of pointers to the state variables
 * @param d array of pointers to the derivatives of states
 * @param t pointer to the independent variable
 * @param dt the time step
 * @param fun callable corresponding to the kinetic block equations
 * @param prhs pointer to right hand side vector (answer on return); does not
 *             have to be allocated by the caller (this is no longer quite right)
 * @param linflag solve as linear equations; when nonlinear, all states are
 *                forced >= 0
 */
template <typename F>
int sparse_thread(SparseObj* so,
                  int n,
                  int* s,
                  int* d,
                  double* t,
                  double dt,
                  F fun,
                  int linflag,
                  _threadargsproto_) {
    int i, j, ierr;
    double err;

    for (i = 0; i < n; i++) { /* save old state */
        scopmath_sparse_d(i) = scopmath_sparse_s(i);
    }
    for (err = 1, j = 0; err > CONVERGE; j++) {
        scopmath::sparse::init_coef_list(so, _iml);
        fun(so, so->rhs, _threadargs_);  // std::invoke in C++17
        if ((ierr = scopmath::sparse::matsol(so, _iml))) {
            return ierr;
        }
        for (err = 0., i = 1; i <= n; i++) { /* why oh why did I write it from 1 */
            scopmath_sparse_s(i - 1) += so->rhs[scopmath_sparse_ix(i)];
            if (!linflag && scopmath_sparse_s(i - 1) < 0.) {
                scopmath_sparse_s(i - 1) = 0.;
            }
            err += fabs(so->rhs[scopmath_sparse_ix(i)]);
        }
        if (j > MAXSTEPS) {
            return EXCEED_ITERS;
        }
        if (linflag)
            break;
    }
    scopmath::sparse::init_coef_list(so, _iml);
    fun(so, so->rhs, _threadargs_);  // std::invoke in C++17
    for (i = 0; i < n; i++) { /* restore Dstate at t+dt */
        scopmath_sparse_d(i) = (scopmath_sparse_s(i) - scopmath_sparse_d(i)) / dt;
    }
    return SUCCESS;
}
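/* Sketch of the scheme above (assuming the standard SCoP update): each pass
   fills the matrix and right-hand side via fun, solves with matsol, and
   accumulates the solution increments into the states,
       s_i <- s_i + rhs_i,   err = sum_i |rhs_i|,
   iterating until err <= CONVERGE (or MAXSTEPS is exceeded); afterwards the
   derivatives are recovered as D s_i = (s_i(t + dt) - s_i(t)) / dt. */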
#undef scopmath_sparse_d
#undef scopmath_sparse_ix
#undef scopmath_sparse_s
#define scopmath_sparse_x(arg) _p[x[arg] * _STRIDE]
/* for solving ax=b */
template <typename SPFUN>
int _cvode_sparse_thread(void** vpr, int n, int* x, SPFUN fun, _threadargsproto_) {
    SparseObj* so = (SparseObj*) (*vpr);
    if (!so) {
        so = new SparseObj{};
        *vpr = so;
    }
    scopmath::sparse::create_coef_list(so, n, fun, _threadargs_); /* calls fun twice */
    scopmath::sparse::init_coef_list(so, _iml);
    fun(so, so->rhs, _threadargs_);  // std::invoke in C++17
    int ierr;
    if ((ierr = scopmath::sparse::matsol(so, _iml))) {
        return ierr;
    }
    for (int i = 1; i <= n; i++) { /* why oh why did I write it from 1 */
        scopmath_sparse_x(i - 1) = so->rhs[i];
    }
    return SUCCESS;
}
#undef scopmath_sparse_x

inline void _nrn_destroy_sparseobj_thread(SparseObj* so) {
    if (!so) {
        return;
    }
    nrn_sparseobj_delete_from_device(so);
    delete[] so->rowst;
    delete[] so->diag;
    delete[] so->varord;
    delete[] so->rhs;
    delete[] so->coef_list;
    if (so->roworder) {
        for (int ii = 1; ii <= so->nroworder; ++ii) {
            delete so->roworder[ii];
        }
        delete[] so->roworder;
    }
    if (so->orderlist) {
        scopmath::sparse::freelist(so->orderlist);
    }
    delete so;
}
} // namespace coreneuron
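/* Typical call sequence (an illustrative sketch, not part of the header): the
   code generated from a MOD file's KINETIC block is expected to
     1. call coreneuron::nrn_cons_sparseobj(fun, n, ml, ...) once per mechanism
        to build the SparseObj and its coefficient list,
     2. call coreneuron::sparse_thread(so, n, s, d, &t, dt, fun, linflag, ...)
        each time step to advance the states, and
     3. call coreneuron::_nrn_destroy_sparseobj_thread(so) at teardown. */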