User Guide
codegen_coreneuron_cpp_visitor.cpp
Go to the documentation of this file.
1 /*
2  * Copyright 2023 Blue Brain Project, EPFL.
3  * See the top-level LICENSE file for details.
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
9 
10 #include <algorithm>
11 #include <chrono>
12 #include <cmath>
13 #include <ctime>
14 #include <regex>
15 
16 #include "ast/all.hpp"
20 #include "config/config.h"
21 #include "lexer/token_mapping.hpp"
22 #include "parser/c11_driver.hpp"
23 #include "utils/logger.hpp"
24 #include "utils/string_utils.hpp"
30 
31 namespace nmodl {
32 namespace codegen {
33 
34 using namespace ast;
35 
36 using visitor::DefUseAnalyzeVisitor;
37 using visitor::DUState;
38 using visitor::RenameVisitor;
39 using visitor::SymtabVisitor;
40 using visitor::VarUsageVisitor;
41 
43 
44 extern const std::regex regex_special_chars;
45 
46 /****************************************************************************************/
47 /* Generic information getters */
48 /****************************************************************************************/
49 
50 
52  return "C++ (api-compatibility)";
53 }
54 
55 
57  return "CoreNEURON";
58 }
59 
60 
61 /****************************************************************************************/
62 /* Common helper routines across codegen functions */
63 /****************************************************************************************/
64 
65 
66 int CodegenCoreneuronCppVisitor::position_of_float_var(const std::string& name) const {
67  int index = 0;
68  for (const auto& var: codegen_float_variables) {
69  if (var->get_name() == name) {
70  return index;
71  }
72  index += var->get_length();
73  }
74  throw std::logic_error(name + " variable not found");
75 }
76 
77 
78 int CodegenCoreneuronCppVisitor::position_of_int_var(const std::string& name) const {
79  int index = 0;
80  for (const auto& var: codegen_int_variables) {
81  if (var.symbol->get_name() == name) {
82  return index;
83  }
84  index += var.symbol->get_length();
85  }
86  throw std::logic_error(name + " variable not found");
87 }
88 
89 
90 /**
91  * \details Often top level verbatim blocks use variables with old names.
92  * Here we process if we are processing verbatim block at global scope.
93  */
94 std::string CodegenCoreneuronCppVisitor::process_verbatim_token(const std::string& token) {
95  const std::string& name = token;
96 
97  /*
98  * If given token is procedure name and if it's defined
99  * in the current mod file then it must be replaced
100  */
101  if (program_symtab->is_method_defined(token)) {
102  return method_name(token);
103  }
104 
105  /*
106  * Check if token is commongly used variable name in
107  * verbatim block like nt, \c \_threadargs etc. If so, replace
108  * it and return.
109  */
110  auto new_name = replace_if_verbatim_variable(name);
111  if (new_name != name) {
112  return get_variable_name(new_name, false);
113  }
114 
115  /*
116  * For top level verbatim blocks we shouldn't replace variable
117  * names with Instance because arguments are provided from coreneuron
118  * and they are missing inst.
119  */
120  auto use_instance = !printing_top_verbatim_blocks;
121  return get_variable_name(token, use_instance);
122 }
123 
124 
125 /**
126  * \details This can be overridden in the backend. For example, parameters can be constant
127  * except in INITIAL block where they are set to 0. As initial block is/can be
128  * executed on c++/cpu backend, gpu backend can mark the parameter as constant.
129  */
130 bool CodegenCoreneuronCppVisitor::is_constant_variable(const std::string& name) const {
131  auto symbol = program_symtab->lookup_in_scope(name);
132  bool is_constant = false;
133  if (symbol != nullptr) {
134  // per mechanism ion variables needs to be updated from neuron/coreneuron values
135  if (info.is_ion_variable(name)) {
136  is_constant = false;
137  }
138  // for parameter variable to be const, make sure it's write count is 0
139  // and it's not used in the verbatim block
140  else if (symbol->has_any_property(NmodlType::param_assign) &&
141  info.variables_in_verbatim.find(name) == info.variables_in_verbatim.end() &&
142  symbol->get_write_count() == 0) {
143  is_constant = true;
144  }
145  }
146  return is_constant;
147 }
148 
149 
150 /****************************************************************************************/
151 /* Backend specific routines */
152 /****************************************************************************************/
153 
154 
156  // backend specific, do nothing
157 }
158 
159 
161  // backend specific, do nothing
162 }
163 
164 
166  // backend specific, do nothing
167 }
168 
169 
171  // backend specific, do nothing
172 }
173 
174 
176  // backend specific, do nothing
177 }
178 
179 
181  // backend specific, do nothing
182 }
183 
184 
186  // backend specific, do nothing
187 }
188 
189 
190 /**
191  * \details Each kernel such as \c nrn\_init, \c nrn\_state and \c nrn\_cur could be offloaded
192  * to accelerator. In this case, at very top level, we print pragma
193  * for data present. For example:
194  *
195  * \code{.cpp}
196  * void nrn_state(...) {
197  * #pragma acc data present (nt, ml...)
198  * {
199  *
200  * }
201  * }
202  * \endcode
203  */
205  // backend specific, do nothing
206 }
207 
208 
210  // backend specific, do nothing
211 }
212 
213 
215  // backend specific, do nothing
216 }
217 
218 
220  // backend specific, do nothing
221 }
222 
223 
224 /**
225  * \details Depending programming model and compiler, we print compiler hint
226  * for parallelization. For example:
227  *
228  * \code
229  * #pragma omp simd
230  * for(int id = 0; id < nodecount; id++) {
231  *
232  * #pragma acc parallel loop
233  * for(int id = 0; id < nodecount; id++) {
234  * \endcode
235  */
237  BlockType /* type */,
238  const ast::Block* block) {
239  // ivdep allows SIMD parallelisation of a block/loop but doesn't provide
240  // a standard mechanism for atomics. Also, even with openmp 5.0, openmp
241  // atomics do not enable vectorisation under "omp simd" (gives compiler
242  // error with gcc < 9 if atomic and simd pragmas are nested). So, emit
243  // ivdep/simd pragma when no MUTEXLOCK/MUTEXUNLOCK/PROTECT statements
244  // are used in the given block.
245  std::vector<std::shared_ptr<const ast::Ast>> nodes;
246  if (block) {
247  nodes = collect_nodes(*block,
251  }
252  if (nodes.empty()) {
253  printer->add_line("#pragma omp simd");
254  printer->add_line("#pragma ivdep");
255  }
256 }
257 
258 
260  return info.point_process;
261 }
262 
263 
265  if (info.point_process) {
266  printer->fmt_line("double* shadow_rhs = nt->{};", naming::NTHREAD_RHS_SHADOW);
267  printer->fmt_line("double* shadow_d = nt->{};", naming::NTHREAD_D_SHADOW);
268  }
269 }
270 
271 
273  if (info.point_process) {
274  printer->add_line("shadow_rhs[id] = rhs;");
275  printer->add_line("shadow_d[id] = g;");
276  } else {
277  auto rhs_op = operator_for_rhs();
278  auto d_op = operator_for_d();
279  printer->fmt_line("vec_rhs[node_id] {} rhs;", rhs_op);
280  printer->fmt_line("vec_d[node_id] {} g;", d_op);
281  }
282 }
283 
284 
286  auto rhs_op = operator_for_rhs();
287  auto d_op = operator_for_d();
288  if (info.point_process) {
289  printer->add_line("int node_id = node_index[id];");
290  printer->fmt_line("vec_rhs[node_id] {} shadow_rhs[id];", rhs_op);
291  printer->fmt_line("vec_d[node_id] {} shadow_d[id];", d_op);
292  }
293 }
294 
295 
296 /**
297  * In the current implementation of CPU/CPP backend we need to emit atomic pragma
298  * only with PROTECT construct (atomic reduction requirement for other cases on CPU
299  * is handled via separate shadow vectors).
300  */
302  printer->add_line("#pragma omp atomic update");
303 }
304 
305 
307  // backend specific, nothing for cpu
308 }
309 
310 
312  // backend specific, nothing for cpu
313 }
314 
315 
317  // no separate namespace for C++ (cpu) backend
318 }
319 
320 
322  // no separate namespace for C++ (cpu) backend
323 }
324 
325 
327  // backend specific, nothing for cpu
328 }
329 
330 
332  return optimize_ionvar_copies;
333 }
334 
335 
337  printer->add_newline(2);
338  auto args = "size_t num, size_t size, size_t alignment = 16";
339  printer->fmt_push_block("static inline void* mem_alloc({})", args);
340  printer->add_line("void* ptr;");
341  printer->add_line("posix_memalign(&ptr, alignment, num*size);");
342  printer->add_line("memset(ptr, 0, size);");
343  printer->add_line("return ptr;");
344  printer->pop_block();
345 
346  printer->add_newline(2);
347  printer->push_block("static inline void mem_free(void* ptr)");
348  printer->add_line("free(ptr);");
349  printer->pop_block();
350 }
351 
352 
354  printer->add_newline(2);
355  printer->push_block("static inline void coreneuron_abort()");
356  printer->add_line("abort();");
357  printer->pop_block();
358 }
359 
360 
361 /****************************************************************************************/
362 /* Printing routines for code generation */
363 /****************************************************************************************/
364 
365 
367  if (info.top_verbatim_blocks.empty()) {
368  return;
369  }
370  print_namespace_stop();
371 
372  printer->add_newline(2);
373  printer->add_line("using namespace coreneuron;");
374 
375  printing_top_verbatim_blocks = true;
376 
377  for (const auto& block: info.top_blocks) {
378  if (block->is_verbatim()) {
379  printer->add_newline(2);
380  block->accept(*this);
381  }
382  }
383 
384  printing_top_verbatim_blocks = false;
385 
386  print_namespace_start();
387 }
388 
389 
391  if (info.functions.empty() && info.procedures.empty()) {
392  return;
393  }
394 
395  printer->add_newline(2);
396  for (const auto& node: info.functions) {
397  print_function_declaration(*node, node->get_node_name());
398  printer->add_text(';');
399  printer->add_newline();
400  }
401  for (const auto& node: info.procedures) {
402  print_function_declaration(*node, node->get_node_name());
403  printer->add_text(';');
404  printer->add_newline();
405  }
406 }
407 
408 
409 static const TableStatement* get_table_statement(const ast::Block& node) {
410  // TableStatementVisitor v;
411 
412  const auto& table_statements = collect_nodes(node, {AstNodeType::TABLE_STATEMENT});
413 
414  if (table_statements.size() != 1) {
415  auto message = fmt::format("One table statement expected in {} found {}",
416  node.get_node_name(),
417  table_statements.size());
418  throw std::runtime_error(message);
419  }
420  return dynamic_cast<const TableStatement*>(table_statements.front().get());
421 }
422 
423 
424 std::tuple<bool, int> CodegenCoreneuronCppVisitor::check_if_var_is_array(const std::string& name) {
425  auto symbol = program_symtab->lookup_in_scope(name);
426  if (!symbol) {
427  throw std::runtime_error(
428  fmt::format("CodegenCoreneuronCppVisitor:: {} not found in symbol table!", name));
429  }
430  if (symbol->is_array()) {
431  return {true, symbol->get_length()};
432  } else {
433  return {false, 0};
434  }
435 }
436 
437 
439  auto statement = get_table_statement(node);
440  auto table_variables = statement->get_table_vars();
441  auto depend_variables = statement->get_depend_vars();
442  const auto& from = statement->get_from();
443  const auto& to = statement->get_to();
444  auto name = node.get_node_name();
445  auto internal_params = internal_method_parameters();
446  auto with = statement->get_with()->eval();
447  auto use_table_var = get_variable_name(naming::USE_TABLE_VARIABLE);
448  auto tmin_name = get_variable_name("tmin_" + name);
449  auto mfac_name = get_variable_name("mfac_" + name);
450  auto float_type = default_float_data_type();
451 
452  printer->add_newline(2);
453  print_device_method_annotation();
454  printer->fmt_push_block("void check_{}({})",
455  method_name(name),
456  get_parameter_str(internal_params));
457  {
458  printer->fmt_push_block("if ({} == 0)", use_table_var);
459  printer->add_line("return;");
460  printer->pop_block();
461 
462  printer->add_line("static bool make_table = true;");
463  for (const auto& variable: depend_variables) {
464  printer->fmt_line("static {} save_{};", float_type, variable->get_node_name());
465  }
466 
467  for (const auto& variable: depend_variables) {
468  const auto& var_name = variable->get_node_name();
469  const auto& instance_name = get_variable_name(var_name);
470  printer->fmt_push_block("if (save_{} != {})", var_name, instance_name);
471  printer->add_line("make_table = true;");
472  printer->pop_block();
473  }
474 
475  printer->push_block("if (make_table)");
476  {
477  printer->add_line("make_table = false;");
478 
479  printer->add_indent();
480  printer->add_text(tmin_name, " = ");
481  from->accept(*this);
482  printer->add_text(';');
483  printer->add_newline();
484 
485  printer->add_indent();
486  printer->add_text("double tmax = ");
487  to->accept(*this);
488  printer->add_text(';');
489  printer->add_newline();
490 
491 
492  printer->fmt_line("double dx = (tmax-{}) / {}.;", tmin_name, with);
493  printer->fmt_line("{} = 1./dx;", mfac_name);
494 
495  printer->fmt_line("double x = {};", tmin_name);
496  printer->fmt_push_block("for (std::size_t i = 0; i < {}; x += dx, i++)", with + 1);
497  auto function = method_name("f_" + name);
498  if (node.is_procedure_block()) {
499  printer->fmt_line("{}({}, x);", function, internal_method_arguments());
500  for (const auto& variable: table_variables) {
501  auto var_name = variable->get_node_name();
502  auto instance_name = get_variable_name(var_name);
503  auto table_name = get_variable_name("t_" + var_name);
504  auto [is_array, array_length] = check_if_var_is_array(var_name);
505  if (is_array) {
506  for (int j = 0; j < array_length; j++) {
507  printer->fmt_line(
508  "{}[{}][i] = {}[{}];", table_name, j, instance_name, j);
509  }
510  } else {
511  printer->fmt_line("{}[i] = {};", table_name, instance_name);
512  }
513  }
514  } else {
515  auto table_name = get_variable_name("t_" + name);
516  printer->fmt_line("{}[i] = {}({}, x);",
517  table_name,
518  function,
519  internal_method_arguments());
520  }
521  printer->pop_block();
522 
523  for (const auto& variable: depend_variables) {
524  auto var_name = variable->get_node_name();
525  auto instance_name = get_variable_name(var_name);
526  printer->fmt_line("save_{} = {};", var_name, instance_name);
527  }
528  }
529  printer->pop_block();
530  }
531  printer->pop_block();
532 }
533 
534 
536  auto name = node.get_node_name();
537  auto statement = get_table_statement(node);
538  auto table_variables = statement->get_table_vars();
539  auto with = statement->get_with()->eval();
540  auto use_table_var = get_variable_name(naming::USE_TABLE_VARIABLE);
541  auto tmin_name = get_variable_name("tmin_" + name);
542  auto mfac_name = get_variable_name("mfac_" + name);
543  auto function_name = method_name("f_" + name);
544 
545  printer->add_newline(2);
546  print_function_declaration(node, name);
547  printer->push_block();
548  {
549  const auto& params = node.get_parameters();
550  printer->fmt_push_block("if ({} == 0)", use_table_var);
551  if (node.is_procedure_block()) {
552  printer->fmt_line("{}({}, {});",
553  function_name,
554  internal_method_arguments(),
555  params[0].get()->get_node_name());
556  printer->add_line("return 0;");
557  } else {
558  printer->fmt_line("return {}({}, {});",
559  function_name,
560  internal_method_arguments(),
561  params[0].get()->get_node_name());
562  }
563  printer->pop_block();
564 
565  printer->fmt_line("double xi = {} * ({} - {});",
566  mfac_name,
567  params[0].get()->get_node_name(),
568  tmin_name);
569  printer->push_block("if (isnan(xi))");
570  if (node.is_procedure_block()) {
571  for (const auto& var: table_variables) {
572  auto var_name = get_variable_name(var->get_node_name());
573  auto [is_array, array_length] = check_if_var_is_array(var->get_node_name());
574  if (is_array) {
575  for (int j = 0; j < array_length; j++) {
576  printer->fmt_line("{}[{}] = xi;", var_name, j);
577  }
578  } else {
579  printer->fmt_line("{} = xi;", var_name);
580  }
581  }
582  printer->add_line("return 0;");
583  } else {
584  printer->add_line("return xi;");
585  }
586  printer->pop_block();
587 
588  printer->fmt_push_block("if (xi <= 0. || xi >= {}.)", with);
589  printer->fmt_line("int index = (xi <= 0.) ? 0 : {};", with);
590  if (node.is_procedure_block()) {
591  for (const auto& variable: table_variables) {
592  auto var_name = variable->get_node_name();
593  auto instance_name = get_variable_name(var_name);
594  auto table_name = get_variable_name("t_" + var_name);
595  auto [is_array, array_length] = check_if_var_is_array(var_name);
596  if (is_array) {
597  for (int j = 0; j < array_length; j++) {
598  printer->fmt_line(
599  "{}[{}] = {}[{}][index];", instance_name, j, table_name, j);
600  }
601  } else {
602  printer->fmt_line("{} = {}[index];", instance_name, table_name);
603  }
604  }
605  printer->add_line("return 0;");
606  } else {
607  auto table_name = get_variable_name("t_" + name);
608  printer->fmt_line("return {}[index];", table_name);
609  }
610  printer->pop_block();
611 
612  printer->add_line("int i = int(xi);");
613  printer->add_line("double theta = xi - double(i);");
614  if (node.is_procedure_block()) {
615  for (const auto& var: table_variables) {
616  auto var_name = var->get_node_name();
617  auto instance_name = get_variable_name(var_name);
618  auto table_name = get_variable_name("t_" + var_name);
619  auto [is_array, array_length] = check_if_var_is_array(var->get_node_name());
620  if (is_array) {
621  for (size_t j = 0; j < array_length; j++) {
622  printer->fmt_line(
623  "{0}[{1}] = {2}[{1}][i] + theta*({2}[{1}][i+1]-{2}[{1}][i]);",
624  instance_name,
625  j,
626  table_name);
627  }
628  } else {
629  printer->fmt_line("{0} = {1}[i] + theta*({1}[i+1]-{1}[i]);",
630  instance_name,
631  table_name);
632  }
633  }
634  printer->add_line("return 0;");
635  } else {
636  auto table_name = get_variable_name("t_" + name);
637  printer->fmt_line("return {0}[i] + theta * ({0}[i+1] - {0}[i]);", table_name);
638  }
639  }
640  printer->pop_block();
641 }
642 
643 
645  if (info.table_count == 0) {
646  return;
647  }
648 
649  printer->add_newline(2);
650  auto name = method_name("check_table_thread");
651  auto parameters = external_method_parameters(true);
652 
653  printer->fmt_push_block("static void {} ({})", name, parameters);
654  printer->add_line("setup_instance(nt, ml);");
655  printer->fmt_line("auto* const inst = static_cast<{0}*>(ml->instance);", instance_struct());
656  printer->add_line("double v = 0;");
657 
658  for (const auto& function: info.functions_with_table) {
659  auto method_name_str = method_name("check_" + function->get_node_name());
660  auto arguments = internal_method_arguments();
661  printer->fmt_line("{}({});", method_name_str, arguments);
662  }
663 
664  printer->pop_block();
665 }
666 
667 
669  const std::string& name) {
670  printer->add_newline(2);
671  print_function_declaration(node, name);
672  printer->add_text(" ");
673  printer->push_block();
674 
675  // function requires return variable declaration
676  if (node.is_function_block()) {
677  auto type = default_float_data_type();
678  printer->fmt_line("{} ret_{} = 0.0;", type, name);
679  } else {
680  printer->fmt_line("int ret_{} = 0;", name);
681  }
682 
683  print_statement_block(*node.get_statement_block(), false, false);
684  printer->fmt_line("return ret_{};", name);
685  printer->pop_block();
686 }
687 
688 
690  auto name = node.get_node_name();
691 
692  if (info.function_uses_table(name)) {
693  auto new_name = "f_" + name;
694  print_function_or_procedure(node, new_name);
695  print_table_check_function(node);
696  print_table_replacement_function(node);
697  } else {
698  print_function_or_procedure(node, name);
699  }
700 }
701 
702 
704  print_function_procedure_helper(node);
705 }
706 
707 
709  auto name = node.get_node_name();
710 
711  // name of return variable
712  std::string return_var;
713  if (info.function_uses_table(name)) {
714  return_var = "ret_f_" + name;
715  } else {
716  return_var = "ret_" + name;
717  }
718 
719  // first rename return variable name
720  auto block = node.get_statement_block().get();
721  RenameVisitor v(name, return_var);
722  block->accept(v);
723 
724  print_function_procedure_helper(node);
725 }
726 
727 
729  auto name = node.get_node_name();
730  const auto& p = node.get_parameters();
731  auto params = internal_method_parameters();
732  for (const auto& i: p) {
733  params.emplace_back("", "double", "", i->get_node_name());
734  }
735  printer->fmt_line("double {}({})", method_name(name), get_parameter_str(params));
736  printer->push_block();
737  printer->fmt_line("double _arg[{}];", p.size());
738  for (size_t i = 0; i < p.size(); ++i) {
739  printer->fmt_line("_arg[{}] = {};", i, p[i]->get_node_name());
740  }
741  printer->fmt_line("return hoc_func_table({}, {}, _arg);",
742  get_variable_name(std::string("_ptable_" + name), true),
743  p.size());
744  printer->pop_block();
745 
746  printer->fmt_push_block("double table_{}()", method_name(name));
747  printer->fmt_line("hoc_spec_table(&{}, {});",
748  get_variable_name(std::string("_ptable_" + name)),
749  p.size());
750  printer->add_line("return 0.;");
751  printer->pop_block();
752 }
753 
754 
755 /**
756  * @brief Checks whether the functor_block generated by sympy solver modifies any variable outside
757  * its scope. If it does then return false, so that the operator() of the struct functor of the
758  * Eigen Newton solver doesn't have const qualifier.
759  *
760  * @param variable_block Statement Block of the variables declarations used in the functor struct of
761  * the solver
762  * @param functor_block Actual code being printed in the operator() of the functor struct of the
763  * solver
764  * @return True if operator() is const else False
765  */
767  const ast::StatementBlock& functor_block) {
768  // Create complete_block with both variable declarations (done in variable_block) and solver
769  // part (done in functor_block) to be able to run the SymtabVisitor and DefUseAnalyzeVisitor
770  // then and get the proper DUChains for the variables defined in the variable_block
771  ast::StatementBlock complete_block(functor_block);
772  // Typically variable_block has only one statement, a statement containing the declaration
773  // of the local variables
774  for (const auto& statement: variable_block.get_statements()) {
775  complete_block.insert_statement(complete_block.get_statements().begin(), statement);
776  }
777 
778  // Create Symbol Table for complete_block
779  auto model_symbol_table = std::make_shared<symtab::ModelSymbolTable>();
780  SymtabVisitor(model_symbol_table.get()).visit_statement_block(complete_block);
781  // Initialize DefUseAnalyzeVisitor to generate the DUChains for the variables defined in the
782  // variable_block
783  DefUseAnalyzeVisitor v(*complete_block.get_symbol_table());
784 
785  // Check the DUChains for all the variables in the variable_block
786  // If variable is defined in complete_block don't add const quilifier in operator()
787  auto is_functor_const = true;
788  const auto& variables = collect_nodes(variable_block, {ast::AstNodeType::LOCAL_VAR});
789  for (const auto& variable: variables) {
790  const auto& chain = v.analyze(complete_block, variable->get_node_name());
791  is_functor_const = !(chain.eval() == DUState::D || chain.eval() == DUState::LD ||
792  chain.eval() == DUState::CD);
793  if (!is_functor_const) {
794  break;
795  }
796  }
797 
798  return is_functor_const;
799 }
800 
801 
803  const ast::EigenNewtonSolverBlock& node) {
804  // functor that evaluates F(X) and J(X) for
805  // Newton solver
806  auto float_type = default_float_data_type();
807  int N = node.get_n_state_vars()->get_value();
808 
809  const auto functor_name = info.functor_names[&node];
810  printer->fmt_push_block("struct {0}", functor_name);
811  printer->add_line("NrnThread* nt;");
812  printer->add_line(instance_struct(), "* inst;");
813  printer->add_line("int id, pnodecount;");
814  printer->add_line("double v;");
815  printer->add_line("const Datum* indexes;");
816  printer->add_line("double* data;");
817  printer->add_line("ThreadDatum* thread;");
818 
819  if (ion_variable_struct_required()) {
820  print_ion_variable();
821  }
822 
823  print_statement_block(*node.get_variable_block(), false, false);
824  printer->add_newline();
825 
826  printer->push_block("void initialize()");
827  print_statement_block(*node.get_initialize_block(), false, false);
828  printer->pop_block();
829  printer->add_newline();
830 
831  printer->fmt_line(
832  "{0}(NrnThread* nt, {1}* inst, int id, int pnodecount, double v, const Datum* indexes, "
833  "double* data, ThreadDatum* thread) : "
834  "nt{{nt}}, inst{{inst}}, id{{id}}, pnodecount{{pnodecount}}, v{{v}}, indexes{{indexes}}, "
835  "data{{data}}, thread{{thread}} "
836  "{{}}",
837  functor_name,
838  instance_struct());
839 
840  printer->add_indent();
841 
842  const auto& variable_block = *node.get_variable_block();
843  const auto& functor_block = *node.get_functor_block();
844 
845  printer->fmt_text(
846  "void operator()(const Eigen::Matrix<{0}, {1}, 1>& nmodl_eigen_xm, Eigen::Matrix<{0}, {1}, "
847  "1>& nmodl_eigen_fm, "
848  "Eigen::Matrix<{0}, {1}, {1}>& nmodl_eigen_jm) {2}",
849  float_type,
850  N,
851  is_functor_const(variable_block, functor_block) ? "const " : "");
852  printer->push_block();
853  printer->fmt_line("const {}* nmodl_eigen_x = nmodl_eigen_xm.data();", float_type);
854  printer->fmt_line("{}* nmodl_eigen_j = nmodl_eigen_jm.data();", float_type);
855  printer->fmt_line("{}* nmodl_eigen_f = nmodl_eigen_fm.data();", float_type);
856  print_statement_block(functor_block, false, false);
857  printer->pop_block();
858  printer->add_newline();
859 
860  // assign newton solver results in matrix X to state vars
861  printer->push_block("void finalize()");
862  print_statement_block(*node.get_finalize_block(), false, false);
863  printer->pop_block();
864 
865  printer->pop_block(";");
866 }
867 
868 
void CodegenCoreneuronCppVisitor::print_eigen_linear_solver(const std::string& float_type, int N) {
    // Emits code that solves the dense N x N linear system
    // nmodl_eigen_jm * nmodl_eigen_xm = nmodl_eigen_fm; the emitted strategy
    // depends on the system size N.
    if (N <= 4) {
        // Faster compared to LU, given the template specialization in Eigen.
        printer->add_multi_line(R"CODE(
        bool invertible;
        nmodl_eigen_jm.computeInverseWithCheck(nmodl_eigen_jm_inv,invertible);
        nmodl_eigen_xm = nmodl_eigen_jm_inv*nmodl_eigen_fm;
        if (!invertible) assert(false && "Singular or ill-conditioned matrix (Eigen::inverse)!");
        )CODE");
    } else {
        // In Eigen the default storage order is ColMajor.
        // Crout's implementation requires matrices stored in RowMajor order (C++-style arrays).
        // Therefore, the transposeInPlace is critical such that the data() method to give the rows
        // instead of the columns.
        printer->add_line("if (!nmodl_eigen_jm.IsRowMajor) nmodl_eigen_jm.transposeInPlace();");

        // pivot vector
        printer->fmt_line("Eigen::Matrix<int, {}, 1> pivot;", N);
        printer->fmt_line("Eigen::Matrix<{0}, {1}, 1> rowmax;", float_type, N);

        // In-place LU-Decomposition (Crout Algo) : Jm is replaced by its LU-decomposition
        printer->fmt_line(
            "if (nmodl::crout::Crout<{0}>({1}, nmodl_eigen_jm.data(), pivot.data(), rowmax.data()) "
            "< 0) assert(false && \"Singular or ill-conditioned matrix (nmodl::crout)!\");",
            float_type,
            N);

        // Solve the linear system : Forward/Backward substitution part
        printer->fmt_line(
            "nmodl::crout::solveCrout<{0}>({1}, nmodl_eigen_jm.data(), nmodl_eigen_fm.data(), "
            "nmodl_eigen_xm.data(), pivot.data());",
            float_type,
            N);
    }
}
904 
905 
906 /****************************************************************************************/
907 /* Code-specific helper routines */
908 /****************************************************************************************/
909 
910 
912  if (ion_variable_struct_required()) {
913  return "id, pnodecount, inst, ionvar, data, indexes, thread, nt, v";
914  }
915  return "id, pnodecount, inst, data, indexes, thread, nt, v";
916 }
917 
918 
919 /**
920  * @todo: figure out how to correctly handle qualifiers
921  */
923  ParamVector params;
924  params.emplace_back("", "int", "", "id");
925  params.emplace_back("", "int", "", "pnodecount");
926  params.emplace_back("", fmt::format("{}*", instance_struct()), "", "inst");
927  if (ion_variable_struct_required()) {
928  params.emplace_back("", "IonCurVar&", "", "ionvar");
929  }
930  params.emplace_back("", "double*", "", "data");
931  params.emplace_back("const ", "Datum*", "", "indexes");
932  params.emplace_back("", "ThreadDatum*", "", "thread");
933  params.emplace_back("", "NrnThread*", "", "nt");
934  params.emplace_back("", "double", "", "v");
935  return params;
936 }
937 
938 
940  return "id, pnodecount, data, indexes, thread, nt, ml, v";
941 }
942 
943 
945  if (table) {
946  return "int id, int pnodecount, double* data, Datum* indexes, "
947  "ThreadDatum* thread, NrnThread* nt, Memb_list* ml, int tml_id";
948  }
949  return "int id, int pnodecount, double* data, Datum* indexes, "
950  "ThreadDatum* thread, NrnThread* nt, Memb_list* ml, double v";
951 }
952 
953 
955  if (ion_variable_struct_required()) {
956  return "id, pnodecount, ionvar, data, indexes, thread, nt, ml, v";
957  }
958  return "id, pnodecount, data, indexes, thread, nt, ml, v";
959 }
960 
961 
962 /**
963  * Function call arguments when function or procedure is defined in the
964  * same mod file itself
965  */
967  if (ion_variable_struct_required()) {
968  return "id, pnodecount, inst, ionvar, data, indexes, thread, nt, v";
969  }
970  return "id, pnodecount, inst, data, indexes, thread, nt, v";
971 }
972 
973 
974 /**
975  * Replace commonly used variables in the verbatim blocks into their corresponding
976  * variable name in the new code generation backend.
977  */
980  name = naming::VERBATIM_VARIABLES_MAPPING.at(name);
981  }
982 
983  /**
984  * if function is defined the same mod file then the arguments must
985  * contain mechanism instance as well.
986  */
987  if (name == naming::THREAD_ARGS) {
988  if (internal_method_call_encountered) {
989  name = nrn_thread_internal_arguments();
990  internal_method_call_encountered = false;
991  } else {
992  name = nrn_thread_arguments();
993  }
994  }
995  if (name == naming::THREAD_ARGS_PROTO) {
996  name = external_method_parameters();
997  }
998  return name;
999 }
1000 
1001 
1002 /**
1003  * Processing commonly used constructs in the verbatim blocks.
1004  * @todo : this is still ad-hoc and requires re-implementation to
1005  * handle it more elegantly.
1006  */
1007 std::string CodegenCoreneuronCppVisitor::process_verbatim_text(std::string const& text) {
1009  driver.scan_string(text);
1010  auto tokens = driver.all_tokens();
1011  std::string result;
1012  for (size_t i = 0; i < tokens.size(); i++) {
1013  auto token = tokens[i];
1014 
1015  // check if we have function call in the verbatim block where
1016  // function is defined in the same mod file
1017  if (program_symtab->is_method_defined(token) && tokens[i + 1] == "(") {
1018  internal_method_call_encountered = true;
1019  }
1020  auto name = process_verbatim_token(token);
1021 
1022  if (token == (std::string("_") + naming::TQITEM_VARIABLE)) {
1023  name.insert(0, 1, '&');
1024  }
1025  if (token == "_STRIDE") {
1026  name = "pnodecount+id";
1027  }
1028  result += name;
1029  }
1030  return result;
1031 }
1032 
1033 
1035  auto nrn_channel_info_var_name = get_channel_info_var_name();
1036  auto nrn_cur = nrn_cur_required() ? method_name(naming::NRN_CUR_METHOD) : "nullptr";
1037  auto nrn_state = nrn_state_required() ? method_name(naming::NRN_STATE_METHOD) : "nullptr";
1038  auto nrn_alloc = method_name(naming::NRN_ALLOC_METHOD);
1039  auto nrn_init = method_name(naming::NRN_INIT_METHOD);
1040  auto const nrn_private_constructor = method_name(naming::NRN_PRIVATE_CONSTRUCTOR_METHOD);
1041  auto const nrn_private_destructor = method_name(naming::NRN_PRIVATE_DESTRUCTOR_METHOD);
1042  return fmt::format("{}, {}, {}, nullptr, {}, {}, {}, {}, first_pointer_var_index()",
1043  nrn_channel_info_var_name,
1044  nrn_alloc,
1045  nrn_cur,
1046  nrn_state,
1047  nrn_init,
1048  nrn_private_constructor,
1049  nrn_private_destructor);
1050 }
1051 
1052 
1053 std::string CodegenCoreneuronCppVisitor::conc_write_statement(const std::string& ion_name,
1054  const std::string& concentration,
1055  int index) {
1056  auto conc_var_name = get_variable_name(naming::ION_VARNAME_PREFIX + concentration);
1057  auto style_var_name = get_variable_name("style_" + ion_name);
1058  return fmt::format(
1059  "nrn_wrote_conc({}_type,"
1060  " &({}),"
1061  " {},"
1062  " {},"
1063  " nrn_ion_global_map,"
1064  " {},"
1065  " nt->_ml_list[{}_type]->_nodecount_padded)",
1066  ion_name,
1067  conc_var_name,
1068  index,
1069  style_var_name,
1070  get_variable_name(naming::CELSIUS_VARIABLE),
1071  ion_name);
1072 }
1073 
1074 
1075 /****************************************************************************************/
1076 /* Code-specific printing routines for code generation */
1077 /****************************************************************************************/
1078 
1079 
1081  printer->add_newline(2);
1082  print_device_method_annotation();
1083  printer->push_block("static inline int first_pointer_var_index()");
1084  printer->fmt_line("return {};", info.first_pointer_var_index);
1085  printer->pop_block();
1086 }
1087 
1088 
1090  printer->add_newline(2);
1091  print_device_method_annotation();
1092  printer->push_block("static inline int first_random_var_index()");
1093  printer->fmt_line("return {};", info.first_random_var_index);
1094  printer->pop_block();
1095 }
1096 
1097 
1099  printer->add_newline(2);
1100  print_device_method_annotation();
1101  printer->push_block("static inline int float_variables_size()");
1102  printer->fmt_line("return {};", float_variables_size());
1103  printer->pop_block();
1104 
1105  printer->add_newline(2);
1106  print_device_method_annotation();
1107  printer->push_block("static inline int int_variables_size()");
1108  printer->fmt_line("return {};", int_variables_size());
1109  printer->pop_block();
1110 }
1111 
1112 
1114  if (!net_receive_exist()) {
1115  return;
1116  }
1117  printer->add_newline(2);
1118  print_device_method_annotation();
1119  printer->push_block("static inline int num_net_receive_args()");
1120  printer->fmt_line("return {};", info.num_net_receive_parameters);
1121  printer->pop_block();
1122 }
1123 
1124 
1126  printer->add_newline(2);
1127  print_device_method_annotation();
1128  printer->push_block("static inline int get_mech_type()");
1129  // false => get it from the host-only global struct, not the instance structure
1130  printer->fmt_line("return {};", get_variable_name("mech_type", false));
1131  printer->pop_block();
1132 }
1133 
1134 
1136  printer->add_newline(2);
1137  print_device_method_annotation();
1138  printer->push_block("static inline Memb_list* get_memb_list(NrnThread* nt)");
1139  printer->push_block("if (!nt->_ml_list)");
1140  printer->add_line("return nullptr;");
1141  printer->pop_block();
1142  printer->add_line("return nt->_ml_list[get_mech_type()];");
1143  printer->pop_block();
1144 }
1145 
1146 
1148  printer->add_newline(2);
1149  printer->push_block("namespace coreneuron");
1150 }
1151 
1152 
1154  printer->pop_block();
1155 }
1156 
1157 
1158 /**
1159  * \details There are three types of thread variables currently considered:
1160  * - top local thread variables
1161  * - thread variables in the mod file
1162  * - thread variables for solver
1163  *
1164  * These variables are allocated into different thread structures and have
1165  * corresponding thread ids. Thread id start from 0. In mod2c implementation,
1166  * thread_data_index is increased at various places and it is used to
1167  * decide the index of thread.
1168  */
1170  if (info.vectorize && info.derivimplicit_used()) {
1171  int tid = info.derivimplicit_var_thread_id;
1172  int list = info.derivimplicit_list_num;
1173 
1174  // clang-format off
1175  printer->add_newline(2);
1176  printer->add_line("/** thread specific helper routines for derivimplicit */");
1177 
1178  printer->add_newline(1);
1179  printer->fmt_push_block("static inline int* deriv{}_advance(ThreadDatum* thread)", list);
1180  printer->fmt_line("return &(thread[{}].i);", tid);
1181  printer->pop_block();
1182  printer->add_newline();
1183 
1184  printer->fmt_push_block("static inline int dith{}()", list);
1185  printer->fmt_line("return {};", tid+1);
1186  printer->pop_block();
1187  printer->add_newline();
1188 
1189  printer->fmt_push_block("static inline void** newtonspace{}(ThreadDatum* thread)", list);
1190  printer->fmt_line("return &(thread[{}]._pvoid);", tid+2);
1191  printer->pop_block();
1192  }
1193 
1194  if (info.vectorize && !info.thread_variables.empty()) {
1195  printer->add_newline(2);
1196  printer->add_line("/** tid for thread variables */");
1197  printer->push_block("static inline int thread_var_tid()");
1198  printer->fmt_line("return {};", info.thread_var_thread_id);
1199  printer->pop_block();
1200  }
1201 
1202  if (info.vectorize && !info.top_local_variables.empty()) {
1203  printer->add_newline(2);
1204  printer->add_line("/** tid for top local tread variables */");
1205  printer->push_block("static inline int top_local_var_tid()");
1206  printer->fmt_line("return {};", info.top_local_thread_id);
1207  printer->pop_block();
1208  }
1209  // clang-format on
1210 }
1211 
1212 
1213 /****************************************************************************************/
1214 /* Routines for returning variable name */
1215 /****************************************************************************************/
1216 
1217 
1219  bool use_instance) const {
1220  auto name = symbol->get_name();
1221  auto dimension = symbol->get_length();
1222  auto position = position_of_float_var(name);
1223  // clang-format off
1224  if (symbol->is_array()) {
1225  if (use_instance) {
1226  return fmt::format("(inst->{}+id*{})", name, dimension);
1227  }
1228  return fmt::format("(data + {}*pnodecount + id*{})", position, dimension);
1229  }
1230  if (use_instance) {
1231  return fmt::format("inst->{}[id]", name);
1232  }
1233  return fmt::format("data[{}*pnodecount + id]", position);
1234  // clang-format on
1235 }
1236 
1237 
1239  const std::string& name,
1240  bool use_instance) const {
1241  auto position = position_of_int_var(name);
1242  // clang-format off
1243  if (symbol.is_index) {
1244  if (use_instance) {
1245  return fmt::format("inst->{}[{}]", name, position);
1246  }
1247  return fmt::format("indexes[{}]", position);
1248  }
1249  if (symbol.is_integer) {
1250  if (use_instance) {
1251  return fmt::format("inst->{}[{}*pnodecount+id]", name, position);
1252  }
1253  return fmt::format("indexes[{}*pnodecount+id]", position);
1254  }
1255  if (use_instance) {
1256  return fmt::format("inst->{}[indexes[{}*pnodecount + id]]", name, position);
1257  }
1258  auto data = symbol.is_vdata ? "_vdata" : "_data";
1259  return fmt::format("nt->{}[indexes[{}*pnodecount + id]]", data, position);
1260  // clang-format on
1261 }
1262 
1263 
1265  bool use_instance) const {
1266  if (use_instance) {
1267  return fmt::format("inst->{}->{}", naming::INST_GLOBAL_MEMBER, symbol->get_name());
1268  } else {
1269  return fmt::format("{}.{}", global_struct_instance(), symbol->get_name());
1270  }
1271 }
1272 
1273 
/**
 * Resolve a NMODL variable name to its C++ code-generation expression.
 *
 * The lookup order is significant and must be preserved: float (range)
 * variables, then integer (index) variables, then mechanism global
 * variables, then the special thread variables dt/t, then NEURON global
 * variables, and finally the name itself unchanged.
 *
 * \param name         The NMODL variable name to resolve.
 * \param use_instance When true, emit an access through the instance
 *                     structure (`inst->...`); otherwise through the raw
 *                     data arrays / global struct.
 */
std::string CodegenCoreneuronCppVisitor::get_variable_name(const std::string& name,
                                                           bool use_instance) const {
    // ion variables may need renaming (e.g. to their ion-prefixed form) first
    const std::string& varname = update_if_ion_variable_name(name);

    // clang-format off
    // matches a SymbolType entry by name (used for float and global lookups)
    auto symbol_comparator = [&varname](const SymbolType& sym) {
        return varname == sym->get_name();
    };

    // matches an IndexVariableInfo entry by the name of its underlying symbol
    auto index_comparator = [&varname](const IndexVariableInfo& var) {
        return varname == var.symbol->get_name();
    };
    // clang-format on

    // float variable
    auto f = std::find_if(codegen_float_variables.begin(),
                          codegen_float_variables.end(),
                          symbol_comparator);
    if (f != codegen_float_variables.end()) {
        return float_variable_name(*f, use_instance);
    }

    // integer variable
    auto i =
        std::find_if(codegen_int_variables.begin(), codegen_int_variables.end(), index_comparator);
    if (i != codegen_int_variables.end()) {
        return int_variable_name(*i, varname, use_instance);
    }

    // global variable
    auto g = std::find_if(codegen_global_variables.begin(),
                          codegen_global_variables.end(),
                          symbol_comparator);
    if (g != codegen_global_variables.end()) {
        return global_variable_name(*g, use_instance);
    }

    // dt always comes from the current NrnThread
    if (varname == naming::NTHREAD_DT_VARIABLE) {
        return std::string("nt->_") + naming::NTHREAD_DT_VARIABLE;
    }

    // t in net_receive method is an argument to function and hence it should
    // be used instead of nt->_t which is current time of thread
    if (varname == naming::NTHREAD_T_VARIABLE && !printing_net_receive) {
        return std::string("nt->_") + naming::NTHREAD_T_VARIABLE;
    }

    // NEURON global variables (e.g. celsius): dereferenced through the
    // instance struct when use_instance is set, otherwise used by name
    auto const iter =
        std::find_if(info.neuron_global_variables.begin(),
                     info.neuron_global_variables.end(),
                     [&varname](auto const& entry) { return entry.first->get_name() == varname; });
    if (iter != info.neuron_global_variables.end()) {
        std::string ret;
        if (use_instance) {
            ret = "*(inst->";
        }
        ret.append(varname);
        if (use_instance) {
            ret.append(")");
        }
        return ret;
    }

    // otherwise return original name
    return varname;
}
1340 
1341 
1342 /****************************************************************************************/
1343 /* Main printing routines for code generation */
1344 /****************************************************************************************/
1345 
1346 
1348  time_t current_time{};
1349  time(&current_time);
1350  std::string data_time_str{std::ctime(&current_time)};
1351  auto version = nmodl::Version::NMODL_VERSION + " [" + nmodl::Version::GIT_REVISION + "]";
1352 
1353  printer->add_line("/*********************************************************");
1354  printer->add_line("Model Name : ", info.mod_suffix);
1355  printer->add_line("Filename : ", info.mod_file, ".mod");
1356  printer->add_line("NMODL Version : ", nmodl_version());
1357  printer->fmt_line("Vectorized : {}", info.vectorize);
1358  printer->fmt_line("Threadsafe : {}", info.thread_safe);
1359  printer->add_line("Created : ", stringutils::trim(data_time_str));
1360  printer->add_line("Simulator : ", simulator_name());
1361  printer->add_line("Backend : ", backend_name());
1362  printer->add_line("NMODL Compiler : ", version);
1363  printer->add_line("*********************************************************/");
1364 }
1365 
1366 
1368  printer->add_newline();
1369  printer->add_multi_line(R"CODE(
1370  #include <math.h>
1371  #include <stdio.h>
1372  #include <stdlib.h>
1373  #include <string.h>
1374  )CODE");
1375 }
1376 
1377 
1379  printer->add_newline();
1380  printer->add_multi_line(R"CODE(
1381  #include <coreneuron/gpu/nrn_acc_manager.hpp>
1382  #include <coreneuron/mechanism/mech/mod2c_core_thread.hpp>
1383  #include <coreneuron/mechanism/register_mech.hpp>
1384  #include <coreneuron/nrnconf.h>
1385  #include <coreneuron/nrniv/nrniv_decl.h>
1386  #include <coreneuron/sim/multicore.hpp>
1387  #include <coreneuron/sim/scopmath/newton_thread.hpp>
1388  #include <coreneuron/utils/ivocvect.hpp>
1389  #include <coreneuron/utils/nrnoc_aux.hpp>
1390  #include <coreneuron/utils/randoms/nrnran123.h>
1391  )CODE");
1392  if (info.eigen_newton_solver_exist) {
1393  printer->add_line("#include <newton/newton.hpp>");
1394  }
1395  if (info.eigen_linear_solver_exist) {
1396  if (std::accumulate(info.state_vars.begin(),
1397  info.state_vars.end(),
1398  0,
1399  [](int l, const SymbolType& variable) {
1400  return l += variable->get_length();
1401  }) > 4) {
1402  printer->add_line("#include <crout/crout.hpp>");
1403  } else {
1404  printer->add_line("#include <Eigen/Dense>");
1405  printer->add_line("#include <Eigen/LU>");
1406  }
1407  }
1408 }
1409 
1410 
1412  if (info.primes_size == 0) {
1413  return;
1414  }
1415  const auto count_prime_variables = [](auto size, const SymbolType& symbol) {
1416  return size += symbol->get_length();
1417  };
1418  const auto prime_variables_by_order_size =
1419  std::accumulate(info.prime_variables_by_order.begin(),
1420  info.prime_variables_by_order.end(),
1421  0,
1422  count_prime_variables);
1423  if (info.primes_size != prime_variables_by_order_size) {
1424  throw std::runtime_error{
1425  fmt::format("primes_size = {} differs from prime_variables_by_order.size() = {}, "
1426  "this should not happen.",
1427  info.primes_size,
1428  info.prime_variables_by_order.size())};
1429  }
1430  auto const initializer_list = [&](auto const& primes, const char* prefix) -> std::string {
1431  if (!print_initializers) {
1432  return {};
1433  }
1434  std::string list{"{"};
1435  for (auto iter = primes.begin(); iter != primes.end(); ++iter) {
1436  auto const& prime = *iter;
1437  list.append(std::to_string(position_of_float_var(prefix + prime->get_name())));
1438  if (std::next(iter) != primes.end()) {
1439  list.append(", ");
1440  }
1441  }
1442  list.append("}");
1443  return list;
1444  };
1445  printer->fmt_line("int slist1[{}]{};",
1446  info.primes_size,
1447  initializer_list(info.prime_variables_by_order, ""));
1448  printer->fmt_line("int dlist1[{}]{};",
1449  info.primes_size,
1450  initializer_list(info.prime_variables_by_order, "D"));
1451  codegen_global_variables.push_back(make_symbol("slist1"));
1452  codegen_global_variables.push_back(make_symbol("dlist1"));
1453  // additional list for derivimplicit method
1454  if (info.derivimplicit_used()) {
1455  auto primes = program_symtab->get_variables_with_properties(NmodlType::prime_name);
1456  printer->fmt_line("int slist2[{}]{};", info.primes_size, initializer_list(primes, ""));
1457  codegen_global_variables.push_back(make_symbol("slist2"));
1458  }
1459 }
1460 
1461 
1462 /**
1463  * \details Variables required for type of ion, type of point process etc. are
1464  * of static int type. For the C++ backend type, it's ok to have
1465  * these variables as file scoped static variables.
1466  *
1467  * Initial values of state variables (h0) are also defined as static
1468  * variables. Note that the state could be ion variable and it could
1469  * be also range variable. Hence lookup into symbol table before.
1470  *
1471  * When model is not vectorized (shouldn't be the case in coreneuron)
1472  * the top local variables become static variables.
1473  *
1474  * Note that static variables are already initialized to 0. We do the
1475  * same for some variables to keep same code as neuron.
1476  */
1477 // NOLINTNEXTLINE(readability-function-cognitive-complexity)
1479  const auto value_initialize = print_initializers ? "{}" : "";
1480 
1481  auto float_type = default_float_data_type();
1482  printer->add_newline(2);
1483  printer->add_line("/** all global variables */");
1484  printer->fmt_push_block("struct {}", global_struct());
1485 
1486  for (const auto& ion: info.ions) {
1487  auto name = fmt::format("{}_type", ion.name);
1488  printer->fmt_line("int {}{};", name, value_initialize);
1489  codegen_global_variables.push_back(make_symbol(name));
1490  }
1491 
1492  if (info.point_process) {
1493  printer->fmt_line("int point_type{};", value_initialize);
1494  codegen_global_variables.push_back(make_symbol("point_type"));
1495  }
1496 
1497  for (const auto& var: info.state_vars) {
1498  auto name = var->get_name() + "0";
1499  auto symbol = program_symtab->lookup(name);
1500  if (symbol == nullptr) {
1501  printer->fmt_line("{} {}{};", float_type, name, value_initialize);
1502  codegen_global_variables.push_back(make_symbol(name));
1503  }
1504  }
1505 
 1506  // NEURON and CoreNEURON add "v" to the global variables when vectorize
 1507  // is false. But since v is always a local variable and passed as an
 1508  // argument, we don't need to use a global variable v
1509 
1510  auto& top_locals = info.top_local_variables;
1511  if (!info.vectorize && !top_locals.empty()) {
1512  for (const auto& var: top_locals) {
1513  auto name = var->get_name();
1514  auto length = var->get_length();
1515  if (var->is_array()) {
1516  printer->fmt_line("{} {}[{}] /* TODO init top-local-array */;",
1517  float_type,
1518  name,
1519  length);
1520  } else {
1521  printer->fmt_line("{} {} /* TODO init top-local */;", float_type, name);
1522  }
1523  codegen_global_variables.push_back(var);
1524  }
1525  }
1526 
1527  if (!info.thread_variables.empty()) {
1528  printer->fmt_line("int thread_data_in_use{};", value_initialize);
1529  printer->fmt_line("{} thread_data[{}] /* TODO init thread_data */;",
1530  float_type,
1531  info.thread_var_data_size);
1532  codegen_global_variables.push_back(make_symbol("thread_data_in_use"));
1533  auto symbol = make_symbol("thread_data");
1534  symbol->set_as_array(info.thread_var_data_size);
1535  codegen_global_variables.push_back(symbol);
1536  }
1537 
1538  // TODO: remove this entirely?
1539  printer->fmt_line("int reset{};", value_initialize);
1540  codegen_global_variables.push_back(make_symbol("reset"));
1541 
1542  printer->fmt_line("int mech_type{};", value_initialize);
1543  codegen_global_variables.push_back(make_symbol("mech_type"));
1544 
1545  for (const auto& var: info.global_variables) {
1546  auto name = var->get_name();
1547  auto length = var->get_length();
1548  if (var->is_array()) {
1549  printer->fmt_line("{} {}[{}] /* TODO init const-array */;", float_type, name, length);
1550  } else {
1551  double value{};
1552  if (auto const& value_ptr = var->get_value()) {
1553  value = *value_ptr;
1554  }
1555  printer->fmt_line("{} {}{};",
1556  float_type,
1557  name,
1558  print_initializers ? fmt::format("{{{:g}}}", value) : std::string{});
1559  }
1560  codegen_global_variables.push_back(var);
1561  }
1562 
1563  for (const auto& var: info.constant_variables) {
1564  auto const name = var->get_name();
1565  auto* const value_ptr = var->get_value().get();
1566  double const value{value_ptr ? *value_ptr : 0};
1567  printer->fmt_line("{} {}{};",
1568  float_type,
1569  name,
1570  print_initializers ? fmt::format("{{{:g}}}", value) : std::string{});
1571  codegen_global_variables.push_back(var);
1572  }
1573 
1574  print_sdlists_init(print_initializers);
1575 
1576  if (info.table_count > 0) {
1577  printer->fmt_line("double usetable{};", print_initializers ? "{1}" : "");
1578  codegen_global_variables.push_back(make_symbol(naming::USE_TABLE_VARIABLE));
1579 
1580  for (const auto& block: info.functions_with_table) {
1581  const auto& name = block->get_node_name();
1582  printer->fmt_line("{} tmin_{}{};", float_type, name, value_initialize);
1583  printer->fmt_line("{} mfac_{}{};", float_type, name, value_initialize);
1584  codegen_global_variables.push_back(make_symbol("tmin_" + name));
1585  codegen_global_variables.push_back(make_symbol("mfac_" + name));
1586  }
1587 
1588  for (const auto& variable: info.table_statement_variables) {
1589  auto const name = "t_" + variable->get_name();
1590  auto const num_values = variable->get_num_values();
1591  if (variable->is_array()) {
1592  int array_len = variable->get_length();
1593  printer->fmt_line(
1594  "{} {}[{}][{}]{};", float_type, name, array_len, num_values, value_initialize);
1595  } else {
1596  printer->fmt_line("{} {}[{}]{};", float_type, name, num_values, value_initialize);
1597  }
1598  codegen_global_variables.push_back(make_symbol(name));
1599  }
1600  }
1601 
1602  for (const auto& f: info.function_tables) {
1603  printer->fmt_line("void* _ptable_{}{{}};", f->get_node_name());
1604  codegen_global_variables.push_back(make_symbol("_ptable_" + f->get_node_name()));
1605  }
1606 
1607  if (info.vectorize && info.thread_data_index) {
1608  printer->fmt_line("ThreadDatum ext_call_thread[{}]{};",
1609  info.thread_data_index,
1610  value_initialize);
1611  codegen_global_variables.push_back(make_symbol("ext_call_thread"));
1612  }
1613 
1614  printer->pop_block(";");
1615 
1616  print_global_var_struct_assertions();
1617  print_global_var_struct_decl();
1618 }
1619 
1620 
1621 /**
1622  * Print structs that encapsulate information about scalar and
1623  * vector elements of type global and thread variables.
1624  */
1626  auto variable_printer =
1627  [&](const std::vector<SymbolType>& variables, bool if_array, bool if_vector) {
1628  for (const auto& variable: variables) {
1629  if (variable->is_array() == if_array) {
1630  // false => do not use the instance struct, which is not
1631  // defined in the global declaration that we are printing
1632  auto name = get_variable_name(variable->get_name(), false);
1633  auto ename = add_escape_quote(variable->get_name() + "_" + info.mod_suffix);
1634  auto length = variable->get_length();
1635  if (if_vector) {
1636  printer->fmt_line("{{{}, {}, {}}},", ename, name, length);
1637  } else {
1638  printer->fmt_line("{{{}, &{}}},", ename, name);
1639  }
1640  }
1641  }
1642  };
1643 
1644  auto globals = info.global_variables;
1645  auto thread_vars = info.thread_variables;
1646 
1647  if (info.table_count > 0) {
1648  globals.push_back(make_symbol(naming::USE_TABLE_VARIABLE));
1649  }
1650 
1651  printer->add_newline(2);
1652  printer->add_line("/** connect global (scalar) variables to hoc -- */");
1653  printer->add_line("static DoubScal hoc_scalar_double[] = {");
1654  printer->increase_indent();
1655  variable_printer(globals, false, false);
1656  variable_printer(thread_vars, false, false);
1657  printer->add_line("{nullptr, nullptr}");
1658  printer->decrease_indent();
1659  printer->add_line("};");
1660 
1661  printer->add_newline(2);
1662  printer->add_line("/** connect global (array) variables to hoc -- */");
1663  printer->add_line("static DoubVec hoc_vector_double[] = {");
1664  printer->increase_indent();
1665  variable_printer(globals, true, true);
1666  variable_printer(thread_vars, true, true);
1667  printer->add_line("{nullptr, nullptr, 0}");
1668  printer->decrease_indent();
1669  printer->add_line("};");
1670 }
1671 
1672 
1673 /**
1674  * Return registration type for a given BEFORE/AFTER block
 1675  * \param block A BEFORE/AFTER block being registered
1676  *
1677  * Depending on a block type i.e. BEFORE or AFTER and also type
 1678  * of its associated block i.e. BREAKPOINT, INITIAL, SOLVE and
1679  * STEP, the registration type (as an integer) is calculated.
1680  * These values are then interpreted by CoreNEURON internally.
1681  */
1682 static std::string get_register_type_for_ba_block(const ast::Block* block) {
1683  std::string register_type{};
1684  BAType ba_type{};
1685  /// before block have value 10 and after block 20
1686  if (block->is_before_block()) {
1687  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers)
1688  register_type = "BAType::Before";
1689  ba_type =
1690  dynamic_cast<const ast::BeforeBlock*>(block)->get_bablock()->get_type()->get_value();
1691  } else {
1692  // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers)
1693  register_type = "BAType::After";
1694  ba_type =
1695  dynamic_cast<const ast::AfterBlock*>(block)->get_bablock()->get_type()->get_value();
1696  }
1697 
1698  /// associated blocks have different values (1 to 4) based on type.
1699  /// These values are based on neuron/coreneuron implementation details.
1700  if (ba_type == BATYPE_BREAKPOINT) {
1701  register_type += " + BAType::Breakpoint";
1702  } else if (ba_type == BATYPE_SOLVE) {
1703  register_type += " + BAType::Solve";
1704  } else if (ba_type == BATYPE_INITIAL) {
1705  register_type += " + BAType::Initial";
1706  } else if (ba_type == BATYPE_STEP) {
1707  register_type += " + BAType::Step";
1708  } else {
1709  throw std::runtime_error("Unhandled Before/After type encountered during code generation");
1710  }
1711  return register_type;
1712 }
1713 
1714 
1715 /**
1716  * \details Every mod file has register function to connect with the simulator.
1717  * Various information about mechanism and callbacks get registered with
1718  * the simulator using suffix_reg() function.
1719  *
1720  * Here are details:
1721  * - We should exclude that callback based on the solver, watch statements.
1722  * - If nrn_get_mechtype is < -1 means that mechanism is not used in the
1723  * context of neuron execution and hence could be ignored in coreneuron
1724  * execution.
1725  * - Ions are internally defined and their types can be queried similar to
1726  * other mechanisms.
1727  * - hoc_register_var may not be needed in the context of coreneuron
1728  * - We assume net receive buffer is on. This is because generated code is
1729  * compatible for cpu as well as gpu target.
1730  */
1731 // NOLINTNEXTLINE(readability-function-cognitive-complexity)
1733  printer->add_newline(2);
1734  printer->add_line("/** register channel with the simulator */");
1735  printer->fmt_push_block("void _{}_reg()", info.mod_file);
1736 
1737  // type related information
1738  auto suffix = add_escape_quote(info.mod_suffix);
1739  printer->add_newline();
1740  printer->fmt_line("int mech_type = nrn_get_mechtype({});", suffix);
1741  printer->fmt_line("{} = mech_type;", get_variable_name("mech_type", false));
1742  printer->push_block("if (mech_type == -1)");
1743  printer->add_line("return;");
1744  printer->pop_block();
1745 
1746  printer->add_newline();
1747  printer->add_line("_nrn_layout_reg(mech_type, 0);"); // 0 for SoA
1748 
1749  // register mechanism
1750  const auto mech_arguments = register_mechanism_arguments();
1751  const auto number_of_thread_objects = num_thread_objects();
1752  if (info.point_process) {
1753  printer->fmt_line("point_register_mech({}, {}, {}, {});",
1754  mech_arguments,
1755  info.constructor_node ? method_name(naming::NRN_CONSTRUCTOR_METHOD)
1756  : "nullptr",
1757  info.destructor_node ? method_name(naming::NRN_DESTRUCTOR_METHOD)
1758  : "nullptr",
1759  number_of_thread_objects);
1760  } else {
1761  printer->fmt_line("register_mech({}, {});", mech_arguments, number_of_thread_objects);
1762  if (info.constructor_node) {
1763  printer->fmt_line("register_constructor({});",
1764  method_name(naming::NRN_CONSTRUCTOR_METHOD));
1765  }
1766  }
1767 
1768  // types for ion
1769  for (const auto& ion: info.ions) {
1770  printer->fmt_line("{} = nrn_get_mechtype({});",
1771  get_variable_name(ion.name + "_type", false),
1772  add_escape_quote(ion.name + "_ion"));
1773  }
1774  printer->add_newline();
1775 
1776  /*
1777  * Register callbacks for thread allocation and cleanup. Note that thread_data_index
1778  * represent total number of thread used minus 1 (i.e. index of last thread).
1779  */
1780  if (info.vectorize && (info.thread_data_index != 0)) {
1781  // false to avoid getting the copy from the instance structure
1782  printer->fmt_line("thread_mem_init({});", get_variable_name("ext_call_thread", false));
1783  }
1784 
1785  if (!info.thread_variables.empty()) {
1786  printer->fmt_line("{} = 0;", get_variable_name("thread_data_in_use"));
1787  }
1788 
1789  if (info.thread_callback_register) {
1790  printer->add_line("_nrn_thread_reg0(mech_type, thread_mem_cleanup);");
1791  printer->add_line("_nrn_thread_reg1(mech_type, thread_mem_init);");
1792  }
1793 
1794  if (info.emit_table_thread()) {
1795  auto name = method_name("check_table_thread");
1796  printer->fmt_line("_nrn_thread_table_reg(mech_type, {});", name);
1797  }
1798 
1799  // register read/write callbacks for pointers
1800  if (info.bbcore_pointer_used) {
1801  printer->add_line("hoc_reg_bbcore_read(mech_type, bbcore_read);");
1802  printer->add_line("hoc_reg_bbcore_write(mech_type, bbcore_write);");
1803  }
1804 
1805  // register size of double and int elements
1806  // clang-format off
1807  printer->add_line("hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size());");
1808  // clang-format on
1809 
1810  // register semantics for index variables
1811  for (auto& semantic: info.semantics) {
1812  auto args =
1813  fmt::format("mech_type, {}, {}", semantic.index, add_escape_quote(semantic.name));
1814  printer->fmt_line("hoc_register_dparam_semantics({});", args);
1815  }
1816 
1817  if (info.is_watch_used()) {
1818  auto watch_fun = compute_method_name(BlockType::Watch);
1819  printer->fmt_line("hoc_register_watch_check({}, mech_type);", watch_fun);
1820  }
1821 
1822  if (info.write_concentration) {
1823  printer->add_line("nrn_writes_conc(mech_type, 0);");
1824  }
1825 
1826  // register various information for point process type
1827  if (info.net_event_used) {
1828  printer->add_line("add_nrn_has_net_event(mech_type);");
1829  }
1830  if (info.artificial_cell) {
1831  printer->fmt_line("add_nrn_artcell(mech_type, {});", info.tqitem_index);
1832  }
1833  if (net_receive_buffering_required()) {
1834  printer->fmt_line("hoc_register_net_receive_buffering({}, mech_type);",
1835  method_name("net_buf_receive"));
1836  }
1837  if (info.num_net_receive_parameters != 0) {
1838  auto net_recv_init_arg = "nullptr";
1839  if (info.net_receive_initial_node != nullptr) {
1840  net_recv_init_arg = "net_init";
1841  }
1842  printer->fmt_line("set_pnt_receive(mech_type, {}, {}, num_net_receive_args());",
1843  method_name("net_receive"),
1844  net_recv_init_arg);
1845  }
1846  if (info.for_netcon_used) {
1847  // index where information about FOR_NETCON is stored in the integer array
1848  const auto index =
1849  std::find_if(info.semantics.begin(), info.semantics.end(), [](const IndexSemantics& a) {
1850  return a.name == naming::FOR_NETCON_SEMANTIC;
1851  })->index;
1852  printer->fmt_line("add_nrn_fornetcons(mech_type, {});", index);
1853  }
1854 
1855  if (info.net_event_used || info.net_send_used) {
1856  printer->add_line("hoc_register_net_send_buffering(mech_type);");
1857  }
1858 
1859  /// register all before/after blocks
1860  for (size_t i = 0; i < info.before_after_blocks.size(); i++) {
1861  // register type and associated function name for the block
1862  const auto& block = info.before_after_blocks[i];
1863  std::string register_type = get_register_type_for_ba_block(block);
1864  std::string function_name = method_name(fmt::format("nrn_before_after_{}", i));
1865  printer->fmt_line("hoc_reg_ba(mech_type, {}, {});", function_name, register_type);
1866  }
1867 
1868  // register variables for hoc
1869  printer->add_line("hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL);");
1870  printer->pop_block();
1871 }
1872 
1873 
1875  if (!info.thread_callback_register) {
1876  return;
1877  }
1878 
1879  // thread_mem_init callback
1880  printer->add_newline(2);
1881  printer->add_line("/** thread memory allocation callback */");
1882  printer->push_block("static void thread_mem_init(ThreadDatum* thread) ");
1883 
1884  if (info.vectorize && info.derivimplicit_used()) {
1885  printer->fmt_line("thread[dith{}()].pval = nullptr;", info.derivimplicit_list_num);
1886  }
1887  if (info.vectorize && (info.top_local_thread_size != 0)) {
1888  auto length = info.top_local_thread_size;
1889  auto allocation = fmt::format("(double*)mem_alloc({}, sizeof(double))", length);
1890  printer->fmt_line("thread[top_local_var_tid()].pval = {};", allocation);
1891  }
1892  if (info.thread_var_data_size != 0) {
1893  auto length = info.thread_var_data_size;
1894  auto thread_data = get_variable_name("thread_data");
1895  auto thread_data_in_use = get_variable_name("thread_data_in_use");
1896  auto allocation = fmt::format("(double*)mem_alloc({}, sizeof(double))", length);
1897  printer->fmt_push_block("if ({})", thread_data_in_use);
1898  printer->fmt_line("thread[thread_var_tid()].pval = {};", allocation);
1899  printer->chain_block("else");
1900  printer->fmt_line("thread[thread_var_tid()].pval = {};", thread_data);
1901  printer->fmt_line("{} = 1;", thread_data_in_use);
1902  printer->pop_block();
1903  }
1904  printer->pop_block();
1905  printer->add_newline(2);
1906 
1907 
1908  // thread_mem_cleanup callback
1909  printer->add_line("/** thread memory cleanup callback */");
1910  printer->push_block("static void thread_mem_cleanup(ThreadDatum* thread) ");
1911 
1912  // clang-format off
1913  if (info.vectorize && info.derivimplicit_used()) {
1914  int n = info.derivimplicit_list_num;
1915  printer->fmt_line("free(thread[dith{}()].pval);", n);
1916  printer->fmt_line("nrn_destroy_newtonspace(static_cast<NewtonSpace*>(*newtonspace{}(thread)));", n);
1917  }
1918  // clang-format on
1919 
1920  if (info.top_local_thread_size != 0) {
1921  auto line = "free(thread[top_local_var_tid()].pval);";
1922  printer->add_line(line);
1923  }
1924  if (info.thread_var_data_size != 0) {
1925  auto thread_data = get_variable_name("thread_data");
1926  auto thread_data_in_use = get_variable_name("thread_data_in_use");
1927  printer->fmt_push_block("if (thread[thread_var_tid()].pval == {})", thread_data);
1928  printer->fmt_line("{} = 0;", thread_data_in_use);
1929  printer->chain_block("else");
1930  printer->add_line("free(thread[thread_var_tid()].pval);");
1931  printer->pop_block();
1932  }
1933  printer->pop_block();
1934 }
1935 
1936 
1938  auto const value_initialize = print_initializers ? "{}" : "";
1939  auto int_type = default_int_data_type();
1940  printer->add_newline(2);
1941  printer->add_line("/** all mechanism instance variables and global variables */");
1942  printer->fmt_push_block("struct {} ", instance_struct());
1943 
1944  for (auto const& [var, type]: info.neuron_global_variables) {
1945  auto const name = var->get_name();
1946  printer->fmt_line("{}* {}{};",
1947  type,
1948  name,
1949  print_initializers ? fmt::format("{{&coreneuron::{}}}", name)
1950  : std::string{});
1951  }
1952  for (auto& var: codegen_float_variables) {
1953  const auto& name = var->get_name();
1954  auto type = get_range_var_float_type(var);
1955  auto qualifier = is_constant_variable(name) ? "const " : "";
1956  printer->fmt_line("{}{}* {}{};", qualifier, type, name, value_initialize);
1957  }
1958  for (auto& var: codegen_int_variables) {
1959  const auto& name = var.symbol->get_name();
1960  if (var.is_index || var.is_integer) {
1961  auto qualifier = var.is_constant ? "const " : "";
1962  printer->fmt_line("{}{}* {}{};", qualifier, int_type, name, value_initialize);
1963  } else {
1964  auto qualifier = var.is_constant ? "const " : "";
1965  auto type = var.is_vdata ? "void*" : default_float_data_type();
1966  printer->fmt_line("{}{}* {}{};", qualifier, type, name, value_initialize);
1967  }
1968  }
1969 
1970  printer->fmt_line("{}* {}{};",
1971  global_struct(),
1973  print_initializers ? fmt::format("{{&{}}}", global_struct_instance())
1974  : std::string{});
1975  printer->pop_block(";");
1976 }
1977 
1978 
1980  if (!ion_variable_struct_required()) {
1981  return;
1982  }
1983  printer->add_newline(2);
1984  printer->add_line("/** ion write variables */");
1985  printer->push_block("struct IonCurVar");
1986 
1987  std::string float_type = default_float_data_type();
1988  std::vector<std::string> members;
1989 
1990  for (auto& ion: info.ions) {
1991  for (auto& var: ion.writes) {
1992  printer->fmt_line("{} {};", float_type, var);
1993  members.push_back(var);
1994  }
1995  }
1996  for (auto& var: info.currents) {
1997  if (!info.is_ion_variable(var)) {
1998  printer->fmt_line("{} {};", float_type, var);
1999  members.push_back(var);
2000  }
2001  }
2002 
2003  print_ion_var_constructor(members);
2004 
2005  printer->pop_block(";");
2006 }
2007 
2008 
// Emits the default constructor of the generated IonCurVar struct: a
// member-initializer list that zero-initializes every collected member,
// followed by an empty body, e.g. "IonCurVar() : ina(0), ena(0) {}".
// NOTE(review): only the tail of this definition is visible in this
// extraction — the opening declaration line was dropped; presumably this
// is CodegenCoreneuronCppVisitor::print_ion_var_constructor — confirm.
 2010  const std::vector<std::string>& members) {
 2011  // constructor
 2012  printer->add_newline();
 2013  printer->add_indent();
 2014  printer->add_text("IonCurVar() : ");
// Print "name(0)" for each member; a ", " separator is emitted between
// entries but not after the last one.
 2015  for (int i = 0; i < members.size(); i++) {
 2016  printer->fmt_text("{}(0)", members[i]);
 2017  if (i + 1 < members.size()) {
 2018  printer->add_text(", ");
 2019  }
 2020  }
// Empty constructor body closes the initializer list.
 2021  printer->add_text(" {}");
 2022  printer->add_newline();
 2023  }
2024 
2025 
2027  printer->add_line("IonCurVar ionvar;");
2028 }
2029 
2030 
2032  // nothing for cpu
2033 }
2034 
2035 
2037  auto type = float_data_type();
2038  printer->add_newline(2);
2039  printer->add_line("/** allocate and setup array for range variable */");
2040  printer->fmt_push_block("static inline {}* setup_range_variable(double* variable, int n)",
2041  type);
2042  printer->fmt_line("{0}* data = ({0}*) mem_alloc(n, sizeof({0}));", type);
2043  printer->push_block("for(size_t i = 0; i < n; i++)");
2044  printer->add_line("data[i] = variable[i];");
2045  printer->pop_block();
2046  printer->add_line("return data;");
2047  printer->pop_block();
2048 }
2049 
2050 
2051 /**
2052  * \details If floating point type like "float" is specified on command line then
2053  * we can't turn all variables to new type. This is because certain variables
2054  * are pointers to internal variables (e.g. ions). Hence, we check if given
2055  * variable can be safely converted to new type. If so, return new type.
2056  */
2058  // clang-format off
2059  auto with = NmodlType::read_ion_var
2060  | NmodlType::write_ion_var
2061  | NmodlType::pointer_var
2062  | NmodlType::bbcore_pointer_var
2063  | NmodlType::extern_neuron_variable;
2064  // clang-format on
2065  bool need_default_type = symbol->has_any_property(with);
2066  if (need_default_type) {
2067  return default_float_data_type();
2068  }
2069  return float_data_type();
2070 }
2071 
2072 
2074  if (range_variable_setup_required()) {
2075  print_setup_range_variable();
2076  }
2077 
2078  printer->add_newline();
2079  printer->add_line("// Allocate instance structure");
2080  printer->fmt_push_block("static void {}(NrnThread* nt, Memb_list* ml, int type)",
2082  printer->add_line("assert(!ml->instance);");
2083  printer->add_line("assert(!ml->global_variables);");
2084  printer->add_line("assert(ml->global_variables_size == 0);");
2085  printer->fmt_line("auto* const inst = new {}{{}};", instance_struct());
2086  printer->fmt_line("assert(inst->{} == &{});",
2088  global_struct_instance());
2089  printer->add_line("ml->instance = inst;");
2090  printer->fmt_line("ml->global_variables = inst->{};", naming::INST_GLOBAL_MEMBER);
2091  printer->fmt_line("ml->global_variables_size = sizeof({});", global_struct());
2092  printer->pop_block();
2093  printer->add_newline();
2094 
2095  auto const cast_inst_and_assert_validity = [&]() {
2096  printer->fmt_line("auto* const inst = static_cast<{}*>(ml->instance);", instance_struct());
2097  printer->add_line("assert(inst);");
2098  printer->fmt_line("assert(inst->{});", naming::INST_GLOBAL_MEMBER);
2099  printer->fmt_line("assert(inst->{} == &{});",
2101  global_struct_instance());
2102  printer->fmt_line("assert(inst->{} == ml->global_variables);", naming::INST_GLOBAL_MEMBER);
2103  printer->fmt_line("assert(ml->global_variables_size == sizeof({}));", global_struct());
2104  };
2105 
2106  // Must come before print_instance_struct_copy_to_device and
2107  // print_instance_struct_delete_from_device
2108  print_instance_struct_transfer_routine_declarations();
2109 
2110  printer->add_line("// Deallocate the instance structure");
2111  printer->fmt_push_block("static void {}(NrnThread* nt, Memb_list* ml, int type)",
2113  cast_inst_and_assert_validity();
2114 
2115  // delete random streams
2116  if (info.random_variables.size()) {
2117  printer->add_line("int pnodecount = ml->_nodecount_padded;");
2118  printer->add_line("int nodecount = ml->nodecount;");
2119  printer->add_line("Datum* indexes = ml->pdata;");
2120  printer->push_block("for (int id = 0; id < nodecount; id++)");
2121  for (const auto& var: info.random_variables) {
2122  const auto& name = get_variable_name(var->get_name());
2123  printer->fmt_line("nrnran123_deletestream((nrnran123_State*){});", name);
2124  }
2125  printer->pop_block();
2126  }
2127  print_instance_struct_delete_from_device();
2128  printer->add_multi_line(R"CODE(
2129  delete inst;
2130  ml->instance = nullptr;
2131  ml->global_variables = nullptr;
2132  ml->global_variables_size = 0;
2133  )CODE");
2134  printer->pop_block();
2135  printer->add_newline();
2136 
2137 
2138  printer->add_line("/** initialize mechanism instance variables */");
2139  printer->push_block("static inline void setup_instance(NrnThread* nt, Memb_list* ml)");
2140  cast_inst_and_assert_validity();
2141 
2142  std::string stride;
2143  printer->add_line("int pnodecount = ml->_nodecount_padded;");
2144  stride = "*pnodecount";
2145 
2146  printer->add_line("Datum* indexes = ml->pdata;");
2147 
2148  auto const float_type = default_float_data_type();
2149 
2150  int id = 0;
2151  std::vector<std::string> ptr_members{naming::INST_GLOBAL_MEMBER};
2152  for (auto const& [var, type]: info.neuron_global_variables) {
2153  ptr_members.push_back(var->get_name());
2154  }
2155  ptr_members.reserve(ptr_members.size() + codegen_float_variables.size() +
2156  codegen_int_variables.size());
2157  for (auto& var: codegen_float_variables) {
2158  auto name = var->get_name();
2159  auto range_var_type = get_range_var_float_type(var);
2160  if (float_type == range_var_type) {
2161  auto const variable = fmt::format("ml->data+{}{}", id, stride);
2162  printer->fmt_line("inst->{} = {};", name, variable);
2163  } else {
2164  // TODO what MOD file exercises this?
2165  printer->fmt_line("inst->{} = setup_range_variable(ml->data+{}{}, pnodecount);",
2166  name,
2167  id,
2168  stride);
2169  }
2170  ptr_members.push_back(std::move(name));
2171  id += var->get_length();
2172  }
2173 
2174  for (auto& var: codegen_int_variables) {
2175  auto name = var.symbol->get_name();
2176  auto const variable = [&var]() {
2177  if (var.is_index || var.is_integer) {
2178  return "ml->pdata";
2179  } else if (var.is_vdata) {
2180  return "nt->_vdata";
2181  } else {
2182  return "nt->_data";
2183  }
2184  }();
2185  printer->fmt_line("inst->{} = {};", name, variable);
2186  ptr_members.push_back(std::move(name));
2187  }
2188  print_instance_struct_copy_to_device();
2189  printer->pop_block(); // setup_instance
2190  printer->add_newline();
2191 
2192  print_instance_struct_transfer_routines(ptr_members);
2193 }
2194 
2195 
2197  if (info.artificial_cell) {
2198  printer->add_line("double v = 0.0;");
2199  } else {
2200  printer->add_line("int node_id = node_index[id];");
2201  printer->add_line("double v = voltage[node_id];");
2202  print_v_unused();
2203  }
2204 
2205  if (ion_variable_struct_required()) {
2206  printer->add_line("IonCurVar ionvar;");
2207  }
2208 
2209  // read ion statements
2210  auto read_statements = ion_read_statements(BlockType::Initial);
2211  for (auto& statement: read_statements) {
2212  printer->add_line(statement);
2213  }
2214 
2215  // initialize state variables (excluding ion state)
2216  for (auto& var: info.state_vars) {
2217  auto name = var->get_name();
2218  if (!info.is_ionic_conc(name)) {
2219  auto lhs = get_variable_name(name);
2220  auto rhs = get_variable_name(name + "0");
2221  if (var->is_array()) {
2222  for (int i = 0; i < var->get_length(); ++i) {
2223  printer->fmt_line("{}[{}] = {};", lhs, i, rhs);
2224  }
2225  } else {
2226  printer->fmt_line("{} = {};", lhs, rhs);
2227  }
2228  }
2229  }
2230 
2231  // initial block
2232  if (node != nullptr) {
2233  const auto& block = node->get_statement_block();
2234  print_statement_block(*block, false, false);
2235  }
2236 
2237  // write ion statements
2238  auto write_statements = ion_write_statements(BlockType::Initial);
2239  for (auto& statement: write_statements) {
2240  auto text = process_shadow_update_statement(statement, BlockType::Initial);
2241  printer->add_line(text);
2242  }
2243 }
2244 
2245 
2247  BlockType type,
2248  const std::string& function_name) {
2249  std::string method;
2250  if (function_name.empty()) {
2251  method = compute_method_name(type);
2252  } else {
2253  method = function_name;
2254  }
2255  auto args = "NrnThread* nt, Memb_list* ml, int type";
2256 
2257  // watch statement function doesn't have type argument
2258  if (type == BlockType::Watch) {
2259  args = "NrnThread* nt, Memb_list* ml";
2260  }
2261 
2262  print_global_method_annotation();
2263  printer->fmt_push_block("void {}({})", method, args);
2264  if (type != BlockType::Destructor && type != BlockType::Constructor) {
2265  // We do not (currently) support DESTRUCTOR and CONSTRUCTOR blocks
2266  // running anything on the GPU.
2267  print_kernel_data_present_annotation_block_begin();
2268  } else {
 2269  /// TODO: Remove this when the code generation is properly done
2270  /// Related to https://github.com/BlueBrain/nmodl/issues/692
2271  printer->add_line("#ifndef CORENEURON_BUILD");
2272  }
2273  printer->add_multi_line(R"CODE(
2274  int nodecount = ml->nodecount;
2275  int pnodecount = ml->_nodecount_padded;
2276  const int* node_index = ml->nodeindices;
2277  double* data = ml->data;
2278  const double* voltage = nt->_actual_v;
2279  )CODE");
2280 
2281  if (type == BlockType::Equation) {
2282  printer->add_line("double* vec_rhs = nt->_actual_rhs;");
2283  printer->add_line("double* vec_d = nt->_actual_d;");
2284  print_rhs_d_shadow_variables();
2285  }
2286  printer->add_line("Datum* indexes = ml->pdata;");
2287  printer->add_line("ThreadDatum* thread = ml->_thread;");
2288 
2289  if (type == BlockType::Initial) {
2290  printer->add_newline();
2291  printer->add_line("setup_instance(nt, ml);");
2292  }
2293  printer->fmt_line("auto* const inst = static_cast<{}*>(ml->instance);", instance_struct());
2294  printer->add_newline(1);
2295 }
2296 
2298  printer->add_newline(2);
2299  printer->add_line("/** initialize channel */");
2300 
2301  print_global_function_common_code(BlockType::Initial);
2302  if (info.derivimplicit_used()) {
2303  printer->add_newline();
2304  int nequation = info.num_equations;
2305  int list_num = info.derivimplicit_list_num;
2306  // clang-format off
2307  printer->fmt_line("int& deriv_advance_flag = *deriv{}_advance(thread);", list_num);
2308  printer->add_line("deriv_advance_flag = 0;");
2309  print_deriv_advance_flag_transfer_to_device();
2310  printer->fmt_line("auto ns = newtonspace{}(thread);", list_num);
2311  printer->fmt_line("auto& th = thread[dith{}()];", list_num);
2312  printer->push_block("if (*ns == nullptr)");
2313  printer->fmt_line("int vec_size = 2*{}*pnodecount*sizeof(double);", nequation);
2314  printer->fmt_line("double* vec = makevector(vec_size);", nequation);
2315  printer->fmt_line("th.pval = vec;", list_num);
2316  printer->fmt_line("*ns = nrn_cons_newtonspace({}, pnodecount);", nequation);
2317  print_newtonspace_transfer_to_device();
2318  printer->pop_block();
2319  // clang-format on
2320  }
2321 
2322  // update global variable as those might be updated via python/hoc API
2323  // NOTE: CoreNEURON has enough information to do this on its own, which
2324  // would be neater.
2325  print_global_variable_device_update_annotation();
2326 
2327  if (skip_init_check) {
2328  printer->push_block("if (_nrn_skip_initmodel == 0)");
2329  }
2330 
2331  if (!info.changed_dt.empty()) {
2332  printer->fmt_line("double _save_prev_dt = {};",
2333  get_variable_name(naming::NTHREAD_DT_VARIABLE));
2334  printer->fmt_line("{} = {};",
2335  get_variable_name(naming::NTHREAD_DT_VARIABLE),
2336  info.changed_dt);
2337  print_dt_update_to_device();
2338  }
2339 
2340  print_channel_iteration_block_parallel_hint(BlockType::Initial, info.initial_node);
2341  printer->push_block("for (int id = 0; id < nodecount; id++)");
2342 
2343  if (info.net_receive_node != nullptr) {
2344  printer->fmt_line("{} = -1e20;", get_variable_name("tsave"));
2345  }
2346 
2347  print_initial_block(info.initial_node);
2348  printer->pop_block();
2349 
2350  if (!info.changed_dt.empty()) {
2351  printer->fmt_line("{} = _save_prev_dt;", get_variable_name(naming::NTHREAD_DT_VARIABLE));
2352  print_dt_update_to_device();
2353  }
2354 
2355  printer->pop_block();
2356 
2357  if (info.derivimplicit_used()) {
2358  printer->add_line("deriv_advance_flag = 1;");
2359  print_deriv_advance_flag_transfer_to_device();
2360  }
2361 
2362  if (info.net_send_used && !info.artificial_cell) {
2363  print_send_event_move();
2364  }
2365 
2366  print_kernel_data_present_annotation_block_end();
2367  if (skip_init_check) {
2368  printer->pop_block();
2369  }
2370 }
2371 
2373  size_t block_id) {
2374  std::string ba_type;
2375  std::shared_ptr<ast::BABlock> ba_block;
2376 
2377  if (node->is_before_block()) {
2378  ba_block = dynamic_cast<const ast::BeforeBlock*>(node)->get_bablock();
2379  ba_type = "BEFORE";
2380  } else {
2381  ba_block = dynamic_cast<const ast::AfterBlock*>(node)->get_bablock();
2382  ba_type = "AFTER";
2383  }
2384 
2385  std::string ba_block_type = ba_block->get_type()->eval();
2386 
2387  /// name of the before/after function
2388  std::string function_name = method_name(fmt::format("nrn_before_after_{}", block_id));
2389 
2390  /// print common function code like init/state/current
2391  printer->add_newline(2);
2392  printer->fmt_line("/** {} of block type {} # {} */", ba_type, ba_block_type, block_id);
2393  print_global_function_common_code(BlockType::BeforeAfter, function_name);
2394 
2395  print_channel_iteration_block_parallel_hint(BlockType::BeforeAfter, node);
2396  printer->push_block("for (int id = 0; id < nodecount; id++)");
2397 
2398  printer->add_line("int node_id = node_index[id];");
2399  printer->add_line("double v = voltage[node_id];");
2400  print_v_unused();
2401 
2402  // read ion statements
2403  const auto& read_statements = ion_read_statements(BlockType::Equation);
2404  for (auto& statement: read_statements) {
2405  printer->add_line(statement);
2406  }
2407 
2408  /// print main body
2409  printer->add_indent();
2410  print_statement_block(*ba_block->get_statement_block());
2411  printer->add_newline();
2412 
2413  // write ion statements
2414  const auto& write_statements = ion_write_statements(BlockType::Equation);
2415  for (auto& statement: write_statements) {
2416  auto text = process_shadow_update_statement(statement, BlockType::Equation);
2417  printer->add_line(text);
2418  }
2419 
2420  /// loop end including data annotation block
2421  printer->pop_block();
2422  printer->pop_block();
2423  print_kernel_data_present_annotation_block_end();
2424 }
2425 
2427  printer->add_newline(2);
2428  print_global_function_common_code(BlockType::Constructor);
2429  if (info.constructor_node != nullptr) {
2430  const auto& block = info.constructor_node->get_statement_block();
2431  print_statement_block(*block, false, false);
2432  }
2433  printer->add_line("#endif");
2434  printer->pop_block();
2435 }
2436 
2437 
2439  printer->add_newline(2);
2440  print_global_function_common_code(BlockType::Destructor);
2441  if (info.destructor_node != nullptr) {
2442  const auto& block = info.destructor_node->get_statement_block();
2443  print_statement_block(*block, false, false);
2444  }
2445  printer->add_line("#endif");
2446  printer->pop_block();
2447 }
2448 
2449 
2451  for (const auto& functor_name: info.functor_names) {
2452  printer->add_newline(2);
2453  print_functor_definition(*functor_name.first);
2454  }
2455 }
2456 
2457 
2459  printer->add_newline(2);
2460  auto method = method_name(naming::NRN_ALLOC_METHOD);
2461  printer->fmt_push_block("static void {}(double* data, Datum* indexes, int type)", method);
2462  printer->add_line("// do nothing");
2463  printer->pop_block();
2464 }
2465 
2466 /**
2467  * \todo Number of watch could be more than number of statements
2468  * according to grammar. Check if this is correctly handled in neuron
2469  * and coreneuron.
2470  */
2472  if (info.watch_statements.empty()) {
2473  return;
2474  }
2475 
2476  printer->add_newline(2);
2477  auto inst = fmt::format("{}* inst", instance_struct());
2478 
2479  printer->fmt_push_block(
2480  "static void nrn_watch_activate({}, int id, int pnodecount, int watch_id, "
2481  "double v, bool &watch_remove)",
2482  inst);
2483 
2484  // initialize all variables only during first watch statement
2485  printer->push_block("if (watch_remove == false)");
2486  for (int i = 0; i < info.watch_count; i++) {
2487  auto name = get_variable_name(fmt::format("watch{}", i + 1));
2488  printer->fmt_line("{} = 0;", name);
2489  }
2490  printer->add_line("watch_remove = true;");
2491  printer->pop_block();
2492 
2493  /**
2494  * \todo Similar to neuron/coreneuron we are using
2495  * first watch and ignoring rest.
2496  */
2497  for (int i = 0; i < info.watch_statements.size(); i++) {
2498  auto statement = info.watch_statements[i];
2499  printer->fmt_push_block("if (watch_id == {})", i);
2500 
2501  auto varname = get_variable_name(fmt::format("watch{}", i + 1));
2502  printer->add_indent();
2503  printer->fmt_text("{} = 2 + (", varname);
2504  auto watch = statement->get_statements().front();
2505  watch->get_expression()->visit_children(*this);
2506  printer->add_text(");");
2507  printer->add_newline();
2508 
2509  printer->pop_block();
2510  }
2511  printer->pop_block();
2512 }
2513 
2514 
2515 /**
2516  * \todo Similar to print_watch_activate, we are using only
2517  * first watch. need to verify with neuron/coreneuron about rest.
2518  */
2520  if (info.watch_statements.empty()) {
2521  return;
2522  }
2523 
2524  printer->add_newline(2);
2525  printer->add_line("/** routine to check watch activation */");
2526  print_global_function_common_code(BlockType::Watch);
2527 
2528  // WATCH statements appears in NET_RECEIVE block and while printing
2529  // net_receive function we already check if it contains any MUTEX/PROTECT
2530  // constructs. As WATCH is not a top level block but list of statements,
2531  // we don't need to have ivdep pragma related check
2532  print_channel_iteration_block_parallel_hint(BlockType::Watch, nullptr);
2533 
2534  printer->push_block("for (int id = 0; id < nodecount; id++)");
2535 
2536  if (info.is_voltage_used_by_watch_statements()) {
2537  printer->add_line("int node_id = node_index[id];");
2538  printer->add_line("double v = voltage[node_id];");
2539  print_v_unused();
2540  }
2541 
 2542  // flag to make sure only one WATCH statement can be triggered at a time
2543  printer->add_line("bool watch_untriggered = true;");
2544 
2545  for (int i = 0; i < info.watch_statements.size(); i++) {
2546  auto statement = info.watch_statements[i];
2547  const auto& watch = statement->get_statements().front();
2548  const auto& varname = get_variable_name(fmt::format("watch{}", i + 1));
2549 
2550  // start block 1
2551  printer->fmt_push_block("if ({}&2 && watch_untriggered)", varname);
2552 
2553  // start block 2
2554  printer->add_indent();
2555  printer->add_text("if (");
2556  watch->get_expression()->accept(*this);
2557  printer->add_text(") {");
2558  printer->add_newline();
2559  printer->increase_indent();
2560 
2561  // start block 3
2562  printer->fmt_push_block("if (({}&1) == 0)", varname);
2563 
2564  printer->add_line("watch_untriggered = false;");
2565 
2566  const auto& tqitem = get_variable_name("tqitem");
2567  const auto& point_process = get_variable_name("point_process");
2568  printer->add_indent();
2569  printer->add_text("net_send_buffering(");
2570  const auto& t = get_variable_name("t");
2571  printer->fmt_text("nt, ml->_net_send_buffer, 0, {}, -1, {}, {}+0.0, ",
2572  tqitem,
2573  point_process,
2574  t);
2575  watch->get_value()->accept(*this);
2576  printer->add_text(");");
2577  printer->add_newline();
2578  printer->pop_block();
2579 
2580  printer->add_line(varname, " = 3;");
2581  // end block 3
2582 
2583  // start block 3
2584  printer->decrease_indent();
2585  printer->push_block("} else");
2586  printer->add_line(varname, " = 2;");
2587  printer->pop_block();
2588  // end block 3
2589 
2590  printer->pop_block();
2591  // end block 1
2592  }
2593 
2594  printer->pop_block();
2595  print_send_event_move();
2596  print_kernel_data_present_annotation_block_end();
2597  printer->pop_block();
2598 }
2599 
2600 
2602  bool need_mech_inst) {
2603  printer->add_multi_line(R"CODE(
2604  int tid = pnt->_tid;
2605  int id = pnt->_i_instance;
2606  double v = 0;
2607  )CODE");
2608 
2609  if (info.artificial_cell || node.is_initial_block()) {
2610  printer->add_line("NrnThread* nt = nrn_threads + tid;");
2611  printer->add_line("Memb_list* ml = nt->_ml_list[pnt->_type];");
2612  }
2613  if (node.is_initial_block()) {
2614  print_kernel_data_present_annotation_block_begin();
2615  }
2616 
2617  printer->add_multi_line(R"CODE(
2618  int nodecount = ml->nodecount;
2619  int pnodecount = ml->_nodecount_padded;
2620  double* data = ml->data;
2621  double* weights = nt->weights;
2622  Datum* indexes = ml->pdata;
2623  ThreadDatum* thread = ml->_thread;
2624  )CODE");
2625  if (need_mech_inst) {
2626  printer->fmt_line("auto* const inst = static_cast<{0}*>(ml->instance);", instance_struct());
2627  }
2628 
2629  if (node.is_initial_block()) {
2630  print_net_init_acc_serial_annotation_block_begin();
2631  }
2632 
2633  // rename variables but need to see if they are actually used
2634  auto parameters = info.net_receive_node->get_parameters();
2635  if (!parameters.empty()) {
2636  int i = 0;
2637  printer->add_newline();
2638  for (auto& parameter: parameters) {
2639  auto name = parameter->get_node_name();
2640  bool var_used = VarUsageVisitor().variable_used(node, "(*" + name + ")");
2641  if (var_used) {
2642  printer->fmt_line("double* {} = weights + weight_index + {};", name, i);
2643  RenameVisitor vr(name, "*" + name);
2644  node.visit_children(vr);
2645  }
2646  i++;
2647  }
2648  }
2649 }
2650 
2651 
2653  auto const& arguments = node.get_arguments();
2654  const auto& tqitem = get_variable_name("tqitem");
2655  std::string weight_index = "weight_index";
2656  std::string pnt = "pnt";
2657 
2658  // for functions not generated from NET_RECEIVE blocks (i.e. top level INITIAL block)
2659  // the weight_index argument is 0.
2660  if (!printing_net_receive && !printing_net_init) {
2661  weight_index = "0";
2662  auto var = get_variable_name("point_process");
2663  if (info.artificial_cell) {
2664  pnt = "(Point_process*)" + var;
2665  }
2666  }
2667 
2668  // artificial cells don't use spike buffering
2669  // clang-format off
2670  if (info.artificial_cell) {
2671  printer->fmt_text("artcell_net_send(&{}, {}, {}, nt->_t+", tqitem, weight_index, pnt);
2672  } else {
2673  const auto& point_process = get_variable_name("point_process");
2674  const auto& t = get_variable_name("t");
2675  printer->add_text("net_send_buffering(");
2676  printer->fmt_text("nt, ml->_net_send_buffer, 0, {}, {}, {}, {}+", tqitem, weight_index, point_process, t);
2677  }
2678  // clang-format off
2679  print_vector_elements(arguments, ", ");
2680  printer->add_text(')');
2681 }
2682 
2683 
2685  if (!printing_net_receive && !printing_net_init) {
2686  throw std::runtime_error("Error : net_move only allowed in NET_RECEIVE block");
2687  }
2688 
2689  auto const& arguments = node.get_arguments();
2690  const auto& tqitem = get_variable_name("tqitem");
2691  std::string weight_index = "-1";
2692  std::string pnt = "pnt";
2693 
2694  // artificial cells don't use spike buffering
2695  // clang-format off
2696  if (info.artificial_cell) {
2697  printer->fmt_text("artcell_net_move(&{}, {}, ", tqitem, pnt);
2698  print_vector_elements(arguments, ", ");
2699  printer->add_text(")");
2700  } else {
2701  const auto& point_process = get_variable_name("point_process");
2702  printer->add_text("net_send_buffering(");
2703  printer->fmt_text("nt, ml->_net_send_buffer, 2, {}, {}, {}, ", tqitem, weight_index, point_process);
2704  print_vector_elements(arguments, ", ");
2705  printer->add_text(", 0.0");
2706  printer->add_text(")");
2707  }
2708 }
2709 
2710 
2712  const auto& arguments = node.get_arguments();
2713  if (info.artificial_cell) {
2714  printer->add_text("net_event(pnt, ");
2715  print_vector_elements(arguments, ", ");
2716  } else {
2717  const auto& point_process = get_variable_name("point_process");
2718  printer->add_text("net_send_buffering(");
2719  printer->fmt_text("nt, ml->_net_send_buffer, 1, -1, -1, {}, ", point_process);
2720  print_vector_elements(arguments, ", ");
2721  printer->add_text(", 0.0");
2722  }
2723  printer->add_text(")");
2724 }
2725 
2726 /**
2727  * Rename arguments to NET_RECEIVE block with corresponding pointer variable
2728  *
2729  * Arguments to NET_RECEIVE block are packed and passed via weight vector. These
2730  * variables need to be replaced with corresponding pointer variable. For example,
2731  * if mod file is like
2732  *
2733  * \code{.mod}
2734  * NET_RECEIVE (weight, R){
2735  * INITIAL {
2736  * R=1
2737  * }
2738  * }
2739  * \endcode
2740  *
2741  * then generated code for initial block should be:
2742  *
2743  * \code{.cpp}
2744  * double* R = weights + weight_index + 0;
2745  * (*R) = 1.0;
2746  * \endcode
2747  *
2748  * So, the `R` in AST needs to be renamed with `(*R)`.
2749  */
2750 static void rename_net_receive_arguments(const ast::NetReceiveBlock& net_receive_node, const ast::Node& node) {
2751  const auto& parameters = net_receive_node.get_parameters();
2752  for (auto& parameter: parameters) {
2753  const auto& name = parameter->get_node_name();
2754  auto var_used = VarUsageVisitor().variable_used(node, name);
2755  if (var_used) {
2756  RenameVisitor vr(name, "(*" + name + ")");
2757  node.get_statement_block()->visit_children(vr);
2758  }
2759  }
2760 }
2761 
2762 
2764  const auto node = info.net_receive_initial_node;
2765  if (node == nullptr) {
2766  return;
2767  }
2768 
2769  // rename net_receive arguments used in the initial block of net_receive
2770  rename_net_receive_arguments(*info.net_receive_node, *node);
2771 
2772  printing_net_init = true;
2773  auto args = "Point_process* pnt, int weight_index, double flag";
2774  printer->add_newline(2);
2775  printer->add_line("/** initialize block for net receive */");
2776  printer->fmt_push_block("static void net_init({})", args);
2777  auto block = node->get_statement_block().get();
2778  if (block->get_statements().empty()) {
2779  printer->add_line("// do nothing");
2780  } else {
2781  print_net_receive_common_code(*node);
2782  print_statement_block(*block, false, false);
2783  if (node->is_initial_block()) {
2784  print_net_init_acc_serial_annotation_block_end();
2785  print_kernel_data_present_annotation_block_end();
2786  printer->add_line("auto& nsb = ml->_net_send_buffer;");
2787  print_net_send_buf_update_to_host();
2788  }
2789  }
2790  printer->pop_block();
2791  printing_net_init = false;
2792 }
2793 
2794 
// Body of print_send_event_move (signature line not shown in this listing).
// Emits code that drains NetSendBuffer_t on the host: copies the buffer from the
// device, dispatches every queued event through net_sem_from_gpu, then resets the
// count and pushes the count back to the device.
2796  printer->add_newline();
2797  printer->add_line("NetSendBuffer_t* nsb = ml->_net_send_buffer;");
2798  print_net_send_buf_update_to_host();
2799  printer->push_block("for (int i=0; i < nsb->_cnt; i++)");
2800  printer->add_multi_line(R"CODE(
2801  int type = nsb->_sendtype[i];
2802  int tid = nt->id;
2803  double t = nsb->_nsb_t[i];
2804  double flag = nsb->_nsb_flag[i];
2805  int vdata_index = nsb->_vdata_index[i];
2806  int weight_index = nsb->_weight_index[i];
2807  int point_index = nsb->_pnt_index[i];
2808  net_sem_from_gpu(type, vdata_index, weight_index, tid, point_index, t, flag);
2809  )CODE");
2810  printer->pop_block();
2811  printer->add_line("nsb->_cnt = 0;");
2812  print_net_send_buf_count_update_to_device();
2813 }
2814 
2815 
// Body of net_receive_buffering_declaration (Doxygen index: definition at original
// line 2816). Returns the signature string for the generated net_buf_receive function.
2817  return fmt::format("void {}(NrnThread* nt)", method_name("net_buf_receive"));
2818 }
2819 
2820 
// Body of print_get_memb_list (signature line not shown in this listing).
// Emits the Memb_list lookup with an early return when this mechanism is not
// present in the thread.
2822  printer->add_line("Memb_list* ml = get_memb_list(nt);");
2823  printer->push_block("if (!ml)");
2824  printer->add_line("return;");
2825  printer->pop_block();
2826  printer->add_newline();
2827 }
2828 
2829 
// Body of print_net_receive_loop_begin (Doxygen index: definition at original line
// 2830). Opens the outer loop over the displacement entries of the receive buffer.
2831  printer->add_line("int count = nrb->_displ_cnt;");
2832  print_channel_iteration_block_parallel_hint(BlockType::NetReceive, info.net_receive_node);
2833  printer->push_block("for (int i = 0; i < count; i++)");
2834 }
2835 
2836 
// Body of print_net_receive_loop_end: closes the loop opened above.
2838  printer->pop_block();
2839 }
2840 
2841 
// Body of print_net_receive_buffering (signature line not shown in this listing).
// Emits net_buf_receive: flushes NetReceiveBuffer_t by iterating its
// displacement-sorted entries and invoking the net_receive kernel per event, then
// resets the buffer counters and, when net_send/net_event is used, drains the send
// buffer as well. Skipped entirely for artificial cells.
2843  if (!net_receive_required() || info.artificial_cell) {
2844  return;
2845  }
2846  printer->add_newline(2);
2847  printer->push_block(net_receive_buffering_declaration());
2848 
2849  print_get_memb_list();
2850 
2851  const auto& net_receive = method_name("net_receive_kernel");
2852 
2853  print_kernel_data_present_annotation_block_begin();
2854 
2855  printer->add_line("NetReceiveBuffer_t* nrb = ml->_net_receive_buffer;");
2856  if (need_mech_inst) {
2857  printer->fmt_line("auto* const inst = static_cast<{0}*>(ml->instance);", instance_struct());
2858  }
2859  print_net_receive_loop_begin();
// Events for one target are grouped contiguously between _displ[i] and _displ[i+1].
2860  printer->add_line("int start = nrb->_displ[i];");
2861  printer->add_line("int end = nrb->_displ[i+1];");
2862  printer->push_block("for (int j = start; j < end; j++)");
2863  printer->add_multi_line(R"CODE(
2864  int index = nrb->_nrb_index[j];
2865  int offset = nrb->_pnt_index[index];
2866  double t = nrb->_nrb_t[index];
2867  int weight_index = nrb->_weight_index[index];
2868  double flag = nrb->_nrb_flag[index];
2869  Point_process* point_process = nt->pntprocs + offset;
2870  )CODE");
2871  printer->add_line(net_receive, "(t, point_process, inst, nt, ml, weight_index, flag);");
2872  printer->pop_block();
2873  print_net_receive_loop_end();
2874 
2875  print_device_stream_wait();
2876  printer->add_line("nrb->_displ_cnt = 0;");
2877  printer->add_line("nrb->_cnt = 0;");
2878 
2879  if (info.net_send_used || info.net_event_used) {
2880  print_send_event_move();
2881  }
2882 
2883  print_kernel_data_present_annotation_block_end();
2884  printer->pop_block();
2885 }
2886 
2887 
// Body of print_net_send_buffering_cnt_update (signature line not shown in this
// listing): CPU variant claims the next slot by post-incrementing the count.
2889  printer->add_line("i = nsb->_cnt++;");
2890 }
2891 
2892 
// Body of print_net_send_buffering_grow: CPU variant grows the buffer when the
// claimed index would overflow its current size.
2894  printer->push_block("if (i >= nsb->_size)");
2895  printer->add_line("nsb->grow();");
2896  printer->pop_block();
2897 }
2898 
2899 
// Body of print_net_send_buffering (signature line not shown in this listing).
// Emits the device-annotated net_send_buffering helper that enqueues one event into
// NetSendBuffer_t. Slot claiming and growth are delegated to the two backend hooks
// above so GPU backends can override them.
2901  if (!net_send_buffer_required()) {
2902  return;
2903  }
2904 
2905  printer->add_newline(2);
2906  print_device_method_annotation();
2907  auto args =
2908  "const NrnThread* nt, NetSendBuffer_t* nsb, int type, int vdata_index, "
2909  "int weight_index, int point_index, double t, double flag";
2910  printer->fmt_push_block("static inline void net_send_buffering({})", args);
2911  printer->add_line("int i = 0;");
2912  print_net_send_buffering_cnt_update();
2913  print_net_send_buffering_grow();
2914  printer->push_block("if (i < nsb->_size)");
2915  printer->add_multi_line(R"CODE(
2916  nsb->_sendtype[i] = type;
2917  nsb->_vdata_index[i] = vdata_index;
2918  nsb->_weight_index[i] = weight_index;
2919  nsb->_pnt_index[i] = point_index;
2920  nsb->_nsb_t[i] = t;
2921  nsb->_nsb_flag[i] = flag;
2922  )CODE");
2923  printer->pop_block();
2924  printer->pop_block();
2925 }
2926 
2927 
// Body of print_net_receive_kernel (signature line not shown in this listing).
// Emits the NET_RECEIVE body as a static inline function. Non-artificial cells get
// `net_receive_kernel` with explicit t/pnt/inst/nt/ml/weight_index/flag parameters;
// artificial cells get the plain `net_receive` name and read t from nt->_t.
2929  if (!net_receive_required()) {
2930  return;
2931  }
2932 
2933  printing_net_receive = true;
2934  const auto node = info.net_receive_node;
2935 
2936  // rename net_receive arguments used in the block itself
2937  rename_net_receive_arguments(*info.net_receive_node, *node);
2938 
2939  std::string name;
2940  ParamVector params;
2941  if (!info.artificial_cell) {
2942  name = method_name("net_receive_kernel");
2943  params.emplace_back("", "double", "", "t");
2944  params.emplace_back("", "Point_process*", "", "pnt");
2945  params.emplace_back("", fmt::format("{}*", instance_struct()),
2946  "", "inst");
2947  params.emplace_back("", "NrnThread*", "", "nt");
2948  params.emplace_back("", "Memb_list*", "", "ml");
2949  params.emplace_back("", "int", "", "weight_index");
2950  params.emplace_back("", "double", "", "flag");
2951  } else {
2952  name = method_name("net_receive");
2953  params.emplace_back("", "Point_process*", "", "pnt");
2954  params.emplace_back("", "int", "", "weight_index");
2955  params.emplace_back("", "double", "", "flag");
2956  }
2957 
2958  printer->add_newline(2);
2959  printer->fmt_push_block("static inline void {}({})", name, get_parameter_str(params));
2960  print_net_receive_common_code(*node, info.artificial_cell);
2961  if (info.artificial_cell) {
2962  printer->add_line("double t = nt->_t;");
2963  }
2964 
2965  // set voltage variable if it is used in the block (e.g. for WATCH statement)
2966  auto v_used = VarUsageVisitor().variable_used(*node->get_statement_block(), "v");
2967  if (v_used) {
2968  printer->add_line("int node_id = ml->nodeindices[id];");
2969  printer->add_line("v = nt->_actual_v[node_id];");
2970  }
2971 
// Record the event time before executing the user's statements.
2972  printer->fmt_line("{} = t;", get_variable_name("tsave"));
2973 
2974  if (info.is_watch_used()) {
2975  printer->add_line("bool watch_remove = false;");
2976  }
2977 
2978  printer->add_indent();
2979  node->get_statement_block()->accept(*this);
2980  printer->add_newline();
2981  printer->pop_block();
2982 
2983  printing_net_receive = false;
2984 }
2985 
2986 
// Body of print_net_receive (signature line not shown in this listing).
// For non-artificial cells, emits the buffering `net_receive` entry point that
// appends the incoming event (point process index, weight index, time, flag) into
// NetReceiveBuffer_t, growing the buffer first when it is full. The buffered events
// are later flushed by net_buf_receive (see print_net_receive_buffering).
2988  if (!net_receive_required()) {
2989  return;
2990  }
2991 
2992  printing_net_receive = true;
2993  if (!info.artificial_cell) {
2994  const auto& name = method_name("net_receive");
2995  ParamVector params;
2996  params.emplace_back("", "Point_process*", "", "pnt");
2997  params.emplace_back("", "int", "", "weight_index");
2998  params.emplace_back("", "double", "", "flag");
2999  printer->add_newline(2);
3000  printer->fmt_push_block("static void {}({})", name, get_parameter_str(params));
3001  printer->add_line("NrnThread* nt = nrn_threads + pnt->_tid;");
3002  printer->add_line("Memb_list* ml = get_memb_list(nt);");
3003  printer->add_line("NetReceiveBuffer_t* nrb = ml->_net_receive_buffer;");
3004  printer->push_block("if (nrb->_cnt >= nrb->_size)");
3005  printer->add_line("realloc_net_receive_buffer(nt, ml);");
3006  printer->pop_block();
3007  printer->add_multi_line(R"CODE(
3008  int id = nrb->_cnt;
3009  nrb->_pnt_index[id] = pnt-nt->pntprocs;
3010  nrb->_weight_index[id] = weight_index;
3011  nrb->_nrb_t[id] = nt->_t;
3012  nrb->_nrb_flag[id] = flag;
3013  nrb->_cnt++;
3014  )CODE");
3015  printer->pop_block();
3016  }
3017  printing_net_receive = false;
3018 }
3019 
3020 
3021 /**
3022  * \todo Data is not derived. Need to add instance into instance struct?
3023  * data used here is wrong in AoS because as in original implementation,
3024  * data is not incremented every iteration for AoS. May be better to derive
3025  * actual variable names? [resolved now?]
3026  * slist needs to be added as a local variable
3027  */
// Body of print_derivimplicit_kernel (Doxygen index: definition at original line
// 3028; the signature line is not shown in this listing). Emits two pieces of code
// for the derivimplicit method:
//  1. an anonymous-namespace functor _newton_<block>_<suffix> whose operator()
//     evaluates the user's derivative block and writes the Newton residual into
//     dlist<n+1> (backward-Euler residual when the deriv advance flag is set,
//     otherwise the plain state difference);
//  2. the <block>_<suffix> wrapper that snapshots the current states into
//     savstate<n> and calls nrn_newton_thread with that functor.
3029  auto ext_args = external_method_arguments();
3030  auto ext_params = external_method_parameters();
3031  auto suffix = info.mod_suffix;
3032  auto list_num = info.derivimplicit_list_num;
3033  auto block_name = block.get_node_name();
3034  auto primes_size = info.primes_size;
// SoA addressing: element i of variable list lives at [i*pnodecount+id].
3035  auto stride = "*pnodecount+id";
3036 
3037  printer->add_newline(2);
3038 
3039  printer->push_block("namespace");
3040  printer->fmt_push_block("struct _newton_{}_{}", block_name, info.mod_suffix);
3041  printer->fmt_push_block("int operator()({}) const", external_method_parameters());
// Shared code fragments reused by both the functor and the wrapper below.
3042  auto const instance = fmt::format("auto* const inst = static_cast<{0}*>(ml->instance);",
3043  instance_struct());
3044  auto const slist1 = fmt::format("auto const& slist{} = {};",
3045  list_num,
3046  get_variable_name(fmt::format("slist{}", list_num)));
3047  auto const slist2 = fmt::format("auto& slist{} = {};",
3048  list_num + 1,
3049  get_variable_name(fmt::format("slist{}", list_num + 1)));
3050  auto const dlist1 = fmt::format("auto const& dlist{} = {};",
3051  list_num,
3052  get_variable_name(fmt::format("dlist{}", list_num)));
3053  auto const dlist2 = fmt::format(
3054  "double* dlist{} = static_cast<double*>(thread[dith{}()].pval) + ({}*pnodecount);",
3055  list_num + 1,
3056  list_num,
3057  info.primes_size);
3058  printer->add_line(instance);
3059  if (ion_variable_struct_required()) {
3060  print_ion_variable();
3061  }
3062  printer->fmt_line("double* savstate{} = static_cast<double*>(thread[dith{}()].pval);",
3063  list_num,
3064  list_num);
3065  printer->add_line(slist1);
3066  printer->add_line(dlist1);
3067  printer->add_line(dlist2);
3068 
3069  print_statement_block(*block.get_statement_block(), false, false);
3070 
3071  printer->add_line("int counter = -1;");
3072  printer->fmt_push_block("for (int i=0; i<{}; i++)", info.num_primes);
3073  printer->fmt_push_block("if (*deriv{}_advance(thread))", list_num);
3074  printer->fmt_line(
3075  "dlist{0}[(++counter){1}] = "
3076  "data[dlist{2}[i]{1}]-(data[slist{2}[i]{1}]-savstate{2}[i{1}])/nt->_dt;",
3077  list_num + 1,
3078  stride,
3079  list_num);
3080  printer->chain_block("else");
3081  printer->fmt_line("dlist{0}[(++counter){1}] = data[slist{2}[i]{1}]-savstate{2}[i{1}];",
3082  list_num + 1,
3083  stride,
3084  list_num);
3085  printer->pop_block();
3086  printer->pop_block();
3087  printer->add_line("return 0;");
3088  printer->pop_block(); // operator()
3089  printer->pop_block(";"); // struct
3090  printer->pop_block(); // namespace
3091  printer->add_newline();
// Wrapper: save current states, then run the Newton solver over the functor.
3092  printer->fmt_push_block("int {}_{}({})", block_name, suffix, ext_params);
3093  printer->add_line(instance);
3094  printer->fmt_line("double* savstate{} = (double*) thread[dith{}()].pval;", list_num, list_num);
3095  printer->add_line(slist1);
3096  printer->add_line(slist2);
3097  printer->add_line(dlist2);
3098  printer->fmt_push_block("for (int i=0; i<{}; i++)", info.num_primes);
3099  printer->fmt_line("savstate{}[i{}] = data[slist{}[i]{}];", list_num, stride, list_num, stride);
3100  printer->pop_block();
3101  printer->fmt_line(
3102  "int reset = nrn_newton_thread(static_cast<NewtonSpace*>(*newtonspace{}(thread)), {}, "
3103  "slist{}, _newton_{}_{}{{}}, dlist{}, {});",
3104  list_num,
3105  primes_size,
3106  list_num + 1,
3107  block_name,
3108  suffix,
3109  list_num + 1,
3110  ext_args);
3111  printer->add_line("return reset;");
3112  printer->pop_block();
3113  printer->add_newline(2);
3114 }
3115 
3116 
// Body of print_newtonspace_transfer_to_device (Doxygen index: definition at
// original line 3117). Intentionally empty on the CPU backend; GPU backends
// override this hook to copy the NewtonSpace structure to the device.
3118  // nothing to do on cpu
3119 }
3120 
3121 
3122 /****************************************************************************************/
3123 /* Print nrn_state routine */
3124 /****************************************************************************************/
3125 
3126 
// Body of print_nrn_state (signature line not shown in this listing).
// Emits the nrn_state update routine: a per-node loop that reads the voltage,
// applies ion read statements, runs the state (derivative) block — or the
// BREAKPOINT block when the mechanism writes no currents — and finally applies the
// ion write statements.
3128  if (!nrn_state_required()) {
3129  return;
3130  }
3131 
3132  printer->add_newline(2);
3133  printer->add_line("/** update state */");
3134  print_global_function_common_code(BlockType::State);
3135  print_channel_iteration_block_parallel_hint(BlockType::State, info.nrn_state_block);
3136  printer->push_block("for (int id = 0; id < nodecount; id++)");
3137 
3138  printer->add_line("int node_id = node_index[id];");
3139  printer->add_line("double v = voltage[node_id];");
3140  print_v_unused();
3141 
3142  /**
3143  * \todo Eigen solver node also emits IonCurVar variable in the functor
3144  * but that shouldn't update ions in derivative block
3145  */
3146  if (ion_variable_struct_required()) {
3147  print_ion_variable();
3148  }
3149 
3150  auto read_statements = ion_read_statements(BlockType::State);
3151  for (auto& statement: read_statements) {
3152  printer->add_line(statement);
3153  }
3154 
3155  if (info.nrn_state_block) {
3156  info.nrn_state_block->visit_children(*this);
3157  }
3158 
// No currents written: the BREAKPOINT block itself is executed as the state update.
3159  if (info.currents.empty() && info.breakpoint_node != nullptr) {
3160  auto block = info.breakpoint_node->get_statement_block();
3161  print_statement_block(*block, false, false);
3162  }
3163 
3164  const auto& write_statements = ion_write_statements(BlockType::State);
3165  for (auto& statement: write_statements) {
3166  const auto& text = process_shadow_update_statement(statement, BlockType::State);
3167  printer->add_line(text);
3168  }
3169  printer->pop_block();
3170 
3171  print_kernel_data_present_annotation_block_end();
3172 
3173  printer->pop_block();
3174 }
3175 
3176 
3177 /****************************************************************************************/
3178 /* Print nrn_cur related routines */
3179 /****************************************************************************************/
3180 
3181 
// Body of print_nrn_current (signature line not shown in this listing).
// Emits the helper nrn_current_<suffix> that executes the BREAKPOINT block and
// returns the sum of all current variables; used by the non-conductance nrn_cur
// path to differentiate the current numerically.
3183  const auto& args = internal_method_parameters();
3184  const auto& block = node.get_statement_block();
3185  printer->add_newline(2);
3186  print_device_method_annotation();
3187  printer->fmt_push_block("inline double nrn_current_{}({})",
3188  info.mod_suffix,
3189  get_parameter_str(args));
3190  printer->add_line("double current = 0.0;");
3191  print_statement_block(*block, false, false);
3192  for (auto& current: info.currents) {
3193  const auto& name = get_variable_name(current);
3194  printer->fmt_line("current += {};", name);
3195  }
3196  printer->add_line("return current;");
3197  printer->pop_block();
3198 }
3199 
3200 
// Body of print_nrn_cur_conductance_kernel (signature line not shown in this
// listing). Used when CONDUCTANCE statements are present: emits the BREAKPOINT
// block, sums the written currents into `rhs` and the conductances into `g`, and
// accumulates each ion conductance into its di<ion>dv shadow variable.
3202  const auto& block = node.get_statement_block();
3203  print_statement_block(*block, false, false);
3204  if (!info.currents.empty()) {
3205  std::string sum;
3206  for (const auto& current: info.currents) {
3207  auto var = breakpoint_current(current);
3208  sum += get_variable_name(var);
// Join terms with '+', omitting the trailing operator after the last one.
3209  if (&current != &info.currents.back()) {
3210  sum += "+";
3211  }
3212  }
3213  printer->fmt_line("double rhs = {};", sum);
3214  }
3215 
3216  std::string sum;
3217  for (const auto& conductance: info.conductances) {
3218  auto var = breakpoint_current(conductance.variable);
3219  sum += get_variable_name(var);
3220  if (&conductance != &info.conductances.back()) {
3221  sum += "+";
3222  }
3223  }
3224  printer->fmt_line("double g = {};", sum);
3225 
3226  for (const auto& conductance: info.conductances) {
3227  if (!conductance.ion.empty()) {
3228  const auto& lhs = std::string(naming::ION_VARNAME_PREFIX) + "di" + conductance.ion + "dv";
3229  const auto& rhs = get_variable_name(conductance.variable);
3230  const ShadowUseStatement statement{lhs, "+=", rhs};
3231  const auto& text = process_shadow_update_statement(statement, BlockType::Equation);
3232  printer->add_line(text);
3233  }
3234  }
3235 }
3236 
3237 
// Body of print_nrn_cur_non_conductance_kernel (signature line not shown in this
// listing). Used when no CONDUCTANCE statements exist: estimates the conductance by
// finite difference, evaluating nrn_current_<suffix> at v+0.001 and at v and taking
// (g-rhs)/0.001; per-ion di<ion>dv terms are differenced the same way (scaled by
// 1.e2/area for point processes).
3239  printer->fmt_line("double g = nrn_current_{}({}+0.001);",
3240  info.mod_suffix,
3241  internal_method_arguments());
3242  for (auto& ion: info.ions) {
3243  for (auto& var: ion.writes) {
3244  if (ion.is_ionic_current(var)) {
3245  const auto& name = get_variable_name(var);
3246  printer->fmt_line("double di{} = {};", ion.name, name);
3247  }
3248  }
3249  }
3250  printer->fmt_line("double rhs = nrn_current_{}({});",
3251  info.mod_suffix,
3252  internal_method_arguments());
3253  printer->add_line("g = (g-rhs)/0.001;");
3254  for (auto& ion: info.ions) {
3255  for (auto& var: ion.writes) {
3256  if (ion.is_ionic_current(var)) {
3257  const auto& lhs = std::string(naming::ION_VARNAME_PREFIX) + "di" + ion.name + "dv";
3258  auto rhs = fmt::format("(di{}-{})/0.001", ion.name, get_variable_name(var));
3259  if (info.point_process) {
3260  auto area = get_variable_name(naming::NODE_AREA_VARIABLE);
3261  rhs += fmt::format("*1.e2/{}", area);
3262  }
3263  const ShadowUseStatement statement{lhs, "+=", rhs};
3264  const auto& text = process_shadow_update_statement(statement, BlockType::Equation);
3265  printer->add_line(text);
3266  }
3267  }
3268  }
3269 }
3270 
3271 
// Body of print_nrn_cur_kernel (signature line not shown in this listing).
// Per-node body of nrn_cur: reads the voltage and ion values, dispatches to the
// conductance or non-conductance kernel above, applies ion writes, and for point
// processes scales g and rhs by 1.e2/area.
3273  printer->add_line("int node_id = node_index[id];");
3274  printer->add_line("double v = voltage[node_id];");
3275  print_v_unused();
3276  if (ion_variable_struct_required()) {
3277  print_ion_variable();
3278  }
3279 
3280  const auto& read_statements = ion_read_statements(BlockType::Equation);
3281  for (auto& statement: read_statements) {
3282  printer->add_line(statement);
3283  }
3284 
3285  if (info.conductances.empty()) {
3286  print_nrn_cur_non_conductance_kernel();
3287  } else {
3288  print_nrn_cur_conductance_kernel(node);
3289  }
3290 
3291  const auto& write_statements = ion_write_statements(BlockType::Equation);
3292  for (auto& statement: write_statements) {
3293  auto text = process_shadow_update_statement(statement, BlockType::Equation);
3294  printer->add_line(text);
3295  }
3296 
3297  if (info.point_process) {
3298  const auto& area = get_variable_name(naming::NODE_AREA_VARIABLE);
3299  printer->fmt_line("double mfactor = 1.e2/{};", area);
3300  printer->add_line("g = g*mfactor;");
3301  printer->add_line("rhs = rhs*mfactor;");
3302  }
3303 
3304  print_g_unused();
3305 }
3306 
3307 
// Body of print_fast_imem_calculation (signature line not shown in this listing).
// Emits the fast-imem accumulation (nrn_sav_rhs / nrn_sav_d) guarded by
// nt->nrn_fast_imem; only applies to electrode currents. Point processes read from
// the shadow vectors, and an extra per-node loop is emitted when the backend
// requires a separate reduction pass.
3309  if (!info.electrode_current) {
3310  return;
3311  }
3312  std::string rhs, d;
3313  auto rhs_op = operator_for_rhs();
3314  auto d_op = operator_for_d();
3315  if (info.point_process) {
3316  rhs = "shadow_rhs[id]";
3317  d = "shadow_d[id]";
3318  } else {
3319  rhs = "rhs";
3320  d = "g";
3321  }
3322 
3323  printer->push_block("if (nt->nrn_fast_imem)");
3324  if (nrn_cur_reduction_loop_required()) {
3325  printer->push_block("for (int id = 0; id < nodecount; id++)");
3326  printer->add_line("int node_id = node_index[id];");
3327  }
3328  printer->fmt_line("nt->nrn_fast_imem->nrn_sav_rhs[node_id] {} {};", rhs_op, rhs);
3329  printer->fmt_line("nt->nrn_fast_imem->nrn_sav_d[node_id] {} {};", d_op, d);
3330  if (nrn_cur_reduction_loop_required()) {
3331  printer->pop_block();
3332  }
3333  printer->pop_block();
3334 }
3335 
3336 
// Body of print_nrn_cur (signature line not shown in this listing).
// Emits the nrn_cur routine: first the nrn_current_<suffix> helper when needed,
// then the per-node current-update loop, the matrix shadow update, and — when the
// backend needs it — a separate shadow-reduction loop followed by the fast-imem
// calculation.
3338  if (!nrn_cur_required()) {
3339  return;
3340  }
3341 
3342  if (info.conductances.empty()) {
3343  print_nrn_current(*info.breakpoint_node);
3344  }
3345 
3346  printer->add_newline(2);
3347  printer->add_line("/** update current */");
3348  print_global_function_common_code(BlockType::Equation);
3349  print_channel_iteration_block_parallel_hint(BlockType::Equation, info.breakpoint_node);
3350  printer->push_block("for (int id = 0; id < nodecount; id++)");
3351  print_nrn_cur_kernel(*info.breakpoint_node);
3352  print_nrn_cur_matrix_shadow_update();
3353  if (!nrn_cur_reduction_loop_required()) {
3354  print_fast_imem_calculation();
3355  }
3356  printer->pop_block();
3357 
3358  if (nrn_cur_reduction_loop_required()) {
3359  printer->push_block("for (int id = 0; id < nodecount; id++)");
3360  print_nrn_cur_matrix_shadow_reduction();
3361  printer->pop_block();
3362  print_fast_imem_calculation();
3363  }
3364 
3365  print_kernel_data_present_annotation_block_end();
3366  printer->pop_block();
3367 }
3368 
3369 
3370 /****************************************************************************************/
3371 /* Main code printing entry points */
3372 /****************************************************************************************/
3373 
// Body of print_headers_include (signature line not shown in this listing):
// standard, backend-specific, then coreneuron includes, in that order.
3375  print_standard_includes();
3376  print_backend_includes();
3377  print_coreneuron_includes();
3378 }
3379 
3380 
// Body of print_namespace_begin: opens the coreneuron namespace, then any
// backend-specific namespace inside it.
3382  print_namespace_start();
3383  print_backend_namespace_start();
3384 }
3385 
3386 
// Body of print_namespace_end: closes the namespaces in reverse order.
3388  print_backend_namespace_stop();
3389  print_namespace_stop();
3390 }
3391 
3392 
// Body of print_common_getters: emits the small accessor helpers shared by all
// generated mechanisms (variable indices, thread data, mech type, memb list).
3394  print_first_pointer_var_index_getter();
3395  print_first_random_var_index_getter();
3396  print_net_receive_arg_size_getter();
3397  print_thread_getters();
3398  print_num_variable_getter();
3399  print_mech_type_getter();
3400  print_memb_list_getter();
3401 }
3402 
3403 
// Body of print_data_structures: global struct, range-variable (instance) struct,
// and ion variable struct.
3405  print_mechanism_global_var_structure(print_initializers);
3406  print_mechanism_range_var_structure(print_initializers);
3407  print_ion_var_structure();
3408 }
3409 
3410 
// Body of print_v_unused (Doxygen index: definition at original line 3411):
// records v into v_unused under NRN_PRCELLSTATE; only for vectorized mechanisms.
3412  if (!info.vectorize) {
3413  return;
3414  }
3415  printer->add_multi_line(R"CODE(
3416  #if NRN_PRCELLSTATE
3417  inst->v_unused[id] = v;
3418  #endif
3419  )CODE");
3420 }
3421 
3422 
// Body of print_g_unused: records g into g_unused under NRN_PRCELLSTATE.
3424  printer->add_multi_line(R"CODE(
3425  #if NRN_PRCELLSTATE
3426  inst->g_unused[id] = g;
3427  #endif
3428  )CODE");
3429 }
3430 
3431 
// Body of print_compute_functions (signature line not shown in this listing):
// emits every compute routine in dependency order — verbatim blocks, procedures,
// functions, function tables, BEFORE/AFTER blocks, derivimplicit kernels, the
// net-event machinery, and finally nrn_init/nrn_cur/nrn_state.
3433  print_top_verbatim_blocks();
3434  for (const auto& procedure: info.procedures) {
3435  print_procedure(*procedure);
3436  }
3437  for (const auto& function: info.functions) {
3438  print_function(*function);
3439  }
3440  for (const auto& function: info.function_tables) {
3441  print_function_tables(*function);
3442  }
3443  for (size_t i = 0; i < info.before_after_blocks.size(); i++) {
3444  print_before_after_block(info.before_after_blocks[i], i);
3445  }
3446  for (const auto& callback: info.derivimplicit_callbacks) {
3447  const auto& block = *callback->get_node_to_solve();
3448  print_derivimplicit_kernel(block);
3449  }
3450  print_net_send_buffering();
3451  print_net_init();
3452  print_watch_activate();
3453  print_watch_check();
3454  print_net_receive_kernel();
3455  print_net_receive();
3456  print_net_receive_buffering();
3457  print_nrn_init();
3458  print_nrn_cur();
3459  print_nrn_state();
3460 }
3461 
3462 
// Body of print_codegen_routines (signature line not shown in this listing).
// Top-level driver that emits the complete generated .cpp file in order: header
// banner, includes, namespace open, constants/macros, mechanism info and data
// structures, getters and allocation/registration helpers, all compute functions,
// and the closing namespace.
3464  print_backend_info();
3465  print_headers_include();
3466  print_namespace_begin();
3467  print_nmodl_constants();
3468  print_prcellstate_macros();
3469  print_mechanism_info();
3470  print_data_structures(true);
3471  print_global_variables_for_hoc();
3472  print_common_getters();
3473  print_memory_allocation_routine();
3474  print_abort_routine();
3475  print_thread_memory_callbacks();
3476  print_instance_variable_setup();
3477  print_nrn_alloc();
3478  print_nrn_constructor();
3479  print_nrn_destructor();
3480  print_function_prototypes();
3481  print_functors_definitions();
3482  print_compute_functions();
3483  print_check_table_thread_function();
3484  print_mechanism_register();
3485  print_namespace_end();
3486 }
3487 
3488 
3489 /****************************************************************************************/
3490 /* Overloaded visitor routines */
3491 /****************************************************************************************/
3492 
3493 
// Body of visit_derivimplicit_callback (signature line not shown in this listing):
// emits a call to the generated <block>_<suffix>(...) solver wrapper.
3495  printer->fmt_line("{}_{}({});",
3496  node.get_node_to_solve()->get_node_name(),
3497  info.mod_suffix,
3498  external_method_arguments());
3499 }
3500 
3501 
// Body of visit_eigen_newton_solver_block (signature line not shown in this
// listing). Emits: an Eigen state vector, the setup_x block, construction and
// initialization of the node's functor, the call to nmodl::newton::newton_solver
// (asserting on non-convergence), then the update_states block and finalize().
3503  const ast::EigenNewtonSolverBlock& node) {
3504  // solution vector to store copy of state vars for Newton solver
3505  printer->add_newline();
3506 
3507  auto float_type = default_float_data_type();
3508  int N = node.get_n_state_vars()->get_value();
3509  printer->fmt_line("Eigen::Matrix<{}, {}, 1> nmodl_eigen_xm;", float_type, N);
3510  printer->fmt_line("{}* nmodl_eigen_x = nmodl_eigen_xm.data();", float_type);
3511 
3512  print_statement_block(*node.get_setup_x_block(), false, false);
3513 
3514  // call newton solver with functor and X matrix that contains state vars
3515  printer->add_line("// call newton solver");
3516  printer->fmt_line("{} newton_functor(nt, inst, id, pnodecount, v, indexes, data, thread);",
3517  info.functor_names[&node]);
3518  printer->add_line("newton_functor.initialize();");
3519  printer->add_line(
3520  "int newton_iterations = nmodl::newton::newton_solver(nmodl_eigen_xm, newton_functor);");
3521  printer->add_line(
3522  "if (newton_iterations < 0) assert(false && \"Newton solver did not converge!\");");
3523 
3524  // assign newton solver results in matrix X to state vars
3525  print_statement_block(*node.get_update_states_block(), false, false);
3526  printer->add_line("newton_functor.finalize();");
3527 }
3528 
3529 
// Body of visit_eigen_linear_solver_block (Doxygen index: definition at original
// line 3530). Emits the Eigen x/f vectors and Jacobian matrix (plus an explicit
// inverse matrix for systems with N <= 4), the variable/initialize/setup_x blocks,
// the linear solve itself, then the update_states and finalize blocks.
3531  const ast::EigenLinearSolverBlock& node) {
3532  printer->add_newline();
3533 
3534  const std::string float_type = default_float_data_type();
3535  int N = node.get_n_state_vars()->get_value();
3536  printer->fmt_line("Eigen::Matrix<{0}, {1}, 1> nmodl_eigen_xm, nmodl_eigen_fm;", float_type, N);
3537  printer->fmt_line("Eigen::Matrix<{0}, {1}, {1}> nmodl_eigen_jm;", float_type, N);
3538  if (N <= 4)
3539  printer->fmt_line("Eigen::Matrix<{0}, {1}, {1}> nmodl_eigen_jm_inv;", float_type, N);
3540  printer->fmt_line("{}* nmodl_eigen_x = nmodl_eigen_xm.data();", float_type);
3541  printer->fmt_line("{}* nmodl_eigen_j = nmodl_eigen_jm.data();", float_type);
3542  printer->fmt_line("{}* nmodl_eigen_f = nmodl_eigen_fm.data();", float_type);
3543  print_statement_block(*node.get_variable_block(), false, false);
3544  print_statement_block(*node.get_initialize_block(), false, false);
3545  print_statement_block(*node.get_setup_x_block(), false, false);
3546 
3547  printer->add_newline();
3548  print_eigen_linear_solver(float_type, N);
3549  printer->add_newline();
3550 
3551  print_statement_block(*node.get_update_states_block(), false, false);
3552  print_statement_block(*node.get_finalize_block(), false, false);
3553 }
3554 
3555 
// Body of the FOR_NETCON visitor (signature line not shown in this listing).
// Rewrites each FOR_NETCON argument into an indexed access on the global weights
// vector via nt->_fornetcon_weight_perm, then emits a loop over the netcon range
// [for_netcon_start, for_netcon_end) obtained from the permutation indices.
3557  // For_netcon should take the same arguments as net_receive and apply the operations
3558  // in the block to the weights of the netcons. Since all the weights are on the same vector,
3559  // weights, we have a mask of operations that we apply iteratively, advancing the offset
3560  // to the next netcon.
3561  const auto& args = node.get_parameters();
3562  RenameVisitor v;
3563  const auto& statement_block = node.get_statement_block();
3564  for (size_t i_arg = 0; i_arg < args.size(); ++i_arg) {
3565  // sanitize node_name since we want to substitute names like (*w) as they are
3566  auto old_name =
3567  std::regex_replace(args[i_arg]->get_node_name(), regex_special_chars, R"(\$&)");
3568  const auto& new_name = fmt::format("weights[{} + nt->_fornetcon_weight_perm[i]]", i_arg);
3569  v.set(old_name, new_name);
3570  statement_block->accept(v);
3571  }
3572 
// Locate the index of the FOR_NETCON semantic among the mechanism's int variables.
3573  const auto index =
3574  std::find_if(info.semantics.begin(), info.semantics.end(), [](const IndexSemantics& a) {
3575  return a.name == naming::FOR_NETCON_SEMANTIC;
3576  })->index;
3577 
3578  printer->fmt_text("const size_t offset = {}*pnodecount + id;", index);
3579  printer->add_newline();
3580  printer->add_line(
3581  "const size_t for_netcon_start = nt->_fornetcon_perm_indices[indexes[offset]];");
3582  printer->add_line(
3583  "const size_t for_netcon_end = nt->_fornetcon_perm_indices[indexes[offset] + 1];");
3584 
3585  printer->add_line("for (auto i = for_netcon_start; i < for_netcon_end; ++i) {");
3586  printer->increase_indent();
3587  print_statement_block(*statement_block, false, false);
3588  printer->decrease_indent();
3589 
3590  printer->add_line("}");
3591 }
3592 
3593 
// Body of the WATCH statement visitor (signature line not shown in this listing):
// emits a nrn_watch_activate call, numbering each watch statement in order.
3595  printer->add_text(fmt::format("nrn_watch_activate(inst, id, pnodecount, {}, v, watch_remove)",
3596  current_watch_statement++));
3597 }
3598 
3599 } // namespace codegen
3600 } // namespace nmodl
nmodl::codegen::get_table_statement
static const TableStatement * get_table_statement(const ast::Block &node)
Definition: codegen_coreneuron_cpp_visitor.cpp:409
nmodl::codegen::CodegenCoreneuronCppVisitor::print_procedure
virtual void print_procedure(const ast::ProcedureBlock &node) override
Print NMODL procedure in target backend code.
Definition: codegen_coreneuron_cpp_visitor.cpp:703
nmodl::codegen::CodegenCoreneuronCppVisitor::print_device_method_annotation
virtual void print_device_method_annotation()
Print the backend specific device method annotation.
Definition: codegen_coreneuron_cpp_visitor.cpp:306
nmodl::ast::BeforeBlock
Represents a BEFORE block in NMODL.
Definition: before_block.hpp:38
nmodl::codegen::CodegenCoreneuronCppVisitor::print_coreneuron_includes
void print_coreneuron_includes()
Print includes from coreneuron.
Definition: codegen_coreneuron_cpp_visitor.cpp:1378
nmodl::codegen::CodegenCoreneuronCppVisitor::net_receive_buffering_declaration
virtual std::string net_receive_buffering_declaration()
Generate the target backend code for the net_receive_buffering function declaration.
Definition: codegen_coreneuron_cpp_visitor.cpp:2816
nmodl::ast::DerivimplicitCallback::get_node_to_solve
std::shared_ptr< Block > get_node_to_solve() const noexcept
Getter for member variable DerivimplicitCallback::node_to_solve.
Definition: derivimplicit_callback.hpp:143
nmodl::codegen::CodegenCoreneuronCppVisitor::nrn_cur_reduction_loop_required
virtual bool nrn_cur_reduction_loop_required()
Check if reduction block in nrn_cur required.
Definition: codegen_coreneuron_cpp_visitor.cpp:259
nmodl::codegen::CodegenCoreneuronCppVisitor::print_backend_namespace_start
virtual void print_backend_namespace_start()
Prints the start of namespace for the backend-specific code.
Definition: codegen_coreneuron_cpp_visitor.cpp:316
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_move_call
void print_net_move_call(const ast::FunctionCall &node) override
Print call to net_move.
Definition: codegen_coreneuron_cpp_visitor.cpp:2684
nmodl::ast::Node
Base class for all AST node.
Definition: node.hpp:40
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mechanism_range_var_structure
void print_mechanism_range_var_structure(bool print_initializers) override
Print the structure that wraps all range and int variables required for the NMODL.
Definition: codegen_coreneuron_cpp_visitor.cpp:1937
nmodl::codegen::IndexVariableInfo::is_index
bool is_index
if this is pure index (e.g.
Definition: codegen_cpp_visitor.hpp:121
nmodl::codegen::naming::CELSIUS_VARIABLE
static constexpr char CELSIUS_VARIABLE[]
global temperature variable
Definition: codegen_naming.hpp:90
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buf_count_update_to_host
virtual void print_net_send_buf_count_update_to_host() const
Print the code to update NetSendBuffer_t count from device to host.
Definition: codegen_coreneuron_cpp_visitor.cpp:165
nmodl::ast::BATYPE_STEP
@ BATYPE_STEP
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_prototypes
void print_function_prototypes() override
Print function and procedures prototype declaration.
Definition: codegen_coreneuron_cpp_visitor.cpp:390
nmodl::codegen::CodegenCoreneuronCppVisitor::print_functors_definitions
void print_functors_definitions()
Go through the map of EigenNewtonSolverBlock s and their corresponding functor names and print the fu...
Definition: codegen_coreneuron_cpp_visitor.cpp:2450
nmodl::codegen::CodegenCoreneuronCppVisitor::print_namespace_stop
void print_namespace_stop() override
Prints the end of the coreneuron namespace.
Definition: codegen_coreneuron_cpp_visitor.cpp:1153
nmodl::codegen::IndexVariableInfo
Helper to represent information about index/int variables.
Definition: codegen_cpp_visitor.hpp:111
nmodl::codegen::CodegenCoreneuronCppVisitor::print_v_unused
void print_v_unused() const override
Set v_unused (voltage) for NRN_PRCELLSTATE feature.
Definition: codegen_coreneuron_cpp_visitor.cpp:3411
nmodl::visitor::DUState
DUState
Represent a state in Def-Use chain.
Definition: defuse_analyze_visitor.hpp:28
nmodl::codegen::CodegenCoreneuronCppVisitor::print_ion_variable
virtual void print_ion_variable()
Print the ion variable struct.
Definition: codegen_coreneuron_cpp_visitor.cpp:2026
nmodl::codegen::CodegenCoreneuronCppVisitor::print_derivimplicit_kernel
void print_derivimplicit_kernel(const ast::Block &block)
Print derivative kernel when derivimplicit method is used.
Definition: codegen_coreneuron_cpp_visitor.cpp:3028
nmodl::codegen::CodegenCoreneuronCppVisitor::print_device_stream_wait
virtual void print_device_stream_wait() const
Print the code to synchronise/wait on stream specific to NrnThread.
Definition: codegen_coreneuron_cpp_visitor.cpp:185
nmodl::codegen::BlockType::Destructor
@ Destructor
destructor block
nmodl::ast::AstNodeType::MUTEX_UNLOCK
@ MUTEX_UNLOCK
type of ast::MutexUnlock
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buf_count_update_to_device
virtual void print_net_send_buf_count_update_to_device() const
Print the code to update NetSendBuffer_t count from host to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:175
nmodl::ast::BAType
BAType
enum type to distinguish BEFORE or AFTER blocks
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_functor_definition
void print_functor_definition(const ast::EigenNewtonSolverBlock &node)
Based on the EigenNewtonSolverBlock passed print the definition needed for its functor.
Definition: codegen_coreneuron_cpp_visitor.cpp:802
nmodl::codegen::CodegenCoreneuronCppVisitor::global_variable_name
std::string global_variable_name(const SymbolType &symbol, bool use_instance=true) const override
Determine the variable name for a global variable given its symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:1264
nmodl::ast::BATYPE_INITIAL
@ BATYPE_INITIAL
Definition: ast_common.hpp:80
nmodl::ast::FunctionBlock
TODO.
Definition: function_block.hpp:39
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_eigen_linear_solver_block
void visit_eigen_linear_solver_block(const ast::EigenLinearSolverBlock &node) override
visit node of type ast::EigenLinearSolverBlock
Definition: codegen_coreneuron_cpp_visitor.cpp:3530
nmodl::codegen::CodegenCoreneuronCppVisitor::print_newtonspace_transfer_to_device
virtual void print_newtonspace_transfer_to_device() const
Print code block to transfer newtonspace structure to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:3117
nmodl::codegen::naming::NRN_PRIVATE_DESTRUCTOR_METHOD
constexpr char NRN_PRIVATE_DESTRUCTOR_METHOD[]
nrn_private_destructor method in generated code
Definition: codegen_naming.hpp:147
nmodl::codegen::CodegenCoreneuronCppVisitor::print_namespace_end
void print_namespace_end() override
Print end of namespaces.
Definition: codegen_coreneuron_cpp_visitor.cpp:3387
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_call
void print_net_send_call(const ast::FunctionCall &node) override
Print call to net_send.
Definition: codegen_coreneuron_cpp_visitor.cpp:2652
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_loop_begin
virtual void print_net_receive_loop_begin()
Print the code for the main net_receive loop.
Definition: codegen_coreneuron_cpp_visitor.cpp:2830
nmodl::ast::FunctionTableBlock::get_parameters
const ArgumentVector & get_parameters() const noexcept override
Getter for member variable FunctionTableBlock::parameters.
Definition: function_table_block.hpp:199
nmodl::ast::NetReceiveBlock
TODO.
Definition: net_receive_block.hpp:39
nmodl::codegen::naming::NTHREAD_D_SHADOW
static constexpr char NTHREAD_D_SHADOW[]
shadow d variable in neuron thread structure
Definition: codegen_naming.hpp:87
nmodl::codegen::CodegenCppVisitor::SymbolType
std::shared_ptr< symtab::Symbol > SymbolType
Definition: codegen_cpp_visitor.hpp:238
nmodl::codegen::CodegenCoreneuronCppVisitor::print_kernel_data_present_annotation_block_begin
virtual void print_kernel_data_present_annotation_block_begin()
Print accelerator annotations indicating data presence on device.
Definition: codegen_coreneuron_cpp_visitor.cpp:204
nmodl::ast::FunctionTableBlock
TODO.
Definition: function_table_block.hpp:39
nmodl::codegen::CodegenCoreneuronCppVisitor::print_backend_info
void print_backend_info() override
Print top file header printed in generated code.
Definition: codegen_coreneuron_cpp_visitor.cpp:1347
nmodl::codegen::CodegenCoreneuronCppVisitor::print_g_unused
void print_g_unused() const override
Set g_unused (conductance) for NRN_PRCELLSTATE feature.
Definition: codegen_coreneuron_cpp_visitor.cpp:3423
nmodl::ast::NetReceiveBlock::get_parameters
const ArgumentVector & get_parameters() const noexcept override
Getter for member variable NetReceiveBlock::parameters.
Definition: net_receive_block.hpp:176
nmodl::ast::AstNodeType::MUTEX_LOCK
@ MUTEX_LOCK
type of ast::MutexLock
nmodl::codegen::CodegenCoreneuronCppVisitor::print_common_getters
void print_common_getters()
Print common getters.
Definition: codegen_coreneuron_cpp_visitor.cpp:3393
nmodl::codegen::CodegenCoreneuronCppVisitor::int_variable_name
std::string int_variable_name(const IndexVariableInfo &symbol, const std::string &name, bool use_instance) const override
Determine the name of an int variable given its symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:1238
nmodl::codegen::CodegenCoreneuronCppVisitor::check_if_var_is_array
std::tuple< bool, int > check_if_var_is_array(const std::string &name)
Check if the given name exist in the symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:424
nmodl::codegen::CodegenCoreneuronCppVisitor::print_initial_block
void print_initial_block(const ast::InitialBlock *node)
Print initial block statements.
Definition: codegen_coreneuron_cpp_visitor.cpp:2196
nmodl::ast::EigenNewtonSolverBlock::get_initialize_block
std::shared_ptr< StatementBlock > get_initialize_block() const noexcept
Getter for member variable EigenNewtonSolverBlock::initialize_block.
Definition: eigen_newton_solver_block.hpp:203
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_kernel
void print_net_receive_kernel()
Print net_receive kernel function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2928
nmodl::codegen::CodegenCoreneuronCppVisitor::print_thread_getters
void print_thread_getters()
Print the getter method for thread variables and ids.
Definition: codegen_coreneuron_cpp_visitor.cpp:1169
nmodl::codegen::CodegenCoreneuronCppVisitor::simulator_name
std::string simulator_name() override
Name of the simulator the code was generated for.
Definition: codegen_coreneuron_cpp_visitor.cpp:56
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buffering
void print_net_send_buffering()
Print kernel for buffering net_send events.
Definition: codegen_coreneuron_cpp_visitor.cpp:2900
nmodl::codegen::get_register_type_for_ba_block
static std::string get_register_type_for_ba_block(const ast::Block *block)
Return registration type for a given BEFORE/AFTER block. \param block A BEFORE/AFTER block being registered.
Definition: codegen_coreneuron_cpp_visitor.cpp:1682
nmodl
encapsulates code generation backend implementations
Definition: ast_common.hpp:26
nmodl::ast::InitialBlock::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable InitialBlock::statement_block.
Definition: initial_block.hpp:184
nmodl::codegen::MemberType::index
@ index
index / int variables
nmodl::visitor::SymtabVisitor::visit_statement_block
void visit_statement_block(ast::StatementBlock &node) override
visit node of type ast::StatementBlock
Definition: symtab_visitor.cpp:99
nmodl::codegen::CodegenCoreneuronCppVisitor::print_fast_imem_calculation
virtual void print_fast_imem_calculation() override
Print fast membrane current calculation code.
Definition: codegen_coreneuron_cpp_visitor.cpp:3308
nmodl::codegen::CodegenCoreneuronCppVisitor::print_dt_update_to_device
virtual void print_dt_update_to_device() const
Print the code to update dt from host to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:180
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_init_acc_serial_annotation_block_end
virtual void print_net_init_acc_serial_annotation_block_end()
Print accelerator kernels end annotation for net_init kernel.
Definition: codegen_coreneuron_cpp_visitor.cpp:219
nmodl::ast::EigenLinearSolverBlock::get_setup_x_block
std::shared_ptr< StatementBlock > get_setup_x_block() const noexcept
Getter for member variable EigenLinearSolverBlock::setup_x_block.
Definition: eigen_linear_solver_block.hpp:210
nmodl::codegen::naming::NRN_CONSTRUCTOR_METHOD
static constexpr char NRN_CONSTRUCTOR_METHOD[]
nrn_constructor method in generated code
Definition: codegen_naming.hpp:138
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_init
void print_net_init()
Print initial block in the net receive block.
Definition: codegen_coreneuron_cpp_visitor.cpp:2763
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_event_call
void print_net_event_call(const ast::FunctionCall &node) override
Print call to net_event.
Definition: codegen_coreneuron_cpp_visitor.cpp:2711
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_destructor
void print_nrn_destructor() override
Print nrn_destructor function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2438
nmodl::codegen::CodegenCoreneuronCppVisitor::print_sdlists_init
void print_sdlists_init(bool print_initializers) override
Definition: codegen_coreneuron_cpp_visitor.cpp:1411
nmodl::ast::EigenLinearSolverBlock::get_variable_block
std::shared_ptr< StatementBlock > get_variable_block() const noexcept
Getter for member variable EigenLinearSolverBlock::variable_block.
Definition: eigen_linear_solver_block.hpp:192
nmodl::codegen::CodegenCoreneuronCppVisitor::print_headers_include
void print_headers_include() override
Print all includes.
Definition: codegen_coreneuron_cpp_visitor.cpp:3374
nmodl::codegen::CodegenCoreneuronCppVisitor::print_send_event_move
void print_send_event_move()
Print send event move block used in net receive as well as watch.
Definition: codegen_coreneuron_cpp_visitor.cpp:2795
nmodl::ast::StatementBlock::insert_statement
StatementVector::const_iterator insert_statement(StatementVector::const_iterator position, const std::shared_ptr< Statement > &n)
Insert member to statements.
Definition: ast.cpp:3130
nmodl::codegen::CodegenCoreneuronCppVisitor::print_table_replacement_function
void print_table_replacement_function(const ast::Block &node)
Print replacement function for function or procedure using table.
Definition: codegen_coreneuron_cpp_visitor.cpp:535
nmodl::codegen::naming::NTHREAD_DT_VARIABLE
static constexpr char NTHREAD_DT_VARIABLE[]
dt variable in neuron thread structure
Definition: codegen_naming.hpp:99
nmodl::ast::Ast::is_procedure_block
virtual bool is_procedure_block() const noexcept
Check if the ast node is an instance of ast::ProcedureBlock.
Definition: ast.cpp:144
token_mapping.hpp
Map different tokens from lexer to token types.
nmodl::ast::TableStatement
Represents TABLE statement in NMODL.
Definition: table_statement.hpp:39
nmodl::codegen::CodegenCoreneuronCppVisitor::print_atomic_reduction_pragma
virtual void print_atomic_reduction_pragma() override
Print atomic update pragma for reduction statements.
Definition: codegen_coreneuron_cpp_visitor.cpp:301
var_usage_visitor.hpp
Check if variable is used in given block.
string_utils.hpp
Implement string manipulation functions.
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_variables_for_hoc
void print_global_variables_for_hoc() override
Print byte arrays that register scalar and vector variables for hoc interface.
Definition: codegen_coreneuron_cpp_visitor.cpp:1625
nmodl::codegen::naming::NODE_AREA_VARIABLE
static constexpr char NODE_AREA_VARIABLE[]
inbuilt neuron variable for area of the compartment
Definition: codegen_naming.hpp:57
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_or_procedure
void print_function_or_procedure(const ast::Block &node, const std::string &name) override
Print nmodl function or procedure (common code)
Definition: codegen_coreneuron_cpp_visitor.cpp:668
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_common_code
void print_net_receive_common_code(const ast::Block &node, bool need_mech_inst=true)
Print the common code section for net receive related methods.
Definition: codegen_coreneuron_cpp_visitor.cpp:2601
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_current
void print_nrn_current(const ast::BreakpointBlock &node) override
Print the nrn_current kernel.
Definition: codegen_coreneuron_cpp_visitor.cpp:3182
nmodl::visitor::SymtabVisitor
Concrete visitor for constructing symbol table from AST.
Definition: symtab_visitor.hpp:37
nmodl::codegen::CodegenCoreneuronCppVisitor::conc_write_statement
std::string conc_write_statement(const std::string &ion_name, const std::string &concentration, int index) override
Generate Function call statement for nrn_wrote_conc.
Definition: codegen_coreneuron_cpp_visitor.cpp:1053
nmodl::ast::Ast::is_function_block
virtual bool is_function_block() const noexcept
Check if the ast node is an instance of ast::FunctionBlock.
Definition: ast.cpp:142
nmodl::ast::EigenNewtonSolverBlock::get_n_state_vars
std::shared_ptr< Integer > get_n_state_vars() const noexcept
Getter for member variable EigenNewtonSolverBlock::n_state_vars.
Definition: eigen_newton_solver_block.hpp:185
nmodl::codegen::naming::NRN_ALLOC_METHOD
static constexpr char NRN_ALLOC_METHOD[]
nrn_alloc method in generated code
Definition: codegen_naming.hpp:150
nmodl::Version::GIT_REVISION
static const std::string GIT_REVISION
git revision id
Definition: config.h:33
nmodl::codegen::IndexVariableInfo::is_integer
bool is_integer
if this is an integer (e.g.
Definition: codegen_cpp_visitor.hpp:125
nmodl::codegen::CodegenCoreneuronCppVisitor::print_first_pointer_var_index_getter
void print_first_pointer_var_index_getter()
Print the getter method for index position of first pointer variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:1080
nmodl::codegen::CodegenCoreneuronCppVisitor::print_setup_range_variable
void print_setup_range_variable()
Print the function that initialize range variable with different data type.
Definition: codegen_coreneuron_cpp_visitor.cpp:2036
nmodl::codegen::CodegenCoreneuronCppVisitor::internal_method_arguments
std::string internal_method_arguments() override
Arguments for functions that are defined and used internally.
Definition: codegen_coreneuron_cpp_visitor.cpp:911
nmodl::parser::CDriver
Class that binds all pieces together for parsing C verbatim blocks.
Definition: c11_driver.hpp:37
nmodl::visitor::DefUseAnalyzeVisitor
Visitor to return Def-Use chain for a given variable in the block/node
Definition: defuse_analyze_visitor.hpp:214
nmodl::ast::BreakpointBlock::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable BreakpointBlock::statement_block.
Definition: breakpoint_block.hpp:188
nmodl::codegen::CodegenCoreneuronCppVisitor::nrn_thread_arguments
std::string nrn_thread_arguments() const override
Arguments for "_threadargs_" macro in neuron implementation.
Definition: codegen_coreneuron_cpp_visitor.cpp:954
nmodl::ast::EigenNewtonSolverBlock
Represent newton solver solution block based on Eigen.
Definition: eigen_newton_solver_block.hpp:38
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_arg_size_getter
void print_net_receive_arg_size_getter()
Print the getter method for getting number of arguments for net_receive.
Definition: codegen_coreneuron_cpp_visitor.cpp:1113
codegen_helper_visitor.hpp
Helper visitor to gather AST information to help code generation.
nmodl::ast::EigenLinearSolverBlock::get_initialize_block
std::shared_ptr< StatementBlock > get_initialize_block() const noexcept
Getter for member variable EigenLinearSolverBlock::initialize_block.
Definition: eigen_linear_solver_block.hpp:201
nmodl::visitor::DefUseAnalyzeVisitor::analyze
DUChain analyze(const ast::Ast &node, const std::string &name)
compute def-use chain for a variable within the node
Definition: defuse_analyze_visitor.cpp:419
nmodl::ast::FunctionTableBlock::get_node_name
std::string get_node_name() const override
Return name of the node.
Definition: ast.cpp:3800
nmodl::ast::EigenLinearSolverBlock::get_update_states_block
std::shared_ptr< StatementBlock > get_update_states_block() const noexcept
Getter for member variable EigenLinearSolverBlock::update_states_block.
Definition: eigen_linear_solver_block.hpp:219
nmodl::codegen::CodegenCoreneuronCppVisitor::is_functor_const
bool is_functor_const(const ast::StatementBlock &variable_block, const ast::StatementBlock &functor_block)
Checks whether the functor_block generated by sympy solver modifies any variable outside its scope.
Definition: codegen_coreneuron_cpp_visitor.cpp:766
codegen_naming.hpp
nmodl::ast::StatementBlock::get_statements
const StatementVector & get_statements() const noexcept
Getter for member variable StatementBlock::statements.
Definition: statement_block.hpp:221
nmodl::visitor::VarUsageVisitor
Check if variable is used in given block.
Definition: var_usage_visitor.hpp:35
nmodl::ast::Block
Base class for all block scoped nodes.
Definition: block.hpp:41
nmodl::ast::InitialBlock
Represents a INITIAL block in the NMODL.
Definition: initial_block.hpp:49
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_matrix_shadow_update
virtual void print_nrn_cur_matrix_shadow_update()
Print the update to matrix elements with/without shadow vectors.
Definition: codegen_coreneuron_cpp_visitor.cpp:272
nmodl::codegen::CodegenCoreneuronCppVisitor::register_mechanism_arguments
std::string register_mechanism_arguments() const override
Arguments for register_mech or point_register_mech function.
Definition: codegen_coreneuron_cpp_visitor.cpp:1034
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_init_acc_serial_annotation_block_begin
virtual void print_net_init_acc_serial_annotation_block_begin()
Print accelerator kernels begin annotation for net_init kernel.
Definition: codegen_coreneuron_cpp_visitor.cpp:214
nmodl::ast::EigenNewtonSolverBlock::get_setup_x_block
std::shared_ptr< StatementBlock > get_setup_x_block() const noexcept
Getter for member variable EigenNewtonSolverBlock::setup_x_block.
Definition: eigen_newton_solver_block.hpp:212
nmodl::codegen::CodegenCoreneuronCppVisitor::print_deriv_advance_flag_transfer_to_device
virtual void print_deriv_advance_flag_transfer_to_device() const
Print the code to copy derivative advance flag to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:155
nmodl::codegen::CodegenCoreneuronCppVisitor::print_first_random_var_index_getter
void print_first_random_var_index_getter()
Print the getter method for index position of first RANDOM variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:1089
nmodl::stringutils::trim
static std::string trim(std::string text)
Definition: string_utils.hpp:63
nmodl::codegen::naming::NTHREAD_RHS_SHADOW
static constexpr char NTHREAD_RHS_SHADOW[]
shadow rhs variable in neuron thread structure
Definition: codegen_naming.hpp:84
visitor_utils.hpp
Utility functions for visitors implementation.
nmodl::codegen::BlockType::Equation
@ Equation
breakpoint block
nmodl::codegen::naming::USE_TABLE_VARIABLE
static constexpr char USE_TABLE_VARIABLE[]
global variable to indicate if table is used
Definition: codegen_naming.hpp:72
nmodl::ast::WatchStatement
Represent WATCH statement in NMODL.
Definition: watch_statement.hpp:39
nmodl::ast::EigenLinearSolverBlock::get_finalize_block
std::shared_ptr< StatementBlock > get_finalize_block() const noexcept
Getter for member variable EigenLinearSolverBlock::finalize_block.
Definition: eigen_linear_solver_block.hpp:228
nmodl::codegen::CodegenCoreneuronCppVisitor::print_eigen_linear_solver
virtual void print_eigen_linear_solver(const std::string &float_type, int N)
Definition: codegen_coreneuron_cpp_visitor.cpp:869
nmodl::codegen::CodegenCoreneuronCppVisitor::print_before_after_block
virtual void print_before_after_block(const ast::Block *node, size_t block_id)
Print NMODL before / after block in target backend code.
Definition: codegen_coreneuron_cpp_visitor.cpp:2372
nmodl::ast::BreakpointBlock
Represents a BREAKPOINT block in NMODL.
Definition: breakpoint_block.hpp:53
driver
nmodl::parser::UnitDriver driver
Definition: parser.cpp:28
nmodl::codegen::regex_special_chars
const std::regex regex_special_chars
Definition: codegen_cpp_visitor.cpp:610
nmodl::ast::FunctionCall
TODO.
Definition: function_call.hpp:38
codegen_coreneuron_cpp_visitor.hpp
Visitor for printing C++ code compatible with legacy api of CoreNEURON
nmodl::codegen::naming::VERBATIM_VARIABLES_MAPPING
static const std::unordered_map< std::string, std::string > VERBATIM_VARIABLES_MAPPING
commonly used variables in verbatim blocks and how they should be mapped to new code generation backends.
Definition: codegen_naming.hpp:180
nmodl::codegen::BlockType::Constructor
@ Constructor
constructor block
nmodl::ast::StatementBlock::get_symbol_table
symtab::SymbolTable * get_symbol_table() const override
Return associated symbol table for the current ast node.
Definition: statement_block.hpp:164
nmodl::codegen::CodegenCoreneuronCppVisitor::print_ion_var_structure
void print_ion_var_structure()
Print structure of ion variables used for local copies.
Definition: codegen_coreneuron_cpp_visitor.cpp:1979
c11_driver.hpp
nmodl::codegen::CodegenCoreneuronCppVisitor::internal_method_parameters
ParamVector internal_method_parameters() override
Parameters for internally defined functions.
Definition: codegen_coreneuron_cpp_visitor.cpp:922
nmodl::codegen::CodegenCoreneuronCppVisitor::position_of_int_var
int position_of_int_var(const std::string &name) const override
Determine the position in the data array for a given int variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:78
nmodl::codegen::CodegenCoreneuronCppVisitor::print_channel_iteration_block_parallel_hint
virtual void print_channel_iteration_block_parallel_hint(BlockType type, const ast::Block *block)
Print pragma annotations for channel iterations.
Definition: codegen_coreneuron_cpp_visitor.cpp:236
nmodl::codegen::CodegenCoreneuronCppVisitor::is_constant_variable
virtual bool is_constant_variable(const std::string &name) const
Check if variable is qualified as constant.
Definition: codegen_coreneuron_cpp_visitor.cpp:130
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_procedure_helper
void print_function_procedure_helper(const ast::Block &node) override
Common helper function to help printing function or procedure blocks.
Definition: codegen_coreneuron_cpp_visitor.cpp:689
nmodl::codegen::CodegenCoreneuronCppVisitor::external_method_parameters
const char * external_method_parameters(bool table=false) noexcept override
Parameters for functions in generated code that are called back from external code.
Definition: codegen_coreneuron_cpp_visitor.cpp:944
nmodl::ast::ForNetcon::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable ForNetcon::statement_block.
Definition: for_netcon.hpp:185
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_for_netcon
void visit_for_netcon(const ast::ForNetcon &node) override
visit node of type ast::ForNetcon
Definition: codegen_coreneuron_cpp_visitor.cpp:3556
nmodl::codegen::CodegenCoreneuronCppVisitor::print_compute_functions
virtual void print_compute_functions() override
Print all compute functions for every backend.
Definition: codegen_coreneuron_cpp_visitor.cpp:3432
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_conductance_kernel
void print_nrn_cur_conductance_kernel(const ast::BreakpointBlock &node) override
Print the nrn_cur kernel with NMODL conductance keyword provisions.
Definition: codegen_coreneuron_cpp_visitor.cpp:3201
nmodl::codegen::IndexSemantics
Represent semantic information for index variable.
Definition: codegen_info.hpp:170
nmodl::codegen::BlockType::Watch
@ Watch
watch block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_memory_allocation_routine
virtual void print_memory_allocation_routine() const
Print memory allocation routine.
Definition: codegen_coreneuron_cpp_visitor.cpp:336
nmodl::symtab::syminfo::to_string
std::string to_string(const T &obj)
Definition: symbol_properties.hpp:279
nmodl::codegen::CodegenCoreneuronCppVisitor::print_check_table_thread_function
void print_check_table_thread_function()
Print check_table functions.
Definition: codegen_coreneuron_cpp_visitor.cpp:644
nmodl::ast::Ast::is_initial_block
virtual bool is_initial_block() const noexcept
Check if the ast node is an instance of ast::InitialBlock.
Definition: ast.cpp:124
nmodl::codegen::BlockType::NetReceive
@ NetReceive
net_receive block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_namespace_begin
void print_namespace_begin() override
Print start of namespaces.
Definition: codegen_coreneuron_cpp_visitor.cpp:3381
nmodl::codegen::CodegenCoreneuronCppVisitor::backend_name
virtual std::string backend_name() const override
Name of the code generation backend.
Definition: codegen_coreneuron_cpp_visitor.cpp:51
nmodl::codegen::naming::NRN_INIT_METHOD
static constexpr char NRN_INIT_METHOD[]
nrn_init method in generated code
Definition: codegen_naming.hpp:135
nmodl::codegen::CodegenCoreneuronCppVisitor::print_rhs_d_shadow_variables
virtual void print_rhs_d_shadow_variables()
Print the setup method for setting matrix shadow vectors.
Definition: codegen_coreneuron_cpp_visitor.cpp:264
nmodl::ast::FunctionCall::get_arguments
const ExpressionVector & get_arguments() const noexcept
Getter for member variable FunctionCall::arguments.
Definition: function_call.hpp:166
nmodl::codegen::CodegenCoreneuronCppVisitor::process_verbatim_text
std::string process_verbatim_text(std::string const &text) override
Process a verbatim block for possible variable renaming.
Definition: codegen_coreneuron_cpp_visitor.cpp:1007
nmodl::codegen::naming::THREAD_ARGS
static constexpr char THREAD_ARGS[]
verbatim name of the variable for nrn thread arguments
Definition: codegen_naming.hpp:165
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive
void print_net_receive()
Print net_receive function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2987
nmodl::codegen::rename_net_receive_arguments
static void rename_net_receive_arguments(const ast::NetReceiveBlock &net_receive_node, const ast::Node &node)
Rename arguments to NET_RECEIVE block with corresponding pointer variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:2750
nmodl::codegen::CodegenCoreneuronCppVisitor::optimize_ion_variable_copies
bool optimize_ion_variable_copies() const override
Check if ion variable copies should be avoided.
Definition: codegen_coreneuron_cpp_visitor.cpp:331
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function
void print_function(const ast::FunctionBlock &node) override
Print NMODL function in target backend code.
Definition: codegen_coreneuron_cpp_visitor.cpp:708
nmodl::codegen::CodegenCoreneuronCppVisitor::print_top_verbatim_blocks
void print_top_verbatim_blocks()
Print top level (global scope) verbatim blocks.
Definition: codegen_coreneuron_cpp_visitor.cpp:366
nmodl::ast::EigenLinearSolverBlock::get_n_state_vars
std::shared_ptr< Integer > get_n_state_vars() const noexcept
Getter for member variable EigenLinearSolverBlock::n_state_vars.
Definition: eigen_linear_solver_block.hpp:183
nmodl::visitor::RenameVisitor
Blindly rename given variable to new name
Definition: rename_visitor.hpp:43
nmodl::codegen::naming::NRN_CUR_METHOD
static constexpr char NRN_CUR_METHOD[]
nrn_cur method in generated code
Definition: codegen_naming.hpp:156
codegen_utils.hpp
Implement utility functions for codegen visitors.
nmodl::codegen::naming::TQITEM_VARIABLE
static constexpr char TQITEM_VARIABLE[]
inbuilt neuron variable for tqitem process
Definition: codegen_naming.hpp:66
nmodl::visitor::RenameVisitor::set
void set(const std::string &old_name, std::string new_name)
Definition: rename_visitor.hpp:97
nmodl::ast::AstNodeType::LOCAL_VAR
@ LOCAL_VAR
type of ast::LocalVar
nmodl::ast::EigenLinearSolverBlock
Represent linear solver solution block based on Eigen.
Definition: eigen_linear_solver_block.hpp:38
nmodl::collect_nodes
std::vector< std::shared_ptr< const ast::Ast > > collect_nodes(const ast::Ast &node, const std::vector< ast::AstNodeType > &types)
traverse node recursively and collect nodes of given types
Definition: visitor_utils.cpp:205
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mechanism_global_var_structure
void print_mechanism_global_var_structure(bool print_initializers) override
Print the structure that wraps all global variables used in the NMODL.
Definition: codegen_coreneuron_cpp_visitor.cpp:1478
defuse_analyze_visitor.hpp
Visitor to return Def-Use chain for a given variable in the block/node
nmodl::ast::FunctionBlock::get_node_name
std::string get_node_name() const override
Return name of the node.
Definition: ast.cpp:3963
nmodl::ast::EigenNewtonSolverBlock::get_functor_block
std::shared_ptr< StatementBlock > get_functor_block() const noexcept
Getter for member variable EigenNewtonSolverBlock::functor_block.
Definition: eigen_newton_solver_block.hpp:221
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_eigen_newton_solver_block
void visit_eigen_newton_solver_block(const ast::EigenNewtonSolverBlock &node) override
visit node of type ast::EigenNewtonSolverBlock
Definition: codegen_coreneuron_cpp_visitor.cpp:3502
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_constructor
void print_nrn_constructor() override
Print nrn_constructor function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2426
nmodl::codegen::CodegenCoreneuronCppVisitor::print_kernel_data_present_annotation_block_end
virtual void print_kernel_data_present_annotation_block_end()
Print matching block end of accelerator annotations for data presence on device.
Definition: codegen_coreneuron_cpp_visitor.cpp:209
nmodl::ast::BATYPE_SOLVE
@ BATYPE_SOLVE
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_alloc
void print_nrn_alloc() override
Print nrn_alloc function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2458
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_matrix_shadow_reduction
virtual void print_nrn_cur_matrix_shadow_reduction()
Print the reduction to matrix elements from shadow vectors.
Definition: codegen_coreneuron_cpp_visitor.cpp:285
nmodl::codegen::CodegenCoreneuronCppVisitor::print_memb_list_getter
void print_memb_list_getter()
Print the getter method for returning membrane list from NrnThread.
Definition: codegen_coreneuron_cpp_visitor.cpp:1135
nmodl::codegen::CodegenCoreneuronCppVisitor::print_watch_check
void print_watch_check()
Print watch activate function.
Definition: codegen_coreneuron_cpp_visitor.cpp:2519
nmodl::codegen::naming::INST_GLOBAL_MEMBER
static constexpr char INST_GLOBAL_MEMBER[]
instance struct member pointing to the global variable structure
Definition: codegen_naming.hpp:93
nmodl::codegen::CodegenCoreneuronCppVisitor::external_method_arguments
const char * external_method_arguments() noexcept override
Arguments for external functions called from generated code.
Definition: codegen_coreneuron_cpp_visitor.cpp:939
nmodl::ast::AstNodeType::PROTECT_STATEMENT
@ PROTECT_STATEMENT
type of ast::ProtectStatement
nmodl::codegen::CodegenCoreneuronCppVisitor::print_instance_variable_setup
void print_instance_variable_setup()
Print the function that initialize instance structure.
Definition: codegen_coreneuron_cpp_visitor.cpp:2073
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_init
void print_nrn_init(bool skip_init_check=true)
Print the nrn_init function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2297
nmodl::ast::StatementBlock
Represents block encapsulating list of statements.
Definition: statement_block.hpp:53
nmodl::codegen::CodegenCoreneuronCppVisitor::get_variable_name
std::string get_variable_name(const std::string &name, bool use_instance=true) const override
Determine variable name in the structure of mechanism properties.
Definition: codegen_coreneuron_cpp_visitor.cpp:1274
nmodl::symtab::syminfo::NmodlType
NmodlType
NMODL variable properties.
Definition: symbol_properties.hpp:116
nmodl::codegen::CodegenCoreneuronCppVisitor::print_namespace_start
void print_namespace_start() override
Prints the start of the coreneuron namespace.
Definition: codegen_coreneuron_cpp_visitor.cpp:1147
nmodl::ast::ForNetcon
TODO.
Definition: for_netcon.hpp:39
nmodl::ast::BATYPE_BREAKPOINT
@ BATYPE_BREAKPOINT
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_abort_routine
virtual void print_abort_routine() const
Print backend specific abort routine.
Definition: codegen_coreneuron_cpp_visitor.cpp:353
nmodl::codegen::CodegenCoreneuronCppVisitor::print_table_check_function
void print_table_check_function(const ast::Block &node)
Print check_function() for functions or procedure using table.
Definition: codegen_coreneuron_cpp_visitor.cpp:438
nmodl::ast::ForNetcon::get_parameters
const ArgumentVector & get_parameters() const noexcept override
Getter for member variable ForNetcon::parameters.
Definition: for_netcon.hpp:176
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur
void print_nrn_cur() override
Print nrn_cur / current update function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:3337
nmodl::codegen::CodegenCoreneuronCppVisitor::get_range_var_float_type
std::string get_range_var_float_type(const SymbolType &symbol)
Returns floating point type for given range variable symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:2057
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buffering_cnt_update
virtual void print_net_send_buffering_cnt_update() const
Print the code related to the update of NetSendBuffer_t cnt.
Definition: codegen_coreneuron_cpp_visitor.cpp:2888
nmodl::ast::EigenNewtonSolverBlock::get_variable_block
std::shared_ptr< StatementBlock > get_variable_block() const noexcept
Getter for member variable EigenNewtonSolverBlock::variable_block.
Definition: eigen_newton_solver_block.hpp:194
nmodl::codegen::CodegenCoreneuronCppVisitor::print_thread_memory_callbacks
void print_thread_memory_callbacks()
Print thread related memory allocation and deallocation callbacks.
Definition: codegen_coreneuron_cpp_visitor.cpp:1874
nmodl::ast::DerivimplicitCallback
Represent a callback to NEURON's derivimplicit solver.
Definition: derivimplicit_callback.hpp:38
nmodl::codegen::ShadowUseStatement
Represents ion write statement during code generation.
Definition: codegen_cpp_visitor.hpp:150
nmodl::ast::ProcedureBlock
TODO.
Definition: procedure_block.hpp:39
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_buffering
void print_net_receive_buffering(bool need_mech_inst=true)
Print kernel for buffering net_receive events.
Definition: codegen_coreneuron_cpp_visitor.cpp:2842
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_function_common_code
virtual void print_global_function_common_code(BlockType type, const std::string &function_name="") override
Print common code for global functions like nrn_init, nrn_cur and nrn_state.
Definition: codegen_coreneuron_cpp_visitor.cpp:2246
logger.hpp
Implement logger based on spdlog library.
nmodl::Version::NMODL_VERSION
static const std::string NMODL_VERSION
project tagged version in the cmake
Definition: config.h:36
nmodl::codegen::IndexVariableInfo::is_vdata
bool is_vdata
if variable resides in vdata field of NrnThread typically true for bbcore pointer
Definition: codegen_cpp_visitor.hpp:117
nmodl::ast::FunctionBlock::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable FunctionBlock::statement_block.
Definition: function_block.hpp:219
nmodl::codegen::BlockType
BlockType
Helper to represent various block types.
Definition: codegen_cpp_visitor.hpp:56
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mechanism_register
void print_mechanism_register() override
Print the mechanism registration function.
Definition: codegen_coreneuron_cpp_visitor.cpp:1732
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_method_annotation
virtual void print_global_method_annotation()
Print backend specific global method annotation.
Definition: codegen_coreneuron_cpp_visitor.cpp:311
nmodl::codegen::naming::NRN_STATE_METHOD
static constexpr char NRN_STATE_METHOD[]
nrn_state method in generated code
Definition: codegen_naming.hpp:153
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_kernel
void print_nrn_cur_kernel(const ast::BreakpointBlock &node) override
Print main body of nrn_cur function.
Definition: codegen_coreneuron_cpp_visitor.cpp:3272
config.h
Version information and units file path.
nmodl::codegen::CodegenCoreneuronCppVisitor::nrn_thread_internal_arguments
std::string nrn_thread_internal_arguments() override
Arguments for "_threadargs_" macro in neuron implementation.
Definition: codegen_coreneuron_cpp_visitor.cpp:966
nmodl::codegen::CodegenCoreneuronCppVisitor::print_device_atomic_capture_annotation
virtual void print_device_atomic_capture_annotation() const
Print pragma annotation for increase and capture of variable in automatic way.
Definition: codegen_coreneuron_cpp_visitor.cpp:160
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_derivimplicit_callback
void visit_derivimplicit_callback(const ast::DerivimplicitCallback &node) override
visit node of type ast::DerivimplicitCallback
Definition: codegen_coreneuron_cpp_visitor.cpp:3494
nmodl::codegen::CodegenCoreneuronCppVisitor::print_get_memb_list
virtual void print_get_memb_list()
Print the target backend code for defining and checking a local Memb_list variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:2821
nmodl::codegen::CodegenCoreneuronCppVisitor::print_num_variable_getter
void print_num_variable_getter()
Print the getter methods for float and integer variables count.
Definition: codegen_coreneuron_cpp_visitor.cpp:1098
nmodl::ast::Ast::get_statement_block
virtual std::shared_ptr< StatementBlock > get_statement_block() const
Return associated statement block for the AST node.
Definition: ast.cpp:32
nmodl::codegen::CodegenCoreneuronCppVisitor::print_standard_includes
void print_standard_includes() override
Print standard C/C++ includes.
Definition: codegen_coreneuron_cpp_visitor.cpp:1367
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_loop_end
virtual void print_net_receive_loop_end()
Print the code for closing the main net_receive loop.
Definition: codegen_coreneuron_cpp_visitor.cpp:2837
nmodl::ast::Block::visit_children
virtual void visit_children(visitor::Visitor &v) override
visit children i.e.
Definition: ast.cpp:388
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mech_type_getter
void print_mech_type_getter()
Print the getter method for returning mechtype.
Definition: codegen_coreneuron_cpp_visitor.cpp:1125
nmodl::ast::AfterBlock
Represents a AFTER block in NMODL.
Definition: after_block.hpp:51
nmodl::ast::Ast::is_before_block
virtual bool is_before_block() const noexcept
Check if the ast node is an instance of ast::BeforeBlock.
Definition: ast.cpp:152
nmodl::ast::EigenNewtonSolverBlock::get_update_states_block
std::shared_ptr< StatementBlock > get_update_states_block() const noexcept
Getter for member variable EigenNewtonSolverBlock::update_states_block.
Definition: eigen_newton_solver_block.hpp:230
nmodl::codegen::naming::NTHREAD_T_VARIABLE
static constexpr char NTHREAD_T_VARIABLE[]
t variable in neuron thread structure
Definition: codegen_naming.hpp:96
nmodl::codegen::CodegenCoreneuronCppVisitor::float_variable_name
std::string float_variable_name(const SymbolType &symbol, bool use_instance) const override
Determine the name of a float variable given its symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:1218
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_variable_device_update_annotation
virtual void print_global_variable_device_update_annotation()
Print the pragma annotation to update global variables from host to the device.
Definition: codegen_coreneuron_cpp_visitor.cpp:2031
rename_visitor.hpp
Blindly rename given variable to new name
nmodl::codegen::BlockType::State
@ State
derivative block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_tables
void print_function_tables(const ast::FunctionTableBlock &node)
Print NMODL function_table in target backend code.
Definition: codegen_coreneuron_cpp_visitor.cpp:728
symtab_visitor.hpp
THIS FILE IS GENERATED AT BUILD TIME AND SHALL NOT BE EDITED.
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_watch_statement
virtual void visit_watch_statement(const ast::WatchStatement &node) override
visit node of type ast::WatchStatement
Definition: codegen_coreneuron_cpp_visitor.cpp:3594
nmodl::ast::Block::get_parameters
virtual const ArgumentVector & get_parameters() const
Definition: block.hpp:50
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_non_conductance_kernel
void print_nrn_cur_non_conductance_kernel() override
Print the nrn_cur kernel without NMODL conductance keyword provisions.
Definition: codegen_coreneuron_cpp_visitor.cpp:3238
nmodl::visitor::VarUsageVisitor::variable_used
bool variable_used(const ast::Node &node, std::string name)
Definition: var_usage_visitor.cpp:26
nmodl::codegen::CodegenCoreneuronCppVisitor::print_backend_includes
virtual void print_backend_includes()
Print backend specific includes (none needed for C++ backend)
Definition: codegen_coreneuron_cpp_visitor.cpp:326
nmodl::codegen::CodegenCoreneuronCppVisitor::process_verbatim_token
std::string process_verbatim_token(const std::string &token)
Process a token in a verbatim block for possible variable renaming.
Definition: codegen_coreneuron_cpp_visitor.cpp:94
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_state
void print_nrn_state() override
Print nrn_state / state update function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:3127
nmodl::codegen::naming::NRN_DESTRUCTOR_METHOD
static constexpr char NRN_DESTRUCTOR_METHOD[]
nrn_destructor method in generated code
Definition: codegen_naming.hpp:141
nmodl::codegen::CodegenCoreneuronCppVisitor::print_codegen_routines
virtual void print_codegen_routines() override
Print entry point to code generation.
Definition: codegen_coreneuron_cpp_visitor.cpp:3463
nmodl::codegen::BlockType::Initial
@ Initial
initial block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_watch_activate
void print_watch_activate()
Print watch activate function.
Definition: codegen_coreneuron_cpp_visitor.cpp:2471
nmodl::codegen::naming::THREAD_ARGS_PROTO
static constexpr char THREAD_ARGS_PROTO[]
verbatim name of the variable for nrn thread arguments in prototype
Definition: codegen_naming.hpp:168
nmodl::codegen::BlockType::BeforeAfter
@ BeforeAfter
before / after block
nmodl::ast::EigenNewtonSolverBlock::get_finalize_block
std::shared_ptr< StatementBlock > get_finalize_block() const noexcept
Getter for member variable EigenNewtonSolverBlock::finalize_block.
Definition: eigen_newton_solver_block.hpp:239
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buffering_grow
virtual void print_net_send_buffering_grow()
Print statement that grows NetSendBuffering_t structure if needed.
Definition: codegen_coreneuron_cpp_visitor.cpp:2893
nmodl::codegen::naming::NRN_PRIVATE_CONSTRUCTOR_METHOD
constexpr char NRN_PRIVATE_CONSTRUCTOR_METHOD[]
nrn_private_constructor method in generated code
Definition: codegen_naming.hpp:144
nmodl::codegen::CodegenCoreneuronCppVisitor::print_backend_namespace_stop
virtual void print_backend_namespace_stop()
Prints the end of namespace for the backend-specific code.
Definition: codegen_coreneuron_cpp_visitor.cpp:321
nmodl::ast::Ast::get_node_name
virtual std::string get_node_name() const
Return name of of the node.
Definition: ast.cpp:28
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buf_update_to_host
virtual void print_net_send_buf_update_to_host() const
Print the code to update NetSendBuffer_t from device to host.
Definition: codegen_coreneuron_cpp_visitor.cpp:170
all.hpp
Auto generated AST classes declaration.
nmodl::codegen::CodegenCoreneuronCppVisitor::print_data_structures
void print_data_structures(bool print_initializers) override
Print all classes.
Definition: codegen_coreneuron_cpp_visitor.cpp:3404
nmodl::codegen::CodegenCoreneuronCppVisitor::replace_if_verbatim_variable
std::string replace_if_verbatim_variable(std::string name)
Replace commonly used verbatim variables.
Definition: codegen_coreneuron_cpp_visitor.cpp:978
nmodl::codegen::CodegenCoreneuronCppVisitor::print_ion_var_constructor
virtual void print_ion_var_constructor(const std::vector< std::string > &members)
Print constructor of ion variables.
Definition: codegen_coreneuron_cpp_visitor.cpp:2009
nmodl::codegen::CodegenCoreneuronCppVisitor::position_of_float_var
int position_of_float_var(const std::string &name) const override
Determine the position in the data array for a given float variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:66
nmodl::codegen::CodegenCppVisitor::ParamVector
std::vector< std::tuple< std::string, std::string, std::string, std::string > > ParamVector
A vector of parameters represented by a 4-tuple of strings:
Definition: codegen_cpp_visitor.hpp:250
nmodl::codegen::naming::ION_VARNAME_PREFIX
static constexpr char ION_VARNAME_PREFIX[]
prefix for ion variable
Definition: codegen_naming.hpp:171