codegen_coreneuron_cpp_visitor.cpp
/*
 * Copyright 2023 Blue Brain Project, EPFL.
 * See the top-level LICENSE file for details.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "codegen/codegen_coreneuron_cpp_visitor.hpp"

#include <algorithm>
#include <chrono>
#include <cmath>
#include <ctime>
#include <regex>

#include "ast/all.hpp"
#include "codegen/codegen_naming.hpp"
#include "config/config.h"
#include "lexer/token_mapping.hpp"
#include "parser/c11_driver.hpp"
#include "solver/solver.hpp"
#include "utils/logger.hpp"
#include "utils/string_utils.hpp"
#include "visitors/defuse_analyze_visitor.hpp"
#include "visitors/rename_visitor.hpp"
#include "visitors/symtab_visitor.hpp"
#include "visitors/var_usage_visitor.hpp"
#include "visitors/visitor_utils.hpp"

namespace nmodl {
namespace codegen {

using namespace ast;

using visitor::DefUseAnalyzeVisitor;
using visitor::DUState;
using visitor::RenameVisitor;
using visitor::SymtabVisitor;
using visitor::VarUsageVisitor;

using symtab::syminfo::NmodlType;

extern const std::regex regex_special_chars;

/****************************************************************************************/
/*                            Generic information getters                               */
/****************************************************************************************/


std::string CodegenCoreneuronCppVisitor::backend_name() const {
    return "C++ (api-compatibility)";
}


std::string CodegenCoreneuronCppVisitor::simulator_name() {
    return "CoreNEURON";
}

bool CodegenCoreneuronCppVisitor::needs_v_unused() const {
    return info.vectorize;
}

/****************************************************************************************/
/*                    Common helper routines across codegen functions                   */
/****************************************************************************************/


int CodegenCoreneuronCppVisitor::position_of_float_var(const std::string& name) const {
    return get_prefixsum_from_name(codegen_float_variables, name);
}


int CodegenCoreneuronCppVisitor::position_of_int_var(const std::string& name) const {
    return get_prefixsum_from_name(codegen_int_variables, name);
}

/**
 * \details Top-level verbatim blocks often use variables with their old names.
 * Here we check whether we are processing a verbatim block at global scope
 * and replace the token accordingly.
 */
std::string CodegenCoreneuronCppVisitor::process_verbatim_token(const std::string& token) {
    const std::string& name = token;

    /*
     * If the given token is a procedure name and it is defined
     * in the current mod file then it must be replaced.
     */
    if (program_symtab->is_method_defined(token)) {
        return method_name(token);
    }

    /*
     * Check if the token is a commonly used variable name in
     * verbatim blocks like nt, \c \_threadargs etc. If so, replace
     * it and return.
     */
    auto new_name = replace_if_verbatim_variable(name);
    if (new_name != name) {
        new_name = get_variable_name(new_name, false);

        if (name == (std::string("_") + naming::TQITEM_VARIABLE)) {
            new_name.insert(0, 1, '&');
        }

        return new_name;
    }

    /*
     * For top-level verbatim blocks we shouldn't replace variable
     * names with the instance struct because the arguments are provided
     * by coreneuron and are missing inst.
     */
    auto use_instance = !printing_top_verbatim_blocks;
    return get_variable_name(token, use_instance);
}
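
/*
 * Illustration of the rewriting above (mechanism and variable names are
 * hypothetical, not from a real mod file): inside a VERBATIM block of a mod
 * file that defines PROCEDURE rates() with SUFFIX kca, the token "rates" is
 * a method defined in the same file, so
 *
 * \code{.cpp}
 *     rates(_threadargs_);   // as written in the VERBATIM block
 *     rates_kca(id, pnodecount, inst, data, indexes, thread, nt, v);  // as emitted
 * \endcode
 *
 * i.e. the procedure name is suffixed via method_name() and the
 * \c \_threadargs placeholder is expanded by replace_if_verbatim_variable()
 * into the internal argument list.
 */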

void CodegenCoreneuronCppVisitor::visit_verbatim(const Verbatim& node) {
    const auto& text = node.get_statement()->eval();
    printer->add_line("// VERBATIM");
    const auto& result = process_verbatim_text(text);

    const auto& statements = stringutils::split_string(result, '\n');
    for (const auto& statement: statements) {
        const auto& trimmed_stmt = stringutils::trim_newline(statement);
        if (trimmed_stmt.find_first_not_of(' ') != std::string::npos) {
            printer->add_line(trimmed_stmt);
        }
    }
    printer->add_line("// ENDVERBATIM");
}


/**
 * \details This can be overridden in the backend. For example, parameters can be constant
 * except in the INITIAL block where they are set to 0. As the INITIAL block is/can be
 * executed on the c++/cpu backend, the gpu backend can mark the parameter as constant.
 */
bool CodegenCoreneuronCppVisitor::is_constant_variable(const std::string& name) const {
    auto symbol = program_symtab->lookup_in_scope(name);
    bool is_constant = false;
    if (symbol != nullptr) {
        // per-mechanism ion variables need to be updated from neuron/coreneuron values
        if (info.is_ion_variable(name)) {
            is_constant = false;
        }
        // for a parameter variable to be const, make sure its write count is 0
        // and it's not used in a verbatim block
        else if (symbol->has_any_property(NmodlType::param_assign) &&
                 info.variables_in_verbatim.find(name) == info.variables_in_verbatim.end() &&
                 symbol->get_write_count() == 0) {
            is_constant = true;
        }
    }
    return is_constant;
}
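
/*
 * A sketch of the effect (hypothetical mod file): a PARAMETER "gbar" that is
 * read-only (write count 0), never mentioned in a VERBATIM block and not an
 * ion variable satisfies the conditions above, so the generated instance
 * struct can declare its pointer as "const double* gbar" rather than
 * "double* gbar".
 */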


/****************************************************************************************/
/*                             Backend specific routines                                */
/****************************************************************************************/


void CodegenCoreneuronCppVisitor::print_deriv_advance_flag_transfer_to_device() const {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_device_atomic_capture_annotation() const {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_net_send_buf_count_update_to_host() const {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_net_send_buf_update_to_host() const {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_net_send_buf_count_update_to_device() const {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_dt_update_to_device() const {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_device_stream_wait() const {
    // backend specific, do nothing
}


/**
 * \details Each kernel such as \c nrn\_init, \c nrn\_state and \c nrn\_cur could be offloaded
 * to an accelerator. In this case, at the very top level, we print a pragma
 * for data present. For example:
 *
 * \code{.cpp}
 *  void nrn_state(...) {
 *      #pragma acc data present (nt, ml...)
 *      {
 *
 *      }
 *  }
 * \endcode
 */
void CodegenCoreneuronCppVisitor::print_kernel_data_present_annotation_block_begin() {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_kernel_data_present_annotation_block_end() {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_net_init_acc_serial_annotation_block_begin() {
    // backend specific, do nothing
}


void CodegenCoreneuronCppVisitor::print_net_init_acc_serial_annotation_block_end() {
    // backend specific, do nothing
}


bool CodegenCoreneuronCppVisitor::nrn_cur_reduction_loop_required() {
    return info.point_process;
}


void CodegenCoreneuronCppVisitor::print_rhs_d_shadow_variables() {
    if (info.point_process) {
        printer->fmt_line("double* shadow_rhs = nt->{};", naming::NTHREAD_RHS_SHADOW);
        printer->fmt_line("double* shadow_d = nt->{};", naming::NTHREAD_D_SHADOW);
    }
}


void CodegenCoreneuronCppVisitor::print_nrn_cur_matrix_shadow_update() {
    if (info.point_process) {
        printer->add_line("shadow_rhs[id] = rhs;");
        printer->add_line("shadow_d[id] = g;");
    } else {
        auto rhs_op = operator_for_rhs();
        auto d_op = operator_for_d();
        printer->fmt_line("vec_rhs[node_id] {} rhs;", rhs_op);
        printer->fmt_line("vec_d[node_id] {} g;", d_op);
    }
}


void CodegenCoreneuronCppVisitor::print_nrn_cur_matrix_shadow_reduction() {
    auto rhs_op = operator_for_rhs();
    auto d_op = operator_for_d();
    if (info.point_process) {
        printer->add_line("int node_id = node_index[id];");
        printer->fmt_line("vec_rhs[node_id] {} shadow_rhs[id];", rhs_op);
        printer->fmt_line("vec_d[node_id] {} shadow_d[id];", d_op);
    }
}
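
/*
 * Sketch of the code produced by the two routines above for a POINT_PROCESS
 * (density mechanisms instead write straight to vec_rhs/vec_d in the kernel):
 *
 * \code{.cpp}
 *     // inside the nrn_cur kernel: store into per-instance shadow vectors
 *     shadow_rhs[id] = rhs;
 *     shadow_d[id] = g;
 *
 *     // separate reduction loop: fold the shadow vectors into the matrix
 *     int node_id = node_index[id];
 *     vec_rhs[node_id] -= shadow_rhs[id];
 *     vec_d[node_id] += shadow_d[id];
 * \endcode
 *
 * The "-=" and "+=" shown here are illustrative; the exact operators come
 * from operator_for_rhs() and operator_for_d().
 */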


/**
 * In the current implementation of the CPU/C++ backend we need to emit the atomic
 * pragma only for the PROTECT construct (the atomic reduction requirement for
 * other cases on CPU is handled via separate shadow vectors).
 */
void CodegenCoreneuronCppVisitor::print_atomic_reduction_pragma() {
    printer->add_line("#pragma omp atomic update");
}


void CodegenCoreneuronCppVisitor::print_device_method_annotation() {
    // backend specific, nothing for cpu
}


void CodegenCoreneuronCppVisitor::print_global_method_annotation() {
    // backend specific, nothing for cpu
}


bool CodegenCoreneuronCppVisitor::optimize_ion_variable_copies() const {
    return optimize_ionvar_copies;
}


void CodegenCoreneuronCppVisitor::print_memory_allocation_routine() const {
    printer->add_newline(2);
    auto args = "size_t num, size_t size, size_t alignment = 64";
    printer->fmt_push_block("static inline void* mem_alloc({})", args);
    printer->add_line(
        "size_t aligned_size = ((num*size + alignment - 1) / alignment) * alignment;");
    printer->add_line("void* ptr = aligned_alloc(alignment, aligned_size);");
    printer->add_line("memset(ptr, 0, aligned_size);");
    printer->add_line("return ptr;");
    printer->pop_block();

    printer->add_newline(2);
    printer->push_block("static inline void mem_free(void* ptr)");
    printer->add_line("free(ptr);");
    printer->pop_block();
}
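
/*
 * The rounding in the generated mem_alloc guarantees that the byte count
 * passed to aligned_alloc is a multiple of the alignment, as the C standard
 * requires. Worked example: num = 10, size = sizeof(double) = 8 and
 * alignment = 64 give ((80 + 63) / 64) * 64 = 128 bytes, i.e. 80 bytes of
 * payload padded up to two 64-byte cache lines, zero-initialised by memset.
 */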


void CodegenCoreneuronCppVisitor::print_abort_routine() const {
    printer->add_newline(2);
    printer->push_block("static inline void coreneuron_abort()");
    printer->add_line("abort();");
    printer->pop_block();
}


/****************************************************************************************/
/*                       Printing routines for code generation                          */
/****************************************************************************************/


void CodegenCoreneuronCppVisitor::print_function_prototypes() {
    if (info.functions.empty() && info.procedures.empty()) {
        return;
    }

    printer->add_newline(2);
    for (const auto& node: info.functions) {
        print_function_declaration(*node, node->get_node_name());
        printer->add_text(';');
        printer->add_newline();
    }
    for (const auto& node: info.procedures) {
        print_function_declaration(*node, node->get_node_name());
        printer->add_text(';');
        printer->add_newline();
    }
}


void CodegenCoreneuronCppVisitor::print_check_table_thread_function() {
    if (info.table_count == 0) {
        return;
    }

    printer->add_newline(2);
    auto name = method_name("check_table_thread");
    auto parameters = get_parameter_str(external_method_parameters(true));

    printer->fmt_push_block("static void {} ({})", name, parameters);
    printer->add_line("setup_instance(nt, ml);");
    printer->fmt_line("auto* const inst = static_cast<{0}*>(ml->instance);", instance_struct());
    printer->add_line("double v = 0;");

    for (const auto& function: info.functions_with_table) {
        auto method_name_str = table_update_function_name(function->get_node_name());
        auto arguments = internal_method_arguments();
        printer->fmt_line("{}({});", method_name_str, arguments);
    }

    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_function_or_procedure(
    const ast::Block& node,
    const std::string& name,
    const std::unordered_set<CppObjectSpecifier>& specifiers) {
    printer->add_newline(2);
    print_function_declaration(node, name, specifiers);
    printer->add_text(" ");
    printer->push_block();

    // a function requires the declaration of a return variable
    if (node.is_function_block()) {
        auto type = default_float_data_type();
        printer->fmt_line("{} ret_{} = 0.0;", type, name);
    } else {
        printer->fmt_line("int ret_{} = 0;", name);
    }

    print_statement_block(*node.get_statement_block(), false, false);
    printer->fmt_line("return ret_{};", name);
    printer->pop_block();
}
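
/*
 * For a FUNCTION block the wrapper printed above looks roughly like this
 * (hypothetical mechanism with SUFFIX kca; the exact declaration comes from
 * print_function_declaration()):
 *
 * \code{.cpp}
 *     inline double alpha_kca(int id, int pnodecount, kca_Instance* inst,
 *                             double* data, const Datum* indexes,
 *                             ThreadDatum* thread, NrnThread* nt, double v) {
 *         double ret_alpha = 0.0;
 *         // ... translated statements of the FUNCTION body ...
 *         return ret_alpha;
 *     }
 * \endcode
 */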


void CodegenCoreneuronCppVisitor::print_function_procedure_helper(const ast::Block& node) {
    auto name = node.get_node_name();

    if (info.function_uses_table(name)) {
        auto new_name = "f_" + name;
        print_function_or_procedure(node, new_name);
        print_table_check_function(node);
        print_table_replacement_function(node);
    } else {
        print_function_or_procedure(node, name);
    }
}


/****************************************************************************************/
/*                           Code-specific helper routines                              */
/****************************************************************************************/

void CodegenCoreneuronCppVisitor::add_variable_tqitem(std::vector<IndexVariableInfo>& variables) {
    // for a non-artificial cell, when net_receive buffering is enabled,
    // tqitem is an offset
    if (info.net_send_used) {
        if (info.artificial_cell) {
            variables.emplace_back(make_symbol(naming::TQITEM_VARIABLE), true);
        } else {
            variables.emplace_back(make_symbol(naming::TQITEM_VARIABLE), false, false, true);
            variables.back().is_constant = true;
        }
        info.tqitem_index = static_cast<int>(variables.size() - 1);
    }
}

void CodegenCoreneuronCppVisitor::add_variable_point_process(
    std::vector<IndexVariableInfo>& variables) {
    /// note that this variable is not printed in the neuron implementation
    if (info.artificial_cell) {
        variables.emplace_back(make_symbol(naming::POINT_PROCESS_VARIABLE), true);
    } else {
        variables.emplace_back(make_symbol(naming::POINT_PROCESS_VARIABLE), false, false, true);
        variables.back().is_constant = true;
    }
}

std::string CodegenCoreneuronCppVisitor::internal_method_arguments() {
    return get_arg_str(internal_method_parameters());
}


/**
 * @todo: figure out how to correctly handle qualifiers
 */
CodegenCoreneuronCppVisitor::ParamVector CodegenCoreneuronCppVisitor::internal_method_parameters() {
    ParamVector params = {{"", "int", "", "id"},
                          {"", "int", "", "pnodecount"},
                          {"", fmt::format("{}*", instance_struct()), "", "inst"}};
    if (ion_variable_struct_required()) {
        params.emplace_back("", "IonCurVar&", "", "ionvar");
    }
    ParamVector other_params = {{"", "double*", "", "data"},
                                {"const ", "Datum*", "", "indexes"},
                                {"", "ThreadDatum*", "", "thread"},
                                {"", "NrnThread*", "", "nt"},
                                {"", "double", "", "v"}};
    params.insert(params.end(), other_params.begin(), other_params.end());
    return params;
}


std::string CodegenCoreneuronCppVisitor::external_method_arguments() noexcept {
    return get_arg_str(external_method_parameters());
}


CodegenCoreneuronCppVisitor::ParamVector CodegenCoreneuronCppVisitor::external_method_parameters(
    bool table) noexcept {
    ParamVector args = {{"", "int", "", "id"},
                        {"", "int", "", "pnodecount"},
                        {"", "double*", "", "data"},
                        {"", "Datum*", "", "indexes"},
                        {"", "ThreadDatum*", "", "thread"},
                        {"", "NrnThread*", "", "nt"},
                        {"", "Memb_list*", "", "ml"}};
    if (table) {
        args.emplace_back("", "int", "", "tml_id");
    } else {
        args.emplace_back("", "double", "", "v");
    }
    return args;
}
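
/*
 * Rendered through get_parameter_str(), the vector above yields the fixed
 * signature shared by the externally callable kernels:
 *
 * \code{.cpp}
 *     int id, int pnodecount, double* data, Datum* indexes,
 *     ThreadDatum* thread, NrnThread* nt, double v
 * \endcode
 *
 * with the trailing "double v" replaced by "int tml_id" when table == true,
 * since the check_table_thread wrapper receives a mechanism list id instead
 * of a voltage.
 */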


std::string CodegenCoreneuronCppVisitor::nrn_thread_arguments() const {
    if (ion_variable_struct_required()) {
        return "id, pnodecount, ionvar, data, indexes, thread, nt, ml, v";
    }
    return "id, pnodecount, data, indexes, thread, nt, ml, v";
}


/**
 * Function call arguments when the function or procedure is defined in the
 * same mod file itself
 */
std::string CodegenCoreneuronCppVisitor::nrn_thread_internal_arguments() {
    return get_arg_str(internal_method_parameters());
}

std::pair<CodegenCoreneuronCppVisitor::ParamVector, CodegenCoreneuronCppVisitor::ParamVector>
CodegenCoreneuronCppVisitor::function_table_parameters(const ast::FunctionTableBlock& node) {
    auto params = internal_method_parameters();
    for (const auto& i: node.get_parameters()) {
        params.emplace_back("", "double", "", i->get_node_name());
    }
    return {params, internal_method_parameters()};
}


/**
 * Replace commonly used variables in verbatim blocks with their
 * corresponding variable names in the new code generation backend.
 */
std::string CodegenCoreneuronCppVisitor::replace_if_verbatim_variable(std::string name) {
    if (naming::VERBATIM_VARIABLES_MAPPING.find(name) != naming::VERBATIM_VARIABLES_MAPPING.end()) {
        name = naming::VERBATIM_VARIABLES_MAPPING.at(name);
    }

    /**
     * If the function is defined in the same mod file then the arguments
     * must contain the mechanism instance as well.
     */
    if (name == naming::THREAD_ARGS) {
        if (internal_method_call_encountered) {
            name = nrn_thread_internal_arguments();
            internal_method_call_encountered = false;
        } else {
            name = nrn_thread_arguments();
        }
    }
    if (name == naming::THREAD_ARGS_PROTO) {
        name = get_parameter_str(external_method_parameters());
    }
    return name;
}
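
/*
 * Minimal sketch of the substitutions performed above (the exact mapping
 * lives in naming::VERBATIM_VARIABLES_MAPPING):
 *
 * \code{.cpp}
 *     replace_if_verbatim_variable("_nt");                // -> "nt"
 *     replace_if_verbatim_variable("_threadargs_");       // -> internal/external argument list
 *     replace_if_verbatim_variable("_threadargsproto_");  // -> external parameter list
 * \endcode
 */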


/**
 * Process commonly used constructs in verbatim blocks.
 * @todo : this is still ad-hoc and requires re-implementation to
 * handle it more elegantly.
 */
std::string CodegenCoreneuronCppVisitor::process_verbatim_text(std::string const& text) {
    parser::CDriver driver;
    driver.scan_string(text);
    auto tokens = driver.all_tokens();
    std::string result;
    for (size_t i = 0; i < tokens.size(); i++) {
        auto token = tokens[i];

        // check if we have a function call in the verbatim block where
        // the function is defined in the same mod file
        if (program_symtab->is_method_defined(token) && i + 1 < tokens.size() &&
            tokens[i + 1] == "(") {
            internal_method_call_encountered = true;
        }
        result += process_verbatim_token(token);
    }
    return result;
}


std::string CodegenCoreneuronCppVisitor::register_mechanism_arguments() const {
    auto nrn_channel_info_var_name = get_channel_info_var_name();
    auto nrn_cur = nrn_cur_required() ? method_name(naming::NRN_CUR_METHOD) : "nullptr";
    auto nrn_state = nrn_state_required() ? method_name(naming::NRN_STATE_METHOD) : "nullptr";
    auto nrn_alloc = method_name(naming::NRN_ALLOC_METHOD);
    auto nrn_init = method_name(naming::NRN_INIT_METHOD);
    auto const nrn_private_constructor = method_name(naming::NRN_PRIVATE_CONSTRUCTOR_METHOD);
    auto const nrn_private_destructor = method_name(naming::NRN_PRIVATE_DESTRUCTOR_METHOD);
    return fmt::format("{}, {}, {}, nullptr, {}, {}, {}, {}, first_pointer_var_index()",
                       nrn_channel_info_var_name,
                       nrn_alloc,
                       nrn_cur,
                       nrn_state,
                       nrn_init,
                       nrn_private_constructor,
                       nrn_private_destructor);
}


void CodegenCoreneuronCppVisitor::append_conc_write_statements(
    std::vector<ShadowUseStatement>& statements,
    const Ion& ion,
    const std::string& concentration) {
    int index = 0;
    if (ion.is_intra_cell_conc(concentration)) {
        index = 1;
    } else if (ion.is_extra_cell_conc(concentration)) {
        index = 2;
    } else {
        /// \todo Unhandled case in the neuron implementation
        throw std::logic_error(fmt::format("codegen error for {} ion", ion.name));
    }
    auto ion_type_name = fmt::format("{}_type", ion.name);
    auto lhs = fmt::format("int {}", ion_type_name);
    auto op = "=";
    auto rhs = get_variable_name(ion_type_name);
    statements.push_back(ShadowUseStatement{lhs, op, rhs});

    auto ion_name = ion.name;
    auto conc_var_name = get_variable_name(naming::ION_VARNAME_PREFIX + concentration);
    auto style_var_name = get_variable_name("style_" + ion_name);
    auto statement = fmt::format(
        "nrn_wrote_conc({}_type,"
        " &({}),"
        " {},"
        " {},"
        " nrn_ion_global_map,"
        " {},"
        " nt->_ml_list[{}_type]->_nodecount_padded)",
        ion_name,
        conc_var_name,
        index,
        style_var_name,
        get_variable_name(naming::CELSIUS_VARIABLE),
        ion_name);

    statements.push_back(ShadowUseStatement{statement, "", ""});
}
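
/*
 * For a hypothetical mechanism that writes the intracellular concentration
 * cai, the statements collected above print roughly as (the actual variable
 * accessors come from get_variable_name()):
 *
 * \code{.cpp}
 *     int ca_type = <global ca_type>;
 *     nrn_wrote_conc(ca_type, &(<ion_cai accessor>), 1, <style_ca accessor>,
 *                    nrn_ion_global_map, <celsius accessor>,
 *                    nt->_ml_list[ca_type]->_nodecount_padded);
 * \endcode
 *
 * where index 1 marks an intracellular and 2 an extracellular concentration.
 */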


/****************************************************************************************/
/*                  Code-specific printing routines for code generation                 */
/****************************************************************************************/


void CodegenCoreneuronCppVisitor::print_first_pointer_var_index_getter() {
    printer->add_newline(2);
    printer->push_block("static inline int first_pointer_var_index()");
    printer->fmt_line("return {};", info.first_pointer_var_index);
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_first_random_var_index_getter() {
    printer->add_newline(2);
    printer->push_block("static inline int first_random_var_index()");
    printer->fmt_line("return {};", info.first_random_var_index);
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_num_variable_getter() {
    printer->add_newline(2);
    printer->push_block("static inline int float_variables_size()");
    printer->fmt_line("return {};", float_variables_size());
    printer->pop_block();

    printer->add_newline(2);
    printer->push_block("static inline int int_variables_size()");
    printer->fmt_line("return {};", int_variables_size());
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_net_receive_arg_size_getter() {
    if (!net_receive_exist()) {
        return;
    }
    printer->add_newline(2);
    printer->push_block("static inline int num_net_receive_args()");
    printer->fmt_line("return {};", info.num_net_receive_parameters);
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_mech_type_getter() {
    printer->add_newline(2);
    printer->push_block("static inline int get_mech_type()");
    // false => get it from the host-only global struct, not the instance structure
    printer->fmt_line("return {};", get_variable_name("mech_type", false));
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_memb_list_getter() {
    printer->add_newline(2);
    printer->push_block("static inline Memb_list* get_memb_list(NrnThread* nt)");
    printer->push_block("if (!nt->_ml_list)");
    printer->add_line("return nullptr;");
    printer->pop_block();
    printer->add_line("return nt->_ml_list[get_mech_type()];");
    printer->pop_block();
}


std::string CodegenCoreneuronCppVisitor::namespace_name() {
    return "coreneuron";
}

/**
 * \details There are three types of thread variables currently considered:
 *      - top local thread variables
 *      - thread variables in the mod file
 *      - thread variables for the solver
 *
 * These variables are allocated into different thread structures and have
 * corresponding thread ids. Thread ids start from 0. In the mod2c implementation,
 * thread_data_index is increased at various places and is used to
 * decide the index of the thread.
 */
void CodegenCoreneuronCppVisitor::print_thread_getters() {
    if (info.vectorize && info.derivimplicit_used()) {
        int tid = info.derivimplicit_var_thread_id;
        int list = info.derivimplicit_list_num;

        // clang-format off
        printer->add_newline(2);
        printer->add_line("/** thread specific helper routines for derivimplicit */");

        printer->add_newline(1);
        printer->fmt_push_block("static inline int* deriv{}_advance(ThreadDatum* thread)", list);
        printer->fmt_line("return &(thread[{}].i);", tid);
        printer->pop_block();
        printer->add_newline();

        printer->fmt_push_block("static inline int dith{}()", list);
        printer->fmt_line("return {};", tid+1);
        printer->pop_block();
        printer->add_newline();

        printer->fmt_push_block("static inline void** newtonspace{}(ThreadDatum* thread)", list);
        printer->fmt_line("return &(thread[{}]._pvoid);", tid+2);
        printer->pop_block();
    }

    if (info.vectorize && !info.thread_variables.empty()) {
        printer->add_newline(2);
        printer->add_line("/** tid for thread variables */");
        printer->push_block("static inline int thread_var_tid()");
        printer->fmt_line("return {};", info.thread_var_thread_id);
        printer->pop_block();
    }

    if (info.vectorize && !info.top_local_variables.empty()) {
        printer->add_newline(2);
        printer->add_line("/** tid for top local thread variables */");
        printer->push_block("static inline int top_local_var_tid()");
        printer->fmt_line("return {};", info.top_local_thread_id);
        printer->pop_block();
    }
    // clang-format on
}
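
/*
 * With derivimplicit in use and, say, thread id 0 and list number 1 (both
 * values depend on the mod file), the generated helpers read:
 *
 * \code{.cpp}
 *     static inline int* deriv1_advance(ThreadDatum* thread) {
 *         return &(thread[0].i);
 *     }
 *
 *     static inline int dith1() {
 *         return 1;
 *     }
 *
 *     static inline void** newtonspace1(ThreadDatum* thread) {
 *         return &(thread[2]._pvoid);
 *     }
 * \endcode
 */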


/****************************************************************************************/
/*                         Routines for returning variable name                         */
/****************************************************************************************/


std::string CodegenCoreneuronCppVisitor::float_variable_name(const SymbolType& symbol,
                                                             bool use_instance) const {
    auto name = symbol->get_name();
    auto dimension = symbol->get_length();
    auto position = position_of_float_var(name);
    if (symbol->is_array()) {
        if (use_instance) {
            return fmt::format("(inst->{}+id*{})", name, dimension);
        }
        return fmt::format("(data + {}*pnodecount + id*{})", position, dimension);
    }
    if (use_instance) {
        return fmt::format("inst->{}[id]", name);
    }
    return fmt::format("data[{}*pnodecount + id]", position);
}


std::string CodegenCoreneuronCppVisitor::int_variable_name(const IndexVariableInfo& symbol,
                                                           const std::string& name,
                                                           bool use_instance) const {
    auto position = position_of_int_var(name);
    // clang-format off
    if (symbol.is_index) {
        if (use_instance) {
            return fmt::format("inst->{}[{}]", name, position);
        }
        return fmt::format("indexes[{}]", position);
    }
    if (symbol.is_integer) {
        if (use_instance) {
            return fmt::format("inst->{}[{}*pnodecount+id]", name, position);
        }
        return fmt::format("indexes[{}*pnodecount+id]", position);
    }
    if (use_instance) {
        return fmt::format("inst->{}[indexes[{}*pnodecount + id]]", name, position);
    }
    auto data = symbol.is_vdata ? "_vdata" : "_data";
    return fmt::format("nt->{}[indexes[{}*pnodecount + id]]", data, position);
    // clang-format on
}


std::string CodegenCoreneuronCppVisitor::global_variable_name(const SymbolType& symbol,
                                                              bool use_instance) const {
    if (use_instance) {
        return fmt::format("inst->{}->{}", naming::INST_GLOBAL_MEMBER, symbol->get_name());
    } else {
        return fmt::format("{}.{}", global_struct_instance(), symbol->get_name());
    }
}


std::string CodegenCoreneuronCppVisitor::get_variable_name(const std::string& name,
                                                           bool use_instance) const {
    const std::string& varname = update_if_ion_variable_name(name);

    // clang-format off
    auto symbol_comparator = [&varname](const SymbolType& sym) {
                                 return varname == sym->get_name();
                             };

    auto index_comparator = [&varname](const IndexVariableInfo& var) {
                                return varname == var.symbol->get_name();
                            };
    // clang-format on

    // float variable
    auto f = std::find_if(codegen_float_variables.begin(),
                          codegen_float_variables.end(),
                          symbol_comparator);
    if (f != codegen_float_variables.end()) {
        return float_variable_name(*f, use_instance);
    }

    // integer variable
    auto i =
        std::find_if(codegen_int_variables.begin(), codegen_int_variables.end(), index_comparator);
    if (i != codegen_int_variables.end()) {
        auto full_name = int_variable_name(*i, varname, use_instance);
        auto pos = position_of_int_var(varname);

        if (info.semantics[pos].name == naming::RANDOM_SEMANTIC) {
            return "(nrnran123_State*) " + full_name;
        }
        return full_name;
    }

    // global variable
    auto g = std::find_if(codegen_global_variables.begin(),
                          codegen_global_variables.end(),
                          symbol_comparator);
    if (g != codegen_global_variables.end()) {
        return global_variable_name(*g, use_instance);
    }

    if (varname == naming::NTHREAD_DT_VARIABLE) {
        return std::string("nt->_") + naming::NTHREAD_DT_VARIABLE;
    }

    // t in the net_receive method is an argument to the function and hence it
    // should be used instead of nt->_t, which is the current time of the thread
    if (varname == naming::NTHREAD_T_VARIABLE && !printing_net_receive) {
        return std::string("nt->_") + naming::NTHREAD_T_VARIABLE;
    }

    auto const iter =
        std::find_if(info.neuron_global_variables.begin(),
                     info.neuron_global_variables.end(),
                     [&varname](auto const& entry) { return entry.first->get_name() == varname; });
    if (iter != info.neuron_global_variables.end()) {
        std::string ret;
        if (use_instance) {
            ret = "*(inst->";
        }
        ret.append(varname);
        if (use_instance) {
            ret.append(")");
        }
        return ret;
    }

    // otherwise return the original name
    return varname;
}
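
/*
 * Illustration of the lookup order above for a hypothetical mechanism with a
 * RANGE variable m and a GLOBAL variable kfactor:
 *
 * \code{.cpp}
 *     get_variable_name("m");        // -> "inst->m[id]"           (float variable)
 *     get_variable_name("kfactor");  // -> "inst->global->kfactor" (global variable)
 *     get_variable_name("dt");       // -> "nt->_dt"               (NrnThread field)
 * \endcode
 */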


/****************************************************************************************/
/*                      Main printing routines for code generation                      */
/****************************************************************************************/


void CodegenCoreneuronCppVisitor::print_standard_includes() {
    printer->add_newline();
    printer->add_multi_line(R"CODE(
        #include <math.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>
    )CODE");
}


void CodegenCoreneuronCppVisitor::print_coreneuron_includes() {
    printer->add_newline();
    printer->add_multi_line(R"CODE(
        #include <coreneuron/gpu/nrn_acc_manager.hpp>
        #include <coreneuron/mechanism/mech/mod2c_core_thread.hpp>
        #include <coreneuron/mechanism/register_mech.hpp>
        #include <coreneuron/nrnconf.h>
        #include <coreneuron/nrniv/nrniv_decl.h>
        #include <coreneuron/sim/multicore.hpp>
        #include <coreneuron/sim/scopmath/newton_thread.hpp>
        #include <coreneuron/utils/ivocvect.hpp>
        #include <coreneuron/utils/nrnoc_aux.hpp>
        #include <coreneuron/utils/randoms/nrnran123.h>
    )CODE");
    if (info.eigen_newton_solver_exist) {
        printer->add_multi_line(nmodl::solvers::newton_hpp);
    }
    if (info.eigen_linear_solver_exist) {
        if (std::accumulate(info.state_vars.begin(),
                            info.state_vars.end(),
                            0,
                            [](int l, const SymbolType& variable) {
                                return l + variable->get_length();
                            }) > 4) {
            printer->add_multi_line(nmodl::solvers::crout_hpp);
        } else {
            printer->add_line("#include <Eigen/Dense>");
            printer->add_line("#include <Eigen/LU>");
        }
    }
}


void CodegenCoreneuronCppVisitor::print_sdlists_init(bool print_initializers) {
    if (info.primes_size == 0) {
        return;
    }
    const auto count_prime_variables = [](auto size, const SymbolType& symbol) {
        return size += symbol->get_length();
    };
    const auto prime_variables_by_order_size =
        std::accumulate(info.prime_variables_by_order.begin(),
                        info.prime_variables_by_order.end(),
                        0,
                        count_prime_variables);
    if (info.primes_size != prime_variables_by_order_size) {
        throw std::runtime_error{
            fmt::format("primes_size = {} differs from the total length of "
                        "prime_variables_by_order = {}, this should not happen.",
                        info.primes_size,
                        prime_variables_by_order_size)};
    }
    auto const initializer_list = [&](auto const& primes, const char* prefix) -> std::string {
        if (!print_initializers) {
            return {};
        }
        std::string list{"{"};
        for (auto iter = primes.begin(); iter != primes.end(); ++iter) {
            auto const& prime = *iter;
            list.append(std::to_string(position_of_float_var(prefix + prime->get_name())));
            if (std::next(iter) != primes.end()) {
                list.append(", ");
            }
        }
        list.append("}");
        return list;
    };
    printer->fmt_line("int slist1[{}]{};",
                      info.primes_size,
                      initializer_list(info.prime_variables_by_order, ""));
    printer->fmt_line("int dlist1[{}]{};",
                      info.primes_size,
                      initializer_list(info.prime_variables_by_order, "D"));
    codegen_global_variables.push_back(make_symbol("slist1"));
    codegen_global_variables.push_back(make_symbol("dlist1"));
    // additional list for derivimplicit method
    if (info.derivimplicit_used()) {
        auto primes = program_symtab->get_variables_with_properties(NmodlType::prime_name);
        printer->fmt_line("int slist2[{}]{};", info.primes_size, initializer_list(primes, ""));
        codegen_global_variables.push_back(make_symbol("slist2"));
    }
}
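
/*
 * For a hypothetical mod file with STATE {m h}, the lists printed above come
 * out as, e.g.,
 *
 * \code{.cpp}
 *     int slist1[2]{3, 4};  // positions of m and h in the float variable layout
 *     int dlist1[2]{5, 6};  // positions of the derivatives Dm and Dh
 * \endcode
 *
 * i.e. slist1 indexes the state variables themselves and dlist1 their
 * derivatives (prefix "D"), with the positions taken from
 * position_of_float_var().
 */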


CodegenCoreneuronCppVisitor::ParamVector CodegenCoreneuronCppVisitor::functor_params() {
    return ParamVector{{"", "NrnThread*", "", "nt"},
                       {"", fmt::format("{}*", instance_struct()), "", "inst"},
                       {"", "int", "", "id"},
                       {"", "int", "", "pnodecount"},
                       {"", "double", "", "v"},
                       {"const ", "Datum*", "", "indexes"},
                       {"", "double*", "", "data"},
                       {"", "ThreadDatum*", "", "thread"}};
}


/**
 * \details Variables required for the type of ion, type of point process etc. are
 * of static int type. For the C++ backend, it's ok to have
 * these variables as file-scoped static variables.
 *
 * Initial values of state variables (h0) are also defined as static
 * variables. Note that a state could be an ion variable and it could
 * also be a range variable, hence look it up in the symbol table first.
 *
 * When the model is not vectorized (shouldn't be the case in coreneuron)
 * the top local variables become static variables.
 *
 * Note that static variables are already initialized to 0. We do the
 * same for some variables to keep the same code as neuron.
 */
// NOLINTNEXTLINE(readability-function-cognitive-complexity)
void CodegenCoreneuronCppVisitor::print_mechanism_global_var_structure(bool print_initializers) {
    const auto value_initialize = print_initializers ? "{}" : "";

    auto float_type = default_float_data_type();
    printer->add_newline(2);
    printer->add_line("/** all global variables */");
    printer->fmt_push_block("struct {}", global_struct());

    for (const auto& ion: info.ions) {
        auto name = fmt::format("{}_type", ion.name);
        printer->fmt_line("int {}{};", name, value_initialize);
        codegen_global_variables.push_back(make_symbol(name));
    }

    if (info.point_process) {
        printer->fmt_line("int point_type{};", value_initialize);
        codegen_global_variables.push_back(make_symbol("point_type"));
    }

    for (const auto& var: info.state_vars) {
        auto name = var->get_name() + "0";
        auto symbol = program_symtab->lookup(name);
        if (symbol == nullptr) {
            printer->fmt_line("{} {}{};", float_type, name, value_initialize);
            codegen_global_variables.push_back(make_symbol(name));
        }
    }

    // NEURON and CoreNEURON add "v" to global variables when vectorize
    // is false. But as v is always a local variable and passed as an argument,
    // we don't need to use a global variable v.

    auto& top_locals = info.top_local_variables;
    if (!info.vectorize && !top_locals.empty()) {
        for (const auto& var: top_locals) {
            auto name = var->get_name();
            auto length = var->get_length();
            if (var->is_array()) {
                printer->fmt_line("{} {}[{}] /* TODO init top-local-array */;",
                                  float_type,
                                  name,
                                  length);
            } else {
                printer->fmt_line("{} {} /* TODO init top-local */;", float_type, name);
            }
            codegen_global_variables.push_back(var);
        }
    }

    if (!info.thread_variables.empty()) {
        printer->fmt_line("int thread_data_in_use{};", value_initialize);
        printer->fmt_line("{} thread_data[{}] /* TODO init thread_data */;",
                          float_type,
                          info.thread_var_data_size);
        codegen_global_variables.push_back(make_symbol("thread_data_in_use"));
        auto symbol = make_symbol("thread_data");
        symbol->set_as_array(info.thread_var_data_size);
        codegen_global_variables.push_back(symbol);
    }

    // TODO: remove this entirely?
    printer->fmt_line("int reset{};", value_initialize);
    codegen_global_variables.push_back(make_symbol("reset"));

    printer->fmt_line("int mech_type{};", value_initialize);
    codegen_global_variables.push_back(make_symbol("mech_type"));

    for (const auto& var: info.global_variables) {
        auto name = var->get_name();
        auto length = var->get_length();
        if (var->is_array()) {
            printer->fmt_line("{} {}[{}] /* TODO init const-array */;", float_type, name, length);
        } else {
            double value{};
            if (auto const& value_ptr = var->get_value()) {
                value = *value_ptr;
            }
            printer->fmt_line("{} {}{};",
                              float_type,
                              name,
                              print_initializers ? fmt::format("{{{:g}}}", value) : std::string{});
        }
        codegen_global_variables.push_back(var);
    }

    for (const auto& var: info.constant_variables) {
        auto const name = var->get_name();
        auto* const value_ptr = var->get_value().get();
        double const value{value_ptr ? *value_ptr : 0};
        printer->fmt_line("{} {}{};",
                          float_type,
                          name,
                          print_initializers ? fmt::format("{{{:g}}}", value) : std::string{});
        codegen_global_variables.push_back(var);
    }

    print_sdlists_init(print_initializers);

    if (info.table_count > 0) {
        printer->fmt_line("double usetable{};", print_initializers ? "{1}" : "");
        codegen_global_variables.push_back(make_symbol(naming::USE_TABLE_VARIABLE));

        for (const auto& block: info.functions_with_table) {
            const auto& name = block->get_node_name();
            printer->fmt_line("{} tmin_{}{};", float_type, name, value_initialize);
            printer->fmt_line("{} mfac_{}{};", float_type, name, value_initialize);
            codegen_global_variables.push_back(make_symbol("tmin_" + name));
            codegen_global_variables.push_back(make_symbol("mfac_" + name));
        }

        for (const auto& variable: info.table_statement_variables) {
            auto const name = "t_" + variable->get_name();
            auto const num_values = variable->get_num_values();
            if (variable->is_array()) {
                int array_len = variable->get_length();
                printer->fmt_line(
                    "{} {}[{}][{}]{};", float_type, name, array_len, num_values, value_initialize);
            } else {
                printer->fmt_line("{} {}[{}]{};", float_type, name, num_values, value_initialize);
            }
            codegen_global_variables.push_back(make_symbol(name));
        }
    }

    print_global_struct_function_table_ptrs();

    if (info.vectorize && info.thread_data_index) {
        printer->fmt_line("ThreadDatum ext_call_thread[{}]{};",
                          info.thread_data_index,
                          value_initialize);
        codegen_global_variables.push_back(make_symbol("ext_call_thread"));
    }

    printer->pop_block(";");

    print_global_var_struct_assertions();
    print_global_var_struct_decl();
}


/**
 * Print structs that encapsulate information about scalar and
 * vector elements of type global and thread variables.
 */
void CodegenCoreneuronCppVisitor::print_global_variables_for_hoc() {
    auto variable_printer =
        [&](const std::vector<SymbolType>& variables, bool if_array, bool if_vector) {
            for (const auto& variable: variables) {
                if (variable->is_array() == if_array) {
                    // false => do not use the instance struct, which is not
                    // defined in the global declaration that we are printing
                    auto name = get_variable_name(variable->get_name(), false);
                    auto ename = add_escape_quote(variable->get_name() + "_" + info.mod_suffix);
                    auto length = variable->get_length();
                    if (if_vector) {
                        printer->fmt_line("{{{}, {}, {}}},", ename, name, length);
                    } else {
                        printer->fmt_line("{{{}, &{}}},", ename, name);
                    }
                }
            }
        };

    auto globals = info.global_variables;
    auto thread_vars = info.thread_variables;

    if (info.table_count > 0) {
        globals.push_back(make_symbol(naming::USE_TABLE_VARIABLE));
    }

    printer->add_newline(2);
    printer->add_line("/** connect global (scalar) variables to hoc -- */");
    printer->add_line("static DoubScal hoc_scalar_double[] = {");
    printer->increase_indent();
    variable_printer(globals, false, false);
    variable_printer(thread_vars, false, false);
    printer->add_line("{nullptr, nullptr}");
    printer->decrease_indent();
    printer->add_line("};");

    printer->add_newline(2);
    printer->add_line("/** connect global (array) variables to hoc -- */");
    printer->add_line("static DoubVec hoc_vector_double[] = {");
    printer->increase_indent();
    variable_printer(globals, true, true);
    variable_printer(thread_vars, true, true);
    printer->add_line("{nullptr, nullptr, 0}");
    printer->decrease_indent();
    printer->add_line("};");
}


/**
 * Return the registration type for a given BEFORE/AFTER block
 * \param block A BEFORE/AFTER block being registered
 *
 * Depending on the block type, i.e. BEFORE or AFTER, and also the type
 * of its associated block, i.e. BREAKPOINT, INITIAL, SOLVE or
 * STEP, the registration type (as an integer) is calculated.
 * These values are then interpreted by CoreNEURON internally.
 */
static std::string get_register_type_for_ba_block(const ast::Block* block) {
    std::string register_type{};
    BAType ba_type{};
    /// before blocks have value 10 and after blocks 20
    if (block->is_before_block()) {
        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers)
        register_type = "BAType::Before";
        ba_type =
            dynamic_cast<const ast::BeforeBlock*>(block)->get_bablock()->get_type()->get_value();
    } else {
        // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers)
        register_type = "BAType::After";
        ba_type =
            dynamic_cast<const ast::AfterBlock*>(block)->get_bablock()->get_type()->get_value();
    }

    /// associated blocks have different values (1 to 4) based on the type.
    /// These values are based on neuron/coreneuron implementation details.
    if (ba_type == BATYPE_BREAKPOINT) {
        register_type += " + BAType::Breakpoint";
    } else if (ba_type == BATYPE_SOLVE) {
        register_type += " + BAType::Solve";
    } else if (ba_type == BATYPE_INITIAL) {
        register_type += " + BAType::Initial";
    } else if (ba_type == BATYPE_STEP) {
        register_type += " + BAType::Step";
    } else {
        throw std::runtime_error("Unhandled Before/After type encountered during code generation");
    }
    return register_type;
}
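
/*
 * Example: a block written as "BEFORE BREAKPOINT { ... }" yields the string
 * "BAType::Before + BAType::Breakpoint", which CoreNEURON decodes back into
 * the numeric scheme described above when the block is registered via
 * hoc_reg_ba().
 */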


/**
 * \details Every mod file has a register function to connect with the simulator.
 * Various information about the mechanism and callbacks get registered with
 * the simulator using the suffix_reg() function.
 *
 * Here are the details:
 * - Some callbacks are excluded based on the solver and watch statements in use.
 * - If nrn_get_mechtype returns -1, the mechanism is not used in the
 *   context of neuron execution and hence can be ignored in coreneuron
 *   execution.
 * - Ions are internally defined and their types can be queried similarly to
 *   other mechanisms.
 * - hoc_register_var may not be needed in the context of coreneuron.
 * - We assume net receive buffering is on. This is because the generated code is
 *   compatible with cpu as well as gpu targets.
 */
// NOLINTNEXTLINE(readability-function-cognitive-complexity)
void CodegenCoreneuronCppVisitor::print_mechanism_register() {
    printer->add_newline(2);
    printer->add_line("/** register channel with the simulator */");
    printer->fmt_push_block("void _{}_reg()", info.mod_file);

    // type related information
    auto suffix = add_escape_quote(info.mod_suffix);
    printer->add_newline();
    printer->fmt_line("int mech_type = nrn_get_mechtype({});", suffix);
    printer->fmt_line("{} = mech_type;", get_variable_name("mech_type", false));
    printer->push_block("if (mech_type == -1)");
    printer->add_line("return;");
    printer->pop_block();

    printer->add_newline();
    printer->add_line("_nrn_layout_reg(mech_type, 0);");  // 0 for SoA

    // register mechanism
    const auto mech_arguments = register_mechanism_arguments();
    const auto number_of_thread_objects = num_thread_objects();
    if (info.point_process) {
        printer->fmt_line("point_register_mech({}, {}, {}, {});",
                          mech_arguments,
                          info.constructor_node ? method_name(naming::NRN_CONSTRUCTOR_METHOD)
                                                : "nullptr",
                          info.destructor_node ? method_name(naming::NRN_DESTRUCTOR_METHOD)
                                               : "nullptr",
                          number_of_thread_objects);
    } else {
        printer->fmt_line("register_mech({}, {});", mech_arguments, number_of_thread_objects);
        if (info.constructor_node) {
            printer->fmt_line("register_constructor({});",
                              method_name(naming::NRN_CONSTRUCTOR_METHOD));
        }
    }

    // types for ion
    for (const auto& ion: info.ions) {
        printer->fmt_line("{} = nrn_get_mechtype({});",
                          get_variable_name(ion.name + "_type", false),
                          add_escape_quote(ion.name + "_ion"));
    }
    printer->add_newline();

    /*
     * Register callbacks for thread allocation and cleanup. Note that thread_data_index
     * represents the total number of threads used minus 1 (i.e. the index of the last thread).
     */
    if (info.vectorize && (info.thread_data_index != 0)) {
        // false to avoid getting the copy from the instance structure
        printer->fmt_line("thread_mem_init({});", get_variable_name("ext_call_thread", false));
    }

    if (!info.thread_variables.empty()) {
        printer->fmt_line("{} = 0;", get_variable_name("thread_data_in_use"));
    }

    if (info.thread_callback_register) {
        printer->add_line("_nrn_thread_reg0(mech_type, thread_mem_cleanup);");
        printer->add_line("_nrn_thread_reg1(mech_type, thread_mem_init);");
    }

    if (info.emit_table_thread()) {
        auto name = method_name("check_table_thread");
        printer->fmt_line("_nrn_thread_table_reg(mech_type, {});", name);
    }

    // register read/write callbacks for pointers
    if (info.bbcore_pointer_used) {
        printer->add_line("hoc_reg_bbcore_read(mech_type, bbcore_read);");
        printer->add_line("hoc_reg_bbcore_write(mech_type, bbcore_write);");
    }

    // register size of double and int elements
    // clang-format off
    printer->add_line("hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size());");
    // clang-format on

    // register semantics for index variables
    for (auto& semantic: info.semantics) {
        auto args =
            fmt::format("mech_type, {}, {}", semantic.index, add_escape_quote(semantic.name));
        printer->fmt_line("hoc_register_dparam_semantics({});", args);
    }

    if (info.is_watch_used()) {
        auto watch_fun = compute_method_name(BlockType::Watch);
        printer->fmt_line("hoc_register_watch_check({}, mech_type);", watch_fun);
    }

    if (info.write_concentration) {
        printer->add_line("nrn_writes_conc(mech_type, 0);");
    }

    // register various information for point process type
    if (info.net_event_used) {
        printer->add_line("add_nrn_has_net_event(mech_type);");
    }
    if (info.artificial_cell) {
        printer->fmt_line("add_nrn_artcell(mech_type, {});", info.tqitem_index);
    }
    if (net_receive_buffering_required()) {
        printer->fmt_line("hoc_register_net_receive_buffering({}, mech_type);",
                          method_name("net_buf_receive"));
    }
    if (info.num_net_receive_parameters != 0) {
        auto net_recv_init_arg = "nullptr";
        if (info.net_receive_initial_node != nullptr) {
            net_recv_init_arg = "net_init";
        }
        printer->fmt_line("set_pnt_receive(mech_type, {}, {}, num_net_receive_args());",
                          method_name("net_receive"),
                          net_recv_init_arg);
    }
    if (info.for_netcon_used) {
        const auto index = position_of_int_var(naming::FOR_NETCON_VARIABLE);
        printer->fmt_line("add_nrn_fornetcons(mech_type, {});", index);
    }

    if (info.net_event_used || info.net_send_used) {
        printer->add_line("hoc_register_net_send_buffering(mech_type);");
    }

    /// register all before/after blocks
    for (size_t i = 0; i < info.before_after_blocks.size(); i++) {
        // register the type and associated function name for the block
        const auto& block = info.before_after_blocks[i];
        std::string register_type = get_register_type_for_ba_block(block);
        std::string function_name = method_name(fmt::format("nrn_before_after_{}", i));
        printer->fmt_line("hoc_reg_ba(mech_type, {}, {});", function_name, register_type);
    }

    // register variables for hoc
    printer->add_line("hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL);");
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_thread_memory_callbacks() {
    if (!info.thread_callback_register) {
        return;
    }

    // thread_mem_init callback
    printer->add_newline(2);
    printer->add_line("/** thread memory allocation callback */");
    printer->push_block("static void thread_mem_init(ThreadDatum* thread) ");

    if (info.vectorize && info.derivimplicit_used()) {
        printer->fmt_line("thread[dith{}()].pval = nullptr;", info.derivimplicit_list_num);
    }
    if (info.vectorize && (info.top_local_thread_size != 0)) {
        auto length = info.top_local_thread_size;
        auto allocation = fmt::format("(double*)mem_alloc({}, sizeof(double))", length);
        printer->fmt_line("thread[top_local_var_tid()].pval = {};", allocation);
    }
    if (info.thread_var_data_size != 0) {
        auto length = info.thread_var_data_size;
        auto thread_data = get_variable_name("thread_data");
        auto thread_data_in_use = get_variable_name("thread_data_in_use");
        auto allocation = fmt::format("(double*)mem_alloc({}, sizeof(double))", length);
        printer->fmt_push_block("if ({})", thread_data_in_use);
        printer->fmt_line("thread[thread_var_tid()].pval = {};", allocation);
        printer->chain_block("else");
        printer->fmt_line("thread[thread_var_tid()].pval = {};", thread_data);
        printer->fmt_line("{} = 1;", thread_data_in_use);
        printer->pop_block();
    }
    printer->pop_block();
    printer->add_newline(2);


    // thread_mem_cleanup callback
    printer->add_line("/** thread memory cleanup callback */");
    printer->push_block("static void thread_mem_cleanup(ThreadDatum* thread) ");

    // clang-format off
    if (info.vectorize && info.derivimplicit_used()) {
        int n = info.derivimplicit_list_num;
        printer->fmt_line("free(thread[dith{}()].pval);", n);
        printer->fmt_line("nrn_destroy_newtonspace(static_cast<NewtonSpace*>(*newtonspace{}(thread)));", n);
    }
    // clang-format on

    if (info.top_local_thread_size != 0) {
        auto line = "free(thread[top_local_var_tid()].pval);";
        printer->add_line(line);
    }
    if (info.thread_var_data_size != 0) {
        auto thread_data = get_variable_name("thread_data");
        auto thread_data_in_use = get_variable_name("thread_data_in_use");
        printer->fmt_push_block("if (thread[thread_var_tid()].pval == {})", thread_data);
        printer->fmt_line("{} = 0;", thread_data_in_use);
        printer->chain_block("else");
        printer->add_line("free(thread[thread_var_tid()].pval);");
        printer->pop_block();
    }
    printer->pop_block();
}


void CodegenCoreneuronCppVisitor::print_mechanism_range_var_structure(bool print_initializers) {
    auto const value_initialize = print_initializers ? "{}" : "";
    auto int_type = default_int_data_type();
    printer->add_newline(2);
    printer->add_line("/** all mechanism instance variables and global variables */");
    printer->fmt_push_block("struct {} ", instance_struct());

    for (auto const& [var, type]: info.neuron_global_variables) {
        auto const name = var->get_name();
        printer->fmt_line("{}* {}{};",
                          type,
                          name,
                          print_initializers ? fmt::format("{{&coreneuron::{}}}", name)
                                             : std::string{});
    }
    for (auto& var: codegen_float_variables) {
        const auto& name = var->get_name();
        auto type = get_range_var_float_type(var);
        auto qualifier = is_constant_variable(name) ? "const " : "";
        printer->fmt_line("{}{}* {}{};", qualifier, type, name, value_initialize);
    }
    for (auto& var: codegen_int_variables) {
        const auto& name = var.symbol->get_name();
        if (var.is_index || var.is_integer) {
            auto qualifier = var.is_constant ? "const " : "";
            printer->fmt_line("{}{}* {}{};", qualifier, int_type, name, value_initialize);
        } else {
            auto qualifier = var.is_constant ? "const " : "";
            auto type = var.is_vdata ? "void*" : default_float_data_type();
            printer->fmt_line("{}{}* {}{};", qualifier, type, name, value_initialize);
        }
    }

    printer->fmt_line("{}* {}{};",
                      global_struct(),
                      naming::INST_GLOBAL_MEMBER,
                      print_initializers ? fmt::format("{{&{}}}", global_struct_instance())
                                         : std::string{});
    printer->pop_block(";");
}


void CodegenCoreneuronCppVisitor::print_ion_var_structure() {
    if (!ion_variable_struct_required()) {
        return;
    }
    printer->add_newline(2);
    printer->add_line("/** ion write variables */");
    printer->push_block("struct IonCurVar");

    std::string float_type = default_float_data_type();
    std::vector<std::string> members;

    for (auto& ion: info.ions) {
        for (auto& var: ion.writes) {
            printer->fmt_line("{} {};", float_type, var);
            members.push_back(var);
        }
    }
    for (auto& var: info.currents) {
        if (!info.is_ion_variable(var)) {
            printer->fmt_line("{} {};", float_type, var);
            members.push_back(var);
        }
    }

    print_ion_var_constructor(members);

    printer->pop_block(";");
}


void CodegenCoreneuronCppVisitor::print_ion_var_constructor(
    const std::vector<std::string>& members) {
    // constructor
    printer->add_newline();
    printer->add_indent();
    printer->add_text("IonCurVar() : ");
    for (std::size_t i = 0; i < members.size(); i++) {
        printer->fmt_text("{}(0)", members[i]);
        if (i + 1 < members.size()) {
            printer->add_text(", ");
        }
    }
    printer->add_text(" {}");
    printer->add_newline();
}


void CodegenCoreneuronCppVisitor::print_ion_variable() {
    printer->add_line("IonCurVar ionvar;");
}


void CodegenCoreneuronCppVisitor::print_global_variable_device_update_annotation() {
    // backend specific, nothing for cpu
}


void CodegenCoreneuronCppVisitor::print_setup_range_variable() {
    auto type = float_data_type();
    printer->add_newline(2);
    printer->add_line("/** allocate and setup array for range variable */");
    printer->fmt_push_block("static inline {}* setup_range_variable(double* variable, int n)",
                            type);
    printer->fmt_line("{0}* data = ({0}*) mem_alloc(n, sizeof({0}));", type);
    printer->push_block("for(size_t i = 0; i < n; i++)");
    printer->add_line("data[i] = variable[i];");
    printer->pop_block();
    printer->add_line("return data;");
    printer->pop_block();
}


/**
 * \details If a floating point type like "float" is specified on the command line then
 * we can't change all variables to the new type. This is because certain variables
 * are pointers to internal variables (e.g. ions). Hence, we check if a given
 * variable can be safely converted to the new type. If so, return the new type.
 */
std::string CodegenCoreneuronCppVisitor::get_range_var_float_type(const SymbolType& symbol) {
    // clang-format off
    auto with = NmodlType::read_ion_var
                | NmodlType::write_ion_var
                | NmodlType::pointer_var
                | NmodlType::bbcore_pointer_var
                | NmodlType::extern_neuron_variable;
    // clang-format on
    bool need_default_type = symbol->has_any_property(with);
    if (need_default_type) {
        return default_float_data_type();
    }
    return float_data_type();
}


void CodegenCoreneuronCppVisitor::print_instance_variable_setup() {
    if (range_variable_setup_required()) {
        print_setup_range_variable();
    }

    printer->add_newline();
    printer->add_line("// Allocate instance structure");
    printer->fmt_push_block("static void {}(NrnThread* nt, Memb_list* ml, int type)",
                            method_name(naming::NRN_PRIVATE_CONSTRUCTOR_METHOD));
    printer->add_line("assert(!ml->instance);");
    printer->add_line("assert(!ml->global_variables);");
    printer->add_line("assert(ml->global_variables_size == 0);");
    printer->fmt_line("auto* const inst = new {}{{}};", instance_struct());
    printer->fmt_line("assert(inst->{} == &{});",
                      naming::INST_GLOBAL_MEMBER,
                      global_struct_instance());
    printer->add_line("ml->instance = inst;");
    printer->fmt_line("ml->global_variables = inst->{};", naming::INST_GLOBAL_MEMBER);
    printer->fmt_line("ml->global_variables_size = sizeof({});", global_struct());
    printer->pop_block();
    printer->add_newline();

    auto const cast_inst_and_assert_validity = [&]() {
        printer->fmt_line("auto* const inst = static_cast<{}*>(ml->instance);", instance_struct());
        printer->add_line("assert(inst);");
        printer->fmt_line("assert(inst->{});", naming::INST_GLOBAL_MEMBER);
        printer->fmt_line("assert(inst->{} == &{});",
                          naming::INST_GLOBAL_MEMBER,
                          global_struct_instance());
        printer->fmt_line("assert(inst->{} == ml->global_variables);", naming::INST_GLOBAL_MEMBER);
        printer->fmt_line("assert(ml->global_variables_size == sizeof({}));", global_struct());
    };

    // Must come before print_instance_struct_copy_to_device and
    // print_instance_struct_delete_from_device
    print_instance_struct_transfer_routine_declarations();

    printer->add_line("// Deallocate the instance structure");
    printer->fmt_push_block("static void {}(NrnThread* nt, Memb_list* ml, int type)",
                            method_name(naming::NRN_PRIVATE_DESTRUCTOR_METHOD));
    cast_inst_and_assert_validity();

    // delete random streams
    if (!info.random_variables.empty()) {
        printer->add_line("int pnodecount = ml->_nodecount_padded;");
        printer->add_line("int nodecount = ml->nodecount;");
        printer->add_line("Datum* indexes = ml->pdata;");
        printer->push_block("for (int id = 0; id < nodecount; id++)");
        for (const auto& var: info.random_variables) {
            const auto& name = get_variable_name(var->get_name());
            printer->fmt_line("nrnran123_deletestream({});", name);
        }
        printer->pop_block();
    }
    print_instance_struct_delete_from_device();
    printer->add_multi_line(R"CODE(
        delete inst;
        ml->instance = nullptr;
        ml->global_variables = nullptr;
        ml->global_variables_size = 0;
    )CODE");
    printer->pop_block();
    printer->add_newline();
1660 
1661 
1662  printer->add_line("/** initialize mechanism instance variables */");
1663  printer->push_block("static inline void setup_instance(NrnThread* nt, Memb_list* ml)");
1664  cast_inst_and_assert_validity();
1665 
1666  std::string stride;
1667  printer->add_line("int pnodecount = ml->_nodecount_padded;");
1668  stride = "*pnodecount";
1669 
1670  printer->add_line("Datum* indexes = ml->pdata;");
1671 
1672  auto const float_type = default_float_data_type();
1673 
1674  int id = 0;
1675  std::vector<std::string> ptr_members{naming::INST_GLOBAL_MEMBER};
1676  for (auto const& [var, type]: info.neuron_global_variables) {
1677  ptr_members.push_back(var->get_name());
1678  }
1679  ptr_members.reserve(ptr_members.size() + codegen_float_variables.size() +
1680  codegen_int_variables.size());
1681  for (auto& var: codegen_float_variables) {
1682  auto name = var->get_name();
1683  auto range_var_type = get_range_var_float_type(var);
1684  if (float_type == range_var_type) {
1685  auto const variable = fmt::format("ml->data+{}{}", id, stride);
1686  printer->fmt_line("inst->{} = {};", name, variable);
1687  } else {
1688  // TODO what MOD file exercises this?
1689  printer->fmt_line("inst->{} = setup_range_variable(ml->data+{}{}, pnodecount);",
1690  name,
1691  id,
1692  stride);
1693  }
1694  ptr_members.push_back(std::move(name));
1695  id += var->get_length();
1696  }
1697 
1698  for (auto& var: codegen_int_variables) {
1699  auto name = var.symbol->get_name();
1700  auto const variable = [&var]() {
1701  if (var.is_index || var.is_integer) {
1702  return "ml->pdata";
1703  } else if (var.is_vdata) {
1704  return "nt->_vdata";
1705  } else {
1706  return "nt->_data";
1707  }
1708  }();
1709  printer->fmt_line("inst->{} = {};", name, variable);
1710  ptr_members.push_back(std::move(name));
1711  }
1712  print_instance_struct_copy_to_device();
1713  printer->pop_block(); // setup_instance
1714  printer->add_newline();
1715 
1716  print_instance_struct_transfer_routines(ptr_members);
1717 }
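/**
 * Illustrative sketch (not from this file): for a hypothetical mechanism with
 * suffix `ExSyn`, one range variable `g` and one int variable `tqitem`, the
 * routine above emits a `setup_instance` of roughly this shape (struct and
 * member names depend on the MOD file):
 *
 * \code{.cpp}
 * static inline void setup_instance(NrnThread* nt, Memb_list* ml) {
 *     auto* const inst = static_cast<ExSyn_Instance*>(ml->instance);
 *     assert(inst);
 *     assert(inst->global);
 *     assert(inst->global == &ExSyn_global);
 *     assert(inst->global == ml->global_variables);
 *     assert(ml->global_variables_size == sizeof(ExSyn_Store));
 *     int pnodecount = ml->_nodecount_padded;
 *     Datum* indexes = ml->pdata;
 *     inst->g = ml->data + 0*pnodecount;   // float variable at prefix-sum offset 0
 *     inst->tqitem = ml->pdata;            // index/int variable
 * }
 * \endcode
 */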
1718 
1719 
1720 void CodegenCoreneuronCppVisitor::print_initial_block(const ast::InitialBlock* node) {
1721  if (info.artificial_cell) {
1722  printer->add_line("double v = 0.0;");
1723  } else {
1724  printer->add_line("int node_id = node_index[id];");
1725  printer->add_line("double v = voltage[node_id];");
1726  print_v_unused();
1727  }
1728 
1729  if (ion_variable_struct_required()) {
1730  printer->add_line("IonCurVar ionvar;");
1731  }
1732 
1733  // read ion statements
1734  auto read_statements = ion_read_statements(BlockType::Initial);
1735  for (auto& statement: read_statements) {
1736  printer->add_line(statement);
1737  }
1738 
1739  print_rename_state_vars();
1740 
1741  // initial block
1742  if (node != nullptr) {
1743  const auto& block = node->get_statement_block();
1744  print_statement_block(*block, false, false);
1745  }
1746 
1747  // write ion statements
1748  auto write_statements = ion_write_statements(BlockType::Initial);
1749  for (auto& statement: write_statements) {
1750  auto text = process_shadow_update_statement(statement, BlockType::Initial);
1751  printer->add_line(text);
1752  }
1753 }
1754 
1755 
1756 void CodegenCoreneuronCppVisitor::print_global_function_common_code(
1757  BlockType type,
1758  const std::string& function_name) {
1759  std::string method;
1760  if (function_name.empty()) {
1761  method = compute_method_name(type);
1762  } else {
1763  method = function_name;
1764  }
1765  auto args = "NrnThread* nt, Memb_list* ml, int type";
1766 
1767  // watch statement function doesn't have type argument
1768  if (type == BlockType::Watch) {
1769  args = "NrnThread* nt, Memb_list* ml";
1770  }
1771 
1772  print_global_method_annotation();
1773  printer->fmt_push_block("void {}({})", method, args);
1774  if (type != BlockType::Destructor && type != BlockType::Constructor) {
1775  // We do not (currently) support DESTRUCTOR and CONSTRUCTOR blocks
1776  // running anything on the GPU.
1777  print_kernel_data_present_annotation_block_begin();
1778  } else {
1779  /// TODO: Remove this when the code generation is propery done
1780  /// Related to https://github.com/BlueBrain/nmodl/issues/692
1781  printer->add_line("#ifndef CORENEURON_BUILD");
1782  }
1783  printer->add_multi_line(R"CODE(
1784  int nodecount = ml->nodecount;
1785  int pnodecount = ml->_nodecount_padded;
1786  const int* node_index = ml->nodeindices;
1787  double* data = ml->data;
1788  const double* voltage = nt->_actual_v;
1789  )CODE");
1790 
1791  if (type == BlockType::Equation) {
1792  printer->add_line("double* vec_rhs = nt->_actual_rhs;");
1793  printer->add_line("double* vec_d = nt->_actual_d;");
1794  print_rhs_d_shadow_variables();
1795  }
1796  printer->add_line("Datum* indexes = ml->pdata;");
1797  printer->add_line("ThreadDatum* thread = ml->_thread;");
1798 
1799  if (type == BlockType::Initial) {
1800  printer->add_newline();
1801  printer->add_line("setup_instance(nt, ml);");
1802  }
1803  printer->fmt_line("auto* const inst = static_cast<{}*>(ml->instance);", instance_struct());
1804  printer->add_newline(1);
1805 }
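/**
 * Illustrative sketch (not from this file): for BlockType::State and a
 * hypothetical suffix `ExSyn`, the common preamble emitted above looks
 * roughly like:
 *
 * \code{.cpp}
 * void nrn_state_ExSyn(NrnThread* nt, Memb_list* ml, int type) {
 *     int nodecount = ml->nodecount;
 *     int pnodecount = ml->_nodecount_padded;
 *     const int* node_index = ml->nodeindices;
 *     double* data = ml->data;
 *     const double* voltage = nt->_actual_v;
 *     Datum* indexes = ml->pdata;
 *     ThreadDatum* thread = ml->_thread;
 *     auto* const inst = static_cast<ExSyn_Instance*>(ml->instance);
 *     // ... block-specific body follows ...
 * }
 * \endcode
 */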
1806 
1807 void CodegenCoreneuronCppVisitor::print_nrn_init(bool skip_init_check) {
1808  printer->add_newline(2);
1809  printer->add_line("/** initialize channel */");
1810 
1811  print_global_function_common_code(BlockType::Initial);
1812  if (info.derivimplicit_used()) {
1813  printer->add_newline();
1814  int nequation = info.num_equations;
1815  int list_num = info.derivimplicit_list_num;
1816  // clang-format off
1817  printer->fmt_line("int& deriv_advance_flag = *deriv{}_advance(thread);", list_num);
1818  printer->add_line("deriv_advance_flag = 0;");
1819  print_deriv_advance_flag_transfer_to_device();
1820  printer->fmt_line("auto ns = newtonspace{}(thread);", list_num);
1821  printer->fmt_line("auto& th = thread[dith{}()];", list_num);
1822  printer->push_block("if (*ns == nullptr)");
1823  printer->fmt_line("int vec_size = 2*{}*pnodecount*sizeof(double);", nequation);
 1824  printer->add_line("double* vec = makevector(vec_size);");
 1825  printer->add_line("th.pval = vec;");
1826  printer->fmt_line("*ns = nrn_cons_newtonspace({}, pnodecount);", nequation);
1827  print_newtonspace_transfer_to_device();
1828  printer->pop_block();
1829  // clang-format on
1830  }
1831 
 1832  // update global variables as they might be updated via the python/hoc API
1833  // NOTE: CoreNEURON has enough information to do this on its own, which
1834  // would be neater.
1835  print_global_variable_device_update_annotation();
1836 
1837  if (skip_init_check) {
1838  printer->push_block("if (_nrn_skip_initmodel == 0)");
1839  }
1840 
1841  if (!info.changed_dt.empty()) {
1842  printer->fmt_line("double _save_prev_dt = {};",
1843  get_variable_name(naming::NTHREAD_DT_VARIABLE));
1844  printer->fmt_line("{} = {};",
1845  get_variable_name(naming::NTHREAD_DT_VARIABLE),
1846  info.changed_dt);
1847  print_dt_update_to_device();
1848  }
1849 
1850  print_parallel_iteration_hint(BlockType::Initial, info.initial_node);
1851  printer->push_block("for (int id = 0; id < nodecount; id++)");
1852 
1853  if (info.net_receive_node != nullptr) {
1854  printer->fmt_line("{} = -1e20;", get_variable_name("tsave"));
1855  }
1856 
1857  print_initial_block(info.initial_node);
1858  printer->pop_block();
1859 
1860  if (!info.changed_dt.empty()) {
1861  printer->fmt_line("{} = _save_prev_dt;", get_variable_name(naming::NTHREAD_DT_VARIABLE));
1862  print_dt_update_to_device();
1863  }
1864 
1865  printer->pop_block();
1866 
1867  if (info.derivimplicit_used()) {
1868  printer->add_line("deriv_advance_flag = 1;");
1869  print_deriv_advance_flag_transfer_to_device();
1870  }
1871 
1872  if (info.net_send_used && !info.artificial_cell) {
1873  print_send_event_move();
1874  }
1875 
1876  print_kernel_data_present_annotation_block_end();
1877  if (skip_init_check) {
1878  printer->pop_block();
1879  }
1880 }
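/**
 * Illustrative sketch (not from this file): with skip_init_check enabled and
 * no derivimplicit bookkeeping, the emitted initializer has this overall
 * shape:
 *
 * \code{.cpp}
 * void nrn_init_ExSyn(NrnThread* nt, Memb_list* ml, int type) {
 *     // common preamble (see above), ending with setup_instance(nt, ml);
 *     if (_nrn_skip_initmodel == 0) {
 *         for (int id = 0; id < nodecount; id++) {
 *             int node_id = node_index[id];
 *             double v = voltage[node_id];
 *             // ion reads, INITIAL block body, ion writes
 *         }
 *     }
 * }
 * \endcode
 */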
1881 
1882 void CodegenCoreneuronCppVisitor::print_before_after_block(const ast::Block* node,
1883  size_t block_id) {
1884  std::string ba_type;
1885  std::shared_ptr<ast::BABlock> ba_block;
1886 
1887  if (node->is_before_block()) {
1888  ba_block = dynamic_cast<const ast::BeforeBlock*>(node)->get_bablock();
1889  ba_type = "BEFORE";
1890  } else {
1891  ba_block = dynamic_cast<const ast::AfterBlock*>(node)->get_bablock();
1892  ba_type = "AFTER";
1893  }
1894 
1895  std::string ba_block_type = ba_block->get_type()->eval();
1896 
1897  /// name of the before/after function
1898  std::string function_name = method_name(fmt::format("nrn_before_after_{}", block_id));
1899 
1900  /// print common function code like init/state/current
1901  printer->add_newline(2);
1902  printer->fmt_line("/** {} of block type {} # {} */", ba_type, ba_block_type, block_id);
1903  print_global_function_common_code(BlockType::BeforeAfter, function_name);
1904 
1905  print_parallel_iteration_hint(BlockType::BeforeAfter, node);
1906  printer->push_block("for (int id = 0; id < nodecount; id++)");
1907 
1908  printer->add_line("int node_id = node_index[id];");
1909  printer->add_line("double v = voltage[node_id];");
1910  print_v_unused();
1911 
1912  // read ion statements
1913  const auto& read_statements = ion_read_statements(BlockType::Equation);
1914  for (auto& statement: read_statements) {
1915  printer->add_line(statement);
1916  }
1917 
1918  /// print main body
1919  printer->add_indent();
1920  print_statement_block(*ba_block->get_statement_block());
1921  printer->add_newline();
1922 
1923  // write ion statements
1924  const auto& write_statements = ion_write_statements(BlockType::Equation);
1925  for (auto& statement: write_statements) {
1926  auto text = process_shadow_update_statement(statement, BlockType::Equation);
1927  printer->add_line(text);
1928  }
1929 
1930  /// loop end including data annotation block
1931  printer->pop_block();
1932  printer->pop_block();
1933  print_kernel_data_present_annotation_block_end();
1934 }
1935 
1936 void CodegenCoreneuronCppVisitor::print_nrn_constructor() {
1937  printer->add_newline(2);
1938  print_global_function_common_code(BlockType::Constructor);
1939  if (info.constructor_node != nullptr) {
1940  const auto& block = info.constructor_node->get_statement_block();
1941  print_statement_block(*block, false, false);
1942  }
1943  printer->add_line("#endif");
1944  printer->pop_block();
1945 }
1946 
1947 
1948 void CodegenCoreneuronCppVisitor::print_nrn_destructor() {
1949  printer->add_newline(2);
1950  print_global_function_common_code(BlockType::Destructor);
1951  if (info.destructor_node != nullptr) {
1952  const auto& block = info.destructor_node->get_statement_block();
1953  print_statement_block(*block, false, false);
1954  }
1955  printer->add_line("#endif");
1956  printer->pop_block();
1957 }
1958 
1959 
1960 void CodegenCoreneuronCppVisitor::print_nrn_alloc() {
1961  printer->add_newline(2);
1962  auto method = method_name(naming::NRN_ALLOC_METHOD);
1963  printer->fmt_push_block("static void {}(double* data, Datum* indexes, int type)", method);
1964  printer->add_line("// do nothing");
1965  printer->pop_block();
1966 }
1967 
1968 /**
 1969  * \todo The number of WATCH expressions could be larger than the number of
 1970  * statements according to the grammar. Check if this is correctly handled
 1971  * in NEURON and CoreNEURON.
1972  */
1973 void CodegenCoreneuronCppVisitor::print_watch_activate() {
1974  if (info.watch_statements.empty()) {
1975  return;
1976  }
1977 
1978  printer->add_newline(2);
1979  auto inst = fmt::format("{}* inst", instance_struct());
1980 
1981  printer->fmt_push_block(
1982  "static void nrn_watch_activate({}, int id, int pnodecount, int watch_id, "
1983  "double v, bool &watch_remove)",
1984  inst);
1985 
1986  // initialize all variables only during first watch statement
1987  printer->push_block("if (watch_remove == false)");
1988  for (int i = 0; i < info.watch_count; i++) {
1989  auto name = get_variable_name(fmt::format("watch{}", i + 1));
1990  printer->fmt_line("{} = 0;", name);
1991  }
1992  printer->add_line("watch_remove = true;");
1993  printer->pop_block();
1994 
1995  /**
 1996  * \todo Similar to NEURON/CoreNEURON, we use only the
 1997  * first WATCH statement and ignore the rest.
1998  */
1999  for (int i = 0; i < info.watch_statements.size(); i++) {
2000  auto statement = info.watch_statements[i];
2001  printer->fmt_push_block("if (watch_id == {})", i);
2002 
2003  auto varname = get_variable_name(fmt::format("watch{}", i + 1));
2004  printer->add_indent();
2005  printer->fmt_text("{} = 2 + (", varname);
2006  auto watch = statement->get_statements().front();
2007  watch->get_expression()->visit_children(*this);
2008  printer->add_text(");");
2009  printer->add_newline();
2010 
2011  printer->pop_block();
2012  }
2013  printer->pop_block();
2014 }
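/**
 * Illustrative sketch (not from this file): for a single hypothetical
 * statement `WATCH (v > 10) 2`, the emitted activation routine is roughly as
 * follows (the `watch1` member name is an assumption for illustration):
 *
 * \code{.cpp}
 * static void nrn_watch_activate(ExSyn_Instance* inst, int id, int pnodecount,
 *                                int watch_id, double v, bool &watch_remove) {
 *     // initialize all watch variables only during the first watch statement
 *     if (watch_remove == false) {
 *         inst->watch1[id] = 0;
 *         watch_remove = true;
 *     }
 *     if (watch_id == 0) {
 *         inst->watch1[id] = 2 + (v > 10.0);
 *     }
 * }
 * \endcode
 */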
2015 
2016 
2017 /**
 2018  * \todo Similar to print_watch_activate, we use only the first WATCH
 2019  * statement; need to verify with NEURON/CoreNEURON how the rest are handled.
2020  */
2021 void CodegenCoreneuronCppVisitor::print_watch_check() {
2022  if (info.watch_statements.empty()) {
2023  return;
2024  }
2025 
2026  printer->add_newline(2);
2027  printer->add_line("/** routine to check watch activation */");
2028  print_global_function_common_code(BlockType::Watch);
2029 
 2030  // WATCH statements appear in the NET_RECEIVE block, and while printing the
 2031  // net_receive function we already check if it contains any MUTEX/PROTECT
 2032  // constructs. As WATCH is not a top-level block but a list of statements,
 2033  // we don't need the ivdep pragma related check here.
2034  print_parallel_iteration_hint(BlockType::Watch, nullptr);
2035 
2036  printer->push_block("for (int id = 0; id < nodecount; id++)");
2037 
2038  if (info.is_voltage_used_by_watch_statements()) {
2039  printer->add_line("int node_id = node_index[id];");
2040  printer->add_line("double v = voltage[node_id];");
2041  print_v_unused();
2042  }
2043 
 2044  // flag to make sure only one WATCH statement can be triggered at a time
2045  printer->add_line("bool watch_untriggered = true;");
2046 
2047  for (int i = 0; i < info.watch_statements.size(); i++) {
2048  auto statement = info.watch_statements[i];
2049  const auto& watch = statement->get_statements().front();
2050  const auto& varname = get_variable_name(fmt::format("watch{}", i + 1));
2051 
2052  // start block 1
2053  printer->fmt_push_block("if ({}&2 && watch_untriggered)", varname);
2054 
2055  // start block 2
2056  printer->add_indent();
2057  printer->add_text("if (");
2058  watch->get_expression()->accept(*this);
2059  printer->add_text(") {");
2060  printer->add_newline();
2061  printer->increase_indent();
2062 
2063  // start block 3
2064  printer->fmt_push_block("if (({}&1) == 0)", varname);
2065 
2066  printer->add_line("watch_untriggered = false;");
2067 
2068  const auto& tqitem = get_variable_name("tqitem");
2069  const auto& point_process = get_variable_name("point_process");
2070  printer->add_indent();
2071  printer->add_text("net_send_buffering(");
2072  const auto& t = get_variable_name("t");
2073  printer->fmt_text("nt, ml->_net_send_buffer, 0, {}, -1, {}, {}+0.0, ",
2074  tqitem,
2075  point_process,
2076  t);
2077  watch->get_value()->accept(*this);
2078  printer->add_text(");");
2079  printer->add_newline();
2080  printer->pop_block();
2081 
2082  printer->add_line(varname, " = 3;");
 2083  // end of the if branch of block 2 (block 3 was closed by the pop_block above)
 2084 
 2085  // else branch of block 2
 2086  printer->decrease_indent();
 2087  printer->push_block("} else");
 2088  printer->add_line(varname, " = 2;");
 2089  printer->pop_block();
 2090  // end block 2
2091 
2092  printer->pop_block();
2093  // end block 1
2094  }
2095 
2096  printer->pop_block();
2097  print_send_event_move();
2098  print_kernel_data_present_annotation_block_end();
2099  printer->pop_block();
2100 }
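/**
 * Illustrative sketch (not from this file): inside the emitted watch-check
 * kernel, each WATCH statement expands to a block of this shape; the low two
 * bits of the (hypothetical) `watch1` variable track activation and the
 * previous state of the condition, and `tqitem`, `point_process` and `t`
 * stand for the expanded variable names:
 *
 * \code{.cpp}
 * if (inst->watch1[id]&2 && watch_untriggered) {
 *     if (v > 10.0) {
 *         if ((inst->watch1[id]&1) == 0) {
 *             watch_untriggered = false;
 *             net_send_buffering(nt, ml->_net_send_buffer, 0, tqitem, -1,
 *                                point_process, t+0.0, 2.0);
 *         }
 *         inst->watch1[id] = 3;
 *     } else {
 *         inst->watch1[id] = 2;
 *     }
 * }
 * \endcode
 */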
2101 
2102 
2103 void CodegenCoreneuronCppVisitor::print_net_receive_common_code(const ast::Block& node,
2104  bool need_mech_inst) {
2105  printer->add_multi_line(R"CODE(
2106  int tid = pnt->_tid;
2107  int id = pnt->_i_instance;
2108  double v = 0;
2109  )CODE");
2110 
2111  if (info.artificial_cell || node.is_initial_block()) {
2112  printer->add_line("NrnThread* nt = nrn_threads + tid;");
2113  printer->add_line("Memb_list* ml = nt->_ml_list[pnt->_type];");
2114  }
2115  if (node.is_initial_block()) {
2116  print_kernel_data_present_annotation_block_begin();
2117  }
2118 
2119  printer->add_multi_line(R"CODE(
2120  int nodecount = ml->nodecount;
2121  int pnodecount = ml->_nodecount_padded;
2122  double* data = ml->data;
2123  double* weights = nt->weights;
2124  Datum* indexes = ml->pdata;
2125  ThreadDatum* thread = ml->_thread;
2126  )CODE");
2127  if (need_mech_inst) {
2128  printer->fmt_line("auto* const inst = static_cast<{0}*>(ml->instance);", instance_struct());
2129  }
2130 
2131  if (node.is_initial_block()) {
2132  print_net_init_acc_serial_annotation_block_begin();
2133  }
2134 
 2135  // rename variables, but only those that are actually used
2136  auto parameters = info.net_receive_node->get_parameters();
2137  if (!parameters.empty()) {
2138  int i = 0;
2139  printer->add_newline();
2140  for (auto& parameter: parameters) {
2141  auto name = parameter->get_node_name();
2142  bool var_used = VarUsageVisitor().variable_used(node, "(*" + name + ")");
2143  if (var_used) {
2144  printer->fmt_line("double* {} = weights + weight_index + {};", name, i);
2145  RenameVisitor vr(name, "*" + name);
2146  node.visit_children(vr);
2147  }
2148  i++;
2149  }
2150  }
2151 }
2152 
2153 
2154 void CodegenCoreneuronCppVisitor::print_net_send_call(const ast::FunctionCall& node) {
2155  auto const& arguments = node.get_arguments();
2156  const auto& tqitem = get_variable_name("tqitem");
2157  std::string weight_index = "weight_index";
2158  std::string pnt = "pnt";
2159 
2160  // for functions not generated from NET_RECEIVE blocks (i.e. top level INITIAL block)
2161  // the weight_index argument is 0.
2162  if (!printing_net_receive && !printing_net_init) {
2163  weight_index = "0";
2164  auto var = get_variable_name("point_process");
2165  if (info.artificial_cell) {
2166  pnt = "(Point_process*)" + var;
2167  }
2168  }
2169 
2170  // artificial cells don't use spike buffering
2171  // clang-format off
2172  if (info.artificial_cell) {
2173  printer->fmt_text("artcell_net_send(&{}, {}, {}, nt->_t+", tqitem, weight_index, pnt);
2174  } else {
2175  const auto& point_process = get_variable_name("point_process");
2176  const auto& t = get_variable_name("t");
2177  printer->add_text("net_send_buffering(");
2178  printer->fmt_text("nt, ml->_net_send_buffer, 0, {}, {}, {}, {}+", tqitem, weight_index, point_process, t);
2179  }
 2180  // clang-format on
2181  print_vector_elements(arguments, ", ");
2182  printer->add_text(')');
2183 }
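/**
 * Illustrative sketch (not from this file): a MOD-level `net_send(5, 1)` is
 * translated to one of the following, where `tqitem`, `point_process` and `t`
 * stand for the expanded variable names:
 *
 * \code{.cpp}
 * // ARTIFICIAL_CELL: direct, unbuffered event
 * artcell_net_send(&tqitem, weight_index, pnt, nt->_t+5.0, 1.0);
 * // otherwise: buffered via NetSendBuffer_t
 * net_send_buffering(nt, ml->_net_send_buffer, 0, tqitem, weight_index,
 *                    point_process, t+5.0, 1.0);
 * \endcode
 */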
2184 
2185 
2186 void CodegenCoreneuronCppVisitor::print_net_move_call(const ast::FunctionCall& node) {
2187  if (!printing_net_receive && !printing_net_init) {
2188  throw std::runtime_error("Error : net_move only allowed in NET_RECEIVE block");
2189  }
2190 
2191  auto const& arguments = node.get_arguments();
2192  const auto& tqitem = get_variable_name("tqitem");
2193  std::string weight_index = "-1";
2194  std::string pnt = "pnt";
2195 
2196  // artificial cells don't use spike buffering
2197  // clang-format off
2198  if (info.artificial_cell) {
2199  printer->fmt_text("artcell_net_move(&{}, {}, ", tqitem, pnt);
2200  print_vector_elements(arguments, ", ");
2201  printer->add_text(")");
2202  } else {
2203  const auto& point_process = get_variable_name("point_process");
2204  printer->add_text("net_send_buffering(");
2205  printer->fmt_text("nt, ml->_net_send_buffer, 2, {}, {}, {}, ", tqitem, weight_index, point_process);
2206  print_vector_elements(arguments, ", ");
2207  printer->add_text(", 0.0");
2208  printer->add_text(")");
2209  }
2210 }
2211 
2212 
2213 void CodegenCoreneuronCppVisitor::print_net_event_call(const ast::FunctionCall& node) {
2214  const auto& arguments = node.get_arguments();
2215  if (info.artificial_cell) {
2216  printer->add_text("net_event(pnt, ");
2217  print_vector_elements(arguments, ", ");
2218  } else {
2219  const auto& point_process = get_variable_name("point_process");
2220  printer->add_text("net_send_buffering(");
2221  printer->fmt_text("nt, ml->_net_send_buffer, 1, -1, -1, {}, ", point_process);
2222  print_vector_elements(arguments, ", ");
2223  printer->add_text(", 0.0");
2224  }
2225  printer->add_text(")");
2226 }
2227 
2228 void CodegenCoreneuronCppVisitor::print_function_table_call(const ast::FunctionCall& node) {
2229  auto name = node.get_node_name();
2230  const auto& arguments = node.get_arguments();
2231  printer->add_text(method_name(name), '(');
2232 
2233  printer->add_text(internal_method_arguments());
2234  if (!arguments.empty()) {
2235  printer->add_text(", ");
2236  }
2237 
2238  print_vector_elements(arguments, ", ");
2239  printer->add_text(')');
2240 }
2241 
2242 /**
2243  * Rename arguments to NET_RECEIVE block with corresponding pointer variable
2244  *
2245  * Arguments to NET_RECEIVE block are packed and passed via weight vector. These
2246  * variables need to be replaced with corresponding pointer variable. For example,
2247  * if mod file is like
2248  *
2249  * \code{.mod}
2250  * NET_RECEIVE (weight, R){
2251  * INITIAL {
2252  * R=1
2253  * }
2254  * }
2255  * \endcode
2256  *
2257  * then generated code for initial block should be:
2258  *
2259  * \code{.cpp}
2260  * double* R = weights + weight_index + 0;
2261  * (*R) = 1.0;
2262  * \endcode
2263  *
2264  * So, the `R` in AST needs to be renamed with `(*R)`.
2265  */
2266 static void rename_net_receive_arguments(const ast::NetReceiveBlock& net_receive_node, const ast::Node& node) {
2267  const auto& parameters = net_receive_node.get_parameters();
2268  for (auto& parameter: parameters) {
2269  const auto& name = parameter->get_node_name();
2270  auto var_used = VarUsageVisitor().variable_used(node, name);
2271  if (var_used) {
2272  RenameVisitor vr(name, "(*" + name + ")");
2273  node.get_statement_block()->visit_children(vr);
2274  }
2275  }
2276 }
2277 
2278 
2279 void CodegenCoreneuronCppVisitor::print_net_init() {
2280  const auto node = info.net_receive_initial_node;
2281  if (node == nullptr) {
2282  return;
2283  }
2284 
2285  // rename net_receive arguments used in the initial block of net_receive
2286  rename_net_receive_arguments(*info.net_receive_node, *node);
2287 
2288  printing_net_init = true;
2289  auto args = "Point_process* pnt, int weight_index, double flag";
2290  printer->add_newline(2);
2291  printer->add_line("/** initialize block for net receive */");
2292  printer->fmt_push_block("static void net_init({})", args);
2293  auto block = node->get_statement_block().get();
2294  if (block->get_statements().empty()) {
2295  printer->add_line("// do nothing");
2296  } else {
2297  print_net_receive_common_code(*node);
2298  print_statement_block(*block, false, false);
2299  if (node->is_initial_block()) {
2300  print_net_init_acc_serial_annotation_block_end();
2301  print_kernel_data_present_annotation_block_end();
2302  printer->add_line("auto& nsb = ml->_net_send_buffer;");
2303  print_net_send_buf_update_to_host();
2304  }
2305  }
2306  printer->pop_block();
2307  printing_net_init = false;
2308 }
2309 
2310 
2311 void CodegenCoreneuronCppVisitor::print_send_event_move() {
2312  printer->add_newline();
2313  printer->add_line("NetSendBuffer_t* nsb = ml->_net_send_buffer;");
2314  print_net_send_buf_update_to_host();
2315  printer->push_block("for (int i=0; i < nsb->_cnt; i++)");
2316  printer->add_multi_line(R"CODE(
2317  int type = nsb->_sendtype[i];
2318  int tid = nt->id;
2319  double t = nsb->_nsb_t[i];
2320  double flag = nsb->_nsb_flag[i];
2321  int vdata_index = nsb->_vdata_index[i];
2322  int weight_index = nsb->_weight_index[i];
2323  int point_index = nsb->_pnt_index[i];
2324  net_sem_from_gpu(type, vdata_index, weight_index, tid, point_index, t, flag);
2325  )CODE");
2326  printer->pop_block();
2327  printer->add_line("nsb->_cnt = 0;");
2328  print_net_send_buf_count_update_to_device();
2329 }
2330 
2331 
2332 std::string CodegenCoreneuronCppVisitor::net_receive_buffering_declaration() {
2333  return fmt::format("void {}(NrnThread* nt)", method_name("net_buf_receive"));
2334 }
2335 
2336 
2337 void CodegenCoreneuronCppVisitor::print_get_memb_list() {
2338  printer->add_line("Memb_list* ml = get_memb_list(nt);");
2339  printer->push_block("if (!ml)");
2340  printer->add_line("return;");
2341  printer->pop_block();
2342  printer->add_newline();
2343 }
2344 
2345 
2346 void CodegenCoreneuronCppVisitor::print_net_receive_loop_begin() {
2347  printer->add_line("int count = nrb->_displ_cnt;");
2348  print_parallel_iteration_hint(BlockType::NetReceive, info.net_receive_node);
2349  printer->push_block("for (int i = 0; i < count; i++)");
2350 }
2351 
2352 
2353 void CodegenCoreneuronCppVisitor::print_net_receive_loop_end() {
2354  printer->pop_block();
2355 }
2356 
2357 
2358 void CodegenCoreneuronCppVisitor::print_net_receive_buffering(bool need_mech_inst) {
2359  if (!net_receive_required() || info.artificial_cell) {
2360  return;
2361  }
2362  printer->add_newline(2);
2363  printer->push_block(net_receive_buffering_declaration());
2364 
2365  print_get_memb_list();
2366 
2367  const auto& net_receive = method_name("net_receive_kernel");
2368 
2369  print_kernel_data_present_annotation_block_begin();
2370 
2371  printer->add_line("NetReceiveBuffer_t* nrb = ml->_net_receive_buffer;");
2372  if (need_mech_inst) {
2373  printer->fmt_line("auto* const inst = static_cast<{0}*>(ml->instance);", instance_struct());
2374  }
2375  print_net_receive_loop_begin();
2376  printer->add_line("int start = nrb->_displ[i];");
2377  printer->add_line("int end = nrb->_displ[i+1];");
2378  printer->push_block("for (int j = start; j < end; j++)");
2379  printer->add_multi_line(R"CODE(
2380  int index = nrb->_nrb_index[j];
2381  int offset = nrb->_pnt_index[index];
2382  double t = nrb->_nrb_t[index];
2383  int weight_index = nrb->_weight_index[index];
2384  double flag = nrb->_nrb_flag[index];
2385  Point_process* point_process = nt->pntprocs + offset;
2386  )CODE");
2387  printer->add_line(net_receive, "(t, point_process, inst, nt, ml, weight_index, flag);");
2388  printer->pop_block();
2389  print_net_receive_loop_end();
2390 
2391  print_device_stream_wait();
2392  printer->add_line("nrb->_displ_cnt = 0;");
2393  printer->add_line("nrb->_cnt = 0;");
2394 
2395  if (info.net_send_used || info.net_event_used) {
2396  print_send_event_move();
2397  }
2398 
2399  print_kernel_data_present_annotation_block_end();
2400  printer->pop_block();
2401 }
2402 
2403 
2404 void CodegenCoreneuronCppVisitor::print_net_send_buffering_cnt_update() const {
2405  printer->add_line("i = nsb->_cnt++;");
2406 }
2407 
2408 
2409 void CodegenCoreneuronCppVisitor::print_net_send_buffering_grow() {
2410  printer->push_block("if (i >= nsb->_size)");
2411  printer->add_line("nsb->grow();");
2412  printer->pop_block();
2413 }
2414 
2415 
2416 void CodegenCoreneuronCppVisitor::print_net_send_buffering() {
2417  if (!net_send_buffer_required()) {
2418  return;
2419  }
2420 
2421  printer->add_newline(2);
2422  auto args =
2423  "const NrnThread* nt, NetSendBuffer_t* nsb, int type, int vdata_index, "
2424  "int weight_index, int point_index, double t, double flag";
2425  printer->fmt_push_block("static inline void net_send_buffering({})", args);
2426  printer->add_line("int i = 0;");
2427  print_net_send_buffering_cnt_update();
2428  print_net_send_buffering_grow();
2429  printer->push_block("if (i < nsb->_size)");
2430  printer->add_multi_line(R"CODE(
2431  nsb->_sendtype[i] = type;
2432  nsb->_vdata_index[i] = vdata_index;
2433  nsb->_weight_index[i] = weight_index;
2434  nsb->_pnt_index[i] = point_index;
2435  nsb->_nsb_t[i] = t;
2436  nsb->_nsb_flag[i] = flag;
2437  )CODE");
2438  printer->pop_block();
2439  printer->pop_block();
2440 }
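/**
 * Illustrative sketch (not from this file): the emitted buffering helper is
 * essentially the following; GPU backends override how the count is updated
 * and how the buffer grows:
 *
 * \code{.cpp}
 * static inline void net_send_buffering(const NrnThread* nt, NetSendBuffer_t* nsb,
 *                                       int type, int vdata_index, int weight_index,
 *                                       int point_index, double t, double flag) {
 *     int i = 0;
 *     i = nsb->_cnt++;
 *     if (i >= nsb->_size) {
 *         nsb->grow();
 *     }
 *     if (i < nsb->_size) {
 *         nsb->_sendtype[i] = type;
 *         nsb->_vdata_index[i] = vdata_index;
 *         nsb->_weight_index[i] = weight_index;
 *         nsb->_pnt_index[i] = point_index;
 *         nsb->_nsb_t[i] = t;
 *         nsb->_nsb_flag[i] = flag;
 *     }
 * }
 * \endcode
 */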
2441 
2442 
2443 void CodegenCoreneuronCppVisitor::print_net_receive_kernel() {
2444  if (!net_receive_required()) {
2445  return;
2446  }
2447 
2448  printing_net_receive = true;
2449  const auto node = info.net_receive_node;
2450 
2451  // rename net_receive arguments used in the block itself
2452  rename_net_receive_arguments(*info.net_receive_node, *node);
2453 
2454  std::string name;
2455  ParamVector params;
2456  if (!info.artificial_cell) {
2457  name = method_name("net_receive_kernel");
2458  params.emplace_back("", "double", "", "t");
2459  params.emplace_back("", "Point_process*", "", "pnt");
2460  params.emplace_back("", fmt::format("{}*", instance_struct()),
2461  "", "inst");
2462  params.emplace_back("", "NrnThread*", "", "nt");
2463  params.emplace_back("", "Memb_list*", "", "ml");
2464  params.emplace_back("", "int", "", "weight_index");
2465  params.emplace_back("", "double", "", "flag");
2466  } else {
2467  name = method_name("net_receive");
2468  params.emplace_back("", "Point_process*", "", "pnt");
2469  params.emplace_back("", "int", "", "weight_index");
2470  params.emplace_back("", "double", "", "flag");
2471  }
2472 
2473  printer->add_newline(2);
2474  printer->fmt_push_block("static inline void {}({})", name, get_parameter_str(params));
2475  print_net_receive_common_code(*node, info.artificial_cell);
2476  if (info.artificial_cell) {
2477  printer->add_line("double t = nt->_t;");
2478  }
2479 
2480  // set voltage variable if it is used in the block (e.g. for WATCH statement)
2481  auto v_used = VarUsageVisitor().variable_used(*node->get_statement_block(), "v");
2482  if (v_used) {
2483  printer->add_line("int node_id = ml->nodeindices[id];");
2484  printer->add_line("v = nt->_actual_v[node_id];");
2485  }
2486 
2487  printer->fmt_line("{} = t;", get_variable_name("tsave"));
2488 
2489  if (info.is_watch_used()) {
2490  printer->add_line("bool watch_remove = false;");
2491  }
2492 
2493  printer->add_indent();
2494  node->get_statement_block()->accept(*this);
2495  printer->add_newline();
2496  printer->pop_block();
2497 
2498  printing_net_receive = false;
2499 }
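/**
 * Illustrative sketch (not from this file): for a non-artificial cell the
 * emitted kernel has this shape:
 *
 * \code{.cpp}
 * static inline void net_receive_kernel_ExSyn(double t, Point_process* pnt,
 *                                             ExSyn_Instance* inst, NrnThread* nt,
 *                                             Memb_list* ml, int weight_index,
 *                                             double flag) {
 *     int tid = pnt->_tid;
 *     int id = pnt->_i_instance;
 *     double v = 0;
 *     // ... weights setup, tsave update, NET_RECEIVE body ...
 * }
 * \endcode
 */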
2500 
2501 
2502 void CodegenCoreneuronCppVisitor::print_net_receive() {
2503  if (!net_receive_required()) {
2504  return;
2505  }
2506 
2507  printing_net_receive = true;
2508  if (!info.artificial_cell) {
2509  const auto& name = method_name("net_receive");
2510  ParamVector params = {
2511  {"", "Point_process*", "", "pnt"},
2512  {"", "int", "", "weight_index"},
2513  {"", "double", "", "flag"}};
2514  printer->add_newline(2);
2515  printer->fmt_push_block("static void {}({})", name, get_parameter_str(params));
2516  printer->add_line("NrnThread* nt = nrn_threads + pnt->_tid;");
2517  printer->add_line("Memb_list* ml = get_memb_list(nt);");
2518  printer->add_line("NetReceiveBuffer_t* nrb = ml->_net_receive_buffer;");
2519  printer->push_block("if (nrb->_cnt >= nrb->_size)");
2520  printer->add_line("realloc_net_receive_buffer(nt, ml);");
2521  printer->pop_block();
2522  printer->add_multi_line(R"CODE(
2523  int id = nrb->_cnt;
2524  nrb->_pnt_index[id] = pnt-nt->pntprocs;
2525  nrb->_weight_index[id] = weight_index;
2526  nrb->_nrb_t[id] = nt->_t;
2527  nrb->_nrb_flag[id] = flag;
2528  nrb->_cnt++;
2529  )CODE");
2530  printer->pop_block();
2531  }
2532  printing_net_receive = false;
2533 }
2534 
2535 
2536 /**
 2537  * \todo Data is not derived. Need to add instance into the instance struct?
 2538  * The data used here is wrong for AoS because, as in the original
 2539  * implementation, data is not incremented every iteration for AoS. It may
 2540  * be better to derive the actual variable names? [resolved now?]
 2541  * slist needs to be added as a local variable
2542  */
2543 void CodegenCoreneuronCppVisitor::print_derivimplicit_kernel(const ast::Block& block) {
2544  auto ext_args = external_method_arguments();
2545  auto ext_params = get_parameter_str(external_method_parameters());
2546  auto suffix = info.mod_suffix;
2547  auto list_num = info.derivimplicit_list_num;
2548  auto block_name = block.get_node_name();
2549  auto primes_size = info.primes_size;
2550  auto stride = "*pnodecount+id";
2551 
2552  printer->add_newline(2);
2553 
2554  printer->push_block("namespace");
2555  printer->fmt_push_block("struct _newton_{}_{}", block_name, info.mod_suffix);
2556  printer->fmt_push_block("int operator()({}) const", get_parameter_str(external_method_parameters()));
2557  auto const instance = fmt::format("auto* const inst = static_cast<{0}*>(ml->instance);",
2558  instance_struct());
2559  auto const slist1 = fmt::format("auto const& slist{} = {};",
2560  list_num,
2561  get_variable_name(fmt::format("slist{}", list_num)));
2562  auto const slist2 = fmt::format("auto& slist{} = {};",
2563  list_num + 1,
2564  get_variable_name(fmt::format("slist{}", list_num + 1)));
2565  auto const dlist1 = fmt::format("auto const& dlist{} = {};",
2566  list_num,
2567  get_variable_name(fmt::format("dlist{}", list_num)));
2568  auto const dlist2 = fmt::format(
2569  "double* dlist{} = static_cast<double*>(thread[dith{}()].pval) + ({}*pnodecount);",
2570  list_num + 1,
2571  list_num,
2572  info.primes_size);
2573  printer->add_line(instance);
2574  if (ion_variable_struct_required()) {
2575  print_ion_variable();
2576  }
2577  printer->fmt_line("double* savstate{} = static_cast<double*>(thread[dith{}()].pval);",
2578  list_num,
2579  list_num);
2580  printer->add_line(slist1);
2581  printer->add_line(dlist1);
2582  printer->add_line(dlist2);
2583 
2584  print_statement_block(*block.get_statement_block(), false, false);
2585 
2586  printer->add_line("int counter = -1;");
2587  printer->fmt_push_block("for (int i=0; i<{}; i++)", info.num_primes);
2588  printer->fmt_push_block("if (*deriv{}_advance(thread))", list_num);
2589  printer->fmt_line(
2590  "dlist{0}[(++counter){1}] = "
2591  "data[dlist{2}[i]{1}]-(data[slist{2}[i]{1}]-savstate{2}[i{1}])/nt->_dt;",
2592  list_num + 1,
2593  stride,
2594  list_num);
2595  printer->chain_block("else");
2596  printer->fmt_line("dlist{0}[(++counter){1}] = data[slist{2}[i]{1}]-savstate{2}[i{1}];",
2597  list_num + 1,
2598  stride,
2599  list_num);
2600  printer->pop_block();
2601  printer->pop_block();
2602  printer->add_line("return 0;");
2603  printer->pop_block(); // operator()
2604  printer->pop_block(";"); // struct
2605  printer->pop_block(); // namespace
2606  printer->add_newline();
2607  printer->fmt_push_block("int {}_{}({})", block_name, suffix, ext_params);
2608  printer->add_line(instance);
2609  printer->fmt_line("double* savstate{} = (double*) thread[dith{}()].pval;", list_num, list_num);
2610  printer->add_line(slist1);
2611  printer->add_line(slist2);
2612  printer->add_line(dlist2);
2613  printer->fmt_push_block("for (int i=0; i<{}; i++)", info.num_primes);
2614  printer->fmt_line("savstate{}[i{}] = data[slist{}[i]{}];", list_num, stride, list_num, stride);
2615  printer->pop_block();
2616  printer->fmt_line(
2617  "int reset = nrn_newton_thread(static_cast<NewtonSpace*>(*newtonspace{}(thread)), {}, "
2618  "slist{}, _newton_{}_{}{{}}, dlist{}, {});",
2619  list_num,
2620  primes_size,
2621  list_num + 1,
2622  block_name,
2623  suffix,
2624  list_num + 1,
2625  ext_args);
2626  printer->add_line("return reset;");
2627  printer->pop_block();
2628  printer->add_newline(2);
2629 }
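/**
 * Illustrative sketch (not from this file): for a DERIVATIVE block `states`
 * solved with derivimplicit (list_num = 1, one state variable), the printed
 * code has roughly this shape; `args` abbreviates the parameter pack built by
 * external_method_parameters()/external_method_arguments():
 *
 * \code{.cpp}
 * namespace {
 * struct _newton_states_ExSyn {
 *     int operator()(args) const {
 *         auto* const inst = static_cast<ExSyn_Instance*>(ml->instance);
 *         double* savstate1 = static_cast<double*>(thread[dith1()].pval);
 *         // evaluate the DERIVATIVE body, then store residuals into dlist2
 *         return 0;
 *     }
 * };
 * }  // namespace
 *
 * int states_ExSyn(args) {
 *     // save the current states into savstate1, then run Newton iterations
 *     int reset = nrn_newton_thread(static_cast<NewtonSpace*>(*newtonspace1(thread)),
 *                                   1, slist2, _newton_states_ExSyn{}, dlist2, args);
 *     return reset;
 * }
 * \endcode
 */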
2630 
2631 
2632 void CodegenCoreneuronCppVisitor::print_newtonspace_transfer_to_device() const {
2633  // nothing to do on cpu
2634 }
2635 
2636 
2637 /****************************************************************************************/
2638 /* Print nrn_state routine */
2639 /****************************************************************************************/
2640 
2641 
2642 void CodegenCoreneuronCppVisitor::print_nrn_state() {
2643  if (!nrn_state_required()) {
2644  return;
2645  }
2646 
2647  printer->add_newline(2);
2648  printer->add_line("/** update state */");
2649  print_global_function_common_code(BlockType::State);
2650  print_parallel_iteration_hint(BlockType::State, info.nrn_state_block);
2651  printer->push_block("for (int id = 0; id < nodecount; id++)");
2652 
2653  printer->add_line("int node_id = node_index[id];");
2654  printer->add_line("double v = voltage[node_id];");
2655  print_v_unused();
2656 
2657  /**
2658  * \todo Eigen solver node also emits IonCurVar variable in the functor
2659  * but that shouldn't update ions in derivative block
2660  */
2661  if (ion_variable_struct_required()) {
2662  print_ion_variable();
2663  }
2664 
2665  auto read_statements = ion_read_statements(BlockType::State);
2666  for (auto& statement: read_statements) {
2667  printer->add_line(statement);
2668  }
2669 
2670  if (info.nrn_state_block) {
2671  info.nrn_state_block->visit_children(*this);
2672  }
2673 
2674  if (info.currents.empty() && info.breakpoint_node != nullptr) {
2675  auto block = info.breakpoint_node->get_statement_block();
2676  print_statement_block(*block, false, false);
2677  }
2678 
2679  const auto& write_statements = ion_write_statements(BlockType::State);
2680  for (auto& statement: write_statements) {
2681  const auto& text = process_shadow_update_statement(statement, BlockType::State);
2682  printer->add_line(text);
2683  }
2684  printer->pop_block();
2685 
2686  print_kernel_data_present_annotation_block_end();
2687 
2688  printer->pop_block();
2689 }
2690 
2691 
2692 /****************************************************************************************/
2693 /* Print nrn_cur related routines */
2694 /****************************************************************************************/
2695 
2696 
2697 void CodegenCoreneuronCppVisitor::print_nrn_current(const ast::BreakpointBlock& node) {
2698  const auto& args = internal_method_parameters();
2699  const auto& block = node.get_statement_block();
2700  printer->add_newline(2);
2701  printer->fmt_push_block("inline double nrn_current_{}({})",
2702  info.mod_suffix,
2703  get_parameter_str(args));
2704  printer->add_line("double current = 0.0;");
2705  print_statement_block(*block, false, false);
2706  for (auto& current: info.currents) {
2707  const auto& name = get_variable_name(current);
2708  printer->fmt_line("current += {};", name);
2709  }
2710  printer->add_line("return current;");
2711  printer->pop_block();
2712 }
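/**
 * Illustrative sketch (not from this file): for a BREAKPOINT writing a single
 * current `i`, the emitted helper is roughly the following (the parameter
 * list is assumed from the usual internal method parameters):
 *
 * \code{.cpp}
 * inline double nrn_current_ExSyn(int id, int pnodecount, ExSyn_Instance* inst,
 *                                 double* data, const Datum* indexes,
 *                                 ThreadDatum* thread, NrnThread* nt, double v) {
 *     double current = 0.0;
 *     inst->i[id] = inst->g[id] * (v - inst->e[id]);
 *     current += inst->i[id];
 *     return current;
 * }
 * \endcode
 */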
2713 
2714 
2715 void CodegenCoreneuronCppVisitor::print_nrn_cur_conductance_kernel(const ast::BreakpointBlock& node) {
2716  const auto& block = node.get_statement_block();
2717  print_statement_block(*block, false, false);
2718  if (!info.currents.empty()) {
2719  std::string sum;
2720  for (const auto& current: info.currents) {
2721  auto var = breakpoint_current(current);
2722  sum += get_variable_name(var);
2723  if (&current != &info.currents.back()) {
2724  sum += "+";
2725  }
2726  }
2727  printer->fmt_line("double rhs = {};", sum);
2728  }
2729 
2730  std::string sum;
2731  for (const auto& conductance: info.conductances) {
2732  auto var = breakpoint_current(conductance.variable);
2733  sum += get_variable_name(var);
2734  if (&conductance != &info.conductances.back()) {
2735  sum += "+";
2736  }
2737  }
2738  printer->fmt_line("double g = {};", sum);
2739 
2740  for (const auto& conductance: info.conductances) {
2741  if (!conductance.ion.empty()) {
2742  const auto& lhs = std::string(naming::ION_VARNAME_PREFIX) + "di" + conductance.ion + "dv";
2743  const auto& rhs = get_variable_name(conductance.variable);
2744  const ShadowUseStatement statement{lhs, "+=", rhs};
2745  const auto& text = process_shadow_update_statement(statement, BlockType::Equation);
2746  printer->add_line(text);
2747  }
2748  }
2749 }
2750 
2751 
2752 void CodegenCoreneuronCppVisitor::print_nrn_cur_non_conductance_kernel() {
2753  printer->fmt_line("double g = nrn_current_{}({}+0.001);",
2754  info.mod_suffix,
2755  internal_method_arguments());
2756  for (auto& ion: info.ions) {
2757  for (auto& var: ion.writes) {
2758  if (ion.is_ionic_current(var)) {
2759  const auto& name = get_variable_name(var);
2760  printer->fmt_line("double di{} = {};", ion.name, name);
2761  }
2762  }
2763  }
2764  printer->fmt_line("double rhs = nrn_current_{}({});",
2765  info.mod_suffix,
2766  internal_method_arguments());
2767  printer->add_line("g = (g-rhs)/0.001;");
2768  for (auto& ion: info.ions) {
2769  for (auto& var: ion.writes) {
2770  if (ion.is_ionic_current(var)) {
2771  const auto& lhs = std::string(naming::ION_VARNAME_PREFIX) + "di" + ion.name + "dv";
2772  auto rhs = fmt::format("(di{}-{})/0.001", ion.name, get_variable_name(var));
2773  if (info.point_process) {
2774  auto area = get_variable_name(naming::NODE_AREA_VARIABLE);
2775  rhs += fmt::format("*1.e2/{}", area);
2776  }
2777  const ShadowUseStatement statement{lhs, "+=", rhs};
2778  const auto& text = process_shadow_update_statement(statement, BlockType::Equation);
2779  printer->add_line(text);
2780  }
2781  }
2782  }
2783 }
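/**
 * Illustrative sketch (not from this file): without CONDUCTANCE hints, the
 * conductance is estimated by a finite difference at v+0.001; e.g. for an ion
 * `na` written by the mechanism, where `ion_ina`/`ion_dinadv` stand for the
 * expanded ion variable accesses:
 *
 * \code{.cpp}
 * double g = nrn_current_ExSyn(id, pnodecount, inst, data, indexes, thread, nt, v+0.001);
 * double dina = inst->ion_ina[id];
 * double rhs = nrn_current_ExSyn(id, pnodecount, inst, data, indexes, thread, nt, v);
 * g = (g-rhs)/0.001;
 * // di/dv per ion; for point processes the term is additionally scaled by 1.e2/area
 * inst->ion_dinadv[id] += (dina-inst->ion_ina[id])/0.001;
 * \endcode
 */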
2784 
2785 
2786 void CodegenCoreneuronCppVisitor::print_nrn_cur_kernel(const ast::BreakpointBlock& node) {
2787  printer->add_line("int node_id = node_index[id];");
2788  printer->add_line("double v = voltage[node_id];");
2789  print_v_unused();
2790  if (ion_variable_struct_required()) {
2791  print_ion_variable();
2792  }
2793 
2794  const auto& read_statements = ion_read_statements(BlockType::Equation);
2795  for (auto& statement: read_statements) {
2796  printer->add_line(statement);
2797  }
2798 
2799  if (info.conductances.empty()) {
2800  print_nrn_cur_non_conductance_kernel();
2801  } else {
2802  print_nrn_cur_conductance_kernel(node);
2803  }
2804 
2805  const auto& write_statements = ion_write_statements(BlockType::Equation);
2806  for (auto& statement: write_statements) {
2807  auto text = process_shadow_update_statement(statement, BlockType::Equation);
2808  printer->add_line(text);
2809  }
2810 
2811  if (info.point_process) {
2812  const auto& area = get_variable_name(naming::NODE_AREA_VARIABLE);
2813  printer->fmt_line("double mfactor = 1.e2/{};", area);
2814  printer->add_line("g = g*mfactor;");
2815  printer->add_line("rhs = rhs*mfactor;");
2816  }
2817 
2818  print_g_unused();
2819 }
2820 
2821 
2822 void CodegenCoreneuronCppVisitor::print_fast_imem_calculation() {
2823  if (!info.electrode_current) {
2824  return;
2825  }
2826  std::string rhs, d;
2827  auto rhs_op = operator_for_rhs();
2828  auto d_op = operator_for_d();
2829  if (info.point_process) {
2830  rhs = "shadow_rhs[id]";
2831  d = "shadow_d[id]";
2832  } else {
2833  rhs = "rhs";
2834  d = "g";
2835  }
2836 
2837  printer->push_block("if (nt->nrn_fast_imem)");
2838  if (nrn_cur_reduction_loop_required()) {
2839  printer->push_block("for (int id = 0; id < nodecount; id++)");
2840  printer->add_line("int node_id = node_index[id];");
2841  }
2842  printer->fmt_line("nt->nrn_fast_imem->nrn_sav_rhs[node_id] {} {};", rhs_op, rhs);
2843  printer->fmt_line("nt->nrn_fast_imem->nrn_sav_d[node_id] {} {};", d_op, d);
2844  if (nrn_cur_reduction_loop_required()) {
2845  printer->pop_block();
2846  }
2847  printer->pop_block();
2848 }
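/**
 * Illustrative sketch (not from this file): for a density mechanism with
 * ELECTRODE_CURRENT (no shadow vectors, no reduction loop), the emitted
 * fast-imem block is roughly as follows; the signs come from
 * operator_for_rhs()/operator_for_d() and are an assumption here:
 *
 * \code{.cpp}
 * if (nt->nrn_fast_imem) {
 *     nt->nrn_fast_imem->nrn_sav_rhs[node_id] += rhs;
 *     nt->nrn_fast_imem->nrn_sav_d[node_id] -= g;
 * }
 * \endcode
 */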
2849 
2850 
2851 void CodegenCoreneuronCppVisitor::print_nrn_cur() {
2852  if (!nrn_cur_required()) {
2853  return;
2854  }
2855 
2856  if (info.conductances.empty()) {
2857  print_nrn_current(*info.breakpoint_node);
2858  }
2859 
2860  printer->add_newline(2);
2861  printer->add_line("/** update current */");
2862  print_global_function_common_code(BlockType::Equation);
2863  print_parallel_iteration_hint(BlockType::Equation, info.breakpoint_node);
2864  printer->push_block("for (int id = 0; id < nodecount; id++)");
2865  print_nrn_cur_kernel(*info.breakpoint_node);
2866  print_nrn_cur_matrix_shadow_update();
2867  if (!nrn_cur_reduction_loop_required()) {
2868  print_fast_imem_calculation();
2869  }
2870  printer->pop_block();
2871 
2872  if (nrn_cur_reduction_loop_required()) {
2873  printer->push_block("for (int id = 0; id < nodecount; id++)");
2874  print_nrn_cur_matrix_shadow_reduction();
2875  printer->pop_block();
2876  print_fast_imem_calculation();
2877  }
2878 
2879  print_kernel_data_present_annotation_block_end();
2880  printer->pop_block();
2881 }
2882 
2883 
2884 /****************************************************************************************/
2885 /* Main code printing entry points */
2886 /****************************************************************************************/
2887 
2888 void CodegenCoreneuronCppVisitor::print_headers_include() {
2889  print_standard_includes();
2890  print_backend_includes();
2891  print_coreneuron_includes();
2892 }
2893 
2894 
2895 void CodegenCoreneuronCppVisitor::print_common_getters() {
2896  print_first_pointer_var_index_getter();
2897  print_first_random_var_index_getter();
2898  print_net_receive_arg_size_getter();
2899  print_thread_getters();
2900  print_num_variable_getter();
2901  print_mech_type_getter();
2902  print_memb_list_getter();
2903 }
2904 
2905 
2906 void CodegenCoreneuronCppVisitor::print_data_structures(bool print_initializers) {
2907  print_mechanism_global_var_structure(print_initializers);
2908  print_mechanism_range_var_structure(print_initializers);
2909  print_ion_var_structure();
2910 }
2911 
2912 
2913 void CodegenCoreneuronCppVisitor::print_v_unused() const {
2914  if (!info.vectorize) {
2915  return;
2916  }
2917  printer->add_multi_line(R"CODE(
2918  #if NRN_PRCELLSTATE
2919  inst->v_unused[id] = v;
2920  #endif
2921  )CODE");
2922 }
2923 
2924 
2925 void CodegenCoreneuronCppVisitor::print_g_unused() const {
2926  printer->add_multi_line(R"CODE(
2927  #if NRN_PRCELLSTATE
2928  inst->g_unused[id] = g;
2929  #endif
2930  )CODE");
2931 }
2932 
2933 
2934 void CodegenCoreneuronCppVisitor::print_compute_functions() {
2935  print_top_verbatim_blocks();
2936  for (const auto& procedure: info.procedures) {
2937  print_procedure(*procedure);
2938  }
2939  for (const auto& function: info.functions) {
2940  print_function(*function);
2941  }
2942  for (const auto& function: info.function_tables) {
2943  print_function_tables(*function);
2944  }
2945  for (size_t i = 0; i < info.before_after_blocks.size(); i++) {
2946  print_before_after_block(info.before_after_blocks[i], i);
2947  }
2948  for (const auto& callback: info.derivimplicit_callbacks) {
2949  const auto& block = *callback->get_node_to_solve();
2950  print_derivimplicit_kernel(block);
2951  }
2952  print_net_send_buffering();
2953  print_net_init();
2954  print_watch_activate();
2955  print_watch_check();
2956  print_net_receive_kernel();
2957  print_net_receive();
2958  print_net_receive_buffering();
2959  print_nrn_init();
2960  print_nrn_cur();
2961  print_nrn_state();
2962 }
2963 
2964 
2965 void CodegenCoreneuronCppVisitor::print_codegen_routines() {
2966  print_backend_info();
2967  print_headers_include();
2968  print_namespace_start();
2969  print_nmodl_constants();
2970  print_prcellstate_macros();
2971  print_mechanism_info();
2972  print_data_structures(true);
2973  print_global_variables_for_hoc();
2974  print_common_getters();
2975  print_memory_allocation_routine();
2976  print_abort_routine();
2977  print_thread_memory_callbacks();
2978  print_instance_variable_setup();
2979  print_nrn_alloc();
2980  print_nrn_constructor();
2981  print_nrn_destructor();
2982  print_function_prototypes();
2983  print_functors_definitions();
2984  print_compute_functions();
2985  print_check_table_thread_function();
2986  print_mechanism_register();
2987  print_namespace_stop();
2988 }
2989 
2990 
2991 /****************************************************************************************/
2992 /* Overloaded visitor routines */
2993 /****************************************************************************************/
2994 
2995 
2996 void CodegenCoreneuronCppVisitor::visit_derivimplicit_callback(const ast::DerivimplicitCallback& node) {
2997  printer->fmt_line("{}_{}({});",
2998  node.get_node_to_solve()->get_node_name(),
2999  info.mod_suffix,
3000  external_method_arguments());
3001 }
3002 
3003 
3004 void CodegenCoreneuronCppVisitor::visit_for_netcon(const ast::ForNetcon& node) {
 3005  // FOR_NETCON should take the same arguments as NET_RECEIVE and apply the operations
 3006  // in the block to the weights of the netcons. Since all the weights are in the same vector,
 3007  // weights, we have a mask of operations that we apply iteratively, advancing the offset
 3008  // to the next netcon.
3009  const auto& args = node.get_parameters();
3010  RenameVisitor v;
3011  const auto& statement_block = node.get_statement_block();
3012  for (size_t i_arg = 0; i_arg < args.size(); ++i_arg) {
3013  // sanitize node_name since we want to substitute names like (*w) as they are
3014  auto old_name =
3015  std::regex_replace(args[i_arg]->get_node_name(), regex_special_chars, R"(\$&)");
3016  const auto& new_name = fmt::format("weights[{} + nt->_fornetcon_weight_perm[i]]", i_arg);
3017  v.set(old_name, new_name);
3018  statement_block->accept(v);
3019  }
3020 
3021  const auto index = position_of_int_var(naming::FOR_NETCON_VARIABLE);
3022 
3023  printer->fmt_text("const size_t offset = {}*pnodecount + id;", index);
3024  printer->add_newline();
3025  printer->add_line(
3026  "const size_t for_netcon_start = nt->_fornetcon_perm_indices[indexes[offset]];");
3027  printer->add_line(
3028  "const size_t for_netcon_end = nt->_fornetcon_perm_indices[indexes[offset] + 1];");
3029 
3030  printer->push_block("for (auto i = for_netcon_start; i < for_netcon_end; ++i)");
3031  print_statement_block(*statement_block, false, false);
3032  printer->pop_block();
3033 }
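/**
 * Illustrative sketch (not from this file): a hypothetical
 * `FOR_NETCONS(w1) { w1 = w1 + 1 }` block becomes a loop over the permuted
 * netcon weights (assuming the fornetcon index variable sits at int position 3):
 *
 * \code{.cpp}
 * const size_t offset = 3*pnodecount + id;
 * const size_t for_netcon_start = nt->_fornetcon_perm_indices[indexes[offset]];
 * const size_t for_netcon_end = nt->_fornetcon_perm_indices[indexes[offset] + 1];
 * for (auto i = for_netcon_start; i < for_netcon_end; ++i) {
 *     weights[0 + nt->_fornetcon_weight_perm[i]] =
 *         weights[0 + nt->_fornetcon_weight_perm[i]] + 1.0;
 * }
 * \endcode
 */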
3034 
3035 
3036 void CodegenCoreneuronCppVisitor::visit_watch_statement(const ast::WatchStatement& /* node */) {
3037  printer->add_text(fmt::format("nrn_watch_activate(inst, id, pnodecount, {}, v, watch_remove)",
3038  current_watch_statement++));
3039 }
3040 
3041 
3042 void CodegenCoreneuronCppVisitor::visit_protect_statement(const ast::ProtectStatement& node) {
3043  print_atomic_reduction_pragma();
3044  printer->add_indent();
3045  node.get_expression()->accept(*this);
3046  printer->add_text(";");
3047 }
3048 
3049 
3050 
3051 } // namespace codegen
3052 } // namespace nmodl
nmodl::ast::FunctionCall::get_node_name
std::string get_node_name() const override
Return name of the node.
Definition: ast.cpp:7065
nmodl::ast::BeforeBlock
Represents a BEFORE block in NMODL.
Definition: before_block.hpp:38
nmodl::codegen::CodegenCoreneuronCppVisitor::print_coreneuron_includes
void print_coreneuron_includes()
Print includes from coreneuron.
Definition: codegen_coreneuron_cpp_visitor.cpp:897
nmodl::codegen::CodegenCoreneuronCppVisitor::needs_v_unused
bool needs_v_unused() const override
Definition: codegen_coreneuron_cpp_visitor.cpp:62
nmodl::codegen::CodegenCoreneuronCppVisitor::net_receive_buffering_declaration
virtual std::string net_receive_buffering_declaration()
Generate the target backend code for the net_receive_buffering function declaration.
Definition: codegen_coreneuron_cpp_visitor.cpp:2332
nmodl::ast::DerivimplicitCallback::get_node_to_solve
std::shared_ptr< Block > get_node_to_solve() const noexcept
Getter for member variable DerivimplicitCallback::node_to_solve.
Definition: derivimplicit_callback.hpp:143
nmodl::codegen::CodegenCoreneuronCppVisitor::nrn_cur_reduction_loop_required
virtual bool nrn_cur_reduction_loop_required()
Check if reduction block in nrn_cur required.
Definition: codegen_coreneuron_cpp_visitor.cpp:236
nmodl::codegen::naming::RANDOM_SEMANTIC
static constexpr char RANDOM_SEMANTIC[]
semantic type for RANDOM variable
Definition: codegen_naming.hpp:132
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_move_call
void print_net_move_call(const ast::FunctionCall &node) override
Print call to net_move.
Definition: codegen_coreneuron_cpp_visitor.cpp:2186
nmodl::ast::Node
Base class for all AST node.
Definition: node.hpp:40
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mechanism_range_var_structure
void print_mechanism_range_var_structure(bool print_initializers) override
Print the structure that wraps all range and int variables required for the NMODL.
Definition: codegen_coreneuron_cpp_visitor.cpp:1461
nmodl::codegen::IndexVariableInfo::is_index
bool is_index
if this is pure index (e.g.
Definition: codegen_cpp_visitor.hpp:137
nmodl::ast::Verbatim
Represents a C code block.
Definition: verbatim.hpp:38
nmodl::codegen::naming::FOR_NETCON_VARIABLE
static constexpr char FOR_NETCON_VARIABLE[]
name of the integer variable to store FOR_NETCON info.
Definition: codegen_naming.hpp:144
nmodl::codegen::naming::CELSIUS_VARIABLE
static constexpr char CELSIUS_VARIABLE[]
global temperature variable
Definition: codegen_naming.hpp:99
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buf_count_update_to_host
virtual void print_net_send_buf_count_update_to_host() const
Print the code to update NetSendBuffer_t count from device to host.
Definition: codegen_coreneuron_cpp_visitor.cpp:177
nmodl::ast::BATYPE_STEP
@ BATYPE_STEP
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_prototypes
void print_function_prototypes() override
Print function and procedures prototype declaration.
Definition: codegen_coreneuron_cpp_visitor.cpp:329
nmodl::codegen::CodegenCoreneuronCppVisitor::process_verbatim_text
std::string process_verbatim_text(std::string const &text)
Process a verbatim block for possible variable renaming.
Definition: codegen_coreneuron_cpp_visitor.cpp:545
nmodl::codegen::IndexVariableInfo
Helper to represent information about index/int variables.
Definition: codegen_cpp_visitor.hpp:127
nmodl::codegen::CodegenCoreneuronCppVisitor::print_v_unused
void print_v_unused() const override
Set v_unused (voltage) for NRN_PRCELLSTATE feature.
Definition: codegen_coreneuron_cpp_visitor.cpp:2913
nmodl::visitor::DUState
DUState
Represent a state in Def-Use chain.
Definition: defuse_analyze_visitor.hpp:28
nmodl::codegen::CodegenCoreneuronCppVisitor::print_derivimplicit_kernel
void print_derivimplicit_kernel(const ast::Block &block)
Print derivative kernel when derivimplicit method is used.
Definition: codegen_coreneuron_cpp_visitor.cpp:2543
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_protect_statement
void visit_protect_statement(const ast::ProtectStatement &node) override
visit node of type ast::ProtectStatement
Definition: codegen_coreneuron_cpp_visitor.cpp:3042
nmodl::codegen::CodegenCoreneuronCppVisitor::print_device_stream_wait
virtual void print_device_stream_wait() const
Print the code to synchronise/wait on stream specific to NrnThread.
Definition: codegen_coreneuron_cpp_visitor.cpp:197
nmodl::codegen::BlockType::Destructor
@ Destructor
destructor block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buf_count_update_to_device
virtual void print_net_send_buf_count_update_to_device() const
Print the code to update NetSendBuffer_t count from host to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:187
nmodl::ast::BAType
BAType
enum type to distinguish BEFORE or AFTER blocks
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::global_variable_name
std::string global_variable_name(const SymbolType &symbol, bool use_instance=true) const override
Determine the variable name for a global variable given its symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:797
nmodl::ast::BATYPE_INITIAL
@ BATYPE_INITIAL
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_newtonspace_transfer_to_device
virtual void print_newtonspace_transfer_to_device() const
Print code block to transfer newtonspace structure to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:2632
nmodl::codegen::naming::NRN_PRIVATE_DESTRUCTOR_METHOD
constexpr char NRN_PRIVATE_DESTRUCTOR_METHOD[]
nrn_private_destructor method in generated code
Definition: codegen_naming.hpp:159
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_call
void print_net_send_call(const ast::FunctionCall &node) override
Print call to net_send.
Definition: codegen_coreneuron_cpp_visitor.cpp:2154
nmodl::codegen::naming::POINT_PROCESS_VARIABLE
static constexpr char POINT_PROCESS_VARIABLE[]
inbuilt neuron variable for point process
Definition: codegen_naming.hpp:72
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_loop_begin
virtual void print_net_receive_loop_begin()
Print the code for the main net_receive loop.
Definition: codegen_coreneuron_cpp_visitor.cpp:2346
nmodl::ast::FunctionTableBlock::get_parameters
const ArgumentVector & get_parameters() const noexcept override
Getter for member variable FunctionTableBlock::parameters.
Definition: function_table_block.hpp:199
nmodl::ast::NetReceiveBlock
TODO.
Definition: net_receive_block.hpp:39
nmodl::codegen::naming::NTHREAD_D_SHADOW
static constexpr char NTHREAD_D_SHADOW[]
shadow d variable in neuron thread structure
Definition: codegen_naming.hpp:96
nmodl::codegen::CodegenCppVisitor::SymbolType
std::shared_ptr< symtab::Symbol > SymbolType
Definition: codegen_cpp_visitor.hpp:285
nmodl::codegen::CodegenCoreneuronCppVisitor::print_kernel_data_present_annotation_block_begin
virtual void print_kernel_data_present_annotation_block_begin()
Print accelerator annotations indicating data presence on device.
Definition: codegen_coreneuron_cpp_visitor.cpp:216
nmodl::ast::FunctionTableBlock
TODO.
Definition: function_table_block.hpp:39
nmodl::codegen::CodegenCoreneuronCppVisitor::print_g_unused
void print_g_unused() const override
Set g_unused (conductance) for NRN_PRCELLSTATE feature.
Definition: codegen_coreneuron_cpp_visitor.cpp:2925
nmodl::ast::NetReceiveBlock::get_parameters
const ArgumentVector & get_parameters() const noexcept override
Getter for member variable NetReceiveBlock::parameters.
Definition: net_receive_block.hpp:176
nmodl::codegen::CodegenCoreneuronCppVisitor::print_common_getters
void print_common_getters()
Print common getters.
Definition: codegen_coreneuron_cpp_visitor.cpp:2895
nmodl::codegen::CodegenCoreneuronCppVisitor::int_variable_name
std::string int_variable_name(const IndexVariableInfo &symbol, const std::string &name, bool use_instance) const override
Determine the name of an int variable given its symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:771
nmodl::codegen::CodegenCoreneuronCppVisitor::print_initial_block
void print_initial_block(const ast::InitialBlock *node)
Print initial block statements.
Definition: codegen_coreneuron_cpp_visitor.cpp:1720
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_kernel
void print_net_receive_kernel()
Print net_receive kernel function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2443
nmodl::codegen::CodegenCoreneuronCppVisitor::print_thread_getters
void print_thread_getters()
Print the getter method for thread variables and ids.
Definition: codegen_coreneuron_cpp_visitor.cpp:704
nmodl::codegen::CodegenCoreneuronCppVisitor::simulator_name
std::string simulator_name() override
Name of the simulator the code was generated for.
Definition: codegen_coreneuron_cpp_visitor.cpp:58
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buffering
void print_net_send_buffering()
Print kernel for buffering net_send events.
Definition: codegen_coreneuron_cpp_visitor.cpp:2416
nmodl::codegen::CodegenCoreneuronCppVisitor::namespace_name
virtual std::string namespace_name() override
Name of "our" namespace.
Definition: codegen_coreneuron_cpp_visitor.cpp:689
nmodl::codegen::get_register_type_for_ba_block
static std::string get_register_type_for_ba_block(const ast::Block *block)
Return registration type for a given BEFORE/AFTER block. \param block A BEFORE/AFTER block being regis...
Definition: codegen_coreneuron_cpp_visitor.cpp:1210
nmodl
encapsulates code generation backend implementations
Definition: ast_common.hpp:26
nmodl::ast::InitialBlock::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable InitialBlock::statement_block.
Definition: initial_block.hpp:184
nmodl::codegen::MemberType::index
@ index
index / int variables
nmodl::codegen::CodegenCoreneuronCppVisitor::print_fast_imem_calculation
void print_fast_imem_calculation() override
Print fast membrane current calculation code.
Definition: codegen_coreneuron_cpp_visitor.cpp:2822
nmodl::codegen::CodegenCoreneuronCppVisitor::print_dt_update_to_device
virtual void print_dt_update_to_device() const
Print the code to update dt from host to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:192
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_init_acc_serial_annotation_block_end
virtual void print_net_init_acc_serial_annotation_block_end()
Print accelerator kernels end annotation for net_init kernel.
Definition: codegen_coreneuron_cpp_visitor.cpp:231
nmodl::codegen::CodegenCoreneuronCppVisitor::external_method_parameters
const ParamVector external_method_parameters(bool table=false) noexcept override
Parameters for functions in generated code that are called back from external code.
Definition: codegen_coreneuron_cpp_visitor.cpp:468
nmodl::codegen::Ion
Represent ions used in mod file.
Definition: codegen_info.hpp:53
nmodl::codegen::naming::NRN_CONSTRUCTOR_METHOD
static constexpr char NRN_CONSTRUCTOR_METHOD[]
nrn_constructor method in generated code
Definition: codegen_naming.hpp:150
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_init
void print_net_init()
Print initial block in the net receive block.
Definition: codegen_coreneuron_cpp_visitor.cpp:2279
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_event_call
void print_net_event_call(const ast::FunctionCall &node) override
Print call to net_event.
Definition: codegen_coreneuron_cpp_visitor.cpp:2213
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_destructor
void print_nrn_destructor() override
Print nrn_destructor function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:1948
nmodl::codegen::CodegenCoreneuronCppVisitor::print_sdlists_init
void print_sdlists_init(bool print_initializers) override
Definition: codegen_coreneuron_cpp_visitor.cpp:930
nmodl::codegen::CodegenCoreneuronCppVisitor::print_headers_include
void print_headers_include() override
Print all includes.
Definition: codegen_coreneuron_cpp_visitor.cpp:2888
nmodl::codegen::CodegenCoreneuronCppVisitor::print_send_event_move
void print_send_event_move()
Print send event move block used in net receive as well as watch.
Definition: codegen_coreneuron_cpp_visitor.cpp:2311
nmodl::codegen::CodegenCoreneuronCppVisitor::append_conc_write_statements
void append_conc_write_statements(std::vector< ShadowUseStatement > &statements, const Ion &ion, const std::string &concentration) override
Generate function call statement for nrn_wrote_conc.
Definition: codegen_coreneuron_cpp_visitor.cpp:583
nmodl::codegen::get_prefixsum_from_name
int get_prefixsum_from_name(const std::vector< T > &variables, const std::string &name)
Definition: codegen_cpp_visitor.hpp:206
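No brief is attached to this helper; its name and the position_of_float_var / position_of_int_var entries on this page suggest a prefix-sum offset lookup over a variable list. A minimal sketch under that assumption, with a simplified element type standing in for the template parameter T:

#include <stdexcept>
#include <string>
#include <vector>

// Simplified stand-in for the templated element type T.
struct VarSketch {
    std::string name;
    int length;  // number of data slots the variable occupies
};

// Offset of `name` in a flat array where variables are laid out
// back-to-back: the sum of the lengths of all preceding variables.
int prefixsum_from_name_sketch(const std::vector<VarSketch>& variables,
                               const std::string& name) {
    int offset = 0;
    for (const auto& var: variables) {
        if (var.name == name) {
            return offset;
        }
        offset += var.length;
    }
    throw std::logic_error("variable " + name + " not found");
}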
nmodl::codegen::Ion::name
std::string name
name of the ion
Definition: codegen_info.hpp:55
nmodl::codegen::naming::NTHREAD_DT_VARIABLE
static constexpr char NTHREAD_DT_VARIABLE[]
dt variable in neuron thread structure
Definition: codegen_naming.hpp:108
token_mapping.hpp
Map different tokens from lexer to token types.
var_usage_visitor.hpp
Check if variable is used in given block.
string_utils.hpp
Implement string manipulation functions.
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_variables_for_hoc
void print_global_variables_for_hoc() override
Print byte arrays that register scalar and vector variables for hoc interface.
Definition: codegen_coreneuron_cpp_visitor.cpp:1153
nmodl::codegen::naming::NODE_AREA_VARIABLE
static constexpr char NODE_AREA_VARIABLE[]
inbuilt neuron variable for area of the compartment
Definition: codegen_naming.hpp:66
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_common_code
void print_net_receive_common_code(const ast::Block &node, bool need_mech_inst=true)
Print the common code section for net receive related methods.
Definition: codegen_coreneuron_cpp_visitor.cpp:2103
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_current
void print_nrn_current(const ast::BreakpointBlock &node) override
Print the nrn_current kernel.
Definition: codegen_coreneuron_cpp_visitor.cpp:2697
nmodl::ast::Ast::is_function_block
virtual bool is_function_block() const noexcept
Check if the ast node is an instance of ast::FunctionBlock.
Definition: ast.cpp:142
nmodl::codegen::naming::NRN_ALLOC_METHOD
static constexpr char NRN_ALLOC_METHOD[]
nrn_alloc method in generated code
Definition: codegen_naming.hpp:162
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_table_call
void print_function_table_call(const ast::FunctionCall &node) override
Print special code when calling FUNCTION_TABLEs.
Definition: codegen_coreneuron_cpp_visitor.cpp:2228
nmodl::codegen::IndexVariableInfo::is_integer
bool is_integer
if this is an integer (e.g. tqitem, point_process) variable
Definition: codegen_cpp_visitor.hpp:141
nmodl::ast::Verbatim::get_statement
std::shared_ptr< String > get_statement() const noexcept
Getter for member variable Verbatim::statement.
Definition: verbatim.hpp:157
nmodl::codegen::CodegenCoreneuronCppVisitor::print_first_pointer_var_index_getter
void print_first_pointer_var_index_getter()
Print the getter method for index position of first pointer variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:629
nmodl::codegen::CodegenCoreneuronCppVisitor::print_setup_range_variable
void print_setup_range_variable()
Print the function that initializes range variables with different data types.
Definition: codegen_coreneuron_cpp_visitor.cpp:1560
nmodl::codegen::CodegenCoreneuronCppVisitor::internal_method_arguments
std::string internal_method_arguments() override
Arguments for functions that are defined and used internally.
Definition: codegen_coreneuron_cpp_visitor.cpp:438
nmodl::parser::CDriver
Class that binds all pieces together for parsing C verbatim blocks.
Definition: c11_driver.hpp:37
nmodl::ast::BreakpointBlock::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable BreakpointBlock::statement_block.
Definition: breakpoint_block.hpp:188
nmodl::codegen::CodegenCoreneuronCppVisitor::nrn_thread_arguments
std::string nrn_thread_arguments() const override
Arguments for "_threadargs_" macro in neuron implementation.
Definition: codegen_coreneuron_cpp_visitor.cpp:486
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_arg_size_getter
void print_net_receive_arg_size_getter()
Print the getter method for the number of arguments of net_receive.
Definition: codegen_coreneuron_cpp_visitor.cpp:658
codegen_helper_visitor.hpp
Helper visitor to gather AST information to help code generation.
nmodl::codegen::CodegenCoreneuronCppVisitor::function_table_parameters
std::pair< ParamVector, ParamVector > function_table_parameters(const ast::FunctionTableBlock &node) override
Parameters of the function itself "{}" and "table_{}".
Definition: codegen_coreneuron_cpp_visitor.cpp:503
nmodl::stringutils::trim_newline
static std::string trim_newline(std::string text)
Definition: string_utils.hpp:83
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_or_procedure
void print_function_or_procedure(const ast::Block &node, const std::string &name, const std::unordered_set< CppObjectSpecifier > &specifiers={ CppObjectSpecifier::Inline}) override
Print nmodl function or procedure (common code)
Definition: codegen_coreneuron_cpp_visitor.cpp:372
codegen_cpp_visitor.hpp
Visitor for printing C++ code compatible with legacy api of CoreNEURON
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_verbatim
void visit_verbatim(const ast::Verbatim &node) override
visit node of type ast::Verbatim
Definition: codegen_coreneuron_cpp_visitor.cpp:121
codegen_naming.hpp
nmodl::visitor::VarUsageVisitor
Check if variable is used in given block.
Definition: var_usage_visitor.hpp:35
nmodl::ast::Block
Base class for all block scoped nodes.
Definition: block.hpp:41
nmodl::ast::InitialBlock
Represents a INITIAL block in the NMODL.
Definition: initial_block.hpp:49
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_matrix_shadow_update
virtual void print_nrn_cur_matrix_shadow_update()
Print the update to matrix elements with/without shadow vectors.
Definition: codegen_coreneuron_cpp_visitor.cpp:249
nmodl::codegen::CodegenCoreneuronCppVisitor::register_mechanism_arguments
std::string register_mechanism_arguments() const override
Arguments for register_mech or point_register_mech function.
Definition: codegen_coreneuron_cpp_visitor.cpp:564
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_init_acc_serial_annotation_block_begin
virtual void print_net_init_acc_serial_annotation_block_begin()
Print accelerator kernels begin annotation for net_init kernel.
Definition: codegen_coreneuron_cpp_visitor.cpp:226
nmodl::codegen::CodegenCoreneuronCppVisitor::print_deriv_advance_flag_transfer_to_device
virtual void print_deriv_advance_flag_transfer_to_device() const
Print the code to copy derivative advance flag to device.
Definition: codegen_coreneuron_cpp_visitor.cpp:167
nmodl::codegen::CodegenCoreneuronCppVisitor::print_first_random_var_index_getter
void print_first_random_var_index_getter()
Print the getter method for index position of first RANDOM variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:637
nmodl::codegen::naming::NTHREAD_RHS_SHADOW
static constexpr char NTHREAD_RHS_SHADOW[]
shadow rhs variable in neuron thread structure
Definition: codegen_naming.hpp:93
visitor_utils.hpp
Utility functions for visitors implementation.
nmodl::codegen::BlockType::Equation
@ Equation
breakpoint block
nmodl::codegen::naming::USE_TABLE_VARIABLE
static constexpr char USE_TABLE_VARIABLE[]
global variable to indicate if table is used
Definition: codegen_naming.hpp:81
nmodl::ast::WatchStatement
Represent WATCH statement in NMODL.
Definition: watch_statement.hpp:39
nmodl::codegen::CodegenCoreneuronCppVisitor::print_before_after_block
virtual void print_before_after_block(const ast::Block *node, size_t block_id)
Print NMODL before / after block in target backend code.
Definition: codegen_coreneuron_cpp_visitor.cpp:1882
nmodl::ast::BreakpointBlock
Represents a BREAKPOINT block in NMODL.
Definition: breakpoint_block.hpp:53
driver
nmodl::parser::UnitDriver driver
Definition: parser.cpp:28
nmodl::codegen::regex_special_chars
const std::regex regex_special_chars
Definition: codegen_cpp_visitor.cpp:912
nmodl::ast::FunctionCall
TODO.
Definition: function_call.hpp:38
codegen_coreneuron_cpp_visitor.hpp
Visitor for printing C++ code compatible with legacy api of CoreNEURON
nmodl::codegen::naming::VERBATIM_VARIABLES_MAPPING
static const std::unordered_map< std::string, std::string > VERBATIM_VARIABLES_MAPPING
commonly used variables in verbatim block and how they should be mapped to the new code generation backend
Definition: codegen_naming.hpp:227
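As an illustration only, a mapping of this shape pairs legacy verbatim spellings with their replacements; the entries below are hypothetical samples, not the actual table from codegen_naming.hpp:

#include <string>
#include <unordered_map>

// Hypothetical sample entries; the real table in codegen_naming.hpp
// may name different variables and targets.
static const std::unordered_map<std::string, std::string> verbatim_mapping_sketch{
    {"_nt", "nt"},          // legacy NrnThread pointer spelling
    {"_tqitem", "tqitem"},  // event queue item handle
};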
nmodl::ast::ProtectStatement
TODO.
Definition: protect_statement.hpp:38
nmodl::codegen::CodegenCoreneuronCppVisitor::print_ion_variable
void print_ion_variable() override
Print the ion variable struct.
Definition: codegen_coreneuron_cpp_visitor.cpp:1550
nmodl::codegen::BlockType::Constructor
@ Constructor
constructor block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_ion_var_structure
void print_ion_var_structure()
Print structure of ion variables used for local copies.
Definition: codegen_coreneuron_cpp_visitor.cpp:1503
c11_driver.hpp
nmodl::codegen::CodegenCoreneuronCppVisitor::internal_method_parameters
ParamVector internal_method_parameters() override
Parameters for internally defined functions.
Definition: codegen_coreneuron_cpp_visitor.cpp:446
nmodl::codegen::CodegenCoreneuronCppVisitor::position_of_int_var
int position_of_int_var(const std::string &name) const override
Determine the position in the data array for a given int variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:76
nmodl::codegen::CodegenCoreneuronCppVisitor::is_constant_variable
virtual bool is_constant_variable(const std::string &name) const
Check if variable is qualified as constant.
Definition: codegen_coreneuron_cpp_visitor.cpp:142
nmodl::codegen::CodegenCoreneuronCppVisitor::print_function_procedure_helper
void print_function_procedure_helper(const ast::Block &node) override
Common helper function to help printing function or procedure blocks.
Definition: codegen_coreneuron_cpp_visitor.cpp:395
nmodl::ast::ForNetcon::get_statement_block
std::shared_ptr< StatementBlock > get_statement_block() const noexcept override
Getter for member variable ForNetcon::statement_block.
Definition: for_netcon.hpp:185
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_for_netcon
void visit_for_netcon(const ast::ForNetcon &node) override
visit node of type ast::ForNetcon
Definition: codegen_coreneuron_cpp_visitor.cpp:3004
nmodl::codegen::CodegenCoreneuronCppVisitor::print_compute_functions
void print_compute_functions() override
Print all compute functions for every backend.
Definition: codegen_coreneuron_cpp_visitor.cpp:2934
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_conductance_kernel
void print_nrn_cur_conductance_kernel(const ast::BreakpointBlock &node) override
Print the nrn_cur kernel with NMODL conductance keyword provisions.
Definition: codegen_coreneuron_cpp_visitor.cpp:2715
nmodl::codegen::Ion::is_intra_cell_conc
bool is_intra_cell_conc(const std::string &text) const
Check if variable name is internal cell concentration.
Definition: codegen_info.hpp:135
nmodl::codegen::BlockType::Watch
@ Watch
watch block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_memory_allocation_routine
virtual void print_memory_allocation_routine() const
Print memory allocation routine.
Definition: codegen_coreneuron_cpp_visitor.cpp:298
nmodl::symtab::syminfo::to_string
std::string to_string(const T &obj)
Definition: symbol_properties.hpp:282
nmodl::codegen::CodegenCoreneuronCppVisitor::print_check_table_thread_function
void print_check_table_thread_function()
Print check_table functions.
Definition: codegen_coreneuron_cpp_visitor.cpp:348
nmodl::ast::Ast::is_initial_block
virtual bool is_initial_block() const noexcept
Check if the ast node is an instance of ast::InitialBlock.
Definition: ast.cpp:124
nmodl::codegen::BlockType::NetReceive
@ NetReceive
net_receive block
nmodl::codegen::Ion::is_extra_cell_conc
bool is_extra_cell_conc(const std::string &text) const
Check if variable name is external cell concentration.
Definition: codegen_info.hpp:144
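A hedged usage sketch of these two predicates, assuming the conventional NMODL suffixes ("i" for internal, "o" for external concentration) and an assumed include path:

#include <cassert>

#include "codegen/codegen_info.hpp"  // assumed include path for Ion

void ion_conc_example(const nmodl::codegen::Ion& ion) {
    // For an ion named "ca", the conventional internal concentration
    // is "cai" and the external one is "cao".
    if (ion.name == "ca") {
        assert(ion.is_intra_cell_conc("cai"));
        assert(ion.is_extra_cell_conc("cao"));
    }
}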
nmodl::codegen::CodegenCoreneuronCppVisitor::backend_name
std::string backend_name() const override
Name of the code generation backend.
Definition: codegen_coreneuron_cpp_visitor.cpp:53
nmodl::codegen::naming::NRN_INIT_METHOD
static constexpr char NRN_INIT_METHOD[]
nrn_init method in generated code
Definition: codegen_naming.hpp:147
nmodl::codegen::CodegenCoreneuronCppVisitor::print_rhs_d_shadow_variables
virtual void print_rhs_d_shadow_variables()
Print the setup method for setting matrix shadow vectors.
Definition: codegen_coreneuron_cpp_visitor.cpp:241
nmodl::ast::FunctionCall::get_arguments
const ExpressionVector & get_arguments() const noexcept
Getter for member variable FunctionCall::arguments.
Definition: function_call.hpp:166
nmodl::codegen::naming::THREAD_ARGS
static constexpr char THREAD_ARGS[]
verbatim name of the variable for nrn thread arguments
Definition: codegen_naming.hpp:177
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive
void print_net_receive()
Print net_receive function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2502
nmodl::codegen::rename_net_receive_arguments
static void rename_net_receive_arguments(const ast::NetReceiveBlock &net_receive_node, const ast::Node &node)
Rename arguments to NET_RECEIVE block with corresponding pointer variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:2266
nmodl::codegen::CodegenCoreneuronCppVisitor::optimize_ion_variable_copies
bool optimize_ion_variable_copies() const override
Check if ion variable copies should be avoided.
Definition: codegen_coreneuron_cpp_visitor.cpp:293
nmodl::visitor::RenameVisitor
Blindly rename given variable to new name
Definition: rename_visitor.hpp:43
nmodl::codegen::naming::NRN_CUR_METHOD
static constexpr char NRN_CUR_METHOD[]
nrn_cur method in generated code
Definition: codegen_naming.hpp:168
codegen_utils.hpp
Implement utility functions for codegen visitors.
nmodl::codegen::naming::TQITEM_VARIABLE
static constexpr char TQITEM_VARIABLE[]
inbuilt neuron variable for tqitem process
Definition: codegen_naming.hpp:75
nmodl::visitor::RenameVisitor::set
void set(const std::string &old_name, std::string new_name)
Definition: rename_visitor.hpp:97
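A hedged sketch of driving this visitor, e.g. the way rename_net_receive_arguments on this page would need to: register the old/new pair via set(), then let the node accept the visitor. The default constructor, include path, and accept()-based traversal are assumptions:

#include <string>

#include "visitors/rename_visitor.hpp"  // assumed include path

// Rename every use of `old_name` inside `node` to `new_name`.
void rename_in_node(nmodl::ast::Node& node,
                    const std::string& old_name,
                    const std::string& new_name) {
    nmodl::visitor::RenameVisitor visitor;  // assumes a default constructor
    visitor.set(old_name, new_name);
    node.accept(visitor);                   // assumes accept()-based traversal
}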
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mechanism_global_var_structure
void print_mechanism_global_var_structure(bool print_initializers) override
Print the structure that wraps all global variables used in the NMODL.
Definition: codegen_coreneuron_cpp_visitor.cpp:1009
defuse_analyze_visitor.hpp
Visitor to return Def-Use chain for a given variable in the block/node
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_constructor
void print_nrn_constructor() override
Print nrn_constructor function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:1936
nmodl::codegen::CodegenCoreneuronCppVisitor::print_kernel_data_present_annotation_block_end
virtual void print_kernel_data_present_annotation_block_end()
Print matching block end of accelerator annotations for data presence on device.
Definition: codegen_coreneuron_cpp_visitor.cpp:221
nmodl::ast::BATYPE_SOLVE
@ BATYPE_SOLVE
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_alloc
void print_nrn_alloc() override
Print nrn_alloc function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:1960
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_matrix_shadow_reduction
virtual void print_nrn_cur_matrix_shadow_reduction()
Print the reduction to matrix elements from shadow vectors.
Definition: codegen_coreneuron_cpp_visitor.cpp:262
nmodl::codegen::CodegenCoreneuronCppVisitor::print_memb_list_getter
void print_memb_list_getter()
Print the getter method for returning membrane list from NrnThread.
Definition: codegen_coreneuron_cpp_visitor.cpp:678
nmodl::stringutils::split_string
static std::vector< std::string > split_string(const std::string &text, char delimiter)
Split a text in a list of words, using a given delimiter character.
Definition: string_utils.hpp:116
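A small usage sketch combining split_string with the trim_newline entry above, relying only on the signatures shown on this page:

#include <string>
#include <vector>

#include "utils/string_utils.hpp"

void split_example() {
    const std::string text = "first line\nsecond line\n";
    // Split on newlines, then strip trailing newline characters from
    // each piece before further processing.
    for (const auto& piece: nmodl::stringutils::split_string(text, '\n')) {
        const auto trimmed = nmodl::stringutils::trim_newline(piece);
        // ... use `trimmed` ...
    }
}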
nmodl::codegen::CodegenCoreneuronCppVisitor::print_watch_check
void print_watch_check()
Print watch check function.
Definition: codegen_coreneuron_cpp_visitor.cpp:2021
nmodl::codegen::naming::INST_GLOBAL_MEMBER
static constexpr char INST_GLOBAL_MEMBER[]
instance struct member pointing to the global variable structure
Definition: codegen_naming.hpp:102
nmodl::codegen::CodegenCoreneuronCppVisitor::print_instance_variable_setup
void print_instance_variable_setup()
Print the function that initializes the instance structure.
Definition: codegen_coreneuron_cpp_visitor.cpp:1597
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_init
void print_nrn_init(bool skip_init_check=true)
Print the nrn_init function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:1807
nmodl::codegen::CodegenCoreneuronCppVisitor::get_variable_name
std::string get_variable_name(const std::string &name, bool use_instance=true) const override
Determine variable name in the structure of mechanism properties.
Definition: codegen_coreneuron_cpp_visitor.cpp:807
nmodl::symtab::syminfo::NmodlType
NmodlType
NMODL variable properties.
Definition: symbol_properties.hpp:116
nmodl::ast::ForNetcon
TODO.
Definition: for_netcon.hpp:39
nmodl::ast::BATYPE_BREAKPOINT
@ BATYPE_BREAKPOINT
Definition: ast_common.hpp:80
nmodl::codegen::CodegenCoreneuronCppVisitor::print_abort_routine
virtual void print_abort_routine() const
Print backend specific abort routine.
Definition: codegen_coreneuron_cpp_visitor.cpp:316
nmodl::ast::ForNetcon::get_parameters
const ArgumentVector & get_parameters() const noexcept override
Getter for member variable ForNetcon::parameters.
Definition: for_netcon.hpp:176
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur
void print_nrn_cur() override
Print nrn_cur / current update function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2851
nmodl::codegen::CodegenCoreneuronCppVisitor::get_range_var_float_type
std::string get_range_var_float_type(const SymbolType &symbol)
Returns floating point type for given range variable symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:1581
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buffering_cnt_update
virtual void print_net_send_buffering_cnt_update() const
Print the code related to the update of NetSendBuffer_t cnt.
Definition: codegen_coreneuron_cpp_visitor.cpp:2404
nmodl::codegen::CodegenCoreneuronCppVisitor::print_thread_memory_callbacks
void print_thread_memory_callbacks()
Print thread related memory allocation and deallocation callbacks.
Definition: codegen_coreneuron_cpp_visitor.cpp:1398
nmodl::ast::DerivimplicitCallback
Represent a callback to NEURON's derivimplicit solver.
Definition: derivimplicit_callback.hpp:38
nmodl::codegen::ShadowUseStatement
Represents ion write statement during code generation.
Definition: codegen_cpp_visitor.hpp:166
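Its members are not listed on this page; a plausible minimal shape for an lhs/op/rhs write statement, stated purely as an assumption:

#include <string>

// Assumed minimal shape: an ion write such as `cai = <expr>` carried
// as lhs ("cai"), op ("="), and rhs (the expression text).
struct ShadowUseStatementSketch {
    std::string lhs;
    std::string op;
    std::string rhs;
};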
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_buffering
void print_net_receive_buffering(bool need_mech_inst=true)
Print kernel for buffering net_receive events.
Definition: codegen_coreneuron_cpp_visitor.cpp:2358
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_function_common_code
virtual void print_global_function_common_code(BlockType type, const std::string &function_name="") override
Print common code for global functions like nrn_init, nrn_cur and nrn_state.
Definition: codegen_coreneuron_cpp_visitor.cpp:1756
nmodl::codegen::CodegenCoreneuronCppVisitor::functor_params
ParamVector functor_params() override
The parameters of the Newton solver "functor".
Definition: codegen_coreneuron_cpp_visitor.cpp:981
logger.hpp
Implement logger based on spdlog library.
nmodl::codegen::IndexVariableInfo::is_vdata
bool is_vdata
if variable resides in vdata field of NrnThread; typically true for bbcore pointer
Definition: codegen_cpp_visitor.hpp:133
nmodl::codegen::BlockType
BlockType
Helper to represent various block types.
Definition: codegen_cpp_visitor.hpp:56
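Only some enumerators of this enum appear on this page; a reconstruction limited to those (the real definition in codegen_cpp_visitor.hpp may contain more members):

// Reconstructed from the enumerators documented on this page only.
enum class BlockTypeSketch {
    Initial,      // initial block
    Constructor,  // constructor block
    Equation,     // breakpoint block
    State,        // derivative block
    Watch,        // watch block
    NetReceive,   // net_receive block
    BeforeAfter,  // before / after block
};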
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mechanism_register
void print_mechanism_register() override
Print the mechanism registration function.
Definition: codegen_coreneuron_cpp_visitor.cpp:1260
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_method_annotation
virtual void print_global_method_annotation()
Print backend specific global method annotation.
Definition: codegen_coreneuron_cpp_visitor.cpp:283
nmodl::codegen::naming::NRN_STATE_METHOD
static constexpr char NRN_STATE_METHOD[]
nrn_state method in generated code
Definition: codegen_naming.hpp:165
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_kernel
void print_nrn_cur_kernel(const ast::BreakpointBlock &node) override
Print main body of nrn_cur function.
Definition: codegen_coreneuron_cpp_visitor.cpp:2786
config.h
Version information and units file path.
nmodl::codegen::CodegenCoreneuronCppVisitor::nrn_thread_internal_arguments
std::string nrn_thread_internal_arguments() override
Arguments for "_threadargs_" macro in neuron implementation.
Definition: codegen_coreneuron_cpp_visitor.cpp:498
nmodl::codegen::CodegenCoreneuronCppVisitor::print_device_atomic_capture_annotation
virtual void print_device_atomic_capture_annotation() const
Print the pragma annotation for atomic increment-and-capture of a variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:172
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_derivimplicit_callback
void visit_derivimplicit_callback(const ast::DerivimplicitCallback &node) override
visit node of type ast::DerivimplicitCallback
Definition: codegen_coreneuron_cpp_visitor.cpp:2996
nmodl::codegen::CodegenCoreneuronCppVisitor::print_get_memb_list
virtual void print_get_memb_list()
Print the target backend code for defining and checking a local Memb_list variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:2337
nmodl::codegen::CodegenCoreneuronCppVisitor::print_num_variable_getter
void print_num_variable_getter()
Print the getter methods for the float and integer variable counts.
Definition: codegen_coreneuron_cpp_visitor.cpp:645
nmodl::ast::Ast::get_statement_block
virtual std::shared_ptr< StatementBlock > get_statement_block() const
Return associated statement block for the AST node.
Definition: ast.cpp:32
nmodl::codegen::CodegenCoreneuronCppVisitor::print_standard_includes
void print_standard_includes() override
Print standard C/C++ includes.
Definition: codegen_coreneuron_cpp_visitor.cpp:886
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_receive_loop_end
virtual void print_net_receive_loop_end()
Print the code for closing the main net_receive loop.
Definition: codegen_coreneuron_cpp_visitor.cpp:2353
nmodl::ast::Block::visit_children
void visit_children(visitor::Visitor &v) override
visit children, i.e. member variables of the current node, using the provided visitor.
Definition: ast.cpp:392
nmodl::codegen::CodegenCoreneuronCppVisitor::print_mech_type_getter
void print_mech_type_getter()
Print the getter method for returning mechtype.
Definition: codegen_coreneuron_cpp_visitor.cpp:669
nmodl::ast::AfterBlock
Represents a AFTER block in NMODL.
Definition: after_block.hpp:51
nmodl::ast::Ast::is_before_block
virtual bool is_before_block() const noexcept
Check if the ast node is an instance of ast::BeforeBlock.
Definition: ast.cpp:152
nmodl::codegen::naming::NTHREAD_T_VARIABLE
static constexpr char NTHREAD_T_VARIABLE[]
t variable in neuron thread structure
Definition: codegen_naming.hpp:105
nmodl::codegen::CodegenCoreneuronCppVisitor::float_variable_name
std::string float_variable_name(const SymbolType &symbol, bool use_instance) const override
Determine the name of a float variable given its symbol.
Definition: codegen_coreneuron_cpp_visitor.cpp:753
nmodl::codegen::CodegenCoreneuronCppVisitor::print_global_variable_device_update_annotation
virtual void print_global_variable_device_update_annotation()
Print the pragma annotation to update global variables from host to the device.
Definition: codegen_coreneuron_cpp_visitor.cpp:1555
rename_visitor.hpp
Blindly rename given variable to new name
nmodl::codegen::BlockType::State
@ State
derivative block
symtab_visitor.hpp
THIS FILE IS GENERATED AT BUILD TIME AND SHALL NOT BE EDITED.
nmodl::codegen::CodegenCoreneuronCppVisitor::add_variable_tqitem
void add_variable_tqitem(std::vector< IndexVariableInfo > &variables) override
Add the variable tqitem during get_int_variables.
Definition: codegen_coreneuron_cpp_visitor.cpp:413
nmodl::codegen::CodegenCoreneuronCppVisitor::visit_watch_statement
void visit_watch_statement(const ast::WatchStatement &node) override
visit node of type ast::WatchStatement
Definition: codegen_coreneuron_cpp_visitor.cpp:3036
nmodl::codegen::CodegenCoreneuronCppVisitor::external_method_arguments
const std::string external_method_arguments() noexcept override
Arguments for external functions called from generated code.
Definition: codegen_coreneuron_cpp_visitor.cpp:463
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_cur_non_conductance_kernel
void print_nrn_cur_non_conductance_kernel() override
Print the nrn_cur kernel without NMODL conductance keyword provisions.
Definition: codegen_coreneuron_cpp_visitor.cpp:2752
nmodl::visitor::VarUsageVisitor::variable_used
bool variable_used(const ast::Node &node, std::string name)
Definition: var_usage_visitor.cpp:26
nmodl::codegen::CodegenCoreneuronCppVisitor::print_backend_includes
virtual void print_backend_includes()
Print backend specific includes (none needed for C++ backend)
Definition: codegen_coreneuron_cpp_visitor.cpp:288
nmodl::codegen::CodegenCoreneuronCppVisitor::process_verbatim_token
std::string process_verbatim_token(const std::string &token)
Process a token in a verbatim block for possible variable renaming.
Definition: codegen_coreneuron_cpp_visitor.cpp:85
nmodl::codegen::CodegenCoreneuronCppVisitor::print_nrn_state
void print_nrn_state() override
Print nrn_state / state update function definition.
Definition: codegen_coreneuron_cpp_visitor.cpp:2642
nmodl::codegen::CodegenCoreneuronCppVisitor::add_variable_point_process
void add_variable_point_process(std::vector< IndexVariableInfo > &variables) override
Add the variable point_process during get_int_variables.
Definition: codegen_coreneuron_cpp_visitor.cpp:427
nmodl::codegen::naming::NRN_DESTRUCTOR_METHOD
static constexpr char NRN_DESTRUCTOR_METHOD[]
nrn_destructor method in generated code
Definition: codegen_naming.hpp:153
nmodl::codegen::CodegenCoreneuronCppVisitor::print_codegen_routines
void print_codegen_routines() override
Print entry point to code generation.
Definition: codegen_coreneuron_cpp_visitor.cpp:2965
nmodl::codegen::BlockType::Initial
@ Initial
initial block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_watch_activate
void print_watch_activate()
Print watch activate function.
Definition: codegen_coreneuron_cpp_visitor.cpp:1973
nmodl::codegen::CodegenCoreneuronCppVisitor::print_atomic_reduction_pragma
virtual void print_atomic_reduction_pragma()
Print atomic update pragma for reduction statements.
Definition: codegen_coreneuron_cpp_visitor.cpp:278
nmodl::codegen::naming::THREAD_ARGS_PROTO
static constexpr char THREAD_ARGS_PROTO[]
verbatim name of the variable for nrn thread arguments in prototype
Definition: codegen_naming.hpp:183
nmodl::codegen::BlockType::BeforeAfter
@ BeforeAfter
before / after block
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buffering_grow
virtual void print_net_send_buffering_grow()
Print statement that grows the NetSendBuffer_t structure if needed.
Definition: codegen_coreneuron_cpp_visitor.cpp:2409
nmodl::codegen::naming::NRN_PRIVATE_CONSTRUCTOR_METHOD
constexpr char NRN_PRIVATE_CONSTRUCTOR_METHOD[]
nrn_private_constructor method in generated code
Definition: codegen_naming.hpp:156
nmodl::ast::Ast::get_node_name
virtual std::string get_node_name() const
Return name of the node.
Definition: ast.cpp:28
nmodl::ast::ProtectStatement::get_expression
std::shared_ptr< Expression > get_expression() const noexcept
Getter for member variable ProtectStatement::expression.
Definition: protect_statement.hpp:157
nmodl::codegen::CodegenCoreneuronCppVisitor::print_net_send_buf_update_to_host
virtual void print_net_send_buf_update_to_host() const
Print the code to update NetSendBuffer_t from device to host.
Definition: codegen_coreneuron_cpp_visitor.cpp:182
all.hpp
Auto generated AST classes declaration.
nmodl::codegen::CodegenCoreneuronCppVisitor::print_data_structures
void print_data_structures(bool print_initializers) override
Print all classes.
Definition: codegen_coreneuron_cpp_visitor.cpp:2906
nmodl::codegen::CodegenCoreneuronCppVisitor::replace_if_verbatim_variable
std::string replace_if_verbatim_variable(std::string name)
Replace commonly used verbatim variables.
Definition: codegen_coreneuron_cpp_visitor.cpp:516
nmodl::codegen::CodegenCoreneuronCppVisitor::print_ion_var_constructor
virtual void print_ion_var_constructor(const std::vector< std::string > &members)
Print constructor of ion variables.
Definition: codegen_coreneuron_cpp_visitor.cpp:1533
nmodl::codegen::CodegenCoreneuronCppVisitor::position_of_float_var
int position_of_float_var(const std::string &name) const override
Determine the position in the data array for a given float variable.
Definition: codegen_coreneuron_cpp_visitor.cpp:71
nmodl::codegen::CodegenCppVisitor::ParamVector
std::vector< std::tuple< std::string, std::string, std::string, std::string > > ParamVector
A vector of parameters, each represented by a 4-tuple of strings.
Definition: codegen_cpp_visitor.hpp:297
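The meaning of the four strings is not spelled out here; a hedged reading is (type qualifier, type, name qualifier, name), which would make a two-parameter list look like this:

#include <string>
#include <tuple>
#include <vector>

using ParamVectorSketch =
    std::vector<std::tuple<std::string, std::string, std::string, std::string>>;

// Hedged reading of the fields: (type qualifier, type, name qualifier,
// name), e.g. `const NrnThread* nt` and `double t`.
ParamVectorSketch example_params{
    {"const ", "NrnThread", "*", "nt"},
    {"", "double", "", "t"},
};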
nmodl::codegen::naming::ION_VARNAME_PREFIX
static constexpr char ION_VARNAME_PREFIX[]
prefix for ion variable
Definition: codegen_naming.hpp:201