53 std::vector<int>& pointer2type);
58 double*& output_threshold,
60 int*& netcon_pntindex,
95 std::vector<T> d(cnt * sz);
97 for (int i = 0; i < cnt; ++i) {
98 for (int j = 0; j < sz; ++j) {
99 d[i * sz + j] = data[i * sz + j];
103 for (int i = 0; i < cnt; ++i) {
104 for (int j = 0; j < sz; ++j) {
105 data[i + j * align_cnt] = d[i * sz + j];
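The loop at lines 95-105 is the core of the mech data layout transform: it copies the interleaved (AoS) image aside, then writes it back transposed with a padded stride. A minimal self-contained sketch of that transform, assuming align_cnt is cnt rounded up for alignment and the buffer already holds align_cnt * sz elements (aos_to_soa is a hypothetical name):

#include <cassert>
#include <vector>

template <typename T>
void aos_to_soa(T* data, int cnt, int sz, int align_cnt) {
    assert(align_cnt >= cnt);
    std::vector<T> d(data, data + cnt * sz);          // copy out the AoS image
    for (int i = 0; i < cnt; ++i)
        for (int j = 0; j < sz; ++j)
            data[i + j * align_cnt] = d[i * sz + j];  // write back as SoA
}

int main() {
    // two instances (cnt = 2), three variables each (sz = 3), padded to stride 4
    std::vector<double> buf = {1, 2, 3, 4, 5, 6};
    buf.resize(4 * 3);
    aos_to_soa(buf.data(), 2, 3, 4);
    assert(buf[0] == 1 && buf[1] == 4);  // variable 0 of both instances is now contiguous
}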
136 int n_data = 6 * n_data_padded;
138 n_data += n_data_padded;
158 size_t offset = 6 * n_data_padded;
160 offset += n_data_padded;
174 std::vector<int> pdata;
182 auto& p2t = tmls.back().pointer2type;
210 int n_vec_play_continuous = F.read_int();
212 for (int i = 0; i < n_vec_play_continuous; ++i) {
234 for (int i = 0; i < n_vec_play_continuous; ++i) {
237 vecPlay.discon_index = F.read_int();
238 vecPlay.ubound_index = F.read_int();
257 int* types_ = nullptr;
258 int* nodecounts_ = nullptr;
260 (*nrn2core_get_dat2_1_)(thread_id,
276 delete[] nodecounts_;
282 int n_data = 6 * n_data_padded;
284 n_data += n_data_padded;
296 double* actual_a = _data + 2 * n_data_padded;
297 double* actual_b = _data + 3 * n_data_padded;
298 double* actual_v = _data + 4 * n_data_padded;
299 double* actual_area = _data + 5 * n_data_padded;
300 double* actual_diam = n_diam > 0 ? _data + 6 * n_data_padded : nullptr;
301 (*nrn2core_get_dat2_2_)(
302 thread_id, v_parent_index, actual_a, actual_b, actual_area, actual_v, actual_diam);
309 size_t offset = 6 * n_data_padded;
311 offset += n_data_padded;
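From the offsets used at lines 296-300, the _data block appears to hold six padded per-node vectors up front (a, b, v, and area occupy slots 2-5; slots 0-1 presumably hold rhs and d), with diam optional at slot 6 and mechanism data after that. A sketch of the offset computation under that assumption:

#include <cstddef>

// first mechanism slot after the per-node vectors (layout inferred, not an API)
std::size_t first_mech_offset(std::size_t n_data_padded, bool has_diam) {
    std::size_t offset = 6 * n_data_padded;  // presumably rhs, d, a, b, v, area
    if (has_diam)
        offset += n_data_padded;             // optional diam vector
    return offset;
}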
323 tml.pdata.resize(nodecounts[i] * dparam_sizes[type]);
325 int* nodeindices_ = nullptr;
326 double* data_ = _data + offset;
327 int* pdata_ = const_cast<int*>(tml.pdata.data());
328 (*nrn2core_get_dat2_mech_)(thread_id,
330 dparam_sizes[type] > 0 ? dsz_inst : 0,
335 if (dparam_sizes[type] > 0)
339 std::copy(nodeindices_, nodeindices_ + nodecounts[i], tml.nodeindices.data());
343 assert(nodeindices_ == nullptr);
347 int* output_vindex_ = nullptr;
348 double* output_threshold_ = nullptr;
349 int* pnttype_ = nullptr;
350 int* pntindex_ = nullptr;
351 double* weight_ = nullptr;
352 double* delay_ = nullptr;
353 (*nrn2core_get_dat2_3_)(thread_id,
363 delete[] output_vindex_;
366 delete[] output_threshold_;
369 pnttype = std::vector<int>(pnttype_, pnttype_ + n_netcon);
372 pntindex = std::vector<int>(pntindex_, pntindex_ + n_netcon);
375 weights = std::vector<double>(weight_, weight_ + n_weight);
378 delay = std::vector<double>(delay_, delay_ + n_netcon);
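Lines 347-378 follow one pattern: the nrn2core callback hands back raw new[]-allocated arrays, which are copied into std::vectors and then freed. The idiom in isolation (adopt_and_free is a hypothetical name):

#include <vector>

// copy a callback-owned array into owned storage, then free the original
std::vector<double> adopt_and_free(double* raw, int n) {
    std::vector<double> v(raw, raw + n);
    delete[] raw;
    return v;
}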
389 int* iArray_ = nullptr;
391 double* dArray_ = nullptr;
392 (*nrn2core_get_dat2_corepointer_mech_)(nt.id, tmls[i].type, icnt, dcnt, iArray_, dArray_);
393 tmls[i].iArray.resize(icnt);
394 std::copy(iArray_, iArray_ + icnt, tmls[i].iArray.begin());
397 tmls[i].dArray.resize(dcnt);
398 std::copy(dArray_, dArray_ + dcnt, tmls[i].dArray.begin());
404 std::vector<int> indices_vec_play_continuous;
405 (*nrn2core_get_dat2_vecplay_)(thread_id, indices_vec_play_continuous);
408 for (auto i: indices_vec_play_continuous) {
412 double *yvec_, *tvec_;
414 (*nrn2core_get_dat2_vecplay_inst_)(thread_id,
427 std::copy(yvec_, yvec_ + sz, item.yvec.data());
428 std::copy(tvec_, tvec_ + sz, item.tvec.data());
435 int diff_mech_count = 0;
439 [&](int e) { return e == mech_types[i]; })) {
441 printf("Error: %s is a different MOD file than used by NEURON!\n",
448 if (diff_mech_count > 0) {
451 "Error : NEURON and CoreNEURON must use same mod files for compatibility, %d "
452 "different mod file(s) found. Re-compile special and special-core!\n",
467 for (int iml = 0; iml < nodecount; ++iml) {
478 int type = net_buf_receive.second;
514 while ((type = F.read_int()) != 0) {
519 auto event = std::make_shared<NetConType_>();
522 events.emplace_back(type, event);
526 auto event = std::make_shared<SelfEventType_>();
529 event->point_proc_instance = F.read_int();
530 event->target_instance = F.read_int();
534 events.emplace_back(type, event);
538 auto event = std::make_shared<PreSynType_>();
541 events.emplace_back(type, event);
545 auto event = std::make_shared<NetParEvent_>();
547 events.emplace_back(type, event);
551 auto event = std::make_shared<PlayRecordEventType_>();
553 event->play_record_type = F.read_int();
555 event->vecplay_index = F.read_int();
556 events.emplace_back(type, event);
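Lines 514-556 restore the event queue: a type tag selects which event class to build, its fields are read, and (type, event) pairs are collected until the 0 sentinel. A skeletal version of that dispatch; the Reader, event types, and tag values here are stand-ins, not the actual ones:

#include <memory>
#include <utility>
#include <vector>

struct Event { virtual ~Event() = default; };
struct NetConEvent: Event { int index = 0; };
struct SelfEvent: Event { int point_proc_instance = 0, target_instance = 0; };

template <typename Reader>
std::vector<std::pair<int, std::shared_ptr<Event>>> read_events(Reader& F) {
    std::vector<std::pair<int, std::shared_ptr<Event>>> events;
    int type;
    while ((type = F.read_int()) != 0) {       // 0 terminates the stream
        switch (type) {
        case 2: {                              // NetCon (tag value assumed)
            auto e = std::make_shared<NetConEvent>();
            e->index = F.read_int();
            events.emplace_back(type, e);
            break;
        }
        case 3: {                              // SelfEvent (tag value assumed)
            auto e = std::make_shared<SelfEvent>();
            e->point_proc_instance = F.read_int();
            e->target_instance = F.read_int();
            events.emplace_back(type, e);
            break;
        }
        default:
            break;                             // other tags elided
        }
    }
    return events;
}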
572 std::vector<BAMech*> before_after_map(memb_func.size());
574 for (size_t ii = 0; ii < memb_func.size(); ++ii) {
575 before_after_map[ii] = nullptr;
580 if (!before_after_map[bam->type]) {
581 before_after_map[bam->type] = bam;
586 for (auto tml = nt.tml; tml; tml = tml->next) {
587 if (before_after_map[tml->index]) {
588 int mtype = tml->index;
589 for (auto bam = before_after_map[mtype]; bam && bam->type == mtype;
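before_after_map (lines 572-589) gives O(1) lookup from mechanism type to its first registered BEFORE/AFTER block; subsequent entries of the same type stay reachable through the chain. A sketch of that indexing with hypothetical types modeled on the fields used above:

#include <cstddef>
#include <vector>

struct BAMech {
    int type;
    BAMech* next;  // registration list, possibly several entries per type
};

// keep only the first BAMech registered for each type, as a direct index
std::vector<BAMech*> build_map(BAMech* list, std::size_t ntypes) {
    std::vector<BAMech*> map(ntypes, nullptr);
    for (BAMech* bam = list; bam; bam = bam->next)
        if (!map[bam->type])
            map[bam->type] = bam;
    return map;
}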
612 std::map<int, size_t> type2itml;
613 for (size_t i = 0; i < tmls.size(); ++i) {
614 if (tmls[i].pointer2type.size()) {
615 type2itml[tmls[i].type] = i;
619 for (auto tml = nt.tml; tml; tml = tml->next) {
620 int type = tml->index;
622 int* pdata = tml->ml->pdata;
623 int cnt = tml->ml->nodecount;
625 int* semantics = memb_func[type].dparam_semantics;
636 for (int i = 0; i < szdp; ++i) {
637 int s = semantics[i];
667 if (s >= 0 && s < 1000) {
675 for (int iml = 0; iml < cnt; ++iml) {
686 auto search = type2itml.find(type);
687 if (search != type2itml.end()) {
688 auto& ptypes = tmls[type2itml[type]].pointer2type;
689 assert(ptypes.size());
691 for (int iml = 0; iml < cnt; ++iml) {
692 for (int i = 0; i < szdp; ++i) {
693 if (semantics[i] == -5) {
696 int ptype = ptypes[iptype++];
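For dparam entries with semantics -5 (POINTER), the serialized pointer2type list is consumed in instance-major order, one entry per POINTER slot (line 696's iptype++). A compact model of that consumption order, with the actual index translation elided:

#include <cassert>
#include <cstddef>
#include <vector>

void consume_pointer_types(const std::vector<int>& semantics, int cnt, int szdp,
                           const std::vector<int>& ptypes) {
    std::size_t iptype = 0;
    for (int iml = 0; iml < cnt; ++iml)
        for (int i = 0; i < szdp; ++i)
            if (semantics[i] == -5) {          // POINTER semantics
                int ptype = ptypes[iptype++];  // type recorded at serialization
                (void) ptype;                  // translation into the target
                                               // mechanism's data elided here
            }
    assert(iptype == ptypes.size());           // one entry per POINTER slot
}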
727 int* mech_deps = (int*) ecalloc(memb_func.size(), sizeof(int));
729 for (auto tml = nt.tml; tml; tml = tml->next) {
731 tml->dependencies = nullptr;
732 tml->ndependencies = 0;
740 std::vector<int> actual_mech_deps;
746 for (int j = 0; j < deps_cnt; j++) {
758 std::vector<int> node_intersection;
766 std::back_inserter(node_intersection));
770 if (!node_intersection.empty()) {
771 actual_mech_deps.push_back(mech_deps[j]);
776 if (!actual_mech_deps.empty()) {
777 tml->ndependencies = actual_mech_deps.size();
778 tml->dependencies = (int*) ecalloc(actual_mech_deps.size(), sizeof(int));
779 std::copy(actual_mech_deps.begin(), actual_mech_deps.end(), tml->dependencies);
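The dependency filter (lines 758-777) keeps mech_deps[j] only when the two mechanisms actually share nodes, collecting the overlap with std::set_intersection into a back_inserter. A self-contained illustration (both input ranges must be sorted, as std::set_intersection requires):

#include <algorithm>
#include <iterator>
#include <vector>

bool nodes_overlap(const std::vector<int>& a, const std::vector<int>& b) {
    std::vector<int> node_intersection;
    std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                          std::back_inserter(node_intersection));
    return !node_intersection.empty();
}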
795 for (int i = 0; i < n_netcon; ++i) {
811 ntc.delay = new double[n_netcon];
812 memcpy(ntc.delay, delay.data(), n_netcon * sizeof(double));
814 for (int i = 0; i < n_netcon; ++i) {
821 const std::vector<Memb_func>& memb_func,
826 ntc.bcpicnt = new int[n_mech];
827 ntc.bcpdcnt = new int[n_mech];
828 ntc.bcptype = new int[n_mech];
829 size_t point_proc_id = 0;
838 ntc.bcptype[point_proc_id] = type;
839 ntc.bcpicnt[point_proc_id] = tmls[i].iArray.size();
840 ntc.bcpdcnt[point_proc_id] = tmls[i].dArray.size();
850 for (int j = 0; j < cntml; ++j) {
855 double* d = ml->data;
861 tmls[i].iArray.data(),
873 assert(dk == static_cast<int>(tmls[i].dArray.size()));
874 assert(ik == static_cast<int>(tmls[i].iArray.size()));
896 ntc.vtype[i] = vecPlay.vtype;
899 ntc.mtype[i] = vecPlay.mtype;
903 ntc.vecplay_ix[i] = vecPlay.ix;
911 std::move(vecPlay.yvec),
912 std::move(vecPlay.tvec),
937 ntc.mlmap = new Memb_list_chkpnt*[memb_func.size()];
938 for (int i = 0; i < memb_func.size(); ++i) {
939 ntc.mlmap[i] = nullptr;
957 int shadow_rhs_cnt = 0;
968 Memb_list_chkpnt* mlc = new Memb_list_chkpnt;
969 ntc.mlmap[tml->index] = mlc;
973 tml_last->next = tml;
980 if (shadow_rhs_cnt) {
1014 size_t offset = 6 * n_data_padded;
1020 offset += n_data_padded;
1026 for (auto tml = nt.tml; tml; tml = tml->next) {
1028 int type = tml->index;
1031 int sz = nrn_prop_param_size_[type];
1050 ntc.parent = new int[nt.end];
1052 ntc.area = new double[nt.end];
1057 std::vector<int> pnt_offset(memb_func.size());
1063 for (auto tml = nt.tml; tml; tml = tml->next, ++itml) {
1064 int type = tml->index;
1067 int szp = nrn_prop_param_size_[type];
1068 int szdp = nrn_prop_dparam_size_[type];
1074 mech_data_layout_transform<double>(ml->data, n, szp, layout);
1079 mech_data_layout_transform<int>(ml->pdata, n, szdp, layout);
1081 #if CHKPNTDEBUG // Not substantive. Only for debugging.
1082 Memb_list_chkpnt* mlc = ntc.mlmap[type];
1085 for (int i = 0; i < n; ++i) {
1086 for (int j = 0; j < szdp; ++j) {
1087 mlc->pdata_not_permuted[i * szdp + j] = ml->pdata[i * szdp + j];
1092 for (int i = 0; i < n; ++i) {
1093 for (int j = 0; j < szdp; ++j) {
1094 mlc->pdata_not_permuted[i * szdp + j] = ml->pdata[i + j * align_cnt];
1100 ml->pdata = nullptr;
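The two debug loops at lines 1085-1094 gather pdata back into AoS order regardless of layout: element (i, j) sits at pdata[i * szdp + j] in AoS and at pdata[i + j * align_cnt] in padded SoA, so the checkpoint copy compares layout-independently. The two access patterns side by side (illustrative accessors, not source functions):

inline int soa_at(const int* pdata, int i, int j, int align_cnt) {
    return pdata[i + j * align_cnt];   // column j, row i, padded stride
}
inline int aos_at(const int* pdata, int i, int j, int szdp) {
    return pdata[i * szdp + j];        // row i, column j
}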
1106 pnt_offset[type] = synoffset;
1108 for (int i = 0; i < cnt; ++i) {
1153 for (int i = 0; i < nt.end; ++i) {
1161 for (auto tml = nt.tml; tml; tml = tml->next) {
1162 if (tml->ml->nodeindices) {
1166 for (auto tml = nt.tml; tml; tml = tml->next) {
1167 if (tml->ml->nodeindices) {
1190 for (auto tml = nt.tml; tml; tml = tml->next) {
1198 for (auto tml = nt.tml; tml; tml = tml->next) {
1209 for (size_t i = 0; i < nrn_has_net_event_.size(); ++i) {
1222 ntc.output_vindex = new int[nt.n_presyn];
1243 int index = ix / 1000;
1244 int type = ix % 1000;
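Lines 1243-1244 show the packing convention for output_vindex: a single int carries index * 1000 + type, decoded by / 1000 and % 1000, which only works while type stays below 1000. Round-trip sketch (pack is a hypothetical helper):

#include <cassert>

int pack(int index, int type) {
    assert(0 <= type && type < 1000);  // encoding breaks if type reaches 1000
    return index * 1000 + type;
}

int main() {
    int ix = pack(42, 7);
    assert(ix / 1000 == 42 && ix % 1000 == 7);
}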
1248 int ip2ps = pnttype2presyn[pnt->_type];
1256 assert(ps->gid_ > -1);
1258 assert(ix < nt.end);
1275 ntc.pnttype = new int[nnetcon];
1276 ntc.pntindex = new int[nnetcon];
1277 memcpy(ntc.pnttype, pnttype.data(), nnetcon * sizeof(int));
1278 memcpy(ntc.pntindex, pntindex.data(), nnetcon * sizeof(int));
1280 for (int i = 0; i < nnetcon; ++i) {
1283 int index = pnt_offset[type] + pntindex[i];
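Line 1283 resolves a NetCon target: pnt_offset[type] turns the per-type instance index pntindex[i] into a position in the thread's flat Point_process array. A minimal model with made-up numbers:

#include <cassert>
#include <vector>

int main() {
    // hypothetical: type 3's point processes start at flat slot 10
    std::vector<int> pnt_offset(8, 0);
    pnt_offset[3] = 10;
    int pntindex = 2;                      // third instance of type 3
    int index = pnt_offset[3] + pntindex;  // flat index into the pool
    assert(index == 12);
}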