53 std::string filename = restore_ + "/time.dat";
54 f.open(filename, std::ios::in);
75 for (int i = 0; i < nb_threads; i++) {
104 assert(p >= 0 && p < eml->_nodecount_padded * esz);
109 if (!ml_pinv[etype]) {
112 ei_instance = ml_pinv[etype][ei_instance];
115 return ei_instance * esz + ei;
124 fh.open(filename, std::ios::out);
134 fh << nt.ncell << " ncell\n";
135 fh << n_outputgid << " ngid\n";
137 assert(ntc.n_outputgids == n_outputgid);
141 fh << nt.end << " nnode\n";
150 fh << nmech << " nmech\n";
152 assert(nmech == ntc.nmech);
159 fh << current_tml->index << "\n";
160 fh << current_tml->ml->nodecount << "\n";
163 fh << nt._nidata << " nidata\n";
164 fh << nt._nvdata << " nvdata\n";
168 int* pinv_nt = nullptr;
170 int* d = new int[nt.end];
172 for (int i = 0; i < nt.end; ++i) {
181 for (int i = 0; i < nt.end; ++i) {
182 assert(d[i] == ntc.parent[i]);
189 for (int i = 0; i < nt.end; ++i) {
194 pinv_nt = new int[nt.end];
195 for (int i = 0; i < nt.end; ++i) {
204 for (int i = 0; i < nt.end; ++i) {
218 int** ml_pinv = (int**) ecalloc(memb_func.size(), sizeof(int*));
222 int type = current_tml->index;
231 int sz = nrn_prop_param_size_[type];
233 int* semantics = memb_func[type].dparam_semantics;
235 if (!nrn_is_artificial_[type]) {
241 int* nd_ix = new int[cnt];
242 for (int i = 0; i < cnt; ++i) {
245 nd_ix[i] = pinv_nt[ipval];
253 sz = nrn_prop_dparam_size_[type];
257 std::vector<int> pointer2type;
258 if (!nrn_is_artificial_[type]) {
259 for (int i_instance = 0; i_instance < cnt; ++i_instance) {
260 for (int i = 0; i < sz; ++i) {
261 int ix = i_instance * sz + i;
262 int s = semantics[i];
266 } else if (s == -9) {
270 } else if (s == -5) {
283 pointer2type.push_back(ptype);
295 } else if (s >= 0 && s < 1000) {
301 ntc.mlmap[type]->pdata_not_permuted[i_instance * sz + i]);
309 size_t s = pointer2type.size();
310 fh << s << " npointer\n";
319 int* output_vindex = new int[nt.n_presyn];
328 assert(ps->pntsrc_ == nullptr);
332 output_threshold[i] = 0.0;
333 output_vindex[i] = -1;
337 int type = pnt->_type;
341 if (!ml_pinv[type]) {
345 ix = ml_pinv[type][ix];
347 output_vindex[i] = -(ix * 1000 + type);
354 assert(ntc.output_vindex[i] == output_vindex[i]);
357 assert(ntc.output_threshold[i] == output_threshold[i]);
360 delete[] output_vindex;
361 delete[] output_threshold;
365 std::vector<int> pnt_offset(memb_func.size(), -1);
367 int type = tml->index;
369 pnt_offset[type] = synoffset;
370 synoffset += tml->ml->nodecount;
374 int* pnttype = new int[nnetcon];
376 double* delay = new double[nnetcon];
377 for (int i = 0; i < nnetcon; ++i) {
380 if (pnt == nullptr) {
405 for (int i = 0; i < nnetcon; ++i) {
406 assert(ntc.pnttype[i] == pnttype[i]);
408 assert(ntc.delay[i] == delay[i]);
423 fh << nbcp << " bbcorepointer\n";
425 assert(nbcp == ntc.nbcp);
431 int type = tml->index;
444 for (int j = 0; j < ml->nodecount; ++j) {
452 nullptr, nullptr, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->_thread, &nt, ml, 0.0);
457 assert(ntc.bcptype[i] == type);
458 assert(ntc.bcpicnt[i] == icnt);
459 assert(ntc.bcpdcnt[i] == dcnt);
461 int* iArray = nullptr;
462 double* dArray = nullptr;
464 iArray = new int[icnt];
467 dArray = new double[dcnt];
470 for (int j = 0; j < ml->nodecount; j++) {
481 dArray, iArray, &dcnt, &icnt, 0, aln_cntml, d, pd, ml->_thread, &nt, ml, 0.0);
497 fh << nt.n_vecplay << " VecPlay instances\n";
500 int vtype = pr->type();
523 if (ml_pinv[mtype]) {
524 icnt = ml_pinv[mtype][icnt];
533 assert(ntc.vtype[i] == vtype);
534 assert(ntc.mtype[i] == mtype);
535 assert(ntc.vecplay_ix[i] == ix);
539 int sz = vpc->y_.size();
544 std::cerr << "Error checkpointing vecplay type" << std::endl;
549 for (size_t i = 0; i < memb_func.size(); ++i) {
563 f.open(filename, std::ios::out);
603 auto type = tml->index;
607 "Checkpoint is requested involving BBCOREPOINTER but there is no bbcore_write"
608 " function for %s\n",
618 template <typename T>
623 T* d = new T[cnt * sz];
625 for (int i = 0; i < cnt * sz; ++i) {
630 for (int i = 0; i < cnt; ++i) {
635 for (int j = 0; j < sz; ++j) {
636 d[i * sz + j] = data[ip + j * align_cnt];
643 template <typename T>
665 fh << d->type() << "\n";
672 fh << (nc - nt.netcons) << "\n";
690 fh << (ps - nt.presyns) << "\n";
699 fh << pr->type() << "\n";
727 std::shared_ptr<Phase2::EventTypeBase> event,
752 net_send(nt._vdata + e->movable, e->weight_index, pnt, e->time, e->flag);
785 fh << nt.n_vecplay << " VecPlayContinuous state\n";
814 fh << -1 << " Presyn ConditionEvent flags\n";
825 fh << -1 << " TQItems from atomic_dq\n";
826 while ((q = tqe->atomic_dq(1e20)) != nullptr) {
830 fh << -1 << " TQItemsfrom binq_\n";
878 for (const auto& event: p2.events) {
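The write path above repeatedly gathers values out of the padded, permuted SoA storage back into the original AoS order before streaming them to the checkpoint file (see the soa2aos template around lines 618-636). The following is a minimal, self-contained sketch of that gather under assumed conventions; soa_to_aos, padded_cnt and perm are illustrative names chosen for the example, not CoreNEURON's API.

#include <vector>

// Sketch only: the SoA buffer stores variable j of storage row r at
// data[r + j * padded_cnt], and perm[i] gives the storage row holding
// original instance i (an empty perm means identity).
std::vector<double> soa_to_aos(const std::vector<double>& data,
                               int cnt,         // number of instances
                               int sz,          // variables per instance
                               int padded_cnt,  // row stride after padding
                               const std::vector<int>& perm) {
    std::vector<double> d(static_cast<std::size_t>(cnt) * sz);
    for (int i = 0; i < cnt; ++i) {
        int ip = perm.empty() ? i : perm[i];            // storage row of instance i
        for (int j = 0; j < sz; ++j) {
            d[i * sz + j] = data[ip + j * padded_cnt];  // same pattern as line 636
        }
    }
    return d;
}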
auto & get_mech_data_layout()
TQItem * atomic_dq(double til)
void restore_tqueue(NrnThread &, const Phase2 &p2)
int type_of_ntdata(NrnThread &nt, int i, bool reset)
T * soa2aos(T *data, int cnt, int sz, int layout, int *permute) const
NrnThreadChkpnt * nrnthread_chkpnt
void * ecalloc(size_t n, size_t size)
void close()
Close currently open file.
mpi_function< cnrn_make_integral_constant_t(nrnmpi_barrier_impl)> nrnmpi_barrier
void dt2thread(double adt)
std::string get_save_path() const
auto & get_is_artificial()
auto & get_memb_func(size_t idx)
PreSynHelper * presyns_helper
void nrn_inverse_i_layout(int i, int &icnt, int cnt, int &isz, int sz, int layout)
auto & get_bbcore_write()
virtual void send(double sendtime, NetCvode *, NrnThread *) override
int nrn_soa_padded_size(int cnt, int layout)
Calculate the size after padding for a specific memory layout (illustrated in the sketch after this reference list).
std::size_t ubound_index_
static int nrn_original_aos_index(int etype, int ix, NrnThread &nt, int **ml_pinv)
double restore_time() const
TODO: broadcast this value rather than having every rank read the double.
THIS FILE IS AUTO-GENERATED; DO NOT MODIFY IT.
std::size_t discon_index_
int ncell
int mkdir_p(const char *path)
Creates the directory if it doesn't exist (similar to mkdir -p); a std::filesystem-based sketch follows this reference list.
bool should_restore() const
#define VecPlayContinuousType
std::vector< VecPlayContinuous_ > vec_play_continuous
auto & get_prop_dparam_size()
void write_array(T *p, size_t nb_elements)
Write a 1D array.
virtual void send(double sendtime, NetCvode *, NrnThread *) override
void open(const std::string &filename, std::ios::openmode mode=std::ios::in)
Preserving chkpnt state, move to a new file.
void data_write(FileHandler &F, T *data, int cnt, int sz, int layout, int *permute) const
CheckPoints(const std::string &save, const std::string &restore)
corenrn_parameters corenrn_param
Printing method.
void restore_tqitem(int type, std::shared_ptr< Phase2::EventTypeBase > event, NrnThread &nt)
void write_checkpoint(NrnThread *nt, int nb_threads) const
#define _threadargsproto_
int nrn_i_layout(int icnt, int cnt, int isz, int sz, int layout)
This function returns the index in a flat array of a matrix coordinate (icnt, isz); see the layout sketch after this reference list.
void write_phase2(NrnThread &nt) const
virtual void send(double deliverytime, NetCvode *, NrnThread *)
void net_send(void **, int, Point_process *, double, double)
void nrn_spike_exchange_init()
std::vector< int > preSynConditionEventFlags
T * read_array(T *p, size_t count)
Read an integer array of fixed length.
#define PlayRecordEventType
void allocate_data_in_mechanism_nrn_init()
NetCvode * net_cvode_instance
int * inverse_permute(int *p, int n)
auto & get_prop_param_size()
std::vector< std::pair< int, std::shared_ptr< EventTypeBase > > > events
bool mpi_enable
Initialization seed for random number generator (int)
int checkpoint() const
Query chkpnt state.
int checkpoint_save_patternstim(_threadargsproto_)
void write_tqueue(TQItem *q, NrnThread &nt, FileHandler &fh) const
void checkpoint_restore_patternstim(int, double, _threadargsproto_)
void nrn_thread_table_check()
#define nrn_assert(x)
assert()-like macro, independent of NDEBUG status
static int permute(int i, NrnThread &nt)
virtual bool require_checkpoint()
const std::string restore_
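As a concrete illustration of the two layout helpers listed above, nrn_soa_padded_size and nrn_i_layout, the sketch below shows one plausible convention: AoS stores coordinate (icnt, isz) at icnt * sz + isz with no padding, while SoA pads the instance count up to a multiple of an alignment constant and stores it at isz * padded_cnt + icnt. The layout codes and the ALIGN value are assumptions made for this example, not CoreNEURON's actual constants.

#include <cassert>

// Assumed constants for this sketch only.
constexpr int LAYOUT_SOA = 0;
constexpr int LAYOUT_AOS = 1;
constexpr int ALIGN = 8;   // assumed number of instances per aligned chunk

// Pad the instance count for SoA so each variable's column starts aligned.
int padded_size(int cnt, int layout) {
    if (layout == LAYOUT_AOS)
        return cnt;                                // AoS needs no padding
    return ((cnt + ALIGN - 1) / ALIGN) * ALIGN;    // SoA: round up to ALIGN
}

// Flat index of coordinate (icnt, isz) for cnt instances of sz variables each.
int flat_index(int icnt, int cnt, int isz, int sz, int layout) {
    assert(icnt < cnt && isz < sz);
    if (layout == LAYOUT_AOS)
        return icnt * sz + isz;                    // instance-major
    return isz * padded_size(cnt, layout) + icnt;  // variable-major, padded stride
}

For example, with cnt = 10, sz = 3 and the assumed ALIGN = 8, padded_size(10, LAYOUT_SOA) is 16, so (icnt = 4, isz = 2) maps to 2 * 16 + 4 = 36 in SoA but to 4 * 3 + 2 = 14 in AoS.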
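The mkdir_p entry above describes mkdir -p-like behaviour. A rough equivalent using the C++17 standard library, shown purely for illustration and not CoreNEURON's implementation, would be:

#include <filesystem>
#include <system_error>

// Illustrative stand-in for mkdir -p semantics; returns 0 on success, -1 on error.
int make_path(const char* path) {
    std::error_code ec;
    std::filesystem::create_directories(path, ec);  // creates missing parents, no-op if already present
    return ec ? -1 : 0;
}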