#ifdef ADJUST_GROUP_PRINT
        fout<<"Print adjust group information\n";
        fout<<"Debug mode: HERMITE\n";
    template <class Tmethod>
#ifdef ADJUST_GROUP_PRINT
    bool adjust_group_write_flag;
#ifdef ADJUST_GROUP_PRINT
#ifdef ADJUST_GROUP_PRINT
        ASSERT(!adjust_group_write_flag||(adjust_group_write_flag&&fgroup.is_open()));
    void print(std::ostream & _fout) const{

#ifdef ADJUST_GROUP_PRINT
        fwrite(&adjust_group_write_flag, sizeof(bool), 1, _fp);
#ifdef ADJUST_GROUP_PRINT
        size_t rcount = fread(&adjust_group_write_flag, sizeof(bool), 1, _fin);
        std::cerr<<"Error: data reading failed! Required data number is 1, only obtained "<<rcount<<".\n";
        _fout<<std::setw(_width)<<"dE"
             <<std::setw(_width)<<"Etot_ref"
             <<std::setw(_width)<<"Ekin"
             <<std::setw(_width)<<"Epot"
             <<std::setw(_width)<<"Epert"
             <<std::setw(_width)<<"dE_cum"
             <<std::setw(_width)<<"dE_intr"
             <<std::setw(_width)<<"dE_mod";

             <<std::setw(_width)<<ekin
             <<std::setw(_width)<<epot
             <<std::setw(_width)<<epert
             <<std::setw(_width)<<de_cum
    template <class Tparticle, class Tpcm, class Tpert, class TARpert, class Tacc, class TARacc, class Tinfo>
    Float time_next_min_;
    Float energy_init_ref_;
    int index_offset_group_;
    int interrupt_group_dt_sorted_group_index_;
    bool initial_system_flag_;
    bool modify_system_flag_;
        ASSERT(info.checkParams());
        energy_init_ref_(0.0),
        energy_(), energy_sd_(),
        n_act_single_(0), n_act_group_(0),
        n_init_single_(0), n_init_group_(0),
        index_offset_group_(0),
        interrupt_group_dt_sorted_group_index_(-1), interrupt_binary_(), index_group_merger_(),
        initial_system_flag_(false), modify_system_flag_(false),
        index_dt_sorted_single_(), index_dt_sorted_group_(),
        index_group_resolve_(), index_group_cm_(),
        pred_(), force_(), time_next_(),
        index_group_mask_(), table_group_mask_(), table_single_mask_(), step(),
        time_next_min_ = 0.0;
        energy_init_ref_ = 0.0;
        n_act_single_ = n_act_group_ = n_init_single_ = n_init_group_ = 0;
        index_offset_group_ = 0;
        interrupt_group_dt_sorted_group_index_ = -1;
        interrupt_binary_.clear();
        index_group_merger_.clear();
        initial_system_flag_ = false;
        modify_system_flag_ = false;

        index_dt_sorted_single_.clear();
        index_dt_sorted_group_.clear();
        index_group_resolve_.clear();
        index_group_cm_.clear();
        index_group_mask_.clear();
        table_group_mask_.clear();
        table_single_mask_.clear();
        const int nmax_group = groups.getSizeMax();
        const int nmax_tot = nmax + nmax_group;
        index_dt_sorted_group_.reserveMem(nmax_group);
        index_offset_group_ = nmax;
        auto* nb_ptr = neighbors.getDataAddress();
        for (int i=0; i<nmax; i++) {
            nb_ptr[i].neighbor_address.reserveMem(nmax_tot);
    void calcDt2ndList(const int* _index_single,
                       const int _n_single,
                       const int* _index_group,
                       const int _n_group,
                       const Float _dt_limit) {
        for (int i=0; i<_n_single; i++){
            const int k = _index_single[i];
            const Float* acc0 = force_[k].acc0;
            const Float* acc1 = force_[k].acc1;

        for (int i=0; i<_n_group; i++) {
            const int k = _index_group[i];
            const int kf = k + index_offset_group_;
            const Float* acc0 = force_[kf].acc0;
            const Float* acc1 = force_[kf].acc1;
            groups[k].particles.cm.dt = dt;
            groups[k].perturber.initial_step_flag = false;
    void predictAll(const Float _time_pred) {
        static thread_local const Float inv3 = 1.0 / 3.0;
        const int n_single = index_dt_sorted_single_.getSize();
        const auto* ptcl = particles.getDataAddress();
        for (int k=0; k<n_single; k++){
            const int i = index_dt_sorted_single_[k];
            const Float dt = _time_pred - ptcl[i].time;
            pred[i].pos[0] = ptcl[i].pos[0] + dt*(ptcl[i].vel[0] + 0.5*dt*(ptcl[i].acc0[0] + inv3*dt*ptcl[i].acc1[0]));
            pred[i].pos[1] = ptcl[i].pos[1] + dt*(ptcl[i].vel[1] + 0.5*dt*(ptcl[i].acc0[1] + inv3*dt*ptcl[i].acc1[1]));
            pred[i].pos[2] = ptcl[i].pos[2] + dt*(ptcl[i].vel[2] + 0.5*dt*(ptcl[i].acc0[2] + inv3*dt*ptcl[i].acc1[2]));
            pred[i].vel[0] = ptcl[i].vel[0] + dt*(ptcl[i].acc0[0] + 0.5*dt*ptcl[i].acc1[0]);
            pred[i].vel[1] = ptcl[i].vel[1] + dt*(ptcl[i].acc0[1] + 0.5*dt*ptcl[i].acc1[1]);
            pred[i].vel[2] = ptcl[i].vel[2] + dt*(ptcl[i].acc0[2] + 0.5*dt*ptcl[i].acc1[2]);
            pred[i].mass = ptcl[i].mass;

        const int n_group = index_dt_sorted_group_.getSize();
        ASSERT(n_group <= groups.getSize());
        auto* group_ptr = groups.getDataAddress();
        for (int k=0; k<n_group; k++) {
            const int i = index_dt_sorted_group_[k];
            ASSERT(i<groups.getSize());
            const auto& pcm = group_ptr[i].particles.cm;
            const Float dt = _time_pred - pcm.time;
            auto& predcm = pred[i+index_offset_group_];
            predcm.pos[0] = pcm.pos[0] + dt*(pcm.vel[0] + 0.5*dt*(pcm.acc0[0] + inv3*dt*pcm.acc1[0]));
            predcm.pos[1] = pcm.pos[1] + dt*(pcm.vel[1] + 0.5*dt*(pcm.acc0[1] + inv3*dt*pcm.acc1[1]));
            predcm.pos[2] = pcm.pos[2] + dt*(pcm.vel[2] + 0.5*dt*(pcm.acc0[2] + inv3*dt*pcm.acc1[2]));
            predcm.vel[0] = pcm.vel[0] + dt*(pcm.acc0[0] + 0.5*dt*pcm.acc1[0]);
            predcm.vel[1] = pcm.vel[1] + dt*(pcm.acc0[1] + 0.5*dt*pcm.acc1[1]);
            predcm.vel[2] = pcm.vel[2] + dt*(pcm.acc0[2] + 0.5*dt*pcm.acc1[2]);
            predcm.mass = pcm.mass;
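        // The prediction above is the third-order Taylor expansion of the stored state:
        //   x_p = x + v*dt + (1/2)*a0*dt^2 + (1/6)*a1*dt^3
        //   v_p = v + a0*dt + (1/2)*a1*dt^2
        // the nested form dt*(vel + 0.5*dt*(acc0 + inv3*dt*acc1)) reproduces exactly these coefficients.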
    void correctAndCalcDt4thOne(H4Ptcl& _pi, ForceH4& _fi, const Float _dt_limit, const bool _init_step_flag) {
        static thread_local const Float inv3 = 1.0 / 3.0;
        const Float dt = _pi.dt;
        const Float h = 0.5 * dt;
        const Float hinv = 2.0 / dt;
        const Float A0p[3] = {_fi.acc0[0] + _pi.acc0[0], _fi.acc0[1] + _pi.acc0[1], _fi.acc0[2] + _pi.acc0[2]};
        const Float A0m[3] = {_fi.acc0[0] - _pi.acc0[0], _fi.acc0[1] - _pi.acc0[1], _fi.acc0[2] - _pi.acc0[2]};
        const Float A1p[3] = {(_fi.acc1[0] + _pi.acc1[0])*h, (_fi.acc1[1] + _pi.acc1[1])*h, (_fi.acc1[2] + _pi.acc1[2])*h};
        const Float A1m[3] = {(_fi.acc1[0] - _pi.acc1[0])*h, (_fi.acc1[1] - _pi.acc1[1])*h, (_fi.acc1[2] - _pi.acc1[2])*h};

        const Float vel_new[3] = {_pi.vel[0] + h*( A0p[0] - inv3*A1m[0] ),
                                  _pi.vel[1] + h*( A0p[1] - inv3*A1m[1] ),
                                  _pi.vel[2] + h*( A0p[2] - inv3*A1m[2] )};
        _pi.pos[0] += h*( (_pi.vel[0] + vel_new[0]) + h*(-inv3*A0m[0]));
        _pi.pos[1] += h*( (_pi.vel[1] + vel_new[1]) + h*(-inv3*A0m[1]));
        _pi.pos[2] += h*( (_pi.vel[2] + vel_new[2]) + h*(-inv3*A0m[2]));

        _pi.vel[0] = vel_new[0];
        _pi.vel[1] = vel_new[1];
        _pi.vel[2] = vel_new[2];

        _pi.acc0[0] = _fi.acc0[0];
        _pi.acc0[1] = _fi.acc0[1];
        _pi.acc0[2] = _fi.acc0[2];
        _pi.acc1[0] = _fi.acc1[0];
        _pi.acc1[1] = _fi.acc1[1];
        _pi.acc1[2] = _fi.acc1[2];

        const Float acc3[3] = {(1.5*hinv*hinv*hinv) * (A1p[0] - A0m[0]),
                               (1.5*hinv*hinv*hinv) * (A1p[1] - A0m[1]),
                               (1.5*hinv*hinv*hinv) * (A1p[2] - A0m[2])};
        const Float acc2[3] = {(0.5*hinv*hinv) * A1m[0] + h*acc3[0],
                               (0.5*hinv*hinv) * A1m[1] + h*acc3[1],
                               (0.5*hinv*hinv) * A1m[2] + h*acc3[2]};
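        // acc2 and acc3 are the second and third time derivatives of the acceleration reconstructed
        // from the two force evaluations over the step (A0m/A1m are differences, A1p the sum of the
        // jerks scaled by h = dt/2); they are presumably fed to the Aarseth-style fourth-order
        // time-step criterion evaluated in the (elided) code below.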
#ifdef HERMITE_DEBUG_ACC
        _pi.acc2[0] = acc2[0];
        _pi.acc2[1] = acc2[1];
        _pi.acc2[2] = acc2[2];
        _pi.acc3[0] = acc3[0];
        _pi.acc3[1] = acc3[1];
        _pi.acc3[2] = acc3[2];

        const Float dt_old = _pi.dt;
        if(_init_step_flag) {
            std::cerr<<"Initial step flag on: pi.id: "<<_pi.id<<" step size: "<<_pi.dt<<" time: "<<_pi.time<<std::endl;
        ASSERT((dt_old > 0.0 && _pi.dt > 0.0));
    void correctAndCalcDt4thList(const int* _index_single,
                                 const int _n_single,
                                 const int* _index_group,
                                 const int _n_group,
                                 const Float _dt_limit) {
        ASSERT(_n_single<=index_dt_sorted_single_.getSize());
        ASSERT(_n_group<=index_dt_sorted_group_.getSize());
        for(int i=0; i<_n_single; i++){
            const int k = _index_single[i];
            correctAndCalcDt4thOne(ptcl[k], force[k], _dt_limit, neighbors[k].initial_step_flag);

        auto* group_ptr = groups.getDataAddress();
        for(int i=0; i<_n_group; i++) {
            const int k = _index_group[i];
            auto& groupi = group_ptr[k];
            correctAndCalcDt4thOne(groupi.particles.cm, force[k+index_offset_group_], std::min(_dt_limit, groupi.info.dt_limit), groupi.perturber.initial_step_flag);
            groupi.perturber.initial_step_flag = false;
    void checkGroupResolve(const int _n_group) {
        if(_n_group==0) return;
        ASSERT(_n_group<=index_dt_sorted_group_.getSize());
        for (int i=0; i<_n_group; i++) {
            const int k = index_dt_sorted_group_[i];
            groupk.perturber.checkGroupResolve();
    void writeBackResolvedGroupAndCreateJParticleList(const bool _write_back_flag) {
        const int n_group_org = index_dt_sorted_group_.getSize();
        auto* group_ptr = groups.getDataAddress();
        for (int i=0; i<n_group_org; i++) {
            int k = index_dt_sorted_group_[i];
            ASSERT(k<groups.getSize());
            auto& group_k = group_ptr[k];
            if (group_k.perturber.need_resolve_flag) {
                if (_write_back_flag) group_k.writeBackSlowDownParticles(ptcl[k+index_offset_group_]);
    void insertParticleIndexToGroup(const int _i,
                                    int* _new_group_particle_index_origin,
                                    int* _new_n_group_offset,
                                    int& _new_n_particle,
                                    int* _break_group_index_with_offset,
                                    int& _n_break_no_add,
                                    const int _n_break) {
        int insert_group=-1, insert_index=-1;
        if(_used_mask[_i]>=0) {
            insert_group = _used_mask[_i];
        else if(_used_mask[_j]>=0) {
            insert_group = _used_mask[_j];

        if(insert_group>=0) {
            if (insert_index>=index_offset_group_) insert_n = groups[insert_index-index_offset_group_].particles.getSize();
            int last_group_offset = _new_n_group_offset[_new_n_group-1];
            for (int j=0; j<insert_n; j++)
                _new_group_particle_index_origin[_new_n_particle++] = _new_group_particle_index_origin[last_group_offset+j];

            if(insert_group<_new_n_group-1) {
                for (int k=_new_n_group-1; k>insert_group; k--) {
                    int k0_group_offset = _new_n_group_offset[k-1];
                    for (int j=0; j<insert_n; j++)
                        _new_group_particle_index_origin[_new_n_group_offset[k]++] = _new_group_particle_index_origin[k0_group_offset+j];

            int insert_group_offset=_new_n_group_offset[insert_group];
                auto& group = groups[insert_index-index_offset_group_];
                for (int j=0; j<insert_n; j++)
                    _new_group_particle_index_origin[insert_group_offset++] = group.info.particle_index[j];
                _break_group_index_with_offset[_n_break+_n_break_no_add] = insert_index;
            else _new_group_particle_index_origin[insert_group_offset] = insert_index;
            _used_mask[insert_index] = insert_group;

            _new_n_group_offset[_new_n_group] = _new_n_particle;
            if (_i>=index_offset_group_) {
                int insert_n = group.particles.getSize();
                for (int j=0; j<insert_n; j++)
                    _new_group_particle_index_origin[_new_n_particle++] = group.info.particle_index[j];
                _break_group_index_with_offset[_n_break+_n_break_no_add] = _i;
            else _new_group_particle_index_origin[_new_n_particle++] = _i;
            _used_mask[_i] = _new_n_group;

            if (_j>=index_offset_group_) {
                int insert_n = group.particles.getSize();
                for (int j=0; j<insert_n; j++)
                    _new_group_particle_index_origin[_new_n_particle++] = group.info.particle_index[j];
                _break_group_index_with_offset[_n_break+_n_break_no_add] = _j;
            else _new_group_particle_index_origin[_new_n_particle++] = _j;
            _used_mask[_j] = _new_n_group;
    inline void calcOneSingleAccJerkNB(ForceH4 &_fi,
                                       Neighbor<Tparticle> &_nbi,
        _nbi.resetNeighbor();
        const int* single_list = index_dt_sorted_single_.getDataAddress();
        const int n_single = index_dt_sorted_single_.getSize();
        for (int i=0; i<n_single; i++) {
            const int j = single_list[i];
            const auto& pj = ptcl[j];
            if (_pid==pj.id) continue;
            if (pj.mass==0) continue;

        auto* group_ptr = groups.getDataAddress();
        const int n_group_resolve = index_group_resolve_.getSize();
        for (int i=0; i<n_group_resolve; i++) {
            const int j = index_group_resolve_[i];
            auto& groupj = group_ptr[j];
            if (_pid==groupj.particles.cm.id) continue;
            if (groupj.particles.cm.mass==0) continue;
            _nbi.checkAndAddNeighborGroup(r2, groupj, j+index_offset_group_);

        const int n_group_cm = index_group_cm_.getSize();
        for (int i=0; i<n_group_cm; i++) {
            const int j = index_group_cm_[i];
            auto& groupj = group_ptr[j];
            if (_pid==groupj.particles.cm.id) continue;
            if (groupj.particles.cm.mass==0) continue;
            const auto& pj = ptcl[j+index_offset_group_];
            ASSERT(j+index_offset_group_<pred_.getSize());
            ASSERT(_pi.id!=pj.id);
            _nbi.checkAndAddNeighborGroup(r2, groupj, j+index_offset_group_);

        ASSERT(_nbi.n_neighbor_group + _nbi.n_neighbor_single == _nbi.neighbor_address.getSize());
#ifdef HERMITE_PERT_FORCE
    template <class Tpi, class Tgroupi>
    inline void calcOneGroupCMAccJerkNB(ForceH4 &_fi,
        auto& nbi = _groupi.perturber;
        const int* single_list = index_dt_sorted_single_.getDataAddress();
        const int n_single = index_dt_sorted_single_.getSize();
        for (int i=0; i<n_single; i++) {
            const int j = single_list[i];
            const auto& pj = ptcl[j];
            if (_pi.id==pj.id) continue;
            if (pj.mass==0.0) continue;

        auto* group_ptr = groups.getDataAddress();
        const int n_group_resolve = index_group_resolve_.getSize();
        for (int i=0; i<n_group_resolve; i++) {
            const int j = index_group_resolve_[i];
            auto& groupj = group_ptr[j];
            if(_pi.id==groupj.particles.cm.id) continue;
            if (groupj.particles.cm.mass==0.0) continue;
            nbi.checkAndAddNeighborGroup(r2, groupj, j+index_offset_group_);

        const int n_group_cm = index_group_cm_.getSize();
        for (int i=0; i<n_group_cm; i++) {
            const int j = index_group_cm_[i];
            auto& groupj = group_ptr[j];
            if (_pi.id==groupj.particles.cm.id) continue;
            if (groupj.particles.cm.mass==0.0) continue;
            const auto& pj = ptcl[j+index_offset_group_];
            ASSERT(j+index_offset_group_<pred_.getSize());
            nbi.checkAndAddNeighborGroup(r2, groupj, j+index_offset_group_);

        ASSERT(nbi.n_neighbor_group + nbi.n_neighbor_single == nbi.neighbor_address.getSize());
#ifdef HERMITE_PERT_FORCE
    template <class Tpi, class Tgroupi>
    inline void calcOneGroupMemberAccJerkNB(ForceH4 &_fi,
        auto& nbi = _groupi.perturber;
        const int* single_list = index_dt_sorted_single_.getDataAddress();
        const int n_single = index_dt_sorted_single_.getSize();
        for (int i=0; i<n_single; i++) {
            const int j = single_list[i];
            const auto& pj = ptcl[j];
            if (pj.mass==0.0) continue;
            ASSERT(_pi.id!=pj.id);

        auto* group_ptr = groups.getDataAddress();
        const int n_group_resolve = index_group_resolve_.getSize();
        for (int i=0; i<n_group_resolve; i++) {
            const int j = index_group_resolve_[i];
            const auto& pj = ptcl[j+index_offset_group_];
            auto& groupj = group_ptr[j];
            if(_pi.id==pj.id) continue;
            if (pj.mass==0.0) continue;
            nbi.checkAndAddNeighborGroup(r2, groupj, j+index_offset_group_);

        const int n_group_cm = index_group_cm_.getSize();
        for (int i=0; i<n_group_cm; i++) {
            const int j = index_group_cm_[i];
            auto& groupj = group_ptr[j];
            const auto& pj = ptcl[j+index_offset_group_];
            if (pj.mass==0.0) continue;
            ASSERT(_pi.id!=pj.id);
            ASSERT(j+index_offset_group_<pred_.getSize());
            nbi.checkAndAddNeighborGroup(r2, groupj, j+index_offset_group_);

        ASSERT(nbi.n_neighbor_group + nbi.n_neighbor_single == nbi.neighbor_address.getSize());

        auto* neighbor_ptr = neighbors.getDataAddress();
        auto* member_adr = _groupi.particles.getOriginAddressArray();
        const int n_member = _groupi.particles.getSize();
        for (int j=0; j<n_member; j++) {
            const int kj = _groupi.info.particle_index[j];
            auto& pkj = *(Tparticle*)member_adr[j];
            ASSERT((void*)member_adr[j]==(void*)&particles[kj]);
            auto& fkj = force_ptr[kj];
            auto& nbkj = neighbor_ptr[kj];
            calcOneSingleAccJerkNB(fkj, nbkj, pkj, _pi.id);

            _fi.acc0[0] += pkj.mass * fkj.acc0[0];
            _fi.acc0[1] += pkj.mass * fkj.acc0[1];
            _fi.acc0[2] += pkj.mass * fkj.acc0[2];
            _fi.acc1[0] += pkj.mass * fkj.acc1[0];
            _fi.acc1[1] += pkj.mass * fkj.acc1[1];
            _fi.acc1[2] += pkj.mass * fkj.acc1[2];
            _fi.pot += pkj.mass * fkj.pot;

        ASSERT(abs(mcm-_pi.mass)<1e-10);
        _fi.acc0[0] /= _pi.mass;
        _fi.acc0[1] /= _pi.mass;
        _fi.acc0[2] /= _pi.mass;
        _fi.acc1[0] /= _pi.mass;
        _fi.acc1[1] /= _pi.mass;
        _fi.acc1[2] /= _pi.mass;
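        // The center-of-mass force of a resolved group is the mass-weighted sum of the member
        // forces divided by the total mass (checked against _pi.mass above), i.e.
        //   acc_cm = sum_j m_j * acc_j / m_cm, and analogously for the jerk and the potential.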
    inline void calcAccJerkNBList(const int* _index_single,
                                  const int _n_single,
                                  const int* _index_group,
                                  const int _n_group) {
        auto* neighbor_ptr = neighbors.getDataAddress();
        for (int k=0; k<_n_single; k++) {
            const int i = _index_single[k];
            auto& pi = pred_ptr[i];
            auto& fi = force_ptr[i];
            auto& nbi = neighbor_ptr[i];
            calcOneSingleAccJerkNB(fi, nbi, pi, pi.id);

        auto* group_ptr = groups.getDataAddress();
        for (int k=0; k<_n_group; k++) {
            const int i = _index_group[k];
            auto& groupi = group_ptr[i];
            auto& pi = pred_ptr[i+index_offset_group_];
            auto& fi = force_ptr[i+index_offset_group_];
            if (groupi.particles.cm.mass>0) {
                if (groupi.perturber.need_resolve_flag) calcOneGroupMemberAccJerkNB(fi, groupi, pi);
                else calcOneGroupCMAccJerkNB(fi, groupi, pi);
            else calcOneSingleAccJerkNB(fi, groupi.perturber, pi, pi.id);
    void reduceGroupCMStepByHalfAndSortDtIndex(const int _index) {
        int k = index_dt_sorted_group_[i];
        auto& pcm = groups[k].particles.cm;
        time_next_[k+index_offset_group_] = pcm.time + pcm.dt;
        int kp = index_dt_sorted_group_[i-1];
        while (time_next_[k+index_offset_group_]<time_next_[kp+index_offset_group_]) {
            int tmp=index_dt_sorted_group_[i-1];
            index_dt_sorted_group_[i-1] = index_dt_sorted_group_[i];
            index_dt_sorted_group_[i] = tmp;
            else kp = index_dt_sorted_group_[i-1];
        time_next_min_ = std::min(time_next_min_, time_next_[k+index_offset_group_]);
        if (_index>=n_act_group_ && i<=n_act_group_ && n_act_group_<index_dt_sorted_group_.getSize()) n_act_group_++;
    void updateTimeNextList(const int* _index_single,
                            const int _n_single,
                            const int* _index_group,
                            const int _n_group) {
        for (int i=0; i<_n_single; i++){
            const int k = _index_single[i];
            ASSERT(k<time_next_.getSize());

        for (int i=0; i<_n_group; i++) {
            const int k = _index_group[i];
            const int kf = k + index_offset_group_;
            ASSERT(kf<time_next_.getSize());
            auto& pcm = groups[k].particles.cm;
            time_next_[kf] = pcm.time + pcm.dt;
    Float calcDrDv(const H4Ptcl& _p1, const H4Ptcl& _p2) {
        dx[0] = _p1.pos[0] - _p2.pos[0];
        dx[1] = _p1.pos[1] - _p2.pos[1];
        dx[2] = _p1.pos[2] - _p2.pos[2];
        dv[0] = _p1.vel[0] - _p2.vel[0];
        dv[1] = _p1.vel[1] - _p2.vel[1];
        dv[2] = _p1.vel[2] - _p2.vel[2];
        Float drdv = dx[0]*dv[0] + dx[1]*dv[1] + dx[2]*dv[2];
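        // drdv = (r1-r2).(v1-v2): a negative value means the pair is approaching, a positive value
        // that it is receding; this sign is used below when deciding whether to form or break a group.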
    class SortIndexDtSingle{
        SortIndexDtSingle(Float* _time): time_(_time) {}
        bool operator() (const int & left, const int & right) const {
            return time_[left] < time_[right];

    class SortIndexDtGroup{
        SortIndexDtGroup(Float * _time, int _offset): time_(_time), offset_(_offset) {}
        bool operator() (const int & left, const int & right) const {
            return time_[left+offset_] < time_[right+offset_];
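    // (sketch) these functors are presumably used with std::sort to keep the dt-sorted index arrays
    // ordered by the next update time, e.g.
    //   std::sort(index_dt_sorted_single_.getDataAddress(),
    //             index_dt_sorted_single_.getDataAddress()+n_single,
    //             SortIndexDtSingle(time_next_.getDataAddress()));
    //   std::sort(index_dt_sorted_group_.getDataAddress(),
    //             index_dt_sorted_group_.getDataAddress()+n_group,
    //             SortIndexDtGroup(time_next_.getDataAddress(), index_offset_group_));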
        int n_index = _index.getSize();
            if (_table[_index[i]]) {
                if (i<_n_act) _n_act--;
                if (i<_n_init) _n_init--;
                if (i>=n_index) break;
                _index[i] = _index[i+imove];
        ASSERT(_n_act<=n_index);
        ASSERT(_n_init<=n_index);
    int removeNBGroupAddressFromTable(COMM::List<NBAdr<Tparticle>> & _adr) {
        int k_last = _adr.getSize()-1;
            if (_adr[k].index>=index_offset_group_) {
                const int kg = _adr[k].index-index_offset_group_;
                if (table_group_mask_[kg]) {
                    _adr[k] = _adr[k_last];
        _adr.resizeNoInitialize(k_last+1);
    int removeNBSingleAddressFromTable(COMM::List<NBAdr<Tparticle>> & _adr) {
        int k_last = _adr.getSize()-1;
            if (_adr[k].index<index_offset_group_) {
                if (table_single_mask_[_adr[k].index]) {
                    _adr[k] = _adr[k_last];
        _adr.resizeNoInitialize(k_last+1);
        initial_system_flag_ = true;
        const int n_particle = particles.getSize();
        ASSERT(n_particle>1);
        neighbors.resizeNoInitialize(n_particle);
        for (int i=0; i<n_particle; i++) {
            index_dt_sorted_single_[i] = i;
            table_single_mask_[i] = false;
        n_init_single_ = index_dt_sorted_single_.getSize();
        n_act_single_ = n_init_single_;
    void addGroups(const int* _particle_index, const int* _n_group_offset, const int _n_group) {
        ASSERT(initial_system_flag_);
        if (_n_group==0) return;
        modify_system_flag_ = true;

        int group_index[_n_group];
        for (int i=0; i<_n_group; i++) {
            if(index_group_mask_.getSize()>0) {
                table_group_mask_[igroup] = false;
                igroup = groups.getSize();
                groups.increaseSizeNoInitialize(1);
            group_index[i] = igroup;

        for (int i=0; i<_n_group; i++) {
            auto& group_new = groups[group_index[i]];
            const int n_particle = _n_group_offset[i+1] - _n_group_offset[i];
            ASSERT(n_particle>0);
            ASSERT(n_particle<=particles.getSize());
            group_new.particles.reserveMem(n_particle);
            group_new.reserveIntegratorMem();
            group_new.perturber.neighbor_address.reserveMem(nmax_tot);
            group_new.info.reserveMem(n_particle);

            for(int j=_n_group_offset[i]; j<_n_group_offset[i+1]; j++) {
                const int p_index = _particle_index[j];
                group_new.particles.addMemberAndAddress(particles[p_index]);
                group_new.info.particle_index.addMember(p_index);
                group_new.info.r_break_crit = std::max(group_new.info.r_break_crit, particles[p_index].getRGroup());
                group_new.perturber.r_neighbor_crit_sq = std::max(group_new.perturber.r_neighbor_crit_sq, r_neighbor_crit*r_neighbor_crit);
                ASSERT(table_single_mask_[p_index]==false);
                table_single_mask_[p_index] = true;

            group_new.particles.calcCenterOfMass();
            group_new.particles.cm.id = - (group_index[i]+1);
            group_new.particles.shiftToCenterOfMassFrame();
            group_new.info.generateBinaryTree(group_new.particles, ar_manager->interaction.gravitational_constant);
#ifdef ADJUST_GROUP_DEBUG
            std::cerr<<"Add new group, index: "<<group_index[i]<<" Member_index: ";
            for (int k=0; k<group_new.particles.getSize(); k++)
                std::cerr<<group_new.info.particle_index[k]<<" ";
            std::cerr<<"r_break_crit: "<<group_new.info.r_break_crit;
            std::cerr<<std::endl;
            COMM::Binary& bin = group_new.info.getBinaryTreeRoot();
            std::cerr<<std::endl;
            bin.printColumn(std::cerr);
            std::cerr<<std::endl;

            group_new.perturber.need_resolve_flag = true;

        const int n_group_old = index_dt_sorted_group_.getSize();
        for (int i=n_group_old-1; i>=0; i--) {
            index_dt_sorted_group_[i+_n_group] = index_dt_sorted_group_[i];
        for (int i=0; i<_n_group; i++) {
            index_dt_sorted_group_[i] = group_index[i];
        n_init_group_ += _n_group;
        n_act_group_ += _n_group;

        removeDtIndexFromTable(index_dt_sorted_single_, n_act_single_, n_init_single_, table_single_mask_);
        int n_group = index_dt_sorted_group_.getSize();
        for (int i=0; i<n_group; i++) {
            const int k = index_dt_sorted_group_[i];
            auto& nbk = groups[k].perturber;
            int n_rm_single = removeNBSingleAddressFromTable(nbk.neighbor_address);
            nbk.n_neighbor_single -= n_rm_single;
            ASSERT(nbk.n_neighbor_single>=0);
            for (int j=0; j<_n_group; j++) {
                const int jk = group_index[j];
                if (k==jk) continue;
                nbk.n_neighbor_group++;
        ASSERT(!_fin.eof());
        int n_group_offset[n_group+1];
        for (int i=0; i<=n_group; i++) {
            _fin>>n_group_offset[i];
            ASSERT(!_fin.eof());
        int n_members = n_group_offset[n_group];
        ASSERT(n_members>1);
        int particle_index[n_members];
        for (int i=0; i<n_members; i++) {
            _fin>>particle_index[i];
            ASSERT(!_fin.eof());
        addGroups(particle_index, n_group_offset, n_group);
                     int* _new_n_group_offset,
                     const int* _break_group_index_with_offset,
                     const int _n_break_no_add,
                     const int _n_break) {
        ASSERT(initial_system_flag_);
        ASSERT(_n_break<=index_dt_sorted_group_.getSize());
        if (_n_break==0 && _n_break_no_add==0) return;
        modify_system_flag_= true;
        int new_index_single[particles.getSize()];

        for (int k=0; k<_n_break; k++) {
            const int i = _break_group_index_with_offset[k] - index_offset_group_;
            ASSERT(i>=0&&i<groups.getSize());
            auto& groupi = groups[i];
            const int n_member = groupi.particles.getSize();
            int particle_index_origin[n_member];
            int ibreak = groupi.info.getTwoBranchParticleIndexOriginFromBinaryTree(particle_index_origin, groupi.particles.getDataAddress());
#ifdef ADJUST_GROUP_DEBUG
            auto& sd_root = groupi.info.getBinaryTreeRoot().slowdown;
            std::cerr<<"Break Group: "
                     <<" k: "<<std::setw(2)<<i
                     <<" N_member: "<<std::setw(4)<<groupi.particles.getSize()
                     <<" step: "<<std::setw(12)<<groupi.profile.step_count_sum
                     <<" step(tsyn): "<<std::setw(10)<<groupi.profile.step_count_tsyn_sum
                     <<" Pert_In: "<<std::setw(20)<<sd_root.getPertIn()
                     <<" Pert_Out: "<<std::setw(20)<<sd_root.getPertOut()
                     <<" SD: "<<std::setw(20)<<sd_root.getSlowDownFactor()
                     <<" SD(org): "<<std::setw(20)<<sd_root.getSlowDownFactorOrigin();
            auto& bin = groupi.info.getBinaryTreeRoot();
            std::cerr<<" semi: "<<std::setw(20)<<bin.semi
                     <<" ecc: "<<std::setw(20)<<bin.ecc
                     <<" NB: "<<std::setw(4)<<groupi.perturber.neighbor_address.getSize()

#ifdef ADJUST_GROUP_PRINT
            if (manager->adjust_group_write_flag) {

            groupi.particles.shiftToOriginFrame();
            groupi.particles.template writeBackMemberAll<Tparticle>();
            table_group_mask_[i] = true;

            ASSERT(particle_index_origin[0]<particles.getSize());
            ASSERT(table_single_mask_[particle_index_origin[0]]==true);
            table_single_mask_[particle_index_origin[0]] = false;
            if (particles[particle_index_origin[0]].mass > 0.0)
                new_index_single[n_single_new++] = particle_index_origin[0];
            int index_has_mass_last = -1;
            for (int j=0; j<ibreak; j++) {
                if (particles[particle_index_origin[j]].mass > 0.0) {
                    index_has_mass_last = particle_index_origin[j];

            if (count_mass>=2) {
                int offset = _new_n_group_offset[_new_n_group];
                for (int j=0; j<ibreak; j++) {
                    ASSERT(table_single_mask_[particle_index_origin[j]]==true);
                    table_single_mask_[particle_index_origin[j]] = false;
                    _new_group_particle_index_origin[offset++] = particle_index_origin[j];
                _new_n_group_offset[++_new_n_group] = offset;
            else if (count_mass==1) {
                ASSERT(index_has_mass_last<particles.getSize());
                ASSERT(index_has_mass_last>=0);
                ASSERT(table_single_mask_[index_has_mass_last]==true);
                table_single_mask_[index_has_mass_last] = false;
                new_index_single[n_single_new++] = index_has_mass_last;

            if (n_member-ibreak==1) {
                ASSERT(particle_index_origin[ibreak]<particles.getSize());
                ASSERT(table_single_mask_[particle_index_origin[ibreak]]==true);
                table_single_mask_[particle_index_origin[ibreak]] = false;
                if (particles[particle_index_origin[ibreak]].mass > 0.0)
                    new_index_single[n_single_new++] = particle_index_origin[ibreak];
                int index_has_mass_last = -1;
                for (int j=ibreak; j<n_member; j++) {
                    if (particles[particle_index_origin[j]].mass > 0.0) {
                        index_has_mass_last = particle_index_origin[j];

                if (count_mass>=2) {
                    int offset = _new_n_group_offset[_new_n_group];
                    for (int j=ibreak; j<n_member; j++) {
                        ASSERT(table_single_mask_[particle_index_origin[j]]==true);
                        table_single_mask_[particle_index_origin[j]] = false;
                        _new_group_particle_index_origin[offset++] = particle_index_origin[j];
                    _new_n_group_offset[++_new_n_group] = offset;
                else if (count_mass==1) {
                    ASSERT(index_has_mass_last<particles.getSize());
                    ASSERT(index_has_mass_last>=0);
                    ASSERT(table_single_mask_[index_has_mass_last]==true);
                    table_single_mask_[index_has_mass_last] = false;
                    new_index_single[n_single_new++] = index_has_mass_last;

        const int n_single_old = index_dt_sorted_single_.getSize();
        for (int i=n_single_old-1; i>=0; i--) {
            index_dt_sorted_single_[i+n_single_new] = index_dt_sorted_single_[i];
        for (int i=0; i<n_single_new; i++) {
            index_dt_sorted_single_[i] = new_index_single[i];
        n_init_single_ += n_single_new;
        n_act_single_ += n_single_new;
        for (int k=_n_break; k<_n_break+_n_break_no_add; k++) {
            const int i = _break_group_index_with_offset[k] - index_offset_group_;
            auto& groupi = groups[i];
            groupi.particles.shiftToOriginFrame();
            groupi.particles.template writeBackMemberAll<Tparticle>();
            const int n_member = groupi.particles.getSize();
            ASSERT(n_member==groupi.info.particle_index.getSize());
            for (int j=0; j<n_member; j++) {
                const int kj = groupi.info.particle_index[j];
                table_single_mask_[kj] = false;
            table_group_mask_[i] = true;

        removeDtIndexFromTable(index_dt_sorted_group_, n_act_group_, n_init_group_, table_group_mask_);
        int n_group = index_dt_sorted_group_.getSize();
        for (int i=0; i<n_group; i++) {
            const int k = index_dt_sorted_group_[i];
            auto& nbk = groups[k].perturber;
            int n_rm_group = removeNBGroupAddressFromTable(nbk.neighbor_address);
            nbk.n_neighbor_group -= n_rm_group;
            ASSERT(nbk.n_neighbor_group>=0);
            for (int j=0; j<n_single_new; j++) {
                const int jk = new_index_single[j];
                nbk.n_neighbor_single++;
    void checkBreak(int* _break_group_index_with_offset, int& _n_break, const bool _start_flag) {
        const int n_group_tot = index_dt_sorted_group_.getSize();
        if (n_group_tot==0) return;
        bool merge_mask[n_group_tot];
        for (int k=0; k<n_group_tot; k++) merge_mask[k] = false;

        const int n_merger = index_group_merger_.getSize();
        for (int i=0; i<n_merger; i++) {
            const int k = index_group_merger_[i];
            ASSERT(k<groups.getSize());
            ASSERT(table_group_mask_[k]==false);
            auto& groupk = groups[k];
#ifdef ADJUST_GROUP_DEBUG
            auto& bin_root = groupk.info.getBinaryTreeRoot();
            std::cerr<<"Break merger group: i_group: "<<k
                     <<" N_member: "<<groupk.particles.getSize()
                     <<" ecca: "<<bin_root.ecca
                     <<" separation : "<<bin_root.r
                     <<" apo: "<<bin_root.semi*(1.0+bin_root.ecc)
                     <<" dt: "<<groupk.particles.cm.dt
                     <<" r_crit: "<<groupk.info.r_break_crit
            _break_group_index_with_offset[_n_break++] = k + index_offset_group_;
            merge_mask[k] = true;
        const Float kappa_org_crit = 1e-2;
        for (int i=0; i<n_group_tot; i++) {
            const int k = index_dt_sorted_group_[i];
            ASSERT(table_group_mask_[k]==false);
            if (merge_mask[k]) continue;
            auto& groupk = groups[k];
            const int n_member = groupk.particles.getSize();
            auto& bin_root = groupk.info.getBinaryTreeRoot();
            bool outgoing_flag = false;

            if (bin_root.semi>0.0 && bin_root.ecca>0.0) {
                outgoing_flag = true;
                if (bin_root.r > groupk.info.r_break_crit) {
#ifdef ADJUST_GROUP_DEBUG
                    std::cerr<<"Break group: binary escape, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member<<" ecca: "<<bin_root.ecca<<" separation : "<<bin_root.r<<" r_crit: "<<groupk.info.r_break_crit<<std::endl;
                    _break_group_index_with_offset[_n_break++] = k + index_offset_group_;

                Float apo = bin_root.semi * (1.0 + bin_root.ecc);
                if (apo>groupk.info.r_break_crit) {
                    groupk.info.getDrDv(dr2, drdv, *bin_root.getLeftMember(), *bin_root.getRightMember());
                    Float dr = drdv/bin_root.r*groups[k].particles.cm.dt;
                    Float rp = dr + bin_root.r;
                    if (rp > groupk.info.r_break_crit) {
                        Float rph = 0.5*dr + bin_root.r;
                        if ( rph < groupk.info.r_break_crit && bin_root.r <0.2*groupk.info.r_break_crit) {
#ifdef ADJUST_GROUP_DEBUG
                            std::cerr<<"Binary will escape but dr is too small, reduce cm step by half, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member<<" ecca: "<<bin_root.ecca<<" separation : "<<bin_root.r<<" apo: "<<apo<<" r_pred: "<<rp<<" drdv: "<<drdv<<" dt: "<<groups[k].particles.cm.dt<<" r_crit: "<<groupk.info.r_break_crit<<std::endl;
                            reduceGroupCMStepByHalfAndSortDtIndex(i);
#ifdef ADJUST_GROUP_DEBUG
                            std::cerr<<"Break group: binary will escape, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member<<" ecca: "<<bin_root.ecca<<" separation : "<<bin_root.r<<" apo: "<<apo<<" r_pred: "<<rp<<" drdv: "<<drdv<<" dt: "<<groups[k].particles.cm.dt<<" r_crit: "<<groupk.info.r_break_crit<<std::endl;
                            _break_group_index_with_offset[_n_break++] = k + index_offset_group_;
            if (bin_root.semi<0.0) {
                groupk.info.getDrDv(dr2, drdv, *bin_root.getLeftMember(), *bin_root.getRightMember());
                outgoing_flag = true;
                if (bin_root.r > groupk.info.r_break_crit) {
#ifdef ADJUST_GROUP_DEBUG
                    std::cerr<<"Break group: hyperbolic escape, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member<<" drdv: "<<drdv<<" separation : "<<bin_root.r<<" r_crit: "<<groupk.info.r_break_crit<<std::endl;
                    _break_group_index_with_offset[_n_break++] = k + index_offset_group_;

                Float dr = drdv/bin_root.r*groups[k].particles.cm.dt;
                Float rp = dr + bin_root.r;
                if (rp > groupk.info.r_break_crit) {
                    Float rph = 0.5*dr + bin_root.r;
                    if ( rph < groupk.info.r_break_crit && bin_root.r <0.2*groupk.info.r_break_crit) {
#ifdef ADJUST_GROUP_DEBUG
                        std::cerr<<"Hyperbolic will escape but dr is too small, reduce cm step by half first, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member<<" drdv: "<<drdv<<" separation : "<<bin_root.r<<" r_pred: "<<rp<<" dt: "<<groups[k].particles.cm.dt<<" r_crit: "<<groupk.info.r_break_crit<<std::endl;
                        reduceGroupCMStepByHalfAndSortDtIndex(i);
#ifdef ADJUST_GROUP_DEBUG
                        std::cerr<<"Break group: hyperbolic will escape, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member<<" drdv: "<<drdv<<" separation : "<<bin_root.r<<" r_pred: "<<rp<<" dt: "<<groups[k].particles.cm.dt<<" r_crit: "<<groupk.info.r_break_crit<<std::endl;
                        _break_group_index_with_offset[_n_break++] = k + index_offset_group_;
            if (outgoing_flag) {
                auto& sd_group = groupk.info.getBinaryTreeRoot().slowdown;
                sd.period = sd_group.period;
                Float* acc_cm = groupk.particles.cm.acc0;
                Float fcm[3] = {acc_cm[0]*bin_root.mass, acc_cm[1]*bin_root.mass, acc_cm[2]*bin_root.mass };

                if (kappa_org<kappa_org_crit && !_start_flag) {
                    Float apo = bin_root.semi * (1.0 + bin_root.ecc);
                    if (apo>groupk.info.r_break_crit||bin_root.semi<0.0) {
#ifdef ADJUST_GROUP_DEBUG
                        std::cerr<<"Break group: strongly perturbed, time: "<<time_<<" i_group: "<<k<<" N_member: "<<n_member;
                        std::cerr<<" index: ";
                        for (int i=0; i<n_member; i++)
                            std::cerr<<groupk.info.particle_index[i]<<" ";
                        auto& sd_root = groupk.info.getBinaryTreeRoot().slowdown;
                        std::cerr<<" pert_in: "<<sd_root.pert_in
                                 <<" pert_out: "<<sd_root.pert_out
                                 <<" kappa_org: "<<kappa_org
                                 <<" dr: "<<bin_root.r
                                 <<" semi: "<<bin_root.semi
                                 <<" ecc: "<<bin_root.ecc
                                 <<" r_break: "<<groupk.info.r_break_crit
                        _break_group_index_with_offset[_n_break++] = k + index_offset_group_;
#if (!defined AR_SLOWDOWN_ARRAY) && (!defined AR_SLOWDOWN_TREE)
            for (int j=0; j<2; j++) {
                if (bin_root.isMemberTree(j)) {
                    auto* bin_sub = bin_root.getMemberAsTree(j);
                    if (bin_sub->semi>0.0) {
                        Float apo_in = bin_sub->semi*(1+bin_sub->ecc);
                        if (bin_root.semi>0.0) {
                            Float apo_out = bin_root.semi*(1+bin_root.ecc);
                        if (kappa_in_max>5.0) {
                            if (abs(groupk.getEnergyError()/groupk.getEtotRef())<100.0/(1-std::min(bin_sub->ecc,bin_root.ecc))*groupk.manager->energy_error_relative_max) {
#ifdef ADJUST_GROUP_DEBUG
                                std::cerr<<"Break group: inner kappa large, time: "<<time_<<" i_group: "<<k<<" i_member: "<<j<<" kappa_in:"<<kappa_in<<" kappa_in(max):"<<kappa_in_max
                                         <<" Energy error:"<<groupk.getEnergyError()
                                         <<" Etot ref:"<<groupk.getEtotRef()
                                         <<" ecc(in):"<<bin_sub->ecc
                                         <<" ecc(out):"<<bin_root.ecc
                                _break_group_index_with_offset[_n_break++] = k + index_offset_group_;
                       int* _new_n_group_offset,
                       int* _break_group_index_with_offset,
                       int& _n_break_no_add,
                       const bool _start_flag) {
        const Float kappa_org_crit = 1e-2;
        const int n_particle = particles.getSize();
        const int n_group = groups.getSize();
        ASSERT(index_offset_group_==n_particle);
        int new_n_particle = 0;
        int used_mask[n_particle+n_group];
        for (int k=0; k<n_particle; k++) used_mask[k] = table_single_mask_[k]?-2:-1;
        for (int k=0; k<n_group; k++) used_mask[k+index_offset_group_] = table_group_mask_[k]?-2:-1;
        for (int k=0; k<_n_break; k++) used_mask[_break_group_index_with_offset[k]] = -2;
        for (int k=0; k<n_act_single_; k++) {
            const int i = index_dt_sorted_single_[k];
            ASSERT(j<n_particle+n_group);
            if (pi.mass==0.0) continue;
            Float r_crit = pi.getRGroup();
            if (j<index_offset_group_)
                r_crit = std::max(particles[j].getRGroup(), r_crit);
                r_crit = std::max(groups[j-index_offset_group_].info.r_break_crit, r_crit);
            Float r_crit_sq = r_crit*r_crit;

            if (dr2 < r_crit_sq) {
                if(used_mask[j]==-2) continue;
                if(used_mask[i]>=0 && used_mask[j]>=0) continue;
                if (j<index_offset_group_) {
                    const int jg = j-index_offset_group_;
                    pj = &groups[jg].particles.cm;
                if(pj->mass==0.0) continue;

                Float drdv = calcDrDv(pi, *pj);
                if(drdv<0.0||_start_flag) {
                    Float fcm[3] = {pi.mass*pi.acc0[0] + pj->mass*pj->acc0[0],
                                    pi.mass*pi.acc0[1] + pj->mass*pj->acc0[1],
                                    pi.mass*pi.acc0[2] + pj->mass*pj->acc0[2]};
                    Float mcm = pi.mass + pj->mass;
#ifdef AR_SLOWDOWN_MASSRATIO
                    if(kappa_org<kappa_org_crit) continue;
#ifdef ADJUST_GROUP_DEBUG
                    if (j<index_offset_group_) {
                        std::cerr<<"Find new group: time: "<<time_
                                 <<" index: "<<i<<" "<<j
                                 <<" dr: "<<sqrt(dr2)
                                 <<" kappa_org: "<<kappa_org<<"\n";
                        auto& bin_root = groups[j-index_offset_group_].info.getBinaryTreeRoot();
                        std::cerr<<"Find new group: time: "<<time_
                                 <<" dr: "<<sqrt(dr2)
                                 <<" kappa_org: "<<kappa_org<<"\n"
                                 <<" index slowdown apo \n"
                                 <<std::setw(16)<<bin_root.slowdown.getSlowDownFactorOrigin()
                                 <<std::setw(16)<<bin_root.semi*(1.0+bin_root.ecc);
                        std::cerr<<std::endl;

                    insertParticleIndexToGroup(i, j, used_mask, _new_group_particle_index_origin, _new_n_group_offset, new_n_particle, _new_n_group, _break_group_index_with_offset, _n_break_no_add, _n_break);
        for (int k=0; k<n_act_group_; k++) {
            const int ig = index_dt_sorted_group_[k];
            const int i = ig + index_offset_group_;
            auto& groupi = groups[ig];
            if (used_mask[i]==-2) continue;
            const int j = groupi.perturber.r_min_index;
            ASSERT(j<n_particle+n_group);
            const Float dr2 = groupi.perturber.r_min_sq;
            Float r_crit = groupi.info.r_break_crit;
            if (j<index_offset_group_)
                r_crit = std::max(particles[j].getRGroup(), r_crit);
                r_crit = std::max(groups[j-index_offset_group_].info.r_break_crit, r_crit);
            Float r_crit_sq = r_crit*r_crit;

            if (dr2 < r_crit_sq) {
                if(used_mask[j]==-2) continue;
                if(used_mask[i]>=0 && used_mask[j]>=0) continue;
                auto& pi = groupi.particles.cm;
                if (j<index_offset_group_) {
                    const int jg = j-index_offset_group_;
                    pj = &groups[jg].particles.cm;

                Float drdv = calcDrDv(pi, *pj);
                if(drdv<0.0||_start_flag) {
                    Float fcm[3] = {pi.mass*pi.acc0[0] + pj->mass*pj->acc0[0],
                                    pi.mass*pi.acc0[1] + pj->mass*pj->acc0[1],
                                    pi.mass*pi.acc0[2] + pj->mass*pj->acc0[2]};
                    Float mcm = pi.mass + pj->mass;
#ifdef AR_SLOWDOWN_MASSRATIO
                    if(kappa_org<kappa_org_crit) continue;

#ifdef ADJUST_GROUP_DEBUG
                    auto& bini = groupi.info.getBinaryTreeRoot();
                    std::cerr<<"Find new group: time: "<<time_
                             <<" dr: "<<sqrt(dr2)
                             <<" kappa_org: "<<kappa_org
                             <<"\n index slowdown apo \n"
                             <<std::setw(16)<<bini.slowdown.getSlowDownFactorOrigin()
                             <<std::setw(16)<<bini.semi*(1.0+bini.ecc);
                    if(j<index_offset_group_) {
                        auto& binj = groups[j-index_offset_group_].info.getBinaryTreeRoot();
                        Float kappaj = binj.slowdown.getSlowDownFactorOrigin();
                                 <<std::setw(16)<<kappaj
                                 <<std::setw(16)<<binj.semi*(1.0+binj.ecc);
                    std::cerr<<std::endl;

                    insertParticleIndexToGroup(i, j, used_mask, _new_group_particle_index_origin, _new_n_group_offset, new_n_particle, _new_n_group, _break_group_index_with_offset, _n_break_no_add, _n_break);

        _new_n_group_offset[_new_n_group] = new_n_particle;
        ASSERT(new_n_particle<=particles.getSize());
        ASSERT(initial_system_flag_);
        modify_system_flag_= true;

        int break_group_index_with_offset[groups.getSize()+1];
        int n_break_no_add = 0;
        int new_group_particle_index[particles.getSize()];
        int new_n_group_offset[groups.getSizeMax()+1];
        int new_n_group = 0;

        checkBreak(break_group_index_with_offset, n_break, _start_flag);
        checkNewGroup(new_group_particle_index, new_n_group_offset, new_n_group, break_group_index_with_offset, n_break_no_add, n_break, _start_flag);
        ASSERT(n_break<=groups.getSize());
        ASSERT(new_n_group<=groups.getSizeMax());

        breakGroups(new_group_particle_index, new_n_group_offset, new_n_group, break_group_index_with_offset, n_break_no_add, n_break);
        addGroups(new_group_particle_index, new_n_group_offset, new_n_group);
        ASSERT(initial_system_flag_);
        modify_system_flag_= false;

#ifdef HERMITE_DEBUG
        int particle_index_count[particles.getSize()];
        int group_index_count[groups.getSize()];
        for (int i=0; i<particles.getSize(); i++) particle_index_count[i]=0;
        for (int i=0; i<groups.getSize(); i++) group_index_count[i]=0;
        for (int i=0; i<index_dt_sorted_single_.getSize(); i++) {
            ASSERT(index_dt_sorted_single_[i]<particles.getSize());
            particle_index_count[index_dt_sorted_single_[i]]++;
        for (int i=0; i<index_dt_sorted_group_.getSize(); i++) {
            ASSERT(index_dt_sorted_group_[i]<groups.getSize());
            group_index_count[index_dt_sorted_group_[i]]++;
        for (int i=0; i<table_single_mask_.getSize(); i++) {
            if (table_single_mask_[i]) particle_index_count[i]++;
        for (int i=0; i<table_group_mask_.getSize(); i++) {
            if (table_group_mask_[i]) group_index_count[i]++;
        for (int i=0; i<particles.getSize(); i++)
            ASSERT(particle_index_count[i]==1||(particle_index_count[i]==0&&particles[i].mass==0.0));
        for (int i=0; i<groups.getSize(); i++)
            ASSERT(group_index_count[i]==1);
        ASSERT(n_init_group_<=index_dt_sorted_group_.getSize());
        ASSERT(n_act_group_<=index_dt_sorted_group_.getSize());
        ASSERT(n_init_single_<=index_dt_sorted_single_.getSize());
        ASSERT(n_act_single_<=index_dt_sorted_single_.getSize());
        if (n_init_single_==0&&n_init_group_==0) return;

        auto* ptcl = particles.getDataAddress();
        for(int i=0; i<n_init_single_; i++){
            int k = index_single[i];
            ptcl[k].acc0[0] = ptcl[k].acc0[1] = ptcl[k].acc0[2] = 0.0;
            ptcl[k].acc1[0] = ptcl[k].acc1[1] = ptcl[k].acc1[2] = 0.0;
            ptcl[k].time = time_;
            Float r_neighbor_crit = ptcl[k].getRNeighbor();
            neighbors[k].r_neighbor_crit_sq = r_neighbor_crit*r_neighbor_crit;

        auto* group_ptr = groups.getDataAddress();
        for(int i=0; i<n_init_group_; i++){
            int k = index_group[i];
            ASSERT(table_group_mask_[k]==false);
            auto& pcm = group_ptr[k].particles.cm;
            pcm.acc0[0] = pcm.acc0[1] = pcm.acc0[2] = 0.0;
            pcm.acc1[0] = pcm.acc1[1] = pcm.acc1[2] = 0.0;
            ASSERT(k+index_offset_group_<pred_.getSize());
            pred_[k+index_offset_group_] = pcm;

        writeBackResolvedGroupAndCreateJParticleList(false);
        calcAccJerkNBList(index_single, n_init_single_, index_group, n_init_group_);

        for(int i=0; i<n_init_single_; i++){
            const int k = index_single[i];
            ptcl[k].acc0[0] = force_[k].acc0[0];
            ptcl[k].acc0[1] = force_[k].acc0[1];
            ptcl[k].acc0[2] = force_[k].acc0[2];
            ptcl[k].acc1[0] = force_[k].acc1[0];
            ptcl[k].acc1[1] = force_[k].acc1[1];
            ptcl[k].acc1[2] = force_[k].acc1[2];

        for(int i=0; i<n_init_group_; i++){
            const int k = index_group[i];
            auto& pcm = group_ptr[k].particles.cm;
            auto& fcm = force_[k+index_offset_group_];
            pcm.acc0[0] = fcm.acc0[0];
            pcm.acc0[1] = fcm.acc0[1];
            pcm.acc0[2] = fcm.acc0[2];
            pcm.acc1[0] = fcm.acc1[0];
            pcm.acc1[1] = fcm.acc1[1];
            pcm.acc1[2] = fcm.acc1[2];

            group_ptr[k].initialIntegration(time_);
            group_ptr[k].info.time_offset = time_offset_;

            auto& bin_root = group_ptr[k].info.getBinaryTreeRoot();
            if (bin_root.semi*(1.0+bin_root.ecc)>group_ptr[k].info.r_break_crit||bin_root.semi<0) {
                Float ecc_anomaly = bin_root.calcEccAnomaly(group_ptr[k].info.r_break_crit);
                Float mean_anomaly = bin_root.calcMeanAnomaly(ecc_anomaly, bin_root.ecc);
                Float kappa = bin_root.slowdown.getSlowDownFactor();
                Float t_peri = 0.5*abs(mean_anomaly/12.5663706144*bin_root.period)*kappa;
                Float dt_limit = group_ptr[k].info.dt_limit;
                while(dt_limit > t_peri) dt_limit *= 0.5;
                group_ptr[k].info.dt_limit = dt_limit;
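                // 12.5663706144 is 4*pi, so t_peri = 0.5*|mean_anomaly/(4*pi)*period|*kappa, i.e. a
                // quarter of the (slow-down scaled) time needed to reach pericenter from the current
                // phase; dt_limit is halved until it resolves that interval for wide or hyperbolic orbits.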
            ASSERT(group_ptr[k].info.checkParams());
            ASSERT(group_ptr[k].perturber.checkParams());
#ifdef ADJUST_GROUP_PRINT
            if (manager->adjust_group_write_flag) {

        calcDt2ndList(index_single, n_init_single_, index_group, n_init_group_, dt_limit_);
        updateTimeNextList(index_single, n_init_single_, index_group, n_init_group_);
        n_init_single_ = n_init_group_ = 0;
        ASSERT(initial_system_flag_);
        ASSERT(!modify_system_flag_);
        ASSERT(n_init_group_==0&&n_init_single_==0);
        interrupt_group_dt_sorted_group_index_ = -1;
        interrupt_binary_.clear();
#ifdef HERMITE_DEBUG
        const int n_group_tot = index_dt_sorted_group_.getSize();
        const int i_start = interrupt_group_dt_sorted_group_index_>=0 ? interrupt_group_dt_sorted_group_index_ : 0;
        int interrupt_index_dt_group_list[n_group_tot];
        int n_interrupt_change_dt=0;

        for (int i=i_start; i<n_group_tot; i++) {
            const int k = index_dt_sorted_group_[i];
#ifdef HERMITE_DEBUG
            ASSERT(table_group_mask_[k]==false);
            if (i!=interrupt_group_dt_sorted_group_index_)
            interrupt_binary_ = groups[k].integrateToTime(time_next);
            auto& pcm = groups[k].particles.cm;
            groups[k].perturber.initial_step_flag = true;
            if (pcm.time + pcm.dt >time_next) {
                ASSERT(i>=n_act_group_);
                pcm.dt = time_next - pcm.time;
                time_next_[k+index_offset_group_] = time_next;
                interrupt_index_dt_group_list[n_interrupt_change_dt++] = i;

            auto& bink = groups[k].info.getBinaryTreeRoot();
            Float dm = bink.mass - pcm.mass;
            Float de_pot = force_[k+index_offset_group_].pot*dm;
            energy_.de_cum += de_pot;
            energy_sd_.de_cum += de_pot;

            auto& vcm = pcm.vel;
            auto& vbin = bink.vel;
            auto& vbin_bk = groups[k].info.vcm_record;
            Float de_kin = 0.5*dm*(vcm[0]*vcm[0]+vcm[1]*vcm[1]+vcm[2]*vcm[2]);
            Float dvbin[3] = {vbin[0] - vbin_bk[0], vbin[1] - vbin_bk[1], vbin[2] - vbin_bk[2]};
            de_kin += bink.mass*(dvbin[0]*vcm[0]+dvbin[1]*vcm[1]+dvbin[2]*vcm[2]);
            energy_.de_cum += de_kin;
            energy_sd_.de_cum += de_kin;
            vbin_bk[0] = vbin[0];
            vbin_bk[1] = vbin[1];
            vbin_bk[2] = vbin[2];

            interrupt_group_dt_sorted_group_index_ = i;
            return interrupt_binary_;
        if (n_interrupt_change_dt>0) {
            int ishift_start = interrupt_index_dt_group_list[n_interrupt_change_dt-1];
            int interrupt_index_group_list[n_interrupt_change_dt];
            interrupt_index_group_list[0] = index_dt_sorted_group_[ishift_start];
            for (int i=n_interrupt_change_dt-2; i>=0; i--) {
                int k = interrupt_index_dt_group_list[i];
                interrupt_index_group_list[shift_offset] = index_dt_sorted_group_[k];
                for (int j=ishift_start; j>k+shift_offset; j--) {
                    index_dt_sorted_group_[j] = index_dt_sorted_group_[j-shift_offset];
                ishift_start = k+shift_offset;
            ASSERT(shift_offset==n_interrupt_change_dt);
            for (int j=ishift_start; j>=shift_offset; j--) {
                index_dt_sorted_group_[j] = index_dt_sorted_group_[j-shift_offset];
            for (int i=0; i<n_interrupt_change_dt; i++) {
                index_dt_sorted_group_[i] = interrupt_index_group_list[i];
            n_act_group_ += n_interrupt_change_dt;
            n_act_group_ = std::min(n_act_group_, n_group_tot);
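        // Groups whose step was truncated to end exactly at time_next are moved to the front of
        // index_dt_sorted_group_ (the shifting above preserves the relative order of the remaining
        // entries), and n_act_group_ is enlarged so they are treated as active at the next correction.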
#ifdef HERMITE_DEBUG
        int table_check_mask[n_group_tot];
        for (int i=0; i<n_group_tot; i++) table_check_mask[i]=0;
        for (int i=0; i<index_dt_sorted_group_.getSize(); i++) table_check_mask[index_dt_sorted_group_[i]]++;
        for (int i=0; i<n_group_tot; i++) {
            ASSERT((table_check_mask[i]==1&&!table_group_mask_[i])||(table_check_mask[i]==0&&table_group_mask_[i]));
        for (int i=0; i<n_act_group_; i++) {
            ASSERT(time_next_[index_dt_sorted_group_[i]+index_offset_group_] == time_next);
        if (n_act_group_<n_group_tot) {
            ASSERT(time_next_[index_dt_sorted_group_[n_act_group_]+index_offset_group_] > time_next);

        return interrupt_binary_;
        int mod_index[n_act_single_];
        for (int i=0; i<n_act_single_; i++) {
            int k = index_dt_sorted_single_[i];
            auto& rbk = pred_[k].pos;
            auto& vbk = pred_[k].vel;
            auto& mbk = pred_[k].mass;

            if (modified_flag) {
#ifdef HERMITE_DEBUG_PRINT
                std::cerr<<"Modify one particle: modify_flag: "<<modified_flag
                         <<" time: "<<pk.time
                         <<" mass_new: "<<pk.mass
                         <<" vel_bk: "<<vbk[0]<<" "<<vbk[1]<<" "<<vbk[2]
                         <<" vel_new: "<<v[0]<<" "<<v[1]<<" "<<v[2]

                Float de_pot = force_[k].pot*(pk.mass-mbk);
                energy_.de_cum += de_pot;
                energy_sd_.de_cum += de_pot;

                Float de_kin = -0.5*mbk*(vbk[0]*vbk[0]+vbk[1]*vbk[1]+vbk[2]*vbk[2]);
                de_kin += 0.5*pk.mass*(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]);
                energy_.de_cum += de_kin;
                energy_sd_.de_cum += de_kin;

                mod_index[n_mod++] = k;
                time_next_[k] = pk.time + pk.dt;

        for (int i=0; i<n_mod; i++) {
            for (int j=0; j<i; j++) {
                de_pot += manager->interaction.calcEnergyPotSingleSingle(pred_[mod_index[i]],pred_[mod_index[j]]);
        energy_.de_cum += de_pot;
        energy_sd_.de_cum += de_pot;
        ASSERT(initial_system_flag_);
        ASSERT(!modify_system_flag_);
        ASSERT(n_init_group_==0&&n_init_single_==0);

        predictAll(time_next);
        checkGroupResolve(n_act_group_);
        writeBackResolvedGroupAndCreateJParticleList(true);
        calcAccJerkNBList(index_single, n_act_single_, index_group, n_act_group_);
        correctAndCalcDt4thList(index_single, n_act_single_, index_group, n_act_group_, dt_limit_);
        updateTimeNextList(index_single, n_act_single_, index_group, n_act_group_);
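        // One block step of the fourth-order Hermite scheme: predict all particles to time_next,
        // resolve and write back group members, evaluate force, jerk and neighbour lists for the
        // active lists, then apply the corrector and recompute the individual time steps and the
        // sorted next-update times.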
                               const int* _particle_index,
                               const int _n_particle) {
        ASSERT(initial_system_flag_);

        int index_single_select[_n_particle];
        int n_single_select = 0;
        int index_group_select[groups.getSize()];
        int n_group_select = 0;

        auto* ptcl = particles.getDataAddress();
        auto* group_ptr = groups.getDataAddress();
        for (int i=0; i<_n_particle; i++){
            const int k = _particle_index[i];
            if (k<index_offset_group_) {
                if (table_single_mask_[k]) continue;
                if (ptcl[k].time>=_time_next) continue;
                ptcl[k].dt = _time_next - ptcl[k].time;
                index_single_select[n_single_select++] = k;
                const int kg = k - index_offset_group_;
                if (table_group_mask_[kg]) continue;
                auto& pcm = group_ptr[kg].particles.cm;
                if (pcm.time>=_time_next) continue;
                pcm.dt = _time_next - pcm.time;
                index_group_select[n_group_select++] = kg;

        if (n_single_select>0 || n_group_select>0) {
            calcAccJerkNBList(index_single_select, n_single_select, index_group_select, n_group_select);
            correctAndCalcDt4thList(index_single_select, n_single_select, index_group_select, n_group_select, dt_limit_);
            updateTimeNextList(index_single_select, n_single_select, index_group_select, n_group_select);
        if (index_dt_sorted_single_.getSize()>0) time_next_min_ = time_next_[index_dt_sorted_single_[0]];
        if (index_dt_sorted_group_.getSize()>0) time_next_min_ = std::min(time_next_min_, time_next_[index_dt_sorted_group_[0]+index_offset_group_]);
        ASSERT(time_next_min_>0.0);

        const int n_singles = index_dt_sorted_single_.getSize();
        for(n_act_single_=0; n_act_single_<n_singles; n_act_single_++){
            if (time_next_min_ < time_next_[index_dt_sorted_single_[n_act_single_]]) break;
#ifdef HERMITE_DEBUG
        for (int i=0; i<n_singles-1; i++) {
            const int k = index_dt_sorted_single_[i];
            const int k1 = index_dt_sorted_single_[i+1];
            ASSERT(time_next_[k]<=time_next_[k1]);

        const int n_groups = index_dt_sorted_group_.getSize();
        for(n_act_group_=0; n_act_group_<n_groups; n_act_group_++){
            if (time_next_min_ < time_next_[index_dt_sorted_group_[n_act_group_] + index_offset_group_]) break;
#ifdef HERMITE_DEBUG
        for (int i=0; i<n_groups-1; i++) {
            const int k = index_dt_sorted_group_[i];
            const int k1 = index_dt_sorted_group_[i+1];
            ASSERT(k<groups.getSize());
            ASSERT(k1<groups.getSize());
            ASSERT(time_next_[k+index_offset_group_]<=time_next_[k1+index_offset_group_]);

        ASSERT(!(n_act_single_==0&&n_act_group_==0));
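        // Block time-step selection: particles and groups are active when their next update time
        // equals time_next_min_; the loops above count them from the front of the dt-sorted lists,
        // and the debug checks verify those lists remain non-decreasing in time_next_.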
        const int n_group = index_dt_sorted_group_.getSize();
        for (int i=0; i<n_group; i++) {
            const int k = index_dt_sorted_group_[i];
            ASSERT(table_group_mask_[k]==false);
            groups[k].template writeBackParticlesOriginFrame<Tparticle>();
        auto& groupi = groups[_igroup];
        Float de_binary_interrupt = groupi.getDEChangeBinaryInterrupt();
        energy_.de_cum += de_binary_interrupt;
        energy_sd_.de_cum += groupi.getDESlowDownChangeCum();

        Float etot = groupi.getEkin() + groupi.getEpot();
        Float etot_sd = groupi.getEkinSlowDown() + groupi.getEpotSlowDown();
        energy_sd_.de_cum += etot - etot_sd;

        auto& vcm = groupi.particles.cm.vel;
        auto& bink = groupi.info.getBinaryTreeRoot();
        auto& vbin = bink.vel;
        auto& vbin_bk = groupi.info.vcm_record;
        Float dvbin[3] = {vbin[0] - vbin_bk[0], vbin[1] - vbin_bk[1], vbin[2] - vbin_bk[2]};
        Float de_kin = bink.mass*(dvbin[0]*vcm[0]+dvbin[1]*vcm[1]+dvbin[2]*vcm[2]);
        energy_.de_cum -= epert - de_kin;
        energy_sd_.de_cum -= epert - de_kin;
        const int n_group = index_dt_sorted_group_.getSize();
        ASSERT(n_group <= groups.getSize());
        for (int k=0; k<n_group; k++) {
            const int i = index_dt_sorted_group_[k];
            ASSERT(i<groups.getSize());
            Float de_binary_interrupt = gi.getDEChangeBinaryInterrupt();
            energy_.de_cum += de_binary_interrupt;
            gi.resetDEChangeBinaryInterrupt();
            energy_sd_.de_cum += gi.getDESlowDownChangeCum();
            gi.resetDESlowDownChangeCum();
            gi.resetDESlowDownChangeBinaryInterrupt();
        const auto* ptcl = particles.getDataAddress();
        const int n_single = index_dt_sorted_single_.getSize();
        for (int k=0; k<n_single; k++){
            const int i = index_dt_sorted_single_[k];
            if (ptcl[i].time == time_) ptmp[i] = ptcl[i];
            else ptmp[i] = pred[i];

        const int n_group = index_dt_sorted_group_.getSize();
        ASSERT(n_group <= groups.getSize());
        auto* group_ptr = groups.getDataAddress();
        for (int k=0; k<n_group; k++) {
            const int i = index_dt_sorted_group_[k];
            const auto& groupi = group_ptr[i];
            const auto& pcm = groupi.particles.cm;
            auto& predcm = pred[i+index_offset_group_];
            const int n_member = groupi.particles.getSize();
            for (int j=0; j<n_member; j++) {
                const int kj = groupi.info.particle_index[j];
                ptmp[kj] = ptcl[kj];
                if (pcm.time < time_ ) {
                    ptmp[kj].pos[0] += predcm.pos[0] - pcm.pos[0];
                    ptmp[kj].pos[1] += predcm.pos[1] - pcm.pos[1];
                    ptmp[kj].pos[2] += predcm.pos[2] - pcm.pos[2];
                    ptmp[kj].vel[0] += predcm.vel[0] - pcm.vel[0];
                    ptmp[kj].vel[1] += predcm.vel[1] - pcm.vel[1];
                    ptmp[kj].vel[2] += predcm.vel[2] - pcm.vel[2];
        for (int i=0; i<groups.getSize(); i++) {
            energy_sd_.ekin -= gi.getEkin();
            energy_sd_.epot -= gi.getEpot();
            energy_sd_.ekin += gi.getEkinSlowDown();
            energy_sd_.epot += gi.getEpotSlowDown();

        if (_initial_flag) {
            energy_init_ref_ = energy_.getEtot();
        return energy_.ekin;

        return energy_sd_.ekin;

        return energy_.epot;

        return energy_sd_.epot;

        return energy_sd_.de_cum;

        return time_next_min_;

        return time_+time_offset_;

        time_offset_ = _time_offset;

        if (interrupt_group_dt_sorted_group_index_>=0)
            return index_dt_sorted_group_[interrupt_group_dt_sorted_group_index_];

        return interrupt_binary_;

        return n_act_single_;

        return n_act_group_;

        return n_init_group_;

        return n_init_single_;

        return index_dt_sorted_group_.getSize();

        return index_dt_sorted_single_.getSize();

        return index_offset_group_;
    void printColumnTitle(std::ostream & _fout, const int _width, const int _n_sd_list[], const int _n_group, const int _n_sd_tot) {
        _fout<<std::setw(_width)<<"Time"
             <<std::setw(_width)<<"dE"
             <<std::setw(_width)<<"Etot_ref"
             <<std::setw(_width)<<"Ekin"
             <<std::setw(_width)<<"Epot"
             <<std::setw(_width)<<"Epert"
             <<std::setw(_width)<<"dE_cum"
             <<std::setw(_width)<<"dE_intr"
             <<std::setw(_width)<<"dE_mod"
             <<std::setw(_width)<<"dE_SD"
             <<std::setw(_width)<<"Etot_SD_ref"
             <<std::setw(_width)<<"Ekin_SD"
             <<std::setw(_width)<<"Epot_SD"
             <<std::setw(_width)<<"Epert_SD"
             <<std::setw(_width)<<"dE_SD_cum"
             <<std::setw(_width)<<"dE_SD_intr"
             <<std::setw(_width)<<"dE_SD_mod"
             <<std::setw(_width)<<"N_SD";
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
        for (int i=0; i<_n_group; i++) {
            n_sd_count += _n_sd_list[i];
            for (int j=0; j<_n_sd_list[i]; j++)
        ASSERT(_n_sd_tot == n_sd_count);

        perturber.printColumnTitle(_fout, _width);
        info.printColumnTitle(_fout, _width);
        particles.printColumnTitle(_fout, _width);
    void printColumn(std::ostream & _fout, const int _width, const int _n_sd_list[], const int _n_group, const int _n_sd_tot){
        _fout<<std::setw(_width)<<time_;
        _fout<<std::setw(_width)<<_n_sd_tot;
        int n_group_now = groups.getSize();
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
        for (int i=0; i<_n_group; i++) {
            n_sd_count += _n_sd_list[i];
            if (i<n_group_now) {
#ifdef AR_SLOWDOWN_ARRAY
                int n_sd_in = gi.binary_slowdown.getSize();
                for (int j=0; j<_n_sd_list[i]; j++) {
                    if (j<n_sd_in) gi.binary_slowdown[j]->slowdown.printColumn(_fout, _width);
                int n_sd_in = gi.info.binarytree.getSize();
                for (int j=0; j<_n_sd_list[i]; j++) {
                    if (j<n_sd_in) gi.info.binarytree[j].slowdown.printColumn(_fout, _width);
                for (int j=0; j<_n_sd_list[i]; j++) sd_empty.printColumn(_fout, _width);
        ASSERT(_n_sd_tot == n_sd_count);

        info.printColumn(_fout, _width);
        std::map<Float, int> stephist;
        for(int i=0; i<index_dt_sorted_single_.getSize(); i++) {
            int k = index_dt_sorted_single_[i];
            std::map<Float, int>::iterator p = stephist.find(dt);
            if (p==stephist.end()) stephist[dt] = 1;
            else stephist[dt]++;
        for(int i=0; i<index_dt_sorted_group_.getSize(); i++) {
            int k = index_dt_sorted_group_[i];
            std::map<Float, int>::iterator p = stephist.find(dt);
            if (p==stephist.end()) stephist[dt] = 1;
            else stephist[dt]++;
        std::cerr<<"Step hist: time = "<<time_<<"\n";
        for(auto i=stephist.begin(); i!=stephist.end(); i++) {
            std::cerr<<std::setw(24)<<i->first;
        std::cerr<<std::endl;
        for(auto i=stephist.begin(); i!=stephist.end(); i++) {
            std::cerr<<std::setw(24)<<i->second;
        std::cerr<<std::endl;