#ifdef USE_INTRINSIC_FOR_X86
#include "AR/symplectic_integrator.h"
#include "Hermite/hermite_integrator.h"
#include "Hermite/hermite_particle.h"
typedef H4::ParticleH4<PtclHard> PtclH4;
AR::TimeTransformedSymplecticManager<ARInteraction> ar_manager;
h4_manager.interaction.gravitational_constant = _g;
ar_manager.interaction.gravitational_constant = _g;
ar_manager.interaction.tide.gravitational_constant = _g;
h4_manager.step.setDtRange(_dt_max, _dt_min_index);
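// Note: the manager state below is dumped and restored as a raw binary image
// of the whole object (fwrite/fread of `this`), so dumps are only portable
// between builds with an identical memory layout of this class.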
fwrite(this, size, 1, _fp);
size_t rcount = fread(this, size, 1, _fin);
std::cerr<<"Error: data reading failed! Expected 1 data item, obtained "<<rcount<<".\n";
void print(std::ostream & _fout) const{
    <<"eps_sq : "<<eps_sq<<std::endl
H4::HermiteIntegrator<PtclHard, PtclH4, HermitePerturber, ARPerturber, HermiteInteraction, ARInteraction, HermiteInformation> h4_int;
AR::TimeTransformedSymplecticIntegrator<PtclHard, PtclH4, ARPerturber, ARInteraction, H4::ARInformation<PtclHard>> sym_int;
#ifdef HARD_DEBUG_PRINT
PS::ReallocatableArray<PS::S32> n_group_sub_init;
#ifdef HARD_COUNT_NO_NEIGHBOR
PS::ReallocatableArray<bool> table_neighbor_exist;
#ifdef HARD_CHECK_ENERGY
#ifdef HARD_DEBUG_PRINT
n_group_sub_init(), n_group_sub_tot_init(0),
ARC_substep_sum(0), ARC_tsyn_step_sum(0), H4_step_sum(0),
#ifdef HARD_COUNT_NO_NEIGHBOR
table_neighbor_exist(), n_neighbor_zero(0),
#ifdef HARD_CHECK_ENERGY
template <class Tsoft>
Tsoft* _ptcl_artificial,
const PS::S32* _n_member_in_group,
std::cerr<<"Large cluster, n_ptcl="<<_n_ptcl<<" n_group="<<_n_group<<std::endl;
std::pair<PS::S32,PS::F64> r_search_max={-1,0.0};
for (PS::S32 i=0; i<_n_ptcl; i++) {
    r_search_max.first = i;
std::cerr<<"Maximum rsearch particle i = "<<r_search_max.first<<" ";
std::cerr<<std::endl;
PS::S32 adr_first_ptcl[_n_group+1];
PS::S32 n_group_offset[_n_group+1];
n_group_offset[0] = 0;
for(int i=0; i<_n_group; i++)
    n_group_offset[i+1] = n_group_offset[i] + _n_member_in_group[i];
if (_ptcl_artificial!=NULL) {
    for(int i=0; i<_n_group; i++) {
        auto* pi = &(_ptcl_artificial[adr_first_ptcl[i]]);
        ASSERT(ap_manager.getMemberN(pi)==_n_member_in_group[i]);
        auto* pcm = ap_manager.getCMParticles(pi);
        pcm->mass = pcm->group_data.artificial.getMassBackup();
#ifdef ARTIFICIAL_PARTICLE_DEBUG
        ap_manager.checkConsistence(&ptcl_origin[n_group_offset[i]], &(_ptcl_artificial[adr_first_ptcl[i]]));
if(n_group_offset[_n_group]<_n_ptcl)
    if (_ptcl_artificial!=NULL) assert(ptcl_origin[n_group_offset[_n_group]-1].group_data.artificial.isMember());
    else ASSERT(ptcl_origin[n_group_offset[_n_group]-1].group_data.artificial.isSingle());
else if(_n_group>0) ASSERT(_n_ptcl==2);
PS::S32 i_single_start = n_group_offset[_n_group];
PS::S32 n_single_init = _n_ptcl - i_single_start;
for(int i=0; i<i_single_start; i++) {
#ifdef HARD_DEBUG_PRINT
std::cerr<<"Hard: n_ptcl: "<<_n_ptcl<<" n_group: "<<_n_group<<std::endl;
if(_n_group==1&&n_single_init==0) {
    sym_int.particles.setMode(COMM::ListMode::copy);
    const PS::S32 n_members = _n_member_in_group[0];
    ASSERT(n_members == _n_ptcl);
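    // AR-only path: the whole cluster is a single group with no initial
    // singles, so it is integrated by sym_int (time-transformed symplectic
    // AR) alone instead of the Hermite+AR driver set up further below.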
    sym_int.particles.reserveMem(n_members);
    sym_int.info.reserveMem(n_members);
    for (PS::S32 i=0; i<n_members; i++) {
        sym_int.info.particle_index.addMember(i);
        Float r_neighbor_crit = ptcl_origin[i].getRNeighbor();
        sym_int.perturber.r_neighbor_crit_sq = std::max(sym_int.perturber.r_neighbor_crit_sq, r_neighbor_crit*r_neighbor_crit);
    sym_int.reserveIntegratorMem();
    sym_int.info.generateBinaryTree(sym_int.particles, ar_manager.interaction.gravitational_constant);
    if (_ptcl_artificial!=NULL) {
        Tsoft* api=&(_ptcl_artificial[adr_first_ptcl[0]]);
        auto* apcm = ap_manager.getCMParticles(api);
        auto* aptt = ap_manager.getTidalTensorParticles(api);
        tt.fit(aptt, *apcm, ap_manager.r_tidal_tensor);
        sym_int.perturber.soft_pert=&tt;
        sym_int.perturber.soft_pert->group_id = n_members;
        sym_int.perturber.calcSoftPertMin(sym_int.info.getBinaryTreeRoot(), ar_manager.interaction.gravitational_constant);
    sym_int.initialIntegration(0.0);
    sym_int.info.calcDsAndStepOption(ar_manager.step.getOrder(), ar_manager.interaction.gravitational_constant, ar_manager.ds_scale);
    auto& pcm = sym_int.particles.cm;
    if(_ptcl_artificial==NULL) {
        PS::F64 sd_factor = sym_int.info.getBinaryTreeRoot().slowdown.getSlowDownFactor();
        if (1.01*sd_factor*period<=sd_tmax) {
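            // Sanity check for an isolated binary: the slow-down factor times
            // the orbital period should exceed sd_tmax =
            // dt_tree*n_step_per_orbit; otherwise the slow-down treatment is
            // weaker than expected and the warning below is printed.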
            std::cerr<<"Warning: isolated binary SD ("<<sd_factor<<") * period ("<<period<<") = "<<period*sd_factor
                     <<" < SD_timescale_max = dt_tree*n_step_per_orbit ("<<sd_tmax<<")"<<std::endl;
#ifdef ADJUST_GROUP_PRINT
h4_int.manager = &h4_manager;
h4_int.ar_manager = &ar_manager;
h4_int.particles.setMode(COMM::ListMode::link);
h4_int.step = h4_manager.step;
h4_int.particles.calcCenterOfMass();
h4_int.particles.shiftToCenterOfMassFrame();
PS::S32 n_group_size_max = _n_ptcl+_n_group;
h4_int.groups.setMode(COMM::ListMode::local);
h4_int.groups.reserveMem(n_group_size_max);
h4_int.reserveIntegratorMem();
h4_int.initialSystemSingle(0.0);
#ifdef HARD_COUNT_NO_NEIGHBOR
table_neighbor_exist.resizeNoInitialize(_n_ptcl);
for (int k=0; k<_n_ptcl; k++) table_neighbor_exist[k] = false;
ASSERT(n_group_offset[_n_group]>0);
PS::S32 ptcl_index_group[n_group_offset[_n_group]];
for (PS::S32 i=0; i<n_group_offset[_n_group]; i++) ptcl_index_group[i] = i;
h4_int.addGroups(ptcl_index_group, n_group_offset, _n_group);
for (PS::S32 i=0; i<_n_group; i++) {
    auto& groupi = h4_int.groups[i];
    auto* api = &(_ptcl_artificial[adr_first_ptcl[i]]);
    auto* apcm = ap_manager.getCMParticles(api);
    auto* aptt = ap_manager.getTidalTensorParticles(api);
    apcm->pos -= h4_int.particles.cm.pos;
    tidal_tensor[i].fit(aptt, *apcm, ap_manager.r_tidal_tensor);
    groupi.perturber.soft_pert->group_id = groupi.particles.getSize();
    for (PS::S32 k=0; k<groupi.particles.getSize(); k++) {
        groupi.particles[k].setTidalTensorID(i+1);
        ptcl_origin[n_group_offset[i]+k].setTidalTensorID(i+1);
    groupi.perturber.calcSoftPertMin(groupi.info.getBinaryTreeRoot(), ar_manager.interaction.gravitational_constant);
    auto& pcm = groupi.particles.cm;
    PS::F64 r_out_cm = pcm.changeover.getRout();
    for (PS::S32 k=0; k<groupi.particles.getSize(); k++)
        ASSERT(abs(groupi.particles[k].changeover.getRout()-r_out_cm)<1e-10);
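// Each group's tidal tensor is fitted from the soft forces measured on the
// artificial tidal-tensor particles (the fit calls above); members are tagged
// with tensor index i+1 so that a re-formed group can try to reuse the same
// tensor later (see the consistency check inside the integration loop).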
h4_int.initialIntegration();
#ifdef HARD_DEBUG_PRINT
n_group_sub_init.resizeNoInitialize(_n_group);
n_group_sub_tot_init=0;
for (int i=0; i<_n_group; i++) {
#ifdef AR_SLOWDOWN_ARRAY
    n_group_sub_init[i] = h4_int.groups[i].binary_slowdown.getSize();
    n_group_sub_init[i] = h4_int.groups[i].info.binarytree.getSize();
    n_group_sub_tot_init += n_group_sub_init[i];
h4_int.adjustGroups(true);
for(int i=0; i<n_init; i++) {
    auto& groupi = h4_int.groups[group_index[i]];
    auto& pcm = groupi.particles.cm;
h4_int.initialIntegration();
h4_int.sortDtAndSelectActParticle();
#ifdef HARD_CHECK_ENERGY
h4_int.calcEnergySlowDown(true);
#ifdef HARD_DEBUG_PRINT_TITLE
h4_int.printColumnTitle(std::cout, WRITE_WIDTH, n_group_sub_init.getPointer(), n_group_sub_init.size(), n_group_sub_tot_init);
std::cout<<std::endl;
#ifdef HARD_DEBUG_PRINT
h4_int.printColumn(std::cout, WRITE_WIDTH, n_group_sub_init.getPointer(), n_group_sub_init.size(), n_group_sub_tot_init);
std::cout<<std::endl;
bool dump_flag=false;
#ifdef ADJUST_GROUP_PRINT
std::cerr<<"Large isolated AR step cluster found (dump): step: "<<sym_int.profile.step_count<<std::endl;
while (h4_int.getTimeInt()<_time_end) {
#ifdef HARD_COUNT_NO_NEIGHBOR
    checkNeighborExist();
    h4_int.integrateSingleOneStepAct();
    h4_int.adjustGroups(false);
    for(int i=0; i<n_init_group; i++) {
        auto& groupi = h4_int.groups[group_index[i]];
        auto& pcm = groupi.particles.cm;
        PS::S32 tt_id_member = groupi.particles[0].getTidalTensorID();
        ASSERT(tt_id_member>=0&&tt_id_member<n_tt);
        if (tt_id_member>0&&tt_id_member<n_tt) {
            PS::S32 n_members = groupi.particles.getSize();
            if (int(tidal_tensor_i->group_id) == n_members) {
                bool tt_consistent = true;
                for (PS::S32 k=1; k<n_members; k++) {
                    if (tt_id_member != groupi.particles[k].getTidalTensorID()) {
                        tt_consistent = false;
                if (tt_consistent) groupi.perturber.soft_pert = tidal_tensor_i;
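                // A stored tidal tensor is only reused when every member of
                // the re-formed group carries the same tensor ID and the
                // tensor's recorded group size matches; otherwise no stored
                // tensor is attached here.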
    h4_int.initialIntegration();
    h4_int.modifySingleParticles();
    h4_int.sortDtAndSelectActParticle();
    std::cerr<<"Large H4-AR step cluster found (dump): step: "<<h4_int.profile.ar_step_count<<std::endl;
#ifdef HARD_DEBUG_PRINT
    h4_int.calcEnergySlowDown(false);
    h4_int.printColumn(std::cout, WRITE_WIDTH, n_group_sub_init.getPointer(), n_group_sub_init.size(), n_group_sub_tot_init);
    std::cout<<std::endl;
    std::cerr<<"Hard energy error significant!"
             <<" dE_SD/Etot_SD: "<<de_sd/h4_int.getEtotSlowDownRef()
             <<" Ekin: "<<h4_int.getEkin()
             <<" Epot: "<<h4_int.getEpot()
             <<" Ekin_SD: "<<h4_int.getEkinSlowDown()
             <<" Epot_SD: "<<h4_int.getEpotSlowDown()
             <<" Etot_SD_ref: "<<h4_int.getEtotSlowDownRef()
             <<" dE: "<<h4_int.getEnergyError()
             <<" dE_change: "<<h4_int.getDEChangeCum()
             <<" dE_change_binary: "<<h4_int.getDEChangeBinaryInterrupt()
             <<" dE_change_single: "<<h4_int.getDEChangeModifySingle()
             <<" dE_SD_change: "<<h4_int.getDESlowDownChangeCum()
             <<" dE_SD_change_binary: "<<h4_int.getDESlowDownChangeBinaryInterrupt()
             <<" dE_SD_change_single: "<<h4_int.getDESlowDownChangeModifySingle()
if (fmod(h4_int.getTimeInt(), h4_manager.step.getDtMax())==0.0) {
#ifdef HARD_COUNT_NO_NEIGHBOR
void checkNeighborExist() {
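    // Marks particles (and all members of active groups) that satisfy the
    // pair-separation criterion against the larger of the two changeover
    // outer radii; entries of table_neighbor_exist are flipped to true.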
    for (int k=0; k<n_act_single; k++) {
        PS::S32 i = act_single_index[k];
        PS::F64 r_out_j = (j<index_offset_group)? h4_int.particles[j].changeover.getRout() : h4_int.groups[j-index_offset_group].particles.cm.changeover.getRout();
        if (rij2<std::max(r_out_i, r_out_j)) table_neighbor_exist[i] = true;
    for (int k=0; k<n_act_group; k++) {
        PS::S32 i = act_group_index[k];
        PS::F64 r_out_i = h4_int.groups[i].particles.cm.changeover.getRout();
        PS::F64 r_out_j = (j<index_offset_group)? h4_int.particles[j].changeover.getRout() : h4_int.groups[j-index_offset_group].particles.cm.changeover.getRout();
        if (rij2<std::max(r_out_i, r_out_j)) {
            for (int ki=0; ki<h4_int.groups[i].particles.getSize(); ki++) {
                ASSERT(ki_index>=0&&ki_index<table_neighbor_exist.size());
                table_neighbor_exist[ki_index] = true;
#ifdef HARD_CHECK_ENERGY
PS::F64 ekin, epot, ekin_sd, epot_sd;
auto& pcm = sym_int.particles.cm;
pcm.pos += pcm.vel * _time_end;
ASSERT(!std::isinf(pcm.vel[0]));
ASSERT(!std::isnan(pcm.vel[0]));
pcm.Ptcl::calcRSearch(_time_end);
#ifdef HARD_CHECK_ENERGY
auto& bink = sym_int.info.getBinaryTreeRoot();
Float dm = bink.mass - pcm.mass;
Float de_kin = 0.5*dm*(vcm[0]*vcm[0]+vcm[1]*vcm[1]+vcm[2]*vcm[2]);
auto& vbin = bink.vel;
de_kin += bink.mass*(vbin[0]*vcm[0]+vbin[1]*vcm[1]+vbin[2]*vcm[2]);
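// Kinetic-energy bookkeeping for a mass change dm of the group c.m.:
// the recorded correction is de_kin = 0.5*dm*|v_cm|^2 + m_bin*(v_bin . v_cm),
// combining the mass change at the cluster-frame c.m. velocity with the
// cross term from the binary's own velocity v_bin.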
sym_int.particles.shiftToOriginFrame();
sym_int.particles.template writeBackMemberAll<PtclH4>();
for (PS::S32 i=0; i<n_members; i++) {
#ifdef STELLAR_EVOLUTION
    ASSERT(pi.group_data.artificial.isUnused());
    pi.r_search = std::max(pcm.r_search, pi.r_search);
#ifdef CLUSTER_VELOCITY
    pi.group_data.cm.mass = pcm.mass;
    pi.group_data.cm.vel[0] = pcm.vel[0];
    pi.group_data.cm.vel[1] = pcm.vel[1];
    pi.group_data.cm.vel[2] = pcm.vel[2];
    ASSERT(!std::isinf(pi.pos[0]));
    ASSERT(!std::isnan(pi.pos[0]));
    ASSERT(!std::isinf(pi.vel[0]));
    ASSERT(!std::isnan(pi.vel[0]));
ARC_substep_sum += sym_int.profile.step_count;
#ifdef HARD_CHECK_ENERGY
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
ekin_sd = sym_int.getEkinSlowDown();
epot_sd = sym_int.getEpotSlowDown();
#ifdef HARD_CHECK_ENERGY
h4_int.calcEnergySlowDown(false);
h4_int.writeBackGroupMembers();
h4_int.particles.cm.pos += h4_int.particles.cm.vel * _time_end;
h4_int.particles.shiftToOriginFrame();
#ifdef HARD_CHECK_ENERGY
auto& pcm = h4_int.particles.cm;
Float vcm_org[3] = {pcm.vel[0], pcm.vel[1], pcm.vel[2]};
Float mcm_bk = pcm.mass;
h4_int.particles.calcCenterOfMass();
Float dm = pcm.mass - mcm_bk;
Float de_kin = 0.5*dm*(vcm_org[0]*vcm_org[0]+vcm_org[1]*vcm_org[1]+vcm_org[2]*vcm_org[2]);
Float dvcm[3] = {pcm.vel[0] - vcm_org[0], pcm.vel[1] - vcm_org[1], pcm.vel[2] - vcm_org[2]};
de_kin += pcm.mass*(dvcm[0]*vcm_org[0]+dvcm[1]*vcm_org[1]+dvcm[2]*vcm_org[2]);
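// Same bookkeeping for the Hermite cluster: after recomputing the c.m., the
// correction de_kin = 0.5*dm*|v_org|^2 + m_new*(dv_cm . v_org) records the
// kinetic-energy shift caused by the mass change dm and the induced c.m.
// velocity change dv_cm.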
energy.de += h4_int.getEnergyError();
#if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
ekin_sd = h4_int.getEkinSlowDown();
epot_sd = h4_int.getEpotSlowDown();
auto& h4_pcm = h4_int.particles.cm;
const PS::S32 k = group_index[i];
ASSERT(h4_int.groups[k].particles.cm.changeover.getRout()>0);
auto& pcm = h4_int.groups[k].particles.cm;
pcm.vel += h4_pcm.vel;
ASSERT(!std::isinf(pcm.vel[0]));
ASSERT(!std::isnan(pcm.vel[0]));
pcm.Ptcl::calcRSearch(_time_end);
const PS::S32 n_member = h4_int.groups[k].particles.getSize();
for (PS::S32 j=0; j<n_member; j++) {
    auto* pj = h4_int.groups[k].particles.getMemberOriginAddress(j);
    pj->r_search = std::max(pj->r_search, pcm.r_search);
#ifdef STELLAR_EVOLUTION
    ASSERT(pj->group_data.artificial.isUnused());
#ifdef CLUSTER_VELOCITY
    pj->group_data.cm.mass = pcm.mass;
    pj->group_data.cm.vel[0] = pcm.vel[0];
    pj->group_data.cm.vel[1] = pcm.vel[1];
    pj->group_data.cm.vel[2] = pcm.vel[2];
    ASSERT(pj->r_search>pj->changeover.getRout());
    ASSERT(!std::isinf(pj->pos[0]));
    ASSERT(!std::isnan(pj->pos[0]));
    ASSERT(!std::isinf(pj->vel[0]));
    ASSERT(!std::isnan(pj->vel[0]));
const PS::S32* single_index = h4_int.getSortDtIndexSingle();
auto& pi = h4_int.particles[single_index[i]];
#ifdef STELLAR_EVOLUTION
ASSERT(pi.group_data.artificial.isUnused());
#ifdef CLUSTER_VELOCITY
pi.group_data.cm.mass = 0.0;
pi.group_data.cm.vel[0] = 0.0;
pi.group_data.cm.vel[1] = 0.0;
pi.group_data.cm.vel[2] = 0.0;
ASSERT(!std::isinf(pi.pos[0]));
ASSERT(!std::isnan(pi.pos[0]));
ASSERT(!std::isinf(pi.vel[0]));
ASSERT(!std::isnan(pi.vel[0]));
pi.Ptcl::calcRSearch(_time_end);
H4_step_sum += h4_int.profile.hermite_single_step_count + h4_int.profile.hermite_group_step_count;
ARC_substep_sum += h4_int.profile.ar_step_count;
ARC_tsyn_step_sum += h4_int.profile.ar_step_count_tsyn;
std::cerr<<"Large AR step cluster found: total step: "<<h4_int.profile.ar_step_count<<std::endl;
#ifdef HARD_COUNT_NO_NEIGHBOR
for (PS::S32 i=0; i<table_neighbor_exist.size(); i++) {
    if(!table_neighbor_exist[i]) n_neighbor_zero++;
#ifdef AR_DEBUG_PRINT
const PS::S32 k = group_index[i];
auto& groupk = h4_int.groups[k];
auto& bink = groupk.info.getBinaryTreeRoot();
std::cerr<<"Group k:"<<std::setw(2)<<k
         <<" N_member: "<<std::setw(4)<<groupk.particles.getSize()
         <<" step: "<<std::setw(12)<<groupk.profile.step_count_sum
         <<" step(tsyn): "<<std::setw(10)<<groupk.profile.step_count_tsyn_sum
         <<" Soft_Pert: "<<std::setw(20)<<groupk.perturber.soft_pert_min
         <<" Pert_In: "<<std::setw(20)<<bink.slowdown.getPertIn()
         <<" Pert_Out: "<<std::setw(20)<<bink.slowdown.getPertOut()
         <<" SD: "<<std::setw(20)<<bink.slowdown.getSlowDownFactor()
         <<" SD(org): "<<std::setw(20)<<bink.slowdown.getSlowDownFactorOrigin()
         <<" semi: "<<std::setw(20)<<bink.semi
         <<" ecc: "<<std::setw(20)<<bink.ecc
         <<" period: "<<std::setw(20)<<bink.period
         <<" NB: "<<std::setw(4)<<groupk.perturber.neighbor_address.getSize()
if (groupk.profile.step_count_tsyn_sum>10000) {
#ifdef HARD_CHECK_ENERGY
#ifdef HARD_DEBUG_PRINT
std::cerr<<"Hard Energy: "
         <<" Ekin_SD: "<<ekin_sd
         <<" Epot_SD: "<<epot_sd
         <<" dE: "<<energy.de
         <<" dE_SD: "<<energy.de_sd
         <<" H4_step_sum: "<<H4_step_sum
         <<" ARC_substep_sum: "<<ARC_substep_sum
         <<" ARC_tsyn_step_sum: "<<ARC_tsyn_step_sum
std::cerr<<"Hard energy error significant!"
         <<" dE_SD: "<<energy.de_sd
         <<" dE_SD/Etot_SD: "<<energy.de_sd/(ekin_sd+epot_sd)
_fout<<"Interrupt condition triggered! Time: ";
else _fout<<"(Hermite) ";
for (int j=0; j<2; j++) {
ARC_substep_sum = 0;
ARC_tsyn_step_sum = 0;
#ifdef HARD_COUNT_NO_NEIGHBOR
table_neighbor_exist.resizeNoInitialize(0);
n_neighbor_zero = 0;
#ifdef HARD_CHECK_ENERGY
PS::ReallocatableArray<PtclH4> ptcl_hard_;
PS::ReallocatableArray<PS::S32> n_ptcl_in_cluster_;
PS::ReallocatableArray<PS::S32> n_ptcl_in_cluster_disp_;
PS::ReallocatableArray<PS::S32> n_group_in_cluster_;
PS::ReallocatableArray<PS::S32> n_group_in_cluster_offset_;
PS::ReallocatableArray<PS::S32> n_member_in_group_;
PS::ReallocatableArray<PS::S32> n_member_in_group_offset_;
PS::ReallocatableArray<PS::S32> adr_first_ptcl_arti_in_cluster_;
PS::ReallocatableArray<PS::S32> i_cluster_changeover_update_;
PS::S32 n_group_member_remote_;
PS::ReallocatableArray<HardIntegrator*> interrupt_list_;
struct OPLessIDCluster{
    template<class T> bool operator() (const T & left, const T & right) const {
        return left.id_cluster < right.id_cluster;
PS::ReallocatableArray<COMM::BinaryTree<PtclH4,COMM::Binary>> binary_table;
#ifdef HARD_COUNT_NO_NEIGHBOR
#ifdef HARD_CHECK_ENERGY
ASSERT(n_hard_int_max_>0);
template <class Tchp, class Tptcl>
static void collectGroupMemberAdrAndSetMemberParametersIter(Tchp& _par, Tptcl*& _ptcl) {
    _par.group_list[_par.n++] = _ptcl - _par.adr_ref;
    assert(_ptcl->mass>0.0);
    if (_par.pcm_id<0) {
        _ptcl->group_data.artificial.setParticleTypeToMember(_ptcl->mass);
    _ptcl->group_data.artificial.setParticleTypeToMember(_ptcl->mass, - (_par.pcm_id+1));
    PS::F64 rin_cm = _par.changeover_cm->getRin();
    PS::F64 rin_p = _ptcl->changeover.getRin();
    if (rin_p!=rin_cm) {
        if (abs(rin_p-rin_cm)<1e-10) {
            _ptcl->changeover = *_par.changeover_cm;
        _ptcl->changeover.r_scale_next = rin_cm/rin_p;
        _ptcl->r_search = std::max(_ptcl->r_search, _par.rsearch_cm);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
        if (_ptcl->r_search<rin_p) {
            std::cerr<<"Error, r_search<r_out found!\n"
                     <<"bin changeover: ";
            _par.changeover_cm->print(std::cerr);
            std::cerr<<"member changeover: ";
            _ptcl->changeover.print(std::cerr);
            std::cerr<<"member mass: "<<_ptcl->mass
                     <<" member r_search: "<<_ptcl->r_search
                     <<" cm r_search: "<<_par.rsearch_cm
        _par.changeover_update_flag = true;
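// When a member's inner changeover radius differs from the c.m. value beyond
// round-off, r_scale_next stores the ratio rin_cm/rin_p; the actual radii are
// rescaled later via changeover.updateWithRScale(), after the tree force has
// been corrected for the change (see calcAccChangeOverCorrection below).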
template <class Tchp, class Tptcl>
static PS::S64 calcBinaryIDChangeOverAndRSearchIter(Tchp& _par, const PS::S64& _id1, const PS::S64& _id2, COMM::BinaryTree<Tptcl,COMM::Binary>& _bin) {
    if (_id1<0) _bin.id = _bin.getLeftMember()->id;
    else _bin.id = _id1;
    if (_id2<0) _bin.id = std::min(_bin.id, _bin.getRightMember()->id);
    else _bin.id = std::min(_bin.id, _id2);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    _bin.changeover.setR(_bin.mass*_par.mean_mass_inv, _par.rin, _par.rout);
    assert(!std::isinf(_bin.vel[0]));
    assert(!std::isnan(_bin.vel[0]));
    _bin.Ptcl::calcRSearch(_par.dt_tree);
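// Linear-cutoff correction used below: with the changeover kernel k(r) and
// r_max = max(r, r_out), acc -= G*m_j*(k(r)/r^3 - 1/r_max^3)*dr applies the
// kernel-weighted short-range force and removes the linear-cutoff part that
// the soft (tree) force already contains for this neighbor.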
template <class Tpi>
static void calcAccPotShortWithLinearCutoff(Tpi& _pi,
    const PS::F64 dr2_eps = dr2 + eps_sq;
    const PS::F64 drinv = 1.0/sqrt(dr2_eps);
    const PS::F64 drinv2 = drinv * drinv;
    const PS::F64 gmor3 = gmor * drinv2;
    const PS::F64 dr_eps = drinv * dr2_eps;
#if ((! defined P3T_64BIT) && (defined USE_SIMD)) || (defined USE_GPU)
    const PS::F32 r_out2 = r_out_32 * r_out_32;
    const PS::F32 dr2_max = (dr2_eps_32 > r_out2) ? dr2_eps_32 : r_out2;
    const PS::F32 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F32 drinv2_max = drinv_max*drinv_max;
    const PS::F32 gmor3_max = gmor_max * drinv2_max;
    _pi.acc -= gmor3*k*dr - gmor3_max*dr_32;
    const PS::F64 r_out2 = r_out * r_out;
    const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
    const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F64 drinv2_max = drinv_max*drinv_max;
    const PS::F64 gmor3_max = gmor_max * drinv2_max;
    _pi.acc -= (gmor3*k - gmor3_max)*dr;
    if (pj_artificial.isSingle()) {
        _pi.pot_soft -= gmor*kpot - gmor_max;
        _pi.pot_tot -= (gmor - gmor_max);
    else if (pj_artificial.isMember()) {
        gmor = G*pj_artificial.getMassBackup()*drinv;
        _pi.pot_soft -= gmor*kpot - gmor_max;
        _pi.pot_tot -= (gmor - gmor_max);
    _pi.pot_soft += gmor_max;
    _pi.pot_tot += gmor_max;
    _pi.pot_tot = _pi.pot_soft;
template <class Tpi>
static void calcAccPotShortWithLinearCutoff(Tpi& _pi,
    const PS::F64 dr2_eps = dr2 + eps_sq;
    const PS::F64 drinv = 1.0/sqrt(dr2_eps);
    const PS::F64 drinv2 = drinv * drinv;
    const PS::F64 gmor3 = gmor * drinv2;
    const PS::F64 dr_eps = drinv * dr2_eps;
#if ((! defined P3T_64BIT) && (defined USE_SIMD)) || (defined USE_GPU)
    const PS::F32 r_out2 = r_out_32 * r_out_32;
    const PS::F32 dr2_max = (dr2_eps_32 > r_out2) ? dr2_eps_32 : r_out2;
    const PS::F32 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F32 drinv2_max = drinv_max*drinv_max;
    const PS::F32 gmor3_max = gmor_max * drinv2_max;
    _pi.acc -= gmor3*k*dr - gmor3_max*dr_32;
    const PS::F64 r_out2 = r_out * r_out;
    const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
    const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F64 drinv2_max = drinv_max*drinv_max;
    const PS::F64 gmor3_max = gmor_max * drinv2_max;
    _pi.acc -= (gmor3*k - gmor3_max)*dr;
    if (pj_artificial.isSingle()) {
        _pi.pot_soft -= gmor*kpot - gmor_max;
        _pi.pot_tot -= (gmor - gmor_max);
    else if (pj_artificial.isMember()) {
        gmor = G*pj_artificial.getMassBackup()*drinv;
        _pi.pot_soft -= gmor*kpot - gmor_max;
        _pi.pot_tot -= (gmor - gmor_max);
    _pi.pot_soft += gmor_max;
    _pi.pot_tot += gmor_max;
    _pi.pot_tot = _pi.pot_soft;
template <class Tpi>
static void calcAccChangeOverCorrection(Tpi& _pi,
    const PS::F64 dr2_eps = dr2 + eps_sq;
    const PS::F64 drinv = 1.0/sqrt(dr2_eps);
    const PS::F64 drinv2 = drinv * drinv;
    const PS::F64 gmor3 = gmor * drinv2;
    const PS::F64 dr_eps = drinv * dr2_eps;
    chinew.setR(_pi.changeover.getRin()*_pi.changeover.r_scale_next, _pi.changeover.getRout()*_pi.changeover.r_scale_next);
    _pi.acc -= gmor3*(knew-kold)*dr;
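// Changeover-update correction: when the changeover radii are rescaled by
// r_scale_next, the pair force difference between the new and old kernels,
// acc -= G*m_j*(k_new - k_old)/r^3 * dr, is applied so the total force stays
// consistent across the switch.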
template <class Tpi>
static void calcAccChangeOverCorrection(Tpi& _pi,
    const PS::F64 dr2_eps = dr2 + eps_sq;
    const PS::F64 drinv = 1.0/sqrt(dr2_eps);
    const PS::F64 drinv2 = drinv * drinv;
    const PS::F64 gmor3 = gmor * drinv2;
    const PS::F64 dr_eps = drinv * dr2_eps;
    chinew.setR(_pi.changeover.getRin()*_pi.changeover.r_scale_next, _pi.changeover.getRout()*_pi.changeover.r_scale_next);
    _pi.acc -= gmor3*(knew-kold)*dr;
template <class Tpi>
static void calcAcorrShortWithLinearCutoff(Tpi& _pi,
    const PS::F64 dr2_eps = dr2 + eps_sq;
    const PS::F64 drinv = 1.0/sqrt(dr2_eps);
    const PS::F64 drdadrinv = drda*drinv;
    const PS::F64 drinv2 = drinv * drinv;
    const PS::F64 gmor3 = gmor * drinv2;
    const PS::F64 dr_eps = drinv * dr2_eps;
    const PS::F64 alpha = drda*drinv2;
#if ((! defined P3T_64BIT) && (defined USE_SIMD)) || (defined USE_GPU)
    const PS::F32 r_out2 = r_out_32 * r_out_32;
    const PS::F32 drda_32 = dr_32*da_32;
    const PS::F32 dr2_max = (dr2_eps_32 > r_out2) ? dr2_eps_32 : r_out2;
    const PS::F32 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F32 drinv2_max = drinv_max*drinv_max;
    const PS::F32 gmor3_max = gmor_max * drinv2_max;
    const PS::F32 alpha_max = drda_32 * drinv2_max;
    const PS::F32vec acorr_max = gmor3_max * (da_32 - 3.0*alpha_max * dr_32);
    const PS::F64 r_out2 = r_out * r_out;
    const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
    const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F64 drinv2_max = drinv_max*drinv_max;
    const PS::F64 gmor3_max = gmor_max * drinv2_max;
    const PS::F64 alpha_max = drda * drinv2_max;
    const PS::F64vec acorr_max = gmor3_max * (da - 3.0*alpha_max * dr);
    const PS::F64vec acorr_k = gmor3 * (k*da - (3.0*k*alpha - kdot) * dr);
    _pi.acorr -= 2.0 * (acorr_k - acorr_max);
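// The acorr update mirrors the acceleration correction for the force
// derivative: with alpha = (dr . da)/r^2, the kernel part
// gmor3*(k*da - (3*k*alpha - kdot)*dr) replaces the linear-cutoff part
// gmor3_max*(da - 3*alpha_max*dr); presumably this feeds the
// second-derivative (time-step) estimate of the Hermite scheme.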
template <class Tpi>
static void calcAcorrShortWithLinearCutoff(Tpi& _pi,
    const PS::F64 r_out2 = r_out * r_out;
    const PS::F64 dr2_eps = dr2 + eps_sq;
    const PS::F64 drinv = 1.0/sqrt(dr2_eps);
    const PS::F64 drdadrinv = drda*drinv;
    const PS::F64 drinv2 = drinv * drinv;
    const PS::F64 gmor3 = gmor * drinv2;
    const PS::F64 dr_eps = drinv * dr2_eps;
    const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
    const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
    const PS::F64 drinv2_max = drinv_max*drinv_max;
    const PS::F64 gmor3_max = gmor_max * drinv2_max;
    const PS::F64 alpha = drda*drinv2;
    const PS::F64 alpha_max = drda * drinv2_max;
    const PS::F64vec acorr_k = gmor3 * (k*da - (3.0*k*alpha - kdot) * dr);
    const PS::F64vec acorr_max = gmor3_max * (da - 3.0*alpha_max * dr);
    _pi.acorr -= 2.0 * (acorr_k - acorr_max);
template <class Tpsoft, class Ttree, class Tepj>
static void correctForceWithCutoffTreeNeighborOneParticleImp(Tpsoft& _psoft,
    const bool _acorr_flag=false) {
    Tepj * ptcl_nb = NULL;
    PS::S32 n_ngb = _tree.getNeighborListOneParticle(_psoft, ptcl_nb);
    if (_psoft.group_data.artificial.isSingle() || (_psoft.group_data.artificial.isMember() && _psoft.getParticleCMAddress()<0)) {
        _psoft.pot_tot += pot_cor;
        _psoft.pot_soft += pot_cor;
    for(PS::S32 k=0; k<n_ngb; k++){
        if (ptcl_nb[k].id == _psoft.id) continue;
        calcAcorrShortWithLinearCutoff(_psoft, ptcl_nb[k]);
        calcAccPotShortWithLinearCutoff(_psoft, ptcl_nb[k]);
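    // For every tree neighbor of this particle (self excluded), either the
    // force derivative (when _acorr_flag is set) or the force and potential
    // are corrected with the linear-cutoff kernels above.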
template <class Tsys>
void correctForceWithCutoffArtificialOneClusterImp(Tsys& _sys,
    const PtclH4* _ptcl_local,
    const PS::S32 _adr_real_start,
    const PS::S32* adr_first_ptcl_arti_in_cluster_,
    const bool _acorr_flag) {
    for (int j=0; j<_n_group; j++) {
        PS::S32 j_start = adr_first_ptcl_arti_in_cluster_[j];
#ifdef ARTIFICIAL_PARTICLE_DEBUG
        if (j_start<0) assert(_n_group==1&&_adr_real_end-_adr_real_start==2);
        if (j_start<0) continue;
        auto* p_arti_j = &(_sys[j_start]);
        auto* pj = ap_manager.getTidalTensorParticles(p_arti_j);
        auto correctionLoop = [&](const PS::S32 k) {
            for (int kj=0; kj<_n_group; kj++) {
                PS::S32 kj_start = adr_first_ptcl_arti_in_cluster_[kj];
                if (kj_start<0) continue;
                auto* porb_kj = ap_manager.getOrbitalParticles(&_sys[kj_start]);
                for (int kk=0; kk<ap_manager.getOrbitalParticleN(); kk++) {
                    if(&porb_kj[kk]==&(pj[k])) continue;
                    calcAcorrShortWithLinearCutoff(pj[k], porb_kj[kk]);
                    calcAccPotShortWithLinearCutoff(pj[k], porb_kj[kk]);
            for (int kj=_adr_real_start; kj<_adr_real_end; kj++) {
                PS::S64 adr_kj = _ptcl_local[kj].adr_org;
                calcAcorrShortWithLinearCutoff(pj[k], _sys[adr_kj]);
                calcAccPotShortWithLinearCutoff(pj[k], _ptcl_local[kj]);
        for (int k=0; k<ap_manager.getTidalTensorParticleN(); k++) {
        pj = ap_manager.getCMParticles(p_arti_j);
        ap_manager.correctArtficialParticleForce(p_arti_j);
template <class Tsys, class Tpsoft, class Ttree, class Tepj>
void correctForceWithCutoffTreeNeighborImp(Tsys& _sys,
    const PtclH4* _ptcl_local,
    const PS::S32 _adr_ptcl_artificial_start,
    const bool _acorr_flag=false) {
#pragma omp parallel for schedule(dynamic)
    for (int i=0; i<_n_ptcl; i++) {
        PS::S64 adr = _ptcl_local[i].adr_org;
        if(adr>=0) correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[adr], _tree, _acorr_flag);
    const PS::S32 n_tot = _sys.getNumberOfParticleLocal();
#pragma omp parallel for schedule(dynamic)
    for (int i=_adr_ptcl_artificial_start; i<n_tot; i++)
        correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[i], _tree, _acorr_flag);
    const PS::S32 n_artificial_per_group = ap_manager.getArtificialParticleN();
    assert((n_tot-_adr_ptcl_artificial_start)%n_artificial_per_group==0);
#pragma omp parallel for schedule(dynamic)
    for (int i=_adr_ptcl_artificial_start; i<n_tot; i+=n_artificial_per_group)
        ap_manager.correctArtficialParticleForce(&(_sys[i]));
n_hard_int_max_ = 0;
n_hard_int_use_ = 0;
ARC_substep_sum = 0;
ARC_tsyn_step_sum = 0;
ARC_n_groups_iso = 0;
#ifdef HARD_COUNT_NO_NEIGHBOR
n_neighbor_zero = 0;
#ifdef HARD_CHECK_ENERGY
assert(_n_hard_int>0);
assert(n_hard_int_use_==0);
if (hard_int_!=NULL) delete [] hard_int_;
const PS::S32 num_thread = PS::Comm::getNumberOfThread();
n_hard_int_max_ = _n_hard_int + num_thread;
n_hard_int_use_ = 0;
if (hard_int_!=NULL) delete [] hard_int_;
ptcl_hard_.resizeNoInitialize(n);
template<class Tsys, class Tptcl, class Tmediator>
    const PS::ReallocatableArray<Tmediator> & med,
    const PS::ReallocatableArray<Tptcl> & ptcl_recv){
    ptcl_hard_.clearSize();
    n_ptcl_in_cluster_.clearSize();
    for(PS::S32 i=0; i<med.size(); i++){
        if(med[i].adr_sys_ < 0) continue;
        if(med[i].rank_send_ != PS::Comm::getRank()) continue;
        const auto & p = sys[med[i].adr_sys_];
        ptcl_hard_.push_back(PtclHard(p, med[i].id_cluster_, med[i].adr_sys_));
        assert(med[i].adr_sys_<sys.getNumberOfParticleLocal());
        if(p.mass==0&&p.group_data.artificial.isUnused()) {
            std::cerr<<"Error: unused particle is selected! i="<<i<<"; med[i].adr_sys="<<med[i].adr_sys_<<std::endl;
    for(PS::S32 i=0; i<ptcl_recv.size(); i++){
        const Tptcl & p = ptcl_recv[i];
        ptcl_hard_.push_back(PtclHard(p, p.id_cluster, -(i+1)));
        if(p.mass==0&&p.group_data.artificial.isUnused()) {
            std::cerr<<"Error: received unused particle! i="<<i<<std::endl;
    if(ptcl_hard_.size() == 0) return;
std::sort(ptcl_hard_.getPointer(), ptcl_hard_.getPointer(ptcl_hard_.size()),
PS::S32 n_tot = ptcl_hard_.size();
PS::S32 id_cluster_ref = -999;
for(PS::S32 i=0; i<n_tot; i++){
    if(id_cluster_ref != ptcl_hard_[i].id_cluster){
        id_cluster_ref = ptcl_hard_[i].id_cluster;
        n_ptcl_in_cluster_.push_back(0);
    n_ptcl_in_cluster_.back()++;
PS::S32 n_cluster = n_ptcl_in_cluster_.size();
n_ptcl_in_cluster_disp_.resizeNoInitialize(n_cluster+1);
n_ptcl_in_cluster_disp_[0] = 0;
for(PS::S32 i=0; i<n_cluster; i++){
    assert(n_ptcl_in_cluster_[i]>1);
    n_ptcl_in_cluster_disp_[i+1] = n_ptcl_in_cluster_disp_[i] + n_ptcl_in_cluster_[i];
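// After sorting by id_cluster, a single pass counts the members per cluster;
// the running sums in n_ptcl_in_cluster_disp_ then give CSR-style offsets, so
// cluster i occupies the range [disp[i], disp[i+1]) in ptcl_hard_.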
return n_group_member_remote_;
return n_ptcl_in_cluster_.size();
return interrupt_list_.size();
return interrupt_list_[i];
return n_ptcl_in_cluster_.getPointer(i);
return n_ptcl_in_cluster_disp_.getPointer(i);
return n_group_in_cluster_.getPointer(i);
return n_group_in_cluster_offset_.getPointer(i);
return adr_first_ptcl_arti_in_cluster_.getPointer(i);
return i_cluster_changeover_update_.size();
time_origin_ = _time_origin;
time_write_back_ = time_origin_;
return time_origin_;
template<class Tsys>
    const PS::ReallocatableArray<PS::S32> & adr_array){
    const PS::S32 n = adr_array.size();
#pragma omp parallel for
    ptcl_hard_[i].DataCopy(sys[adr]);
    ptcl_hard_[i].adr_org = adr;
const PS::S32 n = ptcl_hard_.size();
#pragma omp parallel for
    auto& pi = ptcl_hard_[i];
#ifdef STELLAR_EVOLUTION
    assert(!std::isinf(vbk.x));
    assert(!std::isnan(vbk.x));
    int modify_flag = manager->ar_manager.interaction.modifyOneParticle(pi, time_origin_, time_origin_ + _dt);
    Float de_kin = 0.5*(pi.mass*(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]) - mbk*(vbk[0]*vbk[0]+vbk[1]*vbk[1]+vbk[2]*vbk[2]));
    if (!pi.group_data.artificial.isUnused()) {
        auto& pcm = pi.group_data.cm;
        assert(pcm.mass==0.0);
        assert(pcm.vel.x==0.0);
        assert(pcm.vel.y==0.0);
        assert(pcm.vel.z==0.0);
    assert(!std::isinf(pi.vel.x));
    assert(!std::isnan(pi.vel.x));
    pi.Ptcl::calcRSearch(_dt);
time_origin_ += _dt;
template<class Tsys>
    PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
    const PS::S32 n = ptcl_hard_.size();
    PS::S32 adr = ptcl_hard_[i].adr_org;
    assert(sys[adr].id == ptcl_hard_[i].id);
#ifdef STELLAR_EVOLUTION
    PS::F64 mass_bk = sys[adr].group_data.artificial.isMember()? sys[adr].group_data.artificial.getMassBackup(): sys[adr].mass;
    assert(mass_bk!=0.0);
    sys[adr].DataCopy(ptcl_hard_[i]);
#ifdef STELLAR_EVOLUTION
    sys[adr].dm = sys[adr].mass - mass_bk;
    if (sys[adr].dm!=0.0) _mass_modify_list.push_back(adr);
    assert(!std::isinf(sys[adr].pos[0]));
    assert(!std::isnan(sys[adr].pos[0]));
    assert(!std::isinf(sys[adr].vel[0]));
    assert(!std::isnan(sys[adr].vel[0]));
template<class Tsys>
    PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
    const PS::S32 n = ptcl_hard_.size();
    const PS::S32 num_thread = PS::Comm::getNumberOfThread();
    PS::ReallocatableArray<PS::S32> mass_modify_list_thx[num_thread];
    for (int i=0; i<num_thread; i++) mass_modify_list_thx[i].resizeNoInitialize(0);
#pragma omp parallel
#ifdef STELLAR_EVOLUTION
    const PS::S32 ith = PS::Comm::getThreadNum();
    PS::S32 adr = ptcl_hard_[i].adr_org;
    assert(sys[adr].id == ptcl_hard_[i].id);
#ifdef STELLAR_EVOLUTION
    PS::F64 mass_bk = sys[adr].group_data.artificial.isMember()? sys[adr].group_data.artificial.getMassBackup(): sys[adr].mass;
    assert(mass_bk!=0.0);
    sys[adr].DataCopy(ptcl_hard_[i]);
#ifdef STELLAR_EVOLUTION
    sys[adr].dm = sys[adr].mass - mass_bk;
    if (sys[adr].dm!=0.0) mass_modify_list_thx[ith].push_back(adr);
    assert(!std::isinf(sys[adr].pos[0]));
    assert(!std::isnan(sys[adr].pos[0]));
    assert(!std::isinf(sys[adr].vel[0]));
    assert(!std::isnan(sys[adr].vel[0]));
    for (int i=0; i<num_thread; i++) {
        for (int k=0; k<mass_modify_list_thx[i].size(); k++) _mass_modify_list.push_back(mass_modify_list_thx[i][k]);
template<class Tsys>
    PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
template<class Tsys>
    PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
template<class Tsys>
    const PS::ReallocatableArray<PS::S32> & _adr_array,
    const PS::ReallocatableArray<PS::S32> & _n_ptcl_in_cluster){
    const PS::S32 n_cluster = _n_ptcl_in_cluster.size();
    n_ptcl_in_cluster_.resizeNoInitialize(n_cluster);
    n_ptcl_in_cluster_disp_.resizeNoInitialize(n_cluster+1);
    n_ptcl_in_cluster_disp_[0] = 0;
    for(PS::S32 i=0; i<n_cluster; i++){
        n_ptcl_in_cluster_[i] = _n_ptcl_in_cluster[i];
        assert(n_ptcl_in_cluster_[i]>1);
        n_ptcl_in_cluster_disp_[i+1] = n_ptcl_in_cluster_disp_[i] + n_ptcl_in_cluster_[i];
    const PS::S32 n_ptcl = _adr_array.size();
    ptcl_hard_.resizeNoInitialize(n_ptcl);
#pragma omp parallel for
    for(PS::S32 i=0; i<n_ptcl; i++){
        ptcl_hard_[i].DataCopy(sys[adr]);
        ptcl_hard_[i].adr_org = adr;
    interrupt_list_.resizeNoInitialize(0);
template<class Tpsoft>
    assert(n_hard_int_use_==0);
    const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
    const PS::S32 num_thread = PS::Comm::getNumberOfThread();
    assert(n_hard_int_max_>num_thread);
    for (PS::S32 i=0; i<num_thread; i++) {
        hard_int_thread[i] = &hard_int_[i];
    PS::F64 time_thread[num_thread];
    PS::S64 num_cluster[num_thread];
    for (PS::S32 i=0; i<num_thread; i++) {
#pragma omp parallel for schedule(dynamic)
    for(PS::S32 i=0; i<n_cluster; i++){
        const PS::S32 ith = PS::Comm::getThreadNum();
        time_thread[ith] -= PS::GetWtime();
        const PS::S32 adr_head = n_ptcl_in_cluster_disp_[i];
        const PS::S32 n_ptcl = n_ptcl_in_cluster_[i];
        const PS::S32 n_group = n_group_in_cluster_[i];
        Tpsoft* ptcl_artificial_ptr=NULL;
        PS::S32* n_member_in_group_ptr=NULL;
        PS::S32 ptcl_arti_first_index = adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i]];
        if (ptcl_arti_first_index>=0) ptcl_artificial_ptr = &(_ptcl_soft[ptcl_arti_first_index]);
        else ARC_n_groups_iso += 1;
        n_member_in_group_ptr = &(n_member_in_group_[n_group_in_cluster_offset_[i]]);
        num_cluster[ith] += n_ptcl;
        ARC_n_groups += n_group;
        assert(ith<hard_dump.size);
#ifdef HARD_DEBUG_PROFILE
        PS::F64 tstart = PS::GetWtime();
        hard_int_thread[ith]->initial(ptcl_hard_.getPointer(adr_head), n_ptcl, ptcl_artificial_ptr, n_group, n_member_in_group_ptr, manager, time_origin_);
        if (interrupt_binary.status!=AR::InterruptStatus::none) {
#pragma omp atomic capture
            hard_int_thread[ith] = hard_int_front_ptr++;
            assert(hard_int_thread[ith]!=&hard_int_[n_hard_int_max_]);
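            // If a cluster's integration stops on an interrupt (e.g. a binary
            // that needs external handling), the thread keeps that
            // HardIntegrator holding the paused state and atomically claims
            // the next free slot from the pool for subsequent clusters; the
            // assert above guards against pool exhaustion.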
        ARC_substep_sum += hard_int_thread[ith]->ARC_substep_sum;
        ARC_tsyn_step_sum += hard_int_thread[ith]->ARC_tsyn_step_sum;
        H4_step_sum += hard_int_thread[ith]->H4_step_sum;
#ifdef HARD_COUNT_NO_NEIGHBOR
        n_neighbor_zero += hard_int_thread[ith]->n_neighbor_zero;
#ifdef HARD_CHECK_ENERGY
        energy += hard_int_thread[ith]->energy;
        hard_int_thread[ith]->clear();
        time_thread[ith] += PS::GetWtime();
#ifdef HARD_DEBUG_PROFILE
        PS::F64 tend = PS::GetWtime();
        std::cerr<<"HT: "<<i<<" "<<ith<<" "<<n_cluster<<" "<<n_ptcl<<" "<<tend-tstart<<std::endl;
        auto* pi = ptcl_hard_.getPointer(adr_head);
        for (PS::S32 j=0; j<n_ptcl; j++) {
#ifdef CLUSTER_VELOCITY
            auto& pij_cm = pi[j].group_data.cm;
            pij_cm.mass = pij_cm.vel.x = pij_cm.vel.y = pij_cm.vel.z = 0.0;
            ASSERT(!std::isinf(pi[j].vel[0]));
            ASSERT(!std::isnan(pi[j].vel[0]));
            pi[j].calcRSearch(dt);
    assert(interrupt_list_.size()==0);
    for (auto iptr = hard_int_; iptr<hard_int_front_ptr; iptr++)
        if (iptr->is_initialized) {
            assert(iptr->interrupt_binary.status!=AR::InterruptStatus::none);
#ifdef HARD_INTERRUPT_PRINT
            iptr->printInterruptBinaryInfo(std::cerr);
            interrupt_list_.push_back(iptr);
    PS::S32 n_interrupt = interrupt_list_.size();
    if (n_interrupt==0) time_origin_ += dt;
    else interrupt_dt_ = dt;
PS::S32 n_interrupt = interrupt_list_.size();
#pragma omp parallel for schedule(dynamic)
for (PS::S32 i=0; i<n_interrupt; i++) {
    auto hard_int_ptr = interrupt_list_[i];
    auto& interrupt_binary = hard_int_ptr->integrateToTime(interrupt_dt_);
    if (interrupt_binary.status==AR::InterruptStatus::none) {
        hard_int_ptr->driftClusterCMRecordGroupCMDataAndWriteBack(interrupt_dt_);
        ARC_substep_sum += hard_int_ptr->ARC_substep_sum;
        ARC_tsyn_step_sum += hard_int_ptr->ARC_tsyn_step_sum;
        H4_step_sum += hard_int_ptr->H4_step_sum;
#ifdef HARD_CHECK_ENERGY
        energy += hard_int_ptr->energy;
        hard_int_ptr->clear();
while (i_front<i_end) {
    auto hard_int_front_ptr = interrupt_list_[i_front];
    if (hard_int_front_ptr->interrupt_binary.status!=AR::InterruptStatus::none) {
        assert(hard_int_front_ptr->is_initialized);
#ifdef HARD_INTERRUPT_PRINT
        hard_int_front_ptr->printInterruptBinaryInfo(std::cerr);
        interrupt_list_[i_front] = interrupt_list_[--i_end];
        interrupt_list_.decreaseSize(1);
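        // Entries whose interrupt has been resolved are removed by swapping
        // with the current last element and shrinking the list: O(1) removal,
        // at the cost of not preserving the order of pending interrupts.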
n_interrupt = interrupt_list_.size();
if (n_interrupt==0) time_origin_ += interrupt_dt_;
template <class Tptcl>
    Tptcl* _ptcl_in_cluster,
    PS::ReallocatableArray<Tptcl> & _ptcl_artificial,
    PS::ReallocatableArray<COMM::BinaryTree<PtclH4,COMM::Binary>> & _binary_table,
    PS::ReallocatableArray<GroupIndexInfo>& _n_member_in_group,
    PS::ReallocatableArray<PS::S32>& _changeover_update_list,
    PS::S32 group_ptcl_adr_list[_n_ptcl];
    PS::S32 group_ptcl_adr_offset=0;
    bool changeover_update_flag = false;
    PS::ReallocatableArray<COMM::BinaryTree<Tptcl,COMM::Binary>> binary_tree;
    binary_tree.reserve(n_members);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    binary_tree.resizeNoInitialize(n_members-1);
    COMM::BinaryTree<Tptcl,COMM::Binary>::generateBinaryTree(binary_tree.getPointer(), member_list, n_members, _ptcl_in_cluster, ap_manager.gravitational_constant);
    binary_tree.back().processTreeIter(changeover_rsearch_pars, (PS::S64)-1, (PS::S64)-1, calcBinaryIDChangeOverAndRSearchIter);
    const PS::S32 n_members = closed_binary_tree_k.getMemberN();
    PS::S32 start_index_binary_table = _binary_table.size();
    _binary_table.increaseSize(n_members);
    closed_binary_tree_k.getherBinaryTreeIter(_binary_table.getPointer(start_index_binary_table));
    const PS::S32 n_members = bin.getMemberN();
    sd.period = bin.period;
    sd.calcSlowDownFactor();
    if (bin.period*sd.getSlowDownFactor() > dt_nstep) {
        group_index_pars = { _ptcl_in_cluster, &group_ptcl_adr_list[group_ptcl_adr_offset], 0, -1, &bin.changeover, bin.r_search, false};
        bin.processLeafIter(group_index_pars, collectGroupMemberAdrAndSetMemberParametersIter);
        if (group_index_pars.changeover_update_flag) changeover_update_flag = true;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
        assert(group_index_pars.n==n_members);
#ifdef ARTIFICIAL_PARTICLE_DEBUG_PRINT
        std::cerr<<"Isolated binary case: "
                 <<" period: "<<bin.period
                 <<" semi: "<<bin.semi
                 <<" r_search: "<<bin.r_search
                 <<" tree_step: "<<_dt_tree
        _n_member_in_group.push_back(GroupIndexInfo(_i_cluster, _n_groups, n_members, group_ptcl_adr_offset, 1));
        group_ptcl_adr_offset += n_members;
    index_group[0] = (PS::F64)(_i_cluster+1);
    index_group[1] = (PS::F64)(_n_groups+1);
    const PS::S32 n_members = binary_stable_i.getMemberN();
    const int n_ptcl_artificial = _ptcl_artificial.size();
    _ptcl_artificial.increaseSize(ap_manager.getArtificialParticleN());
    Tptcl* ptcl_artificial_i = &_ptcl_artificial[n_ptcl_artificial];
    group_index_pars = { _ptcl_in_cluster, &group_ptcl_adr_list[group_ptcl_adr_offset], 0, n_ptcl_artificial, &binary_stable_i.changeover, binary_stable_i.r_search, false};
    binary_stable_i.processLeafIter(group_index_pars, collectGroupMemberAdrAndSetMemberParametersIter);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    assert(group_index_pars.n==n_members);
    if (group_index_pars.changeover_update_flag) changeover_update_flag = true;
    ap_manager.createArtificialParticles(ptcl_artificial_i, binary_stable_i, index_group, 2);
    auto* pcm = ap_manager.getCMParticles(ptcl_artificial_i);
    pcm->r_search = binary_stable_i.r_search;
    pcm->r_search += binary_stable_i.semi*(1+binary_stable_i.ecc);
    pcm->changeover = binary_stable_i.changeover;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    assert(pcm->r_search > pcm->changeover.getRout());
    Tptcl* ptcl_tt = ap_manager.getTidalTensorParticles(ptcl_artificial_i);
    for (int j=0; j<ap_manager.getTidalTensorParticleN(); j++) {
        Tptcl& pj = ptcl_tt[j];
        pj.r_search = binary_stable_i.r_search;
        pj.changeover = binary_stable_i.changeover;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
        PS::F64 rsearch_bin = pcm->r_search;
        PS::F64vec dp = pj.pos - binary_stable_i.pos;
        assert(dr<=rsearch_bin*rsearch_bin);
    Tptcl* ptcl_orbit=ap_manager.getOrbitalParticles(ptcl_artificial_i);
    PS::S32 n_orbit = ap_manager.getOrbitalParticleN();
    for (int j=0; j<n_orbit; j++) {
        Tptcl& pj = ptcl_orbit[j];
        pj.changeover = binary_stable_i.getMember(j%2)->changeover;
        PS::F64 pj_r_in = pj.changeover.getRin();
        PS::F64 bin_r_in = binary_stable_i.changeover.getRin();
        if (abs(pj_r_in - bin_r_in)>1e-10) {
            pj.changeover.r_scale_next = bin_r_in/pj_r_in;
            pj.r_search = std::max(pj.r_search, binary_stable_i.r_search);
            if (pj.r_search < pj.changeover.getRout()) {
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                assert(pj.r_search > pj.changeover.getRout()*pj.changeover.r_scale_next);
                pj.r_search = pj.changeover.getRout();
        else pj.r_search = binary_stable_i.r_search;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
        PS::F64 rsearch_bin = pcm->r_search;
        PS::F64vec dp = pj.pos - binary_stable_i.pos;
        assert(dr<=rsearch_bin*rsearch_bin);
    _n_member_in_group.push_back(GroupIndexInfo(_i_cluster, _n_groups, n_members, group_ptcl_adr_offset, 0));
    group_ptcl_adr_offset += n_members;
    assert(group_ptcl_adr_offset<=_n_ptcl);
PS::S32 ptcl_list_reorder[_n_ptcl];
for (int i=0; i<_n_ptcl; i++) ptcl_list_reorder[i] = i;
PS::S32 i_single_front=group_ptcl_adr_offset;
while (i_group<group_ptcl_adr_offset) {
    if(_ptcl_in_cluster[i_group].group_data.artificial.isSingle()) {
        while(_ptcl_in_cluster[i_single_front].group_data.artificial.isSingle()) {
            assert(i_single_front<_n_ptcl);
        PS::S32 plist_tmp = ptcl_list_reorder[i_group];
        ptcl_list_reorder[i_group] = ptcl_list_reorder[i_single_front];
        ptcl_list_reorder[i_single_front] = plist_tmp;
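// In-place partition of the index list: any single found in the leading
// group-member range is swapped with the next non-single beyond it, so all
// group members end up in front of the singles without a full sort.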
#ifdef ARTIFICIAL_PARTICLE_DEBUG
PS::S32 plist_new[group_ptcl_adr_offset];
for (int i=0; i<group_ptcl_adr_offset; i++) plist_new[i] = group_ptcl_adr_list[i];
std::sort(plist_new, plist_new+group_ptcl_adr_offset, [](const PS::S32 &a, const PS::S32 &b) {return a < b;});
std::sort(ptcl_list_reorder, ptcl_list_reorder+group_ptcl_adr_offset, [](const PS::S32 &a, const PS::S32 &b) {return a < b;});
for (int i=0; i<group_ptcl_adr_offset; i++) assert(ptcl_list_reorder[i]==plist_new[i]);
for (int i=0; i<group_ptcl_adr_offset; i++) ptcl_list_reorder[i] = group_ptcl_adr_list[i];
Tptcl ptcl_tmp[_n_ptcl];
for (int i=0; i<_n_ptcl; i++) ptcl_tmp[i]=_ptcl_in_cluster[i];
for (int i=0; i<_n_ptcl; i++) _ptcl_in_cluster[i]=ptcl_tmp[ptcl_list_reorder[i]];
if (changeover_update_flag) _changeover_update_list.push_back(_i_cluster);
template<class Tsys, class Tptcl>
    const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    n_group_in_cluster_.resizeNoInitialize(n_cluster);
    n_group_member_remote_=0;
    const PS::S32 num_thread = PS::Comm::getNumberOfThread();
    PS::ReallocatableArray<PtclH4> ptcl_artificial_thread[num_thread];
    PS::ReallocatableArray<COMM::BinaryTree<PtclH4,COMM::Binary>> binary_table_thread[num_thread];
    PS::ReallocatableArray<GroupIndexInfo> n_member_in_group_thread[num_thread];
    PS::ReallocatableArray<PS::S32> i_cluster_changeover_update_threads[num_thread];
    for (PS::S32 i=0; i<num_thread; i++) {
        ptcl_artificial_thread[i].resizeNoInitialize(0);
        binary_table_thread[i].resizeNoInitialize(0);
        n_member_in_group_thread[i].resizeNoInitialize(0);
        i_cluster_changeover_update_threads[i].resizeNoInitialize(0);
#pragma omp parallel for schedule(dynamic)
    for (PS::S32 i=0; i<n_cluster; i++){
        const PS::S32 ith = PS::Comm::getThreadNum();
        PtclH4* ptcl_in_cluster = ptcl_hard_.getPointer() + n_ptcl_in_cluster_disp_[i];
        const PS::S32 n_ptcl = n_ptcl_in_cluster_[i];
        for(PS::S32 j=0; j<n_ptcl; j++) {
            ptcl_in_cluster[j].group_data.artificial.setParticleTypeToSingle();
            PS::S64 adr=ptcl_in_cluster[j].adr_org;
            if(adr>=0) _sys[adr].group_data = ptcl_in_cluster[j].group_data;
        findGroupsAndCreateArtificialParticlesOneCluster(i, ptcl_in_cluster, n_ptcl, ptcl_artificial_thread[ith], binary_table_thread[ith], n_group_in_cluster_[i], n_member_in_group_thread[ith], i_cluster_changeover_update_threads[ith], group_candidate, _dt_tree);
    PS::S32 n_binary_table_offset_thread[num_thread+1];
    n_binary_table_offset_thread[0] = 0;
    for (PS::S32 i=0; i<num_thread; i++)
        n_binary_table_offset_thread[i+1] = n_binary_table_offset_thread[i] + binary_table_thread[i].size();
    binary_table.resizeNoInitialize(n_binary_table_offset_thread[num_thread]);
#pragma omp parallel for
    for (PS::S32 i=0; i<num_thread; i++) {
        for (PS::S32 k=n_binary_table_offset_thread[i]; k<n_binary_table_offset_thread[i+1]; k++) {
            binary_table[k] = binary_table_thread[i][k-n_binary_table_offset_thread[i]];
    n_group_in_cluster_offset_.resizeNoInitialize(n_cluster+1);
    n_group_in_cluster_offset_[0] = 0;
    for (PS::S32 i=0; i<n_cluster; i++) {
        n_group_in_cluster_offset_[i+1] = n_group_in_cluster_offset_[i] + n_group_in_cluster_[i];
    n_member_in_group_.resizeNoInitialize(n_group_in_cluster_offset_[n_cluster]);
    n_member_in_group_offset_.resizeNoInitialize(n_group_in_cluster_offset_[n_cluster]);
    adr_first_ptcl_arti_in_cluster_.resizeNoInitialize(n_group_in_cluster_offset_[n_cluster]);
    for (PS::S32 i=0; i<adr_first_ptcl_arti_in_cluster_.size(); i++) adr_first_ptcl_arti_in_cluster_[i] = -1;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    for (PS::S32 i=0; i<n_member_in_group_.size(); i++) n_member_in_group_[i] = 0;
#pragma omp parallel for
    for (PS::S32 i=0; i<num_thread; i++) {
        for (PS::S32 k=0; k<n_member_in_group_thread[i].size(); k++) {
            PS::S32 i_cluster = n_member_in_group_thread[i][k].cluster_index;
            PS::S32 j_group = n_member_in_group_thread[i][k].group_index;
            PS::S32 n_members = n_member_in_group_thread[i][k].n_members;
            PS::S32 n_member_offset = n_member_in_group_thread[i][k].n_member_offset;
            n_member_in_group_[n_group_in_cluster_offset_[i_cluster]+j_group] = n_members;
            n_member_in_group_offset_[n_group_in_cluster_offset_[i_cluster]+j_group] = n_member_offset;
            if (n_member_in_group_thread[i][k].isolated_case) {
                for (PS::S32 j=0; j<n_members; j++) {
                    auto& p_loc = ptcl_hard_[n_ptcl_in_cluster_disp_[i_cluster]+n_member_offset+j];
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                    assert(p_loc.group_data.artificial.isMember());
                    assert(p_loc.getParticleCMAddress()<0);
                    assert(adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i_cluster]+j_group]<0);
                    const PS::S64 p_glb_adr= p_loc.adr_org;
                    auto& p_glb = _sys[p_glb_adr];
                    p_glb.group_data= p_loc.group_data;
                    p_glb.changeover= p_loc.changeover;
                    p_glb.r_search = p_loc.r_search;
                    n_group_member_remote_++;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    for (PS::S32 i=0; i<n_member_in_group_.size(); i++) assert(n_member_in_group_[i] > 0);
    PS::S32 rank = PS::Comm::getRank();
    PS::S64 sys_ptcl_artificial_thread_offset[num_thread+1];
    sys_ptcl_artificial_thread_offset[0] = _sys.getNumberOfParticleLocal();
    for(PS::S32 i=0; i<num_thread; i++) {
        sys_ptcl_artificial_thread_offset[i+1] = sys_ptcl_artificial_thread_offset[i] + ptcl_artificial_thread[i].size();
    _sys.setNumberOfParticleLocal(sys_ptcl_artificial_thread_offset[num_thread]);
    const PS::S32 n_artificial_per_group = ap_manager.getArtificialParticleN();
#pragma omp parallel for
    for(PS::S32 i=0; i<num_thread; i++) {
        assert(ptcl_artificial_thread[i].size()%n_artificial_per_group==0);
        for (PS::S32 j=0; j<ptcl_artificial_thread[i].size(); j++) {
            PS::S32 adr = j+sys_ptcl_artificial_thread_offset[i];
            ptcl_artificial_thread[i][j].adr_org=adr;
            _sys[adr]=Tptcl(ptcl_artificial_thread[i][j],rank,adr);
        PS::S32 group_offset=0, j_group_recored=-1;
        for (PS::S32 j=0; j<ptcl_artificial_thread[i].size(); j+=n_artificial_per_group) {
            auto* pj = &ptcl_artificial_thread[i][j];
            auto* pcm = ap_manager.getCMParticles(pj);
            PS::S32 n_members = ap_manager.getMemberN(pj);
            PS::S32 i_cluster = ap_manager.getStoredData(pj,0,true)-1;
            PS::S32 j_group = ap_manager.getStoredData(pj,1,true)-1;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
            assert(n_member_in_group_[n_group_in_cluster_offset_[i_cluster]+j_group]==n_members);
            assert(j_group==j_group_recored+1);
            j_group_recored=j_group;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
            PS::S64 id_cm = ap_manager.getCMID(pj);
            bool id_match_flag=false;
            for (PS::S32 k=0; k<n_members; k++) {
                PS::S32 kl = n_ptcl_in_cluster_disp_[i_cluster]+group_offset+k;
                auto& p_loc = ptcl_hard_.getPointer()[kl];
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                assert(group_offset == n_member_in_group_offset_[n_group_in_cluster_offset_[i_cluster]+j_group]);
                assert(p_loc.getParticleCMAddress()==j+1);
                assert(p_loc.group_data.artificial.isMember());
                p_loc.setParticleCMAddress(pcm->adr_org);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                if(p_loc.id == id_cm) id_match_flag=true;
                const PS::S64 p_glb_adr= p_loc.adr_org;
                auto& p_glb = _sys[p_glb_adr];
                p_glb.mass = p_loc.mass;
                p_glb.group_data= p_loc.group_data;
                p_glb.changeover= p_loc.changeover;
                p_glb.r_search = p_loc.r_search;
                n_group_member_remote_++;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
            assert(id_match_flag);
            if(j_group==n_group_in_cluster_[i_cluster]-1) {
            else group_offset += n_members;
            assert(j_group<=n_group_in_cluster_[i_cluster]);
            adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i_cluster]+j_group] = ptcl_artificial_thread[i][j].adr_org;
    i_cluster_changeover_update_.resizeNoInitialize(0);
    for(PS::S32 i=0; i<num_thread; i++) {
        for (PS::S32 j=0; j<i_cluster_changeover_update_threads[i].size();j++)
            i_cluster_changeover_update_.push_back(i_cluster_changeover_update_threads[i][j]);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
    if (i_cluster_changeover_update_.size()>0)
        std::cerr<<"Changeover change cluster found: T="<<time_origin_<<" n_cluster_change="<<i_cluster_changeover_update_.size()<<std::endl;
#ifdef CLUSTER_VELOCITY
template <class Tsoft>
void resetParticleGroupData(Tsoft& _ptcl_soft) {
    const PS::S32 n = ptcl_hard_.size();
#pragma omp parallel for
        auto& pi_cm = ptcl_hard_[i].group_data.cm;
        pi_cm.mass = pi_cm.vel.x = pi_cm.vel.y = pi_cm.vel.z = 0.0;
        PS::S32 adr = ptcl_hard_[i].adr_org;
        _ptcl_soft[adr].group_data.cm = pi_cm;
template <class Tsoft>
void setParticleGroupDataToCMData(Tsoft& _ptcl_soft) {
    const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
#pragma omp parallel for schedule(dynamic)
    for(PS::S32 i=0; i<n_cluster; i++){
        const PS::S32 adr_head = n_ptcl_in_cluster_disp_[i];
        const PS::S32 n_ptcl = n_ptcl_in_cluster_[i];
        const PS::S32 n_group = n_group_in_cluster_[i];
        PtclH4* ptcl_local = ptcl_hard_.getPointer(adr_head);
        PS::S32 n_group_offset_local = 0;
        for(int k=0; k<n_group; k++) {
            PS::S32 n_group_in_cluster_offset_k = n_group_in_cluster_offset_[i]+k;
            PS::S32 ptcl_artificial_adr = adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_k];
            PS::S32 n_members = n_member_in_group_[n_group_in_cluster_offset_k];
            if (ptcl_artificial_adr>=0) {
                auto* pi = &(_ptcl_soft[ptcl_artificial_adr]);
                auto* pcm = ap_manager.getCMParticles(pi);
                PS::F64 pcm_mass = pcm->group_data.artificial.getMassBackup();
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                assert(n_members == ap_manager.getMemberN(pi));
                ap_manager.checkConsistence(&ptcl_local[n_group_offset_local], pi);
                for (int j=n_group_offset_local; j<n_group_offset_local+n_members; j++) {
                    ptcl_local[j].r_search = std::max(pcm->r_search, ptcl_local[j].r_search);
                    auto& pj_cm = ptcl_local[j].group_data.cm;
                    pj_cm.mass = pcm_mass;
                    pj_cm.vel.x = pcm->vel[0];
                    pj_cm.vel.y = pcm->vel[1];
                    pj_cm.vel.z = pcm->vel[2];
                    PS::S32 adr = ptcl_local[j].adr_org;
                    assert(ptcl_local[j].id==_ptcl_soft[adr].id);
                    _ptcl_soft[adr].group_data.cm = pj_cm;
                    _ptcl_soft[adr].r_search = ptcl_local[j].r_search;
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                assert(n_members == 2&&n_group==1);
                for (int j=n_group_offset_local; j<n_group_offset_local+n_members; j++) {
                    auto& pj = ptcl_local[j];
                    vel_cm.x += pj.mass*pj.vel.x;
                    vel_cm.y += pj.mass*pj.vel.y;
                    vel_cm.z += pj.mass*pj.vel.z;
                for (int j=n_group_offset_local; j<n_group_offset_local+n_members; j++) {
                    auto& pj_cm = ptcl_local[j].group_data.cm;
                    pj_cm.mass = mass_cm;
                    PS::S32 adr = ptcl_local[j].adr_org;
                    assert(ptcl_local[j].id==_ptcl_soft[adr].id);
                    _ptcl_soft[adr].group_data.cm = pj_cm;
            n_group_offset_local += n_members;
        for (int j=n_group_offset_local; j<n_ptcl; j++) {
            auto& pj_cm = ptcl_local[j].group_data.cm;
            pj_cm.mass = pj_cm.vel.x = pj_cm.vel.y = pj_cm.vel.z = 0.0;
            PS::S32 adr = ptcl_local[j].adr_org;
            if(adr>=0) _ptcl_soft[adr].group_data.cm = pj_cm;
template <class Tsys>
    const PS::ReallocatableArray<PS::S32>& _ptcl_list) {
    const PS::S32 n_ptcl = _ptcl_list.size();
#pragma omp parallel for
    for (int i=0; i<n_ptcl; i++) {
        const PS::S32 k = _ptcl_list[i];
        _sys[k].pot_tot += pot_cor;
        _sys[k].pot_soft += pot_cor;
template <class Tsys, class Tpsoft, class Ttree, class Tepj>
    const PS::ReallocatableArray<PS::S32>& _adr_send,
    const bool _acorr_flag=false) {
    const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
    const PtclH4* ptcl_local = ptcl_hard_.getPointer();
#pragma omp parallel for schedule(dynamic)
    for (int i=0; i<n_cluster; i++) {
        PS::S32 adr_real_start= n_ptcl_in_cluster_disp_[i];
        PS::S32 adr_real_end= n_ptcl_in_cluster_disp_[i+1];
        PS::S32 n_group = n_group_in_cluster_[i];
        const PS::S32* adr_first_ptcl_arti = &adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i]];
        correctForceWithCutoffArtificialOneClusterImp(_sys, ptcl_local, adr_real_start, adr_real_end, n_group, adr_first_ptcl_arti, _acorr_flag);
        for (int j=adr_real_start; j<adr_real_end; j++) {
            PS::S64 adr = ptcl_local[j].adr_org;
            if(adr>=0) assert(_sys[adr].id==ptcl_local[j].id);
            if(adr>=0) correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[adr], _tree, _acorr_flag);
    const PS::S32 n_send = _adr_send.size();
#pragma omp parallel for
    for (int i=0; i<n_send; i++) {
        correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[adr], _tree, _acorr_flag);
template <class Tsys>
    const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
    const PtclH4* ptcl_local = ptcl_hard_.getPointer();
#pragma omp parallel for schedule(dynamic)
    for (int i=0; i<n_cluster; i++) {
        PS::S32 adr_real_start= n_ptcl_in_cluster_disp_[i];
        PS::S32 adr_real_end= n_ptcl_in_cluster_disp_[i+1];
        PS::S32 n_group = n_group_in_cluster_[i];
        const PS::S32* adr_first_ptcl_arti = n_group>0? &adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i]] : NULL;
        correctForceWithCutoffArtificialOneClusterImp(_sys, ptcl_hard_.getPointer(), adr_real_start, adr_real_end, n_group, adr_first_ptcl_arti, _acorr_flag);
        for (int j=adr_real_start; j<adr_real_end; j++) {
            PS::S64 adr = ptcl_local[j].adr_org;
            assert(_sys[adr].id==ptcl_local[j].id);
            if (_sys[adr].group_data.artificial.isSingle()
                || (_sys[adr].group_data.artificial.isMember() && _sys[adr].getParticleCMAddress()<0)) {
                _sys[adr].pot_tot += pot_cor;
                _sys[adr].pot_soft += pot_cor;
            for (int k=adr_real_start; k<adr_real_end; k++) {
                PS::S64 adr_k = ptcl_local[k].adr_org;
                calcAcorrShortWithLinearCutoff(_sys[adr], _sys[adr_k]);
                calcAccPotShortWithLinearCutoff(_sys[adr], ptcl_local[k]);
            for (int k=0; k<n_group; k++) {
                PS::S32 k_start = adr_first_ptcl_arti[k];
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                if (k_start<0) assert(n_group==1&&adr_real_end-adr_real_start==2);
                if (k_start<0) continue;
                auto* porb_k = ap_manager.getOrbitalParticles(&(_sys[k_start]));
                for (int ki=0; ki<ap_manager.getOrbitalParticleN(); ki++) {
                    calcAcorrShortWithLinearCutoff(_sys[adr], porb_k[ki]);
                    calcAccPotShortWithLinearCutoff(_sys[adr], porb_k[ki]);
template <class Tsys, class Ttree, class Tepj>
    const PS::S32 n_cluster = i_cluster_changeover_update_.size();
#pragma omp parallel for schedule(dynamic)
    for (int i=0; i<n_cluster; i++) {
        PS::S32 i_cluster = i_cluster_changeover_update_[i];
        PS::S32 adr_real_start= n_ptcl_in_cluster_disp_[i_cluster];
        PS::S32 adr_real_end= n_ptcl_in_cluster_disp_[i_cluster+1];
        for (int j=adr_real_start; j<adr_real_end; j++) {
            PS::S64 adr = ptcl_hard_[j].adr_org;
            bool change_i = _sys[adr].changeover.r_scale_next!=1.0;
            Tepj * ptcl_nb = NULL;
            PS::S32 n_ngb = _tree.getNeighborListOneParticle(_sys[adr], ptcl_nb);
            for(PS::S32 k=0; k<n_ngb; k++){
                if (ptcl_nb[k].id == _sys[adr].id) continue;
                if (ptcl_nb[k].r_scale_next!=1.0 || change_i)
                    calcAccChangeOverCorrection(_sys[adr], ptcl_nb[k]);
            ptcl_hard_[j].changeover.updateWithRScale();
            if(adr>=0) _sys[adr].changeover.updateWithRScale();
#pragma omp parallel for
    for (int i=0; i<_n_send; i++) {
        bool change_i = _sys[adr].changeover.r_scale_next!=1.0;
        Tepj * ptcl_nb = NULL;
        PS::S32 n_ngb = _tree.getNeighborListOneParticle(_sys[adr], ptcl_nb);
        for(PS::S32 k=0; k<n_ngb; k++){
            if (ptcl_nb[k].id == _sys[adr].id) continue;
            if (ptcl_nb[k].r_scale_next!=1.0 || change_i)
                calcAccChangeOverCorrection(_sys[adr], ptcl_nb[k]);
        _sys[adr].changeover.updateWithRScale();
template <class Tsys, class Tpsoft, class Ttree, class Tepj>
    const PS::S32 _adr_ptcl_artificial_start,
    const bool _acorr_flag=false) {
    const PS::S32 n_tot = _sys.getNumberOfParticleLocal();
#pragma omp parallel for schedule(dynamic)
    for (int i=0; i<n_tot; i++) {
        correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[i], _tree, _acorr_flag);
    assert((n_tot-_adr_ptcl_artificial_start)%n_artificial==0);
#pragma omp parallel for schedule(dynamic)
    for (int i=_adr_ptcl_artificial_start; i<n_tot; i+=n_artificial)