// PeTar
// N-body code for collisional gravitational systems
// hard.hpp
1 #pragma once
2 #ifdef USE_INTRINSIC_FOR_X86
3 #include<immintrin.h>
4 #endif
5 
6 #include"cstdlib"
7 #include <algorithm>
8 
9 #include"AR/symplectic_integrator.h"
10 #include"Hermite/hermite_integrator.h"
11 #include"Hermite/hermite_particle.h"
12 #include"hard_ptcl.hpp"
13 #include"soft_ptcl.hpp"
16 #include"hermite_perturber.hpp"
17 #include"ar_interaction.hpp"
18 #include"ar_perturber.hpp"
21 #include"stability.hpp"
22 
23 typedef H4::ParticleH4<PtclHard> PtclH4;
24 
27 public:
34  H4::HermiteManager<HermiteInteraction> h4_manager;
35  AR::TimeTransformedSymplecticManager<ARInteraction> ar_manager;
36 
39 
41  void setEpsSq(const PS::F64 _eps_sq) {
42  eps_sq = _eps_sq;
43  h4_manager.interaction.eps_sq = _eps_sq;
44  ar_manager.interaction.eps_sq = _eps_sq;
45  }
46 
    // NOTE(review): the signature line of this function is missing from this
    // extraction; based on the body it is presumably
    // void setGravitationalConstant(const PS::F64 _g) -- confirm against the
    // full source. It propagates G to every sub-manager that evaluates forces.
#ifdef ORBIT_SAMPLING
        ap_manager.orbit_manager.gravitational_constant = _g;
#endif
        h4_manager.interaction.gravitational_constant = _g;
        ar_manager.interaction.gravitational_constant = _g;
#ifdef BSE_BASE
        // tidal-interaction model needs G as well when BSE stellar evolution is on
        ar_manager.interaction.tide.gravitational_constant = _g;
#endif
    }
59 
61  void setDtRange(const PS::F64 _dt_max, const PS::S32 _dt_min_index) {
62  h4_manager.step.setDtRange(_dt_max, _dt_min_index);
63  ar_manager.time_step_min = h4_manager.step.getDtMin();
64  ar_manager.time_error_max = 0.25*ar_manager.time_step_min;
65  }
66 
    //! validate all hard-manager parameters
    /*! Aborts through ASSERT on the first violated condition.
        @return true when every check passes
     */
    bool checkParams() {
        ASSERT(eps_sq>=0.0);    // softening squared must be non-negative
        ASSERT(r_in_base>0.0);  // changeover inner-radius base
        ASSERT(r_out_base>0.0); // changeover outer-radius base
        ASSERT(h4_manager.checkParams());
        ASSERT(ar_manager.checkParams());
        return true;
    }
79 
81 
83  void writeBinary(FILE *_fp) {
84  size_t size = sizeof(*this) - sizeof(ap_manager) - sizeof(h4_manager) - sizeof(ar_manager);
85  fwrite(this, size, 1, _fp);
87  h4_manager.writeBinary(_fp);
88  ar_manager.writeBinary(_fp);
89  }
90 
92 
    //! read class data from a binary file written by writeBinary
    /*! Reads the flat data members raw, then lets the three sub-managers
        restore themselves in the order ap, h4, ar. Aborts on a short read.
        @param[in] _fin: FILE pointer opened for binary input
        @param[in] _version: data format version, forwarded only to ar_manager
     */
    void readBinary(FILE *_fin, int _version=0) {
        // flat-member byte count must match the one used in writeBinary
        size_t size = sizeof(*this) - sizeof(ap_manager) - sizeof(h4_manager) - sizeof(ar_manager);
        size_t rcount = fread(this, size, 1, _fin);
        if (rcount<1) {
            std::cerr<<"Error: Data reading fails! requiring data number is 1, only obtain "<<rcount<<".\n";
            abort();
        }
        ap_manager.readBinary(_fin);
        h4_manager.readBinary(_fin);
        ar_manager.readBinary(_fin, _version);
    }
106 
108  void print(std::ostream & _fout) const{
109  _fout<<"energy_error_max : "<<energy_error_max<<std::endl
110  <<"eps_sq : "<<eps_sq<<std::endl
111  <<"r_in_base : "<<r_in_base<<std::endl
112  <<"r_out_base : "<<r_out_base<<std::endl;
113  ap_manager.print(_fout);
114  h4_manager.print(_fout);
115  ar_manager.print(_fout);
116  }
117 };
118 
119 struct HardEnergy{
120  PS::F64 de; // energy error
121  PS::F64 de_change_cum; // cumulative energy change
122  PS::F64 de_change_binary_interrupt; // cumulative energy change due to interruption
123  PS::F64 de_change_modify_single; // cumulative energy change due to modification of one particle
124  PS::F64 de_sd; // slowdown energy error
125  PS::F64 de_sd_change_cum; // cumulative Etot_SD change due to the change of slowdown factor and interruption
126  PS::F64 de_sd_change_binary_interrupt; // cumulative Etot_SD change due to interruption
127  PS::F64 de_sd_change_modify_single; // cumulative Etot_SD change due to modfication of one particle
128  PS::F64 ekin_sd_correction; // correction from Ekin to Etot_sd
129  PS::F64 epot_sd_correction; // correction from Epot to Etot_sd
130 
132 
133  void clear() {
136  }
137 
140  }
141 
142  HardEnergy& operator +=(const HardEnergy& _energy) {
143  de += _energy.de;
144  de_change_cum += _energy.de_change_cum;
147  de_sd += _energy.de_sd;
153  return *this;
154  }
155 
156 };
157 
160 public:
    // fourth-order Hermite integrator handling singles and perturbed groups
    H4::HermiteIntegrator<PtclHard, PtclH4, HermitePerturber, ARPerturber, HermiteInteraction, ARInteraction, HermiteInformation> h4_int;
    // time-transformed symplectic (AR) integrator, used when the whole cluster is one group
    AR::TimeTransformedSymplecticIntegrator<PtclHard, PtclH4, ARPerturber, ARInteraction, H4::ARInformation<PtclHard>> sym_int;
    // tidal tensors fitted from artificial particles, one per group
    PS::ReallocatableArray<TidalTensor> tidal_tensor;

    // record of a binary whose integration was interrupted (returned to the caller)
    AR::InterruptBinary<PtclHard> interrupt_binary;

#ifdef HARD_DEBUG_PRINT
    PS::ReallocatableArray<PS::S32> n_group_sub_init; // initial sub-group (slowdown binary) count per group
    PS::S32 n_group_sub_tot_init;                     // sum of n_group_sub_init
#endif

#ifdef PROFILE
    PS::S64 ARC_substep_sum;   // accumulated AR sub-step count
    PS::S64 ARC_tsyn_step_sum; // accumulated AR time-synchronization step count
    PS::S64 H4_step_sum;       // accumulated Hermite step count
#endif

#ifdef HARD_COUNT_NO_NEIGHBOR
    PS::ReallocatableArray<bool> table_neighbor_exist; // per-particle flag: neighbor found within changeover radius
    PS::S32 n_neighbor_zero;                           // count of particles without any such neighbor
#endif

    // true: integrate with sym_int (single fully-grouped cluster); false: use h4_int
    bool use_sym_int;
    // NOTE(review): an is_initialized flag used by the constructor below appears
    // to be declared near here but is missing from this extraction -- confirm.

#ifdef HARD_CHECK_ENERGY
    HardEnergy energy; // energy-error bookkeeping for this cluster
#endif
    // NOTE(review): the constructor name line is missing from this extraction;
    // what follows is the tail of the default constructor's initializer list.
    interrupt_binary(),
#ifdef HARD_DEBUG_PRINT
    n_group_sub_init(), n_group_sub_tot_init(0),
#endif
#ifdef PROFILE
    ARC_substep_sum(0), ARC_tsyn_step_sum(0), H4_step_sum(0),
#endif
#ifdef HARD_COUNT_NO_NEIGHBOR
    table_neighbor_exist(), n_neighbor_zero(0),
#endif
    use_sym_int(true), is_initialized(false) {
#ifdef HARD_CHECK_ENERGY
        // start with zeroed energy accounting
        energy.clear();
#endif
    }
210 
    //! check whether the integrator is ready for use
    /*! Aborts through ASSERT when the manager or particle array is not linked
        or the time origin is invalid.
        @return true when every check passes
     */
    bool checkParams() {
        ASSERT(manager!=NULL);     // HardManager must be attached via initial()
        ASSERT(ptcl_origin!=NULL); // particle array must be linked via initial()
        ASSERT(time_origin>=0.0);
        return true;
    }
219 
221 
    //! initialize the hard integrator for one cluster
    /*! Links the particle array and manager, reconstructs the group layout
        from the artificial-particle data, then selects the integration mode:
        when the cluster consists of exactly one group and no singles, the AR
        integrator (sym_int) handles everything; otherwise the Hermite
        integrator (h4_int) is set up with AR groups embedded.
        @param[in] _ptcl: hard particle array (group members first, then singles)
        @param[in] _n_ptcl: total number of particles in _ptcl
        @param[in] _ptcl_artificial: artificial particle array (NULL if none)
        @param[in] _n_group: number of groups
        @param[in] _n_member_in_group: member count of each group
        @param[in] _manager: hard manager providing all integration parameters
        @param[in] _time_origin: physical time offset of this integration
     */
    template <class Tsoft>
    void initial(PtclH4 * _ptcl,
                 const PS::S32 _n_ptcl,
                 Tsoft* _ptcl_artificial,
                 const PS::S32 _n_group,
                 const PS::S32* _n_member_in_group,
                 HardManager* _manager,
                 const PS::F64 _time_origin) {

        // ensure the integrator is not used
        ASSERT(ptcl_origin==NULL);
        ASSERT(manager==NULL);

        is_initialized = true;

        // set manager
        manager = _manager;
        time_origin = _time_origin;
        ptcl_origin = _ptcl;

        //for (int i=0; i<_n_ptcl; i++) {
        //    if (ptcl_origin[i].id==301) {
        //        DATADUMP((std::string("dump_301")+std::to_string(_time_origin)).c_str());
        //    }
        //}

//#ifdef HARD_DEBUG
        // warn on unusually large clusters and report the widest search radius
        if (_n_ptcl>400) {
            std::cerr<<"Large cluster, n_ptcl="<<_n_ptcl<<" n_group="<<_n_group<<std::endl;
            std::pair<PS::S32,PS::F64> r_search_max={-1,0.0};
            for (PS::S32 i=0; i<_n_ptcl; i++) {
                if(ptcl_origin[i].r_search>r_search_max.second) {
                    r_search_max.first = i;
                    r_search_max.second = ptcl_origin[i].r_search;
                }
            }
            std::cerr<<"Maximum rsearch particle i = "<<r_search_max.first<<" ";
            ptcl_origin[r_search_max.first].print(std::cerr);
            std::cerr<<std::endl;
        }
//#endif

        // prepare initial groups with artificial particles
        // NOTE(review): variable-length arrays are a GNU extension, not standard C++
        PS::S32 adr_first_ptcl[_n_group+1];
        PS::S32 n_group_offset[_n_group+1]; // ptcl member offset in ptcl_origin
        n_group_offset[0] = 0;
        for(int i=0; i<_n_group; i++)
            n_group_offset[i+1] = n_group_offset[i] + _n_member_in_group[i];

        auto& ap_manager = manager->ap_manager;
        if (_ptcl_artificial!=NULL) {
            for(int i=0; i<_n_group; i++) {
                adr_first_ptcl[i] = i*ap_manager.getArtificialParticleN();
                auto* pi = &(_ptcl_artificial[adr_first_ptcl[i]]);
#ifdef HARD_DEBUG
                ASSERT(ap_manager.getMemberN(pi)==_n_member_in_group[i]);
#endif
                // pre-process for c.m. particle
                auto* pcm = ap_manager.getCMParticles(pi);
                // recover mass
                pcm->mass = pcm->group_data.artificial.getMassBackup();

#ifdef ARTIFICIAL_PARTICLE_DEBUG
                ap_manager.checkConsistence(&ptcl_origin[n_group_offset[i]], &(_ptcl_artificial[adr_first_ptcl[i]]));
#endif
            }

#ifdef HARD_DEBUG
            // the particle following the last group member must be a single,
            // and the last group member must carry member status
            if(_n_group>0) {
                if(n_group_offset[_n_group]<_n_ptcl)
                    ASSERT(ptcl_origin[n_group_offset[_n_group]].group_data.artificial.isSingle());
                if (_ptcl_artificial!=NULL) assert(ptcl_origin[n_group_offset[_n_group]-1].group_data.artificial.isMember());
                else ASSERT(ptcl_origin[n_group_offset[_n_group]-1].group_data.artificial.isSingle());
            }
#endif
        }
#ifdef HARD_DEBUG
        else if(_n_group>0) ASSERT(_n_ptcl==2); // right now only support isolated binary case without artificial particles
#endif

        // single particle start index in ptcl_origin
        PS::S32 i_single_start = n_group_offset[_n_group];
        // number of single particles
        PS::S32 n_single_init = _n_ptcl - i_single_start;
#ifdef HARD_DEBUG
        ASSERT(n_single_init>=0);
#endif

#ifdef HARD_DEBUG
        // check member consistence
        for(int i=0; i<i_single_start; i++) {
            ASSERT(ptcl_origin[i].group_data.artificial.isMember());
            ASSERT(ptcl_origin[i].mass>0);
        }
#endif

#ifdef HARD_DEBUG_PRINT
        std::cerr<<"Hard: n_ptcl: "<<_n_ptcl<<" n_group: "<<_n_group<<std::endl;
#endif

        // manager
        auto& h4_manager = manager->h4_manager;
        auto& ar_manager = manager->ar_manager;

        // Only one group with all particles in group
        if(_n_group==1&&n_single_init==0) {
            use_sym_int = true;

            sym_int.manager = &ar_manager;

            sym_int.particles.setMode(COMM::ListMode::copy);
            const PS::S32 n_members = _n_member_in_group[0];
#ifdef HARD_DEBUG
            ASSERT(n_members == _n_ptcl);
#endif
            sym_int.particles.reserveMem(n_members);
            sym_int.info.reserveMem(n_members);
            //sym_int.perturber.r_crit_sq = h4_manager->r_neighbor_crit*h4_manager->r_neighbor_crit;
            // collect members; track the widest break and neighbor criteria
            for (PS::S32 i=0; i<n_members; i++) {
                sym_int.particles.addMemberAndAddress(ptcl_origin[i]);
                sym_int.info.particle_index.addMember(i);
                sym_int.info.r_break_crit = std::max(sym_int.info.r_break_crit,ptcl_origin[i].getRGroup());
                Float r_neighbor_crit = ptcl_origin[i].getRNeighbor();
                sym_int.perturber.r_neighbor_crit_sq = std::max(sym_int.perturber.r_neighbor_crit_sq, r_neighbor_crit*r_neighbor_crit);
            }
            sym_int.reserveIntegratorMem();
            sym_int.info.generateBinaryTree(sym_int.particles, ar_manager.interaction.gravitational_constant);
#ifdef SOFT_PERT
            // fit one tidal tensor from the artificial particles as soft perturbation
            if (_ptcl_artificial!=NULL) {
                Tsoft* api=&(_ptcl_artificial[adr_first_ptcl[0]]);
                auto* apcm = ap_manager.getCMParticles(api);
                auto* aptt = ap_manager.getTidalTensorParticles(api);

                tidal_tensor.resizeNoInitialize(1);
                auto& tt = tidal_tensor[0];
                tt.fit(aptt, *apcm, ap_manager.r_tidal_tensor);
                sym_int.perturber.soft_pert=&tt;
                // set tt group id to the total number of particles
                sym_int.perturber.soft_pert->group_id = n_members;
            }
#endif
            // calculate soft_pert_min
            sym_int.perturber.calcSoftPertMin(sym_int.info.getBinaryTreeRoot(), ar_manager.interaction.gravitational_constant);

            // initialization
            sym_int.initialIntegration(0.0);
            sym_int.info.time_offset = time_origin;
            sym_int.info.calcDsAndStepOption(ar_manager.step.getOrder(), ar_manager.interaction.gravitational_constant, ar_manager.ds_scale);

            // calculate c.m. changeover
            auto& pcm = sym_int.particles.cm;
            PS::F64 m_fac = pcm.mass*Ptcl::mean_mass_inv;
            pcm.changeover.setR(m_fac, manager->r_in_base, manager->r_out_base);

#ifdef HARD_DEBUG
            // isolated binary: warn when the slowdown period is shorter than the cap
            if(_ptcl_artificial==NULL) {
                PS::F64 period = sym_int.info.getBinaryTreeRoot().period;
                PS::F64 sd_factor = sym_int.info.getBinaryTreeRoot().slowdown.getSlowDownFactor();
                PS::F64 sd_tmax = manager->ar_manager.slowdown_timescale_max;
                if (1.01*sd_factor*period<=sd_tmax) {
                    std::cerr<<"Warning: isolated binary SD ("<<sd_factor<<") * period ("<<period<<") = "<<period*sd_factor<<"< SD_timescale_max = dt_tree*n_step_per_orbit ("<<sd_tmax<<")"<<std::endl;
                }
            }
#endif
            //check paramters
            ASSERT(sym_int.info.checkParams());
            ASSERT(sym_int.perturber.checkParams());

#ifdef ADJUST_GROUP_PRINT
            if (manager->h4_manager.adjust_group_write_flag) {
                // print new group information
                sym_int.printGroupInfo(0, manager->h4_manager.fgroup, WRITE_WIDTH, &pcm);
            }
#endif

        }
        else { // use hermite
            use_sym_int = false;

            h4_int.manager = &h4_manager;
            h4_int.ar_manager = &ar_manager;

            h4_int.particles.setMode(COMM::ListMode::link);
            h4_int.particles.linkMemberArray(ptcl_origin, _n_ptcl);

            // soften the 4th-order step criterion by the largest mass ratio
            h4_int.step = h4_manager.step;
            PS::F64 mmax = 0.0;
            for (int k=0; k<_n_ptcl; k++) mmax=std::max(mmax, ptcl_origin[k].mass);
            h4_int.step.eta_4th /= std::pow(mmax*Ptcl::mean_mass_inv, 0.25);

            h4_int.particles.calcCenterOfMass();
            h4_int.particles.shiftToCenterOfMassFrame();

            PS::S32 n_group_size_max = _n_ptcl+_n_group;
            h4_int.groups.setMode(COMM::ListMode::local);
            h4_int.groups.reserveMem(n_group_size_max);
            h4_int.reserveIntegratorMem();

            // initial system
            h4_int.initialSystemSingle(0.0);
            h4_int.setTimeOffset(time_origin);

#ifdef SOFT_PERT
            // Tidal tensor
            tidal_tensor.resizeNoInitialize(_n_group+1);
#endif
#ifdef HARD_COUNT_NO_NEIGHBOR
            table_neighbor_exist.resizeNoInitialize(_n_ptcl);
            for (int k=0; k<_n_ptcl; k++) table_neighbor_exist[k] = false;
#endif

            // add groups
            if (_n_group>0) {
                ASSERT(n_group_offset[_n_group]>0);
                // NOTE(review): variable-length array (GNU extension)
                PS::S32 ptcl_index_group[n_group_offset[_n_group]];
                for (PS::S32 i=0; i<n_group_offset[_n_group]; i++) ptcl_index_group[i] = i;
                h4_int.addGroups(ptcl_index_group, n_group_offset, _n_group);

                for (PS::S32 i=0; i<_n_group; i++) {
                    auto& groupi = h4_int.groups[i];

#ifdef SOFT_PERT
                    auto* api = &(_ptcl_artificial[adr_first_ptcl[i]]);
                    auto* apcm = ap_manager.getCMParticles(api);
                    auto* aptt = ap_manager.getTidalTensorParticles(api);

                    // correct pos for t.t. cm
                    apcm->pos -= h4_int.particles.cm.pos;

                    // fit tidal tensor
                    tidal_tensor[i].fit(aptt, *apcm, ap_manager.r_tidal_tensor);

                    // set tidal_tensor pointer
                    groupi.perturber.soft_pert = &tidal_tensor[i];

                    // set group id of tidal tensor to the n_members
                    groupi.perturber.soft_pert->group_id = groupi.particles.getSize();

                    // same tidal_tensor id to member particle group_data for identification later
                    for (PS::S32 k=0; k<groupi.particles.getSize(); k++) {
                        groupi.particles[k].setTidalTensorID(i+1);
                        ptcl_origin[n_group_offset[i]+k].setTidalTensorID(i+1);
                    }
#endif
                    // calculate soft_pert_min
                    groupi.perturber.calcSoftPertMin(groupi.info.getBinaryTreeRoot(), ar_manager.interaction.gravitational_constant);

                    // calculate c.m. changeover
                    auto& pcm = groupi.particles.cm;
                    PS::F64 m_fac = pcm.mass*Ptcl::mean_mass_inv;

                    ASSERT(m_fac>0.0);
                    pcm.changeover.setR(m_fac, manager->r_in_base, manager->r_out_base);

#ifdef HARD_DEBUG
                    PS::F64 r_out_cm = pcm.changeover.getRout();
                    for (PS::S32 k=0; k<groupi.particles.getSize(); k++)
                        ASSERT(abs(groupi.particles[k].changeover.getRout()-r_out_cm)<1e-10);
#endif
                }
            }

            // initialization
            h4_int.initialIntegration(); // get neighbors and min particles

#ifdef HARD_DEBUG_PRINT
            // AR inner slowdown number
            n_group_sub_init.resizeNoInitialize(_n_group);
            n_group_sub_tot_init=0;
            for (int i=0; i<_n_group; i++) {
#ifdef AR_SLOWDOWN_ARRAY
                n_group_sub_init[i] = h4_int.groups[i].binary_slowdown.getSize();
#else
                n_group_sub_init[i] = h4_int.groups[i].info.binarytree.getSize();
#endif
                n_group_sub_tot_init += n_group_sub_init[i];
            }
#endif

            h4_int.adjustGroups(true);

            // set the changeover radius for groups newly created by adjustGroups
            const PS::S32 n_init = h4_int.getNInitGroup();
            const PS::S32* group_index = h4_int.getSortDtIndexGroup();
            for(int i=0; i<n_init; i++) {
                auto& groupi = h4_int.groups[group_index[i]];
                // calculate c.m. changeover
                auto& pcm = groupi.particles.cm;
                PS::F64 m_fac = pcm.mass*Ptcl::mean_mass_inv;
                ASSERT(m_fac>0.0);
                pcm.changeover.setR(m_fac, manager->r_in_base, manager->r_out_base);

/* It is not consistent to use tidal tensor for different group
#ifdef SOFT_PERT
                // check whether all r_out are same (primoridal or not)
                bool primordial_flag = true;
                PS::F64 r_out_cm = groupi.particles.cm.changeover.getRout();
                for (PS::S32 k=0; k<groupi.particles.getSize(); k++)
                    if (abs(groupi.particles[k].changeover.getRout()-r_out_cm)>1e-10) {
                        primordial_flag =false;
                        break;
                    }
                if (_n_group>0 && primordial_flag) {
                    // check closed tt and only find consistent changeover
                    PS::F32 tt_index=groupi.perturber.findCloseSoftPert(tidal_tensor, n_tt, n_group_size_max, groupi.particles.cm, r_out_cm);
                    ASSERT(tt_index<n_tt);
                    // calculate soft_pert_min
                    if (tt_index>=0)
                        groupi.perturber.calcSoftPertMin(groupi.info.getBinaryTreeRoot(), ar_manager.interaction.gravitational_constant);
#ifdef HARD_DEBUG_PRINT
                    std::cerr<<"Find tidal tensor, group i: "<<group_index[i]<<" pcm.r_out: "<<r_out_cm;
                    std::cerr<<" member.r_out: ";
                    for (PS::S32 k=0; k<groupi.particles.getSize(); k++)
                        std::cerr<<groupi.particles[k].changeover.getRout()<<" ";
                    std::cerr<<" tidal tensor index: "<<tt_index;
                    std::cerr<<std::endl;
#endif
                    tt_index=0;
                }
#endif
*/
            }

            h4_int.initialIntegration();
            h4_int.sortDtAndSelectActParticle();
            h4_int.info.time_origin = h4_int.getTime();

#ifdef HARD_CHECK_ENERGY
            h4_int.calcEnergySlowDown(true);
#endif

#ifdef HARD_DEBUG_PRINT_TITLE
            h4_int.printColumnTitle(std::cout, WRITE_WIDTH, n_group_sub_init.getPointer(), n_group_sub_init.size(), n_group_sub_tot_init);
            std::cout<<std::endl;
#endif
#ifdef HARD_DEBUG_PRINT
            h4_int.printColumn(std::cout, WRITE_WIDTH, n_group_sub_init.getPointer(), n_group_sub_init.size(), n_group_sub_tot_init);
            std::cout<<std::endl;
#endif
        }
    }
571 
573 
    //! integrate the cluster to a given time
    /*! Uses sym_int when use_sym_int is set, otherwise loops the Hermite
        integrator, re-attaching tidal tensors to regrouped binaries and
        breaking out early when a binary interruption must be handled by the
        caller (unless interrupt_detection_option==1, which clears it).
        @param[in] _time_end: target integration time (relative to time_origin)
        @return reference to interrupt_binary; non-none status means the
                integration stopped early at an interrupted binary
     */
    AR::InterruptBinary<PtclHard>& integrateToTime(const PS::F64 _time_end) {
        ASSERT(checkParams());

#ifdef HARD_DUMP
        // dump flag: ensure at most one large-step data dump per call
        bool dump_flag=false;
#endif
        // integration
        if (use_sym_int) {
            interrupt_binary = sym_int.integrateToTime(_time_end);
#ifdef ADJUST_GROUP_PRINT
            if (manager->h4_manager.adjust_group_write_flag) {
                // print new group information
                sym_int.printGroupInfo(1, manager->h4_manager.fgroup, WRITE_WIDTH, &(sym_int.particles.cm));
            }
#endif

            // option 1: interruptions are absorbed rather than reported upward
            if (manager->ar_manager.interrupt_detection_option==1) interrupt_binary.clear();
#ifdef PROFILE
#ifdef HARD_DUMP
            if (sym_int.profile.step_count>manager->ar_manager.step_count_max&&!dump_flag) {
                std::cerr<<"Large isolated AR step cluster found (dump): step: "<<sym_int.profile.step_count<<std::endl;
                DATADUMP("dump_large_step");
                dump_flag=true;
            }
#endif
#endif
        }
        else {
#ifdef SOFT_PERT
            PS::S32 n_tt = tidal_tensor.size();
#endif
            // integration loop
            while (h4_int.getTimeInt()<_time_end) {
                // integrate groups
                interrupt_binary = h4_int.integrateGroupsOneStep();
                if (interrupt_binary.status!=AR::InterruptStatus::none) {
                    if (manager->ar_manager.interrupt_detection_option==1) {
                        // correct potential difference due to the changeover function
                        /*
                        auto& pi=interrupt_binary.particle_bk[0];
                        auto& pj=interrupt_binary.particle_bk[1];
                        PtclHard ptmpi(pi), ptmpj(pj);
                        PS::F64 r_in_max = std::max(ptmpi.changeover.getRin(),ptmpj.changeover.getRin());
                        ptmpi.pos=PS::F64vec(r_in_max,0.0,0.0);
                        ptmpj.pos=PS::F64vec(0.0,0.0,0.0);
                        H4::ForceH4 fi;
                        Float dpot = manager->h4_manager.interaction.calcEnergyPotSingleSingle(ptmpi,ptmpj);
                        Float epotij=0.0;
                        AR::Force f1,f2;
                        manager->ar_manager.interaction.calcInnerAccPotAndGTKickInvTwo(f1,f2,epotij,ptmpi,ptmpj);
                        dpot -= epotij;
                        energy.de_sd += dpot;
                        energy.de += dpot;
                        */
                        //for (int k=0; k<2; k++) correctSoftPotMassChangeOneParticle(*interrupt_binary.adr->getMember(k));
                        interrupt_binary.clear();
                    }
                    // when binary is interrupted, break integration loop
                    else break;
                }

#ifdef HARD_COUNT_NO_NEIGHBOR
                checkNeighborExist();
#endif

                // integrate singles
                h4_int.integrateSingleOneStepAct();
                h4_int.adjustGroups(false);

                // (re)initialize groups created by adjustGroups this step
                const PS::S32 n_init_group = h4_int.getNInitGroup();
                const PS::S32* group_index = h4_int.getSortDtIndexGroup();
                for(int i=0; i<n_init_group; i++) {
                    auto& groupi = h4_int.groups[group_index[i]];
                    // calculate c.m. changeover
                    auto& pcm = groupi.particles.cm;
                    PS::F64 m_fac = pcm.mass*Ptcl::mean_mass_inv;
                    ASSERT(m_fac>0.0);
                    pcm.changeover.setR(m_fac, manager->r_in_base, manager->r_out_base);


#ifdef SOFT_PERT
                    // find corresponding tidal tensor if exist
                    PS::S32 tt_id_member = groupi.particles[0].getTidalTensorID();
#ifdef HARD_DEBUG
                    ASSERT(tt_id_member>=0&&tt_id_member<n_tt);
#endif
                    if (tt_id_member>0&&tt_id_member<n_tt) {
                        PS::S32 n_members = groupi.particles.getSize();
                        TidalTensor* tidal_tensor_i = &tidal_tensor[tt_id_member-1];

                        // check whether n member is consistent
                        if (int(tidal_tensor_i->group_id) == n_members) {
                            // check member group_data to find whether all member has the same tidal tensor id
                            bool tt_consistent = true;
                            for (PS::S32 k=1; k<n_members; k++) {
                                if (tt_id_member != groupi.particles[k].getTidalTensorID()) {
                                    tt_consistent = false;
                                    break;
                                }
                            }

                            // if all match, set group tidal tensor
                            if (tt_consistent) groupi.perturber.soft_pert = tidal_tensor_i;
                        }
                    }

                    /* not used anymore
                    // check whether all r_out are same (primoridal or not)
                    bool primordial_flag = true;
                    PS::F64 r_out_cm = groupi.particles.cm.changeover.getRout();
                    for (PS::S32 k=0; k<groupi.particles.getSize(); k++)
                        if (abs(groupi.particles[k].changeover.getRout()-r_out_cm)>1e-10) {
                            primordial_flag =false;
                            break;
                        }

                    if (n_tt>0 && primordial_flag) {
                        // check closed tt and only find consistent changeover
                        PS::F32 tt_index=groupi.perturber.findCloseSoftPert(tidal_tensor, n_tt, n_group_size_max, groupi.particles.cm, r_out_cm);
                        ASSERT(tt_index<n_tt);
                        // calculate soft_pert_min
                        if (tt_index>=0)
                            groupi.perturber.calcSoftPertMin(groupi.info.getBinaryTreeRoot(),ar_manager.interaction.gravitational_constant);
#ifdef HARD_DEBUG_PRINT
                        std::cerr<<"Find tidal tensor, group i: "<<group_index[i]<<" pcm.r_out: "<<groupi.particles.cm.changeover.getRout();
                        std::cerr<<" member.r_out: ";
                        for (PS::S32 k=0; k<groupi.particles.getSize(); k++)
                            std::cerr<<groupi.particles[k].changeover.getRout()<<" ";
                        std::cerr<<" tidal tensor index: "<<tt_index;
                        std::cerr<<std::endl;
#endif

                    }
                    */
#endif
                }
                ASSERT(n_init_group<=h4_int.getNActGroup());

/* Not consistent, should not shift
#ifdef SOFT_PERT
                // update c.m. for Tidal tensor
                if (n_tt>0) {
                    for(int i=n_init_group; i<n_act_group; i++) {
                        auto& groupi = h4_int.groups[group_index[i]];
                        if (groupi.perturber.soft_pert!=NULL)
                            groupi.perturber.soft_pert->shiftCM(groupi.particles.cm.pos);
                    }
                }
#endif
*/
                // initial after groups are modified
                h4_int.initialIntegration();
                h4_int.modifySingleParticles();
                h4_int.sortDtAndSelectActParticle();
                h4_int.info.time_origin = h4_int.getTime();

#ifdef PROFILE
#ifdef HARD_DUMP
                if (h4_int.profile.ar_step_count>manager->ar_manager.step_count_max&&!dump_flag) {
                    std::cerr<<"Large H4-AR step cluster found (dump): step: "<<h4_int.profile.ar_step_count<<std::endl;
                    DATADUMP("dump_large_step");
                    dump_flag=true;
                }
#endif
#endif

#ifdef HARD_DEBUG_PRINT
                //PS::F64 dt_max = 0.0;
                //PS::S32 n_group = h4_int.getNGroup();
                //PS::S32 n_single = h4_int.getNSingle();
                //if (n_group>0) dt_max = h4_int.groups[h4_int.getSortDtIndexGroup()[n_group-1]].particles.cm.dt;
                //if (n_single>0) dt_max = std::max(dt_max, h4_int.particles[h4_int.getSortDtIndexSingle()[n_single-1]].dt);
                //ASSERT(dt_max>0.0);
                auto& h4_manager = manager->h4_manager;
                // periodic energy check and column output
                if (fmod(h4_int.getTimeInt(), h4_manager.step.getDtMax()/HARD_DEBUG_PRINT_FEQ)==0.0) {
                    h4_int.calcEnergySlowDown(false);

                    h4_int.printColumn(std::cout, WRITE_WIDTH, n_group_sub_init.getPointer(), n_group_sub_init.size(), n_group_sub_tot_init);
                    std::cout<<std::endl;

                    PS::F64 de_sd = h4_int.getEnergyErrorSlowDown();
                    if (abs(de_sd/std::max(1.0,abs(h4_int.getEtotSlowDownRef()))) > manager->energy_error_max) {
                        std::cerr<<"Hard energy significant!"
                                 <<" dE_SD/Etot_SD "<<de_sd/h4_int.getEtotSlowDownRef()
                                 <<" Ekin: "<<h4_int.getEkin()
                                 <<" Epot: "<<h4_int.getEpot()
                                 <<" Ekin_SD: "<<h4_int.getEkinSlowDown()
                                 <<" Epot_SD: "<<h4_int.getEpotSlowDown()
                                 <<" Etot_SD_ref: "<<h4_int.getEtotSlowDownRef()
                                 <<" dE: "<<h4_int.getEnergyError()
                                 <<" dE_change: "<<h4_int.getDEChangeCum()
                                 <<" dE_change_binary: "<<h4_int.getDEChangeBinaryInterrupt()
                                 <<" dE_change_single: "<<h4_int.getDEChangeModifySingle()
                                 <<" dE_SD: "<<de_sd
                                 <<" dE_SD_change: "<<h4_int.getDESlowDownChangeCum()
                                 <<" dE_SD_change_binary: "<<h4_int.getDESlowDownChangeBinaryInterrupt()
                                 <<" dE_SD_change_single: "<<h4_int.getDESlowDownChangeModifySingle()
                                 <<std::endl;
#ifdef HARD_DUMP
                        DATADUMP("hard_dump");
#endif
                    }
                }
                if (fmod(h4_int.getTimeInt(), h4_manager.step.getDtMax())==0.0) {
                    h4_int.printStepHist();
                }
#endif
            }

        }
        return interrupt_binary;
    }
791 
792 #ifdef HARD_COUNT_NO_NEIGHBOR
793  void checkNeighborExist() {
795  PS::S32 index_offset_group = h4_int.getIndexOffsetGroup();
796  PS::S32 n_act_single = h4_int.getNActSingle();
797  PS::S32* act_single_index = h4_int.getSortDtIndexSingle();
798  for (int k=0; k<n_act_single; k++) {
799  PS::S32 i = act_single_index[k];
800  ASSERT(i>=0&&i<h4_int.particles.getSize());
801  PS::S32 j = h4_int.neighbors[i].r_min_index;
802  ASSERT(j<h4_int.particles.getSizeMax()+h4_int.groups.getSize());
803  if (j<0) continue;
804  PS::F64 rij2 = h4_int.neighbors[i].r_min_sq;
805  PS::F64 r_out_i = h4_int.particles[i].changeover.getRout();
806  PS::F64 r_out_j = (j<index_offset_group)? h4_int.particles[j].changeover.getRout() : h4_int.groups[j-index_offset_group].particles.cm.changeover.getRout();
807  if (rij2<std::max(r_out_i, r_out_j)) table_neighbor_exist[i] = true;
808  }
809  PS::S32 n_act_group = h4_int.getNActGroup();
810  PS::S32* act_group_index = h4_int.getSortDtIndexGroup();
811  for (int k=0; k<n_act_group; k++) {
812  PS::S32 i = act_group_index[k];
813  ASSERT(i>=0&&i<h4_int.particles.getSizeMax()+h4_int.groups.getSize());
814  PS::S32 j = h4_int.groups[i].perturber.r_min_index;
815  ASSERT(j<h4_int.particles.getSizeMax()+h4_int.groups.getSize());
816  if (j<0) continue;
817  PS::F64 rij2 = h4_int.groups[i].perturber.r_min_sq;
818  PS::F64 r_out_i = h4_int.groups[i].particles.cm.changeover.getRout();
819  PS::F64 r_out_j = (j<index_offset_group)? h4_int.particles[j].changeover.getRout() : h4_int.groups[j-index_offset_group].particles.cm.changeover.getRout();
820  if (rij2<std::max(r_out_i, r_out_j)) {
821  for (int ki=0; ki<h4_int.groups[i].particles.getSize(); ki++) {
822  PS::S32 ki_index = h4_int.groups[i].info.particle_index[ki];
823  ASSERT(ki_index>=0&&ki_index<table_neighbor_exist.size());
824  table_neighbor_exist[ki_index] = true;
825  }
826  }
827  }
828  }
829 #endif
830 
832 
836  ASSERT(checkParams());
837 #ifdef HARD_CHECK_ENERGY
838  PS::F64 ekin, epot, ekin_sd, epot_sd;
839 #endif
840  if (use_sym_int) {
841  auto& pcm = sym_int.particles.cm;
842  pcm.pos += pcm.vel * _time_end;
843 
844  // update rsearch
845  ASSERT(!std::isinf(pcm.vel[0]));
846  ASSERT(!std::isnan(pcm.vel[0]));
847  pcm.Ptcl::calcRSearch(_time_end);
848  // copyback
849 
850 #ifdef HARD_CHECK_ENERGY
851  // correct cm kinetic energy
852  auto& bink = sym_int.info.getBinaryTreeRoot();
853  auto& vcm = pcm.vel;
854  Float dm = bink.mass - pcm.mass;
855  Float de_kin = 0.5*dm*(vcm[0]*vcm[0]+vcm[1]*vcm[1]+vcm[2]*vcm[2]);
856  auto& vbin = bink.vel;
857  de_kin += bink.mass*(vbin[0]*vcm[0]+vbin[1]*vcm[1]+vbin[2]*vcm[2]);
858 #endif
859  sym_int.particles.shiftToOriginFrame();
860  sym_int.particles.template writeBackMemberAll<PtclH4>();
861 
862  PS::S32 n_members = sym_int.particles.getSize();
863 
864  for (PS::S32 i=0; i<n_members; i++) {
865  auto& pi = ptcl_origin[i];
866 #ifdef STELLAR_EVOLUTION
867  if (pi.mass==0.0) {
868  ASSERT(pi.group_data.artificial.isUnused());
869  continue;
870  }
871 
872  // shift time interrupt in order to get consistent time for stellar evolution in the next drift
873  //pi.time_record -= _time_end;
874  //pi.time_interrupt -= _time_end;
875 #endif
876 
877  pi.r_search = std::max(pcm.r_search, pi.r_search);
878 #ifdef CLUSTER_VELOCITY
879  pi.group_data.cm.mass = pcm.mass;
880  pi.group_data.cm.vel[0] = pcm.vel[0];
881  pi.group_data.cm.vel[1] = pcm.vel[1];
882  pi.group_data.cm.vel[2] = pcm.vel[2];
883 #endif
884  ASSERT(!std::isinf(pi.pos[0]));
885  ASSERT(!std::isnan(pi.pos[0]));
886  ASSERT(!std::isinf(pi.vel[0]));
887  ASSERT(!std::isnan(pi.vel[0]));
888 
889 #ifdef HARD_DEBUG
890  ASSERT(ptcl_origin[i].r_search>ptcl_origin[i].changeover.getRout());
891 #ifdef BSE_BASE
892  ASSERT(pi.star.tphys<=time_origin+_time_end);
893 #endif
894 #endif
895  }
896 
897 
898 #ifdef PROFILE
899  ARC_substep_sum += sym_int.profile.step_count;
900 #endif
901 #ifdef HARD_CHECK_ENERGY
902  ekin = sym_int.getEkin();
903  epot = sym_int.getEpot();
904  energy.de = sym_int.getEnergyError();
905  ASSERT(!std::isnan(energy.de));
906  energy.de_change_cum = sym_int.getDEChangeBinaryInterrupt() + de_kin;
907  energy.de_change_binary_interrupt = sym_int.getDEChangeBinaryInterrupt();
908 #if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
909  ekin_sd = sym_int.getEkinSlowDown();
910  epot_sd = sym_int.getEpotSlowDown();
911  energy.de_sd = sym_int.getEnergyErrorSlowDown();
912  energy.de_sd_change_cum = sym_int.getDESlowDownChangeCum() + de_kin;
913  energy.de_sd_change_binary_interrupt = sym_int.getDESlowDownChangeBinaryInterrupt();
914 #else
915  ekin_sd = ekin;
916  epot_sd = epot;
917  energy.de_sd = energy.de;
918  energy.de_sd_change_cum = energy.de_change_cum;
920 #endif
921 #endif
922  }
923  else {
924 #ifdef HARD_CHECK_ENERGY
925  // PS: writeBackGroupMembers is done inside calcEnergySlowDown
926  h4_int.calcEnergySlowDown(false);
927 #else
928  h4_int.writeBackGroupMembers();
929 #endif
930  h4_int.particles.cm.pos += h4_int.particles.cm.vel * _time_end;
931 
932  h4_int.particles.shiftToOriginFrame();
933 
934 #ifdef HARD_CHECK_ENERGY
935  // update cm kinetic energy change
936  auto& pcm = h4_int.particles.cm;
937  Float vcm_org[3] = {pcm.vel[0], pcm.vel[1], pcm.vel[2]};
938  Float mcm_bk = pcm.mass;
939  h4_int.particles.calcCenterOfMass();
940  Float dm = pcm.mass - mcm_bk;
941  Float de_kin = 0.5*dm*(vcm_org[0]*vcm_org[0]+vcm_org[1]*vcm_org[1]+vcm_org[2]*vcm_org[2]);
942  Float dvcm[3] = {pcm.vel[0] - vcm_org[0], pcm.vel[1] - vcm_org[1], pcm.vel[2] - vcm_org[2]};
943  de_kin += pcm.mass*(dvcm[0]*vcm_org[0]+dvcm[1]*vcm_org[1]+dvcm[2]*vcm_org[2]);
944 
945  ekin = h4_int.getEkin();
946  epot = h4_int.getEpot();
947  energy.de += h4_int.getEnergyError(); // notice += should be used since de may change before when changeover potential energy correction exist
948  ASSERT(!std::isnan(energy.de));
949  energy.de_change_cum = h4_int.getDEChangeCum() + de_kin;
950  energy.de_change_binary_interrupt = h4_int.getDEChangeBinaryInterrupt();
951  energy.de_change_modify_single = h4_int.getDEChangeModifySingle();
952 #if (defined AR_SLOWDOWN_ARRAY) || (defined AR_SLOWDOWN_TREE)
953  ekin_sd = h4_int.getEkinSlowDown();
954  epot_sd = h4_int.getEpotSlowDown();
955  energy.de_sd += h4_int.getEnergyErrorSlowDown();
956  energy.de_sd_change_cum = h4_int.getDESlowDownChangeCum() + de_kin;
957  energy.de_sd_change_binary_interrupt = h4_int.getDESlowDownChangeBinaryInterrupt();
958  energy.de_sd_change_modify_single = h4_int.getDESlowDownChangeModifySingle();
959 #else
960  ekin_sd = ekin;
961  epot_sd = epot;
962  energy.de_sd = energy.de;
963  energy.de_sd_change_cum = energy.de_change_cum;
966 #endif
967 #endif
968 
969  // update research and group_data.cm
970  auto& h4_pcm = h4_int.particles.cm;
971  const PS::S32* group_index = h4_int.getSortDtIndexGroup();
972  for(PS::S32 i=0; i<h4_int.getNGroup(); i++) {
973  const PS::S32 k =group_index[i];
974 #ifdef HARD_DEBUG
975  ASSERT(h4_int.groups[k].particles.cm.changeover.getRout()>0);
976 #endif
977  //h4_int.groups[k].particles.cm.calcRSearch(_dt);
978  auto& pcm = h4_int.groups[k].particles.cm;
979  pcm.vel += h4_pcm.vel;
980 
981  //pcm.calcRSearch(h4_manager.interaction.G*(h4_pcm.mass-pcm.mass), abs(pcm.pot), h4_pcm.vel, _dt);
982  ASSERT(!std::isinf(pcm.vel[0]));
983  ASSERT(!std::isnan(pcm.vel[0]));
984  pcm.Ptcl::calcRSearch(_time_end);
985  const PS::S32 n_member = h4_int.groups[k].particles.getSize();
986  //const PS::S32 id_first = h4_int.groups[k].particles.getMemberOriginAddress(0)->id;
987  for (PS::S32 j=0; j<n_member; j++) {
988  auto* pj = h4_int.groups[k].particles.getMemberOriginAddress(j);
989  pj->r_search = std::max(pj->r_search, pcm.r_search);
990 #ifdef STELLAR_EVOLUTION
991  if (pj->mass==0.0) {
992  ASSERT(pj->group_data.artificial.isUnused());
993  continue;
994  }
995 #ifdef BSE_BASE
996  ASSERT(pj->star.tphys<=time_origin+_time_end);
997 #endif
998 
999  // shift time interrupt in order to get consistent time for stellar evolution in the next drift
1000  //pj->time_record -= _time_end;
1001  //pj->time_interrupt -= _time_end;
1002 #endif
1003 #ifdef CLUSTER_VELOCITY
1004  // save c.m. velocity and mass for neighbor search
1005  pj->group_data.cm.mass = pcm.mass;
1006  pj->group_data.cm.vel[0] = pcm.vel[0];
1007  pj->group_data.cm.vel[1] = pcm.vel[1];
1008  pj->group_data.cm.vel[2] = pcm.vel[2];
1009 #endif
1010 #ifdef HARD_DEBUG
1011  ASSERT(pj->r_search>pj->changeover.getRout());
1012 #endif
1013  ASSERT(!std::isinf(pj->pos[0]));
1014  ASSERT(!std::isnan(pj->pos[0]));
1015  ASSERT(!std::isinf(pj->vel[0]));
1016  ASSERT(!std::isnan(pj->vel[0]));
1017  }
1018  }
1019 
1020  const PS::S32* single_index = h4_int.getSortDtIndexSingle();
1021  for (PS::S32 i=0; i<h4_int.getNSingle(); i++) {
1022  auto& pi = h4_int.particles[single_index[i]];
1023 #ifdef STELLAR_EVOLUTION
1024  if (pi.mass==0.0) {
1025  ASSERT(pi.group_data.artificial.isUnused());
1026 #ifdef BSE_BASE
1027  ASSERT(pi.star.tphys<=time_origin+_time_end);
1028 #endif
1029 
1030  continue;
1031  }
1032 
1033  // shift time interrupt in order to get consistent time for stellar evolution in the next drift
1034  //pi.time_record -= _time_end;
1035  //pi.time_interrupt -= _time_end;
1036 #endif
1037 #ifdef CLUSTER_VELOCITY
1038  // set group_data.cm to 0.0 for singles
1039  pi.group_data.cm.mass = 0.0;
1040  pi.group_data.cm.vel[0] = 0.0;
1041  pi.group_data.cm.vel[1] = 0.0;
1042  pi.group_data.cm.vel[2] = 0.0;
1043 #endif
1044  ASSERT(!std::isinf(pi.pos[0]));
1045  ASSERT(!std::isnan(pi.pos[0]));
1046  ASSERT(!std::isinf(pi.vel[0]));
1047  ASSERT(!std::isnan(pi.vel[0]));
1048  pi.Ptcl::calcRSearch(_time_end);
1049 // pi.calcRSearch(h4_manager.interaction.G*(h4_pcm.mass-pi.mass), abs(pi.pot), h4_pcm.vel, _dt);
1050  }
1051 
1052 
1053 #ifdef PROFILE
1054  //ARC_substep_sum += Aint.getNsubstep();
1055  H4_step_sum += h4_int.profile.hermite_single_step_count + h4_int.profile.hermite_group_step_count;
1056  ARC_substep_sum += h4_int.profile.ar_step_count;
1057  ARC_tsyn_step_sum += h4_int.profile.ar_step_count_tsyn;
1058 
1059  if (h4_int.profile.ar_step_count>manager->ar_manager.step_count_max) {
1060  std::cerr<<"Large AR step cluster found: total step: "<<h4_int.profile.ar_step_count<<std::endl;
1061  //DATADUMP("dump_large_step");
1062  }
1063 #endif
1064 
1065 #ifdef HARD_COUNT_NO_NEIGHBOR
1066  for (PS::S32 i=0; i<table_neighbor_exist.size(); i++) {
1067  if(!table_neighbor_exist[i]) n_neighbor_zero++;
1068  }
1069 #endif
1070 
1071 #ifdef AR_DEBUG_PRINT
1072  for (PS::S32 i=0; i<h4_int.getNGroup(); i++) {
1073  const PS::S32 k= group_index[i];
1074  auto& groupk = h4_int.groups[k];
1075  auto& bink = groupk.info.getBinaryTreeRoot();
1076  std::cerr<<"Group k:"<<std::setw(2)<<k
1077  <<" N_member: "<<std::setw(4)<<groupk.particles.getSize()
1078  <<" step: "<<std::setw(12)<<groupk.profile.step_count_sum
1079  <<" step(tsyn): "<<std::setw(10)<<groupk.profile.step_count_tsyn_sum
1080 // <<" step(sum): "<<std::setw(12)<<h4_int.profile.ar_step_count
1081 // <<" step_tsyn(sum): "<<std::setw(12)<<h4_int.profile.ar_step_count_tsyn
1082  <<" Soft_Pert: "<<std::setw(20)<<groupk.perturber.soft_pert_min
1083  <<" Pert_In: "<<std::setw(20)<<bink.slowdown.getPertIn()
1084  <<" Pert_Out: "<<std::setw(20)<<bink.slowdown.getPertOut()
1085  <<" SD: "<<std::setw(20)<<bink.slowdown.getSlowDownFactor()
1086  <<" SD(org): "<<std::setw(20)<<bink.slowdown.getSlowDownFactorOrigin()
1087  <<" semi: "<<std::setw(20)<<bink.semi
1088  <<" ecc: "<<std::setw(20)<<bink.ecc
1089  <<" period: "<<std::setw(20)<<bink.period
1090  <<" NB: "<<std::setw(4)<<groupk.perturber.neighbor_address.getSize()
1091  <<std::endl;
1092 #ifdef HARD_DUMP
1093  if (groupk.profile.step_count_tsyn_sum>10000) {
1094  DATADUMP("hard_dump");
1095  }
1096 #endif
1097  }
1098 #endif
1099  }
1100 
1101 #ifdef HARD_CHECK_ENERGY
1102  energy.ekin_sd_correction = ekin_sd - ekin;
1103  energy.epot_sd_correction = epot_sd - epot;
1104  // exclude the slowdown energy change due to the turn off of slowdown
1105  energy.de_sd_change_cum -= energy.ekin_sd_correction + energy.epot_sd_correction;
1106 
1107 #ifdef HARD_DEBUG_PRINT
1108  std::cerr<<"Hard Energy: "
1109  <<" Ekin: "<<ekin
1110  <<" Ekin_SD: "<<ekin_sd
1111  <<" Epot: "<<epot
1112  <<" Epot_SD: "<<epot_sd
1113  <<" dE: "<<energy.de
1114  <<" dE_change: "<<energy.de_change_cum
1115  <<" dE_change_binary: "<<energy.de_change_binary_interrupt
1116  <<" dE_change_single: "<<energy.de_change_modify_single
1117  <<" dE_SD: "<<energy.de_sd
1118  <<" dE_SD_change: "<<energy.de_sd_change_cum
1119  <<" dE_SD_change_binary: "<<energy.de_sd_change_binary_interrupt
1120  <<" dE_SD_change_single: "<<energy.de_sd_change_modify_single
1121  <<" H4_step_sum: "<<H4_step_sum
1122  <<" ARC_substep_sum: "<<ARC_substep_sum
1123  <<" ARC_tsyn_step_sum: "<<ARC_tsyn_step_sum
1124  <<std::endl;
1125 #endif
1126  if (abs(energy.de_sd/std::max(1.0,abs(ekin_sd+epot_sd))) > manager->energy_error_max) {
1127  std::cerr<<"Hard energy significant !"
1128  <<" dE_SD: "<<energy.de_sd
1129  <<" dE_SD/Etot_SD: "<<energy.de_sd/(ekin_sd+epot_sd)
1130  <<std::endl;
1131 #ifdef HARD_DUMP
1132  DATADUMP("hard_large_energy");
1133 #endif
1134  //abort();
1135  }
1136 #endif
1137 
1138  }
1139 
1141 
    //! Print information of the interrupted binary to the given stream.
    /*! Reports which integrator triggered the interrupt ("(AR)" when the AR
        symplectic integrator is used for a single isolated group, "(Hermite)"
        otherwise), the interrupt time, then the binary c.m. columns and the
        columns of both members.
        @param[in,out] _fout: output stream to print to
     */
    void printInterruptBinaryInfo(std::ostream & _fout) const{
        _fout<<"Interrupt condition triggered! Time: ";
        if (use_sym_int) _fout<<"(AR) ";
        else _fout<<"(Hermite) ";
        _fout<<interrupt_binary.time_now<<std::endl;
        // binary c.m.: header line, then data line
        interrupt_binary.adr->printColumnTitle(_fout);
        _fout<<std::endl;
        interrupt_binary.adr->printColumn(_fout);
        _fout<<std::endl;
        // NOTE(review): the extracted listing skips one statement before this
        // endl (likely a printColumnTitle call for the member rows) -- confirm
        // against the upstream source.
        _fout<<std::endl;
        // the two binary members, one data line each
        for (int j=0; j<2; j++) {
            interrupt_binary.adr->getMember(j)->printColumn(_fout);
            _fout<<std::endl;
        }
    }
1160 
1162  void clear() {
1163  sym_int.clear();
1164  h4_int.clear();
1165  manager = NULL;
1166  tidal_tensor.resizeNoInitialize(0);
1167  time_origin = 0;
1168  ptcl_origin = NULL;
1169  interrupt_binary.clear();
1170  is_initialized = false;
1171 
1172 #ifdef PROFILE
1173  ARC_substep_sum = 0;
1174  ARC_tsyn_step_sum = 0;
1175  H4_step_sum = 0;
1176 #endif
1177 #ifdef HARD_COUNT_NO_NEIGHBOR
1178  table_neighbor_exist.resizeNoInitialize(0);
1179  n_neighbor_zero = 0;
1180 #endif
1181 #ifdef HARD_CHECK_ENERGY
1182  energy.clear();
1183 #endif
1184 
1185  }
1186 
1187 };
1188 
1191 private:
1192  PS::F64 time_origin_; // physical origin time
1193  PS::F64 time_write_back_; // time of writing back data
1194 
1195  PS::ReallocatableArray<PtclH4> ptcl_hard_; // particle data
1196  PS::ReallocatableArray<PS::S32> n_ptcl_in_cluster_; // number of particles in one cluster
1197  PS::ReallocatableArray<PS::S32> n_ptcl_in_cluster_disp_; // boundary of particle cluster
1198  PS::ReallocatableArray<PS::S32> n_group_in_cluster_; // number of groups in one cluster
1199  PS::ReallocatableArray<PS::S32> n_group_in_cluster_offset_; // boundary of groups in n_member_in_group_ and adr_first_ptcl_arti_in_cluster_
1200  PS::ReallocatableArray<PS::S32> n_member_in_group_; // number of members in each group
1201  PS::ReallocatableArray<PS::S32> n_member_in_group_offset_; // number of members in group index offest in each cluster (for n_meber_in_group_, notice in each cluster, the first offset is zero)
1202  PS::ReallocatableArray<PS::S32> adr_first_ptcl_arti_in_cluster_; // address of the first artificial particle in each groups
1203  PS::ReallocatableArray<PS::S32> i_cluster_changeover_update_; // cluster index that has member need changeover update
1204  PS::S32 n_group_member_remote_; // number of members in groups but in remote nodes
1205 
1206  HardIntegrator* hard_int_;
1207  PS::S32 n_hard_int_max_;
1208  PS::S32 n_hard_int_use_;
1209  PS::ReallocatableArray<HardIntegrator*> interrupt_list_;
1210  PS::F64 interrupt_dt_;
1211 
    //! Comparator ordering objects by ascending id_cluster (for use with std::sort).
    struct OPLessIDCluster{
        template<class T> bool operator() (const T & left, const T & right) const {
            return left.id_cluster < right.id_cluster;
        }
    };
1217 
1218 
1219 public:
1220  PS::ReallocatableArray<COMM::BinaryTree<PtclH4,COMM::Binary>> binary_table;
1222 
1223 #ifdef PROFILE
1224  PS::S64 ARC_substep_sum;
1225  PS::S64 ARC_tsyn_step_sum;
1226  PS::S64 ARC_n_groups;
1227  PS::S64 ARC_n_groups_iso;
1228  PS::S64 H4_step_sum;
1229 #endif
1230 #ifdef HARD_COUNT_NO_NEIGHBOR
1231  PS::S64 n_neighbor_zero;
1232 #endif
1233 #ifdef HARD_CHECK_ENERGY
1234  HardEnergy energy;
1235 #endif
1236 
    //! check whether the manager and the hard-integrator pool are ready
    /*! @return true when all checks pass; checks are ASSERT-based, so they may
        be compiled out in release builds.
     */
    bool checkParams() {
        ASSERT(manager!=NULL);
        // NOTE(review): the extracted listing skips one line here (possibly
        // ASSERT(manager->checkParams());) -- confirm against upstream source.
        ASSERT(hard_int_!=NULL);
        ASSERT(n_hard_int_max_>0);
        return true;
    }
1245 
1246 private:
1247 
1249 
    //! Iteration function: collect a group member's address and set its member parameters.
    /*! Records the member's index (relative to _par.adr_ref) into _par.group_list,
        marks the particle as a group member, and unifies its changeover radii with
        those of the group's center of mass.
        @param[in,out] _par: iteration parameter pack; uses group_list, counter n,
                             reference address adr_ref, c.m. artificial-particle id
                             pcm_id, c.m. changeover pointer and c.m. r_search
        @param[in,out] _ptcl: member particle; its artificial type flag, mass,
                              changeover and r_search may be modified
     */
    template <class Tchp, class Tptcl>
    static void collectGroupMemberAdrAndSetMemberParametersIter(Tchp& _par, Tptcl*& _ptcl) {
        // store the member index relative to the reference particle array
        _par.group_list[_par.n++] = _ptcl - _par.adr_ref;
#ifdef HARD_DEBUG
        assert(_ptcl->mass>0.0);
#endif
        if (_par.pcm_id<0) { // isolated binary case
            _ptcl->group_data.artificial.setParticleTypeToMember(_ptcl->mass);
        }
        else { // with artificial particles
            // -(pcm_id+1) links the member back to its c.m. artificial particle;
            // the mass passed in is kept as a backup (see getMassBackup users)
            _ptcl->group_data.artificial.setParticleTypeToMember(_ptcl->mass, - (_par.pcm_id+1));
            // the member mass is zeroed for the soft (tree) force calculation
            _ptcl->mass = 0.0;
        }
        // unify the member changeover with the c.m. changeover
        PS::F64 rin_cm = _par.changeover_cm->getRin();
        PS::F64 rin_p = _ptcl->changeover.getRin();
        if (rin_p!=rin_cm) {
            // avoid round-off error case
            if (abs(rin_p-rin_cm)<1e-10) {
                _ptcl->changeover = *_par.changeover_cm;
            }
            else {
                // defer the rescaling: r_scale_next is applied by a later
                // changeover-update pass; grow r_search to cover the c.m. value
                _ptcl->changeover.r_scale_next = rin_cm/rin_p;
                _ptcl->r_search = std::max(_ptcl->r_search, _par.rsearch_cm);
#ifdef ARTIFICIAL_PARTICLE_DEBUG
                if (_ptcl->r_search<rin_p) {
                    std::cerr<<"Error, r_search<r_out found! \n"
                             <<"id: "<<_ptcl->id
                             <<"bin_changeover: ";
                    _par.changeover_cm->print(std::cerr);
                    std::cerr<<"member changeover: ";
                    _ptcl->changeover.print(std::cerr);
                    std::cerr<<"member mass: "<<_ptcl->mass
                             <<"member r_search: "<<_ptcl->r_search
                             <<"cm research: "<<_par.rsearch_cm
                             <<std::endl;
                    abort();
                }
#endif
                // signal the caller that a changeover update pass is required
                _par.changeover_update_flag = true;
            }
        }
    }
1297 
1298 
1300 
1302  template <class Tchp, class Tptcl>
1303  static PS::S64 calcBinaryIDChangeOverAndRSearchIter (Tchp& _par, const PS::S64& _id1, const PS::S64& _id2, COMM::BinaryTree<Tptcl,COMM::Binary>& _bin) {
1304  // set bin id as the left member id
1305  // _id1==-1 is the initial status, once id is obtained, it is bin.id of left member
1306  if (_id1<0) _bin.id = _bin.getLeftMember()->id;
1307  else _bin.id = _id1;
1308  if (_id2<0) _bin.id = std::min(_bin.id, _bin.getRightMember()->id);
1309  else _bin.id = std::min(_bin.id, _id2);
1310 #ifdef ARTIFICIAL_PARTICLE_DEBUG
1311  assert(_bin.id>0);
1312 #endif
1313  //set changeover to the same (root) one
1314  _bin.changeover.setR(_bin.mass*_par.mean_mass_inv, _par.rin, _par.rout);
1315 
1316  // calculate rsearch
1317  assert(!std::isinf(_bin.vel[0]));
1318  assert(!std::isnan(_bin.vel[0]));
1319  _bin.Ptcl::calcRSearch(_par.dt_tree);
1320 
1321  return _bin.id;
1322  }
1323 
1325 
    //! Correct soft force from one neighbor (Ptcl type) computed with linear cutoff
    /*! The tree (soft) force was accumulated with a linear cutoff; this removes
        that contribution and adds the changeover-smoothed short-range force.
        The soft and total potentials are corrected according to the artificial
        particle type of the j-particle.
        @param[in,out] _pi: i-particle; acc, pot_soft and pot_tot are updated
        @param[in] _pj: j-particle providing mass, position, changeover and group_data
     */
    template <class Tpi>
    static void calcAccPotShortWithLinearCutoff(Tpi& _pi,
                                                const Ptcl& _pj) {
        const PS::F64 G = ForceSoft::grav_const;
        const PS::F64 eps_sq = EPISoft::eps * EPISoft::eps;

        const PS::F64vec dr = _pi.pos - _pj.pos;
        const PS::F64 dr2 = dr * dr;
        const PS::F64 dr2_eps = dr2 + eps_sq;          // softened separation squared
        const PS::F64 drinv = 1.0/sqrt(dr2_eps);
        PS::F64 gmor = G*_pj.mass * drinv;             // G m_j / r (non-const: re-set for members)
        const PS::F64 drinv2 = drinv * drinv;
        const PS::F64 gmor3 = gmor * drinv2;           // G m_j / r^3
        const PS::F64 dr_eps = drinv * dr2_eps;        // softened separation |dr|
        // changeover factor for acceleration: 1-W leaves the short-range part
        const PS::F64 k = 1.0 - ChangeOver::calcAcc0WTwo(_pi.changeover, _pj.changeover, dr_eps);

        // linear cutoff
#if ((! defined P3T_64BIT) && (defined USE_SIMD)) || (defined USE_GPU)
        // redo the cutoff term in single precision, mirroring the SIMD/GPU tree
        // kernel so the subtracted value matches what was actually accumulated
        const PS::F32 r_out_32 = EPISoft::r_out;
        const PS::F32 r_out2 = r_out_32 * r_out_32;
        PS::F32vec ri_32 = PS::F32vec(_pi.pos.x, _pi.pos.y, _pi.pos.z);
        PS::F32vec rj_32 = PS::F32vec(_pj.pos.x, _pj.pos.y, _pj.pos.z);
        PS::F32vec dr_32 = ri_32 - rj_32;
        PS::F32 dr2_eps_32 = dr_32*dr_32 + (PS::F32)eps_sq;
        const PS::F32 dr2_max = (dr2_eps_32 > r_out2) ? dr2_eps_32 : r_out2;
        const PS::F32 drinv_max = 1.0/sqrt(dr2_max);
        const PS::F32 gmor_max = G*_pj.mass * drinv_max;
        const PS::F32 drinv2_max = drinv_max*drinv_max;
        const PS::F32 gmor3_max = gmor_max * drinv2_max;

        // correct to changeover soft acceleration
        _pi.acc -= gmor3*k*dr - gmor3_max*dr_32;
#else
        const PS::F64 r_out = EPISoft::r_out;
        const PS::F64 r_out2 = r_out * r_out;
        // inside r_out the cutoff force used 1/r_out; outside it is plain Newtonian
        const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
        const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
        const PS::F64 gmor_max = G*_pj.mass * drinv_max;
        const PS::F64 drinv2_max = drinv_max*drinv_max;
        const PS::F64 gmor3_max = gmor_max * drinv2_max;

        // correct to changeover soft acceleration
        _pi.acc -= (gmor3*k - gmor3_max)*dr;
#endif

        auto& pj_artificial = _pj.group_data.artificial;
        // changeover factor for the potential
        const PS::F64 kpot = 1.0 - ChangeOver::calcPotWTwo(_pi.changeover, _pj.changeover, dr_eps);
        // single, remove linear cutoff, obtain changeover soft and total potential
        if (pj_artificial.isSingle()) {
            //_pi.pot_soft -= dr2_eps>r_out2? 0.0: (gmor*kpot - gmor_max);
            _pi.pot_soft -= gmor*kpot - gmor_max;
            _pi.pot_tot -= (gmor - gmor_max);
        }
        // member, mass is zero, use backup mass
        else if (pj_artificial.isMember()) {
            gmor = G*pj_artificial.getMassBackup()*drinv;
            //_pi.pot_soft -= dr2_eps>r_out2? 0.0: (gmor*kpot - gmor_max);
            _pi.pot_soft -= gmor*kpot - gmor_max;
            _pi.pot_tot -= (gmor - gmor_max);
        }
        // (orbitial) artificial, should be excluded in potential calculation, since it is inside neighbor, gmor_max cancel it to 0.0
        else {
            _pi.pot_soft += gmor_max;
            _pi.pot_tot += gmor_max;
        }
#ifdef ONLY_SOFT
        _pi.pot_tot = _pi.pot_soft;
#endif
    }
1398 
1400 
    //! Correct soft force from one neighbor (EPJSoft type) computed with linear cutoff
    /*! Same correction as the Ptcl overload, but the j-particle is a tree EPJ
        whose changeover must be reconstructed from its r_in/r_out members.
        @param[in,out] _pi: i-particle; acc, pot_soft and pot_tot are updated
        @param[in] _pj: tree essential j-particle (mass, position, r_in, r_out, group_data)
     */
    template <class Tpi>
    static void calcAccPotShortWithLinearCutoff(Tpi& _pi,
                                                const EPJSoft& _pj) {
        const PS::F64 G = ForceSoft::grav_const;
        const PS::F64 eps_sq = EPISoft::eps * EPISoft::eps;

        const PS::F64vec dr = _pi.pos - _pj.pos;
        const PS::F64 dr2 = dr * dr;
#ifdef HARD_DEBUG
        assert(dr2>0.0);
#endif
        const PS::F64 dr2_eps = dr2 + eps_sq;          // softened separation squared
        const PS::F64 drinv = 1.0/sqrt(dr2_eps);
        PS::F64 gmor = G*_pj.mass * drinv;             // G m_j / r (non-const: re-set for members)
        const PS::F64 drinv2 = drinv * drinv;
        const PS::F64 gmor3 = gmor * drinv2;           // G m_j / r^3
        const PS::F64 dr_eps = drinv * dr2_eps;        // softened separation |dr|
        // rebuild the j-particle changeover from the EPJ radii
        ChangeOver chj;
        chj.setR(_pj.r_in, _pj.r_out);
        const PS::F64 k = 1.0 - ChangeOver::calcAcc0WTwo(_pi.changeover, chj, dr_eps);

        // linear cutoff
#if ((! defined P3T_64BIT) && (defined USE_SIMD)) || (defined USE_GPU)
        // redo the cutoff term in single precision, mirroring the SIMD/GPU tree
        // kernel so the subtracted value matches what was actually accumulated
        const PS::F32 r_out_32 = EPISoft::r_out;
        const PS::F32 r_out2 = r_out_32 * r_out_32;
        PS::F32vec ri_32 = PS::F32vec(_pi.pos.x, _pi.pos.y, _pi.pos.z);
        PS::F32vec rj_32 = PS::F32vec(_pj.pos.x, _pj.pos.y, _pj.pos.z);
        PS::F32vec dr_32 = ri_32 - rj_32;
        PS::F32 dr2_eps_32 = dr_32*dr_32 + (PS::F32)eps_sq;
        const PS::F32 dr2_max = (dr2_eps_32 > r_out2) ? dr2_eps_32 : r_out2;
        const PS::F32 drinv_max = 1.0/sqrt(dr2_max);
        const PS::F32 gmor_max = G*_pj.mass * drinv_max;
        const PS::F32 drinv2_max = drinv_max*drinv_max;
        const PS::F32 gmor3_max = gmor_max * drinv2_max;

        // correct to changeover soft acceleration
        _pi.acc -= gmor3*k*dr - gmor3_max*dr_32;
#else
        const PS::F64 r_out = EPISoft::r_out;
        const PS::F64 r_out2 = r_out * r_out;
        // inside r_out the cutoff force used 1/r_out; outside it is plain Newtonian
        const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
        const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
        const PS::F64 gmor_max = G*_pj.mass * drinv_max;
        const PS::F64 drinv2_max = drinv_max*drinv_max;
        const PS::F64 gmor3_max = gmor_max * drinv2_max;

        // correct to changeover soft acceleration
        _pi.acc -= (gmor3*k - gmor3_max)*dr;
#endif
        auto& pj_artificial = _pj.group_data.artificial;
        // changeover factor for the potential
        const PS::F64 kpot = 1.0 - ChangeOver::calcPotWTwo(_pi.changeover, chj, dr_eps);
        // single, remove linear cutoff, obtain changeover soft and total potential
        if (pj_artificial.isSingle()) {
            //_pi.pot_soft -= dr2_eps>r_out2? 0.0: (gmor*kpot - gmor_max);
            _pi.pot_soft -= gmor*kpot - gmor_max;
            _pi.pot_tot -= (gmor - gmor_max);
        }
        // member, mass is zero, use backup mass
        else if (pj_artificial.isMember()) {
            gmor = G*pj_artificial.getMassBackup()*drinv;
            //_pi.pot_soft -= dr2_eps>r_out2? 0.0: (gmor*kpot - gmor_max);
            _pi.pot_soft -= gmor*kpot - gmor_max;
            _pi.pot_tot -= (gmor - gmor_max);
        }
        // (orbitial) artificial, should be excluded in potential calculation, since it is inside neighbor, gmor_max cancel it to 0.0
        else {
            _pi.pot_soft += gmor_max;
            _pi.pot_tot += gmor_max;
        }
#ifdef ONLY_SOFT
        _pi.pot_tot = _pi.pot_soft;
#endif
    }
1477 
1479 
1483  template <class Tpi>
1484  static void calcAccChangeOverCorrection(Tpi& _pi,
1485  const Ptcl& _pj) {
1486  const PS::F64 G = ForceSoft::grav_const;
1487  const PS::F64 eps_sq = EPISoft::eps * EPISoft::eps;
1488 
1489  const PS::F64vec dr = _pi.pos - _pj.pos;
1490  const PS::F64 dr2 = dr * dr;
1491 #ifdef HARD_DEBUG
1492  assert(dr2>0.0);
1493 #endif
1494  const PS::F64 dr2_eps = dr2 + eps_sq;
1495  const PS::F64 drinv = 1.0/sqrt(dr2_eps);
1496  const PS::F64 gmor = G*_pj.mass * drinv;
1497  const PS::F64 drinv2 = drinv * drinv;
1498  const PS::F64 gmor3 = gmor * drinv2;
1499  const PS::F64 dr_eps = drinv * dr2_eps;
1500 
1501  // old
1502  const PS::F64 kold = 1.0 - ChangeOver::calcAcc0WTwo(_pi.changeover, _pj.changeover, dr_eps);
1503 
1504  // new
1505  ChangeOver chinew, chjnew;
1506  chinew.setR(_pi.changeover.getRin()*_pi.changeover.r_scale_next, _pi.changeover.getRout()*_pi.changeover.r_scale_next);
1508  const PS::F64 knew = 1.0 - ChangeOver::calcAcc0WTwo(chinew, chjnew, dr_eps);
1509 
1510  // correct to changeover soft acceleration
1511  _pi.acc -= gmor3*(knew-kold)*dr;
1512  }
1513 
1515 
1519  template <class Tpi>
1520  static void calcAccChangeOverCorrection(Tpi& _pi,
1521  const EPJSoft& _pj) {
1522  const PS::F64 G = ForceSoft::grav_const;
1523  const PS::F64 eps_sq = EPISoft::eps * EPISoft::eps;
1524 
1525  const PS::F64vec dr = _pi.pos - _pj.pos;
1526  const PS::F64 dr2 = dr * dr;
1527  const PS::F64 dr2_eps = dr2 + eps_sq;
1528  const PS::F64 drinv = 1.0/sqrt(dr2_eps);
1529  const PS::F64 gmor = G*_pj.mass * drinv;
1530  const PS::F64 drinv2 = drinv * drinv;
1531  const PS::F64 gmor3 = gmor * drinv2;
1532  const PS::F64 dr_eps = drinv * dr2_eps;
1533 
1534  ChangeOver chjold;
1535  chjold.setR(_pj.r_in, _pj.r_out);
1536  // old
1537  const PS::F64 kold = 1.0 - ChangeOver::calcAcc0WTwo(_pi.changeover, chjold, dr_eps);
1538 
1539  // new
1540  ChangeOver chinew, chjnew;
1541  chinew.setR(_pi.changeover.getRin()*_pi.changeover.r_scale_next, _pi.changeover.getRout()*_pi.changeover.r_scale_next);
1542  chjnew.setR(_pj.r_in*_pj.r_scale_next, _pj.r_out*_pj.r_scale_next);
1543  const PS::F64 knew = 1.0 - ChangeOver::calcAcc0WTwo(chinew, chjnew, dr_eps);
1544 
1545  // correct to changeover soft acceleration
1546  _pi.acc -= gmor3*(knew-kold)*dr;
1547  }
1548 
1549 #ifdef KDKDK_4TH
1550  template <class Tpi>
1551  static void calcAcorrShortWithLinearCutoff(Tpi& _pi,
1552  const Ptcl& _pj) {
1553  const PS::F64 G = ForceSoft::grav_const;
1554  const PS::F64 eps_sq = EPISoft::eps * EPISoft::eps;
1555 
1556  const PS::F64vec dr = _pi.pos - _pj.pos;
1557  const PS::F64vec da = _pi.acc - _pi.acc;
1558  const PS::F64 dr2 = dr * dr;
1559  const PS::F64 dr2_eps = dr2 + eps_sq;
1560  const PS::F64 drda = dr*da;
1561  const PS::F64 drinv = 1.0/sqrt(dr2_eps);
1562  const PS::F64 drdadrinv = drda*drinv;
1563  const PS::F64 gmor = G*_pj.mass * drinv;
1564  const PS::F64 drinv2 = drinv * drinv;
1565  const PS::F64 gmor3 = gmor * drinv2;
1566  const PS::F64 dr_eps = drinv * dr2_eps;
1567  const PS::F64 alpha = drda*drinv2;
1568 
1569  const PS::F64 k = 1.0 - ChangeOver::calcAcc0WTwo(_pi.changeover, _pj.changeover, dr_eps);
1570  const PS::F64 kdot = - ChangeOver::calcAcc1WTwo(_pi.changeover, _pj.changeover, dr_eps, drdadrinv);
1571 
1572  // linear cutoff
1573 #if ((! defined P3T_64BIT) && (defined USE_SIMD)) || (defined USE_GPU)
1574  const PS::F32 r_out_32 = EPISoft::r_out;
1575  const PS::F32 r_out2 = r_out_32 * r_out_32;
1576  PS::F32vec ri_32 = PS::F32vec(_pi.pos.x, _pi.pos.y, _pi.pos.z);
1577  PS::F32vec rj_32 = PS::F32vec(_pj.pos.x, _pj.pos.y, _pj.pos.z);
1578  PS::F32vec ai_32 = PS::F32vec(_pi.acc.x, _pi.acc.y, _pi.acc.z);
1579  PS::F32vec aj_32 = PS::F32vec(_pj.acc.x, _pj.acc.y, _pj.acc.z);
1580  PS::F32vec dr_32 = ri_32 - rj_32;
1581  PS::F32vec da_32 = ai_32 - aj_32;
1582  PS::F32 dr2_eps_32 = dr_32*dr_32 + (PS::F32)eps_sq;
1583  const PS::F32 drda_32 = dr_32*da_32;
1584  const PS::F32 dr2_max = (dr2_eps_32 > r_out2) ? dr2_eps_32 : r_out2;
1585  const PS::F32 drinv_max = 1.0/sqrt(dr2_max);
1586  const PS::F32 gmor_max = G*_pj.mass * drinv_max;
1587  const PS::F32 drinv2_max = drinv_max*drinv_max;
1588  const PS::F32 gmor3_max = gmor_max * drinv2_max;
1589  const PS::F32 alpha_max = drda_32 * drinv2_max;
1590  const PS::F32vec acorr_max = gmor3_max * (da_32 - 3.0*alpha_max * dr_32);
1591 #else
1592  const PS::F64 r_out = EPISoft::r_out;
1593  const PS::F64 r_out2 = r_out * r_out;
1594  const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
1595  const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
1596  const PS::F64 gmor_max = G*_pj.mass * drinv_max;
1597  const PS::F64 drinv2_max = drinv_max*drinv_max;
1598  const PS::F64 gmor3_max = gmor_max * drinv2_max;
1599  const PS::F64 alpha_max = drda * drinv2_max;
1600  const PS::F64vec acorr_max = gmor3_max * (da - 3.0*alpha_max * dr);
1601 #endif
1602 
1603  const PS::F64vec acorr_k = gmor3 * (k*da - (3.0*k*alpha - kdot) * dr);
1604 
1605  _pi.acorr -= 2.0 * (acorr_k - acorr_max);
1606  //acci + dt_kick * dt_kick * acorri /48;
1607  }
1608 
1609  template <class Tpi>
1610  static void calcAcorrShortWithLinearCutoff(Tpi& _pi,
1611  const EPJSoft& _pj) {
1612  const PS::F64 G = ForceSoft::grav_const;
1613  const PS::F64 eps_sq = EPISoft::eps * EPISoft::eps;
1614  const PS::F64 r_out = EPISoft::r_out;
1615  const PS::F64 r_out2 = r_out * r_out;
1616 
1617  const PS::F64vec dr = _pi.pos - _pj.pos;
1618  const PS::F64vec da = _pi.acc - _pi.acc;
1619  const PS::F64 dr2 = dr * dr;
1620  const PS::F64 dr2_eps = dr2 + eps_sq;
1621  const PS::F64 drda = dr*da;
1622  const PS::F64 drinv = 1.0/sqrt(dr2_eps);
1623  const PS::F64 drdadrinv = drda*drinv;
1624  const PS::F64 gmor = G*_pj.mass * drinv;
1625  const PS::F64 drinv2 = drinv * drinv;
1626  const PS::F64 gmor3 = gmor * drinv2;
1627  const PS::F64 dr_eps = drinv * dr2_eps;
1628  ChangeOver chj;
1629  chj.setR(_pj.r_in, _pj.r_out);
1630  const PS::F64 k = 1.0 - ChangeOver::calcAcc0WTwo(_pi.changeover, chj, dr_eps);
1631  const PS::F64 kdot = - ChangeOver::calcAcc1WTwo(_pi.changeover, chj, dr_eps, drdadrinv);
1632 
1633  const PS::F64 dr2_max = (dr2_eps > r_out2) ? dr2_eps : r_out2;
1634  const PS::F64 drinv_max = 1.0/sqrt(dr2_max);
1635  const PS::F64 gmor_max = G*_pj.mass * drinv_max;
1636  const PS::F64 drinv2_max = drinv_max*drinv_max;
1637  const PS::F64 gmor3_max = gmor_max * drinv2_max;
1638 
1639  const PS::F64 alpha = drda*drinv2;
1640  const PS::F64 alpha_max = drda * drinv2_max;
1641  const PS::F64vec acorr_k = gmor3 * (k*da - (3.0*k*alpha - kdot) * dr);
1642  const PS::F64vec acorr_max = gmor3_max * (da - 3.0*alpha_max * dr);
1643 
1644  _pi.acorr -= 2.0 * (acorr_k - acorr_max);
1645  //acci + dt_kick * dt_kick * acorri /48;
1646  }
1647 #endif
1648 
1650  /*
1651  @param[in,out] _psoft: particle in global system need to be corrected for acc and pot
1652  @param[in] _tree: tree for force
1653  @param[in] _acorr_flag: flag to do acorr for KDKDK_4TH case
1654  */
1655  template <class Tpsoft, class Ttree, class Tepj>
1656  static void correctForceWithCutoffTreeNeighborOneParticleImp(Tpsoft& _psoft,
1657  Ttree& _tree,
1658  const bool _acorr_flag=false) {
1660  Tepj * ptcl_nb = NULL;
1661  PS::S32 n_ngb = _tree.getNeighborListOneParticle(_psoft, ptcl_nb);
1662 #ifdef HARD_DEBUG
1663  assert(n_ngb >= 1);
1664 #endif
1665  // self-potential correction
1666  // no correction for orbital artificial particles because the potential are not used for any purpose
1667  // no correction for member particles because their mass is zero during the soft force calculation, the self-potential contribution is also zero.
1668  // for binary without artificial particles, correction is needed.
1669  if (_psoft.group_data.artificial.isSingle() || (_psoft.group_data.artificial.isMember() && _psoft.getParticleCMAddress()<0)) {
1670  PS::F64 pot_cor = G*_psoft.mass/EPISoft::r_out;
1671  _psoft.pot_tot += pot_cor;
1672  _psoft.pot_soft += pot_cor;
1673 
1674  }
1675 
1676  // loop neighbors
1677  for(PS::S32 k=0; k<n_ngb; k++){
1678  if (ptcl_nb[k].id == _psoft.id) continue;
1679 
1680 #ifdef KDKDK_4TH
1681  if(_acorr_flag)
1682  calcAcorrShortWithLinearCutoff(_psoft, ptcl_nb[k]);
1683  else
1684 #endif
1685  calcAccPotShortWithLinearCutoff(_psoft, ptcl_nb[k]);
1686  }
1687 //#ifdef STELLAR_EVOLUTION
1688 // // correct soft potential energy of one particle change due to mass change
1689 // correctSoftPotMassChangeOneParticle(_psoft);
1690 //#endif
1691  }
1692 
1694  /* 1. Correct cutoff for artificial particles
1695  2. The c.m. force is substracted from tidal tensor force
1696  @param[in,out] _sys: global particle system, acc is updated
1697  @param[in] _ptcl_local: particle in systme_hard
1698  @param[in] _adr_real_start: real particle start address in _ptcl_local
1699  @param[in] _adr_real_end: real particle end (+1) address in _ptcl_local
1700  @param[in] _n_group: number of groups in cluster
1701  @param[in] _adr_first_ptcl_arti_in_cluster: address of the first artificial particle in each groups
1702  @param[in] _acorr_flag: flag to do acorr for KDKDK_4TH case
1703  */
    template <class Tsys>
    void correctForceWithCutoffArtificialOneClusterImp(Tsys& _sys,
                                                       const PtclH4* _ptcl_local,
                                                       const PS::S32 _adr_real_start,
                                                       const PS::S32 _adr_real_end,
                                                       const PS::S32 _n_group,
                                                       const PS::S32* adr_first_ptcl_arti_in_cluster_,
                                                       const bool _acorr_flag) {

        auto& ap_manager = manager->ap_manager;
        for (int j=0; j<_n_group; j++) {  // j: j_group
            PS::S32 j_start = adr_first_ptcl_arti_in_cluster_[j];
#ifdef ARTIFICIAL_PARTICLE_DEBUG
            // a negative start address is only legal for a single isolated binary
            if (j_start<0) assert(_n_group==1&&_adr_real_end-_adr_real_start==2);
#endif
            // group without artificial particles: nothing to correct
            if (j_start<0) continue;
            auto* p_arti_j = &(_sys[j_start]);
            auto* pj = ap_manager.getTidalTensorParticles(p_arti_j);

            // correct pj[k] against all orbital artificial particles of every
            // group in this cluster and against all real particles.
            // NOTE: pj is captured by reference, so reassigning pj below (c.m.
            // case) retargets the lambda.
            auto correctionLoop = [&](const PS::S32 k) {
                // loop orbital artificial particle
                for (int kj=0; kj<_n_group; kj++) { // group
                    PS::S32 kj_start = adr_first_ptcl_arti_in_cluster_[kj];
                    if (kj_start<0) continue;
                    auto* porb_kj = ap_manager.getOrbitalParticles(&_sys[kj_start]);

                    // particle arti orbital
                    for (int kk=0; kk<ap_manager.getOrbitalParticleN(); kk++) {
                        if(&porb_kj[kk]==&(pj[k])) continue; //avoid same particle
#ifdef KDKDK_4TH
                        if(_acorr_flag)
                            calcAcorrShortWithLinearCutoff(pj[k], porb_kj[kk]);
                        else
#endif
                            calcAccPotShortWithLinearCutoff(pj[k], porb_kj[kk]);
                    }
                }

                // loop real particle
                for (int kj=_adr_real_start; kj<_adr_real_end; kj++) {
#ifdef KDKDK_4TH
                    if(_acorr_flag) {
                        // acorr needs the global-system copy (acc is stored there)
                        PS::S64 adr_kj = _ptcl_local[kj].adr_org;
                        calcAcorrShortWithLinearCutoff(pj[k], _sys[adr_kj]);
                    }
                    else
#endif
                        calcAccPotShortWithLinearCutoff(pj[k], _ptcl_local[kj]);
                }
            };

            // loop all tidal tensor particles
            for (int k=0; k<ap_manager.getTidalTensorParticleN(); k++) {
                // k: k_ptcl_arti
                correctionLoop(k);
            }

            // for c.m. particle (retarget pj, then correct the single c.m. entry)
            pj = ap_manager.getCMParticles(p_arti_j);
            correctionLoop(0);

            // subtract the c.m. force from the tidal tensor particle forces
            ap_manager.correctArtficialParticleForce(p_arti_j);
        }
    }
1768 
1770  /* @param[in,out] _sys: global particle system, acc is updated
1771  @param[in] _tree: tree for force
1772  @param[in] _ptcl_local: particle in systme_hard, only used to get adr_org
1773  @param[in] _n_ptcl: total number of particles in all clusters
1774  @param[in] _adr_ptcl_artificial_start: start address of artificial particle in _sys
1775  @param[in] _acorr_flag: flag to do acorr for KDKDK_4TH case
1776  */
    template <class Tsys, class Tpsoft, class Ttree, class Tepj>
    void correctForceWithCutoffTreeNeighborImp(Tsys& _sys,
                                               Ttree& _tree,
                                               const PtclH4* _ptcl_local,
                                               const PS::S32 _n_ptcl,
                                               const PS::S32 _adr_ptcl_artificial_start,
                                               const bool _acorr_flag=false) {
        // for real particle: each cluster member with a valid global address
        // gets the neighbor-based cutoff correction (independent per particle,
        // safe to parallelize)
#pragma omp parallel for schedule(dynamic)
        for (int i=0; i<_n_ptcl; i++) {
            PS::S64 adr = _ptcl_local[i].adr_org;
            if(adr>=0) correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[adr], _tree, _acorr_flag);
        }

        // for artificial particle: all particles from the artificial start
        // address to the end of the local system
        const PS::S32 n_tot = _sys.getNumberOfParticleLocal();
#pragma omp parallel for schedule(dynamic)
        for (int i=_adr_ptcl_artificial_start; i<n_tot; i++)
            correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[i], _tree, _acorr_flag);

        auto& ap_manager = manager->ap_manager;
        const PS::S32 n_artificial_per_group = ap_manager.getArtificialParticleN();
#ifdef HARD_DEBUG
        // the artificial tail must consist of whole groups
        assert((n_tot-_adr_ptcl_artificial_start)%n_artificial_per_group==0);
#endif
        // per group: subtract the c.m. force from the tidal tensor forces
#pragma omp parallel for schedule(dynamic)
        for (int i=_adr_ptcl_artificial_start; i<n_tot; i+=n_artificial_per_group)
            ap_manager.correctArtficialParticleForce(&(_sys[i]));

    }
1807 
1808 public:
1809 
        // default state: no manager attached, no hard-integrator pool allocated
        manager = NULL;
        hard_int_ = NULL;
        n_hard_int_max_ = 0;
        n_hard_int_use_ = 0;

#ifdef PROFILE
        // reset profiling counters
        ARC_substep_sum = 0;
        ARC_tsyn_step_sum =0;
        ARC_n_groups = 0;
        ARC_n_groups_iso = 0;
        H4_step_sum = 0;
#endif
#ifdef HARD_COUNT_NO_NEIGHBOR
        n_neighbor_zero = 0;
#endif
#ifdef HARD_CHECK_ENERGY
        energy.clear();
#endif
        // PS::S32 n_threads = PS::Comm::getNumberOfThread();
    }
1831 
1833 
1836  void allocateHardIntegrator(const PS::S32 _n_hard_int) {
1837  assert(_n_hard_int>0);
1838  assert(n_hard_int_use_==0);
1839  if (hard_int_!=NULL) delete [] hard_int_;
1840  const PS::S32 num_thread = PS::Comm::getNumberOfThread();
1841  n_hard_int_max_ = _n_hard_int + num_thread;
1842  hard_int_ = new HardIntegrator[n_hard_int_max_];
1843  n_hard_int_use_ = 0;
1844  }
1845 
        // release the hard-integrator pool if it was allocated
        if (hard_int_!=NULL) delete [] hard_int_;
    }
1849 
1850 
#ifdef HARD_DEBUG
        assert(n<ARRAY_ALLOW_LIMIT);
#endif
        // resize without constructing elements; contents are filled in later
        ptcl_hard_.resizeNoInitialize(n);
    }
1857 
    // for NON-ISOLATED CLUSTER
    //! Collect hard particles of clusters that span MPI ranks
    /*! Gather local members (selected from sys via the mediator list) and
        received remote members (ptcl_recv) into ptcl_hard_, sort them by
        cluster id, then build per-cluster member counts and displacements.
        @param[in] sys: global particle system
        @param[in] med: mediator list mapping cluster members to local addresses
        @param[in] ptcl_recv: particles received from other ranks
    */
    template<class Tsys, class Tptcl, class Tmediator>
    void setPtclForConnectedCluster(const Tsys & sys,
                                    const PS::ReallocatableArray<Tmediator> & med,
                                    const PS::ReallocatableArray<Tptcl> & ptcl_recv){
        ptcl_hard_.clearSize();
        n_ptcl_in_cluster_.clearSize(); // clear before break this function
        for(PS::S32 i=0; i<med.size(); i++){
            // skip mediators without a local system address or owned by another rank
            if(med[i].adr_sys_ < 0) continue;
            if(med[i].rank_send_ != PS::Comm::getRank()) continue;
            const auto & p = sys[med[i].adr_sys_];
            ptcl_hard_.push_back(PtclHard(p, med[i].id_cluster_, med[i].adr_sys_));
#ifdef HARD_DEBUG
            assert(med[i].adr_sys_<sys.getNumberOfParticleLocal());
            if(p.mass==0&&p.group_data.artificial.isUnused()) {
                std::cerr<<"Error: unused particle is selected! i="<<i<<"; med[i].adr_sys="<<med[i].adr_sys_<<std::endl;
                abort();
            }
#endif
        }

        // received remote members get a negative adr_org (-(i+1)) to mark that
        // they have no entry in the local system
        for(PS::S32 i=0; i<ptcl_recv.size(); i++){
            const Tptcl & p = ptcl_recv[i];
            ptcl_hard_.push_back(PtclHard(p, p.id_cluster, -(i+1)));
#ifdef HARD_DEBUG
            if(p.mass==0&&p.group_data.artificial.isUnused()) {
                std::cerr<<"Error: receive usused particle! i="<<i<<std::endl;
                abort();
            }
#endif
        }

        if(ptcl_hard_.size() == 0) return;
        // group members of the same cluster together
        std::sort(ptcl_hard_.getPointer(), ptcl_hard_.getPointer(ptcl_hard_.size()),
                  OPLessIDCluster());
        // count members per cluster: runs of equal id_cluster after the sort
        PS::S32 n_tot = ptcl_hard_.size();
        PS::S32 id_cluster_ref = -999;
        for(PS::S32 i=0; i<n_tot; i++){
            if(id_cluster_ref != ptcl_hard_[i].id_cluster){
                id_cluster_ref = ptcl_hard_[i].id_cluster;
                n_ptcl_in_cluster_.push_back(0);
            }
            n_ptcl_in_cluster_.back()++;
        }
        PS::S32 n_cluster = n_ptcl_in_cluster_.size();
#ifdef HARD_DEBUG
        assert(n_cluster<ARRAY_ALLOW_LIMIT);
#endif
        // prefix sum: offset of each cluster's first member in ptcl_hard_
        n_ptcl_in_cluster_disp_.resizeNoInitialize(n_cluster+1);
        n_ptcl_in_cluster_disp_[0] = 0;
        for(PS::S32 i=0; i<n_cluster; i++){
#ifdef HARD_DEBUG
            // connected clusters always contain more than one member
            assert(n_ptcl_in_cluster_[i]>1);
#endif
            n_ptcl_in_cluster_disp_[i+1] = n_ptcl_in_cluster_disp_[i] + n_ptcl_in_cluster_[i];
        }
    }
1916 
1917 
1918  // for NON-ISOLATED CLUSTER
1920 
        // count of group members held on remote ranks (getter only; set elsewhere)
        return n_group_member_remote_;
    }
1924 
    //! get the hard particle array (reference; caller may modify contents)
    PS::ReallocatableArray<PtclH4> & getPtcl() {
        return ptcl_hard_;
    }
1928 
        // one entry of n_ptcl_in_cluster_ per cluster
        return n_ptcl_in_cluster_.size();
    }
1932 
        // number of hard integrators currently interrupted (pending finish)
        return interrupt_list_.size();
    }
1936 
        // i-th interrupted hard integrator (no bounds check)
        return interrupt_list_[i];
    }
1940 
    //! pointer into the per-cluster member-count array, starting at element i
    PS::S32* getClusterNumberOfMemberList(const std::size_t i=0) const{
        return n_ptcl_in_cluster_.getPointer(i);
    }
1944 
    //! pointer into the per-cluster member displacement (prefix-sum) array
    PS::S32* getClusterNumberOfMemberListOffset(const std::size_t i=0) const{
        return n_ptcl_in_cluster_disp_.getPointer(i);
    }
1948 
    //! pointer into the per-cluster group-count array
    PS::S32* getGroupNumberOfMemberList(const std::size_t i=0) const{
        return n_group_in_cluster_.getPointer(i);
    }
1952 
    //! pointer into the per-cluster group-count offset array
    PS::S32* getGroupNumberOfMemberListOffset(const std::size_t i=0) const{
        return n_group_in_cluster_offset_.getPointer(i);
    }
1956 
    //! pointer into the array of first-artificial-particle addresses per cluster
    PS::S32* getAdrPtclArtFirstList(const std::size_t i=0) const{
        return adr_first_ptcl_arti_in_cluster_.getPointer(i);
    }
1960 
        // number of clusters flagged for changeover updates
        return i_cluster_changeover_update_.size();
    }
1964 
    //! set the physical time origin of the hard integration
    void setTimeOrigin(const PS::F64 _time_origin){
        time_origin_ = _time_origin;
    }
1968 
        // sync the write-back time marker to the current time origin
        time_write_back_ = time_origin_;
    }
1972 
        // current physical time origin
        return time_origin_;
    }
1976 
1977  //void setParam(const PS::F64 _rbin,
1978  // const PS::F64 _rout,
1979  // const PS::F64 _rin,
1980  // const PS::F64 _eps,
1981  // const PS::F64 _dt_limit_hard,
1982  // const PS::F64 _dt_min_hard,
1983  // const PS::F64 _eta,
1984  // const PS::F64 _time_origin,
1985  // const PS::F64 _sd_factor,
1986  // const PS::F64 _v_max,
1987  // // const PS::F64 _gmin,
1988  // // const PS::F64 _m_avarage,
1989  // const PS::S64 _id_offset,
1990  // const PS::S32 _n_split){
1991  // /// Set chain pars (L.Wang)
1992  // Int_pars_.rin = _rin;
1993  // Int_pars_.rout = _rout;
1994  // Int_pars_.r_oi_inv = 1.0/(_rout-_rin);
1995  // Int_pars_.r_A = (_rout-_rin)/(_rout+_rin);
1996  // Int_pars_.pot_off = (1.0+Int_pars_.r_A)/_rout;
1997  // Int_pars_.eps2 = _eps*_eps;
1998  // Int_pars_.r_bin = _rbin;
1999  // /// Set chain pars (L.Wang)
2000  // dt_limit_hard_ = _dt_limit_hard;
2001  // dt_min_hard_ = _dt_min_hard;
2002  // eta_s_ = _eta*_eta;
2003  // sdfactor_ = _sd_factor;
2004  // v_max_ = _v_max;
2005  // time_origin_ = _time_origin;
2006 // // gamma_ = std::pow(1.0/_gmin,0.33333);
2007  // // r_search_single_ = _rsearch;
2008  // //r_bin_ = _rbin;
2009  // // m_average_ = _m_avarage;
2010  // manager->n_split = _n_split;
2011  // id_offset_ = _id_offset;
2012  //}
2013 
2014 // void updateRSearch(PtclH4* ptcl_org,
2015 // const PS::S32* ptcl_list,
2016 // const PS::S32 n_ptcl,
2017 // const PS::F64 dt_tree) {
2018 // for (PS::S32 i=0; i<n_ptcl; i++) {
2019 // ptcl_org[ptcl_list[i]].calcRSearch(dt_tree);
2020 // }
2021 // }
2022 
2024 // for one cluster
2025  template<class Tsys>
2026  void setPtclForOneClusterOMP(const Tsys & sys,
2027  const PS::ReallocatableArray<PS::S32> & adr_array){
2028  // for one cluster
2029  const PS::S32 n = adr_array.size();
2030  //ptcl_hard_.resizeNoInitialize(n);
2031  //n_ptcl_in_cluster_.resizeNoInitialize(n);
2032 #pragma omp parallel for
2033  for(PS::S32 i=0; i<n; i++){
2034  PS::S32 adr = adr_array[i];
2035  ptcl_hard_[i].DataCopy(sys[adr]);
2036  ptcl_hard_[i].adr_org = adr;
2037  //n_ptcl_in_cluster_[i] = 1;
2038  }
2039  }
2040 
2041 
2043 
    //! drift all single-particle clusters over one tree step (OpenMP parallel)
    /*! @param[in] _dt: drift interval (tree step); time_origin_ is advanced by _dt */
    void driveForOneClusterOMP(const PS::F64 _dt) {
        const PS::S32 n = ptcl_hard_.size();

#pragma omp parallel for
        for(PS::S32 i=0; i<n; i++){
            auto& pi = ptcl_hard_[i];
            // free drift at constant velocity
            PS::F64vec dr = pi.vel * _dt;
            pi.pos += dr;

#ifdef STELLAR_EVOLUTION
            // back up mass and velocity to measure the kinetic-energy change if
            // stellar evolution modifies this particle below
            PS::F64 mbk = pi.mass;

            PS::F64vec vbk = pi.vel; //back up velocity in case of change
            assert(!std::isinf(vbk.x));
            assert(!std::isnan(vbk.x));
            int modify_flag = manager->ar_manager.interaction.modifyOneParticle(pi, time_origin_, time_origin_ + _dt);
            if (modify_flag) {
                // accumulate the kinetic-energy change into the energy bookkeeping
                auto& v = pi.vel;
                Float de_kin = 0.5*(pi.mass*(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]) - mbk*(vbk[0]*vbk[0]+vbk[1]*vbk[1]+vbk[2]*vbk[2]));
                energy.de_sd_change_cum += de_kin;
                energy.de_sd_change_modify_single += de_kin;
                energy.de_change_cum += de_kin;
                energy.de_change_modify_single += de_kin;
            }
            // shift time interrupt in order to get consistent time for stellar evolution in the next drift
            //pi.time_record -= _dt;
            //pi.time_interrupt -= _dt;
#endif

            //auto& pi_cm = pi.group_data.cm;
            //pi_cm.mass = pi_cm.vel.x = pi_cm.vel.y = pi_cm.vel.z = 0.0;

#ifdef HARD_DEBUG
            // to avoid issue in cluster search with velocity:
            // c.m. backup of an active particle must be zeroed
            if (!pi.group_data.artificial.isUnused()) {
                auto& pcm = pi.group_data.cm;
                assert(pcm.mass==0.0);
                assert(pcm.vel.x==0.0);
                assert(pcm.vel.y==0.0);
                assert(pcm.vel.z==0.0);
            }
#endif
            assert(!std::isinf(pi.vel.x));
            assert(!std::isnan(pi.vel.x));
            // refresh the neighbor-search radius for the next tree step
            pi.Ptcl::calcRSearch(_dt);
            /*
              DriveKeplerRestricted(mass_sun_,
              pos_sun_, pi.pos,
              vel_sun_, pi.vel, dt);
            */
        }

        time_origin_ += _dt;
    }
2101 
2103 
    //! write hard particles back to the global system (serial)
    /*! Copies each hard particle to its original global address and, with
        stellar evolution enabled, records mass changes for energy correction.
        @param[in,out] sys: global particle system
        @param[out] _mass_modify_list: addresses of particles whose mass changed
    */
    template<class Tsys>
    void writeBackPtclForOneCluster(Tsys & sys,
                                    PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
        const PS::S32 n = ptcl_hard_.size();
        //PS::ReallocatableArray<PS::S32> removelist(n);
        for(PS::S32 i=0; i<n; i++){
            //PS::S32 adr = adr_array[i];
            // destination is the particle's original global address
            PS::S32 adr = ptcl_hard_[i].adr_org;
#ifdef HARD_DEBUG
            assert(sys[adr].id == ptcl_hard_[i].id);
#endif
#ifdef STELLAR_EVOLUTION
            // mass before write-back; group members keep their real mass in a backup field
            PS::F64 mass_bk = sys[adr].group_data.artificial.isMember()? sys[adr].group_data.artificial.getMassBackup(): sys[adr].mass;
            assert(mass_bk!=0.0);
#endif
            sys[adr].DataCopy(ptcl_hard_[i]);

#ifdef STELLAR_EVOLUTION
            // record mass change; collect modified addresses for later energy correction
            sys[adr].dm = sys[adr].mass - mass_bk;
            if (sys[adr].dm!=0.0) _mass_modify_list.push_back(adr);
#endif
            assert(!std::isinf(sys[adr].pos[0]));
            assert(!std::isnan(sys[adr].pos[0]));
            assert(!std::isinf(sys[adr].vel[0]));
            assert(!std::isnan(sys[adr].vel[0]));
        }
    }
2136 
2138 
    template<class Tsys>
                                       PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
        const PS::S32 n = ptcl_hard_.size();
        const PS::S32 num_thread = PS::Comm::getNumberOfThread();
        // NOTE(review): a variable-length array of non-POD type is a GCC/Clang
        // extension, not standard C++ — consider std::vector here.
        PS::ReallocatableArray<PS::S32> mass_modify_list_thx[num_thread];
        for (int i=0; i<num_thread; i++) mass_modify_list_thx[i].resizeNoInitialize(0);

#pragma omp parallel
        {
#ifdef STELLAR_EVOLUTION
            const PS::S32 ith = PS::Comm::getThreadNum();
#endif
#pragma omp for
            for(PS::S32 i=0; i<n; i++){
                // destination is the particle's original global address
                PS::S32 adr = ptcl_hard_[i].adr_org;
                //PS::S32 adr = adr_array[i];
#ifdef HARD_DEBUG
                assert(sys[adr].id == ptcl_hard_[i].id);
#endif
#ifdef STELLAR_EVOLUTION
                // mass before write-back; group members keep their real mass in a backup field
                PS::F64 mass_bk = sys[adr].group_data.artificial.isMember()? sys[adr].group_data.artificial.getMassBackup(): sys[adr].mass;
                assert(mass_bk!=0.0);
#endif
                sys[adr].DataCopy(ptcl_hard_[i]);

#ifdef STELLAR_EVOLUTION
                // record mass change for later energy correction;
                // collected per thread to avoid a shared push_back in the parallel region
                sys[adr].dm = sys[adr].mass - mass_bk;
                if (sys[adr].dm!=0.0) mass_modify_list_thx[ith].push_back(adr);
#endif
                assert(!std::isinf(sys[adr].pos[0]));
                assert(!std::isnan(sys[adr].pos[0]));
                assert(!std::isinf(sys[adr].vel[0]));
                assert(!std::isnan(sys[adr].vel[0]));
            }
        }

        // merge the per-thread lists (serial)
        for (int i=0; i<num_thread; i++) {
            for (int k=0; k<mass_modify_list_thx[i].size(); k++) _mass_modify_list.push_back(mass_modify_list_thx[i][k]);
        }
    }
2185 
2187 
    //! write hard particles of multi-clusters back to the global system
    /*! Same write-back procedure as the one-cluster case.
        @param[in,out] _sys: global particle system
        @param[out] _mass_modify_list: addresses of particles whose mass changed
    */
    template<class Tsys>
    void writeBackPtclForMultiCluster(Tsys & _sys,
                                      PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
        writeBackPtclForOneCluster(_sys, _mass_modify_list);
    }
2196 
2198 
    template<class Tsys>
                                         PS::ReallocatableArray<PS::S32> & _mass_modify_list) {
        // delegate to the OpenMP one-cluster write-back
        writeBackPtclForOneClusterOMP(_sys, _mass_modify_list);
    }
2208 
2209 // template<class Tsys>
2210 // void writeBackPtclLocalOnlyOMP(Tsys & sys) {
2211 // const PS::S32 n = ptcl_hard_.size();
2212 //#pragma omp parallel for schedule(dynamic)
2213 // for(PS::S32 i=0; i<n; i++){
2214 // PS::S32 adr = ptcl_hard_[i].adr_org;
2215 // //PS::S32 adr = adr_array[i];
2216 //#ifdef HARD_DEBUG
2217 // if(adr>=0) assert(sys[adr].id == ptcl_hard_[i].id);
2218 //#endif
2219 // if(adr>=0) sys[adr].DataCopy(ptcl_hard_[i]);
2220 // }
2221 // }
2222 // for one cluster
2224 
2225 
2227 // for isolated multi cluster only
2228  template<class Tsys>
2229  void setPtclForIsolatedMultiClusterOMP(const Tsys & sys,
2230  const PS::ReallocatableArray<PS::S32> & _adr_array,
2231  const PS::ReallocatableArray<PS::S32> & _n_ptcl_in_cluster){
2232  const PS::S32 n_cluster = _n_ptcl_in_cluster.size();
2233 #ifdef HARD_DEBUG
2234  assert(n_cluster<ARRAY_ALLOW_LIMIT);
2235 #endif
2236  n_ptcl_in_cluster_.resizeNoInitialize(n_cluster);
2237  n_ptcl_in_cluster_disp_.resizeNoInitialize(n_cluster+1);
2238  n_ptcl_in_cluster_disp_[0] = 0;
2239  for(PS::S32 i=0; i<n_cluster; i++){
2240  n_ptcl_in_cluster_[i] = _n_ptcl_in_cluster[i];
2241 #ifdef HARD_DEBUG
2242  assert(n_ptcl_in_cluster_[i]>1);
2243 #endif
2244  n_ptcl_in_cluster_disp_[i+1] = n_ptcl_in_cluster_disp_[i] + n_ptcl_in_cluster_[i];
2245  }
2246  const PS::S32 n_ptcl = _adr_array.size();
2247 #ifdef HARD_DEBUG
2248  assert(n_ptcl<ARRAY_ALLOW_LIMIT);
2249 #endif
2250  ptcl_hard_.resizeNoInitialize(n_ptcl);
2251 #pragma omp parallel for
2252  for(PS::S32 i=0; i<n_ptcl; i++){
2253  PS::S32 adr = _adr_array[i];
2254  ptcl_hard_[i].DataCopy(sys[adr]);
2255  ptcl_hard_[i].adr_org = adr;
2256  // ptcl_hard_[i].n_ngb= sys[adr].n_ngb;
2257  }
2258 
2259  interrupt_list_.resizeNoInitialize(0);
2260  }
2261 
2262 
2264 
    //! integrate all hard multi-clusters over one tree step (OpenMP parallel)
    /*! Each cluster is integrated by a per-thread HardIntegrator (Hermite+AR),
        or, if ONLY_SOFT is defined, simply drifted. Clusters whose integration
        is interrupted keep their integrator slot and are collected into
        interrupt_list_ for later finishing.
        @param[in] dt: integration interval (tree step)
        @param[in] _ptcl_soft: global soft-particle array holding the artificial particles
        @return number of interrupted clusters (0: all finished, time_origin_ advanced)
    */
    template<class Tpsoft>
    int driveForMultiClusterOMP(const PS::F64 dt, Tpsoft* _ptcl_soft){
        // no integrator slot may be occupied when a new step starts
        assert(n_hard_int_use_==0);

        const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
        //PS::ReallocatableArray<PtclH4> extra_ptcl[num_thread];
        //PS::ReallocatableArray<std::pair<PS::S32,PS::S32>> n_sort_list;
        //n_sort_list.resizeNoInitialize(n_cluster);
        //for(PS::S32 i=0; i<n_cluster; i++) {
        //    n_sort_list[i].first = n_ptcl_in_cluster_[i];
        //    n_sort_list[i].second= i;
        //}
        //std::sort(n_sort_list.getPointer(),n_sort_list.getPointer()+n_cluster,[](const std::pair<PS::S32,PS::S32> &a, const std::pair<PS::S32,PS::S32> &b){return a.first<b.first;});
        const PS::S32 num_thread = PS::Comm::getNumberOfThread();
        // pool must hold one working slot per thread plus spares for interrupts
        assert(n_hard_int_max_>num_thread);

#ifndef ONLY_SOFT
        // each thread starts with its own integrator slot; slots beyond
        // num_thread are handed out when an integration is interrupted
        HardIntegrator* hard_int_thread[num_thread];
        // set new hard_int front pointer
        HardIntegrator* hard_int_front_ptr = &hard_int_[num_thread];
        for (PS::S32 i=0; i<num_thread; i++) {
            hard_int_thread[i] = &hard_int_[i];
        }
#endif
        // NOTE(review): hard_int_front_ptr is also referenced after the loop
        // below, outside the #ifndef ONLY_SOFT guard — verify the ONLY_SOFT
        // build configuration still compiles this function.

#ifdef OMP_PROFILE
        PS::F64 time_thread[num_thread];
        PS::S64 num_cluster[num_thread];
        for (PS::S32 i=0; i<num_thread; i++) {
            time_thread[i] = 0;
            num_cluster[i] = 0;
        }
#endif

#pragma omp parallel for schedule(dynamic)
        for(PS::S32 i=0; i<n_cluster; i++){
            const PS::S32 ith = PS::Comm::getThreadNum();
#ifdef OMP_PROFILE
            time_thread[ith] -= PS::GetWtime();
#endif
            //const PS::S32 i = n_sort_list[k].second;
            // members of cluster i are contiguous in ptcl_hard_
            const PS::S32 adr_head = n_ptcl_in_cluster_disp_[i];
            const PS::S32 n_ptcl = n_ptcl_in_cluster_[i];

#ifndef ONLY_SOFT
            // Hermite + AR integration
            const PS::S32 n_group = n_group_in_cluster_[i];
            Tpsoft* ptcl_artificial_ptr=NULL;
            PS::S32* n_member_in_group_ptr=NULL;
            if(n_group>0) {
                // a negative first index marks an isolated group without artificial particles
                PS::S32 ptcl_arti_first_index = adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i]];
                if (ptcl_arti_first_index>=0) ptcl_artificial_ptr = &(_ptcl_soft[ptcl_arti_first_index]);
#ifdef PROFILE
                else ARC_n_groups_iso += 1;
#endif
                n_member_in_group_ptr = &(n_member_in_group_[n_group_in_cluster_offset_[i]]);
            }
#ifdef OMP_PROFILE
            num_cluster[ith] += n_ptcl;
#endif
#ifdef PROFILE
            ARC_n_groups += n_group;
#endif

#ifdef HARD_DUMP
            // back up the cluster state so a failed integration can be replayed
            assert(ith<hard_dump.size);
            hard_dump[ith].backup(ptcl_hard_.getPointer(adr_head), n_ptcl, ptcl_artificial_ptr, n_group, n_member_in_group_ptr, time_origin_, dt, manager->ap_manager.getArtificialParticleN());
#endif

#ifdef HARD_DEBUG_PROFILE
            PS::F64 tstart = PS::GetWtime();
#endif

            // if interrupt exist, escape initial
            hard_int_thread[ith]->initial(ptcl_hard_.getPointer(adr_head), n_ptcl, ptcl_artificial_ptr, n_group, n_member_in_group_ptr, manager, time_origin_);

            auto& interrupt_binary = hard_int_thread[ith]->integrateToTime(dt);

            if (interrupt_binary.status!=AR::InterruptStatus::none) {
                // interrupted: preserve this integrator's state and atomically
                // claim a fresh slot from the pool for the thread's next cluster
                #pragma omp atomic capture
                hard_int_thread[ith] = hard_int_front_ptr++;

                // pool exhausted if the claimed slot is one past the end
                assert(hard_int_thread[ith]!=&hard_int_[n_hard_int_max_]);
            }
            else {
                // finished: drift cluster c.m., write data back, collect counters
                hard_int_thread[ith]->driftClusterCMRecordGroupCMDataAndWriteBack(dt);

#ifdef PROFILE
                ARC_substep_sum += hard_int_thread[ith]->ARC_substep_sum;
                ARC_tsyn_step_sum += hard_int_thread[ith]->ARC_tsyn_step_sum;
                H4_step_sum += hard_int_thread[ith]->H4_step_sum;
#endif
#ifdef HARD_COUNT_NO_NEIGHBOR
                n_neighbor_zero += hard_int_thread[ith]->n_neighbor_zero;
#endif
#ifdef HARD_CHECK_ENERGY
                energy += hard_int_thread[ith]->energy;
#endif

                hard_int_thread[ith]->clear();
            }

#ifdef OMP_PROFILE
            time_thread[ith] += PS::GetWtime();
#endif

#ifdef HARD_DEBUG_PROFILE
            PS::F64 tend = PS::GetWtime();
            std::cerr<<"HT: "<<i<<" "<<ith<<" "<<n_cluster<<" "<<n_ptcl<<" "<<tend-tstart<<std::endl;
#endif

#else
            // Only soft drift
            auto* pi = ptcl_hard_.getPointer(adr_head);
            for (PS::S32 j=0; j<n_ptcl; j++) {
                PS::F64vec dr = pi[j].vel * dt;
                pi[j].pos += dr;
#ifdef CLUSTER_VELOCITY
                auto& pij_cm = pi[j].group_data.cm;
                pij_cm.mass = pij_cm.vel.x = pij_cm.vel.y = pij_cm.vel.z = 0.0;
#endif
                ASSERT(!std::isinf(pi[j].vel[0]));
                ASSERT(!std::isnan(pi[j].vel[0]));
                pi[j].calcRSearch(dt);
            }
#endif

        }

        // regist interrupted hard integrator
        assert(interrupt_list_.size()==0);
        for (auto iptr = hard_int_; iptr<hard_int_front_ptr; iptr++)
            if (iptr->is_initialized) {
                assert(iptr->interrupt_binary.status!=AR::InterruptStatus::none);
#ifdef HARD_INTERRUPT_PRINT
                iptr->printInterruptBinaryInfo(std::cerr);
#endif
                interrupt_list_.push_back(iptr);
            }


        // advance time_origin if all clusters finished
        PS::S32 n_interrupt = interrupt_list_.size();
        if (n_interrupt==0) time_origin_ += dt;
        else interrupt_dt_ = dt;

        return n_interrupt;
    }
2419 
2421 
        PS::S32 n_interrupt = interrupt_list_.size();
        // try to continue every interrupted integrator to the end of the step
#pragma omp parallel for schedule(dynamic)
        for (PS::S32 i=0; i<n_interrupt; i++) {
            auto hard_int_ptr = interrupt_list_[i];
            auto& interrupt_binary = hard_int_ptr->integrateToTime(interrupt_dt_);

            if (interrupt_binary.status==AR::InterruptStatus::none) {
                // finished: drift cluster c.m., write back and release the integrator
                hard_int_ptr->driftClusterCMRecordGroupCMDataAndWriteBack(interrupt_dt_);

#ifdef PROFILE
                ARC_substep_sum += hard_int_ptr->ARC_substep_sum;
                ARC_tsyn_step_sum += hard_int_ptr->ARC_tsyn_step_sum;
                H4_step_sum += hard_int_ptr->H4_step_sum;
#endif
#ifdef HARD_CHECK_ENERGY
                energy += hard_int_ptr->energy;
#endif

                hard_int_ptr->clear();
            }
        }

        // record new interrupt list
        // compact in place: still-interrupted entries stay at the front,
        // finished entries are overwritten by the last element (order not preserved)
        PS::S32 i_front = 0;
        PS::S32 i_end = n_interrupt;
        while (i_front<i_end) {
            auto hard_int_front_ptr = interrupt_list_[i_front];
            if (hard_int_front_ptr->interrupt_binary.status!=AR::InterruptStatus::none) {
                assert(hard_int_front_ptr->is_initialized);
#ifdef HARD_INTERRUPT_PRINT
                hard_int_front_ptr->printInterruptBinaryInfo(std::cerr);
#endif
                i_front++;
            }
            else {
                interrupt_list_[i_front] = interrupt_list_[--i_end];
                interrupt_list_.decreaseSize(1);
            }
        }

        n_interrupt = interrupt_list_.size();

        // advance time_origin if all clusters finished
        if (n_interrupt==0) time_origin_ += interrupt_dt_;

        return n_interrupt;
    }
2473 
        //! pack cluster index, group index, member count, member offset and isolated flag
        GroupIndexInfo(PS::S32 _cluster, PS::S32 _group, PS::S32 _member, PS::S32 _offset, PS::S32 _isolated_case):
            cluster_index(_cluster), group_index(_group), n_members(_member), n_member_offset(_offset), isolated_case(_isolated_case) {}
    };
2480 
2482  /*
2483  @param[in] _i_cluster: cluster index
2484  @param[in,out] _ptcl_in_cluster: particle data
2485  @param[in] _n_ptcl: total number of particle in _ptcl_in_cluster.
2486  @param[out] _ptcl_artificial: artificial particles that will be added
2487  @parma[out] _binary_table: binary information table
2488  @param[out] _n_groups: number of groups in current cluster
2489  @param[out] _n_members_in_groups: number of members in each group, (cluster_index, group_index, n_members)
2490  @param[out] _changeover_update_list: cluster index list for particles with changeover updates
2491  @param[in,out] _groups: searchGroupCandidate class, which contain 1-D group member index array, will be reordered by the minimum distance chain for each group
2492  @param[in] _dt_tree: tree time step for calculating r_search and set stablility checker period limit
2493  */
2494  template <class Tptcl>
2496  Tptcl* _ptcl_in_cluster,
2497  const PS::S32 _n_ptcl,
2498  PS::ReallocatableArray<Tptcl> & _ptcl_artificial,
2499  PS::ReallocatableArray<COMM::BinaryTree<PtclH4,COMM::Binary>> & _binary_table,
2500  PS::S32 &_n_groups,
2501  PS::ReallocatableArray<GroupIndexInfo>& _n_member_in_group,
2502  PS::ReallocatableArray<PS::S32>& _changeover_update_list,
2503  SearchGroupCandidate<Tptcl>& _groups,
2504  const PS::F64 _dt_tree) {
2505 
2506  PS::S32 group_ptcl_adr_list[_n_ptcl];
2507  PS::S32 group_ptcl_adr_offset=0;
2508  bool changeover_update_flag = false;
2509  _n_groups = 0;
2510  auto& ap_manager = manager->ap_manager;
2511 
2512  for (int i=0; i<_groups.getNumberOfGroups(); i++) {
2513  PS::ReallocatableArray<COMM::BinaryTree<Tptcl,COMM::Binary>> binary_tree; // hierarch binary tree
2514 
2515  const PS::S32 n_members = _groups.getNumberOfGroupMembers(i);
2516  binary_tree.reserve(n_members);
2517 
2518 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2519  assert(n_members<ARRAY_ALLOW_LIMIT);
2520 #endif
2521  PS::S32* member_list = _groups.getMemberList(i);
2522  binary_tree.resizeNoInitialize(n_members-1);
2523  // build hierarch binary tree from the minimum distant neighbors
2524 
2525  COMM::BinaryTree<Tptcl,COMM::Binary>::generateBinaryTree(binary_tree.getPointer(), member_list, n_members, _ptcl_in_cluster, ap_manager.gravitational_constant);
2526 
2527  struct {PS::F64 mean_mass_inv, rin, rout, dt_tree; } changeover_rsearch_pars = {Tptcl::mean_mass_inv, manager->r_in_base, manager->r_out_base, _dt_tree};
2528  // get new changeover and rsearch for c.m.
2529  binary_tree.back().processTreeIter(changeover_rsearch_pars, (PS::S64)-1, (PS::S64)-1, calcBinaryIDChangeOverAndRSearchIter);
2530 
2531  // stability check and break groups
2532  Stability<Tptcl> stable_checker;
2533 
2534  // find closed Kepler hierarchical systems
2535  stable_checker.stable_binary_tree.reserve(n_members);
2536  stable_checker.findClosedTree(binary_tree.back());
2537 
2538  // save group information to binary table.
2539  for (int k=0; k<stable_checker.stable_binary_tree.size(); k++) {
2540  auto& closed_binary_tree_k = *stable_checker.stable_binary_tree[k];
2541  const PS::S32 n_members = closed_binary_tree_k.getMemberN();
2542  PS::S32 start_index_binary_table = _binary_table.size();
2543  _binary_table.increaseSize(n_members);
2544  closed_binary_tree_k.getherBinaryTreeIter(_binary_table.getPointer(start_index_binary_table));
2545  }
2546 
2547 
2548  // be careful, here t_crit should be >= hard slowdown_timescale_max to avoid using slowdown for wide binaries
2549  stable_checker.t_crit = manager->ar_manager.slowdown_timescale_max;
2550  stable_checker.findStableTree(binary_tree.back());
2551 
2552  // in isolated binary case, if the slowdown period can be larger than tree step * N_step_per_orbit, it is fine without tidal tensor
2553  if (_n_ptcl==2&&stable_checker.stable_binary_tree.size()==1) {
2554  auto& bin = *stable_checker.stable_binary_tree[i];
2555  const PS::S32 n_members = bin.getMemberN();
2556 
2557  /* old criterion
2558  P k / dt > ns
2559  P^2/a^3 = 4 pi^2 / (G (m1 + m2))
2560  k = kr pert_in/pert_out = rs^3/a^3 kr (assume no mass and ecc dependence)
2561  => P < 4pi^2 kr rs^3 / (G (m1 + m2) dt ns)
2562 
2563  //PS::F64 pot_ch_inv = bin.r_search*bin.r_search*bin.r_search/(ap_manager.gravitational_constant*bin.mass);
2564  //if (bin.period < 4.93480220054 * manager->ar_manager.slowdown_pert_ratio_ref * pot_ch_inv / dt_nstep) {
2565  */
2566 
2567  // directly estimate slowdown based on perturbation calculation method for more general cases
2568  AR::SlowDown sd;
2569  sd.initialSlowDownReference(manager->ar_manager.slowdown_pert_ratio_ref,manager->ar_manager.slowdown_timescale_max);
2570  sd.pert_in = manager->ar_manager.interaction.calcPertFromBinary(bin);
2571  sd.pert_out = manager->ar_manager.interaction.calcPertFromMR(bin.r_search, bin.mass, 1.0/Ptcl::mean_mass_inv);
2572  sd.period = bin.period;
2573  // here use twice to ensure the period*sd can be larger than dt_nstep
2574  sd.timescale = 2*manager->ar_manager.slowdown_timescale_max;
2575  sd.calcSlowDownFactor();
2576 
2577  PS::F64 dt_nstep = _dt_tree*manager->n_step_per_orbit;
2578 
2579  //std::cerr<<"GROUP_SEL_RECORD: "<<sd.pert_in<<" "<<sd.pert_out<<" "<<sd.getSlowDownFactor()<<" "<<sd.getSlowDownFactorOrigin()<<" "<<bin.m1<<" "<<bin.m2<<" "<<bin.semi<<" "<<bin.ecc<<" "<<bin.period<<" "<<dt_nstep<<" "<<(bin.period*sd.getSlowDownFactor() > dt_nstep)<<std::endl;
2580 
2581  if (bin.period*sd.getSlowDownFactor() > dt_nstep) {
2582  // Set member particle type, backup mass, collect member particle index to group_ptcl_adr_list
2583  //use _ptcl_in_cluster as the first particle address as reference to calculate the particle index.
2584  struct { Tptcl* adr_ref; PS::S32* group_list; PS::S32 n; PS::S64 pcm_id; ChangeOver* changeover_cm; PS::F64 rsearch_cm; bool changeover_update_flag;}
2585  group_index_pars = { _ptcl_in_cluster, &group_ptcl_adr_list[group_ptcl_adr_offset], 0, -1, &bin.changeover, bin.r_search, false};
2586  bin.processLeafIter(group_index_pars, collectGroupMemberAdrAndSetMemberParametersIter);
2587  if (group_index_pars.changeover_update_flag) changeover_update_flag = true;
2588 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2589  assert(group_index_pars.n==n_members);
2590 #ifdef ARTIFICIAL_PARTICLE_DEBUG_PRINT
2591  std::cerr<<"Isolated binary case: "
2592  <<" period: "<<bin.period
2593  <<" semi: "<<bin.semi
2594  <<" ecc: "<<bin.ecc
2595  <<" r_serach: "<<bin.r_search
2596  <<" tree_step: "<<_dt_tree
2597  <<" n_step_per_orbit: "<<manager->n_step_per_orbit
2598  <<std::endl;
2599 #endif
2600 #endif
2601  _n_member_in_group.push_back(GroupIndexInfo(_i_cluster, _n_groups, n_members, group_ptcl_adr_offset, 1));
2602  _n_groups++;
2603  group_ptcl_adr_offset += n_members;
2604  continue;
2605  }
2606  }
2607 
2608  // save cluster and group index in artificial particle data as identification for later collection to global sys
2609  PS::F64 index_group[2];
2610  index_group[0] = (PS::F64)(_i_cluster+1);
2611 
2612  for (int i=0; i<stable_checker.stable_binary_tree.size(); i++) {
2613  index_group[1] = (PS::F64)(_n_groups+1);
2614 
2615  auto& binary_stable_i = *stable_checker.stable_binary_tree[i];
2616  const PS::S32 n_members = binary_stable_i.getMemberN();
2617 
2618  // Make sure the _ptcl_new will not make new array due to none enough capacity during the following loop,
2619  const int n_ptcl_artificial = _ptcl_artificial.size();
2620  _ptcl_artificial.increaseSize(ap_manager.getArtificialParticleN());
2621  Tptcl* ptcl_artificial_i = &_ptcl_artificial[n_ptcl_artificial];
2622 
2623  // Set member particle type to member, set mass to zero, backup mass and collect member particle index to group_ptcl_adr_list
2624  // set rsearch and changeover for members
2625  //use _ptcl_in_cluster as the first particle address as reference to calculate the particle index.
2626  struct { Tptcl* adr_ref; PS::S32* group_list; PS::S32 n; PS::S64 pcm_id; ChangeOver* changeover_cm; PS::F64 rsearch_cm; bool changeover_update_flag;}
2627  group_index_pars = { _ptcl_in_cluster, &group_ptcl_adr_list[group_ptcl_adr_offset], 0, n_ptcl_artificial, &binary_stable_i.changeover, binary_stable_i.r_search, false};
2628  binary_stable_i.processLeafIter(group_index_pars, collectGroupMemberAdrAndSetMemberParametersIter);
2629 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2630  assert(group_index_pars.n==n_members);
2631 #endif
2632  if (group_index_pars.changeover_update_flag) changeover_update_flag = true;
2633 
2634  // generate artificial particles
2635  ap_manager.createArtificialParticles(ptcl_artificial_i, binary_stable_i, index_group, 2);
2636 
2637  // set rsearch and changeover for c.m. particle
2638  auto* pcm = ap_manager.getCMParticles(ptcl_artificial_i);
2639  pcm->r_search = binary_stable_i.r_search;
2640  pcm->r_search += binary_stable_i.semi*(1+binary_stable_i.ecc); // depend on the mass ratio, the upper limit distance to c.m. from all members and artificial particles is apo-center distance
2641  pcm->changeover = binary_stable_i.changeover;
2642 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2643  assert(pcm->r_search > pcm->changeover.getRout());
2644 #endif
2645 
2646  // set rsearch and changeover for tidal tensor particles
2647  Tptcl* ptcl_tt = ap_manager.getTidalTensorParticles(ptcl_artificial_i);
2648  // use c.m. r_search and changeover
2649  for (int j=0; j<ap_manager.getTidalTensorParticleN(); j++) {
2650  Tptcl& pj = ptcl_tt[j];
2651  pj.r_search = binary_stable_i.r_search;
2652  pj.changeover = binary_stable_i.changeover;
2653 
2654 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2655  //check rsearch consistence:
2656  PS::F64 rsearch_bin = pcm->r_search;
2657  PS::F64vec dp = pj.pos - binary_stable_i.pos;
2658  PS::F64 dr = dp*dp;
2659  assert(dr<=rsearch_bin*rsearch_bin);
2660 #endif
2661  }
2662 
2663  // set rsearch and changeover for orbitial sample particles
2664  Tptcl* ptcl_orbit=ap_manager.getOrbitalParticles(ptcl_artificial_i);
2665  PS::S32 n_orbit = ap_manager.getOrbitalParticleN();
2666  for (int j=0; j<n_orbit; j++) {
2667  // use member changeover, if new changeover is different, record the scale ratio
2668  Tptcl& pj = ptcl_orbit[j];
2669  pj.changeover = binary_stable_i.getMember(j%2)->changeover;
2670 
2671  PS::F64 pj_r_in = pj.changeover.getRin();
2672  PS::F64 bin_r_in = binary_stable_i.changeover.getRin();
2673  if (abs( pj_r_in - bin_r_in)>1e-10) {
2674  pj.changeover.r_scale_next = bin_r_in/pj_r_in;
2675  pj.r_search = std::max(pj.r_search, binary_stable_i.r_search);
2676  // not necessary true since the member changeover may inherient from other binaries which can be larger than the new one here.
2677  //assert(_bin.changeover.getRin()>=pj->changeover.getRin());
2678 
2679  // in case the new changeover is small, r_search may be smaller than the previous r_out, which should be avoided
2680  if (pj.r_search < pj.changeover.getRout()) {
2681 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2682  assert( pj.r_search > pj.changeover.getRout()*pj.changeover.r_scale_next);
2683 #endif
2684  pj.r_search = pj.changeover.getRout();
2685  }
2686  }
2687  else pj.r_search = binary_stable_i.r_search;
2688 
2689 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2690  //check rsearch consistence:
2691  PS::F64 rsearch_bin = pcm->r_search;
2692  PS::F64vec dp = pj.pos - binary_stable_i.pos;
2693  PS::F64 dr = dp*dp;
2694  assert(dr<=rsearch_bin*rsearch_bin);
2695 #endif
2696  }
2697 
2698  _n_member_in_group.push_back(GroupIndexInfo(_i_cluster, _n_groups, n_members, group_ptcl_adr_offset, 0));
2699  group_ptcl_adr_offset += n_members;
2700  _n_groups++;
2701  }
2702  }
2703 
2704  assert(group_ptcl_adr_offset<=_n_ptcl);
2705 
2706  // Reorder the ptcl that group member come first
2707  PS::S32 ptcl_list_reorder[_n_ptcl];
2708  for (int i=0; i<_n_ptcl; i++) ptcl_list_reorder[i] = i;
2709 
2710  // shift single after group members
2711  PS::S32 i_single_front=group_ptcl_adr_offset;
2712  PS::S32 i_group = 0;
2713  while (i_group<group_ptcl_adr_offset) {
2714  // if single find inside group_ptcl_adr_offset, exchange single with group member out of the offset
2715  if(_ptcl_in_cluster[i_group].group_data.artificial.isSingle()) {
2716  while(_ptcl_in_cluster[i_single_front].group_data.artificial.isSingle()) {
2717  i_single_front++;
2718  assert(i_single_front<_n_ptcl);
2719  }
2720  // Swap index
2721  PS::S32 plist_tmp = ptcl_list_reorder[i_group];
2722  ptcl_list_reorder[i_group] = ptcl_list_reorder[i_single_front];
2723  ptcl_list_reorder[i_single_front] = plist_tmp;
2724  i_single_front++; // avoild same particle be replaced
2725  }
2726  i_group++;
2727  }
2728 
2729 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2730  // check whether the list is correct
2731  PS::S32 plist_new[group_ptcl_adr_offset];
2732  for (int i=0; i<group_ptcl_adr_offset; i++) plist_new[i] = group_ptcl_adr_list[i];
2733  std::sort(plist_new, plist_new+group_ptcl_adr_offset, [](const PS::S32 &a, const PS::S32 &b) {return a < b;});
2734  std::sort(ptcl_list_reorder, ptcl_list_reorder+group_ptcl_adr_offset, [](const PS::S32 &a, const PS::S32 &b) {return a < b;});
2735  for (int i=0; i<group_ptcl_adr_offset; i++) assert(ptcl_list_reorder[i]==plist_new[i]);
2736 #endif
2737 
2738  // overwrite the new ptcl list for group members by reorderd list
2739  for (int i=0; i<group_ptcl_adr_offset; i++) ptcl_list_reorder[i] = group_ptcl_adr_list[i];
2740 
2741  // templately copy ptcl data
2742  Tptcl ptcl_tmp[_n_ptcl];
2743  for (int i=0; i<_n_ptcl; i++) ptcl_tmp[i]=_ptcl_in_cluster[i];
2744 
2745  // reorder ptcl
2746  for (int i=0; i<_n_ptcl; i++) _ptcl_in_cluster[i]=ptcl_tmp[ptcl_list_reorder[i]];
2747 
2748  // changeover update
2749  if (changeover_update_flag) _changeover_update_list.push_back(_i_cluster);
2750  }
2751 
2753  /* @param[in,out] _sys: global particle system
2754  @param[in] _dt_tree: tree time step for calculating r_search
2755  */
 // Find groups in each hard cluster, create the artificial particles for them,
 // append the artificial particles to _sys, and fill the per-cluster bookkeeping
 // arrays (group counts/offsets, member counts/offsets, artificial first addresses).
 // NOTE(review): the function signature line is elided in this listing
 // (hard.hpp:2757); per the doc comment above it takes (Tsys& _sys, const PS::F64 _dt_tree).
2756  template<class Tsys, class Tptcl>
2758  const PS::F64 _dt_tree) {
2759  const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
2760 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2761  assert(n_cluster<ARRAY_ALLOW_LIMIT);
2762 #endif
2763  n_group_in_cluster_.resizeNoInitialize(n_cluster);
2764  n_group_member_remote_=0;
2765 
 // per-thread buffers for artificial particles / binary tables / group info.
 // NOTE(review): runtime-sized stack arrays (VLAs) are a GNU extension, not standard C++.
2766  const PS::S32 num_thread = PS::Comm::getNumberOfThread();
2767  PS::ReallocatableArray<PtclH4> ptcl_artificial_thread[num_thread];
2768  PS::ReallocatableArray<COMM::BinaryTree<PtclH4,COMM::Binary>> binary_table_thread[num_thread];
2769  PS::ReallocatableArray<GroupIndexInfo> n_member_in_group_thread[num_thread];
2770  PS::ReallocatableArray<PS::S32> i_cluster_changeover_update_threads[num_thread];
2771  for (PS::S32 i=0; i<num_thread; i++) {
2772  ptcl_artificial_thread[i].resizeNoInitialize(0);
2773  binary_table_thread[i].resizeNoInitialize(0);
2774  n_member_in_group_thread[i].resizeNoInitialize(0);
2775  i_cluster_changeover_update_threads[i].resizeNoInitialize(0);
2776  }
2777  auto& ap_manager = manager->ap_manager;
2778 
2779 #pragma omp parallel for schedule(dynamic)
2780  for (PS::S32 i=0; i<n_cluster; i++){
2781  const PS::S32 ith = PS::Comm::getThreadNum();
2782  PtclH4* ptcl_in_cluster = ptcl_hard_.getPointer() + n_ptcl_in_cluster_disp_[i];
2783  const PS::S32 n_ptcl = n_ptcl_in_cluster_[i];
2784  // reset particle type to single
2785  for(PS::S32 j=0; j<n_ptcl; j++) {
2786  // ensure both hard local and global system have reset status, otherwise singles in global system may have wrong status
2787  ptcl_in_cluster[j].group_data.artificial.setParticleTypeToSingle();
2788  PS::S64 adr=ptcl_in_cluster[j].adr_org;
2789  if(adr>=0) _sys[adr].group_data = ptcl_in_cluster[j].group_data;
2790  }
2791  // search group_candidates
2792  SearchGroupCandidate<PtclH4> group_candidate;
2793  // merge group_candidates
2794  group_candidate.searchAndMerge(ptcl_in_cluster, n_ptcl);
2795 
2796  // find groups and generate artificial particles for cluster i
2797  findGroupsAndCreateArtificialParticlesOneCluster(i, ptcl_in_cluster, n_ptcl, ptcl_artificial_thread[ith], binary_table_thread[ith], n_group_in_cluster_[i], n_member_in_group_thread[ith], i_cluster_changeover_update_threads[ith], group_candidate, _dt_tree);
2798  }
2799 
2800  // gether binary table
2801  PS::S32 n_binary_table_offset_thread[num_thread+1];
2802  n_binary_table_offset_thread[0] = 0;
2803  for (PS::S32 i=0; i<num_thread; i++)
2804  n_binary_table_offset_thread[i+1] = n_binary_table_offset_thread[i] + binary_table_thread[i].size();
2805  binary_table.resizeNoInitialize(n_binary_table_offset_thread[num_thread]);
2806 #pragma omp parallel for
2807  for (PS::S32 i=0; i<num_thread; i++) {
2808  for (PS::S32 k=n_binary_table_offset_thread[i]; k<n_binary_table_offset_thread[i+1]; k++) {
2809  binary_table[k] = binary_table_thread[i][k-n_binary_table_offset_thread[i]];
2810  }
2811  }
2812 
2813  // n_group_in_cluster_offset
2814  n_group_in_cluster_offset_.resizeNoInitialize(n_cluster+1);
2815  n_group_in_cluster_offset_[0] = 0;
2816  for (PS::S32 i=0; i<n_cluster; i++) {
2817  n_group_in_cluster_offset_[i+1] = n_group_in_cluster_offset_[i] + n_group_in_cluster_[i];
2818  }
2819  n_member_in_group_.resizeNoInitialize(n_group_in_cluster_offset_[n_cluster]);
2820  n_member_in_group_offset_.resizeNoInitialize(n_group_in_cluster_offset_[n_cluster]);
2821 
2822  // artificial particle first address array
 // -1 marks "no artificial particles" for a group (isolated-binary case)
2823  adr_first_ptcl_arti_in_cluster_.resizeNoInitialize(n_group_in_cluster_offset_[n_cluster]);
2824  for (PS::S32 i=0; i<adr_first_ptcl_arti_in_cluster_.size(); i++) adr_first_ptcl_arti_in_cluster_[i] = -1;
2825 
2826 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2827  for (PS::S32 i=0; i<n_member_in_group_.size(); i++) n_member_in_group_[i] = 0;
2828 #endif
2829 
 // scatter per-thread group info into the global arrays
2830 #pragma omp parallel for
2831  for (PS::S32 i=0; i<num_thread; i++) {
2832  for (PS::S32 k=0; k<n_member_in_group_thread[i].size(); k++) {
2833  PS::S32 i_cluster = n_member_in_group_thread[i][k].cluster_index;
2834  PS::S32 j_group = n_member_in_group_thread[i][k].group_index;
2835  PS::S32 n_members = n_member_in_group_thread[i][k].n_members;
2836  PS::S32 n_member_offset = n_member_in_group_thread[i][k].n_member_offset;
2837  n_member_in_group_[n_group_in_cluster_offset_[i_cluster]+j_group] = n_members;
2838  n_member_in_group_offset_[n_group_in_cluster_offset_[i_cluster]+j_group] = n_member_offset;
2839  // update system soft
2840  if (n_member_in_group_thread[i][k].isolated_case) {
2841  for (PS::S32 j=0; j<n_members; j++) {
2842  auto& p_loc = ptcl_hard_[n_ptcl_in_cluster_disp_[i_cluster]+n_member_offset+j];
2843 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2844  assert(p_loc.group_data.artificial.isMember());
2845  assert(p_loc.getParticleCMAddress()<0);
2846  assert(adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i_cluster]+j_group]<0);
2847 #endif
2848  const PS::S64 p_glb_adr= p_loc.adr_org;
2849  if(p_glb_adr>=0) {
2850  auto& p_glb = _sys[p_glb_adr];
2851  p_glb.group_data= p_loc.group_data;
2852  p_glb.changeover= p_loc.changeover;
2853  p_glb.r_search = p_loc.r_search;
2854  }
2855  else {
2856  // this is remoted member;
2857  n_group_member_remote_++;
2858  }
2859  }
2860  }
2861  }
2862  }
2863 
2864 
2865 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2866  assert(n_group_in_cluster_offset_[n_cluster]<ARRAY_ALLOW_LIMIT);
2867  for (PS::S32 i=0; i<n_member_in_group_.size(); i++) assert(n_member_in_group_[i] > 0);
2868 #endif
2869 
2870  // add artificial particle to particle system
2871  PS::S32 rank = PS::Comm::getRank();
2872  // Get the address offset for new artificial ptcl array in each thread in _sys
2873  PS::S64 sys_ptcl_artificial_thread_offset[num_thread+1];
2874  sys_ptcl_artificial_thread_offset[0] = _sys.getNumberOfParticleLocal();
2875  for(PS::S32 i=0; i<num_thread; i++) {
2876  sys_ptcl_artificial_thread_offset[i+1] = sys_ptcl_artificial_thread_offset[i] + ptcl_artificial_thread[i].size();
2877  }
2878  _sys.setNumberOfParticleLocal(sys_ptcl_artificial_thread_offset[num_thread]);
2879 
2880  const PS::S32 n_artificial_per_group = ap_manager.getArtificialParticleN();
2881 #pragma omp parallel for
2882  for(PS::S32 i=0; i<num_thread; i++) {
2883  // ptcl_artificial should be integer times of n_partificial_per_group
2884  assert(ptcl_artificial_thread[i].size()%n_artificial_per_group==0);
2885  // Add particle to ptcl sys
2886  for (PS::S32 j=0; j<ptcl_artificial_thread[i].size(); j++) {
2887  PS::S32 adr = j+sys_ptcl_artificial_thread_offset[i];
2888  ptcl_artificial_thread[i][j].adr_org=adr;
2889  _sys[adr]=Tptcl(ptcl_artificial_thread[i][j],rank,adr);
2890  }
2891  // Update the status of group members to c.m. address in ptcl sys. Notice c.m. is at the end of an artificial particle group
2892  PS::S32 group_offset=0, j_group_recored=-1;
2893  for (PS::S32 j=0; j<ptcl_artificial_thread[i].size(); j+=n_artificial_per_group) {
2894  auto* pj = &ptcl_artificial_thread[i][j];
2895  auto* pcm = ap_manager.getCMParticles(pj);
2896  PS::S32 n_members = ap_manager.getMemberN(pj);
 // stored data apparently holds (index+1); the -1 recovers the 0-based index — confirm in ArtificialParticleManager
2897  PS::S32 i_cluster = ap_manager.getStoredData(pj,0,true)-1;
2898  PS::S32 j_group = ap_manager.getStoredData(pj,1,true)-1;
2899 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2900  assert(n_member_in_group_[n_group_in_cluster_offset_[i_cluster]+j_group]==n_members);
2901 #endif
2902  // make sure group index increase one by one
2903  assert(j_group==j_group_recored+1);
2904  j_group_recored=j_group;
2905 
2906 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2907  // check whether ID is consistent.
2908  PS::S64 id_cm = ap_manager.getCMID(pj);
2909  bool id_match_flag=false;
2910 #endif
2911  for (PS::S32 k=0; k<n_members; k++) {
2912  PS::S32 kl = n_ptcl_in_cluster_disp_[i_cluster]+group_offset+k;
2913  auto& p_loc = ptcl_hard_.getPointer()[kl];
2914  // save c.m. address
2915 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2916  assert(group_offset == n_member_in_group_offset_[n_group_in_cluster_offset_[i_cluster]+j_group]);
2917  assert(p_loc.getParticleCMAddress()==j+1);
2918  assert(p_loc.group_data.artificial.isMember());
2919 #endif
2920  p_loc.setParticleCMAddress(pcm->adr_org);
2921 
2922 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2923  if(p_loc.id == id_cm) id_match_flag=true;
2924 #endif
2925  // update global particle to be consistent
2926  const PS::S64 p_glb_adr= p_loc.adr_org;
2927  if(p_glb_adr>=0) {
2928  auto& p_glb = _sys[p_glb_adr];
2929  p_glb.mass = p_loc.mass;
2930  p_glb.group_data= p_loc.group_data;
2931  p_glb.changeover= p_loc.changeover;
2932  p_glb.r_search = p_loc.r_search;
2933  }
2934  else {
2935  // this is remoted member;
2936  n_group_member_remote_++;
2937  }
2938 
2939  }
2940 
2941 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2942  assert(id_match_flag);
2943 #endif
2944  // shift cluster
2945  if(j_group==n_group_in_cluster_[i_cluster]-1) {
2946  group_offset=0;
2947  j_group_recored=-1;
2948  }
2949  else group_offset += n_members; // group offset in the ptcl list index of one cluster
2950  // j_group should be consistent with n_group[i_cluster];
2951  assert(j_group<=n_group_in_cluster_[i_cluster]);
2952 
2953  // save first address of artificial particle
2954  adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i_cluster]+j_group] = ptcl_artificial_thread[i][j].adr_org;
2955  }
2956  }
2957 
2958  // merge i_cluster_changeover
2959  i_cluster_changeover_update_.resizeNoInitialize(0);
2960  for(PS::S32 i=0; i<num_thread; i++) {
2961  for (PS::S32 j=0; j<i_cluster_changeover_update_threads[i].size();j++)
2962  i_cluster_changeover_update_.push_back(i_cluster_changeover_update_threads[i][j]);
2963  }
2964 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2965  if (i_cluster_changeover_update_.size()>0) 
2966  std::cerr<<"Changeover change cluster found: T="<<time_origin_<<" n_cluster_change="<<i_cluster_changeover_update_.size()<<std::endl;
2967 #endif
2968  /*
2969  // sort data
2970  PS::S32 i_cluster_size = i_cluster_changeover_update_.size();
2971  if (i_cluster_size>0) {
2972  PS::S32* i_cluster_data = i_cluster_changeover_update_.getPointer();
2973  std::sort(i_cluster_data, i_cluster_data+i_cluster_size, [] (const PS::S32 &a, const PS::S32 &b) { return a<b; });
2974  // remove dup
2975  PS::S32* i_end = std::unique(i_cluster_data, i_cluster_data+i_cluster_size);
2976 #ifdef ARTIFICIAL_PARTICLE_DEBUG
2977  assert(i_end-i_cluster_data>=0&&i_end-i_cluster_data<=i_cluster_size);
2978  std::cerr<<"Changeover change cluster found: T="<<time_origin_<<" n_cluster_change="<<int(i_end-i_cluster_data)<<std::endl;
2979 #endif
2980  i_cluster_changeover_update_.resizeNoInitialize(i_end-i_cluster_data);
2981  }
2982  */
2983 
2985  }
2986 
2987 #ifdef CLUSTER_VELOCITY
2988 
 // Reset group_data.cm (mass and velocity) of every local hard particle to zero
 // and mirror the reset into the corresponding global (soft) particle, to avoid
 // stale c.m. data in the velocity-based cluster search.
 // @param[in,out] _ptcl_soft: global particle array, indexed by adr_org
2992  template <class Tsoft>
2993  void resetParticleGroupData(Tsoft& _ptcl_soft) {
 // NOTE(review): one line is elided from this listing here (hard.hpp:2994);
 // presumably it sets Ptcl::group_data_mode — confirm against the full source.
2995  const PS::S32 n = ptcl_hard_.size();
2996 #pragma omp parallel for
2997  for(PS::S32 i=0; i<n; i++){
2998  // to avoid issue in cluster search with velocity
2999  auto& pi_cm = ptcl_hard_[i].group_data.cm;
3000  pi_cm.mass = pi_cm.vel.x = pi_cm.vel.y = pi_cm.vel.z = 0.0;
3001  PS::S32 adr = ptcl_hard_[i].adr_org;
3002 #ifdef HARD_DEBUG
 // every hard particle is expected to have a valid global address here
3003  assert(adr>=0);
3004 #endif
3005  _ptcl_soft[adr].group_data.cm = pi_cm;
3006  }
3007  }
3008 
3010 
 // Store the group's c.m. mass and velocity into group_data.cm of every group
 // member (taken from the artificial c.m. particle when one exists, otherwise
 // computed from the members themselves), zero it for single particles, and
 // mirror the result into the global (soft) system.
 // @param[in,out] _ptcl_soft: global particle array, indexed by adr_org
3013  template <class Tsoft>
3014  void setParticleGroupDataToCMData(Tsoft& _ptcl_soft) {
 // NOTE(review): one line is elided from this listing here (hard.hpp:3015);
 // presumably it sets Ptcl::group_data_mode — confirm against the full source.
3016  auto& ap_manager = manager->ap_manager;
3017  const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
3018 #pragma omp parallel for schedule(dynamic)
3019  for(PS::S32 i=0; i<n_cluster; i++){
3020  const PS::S32 adr_head = n_ptcl_in_cluster_disp_[i];
3021  const PS::S32 n_ptcl = n_ptcl_in_cluster_[i];
3022  const PS::S32 n_group = n_group_in_cluster_[i];
3023  PtclH4* ptcl_local = ptcl_hard_.getPointer(adr_head);
3024 
3025  PS::S32 n_group_offset_local = 0;
3026  if(n_group>0) {
3027  for(int k=0; k<n_group; k++) {
3028  PS::S32 n_group_in_cluster_offset_k = n_group_in_cluster_offset_[i]+k;
3029  PS::S32 ptcl_artificial_adr = adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_k];
3030  PS::S32 n_members = n_member_in_group_[n_group_in_cluster_offset_k];
3031  // when artificial particles exist
3032  if (ptcl_artificial_adr>=0) {
3033  auto* pi = &(_ptcl_soft[ptcl_artificial_adr]);
3034  auto* pcm = ap_manager.getCMParticles(pi);
 // c.m. particle carries the true group mass as a backup value
3035  PS::F64 pcm_mass = pcm->group_data.artificial.getMassBackup();
3036 #ifdef ARTIFICIAL_PARTICLE_DEBUG
3037  assert(n_members == ap_manager.getMemberN(pi));
3038  ap_manager.checkConsistence(&ptcl_local[n_group_offset_local], pi);
3039 #endif
3040  for (int j=n_group_offset_local; j<n_group_offset_local+n_members; j++) {
 // member r_search must cover the c.m. search radius
3041  ptcl_local[j].r_search = std::max(pcm->r_search, ptcl_local[j].r_search);
3042  auto& pj_cm = ptcl_local[j].group_data.cm;
3043  pj_cm.mass = pcm_mass;
3044  pj_cm.vel.x = pcm->vel[0];
3045  pj_cm.vel.y = pcm->vel[1];
3046  pj_cm.vel.z = pcm->vel[2];
3047  PS::S32 adr = ptcl_local[j].adr_org;
3048  if(adr>=0) {
3049  assert(ptcl_local[j].id==_ptcl_soft[adr].id);
3050  _ptcl_soft[adr].group_data.cm = pj_cm;
3051  _ptcl_soft[adr].r_search = ptcl_local[j].r_search;
3052  }
3053  }
3054  }
3055  else {
3056  // when no artificial particles, calculate c.m.
3057 #ifdef ARTIFICIAL_PARTICLE_DEBUG
3058  // current case only isolated binary
3059  assert(n_members == 2&&n_group==1);
3060 #endif
 // accumulation in single precision (PS::F32), matching the cm storage type
3061  PS::F32 mass_cm=0.0;
3062  PS::F32vec vel_cm=PS::F32vec(0.0);
3063  for (int j=n_group_offset_local; j<n_group_offset_local+n_members; j++) {
3064  auto& pj = ptcl_local[j];
3065  mass_cm += pj.mass;
3066  vel_cm.x += pj.mass*pj.vel.x;
3067  vel_cm.y += pj.mass*pj.vel.y;
3068  vel_cm.z += pj.mass*pj.vel.z;
3069  }
3070  vel_cm /= mass_cm;
3071  for (int j=n_group_offset_local; j<n_group_offset_local+n_members; j++) {
3072  auto& pj_cm = ptcl_local[j].group_data.cm;
3073  pj_cm.mass = mass_cm;
3074  pj_cm.vel = vel_cm;
3075  PS::S32 adr = ptcl_local[j].adr_org;
3076  if(adr>=0) {
3077  assert(ptcl_local[j].id==_ptcl_soft[adr].id);
3078  _ptcl_soft[adr].group_data.cm = pj_cm;
3079  }
3080  }
3081  }
3082  n_group_offset_local += n_members;
3083  }
3084  }
 // remaining particles in the cluster are singles: zero their c.m. data
3085  for (int j=n_group_offset_local; j<n_ptcl; j++) {
3086  auto& pj_cm = ptcl_local[j].group_data.cm;
3087  pj_cm.mass = pj_cm.vel.x = pj_cm.vel.y = pj_cm.vel.z = 0.0;
3088  PS::S32 adr = ptcl_local[j].adr_org;
3089  if(adr>=0) _ptcl_soft[adr].group_data.cm = pj_cm;
3090  }
3091  }
3092  }
3093 #endif
3094 
3096  /* The force kernel have self-interaction on the potential contribution, need to be excluded. _sys acc is updated
3097  @param[in,out] _sys: global particle system, acc is updated
3098  @param[in] _ptcl_list: list of single particle in _ptcl
3099  (note: the particle count is taken from _ptcl_list.size(); there is no separate count parameter)
3100  */
3101  template <class Tsys>
3102  void correctPotWithCutoffOMP(Tsys& _sys,
3103  const PS::ReallocatableArray<PS::S32>& _ptcl_list) {
 // NOTE(review): one line is elided from this listing here (hard.hpp:3104);
 // it presumably defines G (gravitational constant from manager) used below — confirm.
3105  const PS::S32 n_ptcl = _ptcl_list.size();
3106 #pragma omp parallel for
3107  for (int i=0; i<n_ptcl; i++) {
3108  const PS::S32 k =_ptcl_list[i];
 // remove the self-interaction term G*m/r_out that the force kernel adds to the potential
3109  PS::F64 pot_cor = G*_sys[k].mass/manager->r_out_base;
3110  _sys[k].pot_tot += pot_cor;
3111  _sys[k].pot_soft += pot_cor;
3112 //#ifdef STELLAR_EVOLUTION
3113 // // correct soft potential energy of one particle change due to mass change
3114 // correctSoftPotMassChangeOneParticle(_sys[k]);
3115 //#endif
3116  }
3117  }
3118 
3120 
 // Soft force correction per cluster: first correct the artificial particles of
 // each cluster, then correct each local real member via tree neighbor search;
 // finally correct particles in the sending list the same way.
 // NOTE(review): the function signature line is elided in this listing
 // (hard.hpp:3132); it takes (Tsys& _sys, ...) followed by the parameters below.
3131  template <class Tsys, class Tpsoft, class Ttree, class Tepj>
3133  Ttree& _tree,
3134  const PS::ReallocatableArray<PS::S32>& _adr_send,
3135  const bool _acorr_flag=false) {
3136  const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
3137  const PtclH4* ptcl_local = ptcl_hard_.getPointer();
3138 
3139 #pragma omp parallel for schedule(dynamic)
3140  for (int i=0; i<n_cluster; i++) { // i: i_cluster
3141  PS::S32 adr_real_start= n_ptcl_in_cluster_disp_[i];
3142  PS::S32 adr_real_end= n_ptcl_in_cluster_disp_[i+1];
3143  // artificial particle group number
3144  PS::S32 n_group = n_group_in_cluster_[i];
3145  //PS::S32 n_group_offset = n_group_in_cluster_offset_[i];
3146  const PS::S32* adr_first_ptcl_arti = &adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i]];
3147 
3148  // correction for artificial particles
3149  correctForceWithCutoffArtificialOneClusterImp(_sys, ptcl_local, adr_real_start, adr_real_end, n_group, adr_first_ptcl_arti, _acorr_flag);
3150 
3151  // obtain correction for real particles in clusters use tree neighbor search
3152  for (int j=adr_real_start; j<adr_real_end; j++) {
3153  PS::S64 adr = ptcl_local[j].adr_org;
3154  // only do for local particles
3155 #ifdef HARD_DEBUG
3156  if(adr>=0) assert(_sys[adr].id==ptcl_local[j].id);
3157 #endif
 // adr<0 means a remote member owned by another MPI rank; skip it here
3158  if(adr>=0) correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[adr], _tree, _acorr_flag);
3159  }
3160  }
3161 
3162  const PS::S32 n_send = _adr_send.size();
3163 #pragma omp parallel for
3164  // sending list to other nodes need also be corrected.
3165  for (int i=0; i<n_send; i++) {
3166  PS::S64 adr = _adr_send[i];
3167  correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[adr], _tree, _acorr_flag);
3168  }
3169  }
3170 
3172  /* Use cluster member,
3173  1. first correct for artificial particles, then for cluster member.
3174  2. The c.m. force is substracted from tidal tensor force
3175  3. c.m. force is replaced by the averaged force on orbital particles
3176 
3177  @param[in] _sys: global particle system, acc is updated
3178  @param[in] _acorr_flag: flag to do acorr for KDKDK_4TH case
3179  */
3180  template <class Tsys>
3181  void correctForceWithCutoffClusterOMP(Tsys& _sys, const bool _acorr_flag=false) {
3183 
 // NOTE(review): two lines are elided from this listing here (hard.hpp:3182/3184);
 // one presumably defines G (gravitational constant from manager) used below — confirm.
3185  const PS::S32 n_cluster = n_ptcl_in_cluster_.size();
3186  auto& ap_manager = manager->ap_manager;
3187  const PtclH4* ptcl_local = ptcl_hard_.getPointer();
3188 #pragma omp parallel for schedule(dynamic)
3189  for (int i=0; i<n_cluster; i++) { // i: i_cluster
3190  PS::S32 adr_real_start= n_ptcl_in_cluster_disp_[i];
3191  PS::S32 adr_real_end= n_ptcl_in_cluster_disp_[i+1];
3192  // artificial particle group number
3193  PS::S32 n_group = n_group_in_cluster_[i];
3194  //PS::S32 n_group_offset = n_group_in_cluster_offset_[i];
3195  const PS::S32* adr_first_ptcl_arti = n_group>0? &adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i]] : NULL;
3196 
3197  // correction for artificial particles
3198  correctForceWithCutoffArtificialOneClusterImp(_sys, ptcl_hard_.getPointer(), adr_real_start, adr_real_end, n_group, adr_first_ptcl_arti, _acorr_flag);
3199 
3200  // obtain correction for real particles in clusters
 // unlike the tree-neighbor variant, _sys[adr] is used without an adr>=0 check:
 // all cluster members are assumed local here
3201  for (int j=adr_real_start; j<adr_real_end; j++) {
3202  PS::S64 adr = ptcl_local[j].adr_org;
3203 #ifdef HARD_DEBUG
3204  assert(_sys[adr].id==ptcl_local[j].id);
3205 #endif
3206  //self-potential correction for non-group member, group member has mass zero, so no need correction
3207  // for binary without artificial particles, correction is needed.
3208  if (_sys[adr].group_data.artificial.isSingle() 
3209  || (_sys[adr].group_data.artificial.isMember() && _sys[adr].getParticleCMAddress()<0)) {
3210  PS::F64 pot_cor = G*_sys[adr].mass/manager->r_out_base;
3211  _sys[adr].pot_tot += pot_cor;
3212  _sys[adr].pot_soft += pot_cor;
3213  }
3214 
3215  // cluster member
3216  for (int k=adr_real_start; k<adr_real_end; k++) {
3217  if(k==j) continue;
3218 #ifdef KDKDK_4TH
3219  if(_acorr_flag) {
3220  PS::S64 adr_k = ptcl_local[k].adr_org;
3221  calcAcorrShortWithLinearCutoff(_sys[adr], _sys[adr_k]);
3222  }
3223  else
3224 #endif
3225  calcAccPotShortWithLinearCutoff(_sys[adr], ptcl_local[k]);
3226  }
3227 
3228  // orbital artificial particle
3229  for (int k=0; k<n_group; k++) {
3230  // loop artificial particle orbital
3231  PS::S32 k_start = adr_first_ptcl_arti[k];
3232 #ifdef ARTIFICIAL_PARTICLE_DEBUG
 // negative first address means an isolated binary without artificial particles
3233  if (k_start<0) assert(n_group==1&&adr_real_end-adr_real_start==2);
3234 #endif
3235  if (k_start<0) continue;
3236  auto* porb_k = ap_manager.getOrbitalParticles(&(_sys[k_start]));
3237  for (int ki=0; ki<ap_manager.getOrbitalParticleN(); ki++) {
3238 #ifdef KDKDK_4TH
3239  if(_acorr_flag) 
3240  calcAcorrShortWithLinearCutoff(_sys[adr], porb_k[ki]);
3241  else
3242 #endif
3243  calcAccPotShortWithLinearCutoff(_sys[adr], porb_k[ki]);
3244  }
3245  }
3246 //#ifdef STELLAR_EVOLUTION
3247 // // correct soft potential energy of one particle change due to mass change
3248 // correctSoftPotMassChangeOneParticle(_sys[adr]);
3249 //#endif
3250  }
3251  }
3252  }
3253 
3255 
3261  template <class Tsys, class Ttree, class Tepj>
3262  void correctForceForChangeOverUpdateOMP(Tsys& _sys, Ttree& _tree,
3263  const PS::S32* _adr_send=NULL, const PS::S32 _n_send=0) {
3264  const PS::S32 n_cluster = i_cluster_changeover_update_.size();
3265  //auto& ap_manager = manager->ap_manager;
3266 #pragma omp parallel for schedule(dynamic)
3267  for (int i=0; i<n_cluster; i++) { // i: i_cluster
3268  PS::S32 i_cluster = i_cluster_changeover_update_[i];
3269  PS::S32 adr_real_start= n_ptcl_in_cluster_disp_[i_cluster];
3270  PS::S32 adr_real_end= n_ptcl_in_cluster_disp_[i_cluster+1];
3271 
3272  // correction for artificial particles
3273  /* c.m. force is not sum of orbital force anymore, suppress this part
3274  // artificial particle group number
3275  PS::S32 n_group = n_group_in_cluster_[i_cluster];
3276  const PS::S32* adr_first_ptcl_arti = n_group>0? &adr_first_ptcl_arti_in_cluster_[n_group_in_cluster_offset_[i_cluster]] : NULL;
3277  for (int j=0; j<n_group; j++) { // j: j_group
3278  PS::S32 j_start = adr_first_ptcl_arti[j];
3279  auto* pj = &(_sys[j_start]);
3280 
3281  // loop orbital particles
3282  auto* porb_j = ap_manager.getOrbitalParticles(pj);
3283  const PS::S32 n_orb = ap_manager.getOrbitalParticleN();
3284  for (int k=0; k<n_orb; k++) {
3285  // k: k_ptcl_arti
3286  bool changek = porb_j[k].changeover.r_scale_next!=1.0;
3287 
3288  // loop orbital artificial particle
3289  // group
3290  for (int kj=0; kj<n_group; kj++) { // group
3291  PS::S32 kj_start = adr_first_ptcl_arti[kj];
3292  auto* porb_kj = ap_manager.getOrbitalParticles(&_sys[kj_start]);
3293 
3294  // particle arti orbital
3295  if (porb_kj[0].changeover.r_scale_next!=1.0 || changek) {
3296  for (int kk=0; kk<n_orb; kk++) {
3297  if(&porb_kj[kk]==&porb_j[k]) continue; //avoid same particle
3298  // in rare case, equal mass systems with circular orbit can have two members locating at the same positions, this can result in dr2=0 and NaN
3299  calcAccChangeOverCorrection(porb_j[k], porb_kj[kk]);
3300  }
3301  }
3302  }
3303 
3304  //loop real particle
3305  for (int kj=adr_real_start; kj<adr_real_end; kj++) {
3306  if (ptcl_hard_[kj].changeover.r_scale_next!=1.0 || changek) {
3307  calcAccChangeOverCorrection(porb_j[k], ptcl_hard_[kj]);
3308  }
3309  }
3310  }
3311 
3312  // correct c.m. average force
3313  ap_manager.correctOrbitalParticleForce(pj);
3314  }
3315  */
3316 
3317  // correction for real particles
3318  for (int j=adr_real_start; j<adr_real_end; j++) {
3319  PS::S64 adr = ptcl_hard_[j].adr_org;
3320  if(adr>=0) {
3321  bool change_i = _sys[adr].changeover.r_scale_next!=1.0;
3322  Tepj * ptcl_nb = NULL;
3323  PS::S32 n_ngb = _tree.getNeighborListOneParticle(_sys[adr], ptcl_nb);
3324  for(PS::S32 k=0; k<n_ngb; k++){
3325  if (ptcl_nb[k].id == _sys[adr].id) continue;
3326 
3327  if (ptcl_nb[k].r_scale_next!=1.0 || change_i)
3328  calcAccChangeOverCorrection(_sys[adr], ptcl_nb[k]);
3329  }
3330  }
3331  // update changeover
3332  ptcl_hard_[j].changeover.updateWithRScale();
3333  if(adr>=0) _sys[adr].changeover.updateWithRScale();
3334  }
3335 
3336  }
3337 #pragma omp parallel for
3338  // sending list to other nodes need also be corrected.
3339  for (int i=0; i<_n_send; i++) {
3340  PS::S64 adr = _adr_send[i];
3341  bool change_i = _sys[adr].changeover.r_scale_next!=1.0;
3342  Tepj * ptcl_nb = NULL;
3343  PS::S32 n_ngb = _tree.getNeighborListOneParticle(_sys[adr], ptcl_nb);
3344  for(PS::S32 k=0; k<n_ngb; k++){
3345  if (ptcl_nb[k].id == _sys[adr].id) continue;
3346 
3347  if (ptcl_nb[k].r_scale_next!=1.0 || change_i)
3348  calcAccChangeOverCorrection(_sys[adr], ptcl_nb[k]);
3349  }
3350  _sys[adr].changeover.updateWithRScale();
3351  }
3352 
3353  }
3354 
3355 
3357 
 // Soft force correction done entirely via tree neighbor search: first correct every
 // local particle (real and artificial), then apply the per-group artificial-particle
 // force correction, stepping through _sys in strides of one artificial group.
 // NOTE(review): the function signature line is elided in this listing
 // (hard.hpp:3366); it takes (Tsys& _sys, ...) followed by the parameters below.
3365  template <class Tsys, class Tpsoft, class Ttree, class Tepj>
3367  Ttree& _tree,
3368  const PS::S32 _adr_ptcl_artificial_start,
3369  ArtificialParticleManager& _ap_manager,
3370  const bool _acorr_flag=false) {
3371  // for artificial particle
3372  const PS::S32 n_tot = _sys.getNumberOfParticleLocal();
3373 
3374 #pragma omp parallel for schedule(dynamic)
3375  for (int i=0; i<n_tot; i++) {
3376  correctForceWithCutoffTreeNeighborOneParticleImp<Tpsoft, Ttree, Tepj>(_sys[i], _tree, _acorr_flag);
3377  }
3378  const PS::S32 n_artificial = _ap_manager.getArtificialParticleN();
3379 #ifdef HARD_DEBUG
 // artificial particles must come in whole groups after _adr_ptcl_artificial_start
3380  assert((n_tot-_adr_ptcl_artificial_start)%n_artificial==0);
3381 #endif
3382 #pragma omp parallel for schedule(dynamic)
3383  for (int i=_adr_ptcl_artificial_start; i<n_tot; i+=n_artificial)
 // (sic: "Artficial" is the spelling of the project API name)
3384  _ap_manager.correctArtficialParticleForce(&(_sys[i]));
3385  }
3386 
3387 };
3388 
SystemHard::getGroupNumberOfMemberList
PS::S32 * getGroupNumberOfMemberList(const std::size_t i=0) const
Definition: hard.hpp:1949
SystemHard::finishIntegrateInterruptClustersOMP
PS::S32 finishIntegrateInterruptClustersOMP()
Finish interrupt integration.
Definition: hard.hpp:2425
SystemHard::correctForceWithCutoffClusterOMP
void correctForceWithCutoffClusterOMP(Tsys &_sys, const bool _acorr_flag=false)
Soft force correction due to different cut-off function.
Definition: hard.hpp:3181
HardManager::setEpsSq
void setEpsSq(const PS::F64 _eps_sq)
set softening
Definition: hard.hpp:41
HardEnergy::de_sd_change_binary_interrupt
PS::F64 de_sd_change_binary_interrupt
Definition: hard.hpp:126
HardManager::ap_manager
ArtificialParticleManager ap_manager
Definition: hard.hpp:33
SystemHard::updateTimeWriteBack
void updateTimeWriteBack()
Definition: hard.hpp:1969
HardIntegrator
hard integrator
Definition: hard.hpp:159
HardEnergy::de_sd
PS::F64 de_sd
Definition: hard.hpp:124
HardEnergy::de
PS::F64 de
Definition: hard.hpp:120
SystemHard::binary_table
PS::ReallocatableArray< COMM::BinaryTree< PtclH4, COMM::Binary > > binary_table
Definition: hard.hpp:1220
EPJSoft::mass
PS::F64 mass
Definition: soft_ptcl.hpp:314
Stability
stability checker for binary tree
Definition: stability.hpp:6
SystemHard::GroupIndexInfo::n_members
PS::S32 n_members
Definition: hard.hpp:2475
ArtificialParticleManager::print
void print(std::ostream &_fout) const
print parameters
Definition: artificial_particles.hpp:512
PIKG::S32
int32_t S32
Definition: pikg_vector.hpp:24
hermite_perturber.hpp
ChangeOver::r_scale_next
Float r_scale_next
(1 + coff_)/r_out = 2/(r_out+r_in)
Definition: changeover.hpp:19
Stability::findClosedTree
void findClosedTree(BinTree &_bin)
Check binary tree and collect closed systems subtree root.
Definition: stability.hpp:339
soft_ptcl.hpp
Ptcl::group_data_mode
static GroupDataMode group_data_mode
Definition: ptcl.hpp:47
HardIntegrator::printInterruptBinaryInfo
void printInterruptBinaryInfo(std::ostream &_fout) const
print interrupt binary information
Definition: hard.hpp:1144
SystemHard::GroupIndexInfo::GroupIndexInfo
GroupIndexInfo(PS::S32 _cluster, PS::S32 _group, PS::S32 _member, PS::S32 _offset, PS::S32 _isolated_case)
Definition: hard.hpp:2477
Ptcl::mean_mass_inv
static PS::F64 mean_mass_inv
Definition: ptcl.hpp:45
PIKG::F64vec
Vector3< F64 > F64vec
Definition: pikg_vector.hpp:167
search_group_candidate.hpp
SystemHard::GroupIndexInfo::group_index
PS::S32 group_index
Definition: hard.hpp:2475
SystemHard::getNClusterChangeOverUpdate
PS::S32 getNClusterChangeOverUpdate() const
Definition: hard.hpp:1961
HardManager::r_out_base
PS::F64 r_out_base
Definition: hard.hpp:31
SystemHard::getNumberOfInterruptClusters
PS::S32 getNumberOfInterruptClusters() const
Definition: hard.hpp:1933
HardManager::readBinary
void readBinary(FILE *_fin, int _version=0)
read class data to file with binary format
Definition: hard.hpp:95
EPJSoft::r_out
PS::F64 r_out
Definition: soft_ptcl.hpp:321
HardIntegrator::time_origin
PS::F64 time_origin
tidal tensor array
Definition: hard.hpp:165
galpy_pot_movie.dt
dt
Definition: galpy_pot_movie.py:134
HardIntegrator::driftClusterCMRecordGroupCMDataAndWriteBack
void driftClusterCMRecordGroupCMDataAndWriteBack(const PS::F64 _time_end)
drift c.m. particle of the cluster record group c.m. in group_data and write back data to original pa...
Definition: hard.hpp:835
GroupDataMode::cm
@ cm
HardManager::energy_error_max
PS::F64 energy_error_max
Definition: hard.hpp:28
TidalTensor::group_id
PS::F64 group_id
Definition: tidal_tensor.hpp:14
HardManager
Hard integrator parameter manager.
Definition: hard.hpp:26
ArtificialParticleManager::checkParams
bool checkParams()
check paramters
Definition: artificial_particles.hpp:244
SystemHard::writeBackPtclForMultiClusterOMP
void writeBackPtclForMultiClusterOMP(Tsys &_sys, PS::ReallocatableArray< PS::S32 > &_mass_modify_list)
write back hard particles to global system and update time of write back OMP version
Definition: hard.hpp:2204
HardIntegrator::sym_int
AR::TimeTransformedSymplecticIntegrator< PtclHard, PtclH4, ARPerturber, ARInteraction, H4::ARInformation< PtclHard > > sym_int
hermite integrator
Definition: hard.hpp:162
SystemHard::getClusterNumberOfMemberListOffset
PS::S32 * getClusterNumberOfMemberListOffset(const std::size_t i=0) const
Definition: hard.hpp:1945
ArtificialParticleManager::correctArtficialParticleForce
void correctArtficialParticleForce(Tptcl *_ptcl_artificial)
correct artificial particle force for future use
Definition: artificial_particles.hpp:373
PIKG::F64
double F64
Definition: pikg_vector.hpp:17
SystemHard::GroupIndexInfo
Definition: hard.hpp:2474
SystemHard::GroupIndexInfo::isolated_case
PS::S32 isolated_case
Definition: hard.hpp:2475
HardEnergy::de_change_cum
PS::F64 de_change_cum
Definition: hard.hpp:121
SystemHard::checkParams
bool checkParams()
check paramters
Definition: hard.hpp:1238
HardManager::h4_manager
H4::HermiteManager< HermiteInteraction > h4_manager
Definition: hard.hpp:34
HardIntegrator::h4_int
H4::HermiteIntegrator< PtclHard, PtclH4, HermitePerturber, ARPerturber, HermiteInteraction, ARInteraction, HermiteInformation > h4_int
Definition: hard.hpp:161
SystemHard::correctPotWithCutoffOMP
void correctPotWithCutoffOMP(Tsys &_sys, const PS::ReallocatableArray< PS::S32 > &_ptcl_list)
potential correction for single cluster
Definition: hard.hpp:3102
GroupDataMode::artificial
@ artificial
PtclH4
H4::ParticleH4< PtclHard > PtclH4
Definition: hard.hpp:23
HardIntegrator::is_initialized
bool is_initialized
use AR integrator flag
Definition: hard.hpp:187
Ptcl::printColumnTitle
static void printColumnTitle(std::ostream &_fout, const int _width=20)
print titles of class members using column style
Definition: ptcl.hpp:85
ParticleBase::pos
PS::F64vec pos
Definition: particle_base.hpp:24
Ptcl
Particle class.
Definition: ptcl.hpp:36
PIKG::S64
int64_t S64
Definition: pikg_vector.hpp:23
SystemHard::setTimeOrigin
void setTimeOrigin(const PS::F64 _time_origin)
Definition: hard.hpp:1965
HardEnergy::resetEnergyCorrection
void resetEnergyCorrection()
Definition: hard.hpp:138
EPISoft::r_out
static PS::F64 r_out
Definition: soft_ptcl.hpp:282
PIKG::F32vec
Vector3< F32 > F32vec
Definition: pikg_vector.hpp:168
SystemHard::correctForceWithCutoffTreeNeighborOMP
static void correctForceWithCutoffTreeNeighborOMP(Tsys &_sys, Ttree &_tree, const PS::S32 _adr_ptcl_artificial_start, ArtificialParticleManager &_ap_manager, const bool _acorr_flag=false)
Soft force correction due to different cut-off function.
Definition: hard.hpp:3366
ArtificialParticleManager::readBinary
void readBinary(FILE *_fin)
read class data to file with binary format
Definition: artificial_particles.hpp:499
PtclHard
Definition: hard_ptcl.hpp:5
HardEnergy::de_change_binary_interrupt
PS::F64 de_change_binary_interrupt
Definition: hard.hpp:122
HardManager::ar_manager
AR::TimeTransformedSymplecticManager< ARInteraction > ar_manager
Definition: hard.hpp:35
SystemHard::setPtclForOneClusterOMP
void setPtclForOneClusterOMP(const Tsys &sys, const PS::ReallocatableArray< PS::S32 > &adr_array)
Definition: hard.hpp:2026
HardManager::r_in_base
PS::F64 r_in_base
Definition: hard.hpp:30
PIKG::min
T min(const Vector3< T > &v)
Definition: pikg_vector.hpp:150
HardIntegrator::ptcl_origin
PtclH4 * ptcl_origin
origin physical time
Definition: hard.hpp:166
HardEnergy::operator+=
HardEnergy & operator+=(const HardEnergy &_energy)
Definition: hard.hpp:142
SystemHard::getInterruptHardIntegrator
HardIntegrator * getInterruptHardIntegrator(const std::size_t i)
Definition: hard.hpp:1937
SystemHard::SystemHard
SystemHard()
Definition: hard.hpp:1810
ArtificialParticleManager::orbit_manager
OrbitManager orbit_manager
gravitational constant
Definition: artificial_particles.hpp:238
EPJSoft::r_in
PS::F64 r_in
Definition: soft_ptcl.hpp:320
SystemHard::getNumberOfClusters
PS::S32 getNumberOfClusters() const
Definition: hard.hpp:1929
Stability::t_crit
PS::F64 t_crit
Definition: stability.hpp:10
ArtificialParticleManager::getArtificialParticleN
PS::S32 getArtificialParticleN() const
get artificial particle total number
Definition: artificial_particles.hpp:427
HardIntegrator::use_sym_int
bool use_sym_int
interrupt binary address
Definition: hard.hpp:186
DATADUMP
#define DATADUMP(expr)
Definition: hard_assert.hpp:227
SystemHard::~SystemHard
~SystemHard()
Definition: hard.hpp:1846
SystemHard::GroupIndexInfo::cluster_index
PS::S32 cluster_index
Definition: hard.hpp:2475
SystemHard::getAdrPtclArtFirstList
PS::S32 * getAdrPtclArtFirstList(const std::size_t i=0) const
Definition: hard.hpp:1957
HardManager::setGravitationalConstant
void setGravitationalConstant(const PS::F64 _g)
set gravitational constant
Definition: hard.hpp:48
SystemHard
Hard system.
Definition: hard.hpp:1190
ChangeOver
Changeover function class.
Definition: changeover.hpp:7
HardManager::eps_sq
PS::F64 eps_sq
Definition: hard.hpp:29
Ptcl::changeover
ChangeOver changeover
Definition: ptcl.hpp:41
hermite_interaction.hpp
SystemHard::findGroupsAndCreateArtificialParticlesOneCluster
void findGroupsAndCreateArtificialParticlesOneCluster(const PS::S32 _i_cluster, Tptcl *_ptcl_in_cluster, const PS::S32 _n_ptcl, PS::ReallocatableArray< Tptcl > &_ptcl_artificial, PS::ReallocatableArray< COMM::BinaryTree< PtclH4, COMM::Binary >> &_binary_table, PS::S32 &_n_groups, PS::ReallocatableArray< GroupIndexInfo > &_n_member_in_group, PS::ReallocatableArray< PS::S32 > &_changeover_update_list, SearchGroupCandidate< Tptcl > &_groups, const PS::F64 _dt_tree)
generate artificial particles,
Definition: hard.hpp:2495
TidalTensor
Tidal tensor perterbation for AR.
Definition: tidal_tensor.hpp:4
HardIntegrator::checkParams
bool checkParams()
check parameters
Definition: hard.hpp:212
ChangeOver::calcPotWTwo
static Float calcPotWTwo(const ChangeOver &_ch1, const ChangeOver &_ch2, const Float &_dr)
calculate changeover function Pot by selecting maximum rout
Definition: changeover.hpp:366
EPJSoft
Definition: soft_ptcl.hpp:311
ChangeOver::calcAcc0WTwo
static Float calcAcc0WTwo(const ChangeOver &_ch1, const ChangeOver &_ch2, const Float &_dr)
calculate changeover function Acc0 by selecting maximum rout
Definition: changeover.hpp:372
ARRAY_ALLOW_LIMIT
#define ARRAY_ALLOW_LIMIT
Definition: cluster_list.hpp:11
HardEnergy::epot_sd_correction
PS::F64 epot_sd_correction
Definition: hard.hpp:129
EPJSoft::r_scale_next
PS::F64 r_scale_next
Definition: soft_ptcl.hpp:323
HardIntegrator::initial
void initial(PtclH4 *_ptcl, const PS::S32 _n_ptcl, Tsoft *_ptcl_artificial, const PS::S32 _n_group, const PS::S32 *_n_member_in_group, HardManager *_manager, const PS::F64 _time_origin)
initial integration
Definition: hard.hpp:231
ChangeOver::getRin
const Float & getRin() const
get r_in
Definition: changeover.hpp:87
SystemHard::GroupIndexInfo::n_member_offset
PS::S32 n_member_offset
Definition: hard.hpp:2475
SearchGroupCandidate::getMemberList
PS::S32 * getMemberList(const std::size_t igroup)
Definition: search_group_candidate.hpp:237
HardIntegrator::HardIntegrator
HardIntegrator()
indicator whether initialization is done
Definition: hard.hpp:194
ChangeOver::getRout
const Float & getRout() const
get r_out
Definition: changeover.hpp:94
SystemHard::setPtclForIsolatedMultiClusterOMP
void setPtclForIsolatedMultiClusterOMP(const Tsys &sys, const PS::ReallocatableArray< PS::S32 > &_adr_array, const PS::ReallocatableArray< PS::S32 > &_n_ptcl_in_cluster)
Definition: hard.hpp:2229
EPJSoft::pos
PS::F64vec pos
Definition: soft_ptcl.hpp:315
SystemHard::writeBackPtclForMultiCluster
void writeBackPtclForMultiCluster(Tsys &_sys, PS::ReallocatableArray< PS::S32 > &_mass_modify_list)
write back hard particles to global system, check mass modification and update time of write back
Definition: hard.hpp:2192
SystemHard::getClusterNumberOfMemberList
PS::S32 * getClusterNumberOfMemberList(const std::size_t i=0) const
Definition: hard.hpp:1941
SystemHard::allocateHardIntegrator
void allocateHardIntegrator(const PS::S32 _n_hard_int)
allocate memorgy for HardIntegrator
Definition: hard.hpp:1836
EPISoft::eps
static PS::F64 eps
Definition: soft_ptcl.hpp:281
WRITE_WIDTH
const int WRITE_WIDTH
Definition: bse_test.cxx:10
ChangeOver::calcAcc1WTwo
static Float calcAcc1WTwo(const ChangeOver &_ch1, const ChangeOver &_ch2, const Float &_dr, const Float &_drdot)
calculate changeover function Acc1 by selecting maximum rout
Definition: changeover.hpp:378
ParticleBase::mass
PS::F64 mass
Definition: particle_base.hpp:23
HardIntegrator::interrupt_binary
AR::InterruptBinary< PtclHard > interrupt_binary
original particle array
Definition: hard.hpp:168
SystemHard::correctForceForChangeOverUpdateOMP
void correctForceForChangeOverUpdateOMP(Tsys &_sys, Ttree &_tree, const PS::S32 *_adr_send=NULL, const PS::S32 _n_send=0)
Correct force due to change over factor change.
Definition: hard.hpp:3262
PIKG::max
T max(const Vector3< T > &v)
Definition: pikg_vector.hpp:143
stability.hpp
HardManager::HardManager
HardManager()
constructor
Definition: hard.hpp:38
SearchGroupCandidate::getNumberOfGroups
PS::S32 getNumberOfGroups() const
Definition: search_group_candidate.hpp:233
SystemHard::findGroupsAndCreateArtificialParticlesOMP
void findGroupsAndCreateArtificialParticlesOMP(Tsys &_sys, const PS::F64 _dt_tree)
Find groups and create aritfical particles to sys.
Definition: hard.hpp:2757
artificial_particles.hpp
SystemHard::writeBackPtclForOneCluster
void writeBackPtclForOneCluster(Tsys &sys, PS::ReallocatableArray< PS::S32 > &_mass_modify_list)
write back hard particles to global system, check mass modification and update time of write back
Definition: hard.hpp:2108
HardManager::checkParams
bool checkParams()
check paramters
Definition: hard.hpp:68
GroupDataDeliver::artificial
ArtificialParticleInformation artificial
Definition: ptcl.hpp:18
ar_perturber.hpp
ArtificialParticleManager::writeBinary
void writeBinary(FILE *_fp)
write class data to file with binary format
Definition: artificial_particles.hpp:489
HardEnergy::de_sd_change_modify_single
PS::F64 de_sd_change_modify_single
Definition: hard.hpp:127
SearchGroupCandidate::getNumberOfGroupMembers
PS::S32 getNumberOfGroupMembers(const std::size_t igroup) const
Definition: search_group_candidate.hpp:245
HardIntegrator::manager
HardManager * manager
AR integrator
Definition: hard.hpp:163
SearchGroupCandidate
Definition: search_group_candidate.hpp:9
SystemHard::manager
HardManager * manager
Definition: hard.hpp:1221
HardEnergy::de_change_modify_single
PS::F64 de_change_modify_single
Definition: hard.hpp:123
HardIntegrator::clear
void clear()
clear function
Definition: hard.hpp:1162
SystemHard::initializeForOneCluster
void initializeForOneCluster(const PS::S32 n)
Definition: hard.hpp:1851
HARD_DEBUG_PRINT_FEQ
#define HARD_DEBUG_PRINT_FEQ
Definition: hard_debug.cxx:7
ASSERT
#define ASSERT(expr)
Definition: hard_assert.hpp:238
ArtificialParticleManager
class to organize artificial particles
Definition: artificial_particles.hpp:232
Stability::stable_binary_tree
PS::ReallocatableArray< BinTree * > stable_binary_tree
Definition: stability.hpp:11
SystemHard::getGroupPtclRemoteN
PS::S32 getGroupPtclRemoteN() const
Definition: hard.hpp:1921
HardIntegrator::tidal_tensor
PS::ReallocatableArray< TidalTensor > tidal_tensor
hard manager
Definition: hard.hpp:164
HardManager::setDtRange
void setDtRange(const PS::F64 _dt_max, const PS::S32 _dt_min_index)
set time step range
Definition: hard.hpp:61
hard_ptcl.hpp
HardEnergy::HardEnergy
HardEnergy()
Definition: hard.hpp:131
HardEnergy::de_sd_change_cum
PS::F64 de_sd_change_cum
Definition: hard.hpp:125
SystemHard::getTimeOrigin
PS::F64 getTimeOrigin() const
Definition: hard.hpp:1973
SystemHard::getPtcl
PS::ReallocatableArray< PtclH4 > & getPtcl()
Definition: hard.hpp:1925
EPJSoft::group_data
GroupDataDeliver group_data
Definition: soft_ptcl.hpp:324
SystemHard::writeBackPtclForOneClusterOMP
void writeBackPtclForOneClusterOMP(Tsys &sys, PS::ReallocatableArray< PS::S32 > &_mass_modify_list)
write back hard particles to global system and update time of write back OMP version
Definition: hard.hpp:2143
SystemHard::correctForceWithCutoffTreeNeighborAndClusterOMP
void correctForceWithCutoffTreeNeighborAndClusterOMP(Tsys &_sys, Ttree &_tree, const PS::ReallocatableArray< PS::S32 > &_adr_send, const bool _acorr_flag=false)
Soft force correction due to different cut-off function.
Definition: hard.hpp:3132
HardIntegrator::integrateToTime
AR::InterruptBinary< PtclHard > & integrateToTime(const PS::F64 _time_end)
Integrate system to time.
Definition: hard.hpp:577
ChangeOver::setR
void setR(const Float &_m_fac, const Float &_r_in, const Float &_r_out)
set r_in and r_out for changeover function
Definition: changeover.hpp:44
ArtificialParticleManager::gravitational_constant
PS::F64 gravitational_constant
offset to generate ID for artificial particles
Definition: artificial_particles.hpp:237
Ptcl::group_data
GroupDataDeliver group_data
Definition: ptcl.hpp:40
SystemHard::driveForOneClusterOMP
void driveForOneClusterOMP(const PS::F64 _dt)
integrate one isolated particle
Definition: hard.hpp:2046
SystemHard::getGroupNumberOfMemberListOffset
PS::S32 * getGroupNumberOfMemberListOffset(const std::size_t i=0) const
Definition: hard.hpp:1953
HardEnergy
Definition: hard.hpp:119
HardManager::n_step_per_orbit
PS::F64 n_step_per_orbit
Definition: hard.hpp:32
PIKG::F32
float F32
Definition: pikg_vector.hpp:18
HardEnergy::clear
void clear()
Definition: hard.hpp:133
Stability::findStableTree
void findStableTree(BinTree &_bin)
Check binary tree and collect stable subtree.
Definition: stability.hpp:399
ForceSoft::grav_const
static PS::F64 grav_const
neighbor number+1
Definition: soft_ptcl.hpp:15
SystemHard::setPtclForConnectedCluster
void setPtclForConnectedCluster(const Tsys &sys, const PS::ReallocatableArray< Tmediator > &med, const PS::ReallocatableArray< Tptcl > &ptcl_recv)
Definition: hard.hpp:1861
HardManager::writeBinary
void writeBinary(FILE *_fp)
write class data to file with binary format
Definition: hard.hpp:83
ar_interaction.hpp
SystemHard::GroupIndexInfo::GroupIndexInfo
GroupIndexInfo()
Definition: hard.hpp:2476
HardManager::print
void print(std::ostream &_fout) const
print parameters
Definition: hard.hpp:108
HardEnergy::ekin_sd_correction
PS::F64 ekin_sd_correction
Definition: hard.hpp:128
hermite_information.hpp
SearchGroupCandidate::searchAndMerge
void searchAndMerge(Tptcl *_ptcl_in_cluster, const PS::S32 _n_ptcl)
Definition: search_group_candidate.hpp:224
SystemHard::driveForMultiClusterOMP
int driveForMultiClusterOMP(const PS::F64 dt, Tpsoft *_ptcl_soft)
Hard integration for clusters.
Definition: hard.hpp:2271