Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2011-2019 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 : #include "Atoms.h"
23 : #include "ActionAtomistic.h"
24 : #include "MDAtoms.h"
25 : #include "PlumedMain.h"
26 : #include "tools/Pbc.h"
27 : #include <algorithm>
28 : #include <iostream>
29 : #include <string>
30 : #include <cmath>
31 :
32 : using namespace std;
33 :
34 : namespace PLMD {
35 :
36 : /// We assume that charges and masses are constant throughout the simulation.
37 : /// Set this to false if you want to revert to the original (expensive) behavior.
38 : static const bool shareMassAndChargeOnlyAtFirstStep=true;
39 :
40 : class PlumedMain;
41 :
42 2233 : Atoms::Atoms(PlumedMain&plumed):
43 : natoms(0),
44 2233 : pbc(*new Pbc),
45 : md_energy(0.0),
46 : energy(0.0),
47 : dataCanBeSet(false),
48 : collectEnergy(false),
49 : energyHasBeenSet(false),
50 : positionsHaveBeenSet(0),
51 : massesHaveBeenSet(false),
52 : chargesHaveBeenSet(false),
53 : boxHasBeenSet(false),
54 : forcesHaveBeenSet(0),
55 : virialHasBeenSet(false),
56 : massAndChargeOK(false),
57 : shuffledAtoms(0),
58 : plumed(plumed),
59 : naturalUnits(false),
60 : MDnaturalUnits(false),
61 : timestep(0.0),
62 : forceOnEnergy(0.0),
63 : zeroallforces(false),
64 : kbT(0.0),
65 : asyncSent(false),
66 : atomsNeeded(false),
67 11165 : ddStep(0)
68 : {
69 2233 : mdatoms=MDAtomsBase::create(sizeof(double));
70 2233 : }
71 :
72 8932 : Atoms::~Atoms() {
73 2233 : if(actions.size()>0) {
74 0 :     std::cerr<<"WARNING: there is some inconsistency in the actions added to atoms: some of them were not properly destroyed. This might indicate an internal bug!!\n";
75 : }
76 2233 : delete mdatoms;
77 2233 : delete &pbc;
78 2233 : }
79 :
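    : // Reset the per-step flags (energy, positions, masses, charges, box, forces, virial)
    : // so that the MD code can pass fresh data for the new step.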
80 31332 : void Atoms::startStep() {
81 31332 : collectEnergy=false; energyHasBeenSet=false; positionsHaveBeenSet=0;
82 31332 : massesHaveBeenSet=false; chargesHaveBeenSet=false; boxHasBeenSet=false;
83 31332 : forcesHaveBeenSet=0; virialHasBeenSet=false; dataCanBeSet=true;
84 31332 : }
85 :
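    : // A minimal sketch (not part of this file) of how an MD code typically feeds data to the
    : // setters below through the PLUMED interface; the exact command strings are assumptions
    : // based on the "setStep" mentioned in the assertion messages and on the setter names:
    : //
    : //   plumed_cmd(p,"setStep",&step);            // should reset the flags via startStep()
    : //   plumed_cmd(p,"setPositions",&pos[0][0]);
    : //   plumed_cmd(p,"setMasses",&mass[0]);
    : //   plumed_cmd(p,"setBox",&box[0][0]);
    : //   plumed_cmd(p,"setForces",&force[0][0]);
    : //   plumed_cmd(p,"calc",NULL);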
86 26838 : void Atoms::setBox(void*p) {
87 26838 : mdatoms->setBox(p);
88 26838 : Tensor b; mdatoms->getBox(b); boxHasBeenSet=true;
89 26838 : }
90 :
91 30656 : void Atoms::setPositions(void*p) {
92 30656 : plumed_massert( dataCanBeSet,"setPositions must be called after setStep in MD code interface");
93 30673 : plumed_massert( p || gatindex.size()==0, "NULL position pointer with non-zero local atoms");
94 30656 : mdatoms->setp(p); positionsHaveBeenSet=3;
95 30656 : }
96 :
97 30659 : void Atoms::setMasses(void*p) {
98 30668 : plumed_massert( dataCanBeSet,"setMasses must be called after setStep in MD code interface");
99 30673 : plumed_massert( p || gatindex.size()==0, "NULL mass pointer with non-zero local atoms");
100 30656 : mdatoms->setm(p); massesHaveBeenSet=true;
101 :
102 30656 : }
103 :
104 24595 : void Atoms::setCharges(void*p) {
105 24595 : plumed_massert( dataCanBeSet, "setCharges must be called after setStep in MD code interface");
106 24612 : plumed_massert( p || gatindex.size()==0, "NULL charges pointer with non-zero local atoms");
107 24595 : mdatoms->setc(p); chargesHaveBeenSet=true;
108 24595 : }
109 :
110 24658 : void Atoms::setVirial(void*p) {
111 24658 : plumed_massert( dataCanBeSet,"setVirial must be called after setStep in MD code interface");
112 24658 : mdatoms->setVirial(p); virialHasBeenSet=true;
113 24658 : }
114 :
115 5988 : void Atoms::setEnergy(void*p) {
116 5988 : plumed_massert( dataCanBeSet,"setEnergy must be called after setStep in MD code interface");
117 5988 : MD2double(p,md_energy);
118 5988 : md_energy*=MDUnits.getEnergy()/units.getEnergy();
119 5988 : energyHasBeenSet=true;
120 5988 : }
121 :
122 30656 : void Atoms::setForces(void*p) {
123 30656 : plumed_massert( dataCanBeSet,"setForces must be called after setStep in MD code interface");
124 30673 : plumed_massert( p || gatindex.size()==0, "NULL force pointer with non-zero local atoms");
125 30656 : forcesHaveBeenSet=3;
126 30656 : mdatoms->setf(p);
127 30656 : }
128 :
129 0 : void Atoms::setPositions(void*p,int i) {
130 0 : plumed_massert( dataCanBeSet,"setPositions must be called after setStep in MD code interface");
131 0 : plumed_massert( p || gatindex.size()==0, "NULL positions pointer with non-zero local atoms");
132 0 : mdatoms->setp(p,i); positionsHaveBeenSet++;
133 0 : }
134 :
135 0 : void Atoms::setForces(void*p,int i) {
136 0 : plumed_massert( dataCanBeSet,"setForces must be called after setStep in MD code interface");
137 0 : plumed_massert( p || gatindex.size()==0, "NULL force pointer with non-zero local atoms");
138 0 : mdatoms->setf(p,i); forcesHaveBeenSet++;
139 0 : }
140 :
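    : // Decide which atoms have to be shared at this step: when atoms are distributed among ranks,
    : // collect the local atoms required by every active action into 'unique'; otherwise just flag
    : // that atoms are needed. The actual exchange is performed by share(unique).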
141 29178 : void Atoms::share() {
142 :   // At the first step we scatter all the atoms so as to store their masses and charges.
143 :   // Notice that this works under the assumption that charges and masses
144 :   // do not change during the simulation!
145 29178 : if(!massAndChargeOK && shareMassAndChargeOnlyAtFirstStep) {
146 559 : shareAll();
147 559 : return;
148 : }
149 :
150 28619 : if(!(int(gatindex.size())==natoms && shuffledAtoms==0)) {
151 54210 : for(unsigned i=0; i<actions.size(); i++) {
152 33652 : if(actions[i]->isActive()) {
153 10120 : if(!actions[i]->getUnique().empty()) {
154 9972 : atomsNeeded=true;
155 : // unique are the local atoms
156 9972 : unique.insert(actions[i]->getUniqueLocal().begin(),actions[i]->getUniqueLocal().end());
157 : }
158 : }
159 : }
160 : } else {
161 334018 : for(unsigned i=0; i<actions.size(); i++) {
162 187008 : if(actions[i]->isActive()) {
163 80179 : if(!actions[i]->getUnique().empty()) {
164 71973 : atomsNeeded=true;
165 : }
166 : }
167 : }
168 :
169 : }
170 :
171 28619 : share(unique);
172 : }
173 :
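    : // Share all atoms (restricted to the ones local to this rank when domain decomposition is
    : // active); used e.g. at the first step, when masses and charges still have to be stored.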
174 643 : void Atoms::shareAll() {
175 643 : unique.clear();
176 : // keep in unique only those atoms that are local
177 643 : if(dd && shuffledAtoms>0) {
178 31655 : for(int i=0; i<natoms; i++) if(g2l[i]>=0) unique.insert(AtomNumber::index(i));
179 : } else {
180 691501 : for(int i=0; i<natoms; i++) unique.insert(AtomNumber::index(i));
181 : }
182 643 : atomsNeeded=true;
183 643 : share(unique);
184 643 : }
185 :
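    : // Zero the forces and retrieve the positions (plus masses and charges at the first step) of
    : // the atoms listed in 'unique'. With domain decomposition and shuffled indices the data are
    : // packed into buffers and exchanged among ranks, either asynchronously (Isend, completed in
    : // wait()) or with an Allgatherv.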
186 29262 : void Atoms::share(const std::set<AtomNumber>& unique) {
187 29262 : plumed_assert( positionsHaveBeenSet==3 && massesHaveBeenSet );
188 :
189 29262 : virial.zero();
190 58428 : if(zeroallforces || int(gatindex.size())==natoms) {
191 4663864 : for(int i=0; i<natoms; i++) forces[i].zero();
192 : } else {
193 60732 : for(const auto & p : unique) forces[p.index()].zero();
194 : }
195 67544 : for(unsigned i=getNatoms(); i<positions.size(); i++) forces[i].zero(); // virtual atoms
196 29262 : forceOnEnergy=0.0;
197 29262 : mdatoms->getBox(box);
198 :
199 29262 : if(!atomsNeeded) return;
200 28605 : atomsNeeded=false;
201 :
202 28605 : if(int(gatindex.size())==natoms && shuffledAtoms==0) {
203 : // faster version, which retrieves all atoms
204 26595 : mdatoms->getPositions(0,natoms,positions);
205 : } else {
206 2010 : uniq_index.clear();
207 2010 : uniq_index.reserve(unique.size());
208 2010 : if(shuffledAtoms>0) {
209 88398 : for(const auto & p : unique) uniq_index.push_back(g2l[p.index()]);
210 : }
211 2010 : mdatoms->getPositions(unique,uniq_index,positions);
212 : }
213 :
214 :
215 :   // how many doubles per atom should be scattered:
216 : int ndata=3;
217 28605 : if(!massAndChargeOK) {
218 : ndata=5;
219 1118 : masses.assign(masses.size(),NAN);
220 1118 : charges.assign(charges.size(),NAN);
221 559 : mdatoms->getCharges(gatindex,charges);
222 559 : mdatoms->getMasses(gatindex,masses);
223 : }
224 :
225 28605 : if(dd && shuffledAtoms>0) {
226 2008 : if(dd.async) {
227 25312 : for(unsigned i=0; i<dd.mpi_request_positions.size(); i++) dd.mpi_request_positions[i].wait();
228 25312 : for(unsigned i=0; i<dd.mpi_request_index.size(); i++) dd.mpi_request_index[i].wait();
229 : }
230 2008 : int count=0;
231 23601 : for(const auto & p : unique) {
232 43186 : dd.indexToBeSent[count]=p.index();
233 64779 : dd.positionsToBeSent[ndata*count+0]=positions[p.index()][0];
234 64779 : dd.positionsToBeSent[ndata*count+1]=positions[p.index()][1];
235 64779 : dd.positionsToBeSent[ndata*count+2]=positions[p.index()][2];
236 21593 : if(!massAndChargeOK) {
237 5739 : dd.positionsToBeSent[ndata*count+3]=masses[p.index()];
238 5739 : dd.positionsToBeSent[ndata*count+4]=charges[p.index()];
239 : }
240 21593 : count++;
241 : }
242 2008 : if(dd.async) {
243 1988 : asyncSent=true;
244 1988 : dd.mpi_request_positions.resize(dd.Get_size());
245 1988 : dd.mpi_request_index.resize(dd.Get_size());
246 16548 : for(int i=0; i<dd.Get_size(); i++) {
247 21840 : dd.mpi_request_index[i]=dd.Isend(&dd.indexToBeSent[0],count,i,666);
248 14560 : dd.mpi_request_positions[i]=dd.Isend(&dd.positionsToBeSent[0],ndata*count,i,667);
249 : }
250 : } else {
251 20 : const int n=(dd.Get_size());
252 20 : vector<int> counts(n);
253 20 : vector<int> displ(n);
254 20 : vector<int> counts5(n);
255 20 : vector<int> displ5(n);
256 20 : dd.Allgather(count,counts);
257 20 : displ[0]=0;
258 260 : for(int i=1; i<n; ++i) displ[i]=displ[i-1]+counts[i-1];
259 260 : for(int i=0; i<n; ++i) counts5[i]=counts[i]*ndata;
260 260 : for(int i=0; i<n; ++i) displ5[i]=displ[i]*ndata;
261 40 : dd.Allgatherv(&dd.indexToBeSent[0],count,&dd.indexToBeReceived[0],&counts[0],&displ[0]);
262 40 : dd.Allgatherv(&dd.positionsToBeSent[0],ndata*count,&dd.positionsToBeReceived[0],&counts5[0],&displ5[0]);
263 60 : int tot=displ[n-1]+counts[n-1];
264 3220 : for(int i=0; i<tot; i++) {
265 6400 : positions[dd.indexToBeReceived[i]][0]=dd.positionsToBeReceived[ndata*i+0];
266 4800 : positions[dd.indexToBeReceived[i]][1]=dd.positionsToBeReceived[ndata*i+1];
267 4800 : positions[dd.indexToBeReceived[i]][2]=dd.positionsToBeReceived[ndata*i+2];
268 1600 : if(!massAndChargeOK) {
269 1296 : masses[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+3];
270 1296 : charges[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+4];
271 : }
272 : }
273 : }
274 : }
275 : }
276 :
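    : // Complete the exchange started in share(): broadcast the box, receive the asynchronously
    : // sent buffers (if any), and sum the energy over ranks when it has to be collected.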
277 29262 : void Atoms::wait() {
278 29262 : dataCanBeSet=false; // Everything should be set by this stage
279 :   // How many doubles per atom should be scattered
280 : int ndata=3;
281 29262 : if(!massAndChargeOK)ndata=5;
282 :
283 29262 : if(dd) {
284 14833 : dd.Bcast(box,0);
285 : }
286 29262 : pbc.setBox(box);
287 :
288 29262 : if(collectEnergy) energy=md_energy;
289 :
290 29262 : if(dd && shuffledAtoms>0) {
291 :     // receive the index and position buffers sent asynchronously in share()
292 2008 : if(asyncSent) {
293 : Communicator::Status status;
294 : int count=0;
295 16548 : for(int i=0; i<dd.Get_size(); i++) {
296 14560 : dd.Recv(&dd.indexToBeReceived[count],dd.indexToBeReceived.size()-count,i,666,status);
297 : int c=status.Get_count<int>();
298 14560 : dd.Recv(&dd.positionsToBeReceived[ndata*count],dd.positionsToBeReceived.size()-ndata*count,i,667);
299 7280 : count+=c;
300 : }
301 103848 : for(int i=0; i<count; i++) {
302 203720 : positions[dd.indexToBeReceived[i]][0]=dd.positionsToBeReceived[ndata*i+0];
303 152790 : positions[dd.indexToBeReceived[i]][1]=dd.positionsToBeReceived[ndata*i+1];
304 152790 : positions[dd.indexToBeReceived[i]][2]=dd.positionsToBeReceived[ndata*i+2];
305 50930 : if(!massAndChargeOK) {
306 13806 : masses[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+3];
307 13806 : charges[dd.indexToBeReceived[i]] =dd.positionsToBeReceived[ndata*i+4];
308 : }
309 : }
310 1988 : asyncSent=false;
311 : }
312 2008 : if(collectEnergy) dd.Sum(energy);
313 : }
314 :   // Take note that masses and charges have been set once and for all
315 :   // at the beginning of the simulation.
316 29262 : if(shareMassAndChargeOnlyAtFirstStep) massAndChargeOK=true;
317 29262 : }
318 :
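    : // Pass the forces computed by PLUMED back to the MD code. If a force is applied on the total
    : // energy, all MD forces are first rescaled by alpha=1-forceOnEnergy; the virial is updated on
    : // rank 0 only, and only when virial handling is not disabled (plumed.novirial).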
319 29178 : void Atoms::updateForces() {
320 29178 : plumed_assert( forcesHaveBeenSet==3 );
321 29178 : if(forceOnEnergy*forceOnEnergy>epsilon) {
322 50 : double alpha=1.0-forceOnEnergy;
323 50 : mdatoms->rescaleForces(gatindex,alpha);
324 50 : mdatoms->updateForces(gatindex,forces);
325 : } else {
326 29128 : if(int(gatindex.size())==natoms && shuffledAtoms==0) mdatoms->updateForces(gatindex,forces);
327 1926 : else mdatoms->updateForces(unique,uniq_index,forces);
328 : }
329 29178 : if( !plumed.novirial && dd.Get_rank()==0 ) {
330 19076 : plumed_assert( virialHasBeenSet );
331 19076 : mdatoms->updateVirial(virial);
332 : }
333 29178 : }
334 :
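    : // Allocate the per-atom arrays and initialize gatindex to the identity mapping
    : // (no domain decomposition yet).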
335 635 : void Atoms::setNatoms(int n) {
336 635 : natoms=n;
337 635 : positions.resize(n);
338 635 : forces.resize(n);
339 635 : masses.resize(n);
340 635 : charges.resize(n);
341 635 : gatindex.resize(n);
342 1053331 : for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=i;
343 635 : }
344 :
345 :
346 2310 : void Atoms::add(ActionAtomistic*a) {
347 2310 : actions.push_back(a);
348 2310 : }
349 :
350 2310 : void Atoms::remove(ActionAtomistic*a) {
351 2310 : auto f=find(actions.begin(),actions.end(),a);
352 2310 : plumed_massert(f!=actions.end(),"cannot remove an action registered to atoms");
353 2310 : actions.erase(f);
354 2310 : }
355 :
356 :
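    : // Enable domain decomposition on the given communicator. Asynchronous sharing is the default
    : // for fewer than 10 ranks; the PLUMED_ASYNC_SHARE environment variable (yes/no) overrides it.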
357 245 : void Atoms::DomainDecomposition::enable(Communicator& c) {
358 245 : on=true;
359 245 : Set_comm(c.Get_comm());
360 245 : async=Get_size()<10;
361 245 : if(std::getenv("PLUMED_ASYNC_SHARE")) {
362 4 : std::string s(std::getenv("PLUMED_ASYNC_SHARE"));
363 4 : if(s=="yes") async=true;
364 4 : else if(s=="no") async=false;
365 0 : else plumed_merror("PLUMED_ASYNC_SHARE variable is set to " + s + "; should be yes or no");
366 : }
367 245 : }
368 :
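    : // Set the number of atoms local to this rank and size the MPI exchange buffers accordingly.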
369 1301 : void Atoms::setAtomsNlocal(int n) {
370 1301 : gatindex.resize(n);
371 1301 : g2l.resize(natoms,-1);
372 1301 : if(dd) {
373 : // Since these vectors are sent with MPI by using e.g.
374 : // &dd.positionsToBeSent[0]
375 : // we make sure they are non-zero-sized so as to
376 :     // avoid errors when doing boundary checks
377 1291 : if(n==0) n++;
378 1291 : dd.positionsToBeSent.resize(n*5,0.0);
379 1291 : dd.positionsToBeReceived.resize(natoms*5,0.0);
380 1291 : dd.indexToBeSent.resize(n,0);
381 1291 : dd.indexToBeReceived.resize(natoms,0);
382 : }
383 1301 : }
384 :
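    : // Store the global indices of the atoms local to this rank (converting from 1-based indices
    : // if they come from Fortran), rebuild the global-to-local map g2l, detect whether atoms are
    : // shuffled with respect to the global order, and refresh the per-action local atom lists.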
385 920 : void Atoms::setAtomsGatindex(int*g,bool fortran) {
386 925 : plumed_massert( g || gatindex.size()==0, "NULL gatindex pointer with non-zero local atoms");
387 1840 : ddStep=plumed.getStep();
388 920 : if(fortran) {
389 0 : for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=g[i]-1;
390 : } else {
391 54421 : for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=g[i];
392 : }
393 147706 : for(unsigned i=0; i<g2l.size(); i++) g2l[i]=-1;
394 920 : if( gatindex.size()==natoms ) {
395 3 : shuffledAtoms=0;
396 603 : for(unsigned i=0; i<gatindex.size(); i++) {
397 300 : if( gatindex[i]!=i ) { shuffledAtoms=1; break; }
398 : }
399 : } else {
400 917 : shuffledAtoms=1;
401 : }
402 920 : if(dd) {
403 910 : dd.Sum(shuffledAtoms);
404 : }
405 71948 : for(unsigned i=0; i<gatindex.size(); i++) g2l[gatindex[i]]=i;
406 :
407 29308 : for(unsigned i=0; i<actions.size(); i++) {
408 : // keep in unique only those atoms that are local
409 9156 : actions[i]->updateUniqueLocal();
410 : }
411 : unique.clear();
412 920 : }
413 :
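    : // Same as setAtomsGatindex, but the local atoms form a contiguous block starting at 'start'.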
414 381 : void Atoms::setAtomsContiguous(int start) {
415 762 : ddStep=plumed.getStep();
416 315087 : for(unsigned i=0; i<gatindex.size(); i++) gatindex[i]=start+i;
417 348135 : for(unsigned i=0; i<g2l.size(); i++) g2l[i]=-1;
418 419862 : for(unsigned i=0; i<gatindex.size(); i++) g2l[gatindex[i]]=i;
419 381 : if(gatindex.size()<natoms) shuffledAtoms=1;
420 1458 : for(unsigned i=0; i<actions.size(); i++) {
421 : // keep in unique only those atoms that are local
422 232 : actions[i]->updateUniqueLocal();
423 : }
424 : unique.clear();
425 381 : }
426 :
427 556 : void Atoms::setRealPrecision(int p) {
428 556 : MDAtomsBase *x=MDAtomsBase::create(p);
429 555 : delete mdatoms;
430 555 : mdatoms=x;
431 555 : }
432 :
433 635 : int Atoms::getRealPrecision()const {
434 635 : return mdatoms->getRealPrecision();
435 : }
436 :
437 8160 : void Atoms::MD2double(const void*m,double&d)const {
438 8160 : plumed_assert(mdatoms); mdatoms->MD2double(m,d);
439 8160 : }
440 401 : void Atoms::double2MD(const double&d,void*m)const {
441 401 : plumed_assert(mdatoms); mdatoms->double2MD(d,m);
442 401 : }
443 :
444 648 : void Atoms::updateUnits() {
445 648 : mdatoms->setUnits(units,MDUnits);
446 648 : }
447 :
448 556 : void Atoms::setTimeStep(void*p) {
449 556 : MD2double(p,timestep);
450 556 : }
451 :
452 937597 : double Atoms::getTimeStep()const {
453 937597 : return timestep/units.getTime()*MDUnits.getTime();
454 : }
455 :
456 43 : void Atoms::setKbT(void*p) {
457 43 : MD2double(p,kbT);
458 43 : }
459 :
460 809 : double Atoms::getKbT()const {
461 809 : return kbT/units.getEnergy()*MDUnits.getEnergy();
462 : }
463 :
464 :
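    : // Build the list of atom indices that PLUMED needs at this step, so that the MD code can pass
    : // only those atoms; at the very first step all atoms are requested, since masses and charges
    : // still have to be collected.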
465 15 : void Atoms::createFullList(int*n) {
466 15 : if(!massAndChargeOK && shareMassAndChargeOnlyAtFirstStep) {
467 1 : *n=natoms;
468 1 : fullList.resize(natoms);
469 201 : for(unsigned i=0; i<natoms; i++) fullList[i]=i;
470 : } else {
471 :     // Here we update the unique list defined at Atoms::unique.
472 :     // This is not very clear, and probably should be coded differently.
473 :     // Hopefully this fixes the longstanding issue with NAMD.
474 : unique.clear();
475 139 : for(unsigned i=0; i<actions.size(); i++) {
476 74 : if(actions[i]->isActive()) {
477 15 : if(!actions[i]->getUnique().empty()) {
478 15 : atomsNeeded=true;
479 : // unique are the local atoms
480 15 : unique.insert(actions[i]->getUnique().begin(),actions[i]->getUnique().end());
481 : }
482 : }
483 : }
484 14 : fullList.resize(0);
485 14 : fullList.reserve(unique.size());
486 656 : for(const auto & p : unique) fullList.push_back(p.index());
487 14 : *n=fullList.size();
488 : }
489 15 : }
490 :
491 15 : void Atoms::getFullList(int**x) {
492 15 : if(!fullList.empty()) *x=&fullList[0];
493 6 : else *x=NULL;
494 15 : }
495 :
496 15 : void Atoms::clearFullList() {
497 15 : fullList.resize(0);
498 15 : }
499 :
500 635 : void Atoms::init() {
501 : // Default: set domain decomposition to NO-decomposition, waiting for
502 : // further instruction
503 635 : if(dd) {
504 245 : setAtomsNlocal(natoms);
505 245 : setAtomsContiguous(0);
506 : }
507 635 : }
508 :
509 245 : void Atoms::setDomainDecomposition(Communicator& comm) {
510 245 : dd.enable(comm);
511 245 : }
512 :
513 290 : void Atoms::resizeVectors(unsigned n) {
514 290 : positions.resize(n);
515 290 : forces.resize(n);
516 290 : masses.resize(n);
517 290 : charges.resize(n);
518 290 : }
519 :
520 145 : AtomNumber Atoms::addVirtualAtom(ActionWithVirtualAtom*a) {
521 145 : unsigned n=positions.size();
522 145 : resizeVectors(n+1);
523 145 : virtualAtomsActions.push_back(a);
524 145 : return AtomNumber::index(n);
525 : }
526 :
527 145 : void Atoms::removeVirtualAtom(ActionWithVirtualAtom*a) {
528 145 : unsigned n=positions.size();
529 290 : plumed_massert(a==virtualAtomsActions[virtualAtomsActions.size()-1],"virtual atoms should be destroyed in reverse creation order");
530 145 : resizeVectors(n-1);
531 : virtualAtomsActions.pop_back();
532 145 : }
533 :
534 87 : void Atoms::insertGroup(const std::string&name,const std::vector<AtomNumber>&a) {
535 0 : plumed_massert(groups.count(name)==0,"group named "+name+" already exists");
536 87 : groups[name]=a;
537 87 : }
538 :
539 87 : void Atoms::removeGroup(const std::string&name) {
540 0 : plumed_massert(groups.count(name)==1,"cannot remove group named "+name);
541 : groups.erase(name);
542 87 : }
543 :
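    : // Checkpoint helpers: dump and restore positions, box and energy in binary form.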
544 84 : void Atoms::writeBinary(std::ostream&o)const {
545 168 : o.write(reinterpret_cast<const char*>(&positions[0][0]),natoms*3*sizeof(double));
546 84 : o.write(reinterpret_cast<const char*>(&box(0,0)),9*sizeof(double));
547 84 : o.write(reinterpret_cast<const char*>(&energy),sizeof(double));
548 84 : }
549 :
550 84 : void Atoms::readBinary(std::istream&i) {
551 168 : i.read(reinterpret_cast<char*>(&positions[0][0]),natoms*3*sizeof(double));
552 84 : i.read(reinterpret_cast<char*>(&box(0,0)),9*sizeof(double));
553 84 : i.read(reinterpret_cast<char*>(&energy),sizeof(double));
554 84 : pbc.setBox(box);
555 84 : }
556 :
557 315 : double Atoms::getKBoltzmann()const {
558 315 : if(naturalUnits || MDnaturalUnits) return 1.0;
559 308 : else return kBoltzmann/units.getEnergy();
560 : }
561 :
562 0 : double Atoms::getMDKBoltzmann()const {
563 0 : if(naturalUnits || MDnaturalUnits) return 1.0;
564 0 : else return kBoltzmann/MDUnits.getEnergy();
565 : }
566 :
567 0 : void Atoms::getLocalMasses(std::vector<double>& localMasses) {
568 0 : localMasses.resize(gatindex.size());
569 0 : for(unsigned i=0; i<gatindex.size(); i++) localMasses[i] = masses[gatindex[i]];
570 0 : }
571 :
572 450 : void Atoms::getLocalPositions(std::vector<Vector>& localPositions) {
573 450 : localPositions.resize(gatindex.size());
574 450 : mdatoms->getLocalPositions(localPositions);
575 450 : }
576 :
577 450 : void Atoms::getLocalForces(std::vector<Vector>& localForces) {
578 450 : localForces.resize(gatindex.size());
579 65700 : for(unsigned i=0; i<gatindex.size(); i++) localForces[i] = forces[gatindex[i]];
580 450 : }
581 :
582 0 : void Atoms::getLocalMDForces(std::vector<Vector>& localForces) {
583 0 : localForces.resize(gatindex.size());
584 0 : for(unsigned i=0; i<gatindex.size(); i++) {
585 0 : localForces[i] = mdatoms->getMDforces(i);
586 : }
587 0 : }
588 :
589 4839 : }