/* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   Copyright (c) 2011-2023 The plumed team
   (see the PEOPLE file at the root of the distribution for a list of names)

   See http://www.plumed.org for more information.

   This file is part of plumed, version 2.

   plumed is free software: you can redistribute it and/or modify
   it under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   plumed is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with plumed. If not, see <http://www.gnu.org/licenses/>.
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
#include "MDAtoms.h"
#include "tools/Tools.h"
#include "tools/OpenMP.h"
#include "tools/Exception.h"
#include "tools/Units.h"
#include <algorithm>
#include <string>
#include <map>

namespace PLMD {

/// Helper that extracts typed x/y/z pointers from the arrays passed by the MD code.
/// Coordinates can arrive either as a single interleaved xyzxyzxyz... array (p)
/// or as three separate arrays (px, py, pz); stride is set to 3 or 1 accordingly,
/// and to 0 with null pointers when no coordinates were passed at all.
template<typename T>
static void getPointers(const TypesafePtr & p,const TypesafePtr & px,const TypesafePtr & py,const TypesafePtr & pz,unsigned maxel,T*&ppx,T*&ppy,T*&ppz,unsigned & stride) {
  if(p) {
    auto p_=p.get<T*>({maxel,3});
    ppx=p_;
    ppy=p_+1;
    ppz=p_+2;
    stride=3;
  } else if(px && py && pz) {
    ppx=px.get<T*>(maxel);
    ppy=py.get<T*>(maxel);
    ppz=pz.get<T*>(maxel);
    stride=1;
  } else {
    ppx=nullptr;
    ppy=nullptr;
    ppz=nullptr;
    stride=0;
  }
}
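
// Usage sketch (comments only, not part of the build): with an interleaved
// array a[]={x0,y0,z0,x1,y1,z1} getPointers returns
//   ppx=&a[0], ppy=&a[1], ppz=&a[2], stride=3,
// while three split arrays give ppx=&x[0] etc. and stride=1, so component
// access is uniform in both layouts:
//   positions[i][0]=ppx[stride*i];
//   positions[i][1]=ppy[stride*i];
//   positions[i][2]=ppz[stride*i];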

/// Class containing the pointers to the MD data
/// It is templated so that single and double precision versions coexist
/// IT IS STILL UNDOCUMENTED. IT PROBABLY NEEDS A STRONG CLEANUP
template <class T>
class MDAtomsTyped:
  public MDAtomsBase
{
  T scalep=1.0; // factor to scale positions
  T scalef=1.0; // factor to scale forces
  T scaleb=1.0; // factor to scale box
  T scalev=1.0; // factor to scale virial
  T scalec=1.0; // factor to scale charges
  T scalem=1.0; // factor to scale masses
  TypesafePtr m;
  TypesafePtr c;
  TypesafePtr p;
  TypesafePtr px,py,pz;
  TypesafePtr f;
  TypesafePtr fx,fy,fz;
  TypesafePtr box;
  TypesafePtr virial;
  std::map<std::string,TypesafePtr> extraCV;
  std::map<std::string,TypesafePtr> extraCVForce;
public:
  void setm(const TypesafePtr & m) override;
  void setc(const TypesafePtr & c) override;
  void setBox(const TypesafePtr & ) override;
  void setp(const TypesafePtr & p) override;
  void setVirial(const TypesafePtr & ) override;
  void setf(const TypesafePtr & f) override;
  void setp(const TypesafePtr & p,int i) override;
  void setf(const TypesafePtr & f,int i) override;
  void setUnits(const Units&,const Units&) override;
  void setExtraCV(const std::string &name,const TypesafePtr & p) override {
    p.get<T>(); // just check type and discard pointer
    extraCV[name]=p.copy();
  }
  void setExtraCVForce(const std::string &name,const TypesafePtr & p) override {
    p.get<T*>(); // just check type and discard pointer
    extraCVForce[name]=p.copy();
  }
  double getExtraCV(const std::string &name) override {
    auto search=extraCV.find(name);
    if(search != extraCV.end()) {
      return static_cast<double>(search->second.template get<T>());
    } else {
      plumed_error() << "Unable to access extra CV named '" << name << "'.\nNotice that extra CVs need to be calculated in the MD code.";
    }
  }
  void updateExtraCVForce(const std::string &name,double f) override {
    *extraCVForce[name].template get<T*>()+=static_cast<T>(f);
  }
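  // Round-trip sketch for extra CVs (comments only; "lambda" and the
  // variable names are hypothetical, chosen for illustration). The MD
  // code owns both scalars and registers their addresses up front:
  //   setExtraCV("lambda",&lambda);        // value computed by the MD code
  //   setExtraCVForce("lambda",&fLambda);  // accumulator for PLUMED's force
  // PLUMED then reads the value with getExtraCV("lambda") and calls
  // updateExtraCVForce("lambda",f), which performs fLambda+=T(f) in the
  // MD code's precision.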
  void MD2double(const TypesafePtr & m,double&d)const override {
    d=double(m.template get<T>());
  }
  void double2MD(const double&d,const TypesafePtr & m)const override {
    m.set(T(d));
  }
  Vector getMDforces(const unsigned index)const override {
    unsigned stride;
    const T* ffx;
    const T* ffy;
    const T* ffz;
    // note: we pass index+1 as the array size, since the array must contain at least the element we are asking for
    getPointers(f,fx,fy,fz,index+1,ffx,ffy,ffz,stride);
    Vector force(ffx[stride*index],ffy[stride*index],ffz[stride*index]);
    return force/scalef;
  }
  void getBox(Tensor &) const override;
  void getPositions(const std::vector<int>&index,std::vector<Vector>&positions) const override;
  void getPositions(const std::set<AtomNumber>&index,const std::vector<unsigned>&i,std::vector<Vector>&positions) const override;
  void getPositions(unsigned j,unsigned k,std::vector<Vector>&positions) const override;
  void getLocalPositions(std::vector<Vector>&p) const override;
  void getMasses(const std::vector<int>&index,std::vector<double>&) const override;
  void getCharges(const std::vector<int>&index,std::vector<double>&) const override;
  void updateVirial(const Tensor&) const override;
  void updateForces(const std::vector<int>&index,const std::vector<Vector>&) override;
  void updateForces(const std::set<AtomNumber>&index,const std::vector<unsigned>&i,const std::vector<Vector>&forces) override;
  void rescaleForces(const std::vector<int>&index,double factor) override;
  unsigned getRealPrecision()const override;
};

template <class T>
void MDAtomsTyped<T>::setUnits(const Units& units,const Units& MDUnits) {
  double lscale=units.getLength()/MDUnits.getLength();
  double escale=units.getEnergy()/MDUnits.getEnergy();
  double cscale=units.getCharge()/MDUnits.getCharge();
  double mscale=units.getMass()/MDUnits.getMass();
  // scalep and scaleb are used to convert MD to plumed
  scalep=1.0/lscale;
  scaleb=1.0/lscale;
  // scalef and scalev are used to convert plumed to MD
  scalef=escale/lscale;
  scalev=escale;
  scalec=1.0/cscale;
  scalem=1.0/mscale;
}
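
// Worked example (comment only; assumes the Units getters return values
// relative to PLUMED defaults, nm and kJ/mol): with PLUMED in default
// units and an MD code using Angstrom and kcal/mol, lscale=10 and
// escale=1/4.184, hence
//   scalep=0.1     : 5 A from the MD code becomes 0.5 nm in PLUMED
//   scalef=1/41.84 : a force in kJ/mol/nm becomes one in kcal/mol/A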

template <class T>
void MDAtomsTyped<T>::getBox(Tensor&box)const {
  auto b=this->box.template get<const T*>({3,3});
  if(b) for(int i=0; i<3; i++)for(int j=0; j<3; j++) box(i,j)=b[3*i+j]*scaleb;
  else box.zero();
}
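
// Layout note (illustrative): the box is read row-major, so a pointer to
//   T cell[9]={ax,ay,az, bx,by,bz, cx,cy,cz};
// yields box(0,*)=lattice vector a, box(1,*)=b, box(2,*)=c, each component
// multiplied by scaleb to convert MD lengths to PLUMED lengths.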

template <class T>
void MDAtomsTyped<T>::getPositions(const std::vector<int>&index,std::vector<Vector>&positions)const {
  unsigned stride;
  const T* ppx;
  const T* ppy;
  const T* ppz;
  getPointers(p,px,py,pz,index.size(),ppx,ppy,ppz,stride);
  plumed_assert(index.size()==0 || (ppx && ppy && ppz));
  // cannot be parallelized with omp because access to positions is not ordered
  for(unsigned i=0; i<index.size(); ++i) {
    positions[index[i]][0]=ppx[stride*i]*scalep;
    positions[index[i]][1]=ppy[stride*i]*scalep;
    positions[index[i]][2]=ppz[stride*i]*scalep;
  }
}

template <class T>
void MDAtomsTyped<T>::getPositions(const std::set<AtomNumber>&index,const std::vector<unsigned>&i, std::vector<Vector>&positions)const {
  unsigned stride;
  const T* ppx;
  const T* ppy;
  const T* ppz;
#ifndef NDEBUG
  // bounds are only checked in debug mode since they require this extra step that is potentially expensive
  const unsigned maxel=(i.size()>0?*std::max_element(i.begin(),i.end())+1:0);
#else
  const unsigned maxel=0;
#endif
  getPointers(p,px,py,pz,maxel,ppx,ppy,ppz,stride);
  plumed_assert(index.size()==0 || (ppx && ppy && ppz));
  // cannot be parallelized with omp because access to positions is not ordered
  unsigned k=0;
  for(const auto & p : index) {
    positions[p.index()][0]=ppx[stride*i[k]]*scalep;
    positions[p.index()][1]=ppy[stride*i[k]]*scalep;
    positions[p.index()][2]=ppz[stride*i[k]]*scalep;
    k++;
  }
}

template <class T>
void MDAtomsTyped<T>::getPositions(unsigned j,unsigned k,std::vector<Vector>&positions)const {
  unsigned stride;
  const T* ppx;
  const T* ppy;
  const T* ppz;
  getPointers(p,px,py,pz,k,ppx,ppy,ppz,stride);
  plumed_assert(k==j || (ppx && ppy && ppz));
  #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(&positions[j],(k-j)))
  for(unsigned i=j; i<k; ++i) {
    positions[i][0]=ppx[stride*i]*scalep;
    positions[i][1]=ppy[stride*i]*scalep;
    positions[i][2]=ppz[stride*i]*scalep;
  }
}


template <class T>
void MDAtomsTyped<T>::getLocalPositions(std::vector<Vector>&positions)const {
  unsigned stride;
  const T* ppx;
  const T* ppy;
  const T* ppz;
  getPointers(p,px,py,pz,positions.size(),ppx,ppy,ppz,stride);
  plumed_assert(positions.size()==0 || (ppx && ppy && ppz));
  #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(positions))
  for(unsigned i=0; i<positions.size(); ++i) {
    positions[i][0]=ppx[stride*i]*scalep;
    positions[i][1]=ppy[stride*i]*scalep;
    positions[i][2]=ppz[stride*i]*scalep;
  }
}


template <class T>
void MDAtomsTyped<T>::getMasses(const std::vector<int>&index,std::vector<double>&masses)const {
  auto mm=m.get<const T*>(index.size());
  if(mm) for(unsigned i=0; i<index.size(); ++i) masses[index[i]]=scalem*mm[i];
  else for(unsigned i=0; i<index.size(); ++i) masses[index[i]]=0.0;
}

template <class T>
void MDAtomsTyped<T>::getCharges(const std::vector<int>&index,std::vector<double>&charges)const {
  auto cc=c.get<const T*>(index.size());
  if(cc) for(unsigned i=0; i<index.size(); ++i) charges[index[i]]=scalec*cc[i];
  else for(unsigned i=0; i<index.size(); ++i) charges[index[i]]=0.0;
}

template <class T>
void MDAtomsTyped<T>::updateVirial(const Tensor&virial)const {
  auto v=this->virial.template get<T*>({3,3});
  if(v) for(int i=0; i<3; i++)for(int j=0; j<3; j++) v[3*i+j]+=T(virial(i,j)*scalev);
}

template <class T>
void MDAtomsTyped<T>::updateForces(const std::set<AtomNumber>&index,const std::vector<unsigned>&i,const std::vector<Vector>&forces) {
  unsigned stride;
  T* ffx;
  T* ffy;
  T* ffz;
#ifndef NDEBUG
  // bounds are only checked in debug mode since they require this extra step that is potentially expensive
  const unsigned maxel=(i.size()>0?*std::max_element(i.begin(),i.end())+1:0);
#else
  const unsigned maxel=0;
#endif
  getPointers(f,fx,fy,fz,maxel,ffx,ffy,ffz,stride);
  plumed_assert(index.size()==0 || (ffx && ffy && ffz));
  unsigned k=0;
  for(const auto & p : index) {
    ffx[stride*i[k]]+=scalef*T(forces[p.index()][0]);
    ffy[stride*i[k]]+=scalef*T(forces[p.index()][1]);
    ffz[stride*i[k]]+=scalef*T(forces[p.index()][2]);
    k++;
  }
}
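
// Access-pattern sketch for the set-based overloads (updateForces above
// and the matching getPositions); comments only, with hypothetical
// numbers: given index={AtomNumber 2,7} and i={0,1}, the k-th element of
// the sorted set pairs with the k-th entry of i, so PLUMED atom 2 updates
// MD slot 0 and atom 7 updates MD slot 1, each offset multiplied by the
// layout stride from getPointers.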

template <class T>
void MDAtomsTyped<T>::updateForces(const std::vector<int>&index,const std::vector<Vector>&forces) {
  unsigned stride;
  T* ffx;
  T* ffy;
  T* ffz;
  getPointers(f,fx,fy,fz,index.size(),ffx,ffy,ffz,stride);
  plumed_assert(index.size()==0 || (ffx && ffy && ffz));
  #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(ffx,stride*index.size()))
  for(unsigned i=0; i<index.size(); ++i) {
    ffx[stride*i]+=scalef*T(forces[index[i]][0]);
    ffy[stride*i]+=scalef*T(forces[index[i]][1]);
    ffz[stride*i]+=scalef*T(forces[index[i]][2]);
  }
}

template <class T>
void MDAtomsTyped<T>::rescaleForces(const std::vector<int>&index,double factor) {
  unsigned stride;
  T* ffx;
  T* ffy;
  T* ffz;
  getPointers(f,fx,fy,fz,index.size(),ffx,ffy,ffz,stride);
  plumed_assert(index.size()==0 || (ffx && ffy && ffz));
  auto v=virial.get<T*>({3,3});
  if(v) for(unsigned i=0; i<3; i++)for(unsigned j=0; j<3; j++) v[3*i+j]*=T(factor);
  #pragma omp parallel for num_threads(OpenMP::getGoodNumThreads(ffx,stride*index.size()))
  for(unsigned i=0; i<index.size(); ++i) {
    ffx[stride*i]*=T(factor);
    ffy[stride*i]*=T(factor);
    ffz[stride*i]*=T(factor);
  }
}

template <class T>
unsigned MDAtomsTyped<T>::getRealPrecision()const {
  return sizeof(T);
}

template <class T>
void MDAtomsTyped<T>::setp(const TypesafePtr & pp) {
  pp.get<const T*>(); // just check type and discard pointer
  p=pp.copy();
  px=TypesafePtr();
  py=TypesafePtr();
  pz=TypesafePtr();
}

template <class T>
void MDAtomsTyped<T>::setBox(const TypesafePtr & pp) {
  pp.get<const T*>({3,3}); // just check type and size and discard pointer
  box=pp.copy();
}


template <class T>
void MDAtomsTyped<T>::setf(const TypesafePtr & ff) {
  ff.get<T*>(); // just check type and discard pointer
  f=ff.copy();
  fx=TypesafePtr();
  fy=TypesafePtr();
  fz=TypesafePtr();
}

template <class T>
void MDAtomsTyped<T>::setp(const TypesafePtr & pp,int i) {
  p=TypesafePtr();
  pp.get<const T*>(); // just check type and discard pointer
  if(i==0)px=pp.copy();
  if(i==1)py=pp.copy();
  if(i==2)pz=pp.copy();
}

template <class T>
void MDAtomsTyped<T>::setVirial(const TypesafePtr & pp) {
  pp.get<T*>({3,3}); // just check type and size and discard pointer
  virial=pp.copy();
}


template <class T>
void MDAtomsTyped<T>::setf(const TypesafePtr & ff,int i) {
  f=TypesafePtr();
  ff.get<T*>(); // just check type and discard pointer
  if(i==0)fx=ff.copy();
  if(i==1)fy=ff.copy();
  if(i==2)fz=ff.copy();
}

template <class T>
void MDAtomsTyped<T>::setm(const TypesafePtr & m) {
  m.get<const T*>(); // just check type and discard pointer
  this->m=m.copy();
}

template <class T>
void MDAtomsTyped<T>::setc(const TypesafePtr & c) {
  c.get<const T*>(); // just check type and discard pointer
  this->c=c.copy();
}

std::unique_ptr<MDAtomsBase> MDAtomsBase::create(unsigned p) {
  if(p==sizeof(double)) {
    return Tools::make_unique<MDAtomsTyped<double>>();
  } else if (p==sizeof(float)) {
    return Tools::make_unique<MDAtomsTyped<float>>();
  }
  plumed_error() << "Cannot create an MD interface with sizeof(real)==" << p;
}
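
// Minimal driver-side sketch (comments only; variable names are
// illustrative, and the TypesafePtr arguments are built from raw
// pointers owned by the MD code):
//   auto mdatoms=MDAtomsBase::create(sizeof(double));
//   mdatoms->setUnits(plumedUnits,mdUnits);
//   mdatoms->setp(positions);   // natoms x 3 interleaved coordinates
//   mdatoms->setf(forces);      // accumulated in place by updateForces()
//   mdatoms->setBox(cell);      // 3x3 row-major cell matrix
//   mdatoms->setVirial(virial); // 3x3, incremented by updateVirial()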

}