Line data Source code
1 : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 : Copyright (c) 2016-2023 The plumed team
3 : (see the PEOPLE file at the root of the distribution for a list of names)
4 :
5 : See http://www.plumed.org for more information.
6 :
7 : This file is part of plumed, version 2.
8 :
9 : plumed is free software: you can redistribute it and/or modify
10 : it under the terms of the GNU Lesser General Public License as published by
11 : the Free Software Foundation, either version 3 of the License, or
12 : (at your option) any later version.
13 :
14 : plumed is distributed in the hope that it will be useful,
15 : but WITHOUT ANY WARRANTY; without even the implied warranty of
16 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 : GNU Lesser General Public License for more details.
18 :
19 : You should have received a copy of the GNU Lesser General Public License
20 : along with plumed. If not, see <http://www.gnu.org/licenses/>.
21 : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
22 :
23 : #include "bias/Bias.h"
24 : #include "core/ActionRegister.h"
25 : #include "core/PlumedMain.h"
26 : #include "core/Value.h"
27 : #include "tools/File.h"
28 : #include "tools/OpenMP.h"
29 : #include "tools/Random.h"
30 : #include "tools/Communicator.h"
31 : #include <chrono>
32 : #include <numeric>
33 :
34 : #ifndef M_PI
35 : #define M_PI 3.14159265358979323846
36 : #endif
37 :
38 : namespace PLMD {
39 : namespace isdb {
40 :
41 : //+PLUMEDOC ISDB_BIAS METAINFERENCE
42 : /*
43 : Calculates the Metainference energy for a set of experimental data.
44 :
45 : Metainference \cite Bonomi:2016ip is a Bayesian framework
46 : to model heterogeneous systems by integrating prior information with noisy, ensemble-averaged data.
47 : Metainference models a system and quantifies the level of noise in the data by considering a set of replicas of the system.
48 :
49 : Calculated experimental data are given in input as ARG, while reference experimental values
50 : can be provided either as fixed components of other actions using PARARG or as numbers using
51 : PARAMETERS. The default behaviour is to average the data over the available replicas;
52 : if this is not wanted, the keyword NOENSEMBLE prevents the averaging.
53 :
54 : Metadynamics Metainference \cite Bonomi:2016ge, or more generally biased Metainference, requires the knowledge of
55 : the biasing potential in order to calculate the weighted average. In this case the value of the bias
56 : must be provided as the last argument in ARG together with the keyword REWEIGHT. To avoid the noise
57 : resulting from the instantaneous value of the bias, the weight of each replica can be averaged
58 : over a given time using the keyword AVERAGING.
59 :
60 : The data can be averaged over multiple replicas and weighted with a bias if present.
61 : The functional form of Metainference can be chosen among five variants, selected
62 : with NOISETYPE=GAUSS,MGAUSS,OUTLIERS,MOUTLIERS,GENERIC, which correspond to modelling the noise for
63 : the arguments as a single gaussian common to all the data points, a gaussian per data
64 : point, a single long-tailed gaussian common to all the data points, a long-tailed
65 : gaussian per data point, or using two distinct noises as in the most general formulation of Metainference.
66 : In this latter case the noise of the replica-averaging is gaussian (one per data point) and the noise for
67 : the comparison with the experimental data can be chosen using the keyword LIKELIHOOD
68 : between gaussian and log-normal (one per data point); furthermore, the evolution of the estimated average
69 : over an infinite number of replicas is driven by DFTILDE.
70 :
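As a minimal sketch of the GENERIC noise model (the labels, atoms and reference
values below are hypothetical), the likelihood and the evolution of the ftilde
estimators are controlled by LIKELIHOOD and DFTILDE:

\plumedfile
d1: DISTANCE ATOMS=1,2
d2: DISTANCE ATOMS=3,4

METAINFERENCE ...
ARG=d1,d2
NOISETYPE=GENERIC
LIKELIHOOD=LOGN
DFTILDE=0.1
PARAMETERS=0.12,0.14
SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
SIGMA_MEAN0=0.001
LABEL=gen
... METAINFERENCE
\endplumedfile
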
71 : As in Metainference theory there are two sigma values: SIGMA_MEAN0 represents the
72 : error made in calculating an average quantity using a finite set of replicas and should
73 : be set as small as possible following the guidelines for replica-averaged simulations
74 : in the framework of the Maximum Entropy Principle. Alternatively, it can be obtained
75 : automatically using the internal sigma mean optimisation introduced in \cite Lohr:2017gc
76 : (OPTSIGMAMEAN=SEM); in this second case sigma_mean is estimated from the maximum standard error
77 : of the mean, either over the whole simulation or over a time window defined with the keyword AVERAGING.
78 : SIGMA_BIAS is an uncertainty parameter, sampled by a MC algorithm in the bounded interval
79 : defined by SIGMA_MIN and SIGMA_MAX. The initial value is set with SIGMA0. The MC move is a
80 : random displacement of maximum value equal to DSIGMA. If the number of data points is
81 : too large and the acceptance rate drops, it is possible to make the MC move over mutually
82 : exclusive, random subsets of size MC_CHUNKSIZE and to run more than one move by setting MC_STEPS
83 : in such a way that MC_CHUNKSIZE*MC_STEPS covers all the data points.
84 :
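As a hedged sketch of these options (hypothetical labels and values), sigma_mean
can be estimated on the fly from the standard error of the mean over windows of
200 steps, while the two sigmas are moved one at a time in two MC steps so that
MC_CHUNKSIZE*MC_STEPS covers all the data points:

\plumedfile
#SETTINGS NREPLICAS=2
d1: DISTANCE ATOMS=1,2
d2: DISTANCE ATOMS=3,4

METAINFERENCE ...
ARG=d1,d2
NOISETYPE=MGAUSS
OPTSIGMAMEAN=SEM AVERAGING=200
PARAMETERS=0.12,0.14
SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
MC_CHUNKSIZE=1 MC_STEPS=2
LABEL=mi
... METAINFERENCE
\endplumedfile
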
85 : Calculated and experimental data can be compared modulo a scaling factor and/or an offset
86 : using SCALEDATA and/or ADDOFFSET; the sampling is performed by a MC algorithm using either
87 : a flat or a gaussian prior, selected with SCALE_PRIOR or OFFSET_PRIOR.
88 :
89 : \par Examples
90 :
91 : In the following example we calculate a set of \ref RDC, take the replica-average of
92 : them and compare them with a set of experimental values. RDCs are compared with
93 : the experimental data up to a multiplication factor SCALE that is also sampled by
94 : MC on-the-fly.
95 :
96 : \plumedfile
97 : RDC ...
98 : LABEL=rdc
99 : SCALE=0.0001
100 : GYROM=-72.5388
101 : ATOMS1=22,23
102 : ATOMS2=25,27
103 : ATOMS3=29,31
104 : ATOMS4=33,34
105 : ... RDC
106 :
107 : METAINFERENCE ...
108 : ARG=rdc.*
109 : NOISETYPE=MGAUSS
110 : PARAMETERS=1.9190,2.9190,3.9190,4.9190
111 : SCALEDATA SCALE0=1 SCALE_MIN=0.1 SCALE_MAX=3 DSCALE=0.01
112 : SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
113 : SIGMA_MEAN0=0.001
114 : LABEL=spe
115 : ... METAINFERENCE
116 :
117 : PRINT ARG=spe.bias FILE=BIAS STRIDE=1
118 : \endplumedfile
119 :
120 : In the following example, instead of using one uncertainty parameter per data point, we use
121 : a single uncertainty value in a long-tailed gaussian to account for outliers; furthermore,
122 : the data are weighted for the bias applied to other variables of the system.
123 :
124 : \plumedfile
125 : RDC ...
126 : LABEL=rdc
127 : SCALE=0.0001
128 : GYROM=-72.5388
129 : ATOMS1=22,23
130 : ATOMS2=25,27
131 : ATOMS3=29,31
132 : ATOMS4=33,34
133 : ... RDC
134 :
135 : cv1: TORSION ATOMS=1,2,3,4
136 : cv2: TORSION ATOMS=2,3,4,5
137 : mm: METAD ARG=cv1,cv2 HEIGHT=0.5 SIGMA=0.3,0.3 PACE=200 BIASFACTOR=8 WALKERS_MPI
138 :
139 : METAINFERENCE ...
140 : #SETTINGS NREPLICAS=2
141 : ARG=rdc.*,mm.bias
142 : REWEIGHT
143 : NOISETYPE=OUTLIERS
144 : PARAMETERS=1.9190,2.9190,3.9190,4.9190
145 : SCALEDATA SCALE0=1 SCALE_MIN=0.1 SCALE_MAX=3 DSCALE=0.01
146 : SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
147 : SIGMA_MEAN0=0.001
148 : LABEL=spe
149 : ... METAINFERENCE
150 : \endplumedfile
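
As a further sketch (again with hypothetical labels and reference values), an
offset common to all values and replicas can be sampled instead of a scaling
factor, here with a flat prior between OFFSET_MIN and OFFSET_MAX:

\plumedfile
d1: DISTANCE ATOMS=1,2
d2: DISTANCE ATOMS=3,4

METAINFERENCE ...
ARG=d1,d2
NOISETYPE=GAUSS
PARAMETERS=0.12,0.14
ADDOFFSET OFFSET0=0.0 OFFSET_MIN=-0.1 OFFSET_MAX=0.1 DOFFSET=0.01
SIGMA0=0.01 SIGMA_MIN=0.00001 SIGMA_MAX=3 DSIGMA=0.01
SIGMA_MEAN0=0.001
LABEL=mi
... METAINFERENCE
\endplumedfile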
151 :
152 : (See also \ref RDC, \ref PBMETAD).
153 :
154 : */
155 : //+ENDPLUMEDOC
156 :
157 : class Metainference : public bias::Bias {
158 : // experimental values
159 : std::vector<double> parameters;
160 : // noise type
161 : unsigned noise_type_;
162 : enum { GAUSS, MGAUSS, OUTLIERS, MOUTLIERS, GENERIC };
163 : unsigned gen_likelihood_;
164 : enum { LIKE_GAUSS, LIKE_LOGN };
165 : // scale is data scaling factor
166 : // noise type
167 : unsigned scale_prior_;
168 : enum { SC_GAUSS, SC_FLAT };
169 : bool doscale_;
170 : double scale_;
171 : double scale_mu_;
172 : double scale_min_;
173 : double scale_max_;
174 : double Dscale_;
175 : // scale is data scaling factor
176 : // noise type
177 : unsigned offset_prior_;
178 : bool dooffset_;
179 : double offset_;
180 : double offset_mu_;
181 : double offset_min_;
182 : double offset_max_;
183 : double Doffset_;
184 : // scale and offset regression
185 : bool doregres_zero_;
186 : int nregres_zero_;
187 : // sigma is data uncertainty
188 : std::vector<double> sigma_;
189 : std::vector<double> sigma_min_;
190 : std::vector<double> sigma_max_;
191 : std::vector<double> Dsigma_;
192 : // sigma_mean is uncertainty in the mean estimate
193 : std::vector<double> sigma_mean2_;
194 : // this is the estimator of the mean value per replica for generic metainference
195 : std::vector<double> ftilde_;
196 : double Dftilde_;
197 :
198 : // temperature in kbt
199 : double kbt_;
200 :
201 : // Monte Carlo stuff
202 : std::vector<Random> random;
203 : unsigned MCsteps_;
204 : long long unsigned MCaccept_;
205 : long long unsigned MCacceptScale_;
206 : long long unsigned MCacceptFT_;
207 : long long unsigned MCtrial_;
208 : unsigned MCchunksize_;
209 :
210 : // output
211 : Value* valueScale;
212 : Value* valueOffset;
213 : Value* valueAccept;
214 : Value* valueAcceptScale;
215 : Value* valueAcceptFT;
216 : std::vector<Value*> valueSigma;
217 : std::vector<Value*> valueSigmaMean;
218 : std::vector<Value*> valueFtilde;
219 :
220 : // restart
221 : unsigned write_stride_;
222 : OFile sfile_;
223 :
224 : // others
225 : bool firstTime;
226 : std::vector<bool> firstTimeW;
227 : bool master;
228 : bool do_reweight_;
229 : unsigned do_optsigmamean_;
230 : unsigned nrep_;
231 : unsigned replica_;
232 : unsigned narg;
233 :
234 : // selector
235 : std::string selector_;
236 :
237 : // optimize sigma mean
238 : std::vector< std::vector < std::vector <double> > > sigma_mean2_last_;
239 : unsigned optsigmamean_stride_;
240 : // optimize sigma max
241 : unsigned N_optimized_step_;
242 : unsigned optimized_step_;
243 : bool sigmamax_opt_done_;
244 : std::vector<double> sigma_max_est_;
245 :
246 : // average weights
247 : unsigned average_weights_stride_;
248 : std::vector< std::vector <double> > average_weights_;
249 :
250 : double getEnergyMIGEN(const std::vector<double> &mean, const std::vector<double> &ftilde, const std::vector<double> &sigma,
251 : const double scale, const double offset);
252 : double getEnergySP(const std::vector<double> &mean, const std::vector<double> &sigma,
253 : const double scale, const double offset);
254 : double getEnergySPE(const std::vector<double> &mean, const std::vector<double> &sigma,
255 : const double scale, const double offset);
256 : double getEnergyGJ(const std::vector<double> &mean, const std::vector<double> &sigma,
257 : const double scale, const double offset);
258 : double getEnergyGJE(const std::vector<double> &mean, const std::vector<double> &sigma,
259 : const double scale, const double offset);
260 : void moveTilde(const std::vector<double> &mean_, double &old_energy);
261 : void moveScaleOffset(const std::vector<double> &mean_, double &old_energy);
262 : void moveSigmas(const std::vector<double> &mean_, double &old_energy, const unsigned i, const std::vector<unsigned> &indices, bool &breaknow);
263 : double doMonteCarlo(const std::vector<double> &mean);
264 : void getEnergyForceMIGEN(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
265 : void getEnergyForceSP(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
266 : void getEnergyForceSPE(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
267 : void getEnergyForceGJ(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
268 : void getEnergyForceGJE(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b);
269 : void get_weights(const unsigned iselect, double &weight, double &norm, double &neff);
270 : void replica_averaging(const double weight, const double norm, std::vector<double> &mean, std::vector<double> &dmean_b);
271 : void get_sigma_mean(const unsigned iselect, const double weight, const double norm, const double neff, const std::vector<double> &mean);
272 : void writeStatus();
273 : void do_regression_zero(const std::vector<double> &mean);
274 :
275 : public:
276 : explicit Metainference(const ActionOptions&);
277 : ~Metainference();
278 : void calculate() override;
279 : void update() override;
280 : static void registerKeywords(Keywords& keys);
281 : };
282 :
283 :
284 : PLUMED_REGISTER_ACTION(Metainference,"METAINFERENCE")
285 :
286 34 : void Metainference::registerKeywords(Keywords& keys) {
287 34 : Bias::registerKeywords(keys);
288 68 : keys.addInputKeyword("optional","PARARG","scalar","reference values for the experimental data, these can be provided as arguments without derivatives");
289 34 : keys.add("optional","PARAMETERS","reference values for the experimental data");
290 34 : keys.addFlag("NOENSEMBLE",false,"don't perform any replica-averaging");
291 34 : keys.addFlag("REWEIGHT",false,"simple REWEIGHT using the latest ARG as energy");
292 34 : keys.add("optional","AVERAGING", "Stride for calculation of averaged weights and sigma_mean");
293 34 : keys.add("compulsory","NOISETYPE","MGAUSS","functional form of the noise (GAUSS,MGAUSS,OUTLIERS,MOUTLIERS,GENERIC)");
294 34 : keys.add("compulsory","LIKELIHOOD","GAUSS","the likelihood for the GENERIC metainference model, GAUSS or LOGN");
295 34 : keys.add("compulsory","DFTILDE","0.1","fraction of sigma_mean used to evolve ftilde");
296 34 : keys.addFlag("SCALEDATA",false,"Set to TRUE if you want to sample a scaling factor common to all values and replicas");
297 34 : keys.add("compulsory","SCALE0","1.0","initial value of the scaling factor");
298 34 : keys.add("compulsory","SCALE_PRIOR","FLAT","either FLAT or GAUSSIAN");
299 34 : keys.add("optional","SCALE_MIN","minimum value of the scaling factor");
300 34 : keys.add("optional","SCALE_MAX","maximum value of the scaling factor");
301 34 : keys.add("optional","DSCALE","maximum MC move of the scaling factor");
302 34 : keys.addFlag("ADDOFFSET",false,"Set to TRUE if you want to sample an offset common to all values and replicas");
303 34 : keys.add("compulsory","OFFSET0","0.0","initial value of the offset");
304 34 : keys.add("compulsory","OFFSET_PRIOR","FLAT","either FLAT or GAUSSIAN");
305 34 : keys.add("optional","OFFSET_MIN","minimum value of the offset");
306 34 : keys.add("optional","OFFSET_MAX","maximum value of the offset");
307 34 : keys.add("optional","DOFFSET","maximum MC move of the offset");
308 34 : keys.add("optional","REGRES_ZERO","stride for regression with zero offset");
309 34 : keys.add("compulsory","SIGMA0","1.0","initial value of the uncertainty parameter");
310 34 : keys.add("compulsory","SIGMA_MIN","0.0","minimum value of the uncertainty parameter");
311 34 : keys.add("compulsory","SIGMA_MAX","10.","maximum value of the uncertainty parameter");
312 34 : keys.add("optional","DSIGMA","maximum MC move of the uncertainty parameter");
313 34 : keys.add("compulsory","OPTSIGMAMEAN","NONE","Set to NONE/SEM/SEM_MAX to manually set sigma mean, or to estimate it on the fly");
314 34 : keys.add("optional","SIGMA_MEAN0","starting value for the uncertainty in the mean estimate");
315 34 : keys.add("optional","SIGMA_MAX_STEPS", "Number of steps used to optimise SIGMA_MAX; before that the given SIGMA_MAX value is used");
316 34 : keys.add("optional","TEMP","the system temperature - this is only needed if the code doesn't pass the temperature to plumed");
317 34 : keys.add("optional","MC_STEPS","number of MC steps");
318 34 : keys.add("optional","MC_CHUNKSIZE","MC chunksize");
319 34 : keys.add("optional","STATUS_FILE","write a file with all the data useful for restart/continuation of Metainference");
320 34 : keys.add("optional","FMT","specify format for the status file (useful to decrease the number of digits in regtests)");
321 34 : keys.add("compulsory","WRITE_STRIDE","10000","write the status to a file every N steps, this can be used for restart/continuation");
322 34 : keys.add("optional","SELECTOR","name of selector");
323 34 : keys.add("optional","NSELECT","range of values for selector [0, N-1]");
324 34 : keys.use("RESTART");
325 68 : keys.addOutputComponent("sigma", "default", "scalar","uncertainty parameter");
326 68 : keys.addOutputComponent("sigmaMean", "default", "scalar","uncertainty in the mean estimate");
327 68 : keys.addOutputComponent("neff", "default", "scalar","effective number of replicas");
328 68 : keys.addOutputComponent("acceptSigma", "default", "scalar","MC acceptance for sigma values");
329 68 : keys.addOutputComponent("acceptScale", "SCALEDATA", "scalar","MC acceptance for scale value");
330 68 : keys.addOutputComponent("acceptFT", "GENERIC", "scalar","MC acceptance for general metainference f tilde value");
331 68 : keys.addOutputComponent("weight", "REWEIGHT", "scalar","weights of the weighted average");
332 68 : keys.addOutputComponent("biasDer", "REWEIGHT", "scalar","derivatives with respect to the bias");
333 68 : keys.addOutputComponent("scale", "SCALEDATA", "scalar","scale parameter");
334 68 : keys.addOutputComponent("offset", "ADDOFFSET", "scalar","offset parameter");
335 68 : keys.addOutputComponent("ftilde", "GENERIC", "scalar","ensemble average estimator");
336 34 : }
337 :
338 32 : Metainference::Metainference(const ActionOptions&ao):
339 : PLUMED_BIAS_INIT(ao),
340 32 : doscale_(false),
341 32 : scale_(1.),
342 32 : scale_mu_(0),
343 32 : scale_min_(1),
344 32 : scale_max_(-1),
345 32 : Dscale_(-1),
346 32 : dooffset_(false),
347 32 : offset_(0.),
348 32 : offset_mu_(0),
349 32 : offset_min_(1),
350 32 : offset_max_(-1),
351 32 : Doffset_(-1),
352 32 : doregres_zero_(false),
353 32 : nregres_zero_(0),
354 32 : Dftilde_(0.1),
355 32 : random(3),
356 32 : MCsteps_(1),
357 32 : MCaccept_(0),
358 32 : MCacceptScale_(0),
359 32 : MCacceptFT_(0),
360 32 : MCtrial_(0),
361 32 : MCchunksize_(0),
362 32 : write_stride_(0),
363 32 : firstTime(true),
364 32 : do_reweight_(false),
365 32 : do_optsigmamean_(0),
366 32 : optsigmamean_stride_(0),
367 32 : N_optimized_step_(0),
368 32 : optimized_step_(0),
369 32 : sigmamax_opt_done_(false),
370 32 : average_weights_stride_(1) {
371 32 : bool noensemble = false;
372 32 : parseFlag("NOENSEMBLE", noensemble);
373 :
374 : // set up replica stuff
375 32 : master = (comm.Get_rank()==0);
376 32 : if(master) {
377 24 : nrep_ = multi_sim_comm.Get_size();
378 24 : replica_ = multi_sim_comm.Get_rank();
379 24 : if(noensemble) {
380 0 : nrep_ = 1;
381 : }
382 : } else {
383 8 : nrep_ = 0;
384 8 : replica_ = 0;
385 : }
386 32 : comm.Sum(&nrep_,1);
387 32 : comm.Sum(&replica_,1);
388 :
389 32 : unsigned nsel = 1;
390 32 : parse("SELECTOR", selector_);
391 64 : parse("NSELECT", nsel);
392 : // do checks
393 32 : if(selector_.length()>0 && nsel<=1) {
394 0 : error("With SELECTOR active, NSELECT must be greater than 1");
395 : }
396 32 : if(selector_.length()==0 && nsel>1) {
397 0 : error("With NSELECT greater than 1, you must specify SELECTOR");
398 : }
399 :
400 : // initialise firstTimeW
401 32 : firstTimeW.resize(nsel, true);
402 :
403 : // reweight implies a different number of arguments (the latest one must always be the bias)
404 32 : parseFlag("REWEIGHT", do_reweight_);
405 32 : if(do_reweight_&&nrep_<2) {
406 0 : error("REWEIGHT can only be used in parallel with 2 or more replicas");
407 : }
408 32 : if(!getRestart()) {
409 56 : average_weights_.resize(nsel, std::vector<double> (nrep_, 1./static_cast<double>(nrep_)));
410 : } else {
411 8 : average_weights_.resize(nsel, std::vector<double> (nrep_, 0.));
412 : }
413 32 : narg = getNumberOfArguments();
414 32 : if(do_reweight_) {
415 16 : narg--;
416 : }
417 :
418 32 : unsigned averaging=0;
419 32 : parse("AVERAGING", averaging);
420 32 : if(averaging>0) {
421 1 : average_weights_stride_ = averaging;
422 1 : optsigmamean_stride_ = averaging;
423 : }
424 :
425 64 : parseVector("PARAMETERS",parameters);
426 32 : if(parameters.size()!=static_cast<unsigned>(narg)&&!parameters.empty()) {
427 0 : error("Size of PARAMETERS array should be either 0 or the same as of the number of arguments in ARG1");
428 : }
429 :
430 : std::vector<Value*> arg2;
431 64 : parseArgumentList("PARARG",arg2);
432 32 : if(!arg2.empty()) {
433 4 : if(parameters.size()>0) {
434 0 : error("It is not possible to use PARARG and PARAMETERS together");
435 : }
436 4 : if(arg2.size()!=narg) {
437 0 : error("Size of PARARG array should be the same as number for arguments in ARG");
438 : }
439 2360 : for(unsigned i=0; i<arg2.size(); i++) {
440 2356 : parameters.push_back(arg2[i]->get());
441 2356 : if(arg2[i]->hasDerivatives()==true) {
442 0 : error("PARARG can only accept arguments without derivatives");
443 : }
444 : }
445 : }
446 :
447 32 : if(parameters.size()!=narg) {
448 0 : error("PARARG or PARAMETERS arrays should include the same number of elements as the arguments in ARG");
449 : }
450 :
451 : std::string stringa_noise;
452 64 : parse("NOISETYPE",stringa_noise);
453 32 : if(stringa_noise=="GAUSS") {
454 3 : noise_type_ = GAUSS;
455 29 : } else if(stringa_noise=="MGAUSS") {
456 8 : noise_type_ = MGAUSS;
457 21 : } else if(stringa_noise=="OUTLIERS") {
458 14 : noise_type_ = OUTLIERS;
459 7 : } else if(stringa_noise=="MOUTLIERS") {
460 4 : noise_type_ = MOUTLIERS;
461 3 : } else if(stringa_noise=="GENERIC") {
462 3 : noise_type_ = GENERIC;
463 : } else {
464 0 : error("Unknown noise type!");
465 : }
466 :
467 32 : if(noise_type_== GENERIC) {
468 : std::string stringa_like;
469 6 : parse("LIKELIHOOD",stringa_like);
470 3 : if(stringa_like=="GAUSS") {
471 2 : gen_likelihood_ = LIKE_GAUSS;
472 1 : } else if(stringa_like=="LOGN") {
473 1 : gen_likelihood_ = LIKE_LOGN;
474 : } else {
475 0 : error("Unknown likelihood type!");
476 : }
477 :
478 6 : parse("DFTILDE",Dftilde_);
479 : }
480 :
481 64 : parse("WRITE_STRIDE",write_stride_);
482 : std::string status_file_name_;
483 64 : parse("STATUS_FILE",status_file_name_);
484 32 : if(status_file_name_=="") {
485 64 : status_file_name_ = "MISTATUS"+getLabel();
486 : } else {
487 0 : status_file_name_ = status_file_name_+getLabel();
488 : }
489 :
490 : std::string stringa_optsigma;
491 64 : parse("OPTSIGMAMEAN", stringa_optsigma);
492 32 : if(stringa_optsigma=="NONE") {
493 30 : do_optsigmamean_=0;
494 2 : } else if(stringa_optsigma=="SEM") {
495 0 : do_optsigmamean_=1;
496 2 : } else if(stringa_optsigma=="SEM_MAX") {
497 0 : do_optsigmamean_=2;
498 : }
499 :
500 32 : unsigned aver_max_steps=0;
501 32 : parse("SIGMA_MAX_STEPS", aver_max_steps);
502 32 : if(aver_max_steps==0&&do_optsigmamean_==2) {
503 0 : aver_max_steps=averaging*2000;
504 : }
505 32 : if(aver_max_steps>0&&do_optsigmamean_<2) {
506 0 : error("SIGMA_MAX_STEPS can only be used together with OPTSIGMAMEAN=SEM_MAX");
507 : }
508 32 : if(aver_max_steps>0&&do_optsigmamean_==2) {
509 0 : N_optimized_step_=aver_max_steps;
510 : }
511 32 : if(aver_max_steps>0&&aver_max_steps<averaging) {
512 0 : error("SIGMA_MAX_STEPS must be greater than AVERAGING");
513 : }
514 :
515 : // resize std::vector for sigma_mean history
516 32 : sigma_mean2_last_.resize(nsel);
517 64 : for(unsigned i=0; i<nsel; i++) {
518 32 : sigma_mean2_last_[i].resize(narg);
519 : }
520 :
521 : std::vector<double> read_sigma_mean_;
522 32 : parseVector("SIGMA_MEAN0",read_sigma_mean_);
523 32 : if(do_optsigmamean_==0 && read_sigma_mean_.size()==0 && !getRestart()) {
524 0 : error("If you don't use OPTSIGMAMEAN and you are not RESTARTING then you MUST SET SIGMA_MEAN0");
525 : }
526 :
527 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
528 15 : if(read_sigma_mean_.size()==narg) {
529 0 : sigma_mean2_.resize(narg);
530 0 : for(unsigned i=0; i<narg; i++) {
531 0 : sigma_mean2_[i]=read_sigma_mean_[i]*read_sigma_mean_[i];
532 : }
533 15 : } else if(read_sigma_mean_.size()==1) {
534 15 : sigma_mean2_.resize(narg,read_sigma_mean_[0]*read_sigma_mean_[0]);
535 0 : } else if(read_sigma_mean_.size()==0) {
536 0 : sigma_mean2_.resize(narg,0.000001);
537 : } else {
538 0 : error("SIGMA_MEAN0 can accept either one single value or as many values as the arguments (with NOISETYPE=MGAUSS|MOUTLIERS)");
539 : }
540 : // set the initial value for the history
541 30 : for(unsigned i=0; i<nsel; i++)
542 2409 : for(unsigned j=0; j<narg; j++) {
543 2394 : sigma_mean2_last_[i][j].push_back(sigma_mean2_[j]);
544 : }
545 : } else {
546 17 : if(read_sigma_mean_.size()==1) {
547 17 : sigma_mean2_.resize(1, read_sigma_mean_[0]*read_sigma_mean_[0]);
548 0 : } else if(read_sigma_mean_.size()==0) {
549 0 : sigma_mean2_.resize(1, 0.000001);
550 : } else {
551 0 : error("If you want to use more than one SIGMA_MEAN0 you should use NOISETYPE=MGAUSS|MOUTLIERS");
552 : }
553 : // set the initial value for the history
554 34 : for(unsigned i=0; i<nsel; i++)
555 109 : for(unsigned j=0; j<narg; j++) {
556 92 : sigma_mean2_last_[i][j].push_back(sigma_mean2_[0]);
557 : }
558 : }
559 :
560 32 : parseFlag("SCALEDATA", doscale_);
561 32 : if(doscale_) {
562 : std::string stringa_noise;
563 24 : parse("SCALE_PRIOR",stringa_noise);
564 12 : if(stringa_noise=="GAUSSIAN") {
565 0 : scale_prior_ = SC_GAUSS;
566 12 : } else if(stringa_noise=="FLAT") {
567 12 : scale_prior_ = SC_FLAT;
568 : } else {
569 0 : error("Unknown SCALE_PRIOR type!");
570 : }
571 12 : parse("SCALE0",scale_);
572 12 : parse("DSCALE",Dscale_);
573 12 : if(Dscale_<0.) {
574 0 : error("DSCALE must be set when using SCALEDATA");
575 : }
576 12 : if(scale_prior_==SC_GAUSS) {
577 0 : scale_mu_=scale_;
578 : } else {
579 12 : parse("SCALE_MIN",scale_min_);
580 12 : parse("SCALE_MAX",scale_max_);
581 12 : if(scale_max_<scale_min_) {
582 0 : error("SCALE_MAX and SCALE_MIN must be set when using SCALE_PRIOR=FLAT");
583 : }
584 : }
585 : }
586 :
587 32 : parseFlag("ADDOFFSET", dooffset_);
588 32 : if(dooffset_) {
589 : std::string stringa_noise;
590 12 : parse("OFFSET_PRIOR",stringa_noise);
591 6 : if(stringa_noise=="GAUSSIAN") {
592 0 : offset_prior_ = SC_GAUSS;
593 6 : } else if(stringa_noise=="FLAT") {
594 6 : offset_prior_ = SC_FLAT;
595 : } else {
596 0 : error("Unknown OFFSET_PRIOR type!");
597 : }
598 6 : parse("OFFSET0",offset_);
599 6 : parse("DOFFSET",Doffset_);
600 6 : if(offset_prior_==SC_GAUSS) {
601 0 : offset_mu_=offset_;
602 0 : if(Doffset_<0.) {
603 0 : error("DOFFSET must be set when using OFFSET_PRIOR=GAUSS");
604 : }
605 : } else {
606 6 : parse("OFFSET_MIN",offset_min_);
607 6 : parse("OFFSET_MAX",offset_max_);
608 6 : if(Doffset_<0) {
609 0 : Doffset_ = 0.05*(offset_max_ - offset_min_);
610 : }
611 6 : if(offset_max_<offset_min_) {
612 0 : error("OFFSET_MAX and OFFSET_MIN must be set when using OFFSET_PRIOR=FLAT");
613 : }
614 : }
615 : }
616 :
617 : // regression with zero intercept
618 32 : parse("REGRES_ZERO", nregres_zero_);
619 32 : if(nregres_zero_>0) {
620 : // set flag
621 1 : doregres_zero_=true;
622 : // check if already sampling scale and offset
623 1 : if(doscale_) {
624 0 : error("REGRES_ZERO and SCALEDATA are mutually exclusive");
625 : }
626 1 : if(dooffset_) {
627 0 : error("REGRES_ZERO and ADDOFFSET are mutually exclusive");
628 : }
629 : }
630 :
631 : std::vector<double> readsigma;
632 32 : parseVector("SIGMA0",readsigma);
633 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma.size()>1) {
634 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
635 : }
636 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
637 15 : sigma_.resize(readsigma.size());
638 15 : sigma_=readsigma;
639 : } else {
640 17 : sigma_.resize(1, readsigma[0]);
641 : }
642 :
643 : std::vector<double> readsigma_min;
644 32 : parseVector("SIGMA_MIN",readsigma_min);
645 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_min.size()>1) {
646 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
647 : }
648 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
649 15 : sigma_min_.resize(readsigma_min.size());
650 15 : sigma_min_=readsigma_min;
651 : } else {
652 17 : sigma_min_.resize(1, readsigma_min[0]);
653 : }
654 :
655 : std::vector<double> readsigma_max;
656 32 : parseVector("SIGMA_MAX",readsigma_max);
657 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_max.size()>1) {
658 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
659 : }
660 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
661 15 : sigma_max_.resize(readsigma_max.size());
662 15 : sigma_max_=readsigma_max;
663 : } else {
664 17 : sigma_max_.resize(1, readsigma_max[0]);
665 : }
666 :
667 32 : if(sigma_max_.size()!=sigma_min_.size()) {
668 0 : error("The number of values for SIGMA_MIN and SIGMA_MAX must be the same");
669 : }
670 :
671 : std::vector<double> read_dsigma;
672 32 : parseVector("DSIGMA",read_dsigma);
673 32 : if((noise_type_!=MGAUSS&&noise_type_!=MOUTLIERS&&noise_type_!=GENERIC)&&readsigma_max.size()>1) {
674 0 : error("If you want to use more than one SIGMA you should use NOISETYPE=MGAUSS|MOUTLIERS|GENERIC");
675 : }
676 32 : if(read_dsigma.size()>0) {
677 32 : Dsigma_.resize(read_dsigma.size());
678 32 : Dsigma_=read_dsigma;
679 : } else {
680 0 : Dsigma_.resize(sigma_max_.size(), -1.);
681 : /* in this case Dsigma is initialised after reading the restart file if present */
682 : }
683 :
684 : std::string fmt_;
685 32 : parse("FMT",fmt_);
686 :
687 : // monte carlo stuff
688 32 : parse("MC_STEPS",MCsteps_);
689 32 : parse("MC_CHUNKSIZE", MCchunksize_);
690 : // get temperature
691 32 : kbt_ = getkBT();
692 32 : if(kbt_==0.0) {
693 0 : error("Unless the MD engine passes the temperature to plumed, you must specify it using TEMP");
694 : }
695 :
696 32 : checkRead();
697 :
698 : // set sigma_bias
699 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
700 15 : if(sigma_.size()==1) {
701 15 : double tmp = sigma_[0];
702 15 : sigma_.resize(narg, tmp);
703 0 : } else if(sigma_.size()>1&&sigma_.size()!=narg) {
704 0 : error("SIGMA0 can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
705 : }
706 15 : if(sigma_min_.size()==1) {
707 15 : double tmp = sigma_min_[0];
708 15 : sigma_min_.resize(narg, tmp);
709 0 : } else if(sigma_min_.size()>1&&sigma_min_.size()!=narg) {
710 0 : error("SIGMA_MIN can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
711 : }
712 15 : if(sigma_max_.size()==1) {
713 15 : double tmp = sigma_max_[0];
714 15 : sigma_max_.resize(narg, tmp);
715 0 : } else if(sigma_max_.size()>1&&sigma_max_.size()!=narg) {
716 0 : error("SIGMA_MAX can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
717 : }
718 15 : if(Dsigma_.size()==1) {
719 15 : double tmp = Dsigma_[0];
720 15 : Dsigma_.resize(narg, tmp);
721 0 : } else if(Dsigma_.size()>1&&Dsigma_.size()!=narg) {
722 0 : error("DSIGMA can accept either one single value or as many values as the number of arguments (with NOISETYPE=MGAUSS|MOUTLIERS|GENERIC)");
723 : }
724 : }
725 :
726 32 : sigma_max_est_.resize(sigma_max_.size(), 0.);
727 :
728 32 : IFile restart_sfile;
729 32 : restart_sfile.link(*this);
730 32 : if(getRestart()&&restart_sfile.FileExist(status_file_name_)) {
731 4 : firstTime = false;
732 8 : for(unsigned i=0; i<nsel; i++) {
733 : firstTimeW[i] = false;
734 : }
735 4 : restart_sfile.open(status_file_name_);
736 4 : log.printf(" Restarting from %s\n", status_file_name_.c_str());
737 : double dummy;
738 8 : if(restart_sfile.scanField("time",dummy)) {
739 : // check for synchronisation
740 4 : std::vector<double> dummy_time(nrep_,0);
741 4 : if(master&&nrep_>1) {
742 2 : dummy_time[replica_] = dummy;
743 2 : multi_sim_comm.Sum(dummy_time);
744 : }
745 4 : comm.Sum(dummy_time);
746 8 : for(unsigned i=1; i<nrep_; i++) {
747 4 : std::string msg = "METAINFERENCE restart files " + status_file_name_ + " are not in sync";
748 4 : if(dummy_time[i]!=dummy_time[0]) {
749 0 : plumed_merror(msg);
750 : }
751 : }
752 : // nsel
753 8 : for(unsigned i=0; i<sigma_mean2_last_.size(); i++) {
754 : std::string msg_i;
755 4 : Tools::convert(i,msg_i);
756 : // narg
757 4 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
758 20 : for(unsigned j=0; j<narg; ++j) {
759 : std::string msg_j;
760 16 : Tools::convert(j,msg_j);
761 16 : std::string msg = msg_i+"_"+msg_j;
762 : double read_sm;
763 16 : restart_sfile.scanField("sigmaMean_"+msg,read_sm);
764 16 : sigma_mean2_last_[i][j][0] = read_sm*read_sm;
765 : }
766 : }
767 4 : if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
768 : double read_sm;
769 : std::string msg_j;
770 0 : Tools::convert(0,msg_j);
771 0 : std::string msg = msg_i+"_"+msg_j;
772 0 : restart_sfile.scanField("sigmaMean_"+msg,read_sm);
773 0 : for(unsigned j=0; j<narg; j++) {
774 0 : sigma_mean2_last_[i][j][0] = read_sm*read_sm;
775 : }
776 : }
777 : }
778 :
779 20 : for(unsigned i=0; i<sigma_.size(); ++i) {
780 : std::string msg;
781 16 : Tools::convert(i,msg);
782 32 : restart_sfile.scanField("sigma_"+msg,sigma_[i]);
783 : }
784 20 : for(unsigned i=0; i<sigma_max_.size(); ++i) {
785 : std::string msg;
786 16 : Tools::convert(i,msg);
787 16 : restart_sfile.scanField("sigma_max_"+msg,sigma_max_[i]);
788 16 : sigmamax_opt_done_=true;
789 : }
790 4 : if(noise_type_==GENERIC) {
791 0 : for(unsigned i=0; i<ftilde_.size(); ++i) {
792 : std::string msg;
793 0 : Tools::convert(i,msg);
794 0 : restart_sfile.scanField("ftilde_"+msg,ftilde_[i]);
795 : }
796 : }
797 4 : restart_sfile.scanField("scale0_",scale_);
798 4 : restart_sfile.scanField("offset0_",offset_);
799 :
800 8 : for(unsigned i=0; i<nsel; i++) {
801 : std::string msg;
802 4 : Tools::convert(i,msg);
803 : double tmp_w;
804 4 : restart_sfile.scanField("weight_"+msg,tmp_w);
805 4 : if(master) {
806 2 : average_weights_[i][replica_] = tmp_w;
807 2 : if(nrep_>1) {
808 2 : multi_sim_comm.Sum(&average_weights_[i][0], nrep_);
809 : }
810 : }
811 4 : comm.Sum(&average_weights_[i][0], nrep_);
812 : }
813 :
814 : }
815 4 : restart_sfile.scanField();
816 4 : restart_sfile.close();
817 : }
818 :
819 : /* If DSIGMA is not yet initialised do it now */
820 2443 : for(unsigned i=0; i<sigma_max_.size(); i++)
821 2411 : if(Dsigma_[i]==-1) {
822 0 : Dsigma_[i] = 0.05*(sigma_max_[i] - sigma_min_[i]);
823 : }
824 :
825 32 : switch(noise_type_) {
826 3 : case GENERIC:
827 3 : log.printf(" with general metainference ");
828 3 : if(gen_likelihood_==LIKE_GAUSS) {
829 2 : log.printf(" and a gaussian likelihood\n");
830 1 : } else if(gen_likelihood_==LIKE_LOGN) {
831 1 : log.printf(" and a log-normal likelihood\n");
832 : }
833 3 : log.printf(" ensemble average parameter sampled with a step %lf of sigma_mean\n", Dftilde_);
834 : break;
835 3 : case GAUSS:
836 3 : log.printf(" with gaussian noise and a single noise parameter for all the data\n");
837 : break;
838 8 : case MGAUSS:
839 8 : log.printf(" with gaussian noise and a noise parameter for each data point\n");
840 : break;
841 14 : case OUTLIERS:
842 14 : log.printf(" with long tailed gaussian noise and a single noise parameter for all the data\n");
843 : break;
844 4 : case MOUTLIERS:
845 4 : log.printf(" with long tailed gaussian noise and a noise parameter for each data point\n");
846 : break;
847 : }
848 :
849 32 : if(doscale_) {
850 : // check that the scale value is the same for all replicas
851 12 : std::vector<double> dummy_scale(nrep_,0);
852 12 : if(master&&nrep_>1) {
853 6 : dummy_scale[replica_] = scale_;
854 6 : multi_sim_comm.Sum(dummy_scale);
855 : }
856 12 : comm.Sum(dummy_scale);
857 24 : for(unsigned i=1; i<nrep_; i++) {
858 12 : std::string msg = "The SCALE value must be the same for all replicas: check your input or restart file";
859 12 : if(dummy_scale[i]!=dummy_scale[0]) {
860 0 : plumed_merror(msg);
861 : }
862 : }
863 12 : log.printf(" sampling a common scaling factor with:\n");
864 12 : log.printf(" initial scale parameter %f\n",scale_);
865 12 : if(scale_prior_==SC_GAUSS) {
866 0 : log.printf(" gaussian prior with mean %f and width %f\n",scale_mu_,Dscale_);
867 : }
868 12 : if(scale_prior_==SC_FLAT) {
869 12 : log.printf(" flat prior between %f - %f\n",scale_min_,scale_max_);
870 12 : log.printf(" maximum MC move of scale parameter %f\n",Dscale_);
871 : }
872 : }
873 :
874 32 : if(dooffset_) {
875 : // check that the offset value is the same for all replicas
876 6 : std::vector<double> dummy_offset(nrep_,0);
877 6 : if(master&&nrep_>1) {
878 0 : dummy_offset[replica_] = offset_;
879 0 : multi_sim_comm.Sum(dummy_offset);
880 : }
881 6 : comm.Sum(dummy_offset);
882 6 : for(unsigned i=1; i<nrep_; i++) {
883 0 : std::string msg = "The OFFSET value must be the same for all replicas: check your input or restart file";
884 0 : if(dummy_offset[i]!=dummy_offset[0]) {
885 0 : plumed_merror(msg);
886 : }
887 : }
888 6 : log.printf(" sampling a common offset with:\n");
889 6 : log.printf(" initial offset parameter %f\n",offset_);
890 6 : if(offset_prior_==SC_GAUSS) {
891 0 : log.printf(" gaussian prior with mean %f and width %f\n",offset_mu_,Doffset_);
892 : }
893 6 : if(offset_prior_==SC_FLAT) {
894 6 : log.printf(" flat prior between %f - %f\n",offset_min_,offset_max_);
895 6 : log.printf(" maximum MC move of offset parameter %f\n",Doffset_);
896 : }
897 : }
898 :
899 32 : if(doregres_zero_) {
900 1 : log.printf(" doing regression with zero intercept with stride: %d\n", nregres_zero_);
901 : }
902 :
903 32 : log.printf(" number of experimental data points %u\n",narg);
904 32 : log.printf(" number of replicas %u\n",nrep_);
905 32 : log.printf(" initial data uncertainties");
906 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
907 2411 : log.printf(" %f", sigma_[i]);
908 : }
909 32 : log.printf("\n");
910 32 : log.printf(" minimum data uncertainties");
911 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
912 2411 : log.printf(" %f",sigma_min_[i]);
913 : }
914 32 : log.printf("\n");
915 32 : log.printf(" maximum data uncertainties");
916 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
917 2411 : log.printf(" %f",sigma_max_[i]);
918 : }
919 32 : log.printf("\n");
920 32 : log.printf(" maximum MC move of data uncertainties");
921 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
922 2411 : log.printf(" %f",Dsigma_[i]);
923 : }
924 32 : log.printf("\n");
925 32 : log.printf(" temperature of the system %f\n",kbt_);
926 32 : log.printf(" MC steps %u\n",MCsteps_);
927 32 : log.printf(" initial standard errors of the mean");
928 2443 : for(unsigned i=0; i<sigma_mean2_.size(); ++i) {
929 2411 : log.printf(" %f", std::sqrt(sigma_mean2_[i]));
930 : }
931 32 : log.printf("\n");
932 :
933 32 : if(do_reweight_) {
934 32 : addComponent("biasDer");
935 32 : componentIsNotPeriodic("biasDer");
936 32 : addComponent("weight");
937 32 : componentIsNotPeriodic("weight");
938 : }
939 :
940 64 : addComponent("neff");
941 32 : componentIsNotPeriodic("neff");
942 :
943 32 : if(doscale_ || doregres_zero_) {
944 26 : addComponent("scale");
945 13 : componentIsNotPeriodic("scale");
946 13 : valueScale=getPntrToComponent("scale");
947 : }
948 :
949 32 : if(dooffset_) {
950 12 : addComponent("offset");
951 6 : componentIsNotPeriodic("offset");
952 6 : valueOffset=getPntrToComponent("offset");
953 : }
954 :
955 32 : if(dooffset_||doscale_) {
956 36 : addComponent("acceptScale");
957 18 : componentIsNotPeriodic("acceptScale");
958 18 : valueAcceptScale=getPntrToComponent("acceptScale");
959 : }
960 :
961 32 : if(noise_type_==GENERIC) {
962 6 : addComponent("acceptFT");
963 3 : componentIsNotPeriodic("acceptFT");
964 3 : valueAcceptFT=getPntrToComponent("acceptFT");
965 : }
966 :
967 64 : addComponent("acceptSigma");
968 32 : componentIsNotPeriodic("acceptSigma");
969 32 : valueAccept=getPntrToComponent("acceptSigma");
970 :
971 32 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
972 2409 : for(unsigned i=0; i<sigma_mean2_.size(); ++i) {
973 : std::string num;
974 2394 : Tools::convert(i,num);
975 4788 : addComponent("sigmaMean-"+num);
976 2394 : componentIsNotPeriodic("sigmaMean-"+num);
977 2394 : valueSigmaMean.push_back(getPntrToComponent("sigmaMean-"+num));
978 4788 : getPntrToComponent("sigmaMean-"+num)->set(std::sqrt(sigma_mean2_[i]));
979 4788 : addComponent("sigma-"+num);
980 2394 : componentIsNotPeriodic("sigma-"+num);
981 2394 : valueSigma.push_back(getPntrToComponent("sigma-"+num));
982 2394 : getPntrToComponent("sigma-"+num)->set(sigma_[i]);
983 2394 : if(noise_type_==GENERIC) {
984 12 : addComponent("ftilde-"+num);
985 6 : componentIsNotPeriodic("ftilde-"+num);
986 6 : valueFtilde.push_back(getPntrToComponent("ftilde-"+num));
987 : }
988 : }
989 15 : } else {
990 34 : addComponent("sigmaMean");
991 17 : componentIsNotPeriodic("sigmaMean");
992 17 : valueSigmaMean.push_back(getPntrToComponent("sigmaMean"));
993 34 : getPntrToComponent("sigmaMean")->set(std::sqrt(sigma_mean2_[0]));
994 34 : addComponent("sigma");
995 17 : componentIsNotPeriodic("sigma");
996 17 : valueSigma.push_back(getPntrToComponent("sigma"));
997 34 : getPntrToComponent("sigma")->set(sigma_[0]);
998 : }
999 :
1000 : // initialize random seed
1001 : unsigned iseed;
1002 32 : if(master) {
1003 24 : auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
1004 24 : iseed = static_cast<unsigned>(ts)+replica_;
1005 : } else {
1006 8 : iseed = 0;
1007 : }
1008 32 : comm.Sum(&iseed, 1);
1010 : // this is used for ftilde and sigma, both for the move and the acceptance
1010 : // this is different for each replica
1011 32 : random[0].setSeed(-iseed);
1012 32 : if(doscale_||dooffset_) {
1013 : // in this case we want the same seed everywhere
1014 18 : auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
1015 18 : iseed = static_cast<unsigned>(ts);
1016 18 : if(master&&nrep_>1) {
1017 6 : multi_sim_comm.Bcast(iseed,0);
1018 : }
1019 18 : comm.Bcast(iseed,0);
1020 : // this is used for scale and offset sampling and acceptance
1021 18 : random[1].setSeed(-iseed);
1022 : }
1023 : // this is used for random chunk of sigmas, and it is different for each replica
1024 32 : if(master) {
1025 24 : auto ts = std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now()).time_since_epoch().count();
1026 24 : iseed = static_cast<unsigned>(ts)+replica_;
1027 : } else {
1028 8 : iseed = 0;
1029 : }
1030 32 : comm.Sum(&iseed, 1);
1031 32 : random[2].setSeed(-iseed);
1032 :
1033 : // outfile stuff
1034 32 : if(write_stride_>0) {
1035 32 : sfile_.link(*this);
1036 32 : sfile_.open(status_file_name_);
1037 32 : if(fmt_.length()>0) {
1038 1 : sfile_.fmtField(fmt_);
1039 : }
1040 : }
1041 :
1042 64 : log<<" Bibliography "<<plumed.cite("Bonomi, Camilloni, Cavalli, Vendruscolo, Sci. Adv. 2, e150117 (2016)");
1043 32 : if(do_reweight_) {
1044 32 : log<<plumed.cite("Bonomi, Camilloni, Vendruscolo, Sci. Rep. 6, 31232 (2016)");
1045 : }
1046 32 : if(do_optsigmamean_>0) {
1047 0 : log<<plumed.cite("Loehr, Jussupow, Camilloni, J. Chem. Phys. 146, 165102 (2017)");
1048 : }
1049 64 : log<<plumed.cite("Bonomi, Camilloni, Bioinformatics, 33, 3999 (2017)");
1050 32 : log<<"\n";
1051 64 : }
1052 :
1053 64 : Metainference::~Metainference() {
1054 32 : if(sfile_.isOpen()) {
1055 32 : sfile_.close();
1056 : }
1057 192 : }
1058 :
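// energy for NOISETYPE=OUTLIERS: long-tailed gaussian noise with a single
// uncertainty parameter common to all data points, plus the normalisation
// terms and a single Jeffreys prior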
1059 264 : double Metainference::getEnergySP(const std::vector<double> &mean, const std::vector<double> &sigma,
1060 : const double scale, const double offset) {
1061 264 : const double scale2 = scale*scale;
1062 264 : const double sm2 = sigma_mean2_[0];
1063 264 : const double ss2 = sigma[0]*sigma[0] + scale2*sm2;
1064 264 : const double sss = sigma[0]*sigma[0] + sm2;
1065 :
1066 : double ene = 0.0;
1067 264 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1068 : {
1069 : #pragma omp for reduction( + : ene)
1070 : for(unsigned i=0; i<narg; ++i) {
1071 : const double dev = scale*mean[i]-parameters[i]+offset;
1072 : const double a2 = 0.5*dev*dev + ss2;
1073 : if(sm2 > 0.0) {
1074 : ene += std::log(2.0*a2/(1.0-std::exp(-a2/sm2)));
1075 : } else {
1076 : ene += std::log(2.0*a2);
1077 : }
1078 : }
1079 : }
1080 : // add one single Jeffrey's prior and one normalisation per data point
1081 264 : ene += 0.5*std::log(sss) + static_cast<double>(narg)*0.5*std::log(0.5*M_PI*M_PI/ss2);
1082 264 : if(doscale_ || doregres_zero_) {
1083 156 : ene += 0.5*std::log(sss);
1084 : }
1085 264 : if(dooffset_) {
1086 0 : ene += 0.5*std::log(sss);
1087 : }
1088 264 : return kbt_ * ene;
1089 : }
1090 :
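// energy for NOISETYPE=MOUTLIERS: long-tailed gaussian noise with one
// uncertainty parameter per data point (normalisation and Jeffreys prior
// included per data point)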
1091 144 : double Metainference::getEnergySPE(const std::vector<double> &mean, const std::vector<double> &sigma,
1092 : const double scale, const double offset) {
1093 144 : const double scale2 = scale*scale;
1094 : double ene = 0.0;
1095 144 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1096 : {
1097 : #pragma omp for reduction( + : ene)
1098 : for(unsigned i=0; i<narg; ++i) {
1099 : const double sm2 = sigma_mean2_[i];
1100 : const double ss2 = sigma[i]*sigma[i] + scale2*sm2;
1101 : const double sss = sigma[i]*sigma[i] + sm2;
1102 : const double dev = scale*mean[i]-parameters[i]+offset;
1103 : const double a2 = 0.5*dev*dev + ss2;
1104 : if(sm2 > 0.0) {
1105 : ene += 0.5*std::log(sss) + 0.5*std::log(0.5*M_PI*M_PI/ss2) + std::log(2.0*a2/(1.0-std::exp(-a2/sm2)));
1106 : } else {
1107 : ene += 0.5*std::log(sss) + 0.5*std::log(0.5*M_PI*M_PI/ss2) + std::log(2.0*a2);
1108 : }
1109 : if(doscale_ || doregres_zero_) {
1110 : ene += 0.5*std::log(sss);
1111 : }
1112 : if(dooffset_) {
1113 : ene += 0.5*std::log(sss);
1114 : }
1115 : }
1116 : }
1117 144 : return kbt_ * ene;
1118 : }
1119 :
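// energy for NOISETYPE=GENERIC: gaussian noise on the replica average plus a
// gaussian or log-normal likelihood (keyword LIKELIHOOD) for the comparison
// of the per-point estimators ftilde with the experimental data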
1120 144 : double Metainference::getEnergyMIGEN(const std::vector<double> &mean, const std::vector<double> &ftilde, const std::vector<double> &sigma,
1121 : const double scale, const double offset) {
1122 : double ene = 0.0;
1123 144 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1124 : {
1125 : #pragma omp for reduction( + : ene)
1126 : for(unsigned i=0; i<narg; ++i) {
1127 : const double inv_sb2 = 1./(sigma[i]*sigma[i]);
1128 : const double inv_sm2 = 1./sigma_mean2_[i];
1129 : double devb = 0;
1130 : if(gen_likelihood_==LIKE_GAUSS) {
1131 : devb = scale*ftilde[i]-parameters[i]+offset;
1132 : } else if(gen_likelihood_==LIKE_LOGN) {
1133 : devb = std::log(scale*ftilde[i]/parameters[i]);
1134 : }
1135 : double devm = mean[i] - ftilde[i];
1136 : // deviation + normalisation + jeffrey
1137 : double normb = 0.;
1138 : if(gen_likelihood_==LIKE_GAUSS) {
1139 : normb = -0.5*std::log(0.5/M_PI*inv_sb2);
1140 : } else if(gen_likelihood_==LIKE_LOGN) {
1141 : normb = -0.5*std::log(0.5/M_PI*inv_sb2/(parameters[i]*parameters[i]));
1142 : }
1143 : const double normm = -0.5*std::log(0.5/M_PI*inv_sm2);
1144 : const double jeffreys = -0.5*std::log(2.*inv_sb2);
1145 : ene += 0.5*devb*devb*inv_sb2 + 0.5*devm*devm*inv_sm2 + normb + normm + jeffreys;
1146 : if(doscale_ || doregres_zero_) {
1147 : ene += jeffreys;
1148 : }
1149 : if(dooffset_) {
1150 : ene += jeffreys;
1151 : }
1152 : }
1153 : }
1154 144 : return kbt_ * ene;
1155 : }
1156 :
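// energy for NOISETYPE=GAUSS: gaussian noise with a single uncertainty
// parameter common to all data points and a single Jeffreys prior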
1157 108 : double Metainference::getEnergyGJ(const std::vector<double> &mean, const std::vector<double> &sigma,
1158 : const double scale, const double offset) {
1159 108 : const double scale2 = scale*scale;
1160 108 : const double inv_s2 = 1./(sigma[0]*sigma[0] + scale2*sigma_mean2_[0]);
1161 108 : const double inv_sss = 1./(sigma[0]*sigma[0] + sigma_mean2_[0]);
1162 :
1163 : double ene = 0.0;
1164 108 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1165 : {
1166 : #pragma omp for reduction( + : ene)
1167 : for(unsigned i=0; i<narg; ++i) {
1168 : double dev = scale*mean[i]-parameters[i]+offset;
1169 : ene += 0.5*dev*dev*inv_s2;
1170 : }
1171 : }
1172 108 : const double normalisation = -0.5*std::log(0.5/M_PI*inv_s2);
1173 108 : const double jeffreys = -0.5*std::log(2.*inv_sss);
1174 : // add Jeffrey's prior in case one sigma for all data points + one normalisation per datapoint
1175 108 : ene += jeffreys + static_cast<double>(narg)*normalisation;
1176 108 : if(doscale_ || doregres_zero_) {
1177 0 : ene += jeffreys;
1178 : }
1179 108 : if(dooffset_) {
1180 108 : ene += jeffreys;
1181 : }
1182 :
1183 108 : return kbt_ * ene;
1184 : }
1185 :
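// energy for NOISETYPE=MGAUSS: gaussian noise with one uncertainty parameter
// per data point (normalisation and Jeffreys prior per data point)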
1186 152 : double Metainference::getEnergyGJE(const std::vector<double> &mean, const std::vector<double> &sigma,
1187 : const double scale, const double offset) {
1188 152 : const double scale2 = scale*scale;
1189 :
1190 : double ene = 0.0;
1191 152 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(ene)
1192 : {
1193 : #pragma omp for reduction( + : ene)
1194 : for(unsigned i=0; i<narg; ++i) {
1195 : const double inv_s2 = 1./(sigma[i]*sigma[i] + scale2*sigma_mean2_[i]);
1196 : const double inv_sss = 1./(sigma[i]*sigma[i] + sigma_mean2_[i]);
1197 : double dev = scale*mean[i]-parameters[i]+offset;
1198 : // deviation + normalisation + jeffrey
1199 : const double normalisation = -0.5*std::log(0.5/M_PI*inv_s2);
1200 : const double jeffreys = -0.5*std::log(2.*inv_sss);
1201 : ene += 0.5*dev*dev*inv_s2 + normalisation + jeffreys;
1202 : if(doscale_ || doregres_zero_) {
1203 : ene += jeffreys;
1204 : }
1205 : if(dooffset_) {
1206 : ene += jeffreys;
1207 : }
1208 : }
1209 : }
1210 152 : return kbt_ * ene;
1211 : }
1212 :
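// MC move of the ftilde estimators (GENERIC noise model): each ftilde is
// displaced by a gaussian step of width Dftilde*sigma_mean and the move is
// accepted with the Metropolis criterion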
1213 36 : void Metainference::moveTilde(const std::vector<double> &mean_, double &old_energy) {
1214 36 : std::vector<double> new_ftilde(sigma_.size());
1215 36 : new_ftilde = ftilde_;
1216 :
1217 : // change all tildes
1218 108 : for(unsigned j=0; j<sigma_.size(); j++) {
1219 72 : const double r3 = random[0].Gaussian();
1220 72 : const double ds3 = Dftilde_*std::sqrt(sigma_mean2_[j])*r3;
1221 72 : new_ftilde[j] = ftilde_[j] + ds3;
1222 : }
1223 : // calculate new energy
1224 36 : double new_energy = getEnergyMIGEN(mean_,new_ftilde,sigma_,scale_,offset_);
1225 :
1226 : // accept or reject
1227 36 : const double delta = ( new_energy - old_energy ) / kbt_;
1228 : // if delta is negative always accept move
1229 36 : if( delta <= 0.0 ) {
1230 36 : old_energy = new_energy;
1231 36 : ftilde_ = new_ftilde;
1232 36 : MCacceptFT_++;
1233 : // otherwise extract random number
1234 : } else {
1235 0 : const double s = random[0].RandU01();
1236 0 : if( s < std::exp(-delta) ) {
1237 0 : old_energy = new_energy;
1238 0 : ftilde_ = new_ftilde;
1239 0 : MCacceptFT_++;
1240 : }
1241 : }
1242 36 : }
1243 :
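// MC move of the scale and/or offset: with a flat prior the proposal is
// reflected at the boundaries; acceptance uses the energies summed over all
// replicas, because scale and offset are common to all of them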
1244 216 : void Metainference::moveScaleOffset(const std::vector<double> &mean_, double &old_energy) {
1245 216 : double new_scale = scale_;
1246 :
1247 216 : if(doscale_) {
1248 144 : if(scale_prior_==SC_FLAT) {
1249 144 : const double r1 = random[1].Gaussian();
1250 144 : const double ds1 = Dscale_*r1;
1251 144 : new_scale += ds1;
1252 : // check boundaries
1253 144 : if(new_scale > scale_max_) {
1254 0 : new_scale = 2.0 * scale_max_ - new_scale;
1255 : }
1256 144 : if(new_scale < scale_min_) {
1257 0 : new_scale = 2.0 * scale_min_ - new_scale;
1258 : }
1259 : } else {
1260 0 : const double r1 = random[1].Gaussian();
1261 0 : const double ds1 = 0.5*(scale_mu_-new_scale)+Dscale_*std::exp(1)/M_PI*r1;
1262 0 : new_scale += ds1;
1263 : }
1264 : }
1265 :
1266 216 : double new_offset = offset_;
1267 :
1268 216 : if(dooffset_) {
1269 72 : if(offset_prior_==SC_FLAT) {
1270 72 : const double r1 = random[1].Gaussian();
1271 72 : const double ds1 = Doffset_*r1;
1272 72 : new_offset += ds1;
1273 : // check boundaries
1274 72 : if(new_offset > offset_max_) {
1275 0 : new_offset = 2.0 * offset_max_ - new_offset;
1276 : }
1277 72 : if(new_offset < offset_min_) {
1278 0 : new_offset = 2.0 * offset_min_ - new_offset;
1279 : }
1280 : } else {
1281 0 : const double r1 = random[1].Gaussian();
1282 0 : const double ds1 = 0.5*(offset_mu_-new_offset)+Doffset_*std::exp(1)/M_PI*r1;
1283 0 : new_offset += ds1;
1284 : }
1285 : }
1286 :
1287 : // calculate new energy
1288 : double new_energy = 0.;
1289 :
1290 216 : switch(noise_type_) {
1291 36 : case GAUSS:
1292 36 : new_energy = getEnergyGJ(mean_,sigma_,new_scale,new_offset);
1293 : break;
1294 48 : case MGAUSS:
1295 48 : new_energy = getEnergyGJE(mean_,sigma_,new_scale,new_offset);
1296 : break;
1297 48 : case OUTLIERS:
1298 48 : new_energy = getEnergySP(mean_,sigma_,new_scale,new_offset);
1299 : break;
1300 48 : case MOUTLIERS:
1301 48 : new_energy = getEnergySPE(mean_,sigma_,new_scale,new_offset);
1302 : break;
1303 36 : case GENERIC:
1304 36 : new_energy = getEnergyMIGEN(mean_,ftilde_,sigma_,new_scale,new_offset);
1305 : break;
1306 : }
1307 :
1308 : // for the scale/offset we need to consider the total energy
1309 216 : std::vector<double> totenergies(2);
1310 216 : if(master) {
1311 144 : totenergies[0] = old_energy;
1312 144 : totenergies[1] = new_energy;
1313 144 : if(nrep_>1) {
1314 72 : multi_sim_comm.Sum(totenergies);
1315 : }
1316 : } else {
1317 72 : totenergies[0] = 0;
1318 72 : totenergies[1] = 0;
1319 : }
1320 216 : comm.Sum(totenergies);
1321 :
1322 : // accept or reject
1323 216 : const double delta = ( totenergies[1] - totenergies[0] ) / kbt_;
1324 : // if delta is negative always accept move
1325 216 : if( delta <= 0.0 ) {
1326 216 : old_energy = new_energy;
1327 216 : scale_ = new_scale;
1328 216 : offset_ = new_offset;
1329 216 : MCacceptScale_++;
1330 : // otherwise extract random number
1331 : } else {
1332 0 : double s = random[1].RandU01();
1333 0 : if( s < std::exp(-delta) ) {
1334 0 : old_energy = new_energy;
1335 0 : scale_ = new_scale;
1336 0 : offset_ = new_offset;
1337 0 : MCacceptScale_++;
1338 : }
1339 : }
1340 216 : }
1341 :
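// MC move of the uncertainty parameters: either all sigmas at once or a
// random chunk of MC_CHUNKSIZE sigmas, with reflection at SIGMA_MIN/SIGMA_MAX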
1342 280 : void Metainference::moveSigmas(const std::vector<double> &mean_, double &old_energy, const unsigned i, const std::vector<unsigned> &indices, bool &breaknow) {
1343 280 : std::vector<double> new_sigma(sigma_.size());
1344 280 : new_sigma = sigma_;
1345 :
1346 : // change MCchunksize_ sigmas
1347 280 : if (MCchunksize_ > 0) {
1348 6 : if ((MCchunksize_ * i) >= sigma_.size()) {
1349 : // This means we are not moving any sigma, so we should break immediately
1350 0 : breaknow = true;
1351 : }
1352 :
1353 : // change random sigmas
1354 12 : for(unsigned j=0; j<MCchunksize_; j++) {
1355 6 : const unsigned shuffle_index = j + MCchunksize_ * i;
1356 6 : if (shuffle_index >= sigma_.size()) {
1357 : // Going any further will segfault but we should still evaluate the sigmas we changed
1358 : break;
1359 : }
1360 6 : const unsigned index = indices[shuffle_index];
1361 6 : const double r2 = random[0].Gaussian();
1362 6 : const double ds2 = Dsigma_[index]*r2;
1363 6 : new_sigma[index] = sigma_[index] + ds2;
1364 : // check boundaries
1365 6 : if(new_sigma[index] > sigma_max_[index]) {
1366 0 : new_sigma[index] = 2.0 * sigma_max_[index] - new_sigma[index];
1367 : }
1368 6 : if(new_sigma[index] < sigma_min_[index]) {
1369 0 : new_sigma[index] = 2.0 * sigma_min_[index] - new_sigma[index];
1370 : }
1371 : }
1372 : } else {
1373 : // change all sigmas
1374 3224 : for(unsigned j=0; j<sigma_.size(); j++) {
1375 2950 : const double r2 = random[0].Gaussian();
1376 2950 : const double ds2 = Dsigma_[j]*r2;
1377 2950 : new_sigma[j] = sigma_[j] + ds2;
1378 : // check boundaries
1379 2950 : if(new_sigma[j] > sigma_max_[j]) {
1380 0 : new_sigma[j] = 2.0 * sigma_max_[j] - new_sigma[j];
1381 : }
1382 2950 : if(new_sigma[j] < sigma_min_[j]) {
1383 0 : new_sigma[j] = 2.0 * sigma_min_[j] - new_sigma[j];
1384 : }
1385 : }
1386 : }
1387 :
1388 280 : if (breaknow) {
1389 : // We didn't move any sigmas, so no sense in evaluating anything
1390 : return;
1391 : }
1392 :
1393 : // calculate new energy
1394 : double new_energy = 0.;
1395 280 : switch(noise_type_) {
1396 36 : case GAUSS:
1397 36 : new_energy = getEnergyGJ(mean_,new_sigma,scale_,offset_);
1398 : break;
1399 52 : case MGAUSS:
1400 52 : new_energy = getEnergyGJE(mean_,new_sigma,scale_,offset_);
1401 : break;
1402 108 : case OUTLIERS:
1403 108 : new_energy = getEnergySP(mean_,new_sigma,scale_,offset_);
1404 : break;
1405 48 : case MOUTLIERS:
1406 48 : new_energy = getEnergySPE(mean_,new_sigma,scale_,offset_);
1407 : break;
1408 36 : case GENERIC:
1409 36 : new_energy = getEnergyMIGEN(mean_,ftilde_,new_sigma,scale_,offset_);
1410 : break;
1411 : }
1412 :
1413 : // accept or reject
1414 280 : const double delta = ( new_energy - old_energy ) / kbt_;
1415 : // if delta is negative always accept move
1416 280 : if( delta <= 0.0 ) {
1417 280 : old_energy = new_energy;
1418 280 : sigma_ = new_sigma;
1419 280 : MCaccept_++;
1420 : // otherwise extract random number
1421 : } else {
1422 0 : const double s = random[0].RandU01();
1423 0 : if( s < std::exp(-delta) ) {
1424 0 : old_energy = new_energy;
1425 0 : sigma_ = new_sigma;
1426 0 : MCaccept_++;
1427 : }
1428 : }
1429 : }
1430 :
1431 280 : double Metainference::doMonteCarlo(const std::vector<double> &mean_) {
1432 : // calculate old energy with the updated coordinates
1433 280 : double old_energy=0.;
1434 :
1435 280 : switch(noise_type_) {
1436 36 : case GAUSS:
1437 36 : old_energy = getEnergyGJ(mean_,sigma_,scale_,offset_);
1438 36 : break;
1439 52 : case MGAUSS:
1440 52 : old_energy = getEnergyGJE(mean_,sigma_,scale_,offset_);
1441 52 : break;
1442 108 : case OUTLIERS:
1443 108 : old_energy = getEnergySP(mean_,sigma_,scale_,offset_);
1444 108 : break;
1445 48 : case MOUTLIERS:
1446 48 : old_energy = getEnergySPE(mean_,sigma_,scale_,offset_);
1447 48 : break;
1448 36 : case GENERIC:
1449 36 : old_energy = getEnergyMIGEN(mean_,ftilde_,sigma_,scale_,offset_);
1450 36 : break;
1451 : }
1452 :
1453 : // do not run MC if this is a replica-exchange trial
1454 280 : if(!getExchangeStep()) {
1455 :
1456 : // Create a std::vector of sigma indices to be visited in random order
1457 : std::vector<unsigned> indices;
1458 280 : if (MCchunksize_ > 0) {
1459 12 : for (unsigned j=0; j<sigma_.size(); j++) {
1460 6 : indices.push_back(j);
1461 : }
1462 6 : random[2].Shuffle(indices);
1463 : }
1464 280 : bool breaknow = false;
1465 :
1466 : // cycle on MC steps
1467 560 : for(unsigned i=0; i<MCsteps_; ++i) {
1468 280 : MCtrial_++;
1469 : // propose move for ftilde
1470 280 : if(noise_type_==GENERIC) {
1471 36 : moveTilde(mean_, old_energy);
1472 : }
1473 : // propose move for scale and/or offset
1474 280 : if(doscale_||dooffset_) {
1475 216 : moveScaleOffset(mean_, old_energy);
1476 : }
1477 : // propose move for sigma
1478 280 : moveSigmas(mean_, old_energy, i, indices, breaknow);
1479 : // exit from the loop if this is the case
1480 280 : if(breaknow) {
1481 : break;
1482 : }
1483 : }
1484 :
1485 : /* save the result of the sampling */
1486 : /* ftilde */
1487 280 : if(noise_type_==GENERIC) {
1488 36 : double accept = static_cast<double>(MCacceptFT_) / static_cast<double>(MCtrial_);
1489 36 : valueAcceptFT->set(accept);
1490 108 : for(unsigned i=0; i<sigma_.size(); i++) {
1491 72 : valueFtilde[i]->set(ftilde_[i]);
1492 : }
1493 : }
1494 : /* scale and offset */
1495 280 : if(doscale_ || doregres_zero_) {
1496 150 : valueScale->set(scale_);
1497 : }
1498 280 : if(dooffset_) {
1499 72 : valueOffset->set(offset_);
1500 : }
1501 280 : if(doscale_||dooffset_) {
1502 216 : double accept = static_cast<double>(MCacceptScale_) / static_cast<double>(MCtrial_);
1503 216 : valueAcceptScale->set(accept);
1504 : }
1505 : /* sigmas */
1506 3236 : for(unsigned i=0; i<sigma_.size(); i++) {
1507 2956 : valueSigma[i]->set(sigma_[i]);
1508 : }
1509 280 : double accept = static_cast<double>(MCaccept_) / static_cast<double>(MCtrial_);
1510 280 : valueAccept->set(accept);
1511 : }
1512 :
1513 : // here we sum the score over the replicas to get the full Metainference score that we save as the bias
1514 280 : if(master) {
1515 206 : if(nrep_>1) {
1516 74 : multi_sim_comm.Sum(old_energy);
1517 : }
1518 : } else {
1519 74 : old_energy=0;
1520 : }
1521 280 : comm.Sum(old_energy);
1522 :
1523 : // this is the energy with current coordinates and parameters
1524 280 : return old_energy;
1525 : }
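     :
     : /* Communication pattern used above and throughout this file: only the master
     :    rank of each replica holds meaningful data; multi_sim_comm.Sum(...) reduces
     :    across the replica masters, while the following comm.Sum(...) distributes the
     :    result to the remaining intra-replica ranks, which contribute zero. In sketch
     :    form, for a quantity x computed on the masters:
     :
     :    if(master) { if(nrep_>1) multi_sim_comm.Sum(x); } else { x = 0.; }
     :    comm.Sum(x);
     : */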
1526 :
1527 : /*
1528 : In the following energy-force functions we do not add the normalisation and the Jeffreys priors,
1529 : because they depend only on the noise parameters and thus do not contribute to the forces;
1530 : the correct Metainference energy is the one calculated in the Monte Carlo step.
1531 : */
1532 :
1533 108 : void Metainference::getEnergyForceSP(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1534 : const std::vector<double> &dmean_b) {
1535 108 : const double scale2 = scale_*scale_;
1536 108 : const double sm2 = sigma_mean2_[0];
1537 108 : const double ss2 = sigma_[0]*sigma_[0] + scale2*sm2;
1538 108 : std::vector<double> f(narg,0);
1539 :
1540 108 : if(master) {
1541 84 : #pragma omp parallel num_threads(OpenMP::getNumThreads())
1542 : {
1543 : #pragma omp for
1544 : for(unsigned i=0; i<narg; ++i) {
1545 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1546 : const double a2 = 0.5*dev*dev + ss2;
1547 : if(sm2 > 0.0) {
1548 : const double t = std::exp(-a2/sm2);
1549 : const double dt = 1./t;
1550 : const double dit = 1./(1.-dt);
1551 : f[i] = -scale_*dev*(dit/sm2 + 1./a2);
1552 : } else {
1553 : f[i] = -scale_*dev*(1./a2);
1554 : }
1555 : }
1556 : }
1557 : // collect contribution to forces and energy from other replicas
1558 84 : if(nrep_>1) {
1559 24 : multi_sim_comm.Sum(&f[0],narg);
1560 : }
1561 : }
1562 : // intra-replica summation
1563 108 : comm.Sum(&f[0],narg);
1564 :
1565 : double w_tmp = 0.;
1566 720 : for(unsigned i=0; i<narg; ++i) {
1567 612 : setOutputForce(i, kbt_*f[i]*dmean_x[i]);
1568 612 : w_tmp += kbt_*f[i]*dmean_b[i];
1569 : }
1570 :
1571 108 : if(do_reweight_) {
1572 48 : setOutputForce(narg, w_tmp);
1573 96 : getPntrToComponent("biasDer")->set(-w_tmp);
1574 : }
1575 108 : }
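     :
     : /* Sketch of the force expression above: up to constants, the OUTLIERS marginal
     :    likelihood per data point is p ~ (1 - exp(-a2/sm2))/a2 with a2 = 0.5*dev*dev + ss2,
     :    so d(log p)/d(dev) = -dev*(dit/sm2 + 1/a2). f[i] is this multiplied by scale_
     :    (chain rule through dev), and the final kbt_*f[i]*dmean_x[i] factor propagates
     :    it through the replica-averaged mean. */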
1576 :
1577 48 : void Metainference::getEnergyForceSPE(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1578 : const std::vector<double> &dmean_b) {
1579 48 : const double scale2 = scale_*scale_;
1580 48 : std::vector<double> f(narg,0);
1581 :
1582 48 : if(master) {
1583 24 : #pragma omp parallel num_threads(OpenMP::getNumThreads())
1584 : {
1585 : #pragma omp for
1586 : for(unsigned i=0; i<narg; ++i) {
1587 : const double sm2 = sigma_mean2_[i];
1588 : const double ss2 = sigma_[i]*sigma_[i] + scale2*sm2;
1589 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1590 : const double a2 = 0.5*dev*dev + ss2;
1591 : if(sm2 > 0.0) {
1592 : const double t = std::exp(-a2/sm2);
1593 : const double dt = 1./t;
1594 : const double dit = 1./(1.-dt);
1595 : f[i] = -scale_*dev*(dit/sm2 + 1./a2);
1596 : } else {
1597 : f[i] = -scale_*dev*(1./a2);
1598 : }
1599 : }
1600 : }
1601 : // collect contribution to forces and energy from other replicas
1602 24 : if(nrep_>1) {
1603 24 : multi_sim_comm.Sum(&f[0],narg);
1604 : }
1605 : }
1606 48 : comm.Sum(&f[0],narg);
1607 :
1608 : double w_tmp = 0.;
1609 240 : for(unsigned i=0; i<narg; ++i) {
1610 192 : setOutputForce(i, kbt_ * dmean_x[i] * f[i]);
1611 192 : w_tmp += kbt_ * dmean_b[i] *f[i];
1612 : }
1613 :
1614 48 : if(do_reweight_) {
1615 48 : setOutputForce(narg, w_tmp);
1616 96 : getPntrToComponent("biasDer")->set(-w_tmp);
1617 : }
1618 48 : }
1619 :
1620 36 : void Metainference::getEnergyForceGJ(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1621 : const std::vector<double> &dmean_b) {
1622 36 : const double scale2 = scale_*scale_;
1623 36 : double inv_s2=0.;
1624 :
1625 36 : if(master) {
1626 36 : inv_s2 = 1./(sigma_[0]*sigma_[0] + scale2*sigma_mean2_[0]);
1627 36 : if(nrep_>1) {
1628 0 : multi_sim_comm.Sum(inv_s2);
1629 : }
1630 : }
1631 36 : comm.Sum(inv_s2);
1632 :
1633 : double w_tmp = 0.;
1634 36 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(w_tmp)
1635 : {
1636 : #pragma omp for reduction( + : w_tmp)
1637 : for(unsigned i=0; i<narg; ++i) {
1638 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1639 : const double mult = dev*scale_*inv_s2;
1640 : setOutputForce(i, -kbt_*dmean_x[i]*mult);
1641 : w_tmp += kbt_*dmean_b[i]*mult;
1642 : }
1643 : }
1644 :
1645 36 : if(do_reweight_) {
1646 0 : setOutputForce(narg, -w_tmp);
1647 0 : getPntrToComponent("biasDer")->set(w_tmp);
1648 : }
1649 36 : }
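     :
     : /* For GAUSS noise the energy is quadratic in the deviations,
     :    E = 0.5*kbt_*sum_i dev_i*dev_i*inv_s2 plus sigma-only terms, so
     :    -dE/d(mean_i) = -kbt_*scale_*dev_i*inv_s2*dmean_x[i], which is the mult
     :    term used above. getEnergyForceGJE below is the same expression with a
     :    per-data-point inv_s2. */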
1650 :
1651 52 : void Metainference::getEnergyForceGJE(const std::vector<double> &mean, const std::vector<double> &dmean_x,
1652 : const std::vector<double> &dmean_b) {
1653 52 : const double scale2 = scale_*scale_;
1654 52 : std::vector<double> inv_s2(sigma_.size(),0.);
1655 :
1656 52 : if(master) {
1657 1300 : for(unsigned i=0; i<sigma_.size(); ++i) {
1658 1274 : inv_s2[i] = 1./(sigma_[i]*sigma_[i] + scale2*sigma_mean2_[i]);
1659 : }
1660 26 : if(nrep_>1) {
1661 26 : multi_sim_comm.Sum(&inv_s2[0],sigma_.size());
1662 : }
1663 : }
1664 52 : comm.Sum(&inv_s2[0],sigma_.size());
1665 :
1666 : double w_tmp = 0.;
1667 52 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(w_tmp)
1668 : {
1669 : #pragma omp for reduction( + : w_tmp)
1670 : for(unsigned i=0; i<narg; ++i) {
1671 : const double dev = scale_*mean[i]-parameters[i]+offset_;
1672 : const double mult = dev*scale_*inv_s2[i];
1673 : setOutputForce(i, -kbt_*dmean_x[i]*mult);
1674 : w_tmp += kbt_*dmean_b[i]*mult;
1675 : }
1676 : }
1677 :
1678 52 : if(do_reweight_) {
1679 52 : setOutputForce(narg, -w_tmp);
1680 104 : getPntrToComponent("biasDer")->set(w_tmp);
1681 : }
1682 52 : }
1683 :
1684 36 : void Metainference::getEnergyForceMIGEN(const std::vector<double> &mean, const std::vector<double> &dmean_x, const std::vector<double> &dmean_b) {
1685 36 : std::vector<double> inv_s2(sigma_.size(),0.);
1686 36 : std::vector<double> dev(sigma_.size(),0.);
1687 36 : std::vector<double> dev2(sigma_.size(),0.);
1688 :
1689 108 : for(unsigned i=0; i<sigma_.size(); ++i) {
1690 72 : inv_s2[i] = 1./sigma_mean2_[i];
1691 72 : if(master) {
1692 72 : dev[i] = (mean[i]-ftilde_[i]);
1693 72 : dev2[i] = dev[i]*dev[i];
1694 : }
1695 : }
1696 36 : if(master&&nrep_>1) {
1697 0 : multi_sim_comm.Sum(&dev[0],dev.size());
1698 0 : multi_sim_comm.Sum(&dev2[0],dev2.size());
1699 : }
1700 36 : comm.Sum(&dev[0],dev.size());
1701 36 : comm.Sum(&dev2[0],dev2.size());
1702 :
1703 : double dene_b = 0.;
1704 36 : #pragma omp parallel num_threads(OpenMP::getNumThreads()) shared(dene_b)
1705 : {
1706 : #pragma omp for reduction( + : dene_b)
1707 : for(unsigned i=0; i<narg; ++i) {
1708 : const double dene_x = kbt_*inv_s2[i]*dmean_x[i]*dev[i];
1709 : dene_b += kbt_*inv_s2[i]*dmean_b[i]*dev[i];
1710 : setOutputForce(i, -dene_x);
1711 : }
1712 : }
1713 :
1714 36 : if(do_reweight_) {
1715 0 : setOutputForce(narg, -dene_b);
1716 0 : getPntrToComponent("biasDer")->set(dene_b);
1717 : }
1718 36 : }
1719 :
1720 280 : void Metainference::get_weights(const unsigned iselect, double &weight, double &norm, double &neff) {
1721 280 : const double dnrep = static_cast<double>(nrep_);
1722 : // calculate the weights either from BIAS
1723 280 : if(do_reweight_) {
1724 148 : std::vector<double> bias(nrep_,0);
1725 148 : if(master) {
1726 74 : bias[replica_] = getArgument(narg);
1727 74 : if(nrep_>1) {
1728 74 : multi_sim_comm.Sum(&bias[0], nrep_);
1729 : }
1730 : }
1731 148 : comm.Sum(&bias[0], nrep_);
1732 :
1733 : // accumulate weights
1734 148 : const double decay = 1./static_cast<double> (average_weights_stride_);
1735 148 : if(!firstTimeW[iselect]) {
1736 408 : for(unsigned i=0; i<nrep_; ++i) {
1737 272 : const double delta=bias[i]-average_weights_[iselect][i];
1738 272 : average_weights_[iselect][i]+=decay*delta;
1739 : }
1740 : } else {
1741 : firstTimeW[iselect] = false;
1742 36 : for(unsigned i=0; i<nrep_; ++i) {
1743 24 : average_weights_[iselect][i] = bias[i];
1744 : }
1745 : }
1746 :
1747 : // shift by the maximum and exponentiate the averaged biases back into bias
1748 148 : const double maxbias = *(std::max_element(average_weights_[iselect].begin(), average_weights_[iselect].end()));
1749 444 : for(unsigned i=0; i<nrep_; ++i) {
1750 296 : bias[i] = std::exp((average_weights_[iselect][i]-maxbias)/kbt_);
1751 : }
1752 : // set the local weight, the norm and the effective number of replicas
1753 148 : weight = bias[replica_];
1754 : double w2=0.;
1755 444 : for(unsigned i=0; i<nrep_; ++i) {
1756 296 : w2 += bias[i]*bias[i];
1757 296 : norm += bias[i];
1758 : }
1759 148 : neff = norm*norm/w2;
1760 296 : getPntrToComponent("weight")->set(weight/norm);
1761 : } else {
1762 : // or arithmetic ones
1763 132 : neff = dnrep;
1764 132 : weight = 1.0;
1765 132 : norm = dnrep;
1766 : }
1767 280 : getPntrToComponent("neff")->set(neff);
1768 280 : }
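     :
     : /* Two quantities computed above, in sketch form. The replica biases are smoothed
     :    with an exponential moving average whose decay is 1/average_weights_stride_:
     :        avg_i += (bias_i - avg_i)/average_weights_stride_
     :    and neff is the Kish effective sample size of the resulting weights,
     :        neff = (sum_i w_i)^2 / (sum_i w_i^2),
     :    which equals nrep_ for uniform weights and tends to 1 when a single replica
     :    dominates the average. */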
1769 :
1770 280 : void Metainference::get_sigma_mean(const unsigned iselect, const double weight, const double norm, const double neff, const std::vector<double> &mean) {
1771 280 : const double dnrep = static_cast<double>(nrep_);
1772 280 : std::vector<double> sigma_mean2_tmp(sigma_mean2_.size(), 0.);
1773 :
1774 280 : if(do_optsigmamean_>0) {
1775 : // remove the first entry of the history vector
1776 0 : if(sigma_mean2_last_[iselect][0].size()==optsigmamean_stride_&&optsigmamean_stride_>0)
1777 0 : for(unsigned i=0; i<narg; ++i) {
1778 0 : sigma_mean2_last_[iselect][i].erase(sigma_mean2_last_[iselect][i].begin());
1779 : }
1780 : /* this is the current estimate of sigma mean for each argument;
1781 : there is one per argument in any case because the maximum among
1782 : them is used in the case of GAUSS/OUTLIERS */
1783 0 : std::vector<double> sigma_mean2_now(narg,0);
1784 0 : if(master) {
1785 0 : for(unsigned i=0; i<narg; ++i) {
1786 0 : sigma_mean2_now[i] = weight*(getArgument(i)-mean[i])*(getArgument(i)-mean[i]);
1787 : }
1788 0 : if(nrep_>1) {
1789 0 : multi_sim_comm.Sum(&sigma_mean2_now[0], narg);
1790 : }
1791 : }
1792 0 : comm.Sum(&sigma_mean2_now[0], narg);
1793 0 : for(unsigned i=0; i<narg; ++i) {
1794 0 : sigma_mean2_now[i] *= 1.0/(neff-1.)/norm;
1795 : }
1796 :
1797 : // add sigma_mean2 to history
1798 0 : if(optsigmamean_stride_>0) {
1799 0 : for(unsigned i=0; i<narg; ++i) {
1800 0 : sigma_mean2_last_[iselect][i].push_back(sigma_mean2_now[i]);
1801 : }
1802 : } else {
1803 0 : for(unsigned i=0; i<narg; ++i)
1804 0 : if(sigma_mean2_now[i] > sigma_mean2_last_[iselect][i][0]) {
1805 0 : sigma_mean2_last_[iselect][i][0] = sigma_mean2_now[i];
1806 : }
1807 : }
1808 :
1809 0 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
1810 0 : for(unsigned i=0; i<narg; ++i) {
1811 : /* set to the maximum in the history vector */
1812 0 : sigma_mean2_tmp[i] = *max_element(sigma_mean2_last_[iselect][i].begin(), sigma_mean2_last_[iselect][i].end());
1813 : /* the standard error of the mean */
1814 0 : valueSigmaMean[i]->set(std::sqrt(sigma_mean2_tmp[i]));
1815 0 : if(noise_type_==GENERIC) {
1816 0 : sigma_min_[i] = std::sqrt(sigma_mean2_tmp[i]);
1817 0 : if(sigma_[i] < sigma_min_[i]) {
1818 0 : sigma_[i] = sigma_min_[i];
1819 : }
1820 : }
1821 : }
1822 0 : } else if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
1823 : // find maximum for each data point
1824 : std::vector <double> max_values;
1825 0 : for(unsigned i=0; i<narg; ++i) {
1826 0 : max_values.push_back(*max_element(sigma_mean2_last_[iselect][i].begin(), sigma_mean2_last_[iselect][i].end()));
1827 : }
1828 : // find maximum across data points
1829 0 : const double max_now = *max_element(max_values.begin(), max_values.end());
1830 : // set new value
1831 0 : sigma_mean2_tmp[0] = max_now;
1832 0 : valueSigmaMean[0]->set(std::sqrt(sigma_mean2_tmp[0]));
1833 : }
1834 : // endif sigma mean optimization
1835 : // start sigma max optimization
1836 0 : if(do_optsigmamean_>1&&!sigmamax_opt_done_) {
1837 0 : for(unsigned i=0; i<sigma_max_.size(); i++) {
1838 0 : if(sigma_max_est_[i]<sigma_mean2_tmp[i]&&optimized_step_>optsigmamean_stride_) {
1839 0 : sigma_max_est_[i]=sigma_mean2_tmp[i];
1840 : }
1841 : // ready to set once and for all the value of sigma_max
1842 0 : if(optimized_step_==N_optimized_step_) {
1843 0 : sigmamax_opt_done_=true;
1844 0 : for(unsigned i=0; i<sigma_max_.size(); i++) {
1845 0 : sigma_max_[i]=std::sqrt(sigma_max_est_[i]*dnrep);
1846 0 : Dsigma_[i] = 0.05*(sigma_max_[i] - sigma_min_[i]);
1847 0 : if(sigma_[i]>sigma_max_[i]) {
1848 0 : sigma_[i]=sigma_max_[i];
1849 : }
1850 : }
1851 : }
1852 : }
1853 0 : optimized_step_++;
1854 : }
1855 : // end sigma max optimization
1856 : } else {
1857 280 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
1858 2948 : for(unsigned i=0; i<narg; ++i) {
1859 2812 : sigma_mean2_tmp[i] = sigma_mean2_last_[iselect][i][0];
1860 2812 : valueSigmaMean[i]->set(std::sqrt(sigma_mean2_tmp[i]));
1861 : }
1862 144 : } else if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
1863 144 : sigma_mean2_tmp[0] = sigma_mean2_last_[iselect][0][0];
1864 144 : valueSigmaMean[0]->set(std::sqrt(sigma_mean2_tmp[0]));
1865 : }
1866 : }
1867 :
1868 280 : sigma_mean2_ = sigma_mean2_tmp;
1869 280 : }
1870 :
1871 280 : void Metainference::replica_averaging(const double weight, const double norm, std::vector<double> &mean, std::vector<double> &dmean_b) {
1872 280 : if(master) {
1873 2236 : for(unsigned i=0; i<narg; ++i) {
1874 2030 : mean[i] = weight/norm*getArgument(i);
1875 : }
1876 206 : if(nrep_>1) {
1877 74 : multi_sim_comm.Sum(&mean[0], narg);
1878 : }
1879 : }
1880 280 : comm.Sum(&mean[0], narg);
1881 : // set the derivative of the mean with respect to the bias
1882 3776 : for(unsigned i=0; i<narg; ++i) {
1883 3496 : dmean_b[i] = weight/norm/kbt_*(getArgument(i)-mean[i])/static_cast<double>(average_weights_stride_);
1884 : }
1885 :
1886 : // this is only for generic metainference
1887 280 : if(firstTime) {
1888 28 : ftilde_ = mean;
1889 28 : firstTime = false;
1890 : }
1891 280 : }
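     :
     : /* The dmean_b set above follows from differentiating the weighted average: with
     :    weights w_k ~ exp(V_k/kbt_) accumulated over average_weights_stride_ steps,
     :    d(mean_i)/dV_k = (w_k/norm)*(x_ik - mean_i)/(kbt_*average_weights_stride_).
     :    Only the local replica term is needed here, since each replica applies the
     :    force through its own bias argument. */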
1892 :
1893 6 : void Metainference::do_regression_zero(const std::vector<double> &mean) {
1894 : // parameters[i] = scale_ * mean[i]: find scale_ with linear regression
1895 : double num = 0.0;
1896 : double den = 0.0;
1897 48 : for(unsigned i=0; i<parameters.size(); ++i) {
1898 42 : num += mean[i] * parameters[i];
1899 42 : den += mean[i] * mean[i];
1900 : }
1901 6 : if(den>0) {
1902 6 : scale_ = num / den;
1903 : } else {
1904 0 : scale_ = 1.0;
1905 : }
1906 6 : }
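     :
     : /* This is ordinary least squares with zero intercept: scale_ minimises
     :    chi2(s) = sum_i (parameters[i] - s*mean[i])^2, and setting d(chi2)/ds = 0
     :    gives s = sum_i mean[i]*parameters[i] / sum_i mean[i]*mean[i]; the den > 0
     :    guard protects against an all-zero mean. */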
1907 :
1908 280 : void Metainference::calculate() {
1909 : // get step
1910 280 : const long long int step = getStep();
1911 :
1912 : unsigned iselect = 0;
1913 : // set the value of the selector for REM-like approaches
1914 280 : if(selector_.length()>0) {
1915 0 : iselect = static_cast<unsigned>(plumed.passMap[selector_]);
1916 : }
1917 :
1918 : /* 1) collect weights */
1919 280 : double weight = 0.;
1920 280 : double neff = 0.;
1921 280 : double norm = 0.;
1922 280 : get_weights(iselect, weight, norm, neff);
1923 :
1924 : /* 2) calculate average */
1925 280 : std::vector<double> mean(narg,0);
1926 : // this is the derivative of the mean with respect to the argument
1927 280 : std::vector<double> dmean_x(narg,weight/norm);
1928 : // this is the derivative of the mean with respect to the bias
1929 280 : std::vector<double> dmean_b(narg,0);
1930 : // calculate it
1931 280 : replica_averaging(weight, norm, mean, dmean_b);
1932 :
1933 : /* 3) calculate parameters */
1934 280 : get_sigma_mean(iselect, weight, norm, neff, mean);
1935 :
1936 : // in case of regression with zero intercept, calculate scale
1937 280 : if(doregres_zero_ && step%nregres_zero_==0) {
1938 6 : do_regression_zero(mean);
1939 : }
1940 :
1941 : /* 4) run Monte Carlo */
1942 280 : double ene = doMonteCarlo(mean);
1943 :
1944 : // calculate bias and forces
1945 280 : switch(noise_type_) {
1946 36 : case GAUSS:
1947 36 : getEnergyForceGJ(mean, dmean_x, dmean_b);
1948 : break;
1949 52 : case MGAUSS:
1950 52 : getEnergyForceGJE(mean, dmean_x, dmean_b);
1951 : break;
1952 108 : case OUTLIERS:
1953 108 : getEnergyForceSP(mean, dmean_x, dmean_b);
1954 : break;
1955 48 : case MOUTLIERS:
1956 48 : getEnergyForceSPE(mean, dmean_x, dmean_b);
1957 : break;
1958 36 : case GENERIC:
1959 36 : getEnergyForceMIGEN(mean, dmean_x, dmean_b);
1960 : break;
1961 : }
1962 :
1963 280 : setBias(ene);
1964 280 : }
1965 :
1966 32 : void Metainference::writeStatus() {
1967 32 : sfile_.rewind();
1968 32 : sfile_.printField("time",getTimeStep()*getStep());
1969 : //nsel
1970 64 : for(unsigned i=0; i<sigma_mean2_last_.size(); i++) {
1971 : std::string msg_i,msg_j;
1972 32 : Tools::convert(i,msg_i);
1973 : std::vector <double> max_values;
1974 : //narg
1975 2518 : for(unsigned j=0; j<narg; ++j) {
1976 2486 : Tools::convert(j,msg_j);
1977 2486 : std::string msg = msg_i+"_"+msg_j;
1978 2486 : if(noise_type_==MGAUSS||noise_type_==MOUTLIERS||noise_type_==GENERIC) {
1979 7182 : sfile_.printField("sigmaMean_"+msg,std::sqrt(*max_element(sigma_mean2_last_[i][j].begin(), sigma_mean2_last_[i][j].end())));
1980 : } else {
1981 : // find maximum for each data point
1982 184 : max_values.push_back(*max_element(sigma_mean2_last_[i][j].begin(), sigma_mean2_last_[i][j].end()));
1983 : }
1984 : }
1985 32 : if(noise_type_==GAUSS||noise_type_==OUTLIERS) {
1986 : // find maximum across data points
1987 17 : const double max_now = std::sqrt(*max_element(max_values.begin(), max_values.end()));
1988 17 : Tools::convert(0,msg_j);
1989 17 : std::string msg = msg_i+"_"+msg_j;
1990 34 : sfile_.printField("sigmaMean_"+msg, max_now);
1991 : }
1992 : }
1993 2443 : for(unsigned i=0; i<sigma_.size(); ++i) {
1994 : std::string msg;
1995 2411 : Tools::convert(i,msg);
1996 4822 : sfile_.printField("sigma_"+msg,sigma_[i]);
1997 : }
1998 2443 : for(unsigned i=0; i<sigma_max_.size(); ++i) {
1999 : std::string msg;
2000 2411 : Tools::convert(i,msg);
2001 4822 : sfile_.printField("sigma_max_"+msg,sigma_max_[i]);
2002 : }
2003 32 : if(noise_type_==GENERIC) {
2004 9 : for(unsigned i=0; i<ftilde_.size(); ++i) {
2005 : std::string msg;
2006 6 : Tools::convert(i,msg);
2007 12 : sfile_.printField("ftilde_"+msg,ftilde_[i]);
2008 : }
2009 : }
2010 32 : sfile_.printField("scale0_",scale_);
2011 32 : sfile_.printField("offset0_",offset_);
2012 64 : for(unsigned i=0; i<average_weights_.size(); i++) {
2013 : std::string msg_i;
2014 32 : Tools::convert(i,msg_i);
2015 64 : sfile_.printField("weight_"+msg_i,average_weights_[i][replica_]);
2016 : }
2017 32 : sfile_.printField();
2018 32 : sfile_.flush();
2019 32 : }
2020 :
2021 280 : void Metainference::update() {
2022 : // write status file
2023 280 : if(write_stride_>0&& (getStep()%write_stride_==0 || getCPT()) ) {
2024 32 : writeStatus();
2025 : }
2026 280 : }
2027 :
2028 : }
2029 : }
2030 :