LCOV - code coverage report
Current view: top level - bias - MaxEnt.cpp (source / functions)
Test: plumed test coverage
Date: 2024-10-11 08:09:47
                 Hit   Total   Coverage
Lines:           227     236     96.2 %
Functions:        13      14     92.9 %

          Line data    Source code
       1             : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
       2             :    Copyright (c) 2016-2023 The plumed team
       3             :    (see the PEOPLE file at the root of the distribution for a list of names)
       4             : 
       5             :    See http://www.plumed.org for more information.
       6             : 
       7             :    This file is part of plumed, version 2.
       8             : 
       9             :    plumed is free software: you can redistribute it and/or modify
      10             :    it under the terms of the GNU Lesser General Public License as published by
      11             :    the Free Software Foundation, either version 3 of the License, or
      12             :    (at your option) any later version.
      13             : 
      14             :    plumed is distributed in the hope that it will be useful,
      15             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      16             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      17             :    GNU Lesser General Public License for more details.
      18             : 
      19             :    You should have received a copy of the GNU Lesser General Public License
      20             :    along with plumed.  If not, see <http://www.gnu.org/licenses/>.
      21             : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
      22             : #include "Bias.h"
      23             : #include "core/PlumedMain.h"
      24             : #include "core/Atoms.h"
      25             : #include "core/ActionRegister.h"
      26             : #include "core/ActionWithValue.h"
      27             : #include "tools/Communicator.h"
      28             : #include "tools/File.h"
      29             : 
      30             : // The original implementation of this method was contributed
      31             : // by Andrea Cesari (andreacesari90@gmail.com).
      32             : // Copyright has been then transferred to PLUMED developers
      33             : // (see https://github.com/plumed/plumed2/blob/master/.github/CONTRIBUTING.md)
      34             : 
      35             : namespace PLMD {
      36             : namespace bias {
      37             : 
      38             : //+PLUMEDOC BIAS MAXENT
      39             : /*
       40             : Add a linear biasing potential on one or more variables \f$f_{i}\left(\boldsymbol{x}\right)\f$ that satisfies the maximum entropy principle as proposed in Ref. \cite cesari2016maxent.
      41             : 
      42             : \warning
       43             :     Notice that the syntax is still under revision and might change.
      44             : 
      45             : The resulting biasing potential is given by:
      46             : \f[
      47             :   V_{BIAS}(\boldsymbol{x},t)=K_{B}T\sum_{i=1}^{\#arguments}f_{i}(\boldsymbol{x},t)\lambda_{i}(t)
      48             : \f]
      49             : Lagrangian multipliers \f$ \lambda_{i}\f$ are updated, every PACE steps, according to the following update rule:
      50             : \f[
      51             : \lambda_{i}=\lambda_{i}+\frac{k_{i}}{1+\frac{t}{\tau_{i}}}\left(f_{exp,i}+\xi_{i}\lambda_{i}-f_{i}(\boldsymbol{x})\right)
      52             : \f]
       53             : \f$k_{i}\f$ sets the initial value of the learning rate and its units are \f$[observable]^{-2}ps^{-1}\f$. It can be set with the keyword KAPPA.
      54             : The number of components for any KAPPA vector must be equal to the number of arguments of the action.
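                      : 
                      : As a minimal illustration only (not PLUMED input), the following standalone sketch applies the update rule above to a single observable. The values chosen for \f$k\f$, \f$\tau\f$, the target and the instantaneous observable are hypothetical, and no noise modelling is used (\f$\xi_{i}=0\f$):
                      : \verbatim
                      : // Standalone sketch of the documented update rule for one Lagrangian multiplier.
                      : // All numbers are hypothetical placeholders.
                      : #include <cstdio>
                      : 
                      : int main() {
                      :   double lambda = 0.0;        // Lagrangian multiplier lambda_i
                      :   const double k = 35000.0;   // initial learning rate (KAPPA)
                      :   const double tau = 0.02;    // damping time of the learning rate (TAU), in ps
                      :   const double f_exp = 0.2;   // experimental target value (AT)
                      :   const double dt = 0.4;      // time between updates (PACE times the MD time step), in ps
                      :   for (int n = 1; n <= 5; ++n) {
                      :     const double t = n * dt;
                      :     const double f = 0.25;    // instantaneous value of the observable (kept fixed here)
                      :     const double xi = 0.0;    // no noise modelling (SIGMA=0)
                      :     lambda += k / (1.0 + t / tau) * (f_exp + xi * lambda - f);
                      :     std::printf("update %d  lambda = %f\n", n, lambda);
                      :   }
                      :   return 0;
                      : }
                      : \endverbatim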
      55             : 
       56             : The variable \f$ \xi_{i}(\lambda) \f$ depends on the prior chosen to model experimental errors. If a GAUSSIAN prior is used then:
      57             : \f[
      58             : \xi_{i}(\lambda)=-\lambda_{i}\sigma^{2}
      59             : \f]
      60             : where \f$ \sigma \f$ is the typical expected error on the observable \f$ f_i\f$.
      61             : For a LAPLACE prior:
      62             : \f[
       63             : \xi_{i}(\lambda)=-\frac{\lambda_{i}\sigma^{2}}{1-\frac{\lambda_{i}^{2}\sigma^{2}}{2}}
       64             : \f]
       65             : 
       66             : The value of \f$ \xi_{i}(\lambda,t)\f$ is written in the output as a component named after the corresponding argument followed by the string _error.
       67             : Setting \f$ \sigma =0\f$ is equivalent to enforcing a pure Maximum Entropy restraint without any noise modelling.
       68             : This method can also be used to enforce inequality restraints, as shown in the following examples.
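                      : 
                      : As a further illustration (again not PLUMED input), a minimal sketch comparing \f$ \xi_{i}(\lambda)\f$ for the two priors described above, with hypothetical values of \f$\lambda\f$ and \f$\sigma\f$, could look like this:
                      : \verbatim
                      : // Sketch of xi(lambda) for the GAUSSIAN and LAPLACE priors documented above.
                      : // The lambda and sigma values are hypothetical placeholders.
                      : #include <cstdio>
                      : 
                      : double xi_gaussian(double lambda, double sigma) {
                      :   return -lambda * sigma * sigma;                     // xi = -lambda*sigma^2
                      : }
                      : 
                      : double xi_laplace(double lambda, double sigma) {
                      :   const double s2 = sigma * sigma;
                      :   return -lambda * s2 / (1.0 - 0.5 * lambda * lambda * s2);
                      : }
                      : 
                      : int main() {
                      :   const double sigma = 0.05;
                      :   for (double lambda = -10.0; lambda <= 10.0; lambda += 5.0)
                      :     std::printf("lambda %6.1f  gaussian %9.5f  laplace %9.5f\n",
                      :                 lambda, xi_gaussian(lambda, sigma), xi_laplace(lambda, sigma));
                      :   return 0;
                      : }
                      : \endverbatim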
      69             : 
      70             : Notice that a similar method is available as \ref EDS, although with different features and using a different optimization algorithm.
      71             : 
      72             : \par Examples
      73             : 
      74             : The following input tells plumed to restrain the distance between atoms 7 and 15
      75             : and the distance between atoms 2 and 19, at different equilibrium
      76             : values, and to print the energy of the restraint.
       77             : Lagrangian multipliers will be printed to a file called restraint.LAGMULT with a stride set by the keyword PACE (here every 200 steps).
       78             : Moreover, plumed will compute the average of each Lagrangian multiplier in the time window [TSTART,TEND] and use it to continue the simulation with fixed Lagrangian multipliers.
      79             : \plumedfile
      80             : DISTANCE ATOMS=7,15 LABEL=d1
      81             : DISTANCE ATOMS=2,19 LABEL=d2
      82             : MAXENT ...
      83             : ARG=d1,d2
      84             : TYPE=EQUAL
      85             : AT=0.2,0.5
      86             : KAPPA=35000.0,35000.0
      87             : TAU=0.02,0.02
      88             : PACE=200
      89             : TSTART=100
      90             : TEND=500
      91             : LABEL=restraint
      92             : ... MAXENT
      93             : PRINT ARG=restraint.bias
      94             : \endplumedfile
       95             : Lagrangian multipliers will be printed to a file called restraint.LAGMULT.
       96             : The following input tells plumed to restrain the distance between atoms 7 and 15
       97             : to be greater than 0.02 and to print the energy of the restraint.
      98             : \plumedfile
      99             : DISTANCE ATOMS=7,15 LABEL=d
     100             : MAXENT ARG=d TYPE=INEQUAL> AT=0.02 KAPPA=35000.0 TAU=3 LABEL=restraint
     101             : PRINT ARG=restraint.bias
     102             : \endplumedfile
     103             : 
     104             : (See also \ref DISTANCE and \ref PRINT).
     105             : 
     106             : */
     107             : //+ENDPLUMEDOC
     108             : 
     109             : class MaxEnt : public Bias {
     110             :   std::vector<double> at;
     111             :   std::vector<double> kappa;
     112             :   std::vector<double> lambda;
     113             :   std::vector<double> avgx;
     114             :   std::vector<double> work;
     115             :   std::vector<double> oldlambda;
     116             :   std::vector<double> tau;
     117             :   std::vector<double> avglambda;
     118             :   std::vector<double> avglambda_restart;
     119             :   std::vector<double> expected_eps;
     120             :   std::vector<double> apply_weights;
     121             :   double sigma;
     122             :   double tstart;
     123             :   double tend;
      124             :   double avgstep; //current number of samples over which to compute the average. Check if it could be replaced by getStep()
     125             :   long int pace_;
     126             :   long int stride_;
     127             :   double totalWork;
     128             :   double BetaReweightBias;
     129             :   double simtemp;
     130             :   std::vector<ActionWithValue*> biases;
     131             :   std::string type;
     132             :   std::string error_type;
     133             :   double alpha;
     134             :   double avg_counter;
     135             :   int learn_replica;
     136             :   Value* valueForce2;
     137             :   Value* valueWork;
     138             :   OFile lagmultOfile_;
     139             :   IFile ifile;
     140             :   std::string lagmultfname;
     141             :   std::string ifilesnames;
     142             :   std::string fmt;
     143             :   bool isFirstStep;
     144             :   bool reweight;
     145             :   bool no_broadcast;
     146             :   bool printFirstStep;
     147             :   std::vector<bool> done_average;
     148             :   int myrep,nrep;
     149             : public:
     150             :   explicit MaxEnt(const ActionOptions&);
     151             :   void calculate() override;
     152             :   void update() override;
     153             :   void update_lambda();
     154             :   static void registerKeywords(Keywords& keys);
     155             :   void ReadLagrangians(IFile &ifile);
     156             :   void WriteLagrangians(std::vector<double> &lagmult,OFile &file);
     157             :   double compute_error(const std::string &err_type,double l);
     158             :   double convert_lambda(const std::string &type,double lold);
     159             :   void check_lambda_boundaries(const std::string &err_type,double &l);
     160             : };
     161       10519 : PLUMED_REGISTER_ACTION(MaxEnt,"MAXENT")
     162             : 
     163          51 : void MaxEnt::registerKeywords(Keywords& keys) {
     164          51 :   Bias::registerKeywords(keys);
     165          51 :   componentsAreNotOptional(keys);
     166          51 :   keys.use("ARG");
     167         102 :   keys.add("compulsory","KAPPA","0.0","specifies the initial value for the learning rate");
      168         102 :   keys.add("compulsory","TAU","Specify the damping time for the learning rate.");
     169         102 :   keys.add("compulsory","TYPE","specify the restraint type. "
     170             :            "EQUAL to restrain the variable at a given equilibrium value "
     171             :            "INEQUAL< to restrain the variable to be smaller than a given value "
     172             :            "INEQUAL> to restrain the variable to be greater than a given value");
      173         102 :   keys.add("optional","ERROR_TYPE","specify the prior on the error to use. "
     174             :            "GAUSSIAN: use a Gaussian prior "
     175             :            "LAPLACE: use a Laplace prior");
      176         102 :   keys.add("optional","TSTART","time (in ps) from which to start averaging the Lagrangian multipliers. By default no average is computed, hence lambda is updated every PACE steps");
      177         102 :   keys.add("optional","TEND","time (in ps) at which to stop computing the average of the Lagrangian multipliers. From this time until the end of the simulation Lagrangian multipliers are kept fixed to the average computed between TSTART and TEND.");
      178         102 :   keys.add("optional","ALPHA","default=1.0; to be used with the LAPLACE keyword, allows one to choose a prior function proportional to a Gaussian times an exponential function. ALPHA=1 corresponds to the LAPLACE prior.");
     179         102 :   keys.add("compulsory","AT","the position of the restraint");
      180         102 :   keys.add("optional","SIGMA","the typical error expected on the observable");
     181         102 :   keys.add("optional","FILE","Lagrangian multipliers output file. The default name is: label name followed by the string .LAGMULT ");
     182         102 :   keys.add("optional","LEARN_REPLICA","In a multiple replica environment specify which is the reference replica. By default replica 0 will be used.");
      183         102 :   keys.add("optional","APPLY_WEIGHTS","Vector of weights with a 1 for each replica that should receive the Lagrangian multipliers from the current one.");
     184         102 :   keys.add("optional","PACE","the frequency for Lagrangian multipliers update");
      185         102 :   keys.add("optional","PRINT_STRIDE","stride of the Lagrangian multipliers output file. If no PRINT_STRIDE is passed they are written every time they are updated (PACE).");
     186         102 :   keys.add("optional","FMT","specify format for Lagrangian multipliers files (useful to decrease the number of digits in regtests)");
     187         102 :   keys.addFlag("REWEIGHT",false,"to be used with plumed driver in order to reweight a trajectory a posteriori");
      188         102 :   keys.addFlag("NO_BROADCAST",false,"If active, Lagrangian multipliers will not be communicated to other replicas.");
     189         102 :   keys.add("optional","TEMP","the system temperature.  This is required if you are reweighting.");
     190         102 :   keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential");
     191         102 :   keys.addOutputComponent("work","default","the instantaneous value of the work done by the biasing force");
     192         102 :   keys.addOutputComponent("_work","default","the instantaneous value of the work done by the biasing force for each argument. "
      193             :                           "These quantities will be named with the arguments of the bias followed by "
     194             :                           "the character string _work.");
     195         102 :   keys.addOutputComponent("_error","default","Instantaneous values of the discrepancy between the observable and the restraint center");
     196         102 :   keys.addOutputComponent("_coupling","default","Instantaneous values of Lagrangian multipliers. They are also written by default in a separate output file.");
     197          51 :   keys.use("RESTART");
     198          51 : }
     199          50 : MaxEnt::MaxEnt(const ActionOptions&ao):
     200             :   PLUMED_BIAS_INIT(ao),
     201         100 :   at(getNumberOfArguments()),
     202          50 :   kappa(getNumberOfArguments(),0.0),
     203          50 :   lambda(getNumberOfArguments(),0.0),
     204          50 :   avgx(getNumberOfArguments(),0.0),
     205          50 :   oldlambda(getNumberOfArguments(),0.0),
     206          50 :   tau(getNumberOfArguments(),getTimeStep()),
     207          50 :   avglambda(getNumberOfArguments(),0.0),
     208          50 :   avglambda_restart(getNumberOfArguments(),0.0),
     209          50 :   expected_eps(getNumberOfArguments(),0.0),
     210          50 :   sigma(0.0),
     211          50 :   pace_(100),
     212          50 :   stride_(100),
     213          50 :   alpha(1.0),
     214          50 :   avg_counter(0.0),
     215          50 :   isFirstStep(true),
     216          50 :   reweight(false),
     217          50 :   no_broadcast(false),
     218          50 :   printFirstStep(true),
     219         150 :   done_average(getNumberOfArguments(),false)
     220             : {
     221          50 :   if(comm.Get_rank()==0) nrep=multi_sim_comm.Get_size();
     222          50 :   if(comm.Get_rank()==0) myrep=multi_sim_comm.Get_rank();
     223          50 :   comm.Bcast(nrep,0);
     224          50 :   comm.Bcast(myrep,0);
     225          50 :   parseFlag("NO_BROADCAST",no_broadcast);
     226             :   //if(no_broadcast){
     227             :   //for(int irep=0;irep<nrep;irep++){
     228             :   //  if(irep!=myrep)
     229             :   //    apply_weights[irep]=0.0;}
     230             :   //}
     231          50 :   avgstep=1.0;
     232          50 :   tstart=-1.0;
     233          50 :   tend=-1.0;
     234          50 :   totalWork=0.0;
     235          50 :   learn_replica=0;
     236             : 
     237          50 :   parse("LEARN_REPLICA",learn_replica);
     238         100 :   parseVector("APPLY_WEIGHTS",apply_weights);
     239          50 :   if(apply_weights.size()==0) apply_weights.resize(nrep,1.0);
     240          50 :   parseVector("KAPPA",kappa);
     241          50 :   parseVector("AT",at);
     242          50 :   parseVector("TAU",tau);
     243         100 :   parse("TYPE",type);
     244             :   error_type="GAUSSIAN";
     245          50 :   parse("ERROR_TYPE",error_type);
     246          50 :   parse("ALPHA",alpha);
     247          50 :   parse("SIGMA",sigma);
     248          50 :   parse("TSTART",tstart);
     249          50 :   if(tstart <0 && tstart != -1.0) error("TSTART should be a positive number");
     250          50 :   parse("TEND",tend);
      251          50 :   if(tend<0 && tend != -1.0) error("TEND should be a positive number");
     252          50 :   if(tend<tstart) error("TEND should be >= TSTART");
     253          50 :   lagmultfname=getLabel()+".LAGMULT";
     254          50 :   parse("FILE",lagmultfname);
     255          50 :   parse("FMT",fmt);
     256          50 :   parse("PACE",pace_);
     257          50 :   if(pace_<=0 ) error("frequency for Lagrangian multipliers update (PACE) is nonsensical");
      258          50 :   stride_=pace_;  //if no PRINT_STRIDE is passed, then Lagrangian multipliers will be printed at each update
     259          50 :   parse("PRINT_STRIDE",stride_);
     260          50 :   if(stride_<=0 ) error("frequency for Lagrangian multipliers printing (STRIDE) is nonsensical");
     261          50 :   simtemp=0.;
     262          50 :   parse("TEMP",simtemp);
     263          50 :   if(simtemp>0) simtemp*=plumed.getAtoms().getKBoltzmann();
     264           0 :   else simtemp=plumed.getAtoms().getKbT();
     265          50 :   parseFlag("REWEIGHT",reweight);
     266          50 :   if(simtemp<=0 && reweight) error("Set the temperature (TEMP) if you want to do reweighting.");
     267             : 
     268          50 :   checkRead();
     269             : 
     270          50 :   log.printf("  at");
     271         532 :   for(unsigned i=0; i<at.size(); i++) log.printf(" %f",at[i]);
     272          50 :   log.printf("\n");
     273          50 :   log.printf("  with initial learning rate for optimization of");
     274         532 :   for(unsigned i=0; i<kappa.size(); i++) log.printf(" %f",kappa[i]);
     275          50 :   log.printf("\n");
      276          50 :   log.printf("Damping times for the learning rates are (ps): ");
     277         532 :   for(unsigned i=0; i<tau.size(); i++) log.printf(" %f",tau[i]);
     278          50 :   log.printf("\n");
     279          50 :   log.printf("Lagrangian multipliers are updated every %ld steps (PACE)\n",pace_);
     280          50 :   log.printf("Lagrangian multipliers output file %s\n",lagmultfname.c_str());
     281          50 :   log.printf("Lagrangian multipliers are written every %ld steps (PRINT_STRIDE)\n",stride_);
     282          50 :   if(fmt.length()>0)
      283          50 :     log.printf("The format for real numbers in the Lagrangian multipliers file is %s\n",fmt.c_str());
     284          50 :   if(tstart >-1.0 && tend>-1.0)
     285          14 :     log.printf("Lagrangian multipliers are averaged from %lf ps to %lf ps\n",tstart,tend);
     286          50 :   if(no_broadcast)
      287           0 :     log.printf("Using the NO_BROADCAST option. Lagrangian multipliers will not be communicated to other replicas.\n");
     288             :   //for(int irep=0;irep<nrep;irep++){
     289             :   //  if(apply_weights[irep]!=0)
     290             :   //    log.printf("%d",irep);
     291             :   //  }
     292         100 :   addComponent("force2"); componentIsNotPeriodic("force2");
     293         100 :   addComponent("work"); componentIsNotPeriodic("work");
     294          50 :   valueForce2=getPntrToComponent("force2");
     295          50 :   valueWork=getPntrToComponent("work");
     296             : 
     297             :   std::string comp;
     298         532 :   for(unsigned i=0; i< getNumberOfArguments() ; i++) {
     299         482 :     comp=getPntrToArgument(i)->getName()+"_coupling";
     300         482 :     addComponent(comp); componentIsNotPeriodic(comp);
     301         482 :     comp=getPntrToArgument(i)->getName()+"_work";
     302         482 :     addComponent(comp); componentIsNotPeriodic(comp);
     303         482 :     work.push_back(0.); // initialize the work value
     304         482 :     comp=getPntrToArgument(i)->getName()+"_error";
     305         482 :     addComponent(comp); componentIsNotPeriodic(comp);
     306             :   }
     307             :   std::string fname;
     308             :   fname=lagmultfname;
     309          50 :   ifile.link(*this);
     310          50 :   if(ifile.FileExist(fname)) {
     311          37 :     ifile.open(fname);
     312          37 :     if(getRestart()) {
     313          37 :       log.printf("  Restarting from: %s\n",fname.c_str());
     314          37 :       ReadLagrangians(ifile);
     315          37 :       printFirstStep=false;
     316             :     }
     317          37 :     ifile.reset(false);
     318             :   }
     319             : 
     320          50 :   lagmultOfile_.link(*this);
     321          50 :   lagmultOfile_.open(fname);
     322         100 :   if(fmt.length()>0) {fmt=" "+fmt; lagmultOfile_.fmtField(fmt);}
     323          50 : }
     324             : ////MEMBER FUNCTIONS
     325          37 : void MaxEnt::ReadLagrangians(IFile &ifile)
     326             : {
     327             :   double dummy;
     328         888 :   while(ifile.scanField("time",dummy)) {
     329        4708 :     for(unsigned j=0; j<getNumberOfArguments(); ++j) {
     330        4301 :       ifile.scanField(getPntrToArgument(j)->getName()+"_coupling",lambda[j]);
     331        4301 :       if(dummy>=tstart && dummy <=tend)
     332          42 :         avglambda[j]+=lambda[j];
     333        4301 :       if(dummy>=tend) {
     334        4231 :         avglambda[j]=lambda[j];
     335             :         done_average[j]=true;
     336             :       }
     337             :     }
     338         407 :     if(dummy>=tstart && dummy <=tend)
     339           6 :       avg_counter++;
     340         407 :     ifile.scanField();
     341             :   }
     342          37 : }
     343         550 : void MaxEnt::WriteLagrangians(std::vector<double> &lagmult,OFile &file) {
     344         550 :   if(printFirstStep) {
     345         143 :     unsigned ncv=getNumberOfArguments();
     346         143 :     file.printField("time",getTimeStep()*getStep());
     347        1144 :     for(unsigned i=0; i<ncv; ++i)
     348        2002 :       file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
     349         143 :     file.printField();
     350             :   } else {
     351         407 :     if(!isFirstStep) {
     352         370 :       unsigned ncv=getNumberOfArguments();
     353         370 :       file.printField("time",getTimeStep()*getStep());
     354        4280 :       for(unsigned i=0; i<ncv; ++i)
     355        7820 :         file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
     356         370 :       file.printField();
     357             :     }
     358             :   }
     359         550 : }
     360        5302 : double MaxEnt::compute_error(const std::string &err_type,double l) {
     361        5302 :   double sigma2=std::pow(sigma,2.0);
     362        5302 :   double l2=convert_lambda(type,l);
     363             :   double return_error=0;
     364        5302 :   if(err_type=="GAUSSIAN" && sigma!=0.0)
     365           0 :     return_error=-l2*sigma2;
     366             :   else {
     367        5302 :     if(err_type=="LAPLACE" && sigma!=0) {
     368        5302 :       return_error=-l2*sigma2/(1.0-l2*l2*sigma2/(alpha+1));
     369             :     }
     370             :   }
     371        5302 :   return return_error;
     372             : }
     373      119118 : double MaxEnt::convert_lambda(const std::string &type,double lold) {
     374             :   double return_lambda=0;
     375      119118 :   if(type=="EQUAL")
     376             :     return_lambda=lold;
     377             :   else {
     378        5302 :     if(type=="INEQUAL>") {
     379           0 :       if(lold>0.0)
     380             :         return_lambda=0.0;
     381             :       else
     382             :         return_lambda=lold;
     383             :     }
     384             :     else {
     385        5302 :       if(type=="INEQUAL<") {
     386           0 :         if(lold<0.0)
     387             :           return_lambda=0.0;
     388             :         else
     389             :           return_lambda=lold;
     390             :       }
     391             :     }
     392             :   }
     393      119118 :   return return_lambda;
     394             : }
     395        5302 : void MaxEnt::check_lambda_boundaries(const std::string &err_type,double &l) {
     396        5302 :   if(err_type=="LAPLACE" && sigma !=0 ) {
     397        5302 :     double l2=convert_lambda(err_type,l);
     398        5302 :     if(l2 <-(std::sqrt(alpha+1)/sigma-0.01)) {
     399           0 :       l=-(std::sqrt(alpha+1)/sigma-0.01);
     400           0 :       log.printf("Lambda exceeded the allowed range\n");
     401             :     }
     402        5302 :     if(l2>(std::sqrt(alpha+1)/sigma-0.01)) {
     403           0 :       l=std::sqrt(alpha+1)/sigma-0.01;
     404           0 :       log.printf("Lambda exceeded the allowed range\n");
     405             :     }
     406             :   }
     407        5302 : }
     408             : 
     409         550 : void MaxEnt::update_lambda() {
     410             : 
     411             :   double totalWork_=0.0;
     412         550 :   const double time=getTime();
     413         550 :   const double step=getStep();
     414         550 :   double KbT=simtemp;
     415             :   double learning_rate;
     416         550 :   if(reweight)
     417         396 :     BetaReweightBias=plumed.getBias()/KbT;
     418             :   else
     419         154 :     BetaReweightBias=0.0;
     420             : 
     421        5852 :   for(unsigned i=0; i<getNumberOfArguments(); ++i) {
     422        5302 :     const double k=kappa[i];
     423        5302 :     double cv=(getArgument(i)+compute_error(error_type,lambda[i])-at[i]);
     424        5302 :     if(reweight)
     425        4224 :       learning_rate=1.0*k/(1+step/tau[i]);
     426             :     else
     427        1078 :       learning_rate=1.0*k/(1+time/tau[i]);
     428        5302 :     lambda[i]+=learning_rate*cv*std::exp(-BetaReweightBias); //update Lagrangian multipliers and reweight them if REWEIGHT is set
     429        5302 :     check_lambda_boundaries(error_type,lambda[i]);      //check that Lagrangians multipliers not exceed the allowed range
     430        5890 :     if(time>=tstart && time <=tend && !done_average[i]) {
     431         546 :       avglambda[i]+=convert_lambda(type,lambda[i]); //compute the average of Lagrangian multipliers over the required time window
     432             :     }
     433        5302 :     if(time>=tend && tend >=0) { //setting tend<0 will disable this feature
     434          98 :       if(!done_average[i]) {
     435          91 :         avglambda[i]=avglambda[i]/avg_counter;
     436             :         done_average[i]=true;
     437          91 :         lambda[i]=avglambda[i];
     438             :       }
     439             :       else
     440           7 :         lambda[i]=avglambda[i]; //keep Lagrangian multipliers fixed to the previously computed average.
     441             :     }
     442        5302 :     work[i]+=(convert_lambda(type,lambda[i])-oldlambda[i])*getArgument(i); //compute the work performed in updating lambda
     443        5302 :     totalWork_+=work[i];
     444        5302 :     totalWork=totalWork_;
     445        5302 :     oldlambda[i]=convert_lambda(type,lambda[i]);
     446             :   };
     447         550 :   if(time>=tstart && time <=tend)
     448          84 :     avg_counter++;
     449         550 : }
     450             : 
     451        5050 : void MaxEnt::calculate() {
     452             :   double totf2=0.0;
     453             :   double ene=0.0;
     454        5050 :   double KbT=simtemp;
     455       53732 :   for(unsigned i=0; i<getNumberOfArguments(); ++i) {
     456       97364 :     getPntrToComponent(getPntrToArgument(i)->getName()+"_error")->set(expected_eps[i]);
     457       48682 :     getPntrToComponent(getPntrToArgument(i)->getName()+"_work")->set(work[i]);
     458       48682 :     valueWork->set(totalWork);
     459       48682 :     getPntrToComponent(getPntrToArgument(i)->getName()+"_coupling")->set(lambda[i]);
     460       48682 :     const double f=-KbT*convert_lambda(type,lambda[i])*apply_weights[myrep];
     461       48682 :     totf2+=f*f;
     462       48682 :     ene+=KbT*convert_lambda(type,lambda[i])*getArgument(i)*apply_weights[myrep];
     463       48682 :     setOutputForce(i,f);
     464             :   }
     465             :   setBias(ene);
     466        5050 :   valueForce2->set(totf2);
     467        5050 : }
     468             : 
     469        5050 : void MaxEnt::update() {
     470             : 
     471        5050 :   if(getStep()%stride_ == 0)
     472         550 :     WriteLagrangians(lambda,lagmultOfile_);
     473        5050 :   if(getStep()%pace_ == 0) {
     474         550 :     update_lambda();
     475         550 :     if(!no_broadcast) {
      476         550 :       if(comm.Get_rank()==0) //Communicate Lagrangian multipliers from the reference replica to the other replicas
     477         462 :         multi_sim_comm.Bcast(lambda,learn_replica);
     478             :     }
     479         550 :     comm.Bcast(lambda,0);
     480             :   }
     481        5050 :   isFirstStep=false;
     482        5050 : }
     483             : 
     484             : }
     485             : 
     486             : }

Generated by: LCOV version 1.15