LCOV - code coverage report
Current view: top level - bias - MaxEnt.cpp (source / functions) Hit Total Coverage
Test: plumed test coverage Lines: 226 232 97.4 %
Date: 2024-10-18 14:00:25 Functions: 10 11 90.9 %

          Line data    Source code
       1             : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
       2             :    Copyright (c) 2016-2023 The plumed team
       3             :    (see the PEOPLE file at the root of the distribution for a list of names)
       4             : 
       5             :    See http://www.plumed.org for more information.
       6             : 
       7             :    This file is part of plumed, version 2.
       8             : 
       9             :    plumed is free software: you can redistribute it and/or modify
      10             :    it under the terms of the GNU Lesser General Public License as published by
      11             :    the Free Software Foundation, either version 3 of the License, or
      12             :    (at your option) any later version.
      13             : 
      14             :    plumed is distributed in the hope that it will be useful,
      15             :    but WITHOUT ANY WARRANTY; without even the implied warranty of
      16             :    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      17             :    GNU Lesser General Public License for more details.
      18             : 
      19             :    You should have received a copy of the GNU Lesser General Public License
      20             :    along with plumed.  If not, see <http://www.gnu.org/licenses/>.
      21             : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
      22             : #include "Bias.h"
      23             : #include "core/PlumedMain.h"
      24             : #include "core/ActionRegister.h"
      25             : #include "core/ActionWithValue.h"
      26             : #include "tools/Communicator.h"
      27             : #include "tools/File.h"
      28             : 
      29             : // The original implementation of this method was contributed
      30             : // by Andrea Cesari (andreacesari90@gmail.com).
      31             : // Copyright has been then transferred to PLUMED developers
      32             : // (see https://github.com/plumed/plumed2/blob/master/.github/CONTRIBUTING.md)
      33             : 
      34             : namespace PLMD {
      35             : namespace bias {
      36             : 
      37             : //+PLUMEDOC BIAS MAXENT
      38             : /*
      39             : Add a linear biasing potential on one or more variables that satisfies a maximum entropy principle.
      40             : 
      41             : Add a linear biasing potential on one or more variables \f$f_{i}\left(\boldsymbol{x}\right)\f$ satisfying the maximum entropy principle as proposed in Ref. \cite cesari2016maxent .
      42             : 
      43             : \warning
      44             :     Notice that syntax is still under revision and might change
      45             : 
      46             : The resulting biasing potential is given by:
      47             : \f[
      48             :   V_{BIAS}(\boldsymbol{x},t)=K_{B}T\sum_{i=1}^{\#arguments}f_{i}(\boldsymbol{x},t)\lambda_{i}(t)
      49             : \f]
      50             : Lagrangian multipliers \f$ \lambda_{i}\f$ are updated, every PACE steps, according to the following update rule:
      51             : \f[
      52             : \lambda_{i}=\lambda_{i}+\frac{k_{i}}{1+\frac{t}{\tau_{i}}}\left(f_{exp,i}+\xi_{i}\lambda_{i}-f_{i}(\boldsymbol{x})\right)
      53             : \f]
\f$k\f$ sets the initial value of the learning rate and its units are \f$[observable]^{-2}ps^{-1}\f$. This can be set with the keyword KAPPA.
      55             : The number of components for any KAPPA vector must be equal to the number of arguments of the action.
      56             : 
      57             : Variable \f$ \xi_{i}(\lambda) \f$ is related to the chosen prior to model experimental errors. If a GAUSSIAN prior is used then:
      58             : \f[
      59             : \xi_{i}(\lambda)=-\lambda_{i}\sigma^{2}
      60             : \f]
      61             : where \f$ \sigma \f$ is the typical expected error on the observable \f$ f_i\f$.
      62             : For a LAPLACE prior:
      63             : \f[
\xi_{i}(\lambda)=-\frac{\lambda_{i}\sigma^{2}}{1-\frac{\lambda_{i}^{2}\sigma^{2}}{2}}
\f]
      67             : The value of \f$ \xi(\lambda,t)\f$ is written in output as a component named: argument name followed by the string _error.
      68             : Setting \f$ \sigma =0\f$ is equivalent to enforce a pure Maximum Entropy restraint without any noise modelling.
      69             : This method can be also used to enforce inequality restraint as shown in following examples.
      70             : 
      71             : Notice that a similar method is available as \ref EDS, although with different features and using a different optimization algorithm.
      72             : 
      73             : \par Examples
      74             : 
      75             : The following input tells plumed to restrain the distance between atoms 7 and 15
      76             : and the distance between atoms 2 and 19, at different equilibrium
      77             : values, and to print the energy of the restraint.
      78             : Lagrangian multiplier will be printed on a file called restraint.LAGMULT with a stride set by the variable PACE to 200ps.
      79             : Moreover plumed will compute the average of each Lagrangian multiplier in the window [TSTART,TEND] and use that to continue the simulations with fixed Lagrangian multipliers.
      80             : \plumedfile
      81             : DISTANCE ATOMS=7,15 LABEL=d1
      82             : DISTANCE ATOMS=2,19 LABEL=d2
      83             : MAXENT ...
      84             : ARG=d1,d2
      85             : TYPE=EQUAL
      86             : AT=0.2,0.5
      87             : KAPPA=35000.0,35000.0
      88             : TAU=0.02,0.02
      89             : PACE=200
      90             : TSTART=100
      91             : TEND=500
      92             : LABEL=restraint
      93             : ... MAXENT
      94             : PRINT ARG=restraint.bias
      95             : \endplumedfile
The energy of the restraint will be printed by PRINT as restraint.bias, while the Lagrangian multipliers are written to the file restraint.LAGMULT.
      97             : The following input tells plumed to restrain the distance between atoms 7 and 15
to be greater than 0.02 and to print the energy of the restraint
      99             : \plumedfile
     100             : DISTANCE ATOMS=7,15 LABEL=d
     101             : MAXENT ARG=d TYPE=INEQUAL> AT=0.02 KAPPA=35000.0 TAU=3 LABEL=restraint
     102             : PRINT ARG=restraint.bias
     103             : \endplumedfile
     104             : 
     105             : (See also \ref DISTANCE and \ref PRINT).
     106             : 
     107             : */
     108             : //+ENDPLUMEDOC
     109             : 
// Bias implementing the MAXENT action: a linear restraint on the arguments
// whose Lagrangian multipliers are learned on the fly following the
// maximum-entropy update rule described in the doc block above.
class MaxEnt : public Bias {
  std::vector<double> at;                // restraint centers, one per argument (AT)
  std::vector<double> kappa;             // initial learning rates (KAPPA)
  std::vector<double> lambda;            // current Lagrangian multipliers
  std::vector<double> avgx;              // NOTE(review): allocated in the ctor but never used in this file — confirm
  std::vector<double> work;              // accumulated work per argument
  std::vector<double> oldlambda;         // effective multipliers at the previous update (for work computation)
  std::vector<double> tau;               // damping times for the learning rates (TAU)
  std::vector<double> avglambda;         // running sum, then average, of multipliers over [TSTART,TEND]
  std::vector<double> avglambda_restart; // NOTE(review): never written in this file — possibly legacy
  std::vector<double> expected_eps;      // per-argument values written to the *_error components
  std::vector<double> apply_weights;     // per-replica weights used when applying the bias (APPLY_WEIGHTS)
  double sigma;                          // expected error on the observables (SIGMA); 0 disables error modelling
  double tstart;                         // start of the averaging window in ps; -1 is the "unset" sentinel
  double tend;                           // end of the averaging window in ps; -1 is the "unset" sentinel
  double avgstep; //current number of samples over which to compute the average. Check if could be replaced by getStep()
  long long int pace_;                   // multiplier-update stride in steps (PACE)
  long long int stride_;                 // multiplier-printing stride in steps (PRINT_STRIDE)
  double totalWork;                      // total work accumulated by the bias, exposed as "work"
  double BetaReweightBias;               // beta*V of the total bias, used when REWEIGHT is active
  double simtemp;                        // kB*T of the simulation
  std::vector<ActionWithValue*> biases;
  std::string type;                      // restraint type: EQUAL, INEQUAL> or INEQUAL<
  std::string error_type;                // error prior: GAUSSIAN or LAPLACE
  double alpha;                          // exponent parameter of the generalized LAPLACE prior (ALPHA)
  double avg_counter;                    // number of samples accumulated in the averaging window
  int learn_replica;                     // replica whose multipliers are broadcast to the others (LEARN_REPLICA)
  Value* valueForce2;                    // cached pointer to the "force2" output component
  Value* valueWork;                      // cached pointer to the "work" output component
  OFile lagmultOfile_;                   // output file for the Lagrangian multipliers
  IFile ifile;                           // input file used to restore multipliers on restart
  std::string lagmultfname;              // multipliers file name (FILE, default <label>.LAGMULT)
  std::string ifilesnames;
  std::string fmt;                       // numeric format for the multipliers file (FMT)
  bool isFirstStep;                      // true until the first update() has run
  bool reweight;                         // REWEIGHT flag
  bool no_broadcast;                     // NO_BROADCAST flag
  bool printFirstStep;                   // false when restarting from an existing LAGMULT file
  std::vector<bool> done_average;        // per-argument: averaging finished, multiplier kept fixed
  int myrep,nrep;                        // replica index and total number of replicas
public:
  explicit MaxEnt(const ActionOptions&);
  void calculate() override;
  void update() override;
  // Update the Lagrangian multipliers; called every PACE steps from update().
  void update_lambda();
  static void registerKeywords(Keywords& keys);
  // Read multipliers back from an existing LAGMULT file on restart.
  void ReadLagrangians(IFile &ifile);
  // Append the current multipliers to the LAGMULT file.
  void WriteLagrangians(std::vector<double> &lagmult,OFile &file);
  // Error term xi(lambda) for the chosen prior (GAUSSIAN or LAPLACE).
  double compute_error(const std::string &err_type,double l);
  // Effective multiplier for the given restraint type (clips to 0 for satisfied inequalities).
  double convert_lambda(const std::string &type,double lold);
  // Clamp lambda inside the range where the LAPLACE prior is well defined.
  void check_lambda_boundaries(const std::string &err_type,double &l);
};
PLUMED_REGISTER_ACTION(MaxEnt,"MAXENT")
     163             : 
// Declare all keywords, flags and output components of MAXENT.
// Strings here are user-visible documentation and are matched by regtests,
// so they must not be reformatted casually.
void MaxEnt::registerKeywords(Keywords& keys) {
  Bias::registerKeywords(keys);
  keys.use("ARG");
  // restraint definition
  keys.add("compulsory","KAPPA","0.0","specifies the initial value for the learning rate");
  keys.add("compulsory","TAU","Specify the dumping time for the learning rate.");
  keys.add("compulsory","TYPE","specify the restraint type. "
           "EQUAL to restrain the variable at a given equilibrium value "
           "INEQUAL< to restrain the variable to be smaller than a given value "
           "INEQUAL> to restrain the variable to be greater than a given value");
  // error modelling
  keys.add("optional","ERROR_TYPE","specify the prior on the error to use."
           "GAUSSIAN: use a Gaussian prior "
           "LAPLACE: use a Laplace prior");
  // averaging window for the multipliers
  keys.add("optional","TSTART","time from where to start averaging the Lagrangian multiplier. By default no average is computed, hence lambda is updated every PACE steps");
  keys.add("optional","TEND","time in ps where to stop to compute the average of Lagrangian multiplier. From this time until the end of the simulation Lagrangian multipliers are kept fix to the average computed between TSTART and TEND;");
  keys.add("optional","ALPHA","default=1.0; To be used with LAPLACE KEYWORD, allows to choose a prior function proportional to a Gaussian times an exponential function. ALPHA=1 correspond to the LAPLACE prior.");
  keys.add("compulsory","AT","the position of the restraint");
  keys.add("optional","SIGMA","The typical errors expected on observable");
  // output and multi-replica options
  keys.add("optional","FILE","Lagrangian multipliers output file. The default name is: label name followed by the string .LAGMULT ");
  keys.add("optional","LEARN_REPLICA","In a multiple replica environment specify which is the reference replica. By default replica 0 will be used.");
  keys.add("optional","APPLY_WEIGHTS","Vector of weights containing 1 in correspondence of each replica that will receive the Lagrangian multiplier from the current one.");
  keys.add("optional","PACE","the frequency for Lagrangian multipliers update");
  keys.add("optional","PRINT_STRIDE","stride of Lagrangian multipliers output file. If no STRIDE is passed they are written every time they are updated (PACE).");
  keys.add("optional","FMT","specify format for Lagrangian multipliers files (useful to decrease the number of digits in regtests)");
  keys.addFlag("REWEIGHT",false,"to be used with plumed driver in order to reweight a trajectory a posteriori");
  keys.addFlag("NO_BROADCAST",false,"If active will avoid Lagrangian multipliers to be communicated to other replicas.");
  keys.add("optional","TEMP","the system temperature.  This is required if you are reweighting.");
  keys.addOutputComponent("force2","default","the instantaneous value of the squared force due to this bias potential");
  keys.addOutputComponent("work","default","the instantaneous value of the work done by the biasing force");
  keys.addOutputComponent("_work","default","the instantaneous value of the work done by the biasing force for each argument. "
                          "These quantities will named with the arguments of the bias followed by "
                          "the character string _work.");
  keys.addOutputComponent("_error","default","Instantaneous values of the discrepancy between the observable and the restraint center");
  keys.addOutputComponent("_coupling","default","Instantaneous values of Lagrangian multipliers. They are also written by default in a separate output file.");
  keys.use("RESTART");
}
     199          52 : MaxEnt::MaxEnt(const ActionOptions&ao):
     200             :   PLUMED_BIAS_INIT(ao),
     201         104 :   at(getNumberOfArguments()),
     202          52 :   kappa(getNumberOfArguments(),0.0),
     203          52 :   lambda(getNumberOfArguments(),0.0),
     204          52 :   avgx(getNumberOfArguments(),0.0),
     205          52 :   oldlambda(getNumberOfArguments(),0.0),
     206          52 :   tau(getNumberOfArguments(),getTimeStep()),
     207          52 :   avglambda(getNumberOfArguments(),0.0),
     208          52 :   avglambda_restart(getNumberOfArguments(),0.0),
     209          52 :   expected_eps(getNumberOfArguments(),0.0),
     210          52 :   sigma(0.0),
     211          52 :   pace_(100),
     212          52 :   stride_(100),
     213          52 :   alpha(1.0),
     214          52 :   avg_counter(0.0),
     215          52 :   isFirstStep(true),
     216          52 :   reweight(false),
     217          52 :   no_broadcast(false),
     218          52 :   printFirstStep(true),
     219         156 :   done_average(getNumberOfArguments(),false)
     220             : {
     221          52 :   if(comm.Get_rank()==0) nrep=multi_sim_comm.Get_size();
     222          52 :   if(comm.Get_rank()==0) myrep=multi_sim_comm.Get_rank();
     223          52 :   comm.Bcast(nrep,0);
     224          52 :   comm.Bcast(myrep,0);
     225          52 :   parseFlag("NO_BROADCAST",no_broadcast);
     226             :   //if(no_broadcast){
     227             :   //for(int irep=0;irep<nrep;irep++){
     228             :   //  if(irep!=myrep)
     229             :   //    apply_weights[irep]=0.0;}
     230             :   //}
     231          52 :   avgstep=1.0;
     232          52 :   tstart=-1.0;
     233          52 :   tend=-1.0;
     234          52 :   totalWork=0.0;
     235          52 :   learn_replica=0;
     236             : 
     237          52 :   parse("LEARN_REPLICA",learn_replica);
     238         104 :   parseVector("APPLY_WEIGHTS",apply_weights);
     239          52 :   if(apply_weights.size()==0) apply_weights.resize(nrep,1.0);
     240          52 :   parseVector("KAPPA",kappa);
     241          52 :   parseVector("AT",at);
     242          52 :   parseVector("TAU",tau);
     243         104 :   parse("TYPE",type);
     244             :   error_type="GAUSSIAN";
     245          52 :   parse("ERROR_TYPE",error_type);
     246          52 :   parse("ALPHA",alpha);
     247          52 :   parse("SIGMA",sigma);
     248          52 :   parse("TSTART",tstart);
     249          52 :   if(tstart <0 && tstart != -1.0) error("TSTART should be a positive number");
     250          52 :   parse("TEND",tend);
     251          52 :   if(tend<0 && tend != -1.0) error("TSTART should be a positive number");
     252          52 :   if(tend<tstart) error("TEND should be >= TSTART");
     253          52 :   lagmultfname=getLabel()+".LAGMULT";
     254          52 :   parse("FILE",lagmultfname);
     255          52 :   parse("FMT",fmt);
     256          52 :   parse("PACE",pace_);
     257          52 :   if(pace_<=0 ) error("frequency for Lagrangian multipliers update (PACE) is nonsensical");
     258          52 :   stride_=pace_;  //if no STRIDE is passed, then Lagrangian multipliers willbe printed at each update
     259          52 :   parse("PRINT_STRIDE",stride_);
     260          52 :   if(stride_<=0 ) error("frequency for Lagrangian multipliers printing (STRIDE) is nonsensical");
     261          52 :   simtemp=getkBT();
     262          52 :   parseFlag("REWEIGHT",reweight);
     263          52 :   if(simtemp<=0 && reweight) error("Set the temperature (TEMP) if you want to do reweighting.");
     264             : 
     265          52 :   checkRead();
     266             : 
     267          52 :   log.printf("  at");
     268         548 :   for(unsigned i=0; i<at.size(); i++) log.printf(" %f",at[i]);
     269          52 :   log.printf("\n");
     270          52 :   log.printf("  with initial learning rate for optimization of");
     271         548 :   for(unsigned i=0; i<kappa.size(); i++) log.printf(" %f",kappa[i]);
     272          52 :   log.printf("\n");
     273          52 :   log.printf("Dumping times for the learning rates are (ps): ");
     274         548 :   for(unsigned i=0; i<tau.size(); i++) log.printf(" %f",tau[i]);
     275          52 :   log.printf("\n");
     276          52 :   log.printf("Lagrangian multipliers are updated every %lld steps (PACE)\n",pace_);
     277          52 :   log.printf("Lagrangian multipliers output file %s\n",lagmultfname.c_str());
     278          52 :   log.printf("Lagrangian multipliers are written every %lld steps (PRINT_STRIDE)\n",stride_);
     279          52 :   if(fmt.length()>0)
     280          52 :     log.printf("The format for real number in Lagrangian multipliers file is %s\n",fmt.c_str());
     281          52 :   if(tstart >-1.0 && tend>-1.0)
     282          16 :     log.printf("Lagrangian multipliers are averaged from %lf ps to %lf ps\n",tstart,tend);
     283          52 :   if(no_broadcast)
     284           0 :     log.printf("Using NO_BROADCAST options. Lagrangian multipliers will not be comunicated to other replicas.\n");
     285             :   //for(int irep=0;irep<nrep;irep++){
     286             :   //  if(apply_weights[irep]!=0)
     287             :   //    log.printf("%d",irep);
     288             :   //  }
     289         156 :   addComponent("force2"); componentIsNotPeriodic("force2");
     290         104 :   addComponent("work"); componentIsNotPeriodic("work");
     291          52 :   valueForce2=getPntrToComponent("force2");
     292          52 :   valueWork=getPntrToComponent("work");
     293             : 
     294             :   std::string comp;
     295         548 :   for(unsigned i=0; i< getNumberOfArguments() ; i++) {
     296         992 :     comp=getPntrToArgument(i)->getName()+"_coupling";
     297         992 :     addComponent(comp); componentIsNotPeriodic(comp);
     298         992 :     comp=getPntrToArgument(i)->getName()+"_work";
     299         992 :     addComponent(comp); componentIsNotPeriodic(comp);
     300         496 :     work.push_back(0.); // initialize the work value
     301         992 :     comp=getPntrToArgument(i)->getName()+"_error";
     302         992 :     addComponent(comp); componentIsNotPeriodic(comp);
     303             :   }
     304             :   std::string fname;
     305             :   fname=lagmultfname;
     306          52 :   ifile.link(*this);
     307          52 :   if(ifile.FileExist(fname)) {
     308          37 :     ifile.open(fname);
     309          37 :     if(getRestart()) {
     310          37 :       log.printf("  Restarting from: %s\n",fname.c_str());
     311          37 :       ReadLagrangians(ifile);
     312          37 :       printFirstStep=false;
     313             :     }
     314          37 :     ifile.reset(false);
     315             :   }
     316             : 
     317          52 :   lagmultOfile_.link(*this);
     318          52 :   lagmultOfile_.open(fname);
     319         104 :   if(fmt.length()>0) {fmt=" "+fmt; lagmultOfile_.fmtField(fmt);}
     320          52 : }
     321             : ////MEMBER FUNCTIONS
// Re-read Lagrangian multipliers from an existing LAGMULT file on restart.
// Besides restoring lambda, this rebuilds the averaging state: records with
// tstart<=time<=tend are accumulated into avglambda (and counted via
// avg_counter), and once a record at time>=tend is seen the average is
// considered done and the multipliers are pinned to the last value read.
// Note: the IFile parameter intentionally shadows the member `ifile`.
void MaxEnt::ReadLagrangians(IFile &ifile)
{
  double dummy;  // time stamp of the record currently being read
  while(ifile.scanField("time",dummy)) {
    for(unsigned j=0; j<getNumberOfArguments(); ++j) {
      ifile.scanField(getPntrToArgument(j)->getName()+"_coupling",lambda[j]);
      if(dummy>=tstart && dummy <=tend)
        avglambda[j]+=lambda[j];
      if(dummy>=tend) {
        // past the averaging window: keep the value from the file and stop averaging
        avglambda[j]=lambda[j];
        done_average[j]=true;
      }
    }
    if(dummy>=tstart && dummy <=tend)
      avg_counter++;
    ifile.scanField();  // consume the end of the record
  }
}
     340         572 : void MaxEnt::WriteLagrangians(std::vector<double> &lagmult,OFile &file) {
     341         572 :   if(printFirstStep) {
     342         165 :     unsigned ncv=getNumberOfArguments();
     343         165 :     file.printField("time",getTimeStep()*getStep());
     344        1320 :     for(unsigned i=0; i<ncv; ++i)
     345        2310 :       file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
     346         165 :     file.printField();
     347             :   } else {
     348         407 :     if(!isFirstStep) {
     349         370 :       unsigned ncv=getNumberOfArguments();
     350         370 :       file.printField("time",getTimeStep()*getStep());
     351        4280 :       for(unsigned i=0; i<ncv; ++i)
     352        7820 :         file.printField(getPntrToArgument(i)->getName()+"_coupling",lagmult[i]);
     353         370 :       file.printField();
     354             :     }
     355             :   }
     356         572 : }
     357        5456 : double MaxEnt::compute_error(const std::string &err_type,double l) {
     358        5456 :   double sigma2=std::pow(sigma,2.0);
     359        5456 :   double l2=convert_lambda(type,l);
     360             :   double return_error=0;
     361        5456 :   if(err_type=="GAUSSIAN" && sigma!=0.0)
     362           0 :     return_error=-l2*sigma2;
     363             :   else {
     364        5456 :     if(err_type=="LAPLACE" && sigma!=0) {
     365        5456 :       return_error=-l2*sigma2/(1.0-l2*l2*sigma2/(alpha+1));
     366             :     }
     367             :   }
     368        5456 :   return return_error;
     369             : }
     370      122646 : double MaxEnt::convert_lambda(const std::string &type,double lold) {
     371             :   double return_lambda=0;
     372      122646 :   if(type=="EQUAL")
     373             :     return_lambda=lold;
     374             :   else {
     375        8830 :     if(type=="INEQUAL>") {
     376        1687 :       if(lold>0.0)
     377             :         return_lambda=0.0;
     378             :       else
     379             :         return_lambda=lold;
     380             :     }
     381             :     else {
     382        7143 :       if(type=="INEQUAL<") {
     383        1687 :         if(lold<0.0)
     384             :           return_lambda=0.0;
     385             :         else
     386             :           return_lambda=lold;
     387             :       }
     388             :     }
     389             :   }
     390      122646 :   return return_lambda;
     391             : }
     392        5456 : void MaxEnt::check_lambda_boundaries(const std::string &err_type,double &l) {
     393        5456 :   if(err_type=="LAPLACE" && sigma !=0 ) {
     394        5456 :     double l2=convert_lambda(err_type,l);
     395        5456 :     if(l2 <-(std::sqrt(alpha+1)/sigma-0.01)) {
     396           0 :       l=-(std::sqrt(alpha+1)/sigma-0.01);
     397           0 :       log.printf("Lambda exceeded the allowed range\n");
     398             :     }
     399        5456 :     if(l2>(std::sqrt(alpha+1)/sigma-0.01)) {
     400           0 :       l=std::sqrt(alpha+1)/sigma-0.01;
     401           0 :       log.printf("Lambda exceeded the allowed range\n");
     402             :     }
     403             :   }
     404        5456 : }
     405             : 
// Update the Lagrangian multipliers; called every PACE steps from update().
// Implements lambda_i += k_i/(1+t/tau_i) * (f_i + xi_i(lambda_i) - at_i),
// optionally reweighted by exp(-beta*V) when REWEIGHT is active, then handles
// the [TSTART,TEND] averaging window and the per-argument work bookkeeping.
void MaxEnt::update_lambda() {

  double totalWork_=0.0;
  const double time=getTime();
  // NOTE(review): with REWEIGHT the damping uses the step count rather than
  // the simulation time (see below) — presumably intentional for driver
  // post-processing, where frames are counted rather than timed; confirm.
  const double step=getStep();
  double KbT=simtemp;
  double learning_rate;
  if(reweight)
    BetaReweightBias=plumed.getBias()/KbT;  // beta*V of all biases, used to unbias the update
  else
    BetaReweightBias=0.0;

  for(unsigned i=0; i<getNumberOfArguments(); ++i) {
    const double k=kappa[i];
    // deviation of the (error-corrected) observable from the restraint center
    double cv=(getArgument(i)+compute_error(error_type,lambda[i])-at[i]);
    if(reweight)
      learning_rate=1.0*k/(1+step/tau[i]);
    else
      learning_rate=1.0*k/(1+time/tau[i]);
    lambda[i]+=learning_rate*cv*std::exp(-BetaReweightBias); //update Lagrangian multipliers and reweight them if REWEIGHT is set
    check_lambda_boundaries(error_type,lambda[i]);      //check that Lagrangians multipliers not exceed the allowed range
    if(time>=tstart && time <=tend && !done_average[i]) {
      avglambda[i]+=convert_lambda(type,lambda[i]); //compute the average of Lagrangian multipliers over the required time window
    }
    if(time>=tend && tend >=0) { //setting tend<0 will disable this feature
      if(!done_average[i]) {
        // first step past the window: finalize the average and pin lambda to it
        avglambda[i]=avglambda[i]/avg_counter;
        done_average[i]=true;
        lambda[i]=avglambda[i];
      }
      else
        lambda[i]=avglambda[i]; //keep Lagrangian multipliers fixed to the previously computed average.
    }
    work[i]+=(convert_lambda(type,lambda[i])-oldlambda[i])*getArgument(i); //compute the work performed in updating lambda
    totalWork_+=work[i];
    totalWork=totalWork_;
    oldlambda[i]=convert_lambda(type,lambda[i]);
  };
  if(time>=tstart && time <=tend)
    avg_counter++;
}
     447             : 
     448        5252 : void MaxEnt::calculate() {
     449             :   double totf2=0.0;
     450             :   double ene=0.0;
     451        5252 :   double KbT=simtemp;
     452       55348 :   for(unsigned i=0; i<getNumberOfArguments(); ++i) {
     453      100192 :     getPntrToComponent(getPntrToArgument(i)->getName()+"_error")->set(expected_eps[i]);
     454       50096 :     getPntrToComponent(getPntrToArgument(i)->getName()+"_work")->set(work[i]);
     455       50096 :     valueWork->set(totalWork);
     456       50096 :     getPntrToComponent(getPntrToArgument(i)->getName()+"_coupling")->set(lambda[i]);
     457       50096 :     const double f=-KbT*convert_lambda(type,lambda[i])*apply_weights[myrep];
     458       50096 :     totf2+=f*f;
     459       50096 :     ene+=KbT*convert_lambda(type,lambda[i])*getArgument(i)*apply_weights[myrep];
     460       50096 :     setOutputForce(i,f);
     461             :   }
     462        5252 :   setBias(ene);
     463        5252 :   valueForce2->set(totf2);
     464        5252 : }
     465             : 
     466        5252 : void MaxEnt::update() {
     467             : 
     468        5252 :   if(getStep()%stride_ == 0)
     469         572 :     WriteLagrangians(lambda,lagmultOfile_);
     470        5252 :   if(getStep()%pace_ == 0) {
     471         572 :     update_lambda();
     472         572 :     if(!no_broadcast) {
     473         572 :       if(comm.Get_rank()==0) //Comunicate Lagrangian multipliers from reference replica to higher ones
     474         484 :         multi_sim_comm.Bcast(lambda,learn_replica);
     475             :     }
     476         572 :     comm.Bcast(lambda,0);
     477             :   }
     478        5252 :   isFirstStep=false;
     479        5252 : }
     480             : 
     481             : }
     482             : 
     483             : }

Generated by: LCOV version 1.16