LCOV - code coverage report
Current view: top level - pytorch - PytorchModel.cpp (source / functions)
Test: plumed test coverage
Date: 2024-10-18 14:00:25
                  Hit   Total   Coverage
Lines:             62      69     89.9 %
Functions:          4       5     80.0 %

          Line data    Source code
       1             : /* +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
       2             : Copyright (c) 2022-2023 of Luigi Bonati and Enrico Trizio.
       3             : 
       4             : The pytorch module is free software: you can redistribute it and/or modify
       5             : it under the terms of the GNU Lesser General Public License as published by
       6             : the Free Software Foundation, either version 3 of the License, or
       7             : (at your option) any later version.
       8             : 
       9             : The pytorch module is distributed in the hope that it will be useful,
      10             : but WITHOUT ANY WARRANTY; without even the implied warranty of
      11             : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
      12             : GNU Lesser General Public License for more details.
      13             : 
      14             : You should have received a copy of the GNU Lesser General Public License
      15             : along with plumed.  If not, see <http://www.gnu.org/licenses/>.
      16             : +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
      17             : 
      18             : #ifdef __PLUMED_HAS_LIBTORCH
      19             : // convert LibTorch version to string
      20             : //#define STRINGIFY(x) #x
      21             : //#define TOSTR(x) STRINGIFY(x)
       22             : //#define LIBTORCH_VERSION TOSTR(TORCH_VERSION_MAJOR) "." TOSTR(TORCH_VERSION_MINOR) "." TOSTR(TORCH_VERSION_PATCH)
      23             : 
      24             : #include "core/PlumedMain.h"
      25             : #include "function/Function.h"
      26             : #include "core/ActionRegister.h"
      27             : 
      28             : #include <torch/torch.h>
      29             : #include <torch/script.h>
      30             : 
      31             : #include <fstream>
      32             : #include <cmath>
      33             : 
      34             : // Note: Freezing a ScriptModule (torch::jit::freeze) works only in >=1.11
       35             : // For versions from 1.8 up to 1.10 we need a hack
      36             : // (see https://discuss.pytorch.org/t/how-to-check-libtorch-version/77709/4 and also
      37             : // https://github.com/pytorch/pytorch/blob/dfbd030854359207cb3040b864614affeace11ce/torch/csrc/jit/api/module.cpp#L479)
      38             : // adapted from NequIP https://github.com/mir-group/nequip
       39             : #if ( TORCH_VERSION_MAJOR == 2 || ( TORCH_VERSION_MAJOR == 1 && TORCH_VERSION_MINOR <= 10 ) )
      40             : #define DO_TORCH_FREEZE_HACK
      41             : // For the hack, need more headers:
      42             : #include <torch/csrc/jit/passes/freeze_module.h>
      43             : #include <torch/csrc/jit/passes/frozen_graph_optimizations.h>
      44             : #endif
      45             : 
      46             : using namespace std;
      47             : 
      48             : namespace PLMD {
      49             : namespace function {
      50             : namespace pytorch {
      51             : 
      52             : //+PLUMEDOC PYTORCH_FUNCTION PYTORCH_MODEL
      53             : /*
      54             : Load a PyTorch model compiled with TorchScript.
      55             : 
       56             : This can be a function defined in Python or a more complex model, such as a neural network optimized on a set of data. In both cases, the derivatives of the outputs with respect to the inputs are computed using the automatic differentiation (autograd) feature of PyTorch.
      57             : 
       58             : By default the model is assumed to be saved as `model.ptc`, unless otherwise specified with the `FILE` keyword. The function automatically detects the number of outputs of the model and creates a component for each of them. The components are named node-i, with i ranging from 0 to N-1 for N outputs.
      59             : 
       60             : Note that this function requires the LibTorch C++ library (see \ref installation-libtorch). Check the instructions on the \ref PYTORCH page to enable the module.
      61             : 
      62             : \par Examples
      63             : Load a model called `torch_model.ptc` that takes as input two dihedral angles and returns two outputs.
      64             : 
      65             : \plumedfile
      66             : #SETTINGS AUXFILE=regtest/pytorch/rt-pytorch_model_2d/torch_model.ptc
      67             : phi: TORSION ATOMS=5,7,9,15
      68             : psi: TORSION ATOMS=7,9,15,17
      69             : model: PYTORCH_MODEL FILE=torch_model.ptc ARG=phi,psi
      70             : PRINT FILE=COLVAR ARG=model.node-0,model.node-1
      71             : \endplumedfile
      72             : 
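                     : A model file such as `torch_model.ptc` can be generated in Python by compiling the model with
                     : TorchScript and saving it to disk. The snippet below is only a minimal, illustrative sketch
                     : (the two-input, two-output network is arbitrary): the model should accept a single float32
                     : tensor of shape (1, number of inputs) and return a tensor with one value per output.
                     : 
                     : \verbatim
                     : import torch
                     : 
                     : # toy model with 2 inputs and 2 outputs (illustrative only)
                     : model = torch.nn.Sequential(
                     :     torch.nn.Linear(2, 16),
                     :     torch.nn.Tanh(),
                     :     torch.nn.Linear(16, 2),
                     : )
                     : 
                     : # compile to TorchScript (torch.jit.trace would also work) and save
                     : scripted = torch.jit.script(model)
                     : scripted.save("torch_model.ptc")
                     : \endverbatim
                     : 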
      73             : */
      74             : //+ENDPLUMEDOC
      75             : 
      76             : 
      77             : class PytorchModel :
      78             :   public Function
      79             : {
      80             :   unsigned _n_in;
      81             :   unsigned _n_out;
      82             :   torch::jit::script::Module _model;
      83             :   torch::Device device = torch::kCPU;
      84             : 
      85             : public:
      86             :   explicit PytorchModel(const ActionOptions&);
      87             :   void calculate();
      88             :   static void registerKeywords(Keywords& keys);
      89             : 
      90             :   std::vector<float> tensor_to_vector(const torch::Tensor& x);
      91             : };
      92             : 
      93             : PLUMED_REGISTER_ACTION(PytorchModel,"PYTORCH_MODEL")
      94             : 
      95           6 : void PytorchModel::registerKeywords(Keywords& keys) {
      96           6 :   Function::registerKeywords(keys);
      97           6 :   keys.use("ARG");
      98          12 :   keys.add("optional","FILE","Filename of the PyTorch compiled model");
      99          12 :   keys.addOutputComponent("node", "default", "Model outputs");
     100           6 : }
     101             : 
      102             : // Auxiliary function to convert torch tensors into std::vector<float>
     103         103 : std::vector<float> PytorchModel::tensor_to_vector(const torch::Tensor& x) {
     104         206 :   return std::vector<float>(x.data_ptr<float>(), x.data_ptr<float>() + x.numel());
     105             : }
     106             : 
     107           4 : PytorchModel::PytorchModel(const ActionOptions&ao):
     108             :   Action(ao),
     109           4 :   Function(ao)
     110             : {
     111             :   // print libtorch version
     112           4 :   std::stringstream ss;
     113           4 :   ss << TORCH_VERSION_MAJOR << "." << TORCH_VERSION_MINOR << "." << TORCH_VERSION_PATCH;
     114             :   std::string version;
     115           4 :   ss >> version; // extract into the string.
      116           8 :   log.printf("  LibTorch version: %s\n",version.c_str());
     117             : 
     118             :   //number of inputs of the model
     119           4 :   _n_in=getNumberOfArguments();
     120             : 
     121             :   //parse model name
     122           4 :   std::string fname="model.ptc";
     123           8 :   parse("FILE",fname);
     124             : 
     125             :   //deserialize the model from file
     126             :   try {
     127           4 :     _model = torch::jit::load(fname, device);
     128             :   }
     129             : 
     130             :   //if an error is thrown check if the file exists or not
     131           0 :   catch (const c10::Error& e) {
     132           0 :     std::ifstream infile(fname);
     133             :     bool exist = infile.good();
     134           0 :     infile.close();
     135           0 :     if (exist) {
      136           0 :       plumed_merror("Cannot load FILE: '"+fname+"'. Please check that it is a PyTorch compiled model (exported with 'torch.jit.trace' or 'torch.jit.script').");
     137             :     }
     138             :     else {
     139           0 :       plumed_merror("The FILE: '"+fname+"' does not exist.");
     140             :     }
     141           0 :   }
     142           4 :   checkRead();
     143             : 
     144             : // Optimize model
     145             :   _model.eval();
     146             : #ifdef DO_TORCH_FREEZE_HACK
     147             :   // Do the hack
     148             :   // Copied from the implementation of torch::jit::freeze,
     149             :   // except without the broken check
     150             :   // See https://github.com/pytorch/pytorch/blob/dfbd030854359207cb3040b864614affeace11ce/torch/csrc/jit/api/module.cpp
     151             :   bool optimize_numerics = true;  // the default
     152             :   // the {} is preserved_attrs
     153             :   auto out_mod = torch::jit::freeze_module(
     154             :                    _model, {}
     155           4 :                  );
     156             :   // See 1.11 bugfix in https://github.com/pytorch/pytorch/pull/71436
     157           8 :   auto graph = out_mod.get_method("forward").graph();
     158           4 :   OptimizeFrozenGraph(graph, optimize_numerics);
     159           4 :   _model = out_mod;
     160             : #else
     161             :   // Do it normally
     162             :   _model = torch::jit::freeze(_model);
     163             : #endif
     164             : 
     165             : // Optimize model for inference
      166             : #if ( TORCH_VERSION_MAJOR == 2 || ( TORCH_VERSION_MAJOR == 1 && TORCH_VERSION_MINOR >= 10 ) )
     167           4 :   _model = torch::jit::optimize_for_inference(_model);
     168             : #endif
     169             : 
     170             :   //check the dimension of the output
     171           4 :   log.printf("  Checking output dimension:\n");
     172           4 :   std::vector<float> input_test (_n_in);
     173           4 :   torch::Tensor single_input = torch::tensor(input_test).view({1,_n_in});
     174           8 :   single_input = single_input.to(device);
     175             :   std::vector<torch::jit::IValue> inputs;
     176           4 :   inputs.push_back( single_input );
     177           8 :   torch::Tensor output = _model.forward( inputs ).toTensor();
     178           4 :   vector<float> cvs = this->tensor_to_vector (output);
     179           4 :   _n_out=cvs.size();
     180             : 
     181             :   //create components
     182           9 :   for(unsigned j=0; j<_n_out; j++) {
     183           5 :     string name_comp = "node-"+std::to_string(j);
     184           5 :     addComponentWithDerivatives( name_comp );
     185           5 :     componentIsNotPeriodic( name_comp );
     186             :   }
     187             : 
     188             :   //print log
      189           4 :   log.printf("  Number of inputs: %u \n",_n_in);
      190           4 :   log.printf("  Number of outputs: %u \n",_n_out);
     191           4 :   log.printf("  Bibliography: ");
     192           8 :   log<<plumed.cite("Bonati, Trizio, Rizzi and Parrinello, J. Chem. Phys. 159, 014801 (2023)");
     193           8 :   log<<plumed.cite("Bonati, Rizzi and Parrinello, J. Phys. Chem. Lett. 11, 2998-3004 (2020)");
     194           4 :   log.printf("\n");
     195             : 
     196          12 : }
     197             : 
     198             : 
     199          44 : void PytorchModel::calculate() {
     200             : 
     201             :   // retrieve arguments
     202          44 :   vector<float> current_S(_n_in);
     203          99 :   for(unsigned i=0; i<_n_in; i++)
     204          55 :     current_S[i]=getArgument(i);
     205             :   //convert to tensor
     206          44 :   torch::Tensor input_S = torch::tensor(current_S).view({1,_n_in}).to(device);
     207             :   input_S.set_requires_grad(true);
     208             :   //convert to Ivalue
     209             :   std::vector<torch::jit::IValue> inputs;
     210          44 :   inputs.push_back( input_S );
     211             :   //calculate output
     212          88 :   torch::Tensor output = _model.forward( inputs ).toTensor();
     213             : 
     214             : 
     215          99 :   for(unsigned j=0; j<_n_out; j++) {
     216          55 :     auto grad_output = torch::ones({1}).expand({1, 1}).to(device);
     217         440 :     auto gradient = torch::autograd::grad({output.slice(/*dim=*/1, /*start=*/j, /*end=*/j+1)},
     218             :     {input_S},
     219             :     /*grad_outputs=*/ {grad_output},
     220             :     /*retain_graph=*/true,
     221             :     /*create_graph=*/false)[0]; // the [0] is to get a tensor and not a vector<at::tensor>
     222             : 
     223          55 :     vector<float> der = this->tensor_to_vector ( gradient );
     224          55 :     string name_comp = "node-"+std::to_string(j);
     225             :     //set derivatives of component j
     226         132 :     for(unsigned i=0; i<_n_in; i++)
     227          77 :       setDerivative( getPntrToComponent(name_comp),i, der[i] );
     228             :   }
     229             : 
     230             :   //set CV values
     231          44 :   vector<float> cvs = this->tensor_to_vector (output);
     232          99 :   for(unsigned j=0; j<_n_out; j++) {
     233          55 :     string name_comp = "node-"+std::to_string(j);
     234          55 :     getPntrToComponent(name_comp)->set(cvs[j]);
     235             :   }
     236             : 
     237          88 : }
     238             : 
     239             : 
     240             : } //PLMD
     241             : } //function
     242             : } //pytorch
     243             : 
     244             : #endif //PLUMED_HAS_LIBTORCH

Generated by: LCOV version 1.16