From 2237f1492941343961a9a92a06f0b1e7df31e862 Mon Sep 17 00:00:00 2001
From: afanto <85966358+afanto@users.noreply.github.com>
Date: Tue, 24 Aug 2021 11:40:33 -0700
Subject: [PATCH 1/5] Add RNN benchmarks for TMVA and Keras

---
 root/tmva/tmva/RNNBenchmarks.h       | 377 +++++++++++++++++++++++++++
 root/tmva/tmva/RNNCpuBenchmarks.cxx  |  28 ++
 root/tmva/tmva/RNNCudaBenchmarks.cxx |  28 ++
 3 files changed, 433 insertions(+)
 create mode 100644 root/tmva/tmva/RNNBenchmarks.h
 create mode 100644 root/tmva/tmva/RNNCpuBenchmarks.cxx
 create mode 100644 root/tmva/tmva/RNNCudaBenchmarks.cxx

diff --git a/root/tmva/tmva/RNNBenchmarks.h b/root/tmva/tmva/RNNBenchmarks.h
new file mode 100644
index 000000000..c3784340a
--- /dev/null
+++ b/root/tmva/tmva/RNNBenchmarks.h
@@ -0,0 +1,377 @@
+#include "TMVA/Factory.h"
+#include "TMVA/DataLoader.h"
+#include "TMVA/DataSetInfo.h"
+#include "TMVA/Config.h"
+#include "TMVA/MethodDL.h"
+#include "TMVA/PyMethodBase.h"
+#include "TROOT.h"
+#include "TFile.h"
+#include "TTree.h"
+#include "TMacro.h"
+#include "TSystem.h"
+#include "TH1.h"
+#include "TF1.h"
+#include "TRandom.h"
+#include "TPad.h"
+#include "TCanvas.h"
+
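+// Generate a toy time-series dataset: n events with ntime time steps of ndim
+// values each, written to signal ("sgn") and background ("bkg") trees.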
+void MakeTimeData(int n, int ntime, int ndim) {
+   TString fname = TString::Format("time_data_t%d_d%d.root", ntime, ndim);
+   
+   std::vector<TH1 *> v1(ntime);
+   std::vector<TH1 *> v2(ntime);
+
+   for (int i = 0; i < ntime; ++i) {
+      v1[i] = new TH1D(TString::Format("h1_%d", i), "h1", ndim, 0, 10);
+      v2[i] = new TH1D(TString::Format("h2_%d", i), "h2", ndim, 0, 10);
+   }
+
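+   // Gaussian shapes whose mean and width drift slowly with the time step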
+   auto f1 = new TF1("f1", "gaus");
+   auto f2 = new TF1("f2", "gaus");
+
+   TTree sgn("sgn", "sgn");
+   TTree bkg("bkg", "bkg");
+   TFile f(fname, "RECREATE");
+
+   std::vector<std::vector<float>> x1(ntime);
+   std::vector<std::vector<float>> x2(ntime);
+
+   for (int i = 0; i < ntime; ++i) {
+      x1[i] = std::vector<float>(ndim);
+      x2[i] = std::vector<float>(ndim);
+   }
+
+   for (auto i = 0; i < ntime; i++) {
+      bkg.Branch(Form("vars_time%d", i), "std::vector<float>", &x1[i]);
+      sgn.Branch(Form("vars_time%d", i), "std::vector<float>", &x2[i]);
+   }
+
+   sgn.SetDirectory(&f);
+   bkg.SetDirectory(&f);
+   gRandom->SetSeed(0);
+
+   std::vector<double> mean1(ntime);
+   std::vector<double> mean2(ntime);
+   std::vector<double> sigma1(ntime);
+   std::vector<double> sigma2(ntime);
+   
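+   // signal and background differ only by a sin vs cos modulation of the mean and width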
+   for (int j = 0; j < ntime; ++j) {
+      mean1[j] = 5. + 0.2 * sin(TMath::Pi() * j / double(ntime));
+      mean2[j] = 5. + 0.2 * cos(TMath::Pi() * j / double(ntime));
+      sigma1[j] = 4 + 0.3 * sin(TMath::Pi() * j / double(ntime));
+      sigma2[j] = 4 + 0.3 * cos(TMath::Pi() * j / double(ntime));
+   }
+   
+   for (int i = 0; i < n; ++i) {
+      if (i % 1000 == 0)
+         std::cout << "Generating  event ... " << i << std::endl;
+
+      for (int j = 0; j < ntime; ++j) {
+         auto h1 = v1[j];
+         auto h2 = v2[j];
+         h1->Reset();
+         h2->Reset();
+
+         f1->SetParameters(1, mean1[j], sigma1[j]);
+         f2->SetParameters(1, mean2[j], sigma2[j]);
+
+         h1->FillRandom("f1", 1000);
+         h2->FillRandom("f2", 1000);
+
+         for (int k = 0; k < ndim; ++k) {
+            x1[j][k] = h1->GetBinContent(k + 1) + gRandom->Gaus(0, 10);
+            x2[j][k] = h2->GetBinContent(k + 1) + gRandom->Gaus(0, 10);
+         }
+      }
+
+      sgn.Fill();
+      bkg.Fill();
+
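+      // when generating a single event, draw the histograms for a visual check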
+      if (n == 1) {
+         auto c1 = new TCanvas();
+         c1->Divide(ntime, 2);
+         
+         for (int j = 0; j < ntime; ++j) {
+            c1->cd(j + 1);
+            v1[j]->Draw();
+         }
+         
+         for (int j = 0; j < ntime; ++j) {
+            c1->cd(ntime + j + 1);
+            v2[j]->Draw();
+         }
+         
+         gPad->Update();
+      }
+   }
+   
+   if (n > 1) {
+      sgn.Write();
+      bkg.Write();
+      sgn.Print();
+      bkg.Print();
+      f.Close();
+   }
+}
+
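+// Benchmark the TMVA deep-learning LSTM: build, train and evaluate a
+// classifier on the toy time-series data with the requested architecture.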
+void RNN_benchmark(TString archName) {
+   const int ninput = 30;
+   const int ntime = 10;
+   const int batchSize = 100;
+   const int maxepochs = 20;
+
+   int nTotEvts = 10000; 
+   
+#ifndef R__HAS_TMVAGPU
+   if (archName == "GPU") {
+      Error("TMVA_RNN_Classification", "GPU architecture is not supported: cannot use TMVA Deep Learning for RNN.");
+      return;
+   }
+#endif
+#ifndef R__HAS_TMVACPU
+   if (archName == "CPU") {
+      Error("TMVA_RNN_Classification", "CPU architecture is not supported: cannot use TMVA Deep Learning for RNN.");
+      return;
+   }
+#endif
+
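+   // num_threads = 0 lets ROOT choose the default thread-pool size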
+   int num_threads = 0;   
+   ROOT::EnableImplicitMT(num_threads);
+
+   TMVA::Config::Instance();
+
+   std::cout << "Running with nthreads  = " << ROOT::GetThreadPoolSize() << std::endl;
+
+   TString inputFileName = "time_data_t10_d30.root";
+
+   bool fileExist = !gSystem->AccessPathName(inputFileName);
+
+   if (!fileExist)
+      MakeTimeData(nTotEvts, ntime, ninput);
+
+   auto inputFile = TFile::Open(inputFileName);
+   if (!inputFile) {
+      Error("TMVA_RNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
+      return;
+   }
+   std::cout << "--- RNNClassification  : Using input file: " << inputFile->GetName() << std::endl;
+
+   TString outfileName(TString::Format("data_RNN_%s.root", archName.Data()));
+   TFile *outputFile = TFile::Open(outfileName, "RECREATE");
+
+   // Creating the factory object
+   TMVA::Factory *factory = new TMVA::Factory("TMVAClassification", outputFile,
+                                              "!V:!Silent:Color:DrawProgressBar:Transformations=None:!Correlations:"
+                                              "AnalysisType=Classification:ModelPersistence");
+   TMVA::DataLoader *dataloader = new TMVA::DataLoader("dataset");
+
+   TTree *signalTree = (TTree *)inputFile->Get("sgn");
+   TTree *background = (TTree *)inputFile->Get("bkg");
+
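+   // register one input array of size ninput for each time step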
+   for (auto i = 0; i < ntime; i++) {
+      dataloader->AddVariablesArray(Form("vars_time%d", i), ninput);
+   }
+
+   dataloader->AddSignalTree(signalTree, 1.0);
+   dataloader->AddBackgroundTree(background, 1.0);
+
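+   // use 80% of the events for training, the rest for testing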
+   int nTrainSig = 0.8 * nTotEvts;
+   int nTrainBkg = 0.8 *  nTotEvts;
+
+   TString prepareOptions = TString::Format("nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations", nTrainSig, nTrainBkg);
+
+   TCut mycuts = ""; 
+   TCut mycutb = "";
+
+   dataloader->PrepareTrainingAndTestTree(mycuts, mycutb, prepareOptions);
+
+   std::cout << "prepared DATA LOADER " << std::endl;
+   
+   const char *rnn_type = "LSTM";
+
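+   // InputLayout is (time steps x inputs per step); the recurrent layer string is
+   // type|statesize|inputsize|timesteps|rememberState|returnFullSequence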
+   TString inputLayoutString = TString::Format("InputLayout=%d|%d", ntime, ninput);
+   TString rnnLayout = TString::Format("%s|10|%d|%d|0|1", rnn_type, ninput, ntime);
+   TString layoutString = TString("Layout=") + rnnLayout + TString(",RESHAPE|FLAT,DENSE|64|TANH,LINEAR");
+
+   TString trainingString1 = TString::Format("LearningRate=1e-3,Momentum=0.0,Repetitions=1,"
+                                             "ConvergenceSteps=5,BatchSize=%d,TestRepetitions=1,"
+                                             "WeightDecay=1e-2,Regularization=None,MaxEpochs=%d,"
+                                             "Optimizer=ADAM,DropConfig=0.0+0.+0.+0.", batchSize, maxepochs);
+
+   TString trainingStrategyString("TrainingStrategy=");
+   trainingStrategyString += trainingString1; 
+
+   TString rnnOptions("!H:V:ErrorStrategy=CROSSENTROPY:VarTransform=None:"
+                      "WeightInitialization=XAVIERUNIFORM:ValidationSize=0.2:RandomSeed=1234");
+
+   rnnOptions.Append(":");
+   rnnOptions.Append(inputLayoutString);
+   rnnOptions.Append(":");
+   rnnOptions.Append(layoutString);
+   rnnOptions.Append(":");
+   rnnOptions.Append(trainingStrategyString);
+   rnnOptions.Append(":");
+   rnnOptions.Append(TString::Format("Architecture=%s", archName.Data()));
+
+   TString rnnName = "TMVA_" + TString(rnn_type);
+   factory->BookMethod(dataloader, TMVA::Types::kDL, rnnName, rnnOptions);
+
+   factory->TrainAllMethods();
+   factory->TestAllMethods();
+   factory->EvaluateAllMethods();
+
+   if (outputFile)
+      outputFile->Close();
+
+   // delete the factory and data loader to avoid leaking across benchmark iterations
+   delete factory;
+   delete dataloader;
+}
+
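+// Benchmark the equivalent Keras LSTM model, trained through the TMVA
+// PyKeras interface on the same toy time-series data.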
+void RNN_Keras_benchmark(TString archName) {
+   const int ninput = 30;
+   const int ntime = 10;
+   const int batchSize = 100;
+   const int maxepochs = 20;
+
+   int nTotEvts = 10000; 
+
+#ifndef R__HAS_TMVAGPU
+   if (archName == "GPU") {
+      Error("TMVA_RNN_Classification", "GPU architecture is not supported: cannot use TMVA Deep Learning for RNN.");
+      return;
+   }
+#endif
+#ifndef R__HAS_TMVACPU
+   if (archName == "CPU") {
+      Error("TMVA_RNN_Classification", "CPU architecture is not supported: cannot use TMVA Deep Learning for RNN.");
+      return;
+   }
+#endif
+
+#ifdef R__HAS_PYMVA
+   TMVA::PyMethodBase::PyInitialize();
+#else
+   Error("TMVA_RNN_Classification", "Cannot use Keras.");
+   return;
+#endif
+
+   int num_threads = 0;   
+   ROOT::EnableImplicitMT(num_threads);
+
+   TMVA::Config::Instance();
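+   // hide the GPUs from TensorFlow when benchmarking the CPU architecture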
+   if (archName == "CPU") gSystem->Setenv("CUDA_VISIBLE_DEVICES","-1");
+   	
+   std::cout << "Running with nthreads  = " << ROOT::GetThreadPoolSize() << std::endl;
+
+   TString inputFileName = "time_data_t10_d30.root";
+
+   bool fileExist = !gSystem->AccessPathName(inputFileName);
+
+   if (!fileExist)
+      MakeTimeData(nTotEvts, ntime, ninput);
+
+   auto inputFile = TFile::Open(inputFileName);
+   if (!inputFile) {
+      Error("TMVA_RNN_Classification", "Error opening input file %s - exit", inputFileName.Data());
+      return;
+   }
+   std::cout << "--- RNNClassification  : Using input file: " << inputFile->GetName() << std::endl;
+
+   TString outfileName(TString::Format("data_RNN_%s.root", archName.Data()));
+   TFile *outputFile = TFile::Open(outfileName, "RECREATE");
+
+   // Creating the factory object
+   TMVA::Factory *factory = new TMVA::Factory("TMVAClassification", outputFile,
+                                              "!V:!Silent:Color:DrawProgressBar:Transformations=None:!Correlations:"
+                                              "AnalysisType=Classification:ModelPersistence");
+   TMVA::DataLoader *dataloader = new TMVA::DataLoader("dataset");
+
+   TTree *signalTree = (TTree *)inputFile->Get("sgn");
+   TTree *background = (TTree *)inputFile->Get("bkg");
+
+   for (auto i = 0; i < ntime; i++) {
+      dataloader->AddVariablesArray(Form("vars_time%d", i), ninput);
+   }
+
+   dataloader->AddSignalTree(signalTree, 1.0);
+   dataloader->AddBackgroundTree(background, 1.0);
+
+   int nTrainSig = 0.8 * nTotEvts;
+   int nTrainBkg = 0.8 *  nTotEvts;
+
+   TString prepareOptions = TString::Format("nTrain_Signal=%d:nTrain_Background=%d:SplitMode=Random:SplitSeed=100:NormMode=NumEvents:!V:!CalcCorrelations", nTrainSig, nTrainBkg);
+
+   TCut mycuts = ""; 
+   TCut mycutb = "";
+
+   dataloader->PrepareTrainingAndTestTree(mycuts, mycutb, prepareOptions);
+
+   std::cout << "prepared DATA LOADER " << std::endl;
+   
+   const char *rnn_type = "LSTM";
+
+   TString modelName = TString::Format("model_%s.h5", rnn_type);
+   TString trainedModelName = TString::Format("trained_model_%s.h5", rnn_type);
+
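+   // write a small Python script that builds and saves the Keras model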
+   TMacro m;
+   m.AddLine("import tensorflow");
+   m.AddLine("from tensorflow.keras.models import Sequential");
+   m.AddLine("from tensorflow.keras.optimizers import Adam");
+   m.AddLine("from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, SimpleRNN, GRU, LSTM, Reshape");
+   m.AddLine("");
+   m.AddLine("model = Sequential() ");
+   m.AddLine("model.add(Reshape((10, 30), input_shape = (10*30, )))");
+   m.AddLine("model.add(LSTM(units=10, return_sequences=True) )");
+   m.AddLine("model.add(Flatten())"); 
+   m.AddLine("model.add(Dense(64, activation = 'tanh')) ");
+   m.AddLine("model.add(Dense(2, activation = 'sigmoid')) ");
+   m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(lr = 0.001), metrics = ['accuracy'])");
+   m.AddLine(TString::Format("modelName = '%s'", modelName.Data()));
+   m.AddLine("model.save(modelName)");
+   m.AddLine("model.summary()");
+
+   m.SaveSource("make_rnn_model.py");
+   gSystem->Exec("python make_rnn_model.py");
+   
+   Info("TMVA_RNN_Classification", "Booking Keras %s model", rnn_type);
+   factory->BookMethod(dataloader, TMVA::Types::kPyKeras,
+                       TString::Format("PyKeras_%s", rnn_type),
+                       TString::Format("!H:!V:VarTransform=None:FilenameModel=%s:tf.keras:"
+                                       "FilenameTrainedModel=%s:GpuOptions=allow_growth=True:"
+                                       "NumEpochs=%d:BatchSize=%d",
+                                       modelName.Data(), trainedModelName.Data(), maxepochs, batchSize)
+   );
+
+   factory->TrainAllMethods();
+   factory->TestAllMethods();
+   factory->EvaluateAllMethods();
+
+   if (outputFile)
+      outputFile->Close();
+
+   // delete the factory and data loader to avoid leaking across benchmark iterations
+   delete factory;
+   delete dataloader;
+}
+
diff --git a/root/tmva/tmva/RNNCpuBenchmarks.cxx b/root/tmva/tmva/RNNCpuBenchmarks.cxx
new file mode 100644
index 000000000..45f374506
--- /dev/null
+++ b/root/tmva/tmva/RNNCpuBenchmarks.cxx
@@ -0,0 +1,28 @@
+#include "RNNBenchmarks.h"
+#include "benchmark/benchmark.h"
+
+static void BM_RNN_CPU(benchmark::State &state)
+{
+    TString architecture("CPU");
+    
+    // Benchmarking
+    for (auto _ : state) {
+        RNN_benchmark(architecture);
+    }
+}
+BENCHMARK(BM_RNN_CPU);
+
+static void BM_RNN_Keras_CPU(benchmark::State &state)
+{
+    TString architecture("CPU");
+    
+    // Benchmarking
+    for (auto _ : state) {
+        RNN_Keras_benchmark(architecture);
+    }
+}
+BENCHMARK(BM_RNN_Keras_CPU);
+
+BENCHMARK_MAIN();
diff --git a/root/tmva/tmva/RNNCudaBenchmarks.cxx b/root/tmva/tmva/RNNCudaBenchmarks.cxx
new file mode 100644
index 000000000..0f477d751
--- /dev/null
+++ b/root/tmva/tmva/RNNCudaBenchmarks.cxx
@@ -0,0 +1,28 @@
+#include "RNNBenchmarks.h"
+#include "benchmark/benchmark.h"
+
+static void BM_RNN_CUDA(benchmark::State &state)
+{
+    TString architecture("GPU");
+    
+    // Benchmarking
+    for (auto _ : state) {
+        RNN_benchmark(architecture);
+    }
+}
+BENCHMARK(BM_RNN_CUDA);
+
+static void BM_RNN_Keras_CUDA(benchmark::State &state)
+{
+    TString architecture("GPU");
+    
+    // Benchmarking
+    for (auto _ : state) {
+        RNN_Keras_benchmark(architecture);
+    }
+}
+BENCHMARK(BM_RNN_Keras_CUDA);
+
+BENCHMARK_MAIN();

From 6779a3d00a90bea184873034ada43c126c09ac52 Mon Sep 17 00:00:00 2001
From: afanto <85966358+afanto@users.noreply.github.com>
Date: Tue, 24 Aug 2021 11:43:05 -0700
Subject: [PATCH 2/5] Add ConvNet Keras benchmark

---
 root/tmva/tmva/ConvNetBenchmarks.h | 128 ++++++++++++++++++++++++++++++-
 1 file changed, 127 insertions(+), 1 deletion(-)

diff --git a/root/tmva/tmva/ConvNetBenchmarks.h b/root/tmva/tmva/ConvNetBenchmarks.h
index 43e367a3a..799683533 100644
--- a/root/tmva/tmva/ConvNetBenchmarks.h
+++ b/root/tmva/tmva/ConvNetBenchmarks.h
@@ -1,9 +1,12 @@
 #include "TMVA/Factory.h"
 #include "TMVA/DataLoader.h"
 #include "TMVA/Config.h"
+#include "TMVA/PyMethodBase.h"
 #include "TMVA/DataSetInfo.h"
 #include "TFile.h"
 #include "TTree.h"
+#include "TMacro.h"
+#include "TSystem.h"
 #include "MakeImageData.h"
 
 void CNN_benchmark(TString archName) {
@@ -115,4 +118,127 @@ void CNN_benchmark(TString archName) {
    factory->TestAllMethods();
 
    outputFile->Close();
-}
\ No newline at end of file
+}
+
+
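+// Train and test an equivalent convolutional Keras model through the TMVA
+// PyKeras interface on the same Gaussian-image dataset.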
+void CNN_Keras_benchmark(TString archName) {
+
+	int ntrainEvts = 500;
+	int ntestEvts = 500;
+
+	size_t nx = 32;
+	size_t ny = 32;
+
+	ROOT::EnableImplicitMT(0);
+	TMVA::Config::Instance();
+
+	// for using Keras
+	if (archName == "CPU") gSystem->Setenv("CUDA_VISIBLE_DEVICES","-1");
+	gSystem->Setenv("KERAS_BACKEND", "tensorflow");
+	TMVA::PyMethodBase::PyInitialize();
+
+	// Load the input data
+	TString fname = "imagesData.root";
+	TString fopt = "CACHEREAD";
+
+	// Make some Gaussian Images.
+	makeImages(ntrainEvts + ntestEvts, nx, ny);
+
+	auto input = TFile::Open(fname, fopt);
+
+	R__ASSERT(input);
+
+	std::cout << "--- Classification  : Using input file: " << input->GetName() << std::endl;
+
+	// Create a ROOT output file where TMVA will store ntuples, histograms, etc.
+	TString outfileName( "output.root" );
+	TFile* outputFile = TFile::Open(outfileName, "RECREATE");
+
+	// Creating the factory object
+	TMVA::Factory *factory = new TMVA::Factory( "TMVAClassification", outputFile,
+		                                       "!Correlations:!V:!Silent:Color:DrawProgressBar:"
+		                                       "AnalysisType=Classification:!ModelPersistence:Transformations=None" );
+	TMVA::DataLoader *dataloader = new TMVA::DataLoader("dataset_cnn_ecal");
+
+	TTree *signalTree = (TTree*)input->Get("sgn");
+	TTree *background = (TTree*)input->Get("bkg");
+
+	// add one input variable per image pixel (nx * ny in total)
+	for (size_t j = 0; j < nx * ny; ++j) {
+	  TString varName = TString::Format("var%zu", j);
+	  dataloader->AddVariable(varName, 'F');
+	}
+
+	dataloader->AddSignalTree    ( signalTree, 1.0 );
+	dataloader->AddBackgroundTree( background, 1.0 );
+
+	// check given input
+	auto & datainfo = dataloader->GetDataSetInfo();
+	auto vars = datainfo.GetListOfVariables();
+	std::cout << "number of variables is " << vars.size() << std::endl;
+
+	TString trainAndTestOpt = TString::Format("nTrain_Signal=%d:nTrain_Background=%d:nTest_Signal=%d:nTest_Background=%d:"
+		                                     "SplitMode=Random:NormMode=NumEvents:!V",
+		                                     ntrainEvts, ntrainEvts, ntestEvts, ntestEvts);
+	TCut mycuts = "";
+	TCut mycutb = "";
+	dataloader->PrepareTrainingAndTestTree(mycuts, mycutb, trainAndTestOpt);
+
+	TMVA::MsgLogger::InhibitOutput();
+	dataloader->GetDefaultDataSetInfo().GetDataSet();
+	TMVA::MsgLogger::EnableOutput();
+
+	std::cout << "prepared DATA LOADER " << std::endl;
+
+	Info("TMVA_CNN_Classification", "Building convolutional keras model");
+
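+	// write a small Python script that builds and saves the Keras CNN model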
+	TMacro m;
+	m.AddLine("import tensorflow");
+	m.AddLine("from tensorflow.keras.models import Sequential");
+	m.AddLine("from tensorflow.keras.optimizers import Adam");
+	m.AddLine("from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Reshape");
+	m.AddLine("");
+	m.AddLine("model = Sequential() ");
+	m.AddLine("model.add(Reshape((32, 32, 1), input_shape = (1024, )))");
+	m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
+		"'relu', padding = 'same'))");
+	m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
+		"'relu', padding = 'same'))");
+	m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2))) ");
+
+	m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
+		"'relu', padding = 'same'))");
+	m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
+		"'relu', padding = 'same'))");
+	m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2))) ");
+
+	m.AddLine("model.add(Flatten())");
+	m.AddLine("model.add(Dense(64, activation = 'relu')) ");
+	m.AddLine("model.add(Dense(32, activation = 'relu')) ");
+	m.AddLine("model.add(Dense(1, activation = 'linear')) ");
+
+	m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(lr = 0.001), metrics = ['accuracy'])");
+	m.AddLine("model.save('model_cnn.h5')");
+	m.AddLine("model.summary()");
+
+	m.SaveSource("make_cnn_model.py");
+	gSystem->Exec("python make_cnn_model.py");
+
+	factory->BookMethod(
+	dataloader, TMVA::Types::kPyKeras, "PyKeras",
+	"H:!V:VarTransform=None:FilenameModel=model_cnn.h5:tf.keras:"
+	"FilenameTrainedModel=trained_model_cnn.h5:NumEpochs=10:BatchSize=32:"
+	"GpuOptions=allow_growth=True"); 
+
+	factory->TrainAllMethods();
+	factory->TestAllMethods();
+
+	outputFile->Close();
+
+	// delete the factory and data loader to avoid leaking across benchmark iterations
+	delete factory;
+	delete dataloader;
+}

From 9881dfdf86f0d018fc31eaa9f16caab748da9540 Mon Sep 17 00:00:00 2001
From: afanto <85966358+afanto@users.noreply.github.com>
Date: Tue, 24 Aug 2021 11:45:06 -0700
Subject: [PATCH 3/5] Add Keras CNN benchmark to ConvNetCpuBenchmarks.cxx

---
 root/tmva/tmva/ConvNetCpuBenchmarks.cxx | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/root/tmva/tmva/ConvNetCpuBenchmarks.cxx b/root/tmva/tmva/ConvNetCpuBenchmarks.cxx
index 26bf94ce1..ffc8d7af2 100644
--- a/root/tmva/tmva/ConvNetCpuBenchmarks.cxx
+++ b/root/tmva/tmva/ConvNetCpuBenchmarks.cxx
@@ -12,4 +12,15 @@ static void BM_ConvolutionalNetwork_CPU(benchmark::State &state)
 }
 BENCHMARK(BM_ConvolutionalNetwork_CPU);
 
-BENCHMARK_MAIN();
\ No newline at end of file
+static void BM_ConvolutionalNetwork_Keras_CPU(benchmark::State &state)
+{
+    TString architecture("CPU");
+
+    // Benchmarking
+    for (auto _ : state) {
+        CNN_Keras_benchmark(architecture);
+    }
+}
+BENCHMARK(BM_ConvolutionalNetwork_Keras_CPU);
+
+BENCHMARK_MAIN();

From e42eeca102515847db28d331b8fdb4985e7135f7 Mon Sep 17 00:00:00 2001
From: afanto <85966358+afanto@users.noreply.github.com>
Date: Tue, 24 Aug 2021 11:45:55 -0700
Subject: [PATCH 4/5] Add Keras CNN benchmark to ConvNetCudaBenchmarks.cxx

---
 root/tmva/tmva/ConvNetCudaBenchmarks.cxx | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/root/tmva/tmva/ConvNetCudaBenchmarks.cxx b/root/tmva/tmva/ConvNetCudaBenchmarks.cxx
index cbbcff827..1145b6242 100644
--- a/root/tmva/tmva/ConvNetCudaBenchmarks.cxx
+++ b/root/tmva/tmva/ConvNetCudaBenchmarks.cxx
@@ -12,4 +12,15 @@ static void BM_ConvolutionalNetwork_CUDA(benchmark::State &state)
 }
 BENCHMARK(BM_ConvolutionalNetwork_CUDA);
 
+static void BM_ConvolutionalNetwork_Keras_CUDA(benchmark::State &state)
+{
+    TString architecture("GPU");
+
+    // Benchmarking
+    for (auto _ : state) {
+        CNN_Keras_benchmark(architecture);
+    }
+}
+BENCHMARK(BM_ConvolutionalNetwork_Keras_CUDA);
+
 BENCHMARK_MAIN();

From 46aa334135901e11d08d8a5fd05fffa54cb0f99b Mon Sep 17 00:00:00 2001
From: afanto <85966358+afanto@users.noreply.github.com>
Date: Tue, 24 Aug 2021 11:47:37 -0700
Subject: [PATCH 5/5] Add RNN benchmark targets and link PyMVA in CMakeLists.txt

---
 root/tmva/tmva/CMakeLists.txt | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/root/tmva/tmva/CMakeLists.txt b/root/tmva/tmva/CMakeLists.txt
index a55abf4fc..b7e80f393 100644
--- a/root/tmva/tmva/CMakeLists.txt
+++ b/root/tmva/tmva/CMakeLists.txt
@@ -9,7 +9,7 @@ if(ROOT_tmva_FOUND AND ROOT_tmva-cpu_FOUND AND ROOT_imt_FOUND)
    RB_ADD_GBENCHMARK(ConvNetCpuBenchmarks
       ConvNetCpuBenchmarks.cxx
       LABEL short
-      LIBRARIES Core Tree MathCore RIO Hist TMVA)
+      LIBRARIES Core Tree MathCore RIO Hist TMVA PyMVA)
 endif()
 
 if (ROOT_cuda_FOUND AND ROOT_tmva-gpu_FOUND)
@@ -17,5 +17,20 @@ if (ROOT_cuda_FOUND AND ROOT_tmva-gpu_FOUND)
    RB_ADD_GBENCHMARK(ConvNetCudaBenchmarks
       ConvNetCudaBenchmarks.cxx
       LABEL short
-      LIBRARIES Core Tree MathCore RIO Hist TMVA ${DNN_CUDA_LIBRARIES})
+      LIBRARIES Core Tree MathCore RIO Hist TMVA PyMVA ${DNN_CUDA_LIBRARIES})
+endif()
+
+if(ROOT_tmva_FOUND AND ROOT_tmva-cpu_FOUND AND ROOT_imt_FOUND)
+   RB_ADD_GBENCHMARK(RNNCpuBenchmarks
+      RNNCpuBenchmarks.cxx
+      LABEL short
+      LIBRARIES Core Tree MathCore Gpad RIO Hist TMVA PyMVA)
+endif()
+
+if (ROOT_cuda_FOUND AND ROOT_tmva-gpu_FOUND)
+   set(DNN_CUDA_LIBRARIES ${CUDA_CUBLAS_LIBRARIES} ${CUDNN_LIBRARIES})
+   RB_ADD_GBENCHMARK(RNNCudaBenchmarks
+      RNNCudaBenchmarks.cxx
+      LABEL short
+      LIBRARIES Core Tree MathCore Gpad RIO Hist TMVA PyMVA ${DNN_CUDA_LIBRARIES})
 endif()