|
@@ -284,14 +284,32 @@ void PointPillars::SetDeviceMemoryToZero() {
|
|
|
}
|
|
|
|
|
|
|
|
|
+#include <fstream>
|
|
|
+#include <iostream>
|
|
|
+#
|
|
|
|
|
|
+void PointPillars::SaveEngine(nvinfer1::ICudaEngine* pengine,std::string strpath)
|
|
|
+{
|
|
|
+ nvinfer1::IHostMemory* data = pengine->serialize();
|
|
|
+ std::ofstream file;
|
|
|
+ file.open(strpath,std::ios::binary | std::ios::out);
|
|
|
+ if(!file.is_open())
|
|
|
+ {
|
|
|
+ std::cout << "read create engine file" << strpath <<" failed" << std::endl;
|
|
|
+ return;
|
|
|
+ }
|
|
|
|
|
|
+ file.write((const char*)data->data(), data->size());
|
|
|
+ file.close();
|
|
|
+}
|
|
|
|
|
|
void PointPillars::InitTRT(const bool use_onnx) {
|
|
|
if (use_onnx_) {
|
|
|
// create a TensorRT model from the onnx model and load it into an engine
|
|
|
OnnxToTRTModel(pfe_file_, &pfe_engine_);
|
|
|
+ SaveEngine(pfe_engine_,"/home/nvidia/models/lidar/cbgs_pp_multihead_pfe.trt");
|
|
|
OnnxToTRTModel(backbone_file_, &backbone_engine_);
|
|
|
+ SaveEngine(backbone_engine_,"/home/nvidia/models/lidar/cbgs_pp_multihead_backbone.trt");
|
|
|
}else {
|
|
|
EngineToTRTModel(pfe_file_, &pfe_engine_);
|
|
|
EngineToTRTModel(backbone_file_, &backbone_engine_);
|
|
@@ -345,6 +363,59 @@ void PointPillars::OnnxToTRTModel(
|
|
|
builder->destroy();
|
|
|
}
|
|
|
|
|
|
// NOTE(review): Dead stub — the entire body below is commented out, so this
// function currently does NOTHING and silently ignores both arguments; engine
// deserialization is actually done by EngineToTRTModel(). Either implement
// this (deserialize `engine_file` into *engine_ptr) or remove it together
// with its declaration. The commented code mixes two earlier attempts — a
// member-based version (mTrtRunTime/mTrtEngine/mTrtPluginFactory) and a
// local-runtime version — and references names that are never declared here
// (mTrtPluginFactory, modelMem, modelSize), so it would not compile as-is.
void PointPillars::LoadEngineModel(const string &engine_file, nvinfer1::ICudaEngine **engine_ptr)
{
// using namespace std;
// fstream file;

// file.open(engine_file,ios::binary | ios::in);
// if(!file.is_open())
// {
// cout << "read engine file" << engine_file <<" failed" << endl;
// return;
// }
// file.seekg(0, ios::end);
// int length = file.tellg();
// file.seekg(0, ios::beg);
// std::unique_ptr<char[]> data(new char[length]);
// file.read(data.get(), length);

// file.close();

// std::cout << "deserializing" << std::endl;
// mTrtRunTime = createInferRuntime(gLogger);
// assert(mTrtRunTime != nullptr);
// mTrtEngine= mTrtRunTime->deserializeCudaEngine(data.get(), length, &mTrtPluginFactory);
// assert(mTrtEngine != nullptr);

// nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(g_logger_);

// if (runtime == nullptr) {
// std::string msg("failed to build runtime parser");
// g_logger_.log(nvinfer1::ILogger::Severity::kERROR, msg.c_str());
// exit(EXIT_FAILURE);
// }



// std::cout << " "<< std::endl;
// std::cout << "------------------------------------------------------------------"<< std::endl;
// std::cout << ">>>> >>>>"<< std::endl;
// std::cout << " "<< std::endl;
// std::cout << "Input filename: " << engine_file << std::endl;
// std::cout << " "<< std::endl;
// std::cout << ">>>> >>>>"<< std::endl;
// std::cout << "------------------------------------------------------------------"<< std::endl;
// std::cout << " "<< std::endl;

// nvinfer1::ICudaEngine* engine = runtime->deserializeCudaEngine(modelMem, modelSize, NULL);
// if (engine == nullptr) {
// std::string msg("failed to build engine parser");
// g_logger_.log(nvinfer1::ILogger::Severity::kERROR, msg.c_str());
// exit(EXIT_FAILURE);
// }
// *engine_ptr = engine;
}
|
|
|
|
|
|
void PointPillars::EngineToTRTModel(
|
|
|
const std::string &engine_file ,
|