@@ -0,0 +1,673 @@
+#pragma execution_character_set("utf-8")
+#include "grabframethread.h"
+#include <QDebug>
+#include <QImage>
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/opencv.hpp>
+
+#include "NvInfer.h"
+#include "cuda_runtime_api.h"
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <vector>
+#include <chrono>
+#include <cmath>
+#include <cassert>
+#include <algorithm>
+#include <calibrator.h>
+
+
+int frame_count = 0;
+
+#define CHECK(status) \
+    do\
+    {\
+        auto ret = (status);\
+        if (ret != 0)\
+        {\
+            std::cerr << "Cuda failure: " << ret << std::endl;\
+            abort();\
+        }\
+    } while (0)
+
+//static Logger gLogger;
+// TensorRT logger: prints builder/runtime messages of warning severity or higher
+class Logger : public ILogger
+{
+    void log(Severity severity, const char* msg) noexcept override
+    {
+        // suppress info-level messages
+        if (severity <= Severity::kWARNING)
+            std::cout << msg << std::endl;
+    }
+} gLogger;
+
+
+GrabFrameThread::GrabFrameThread(QObject *parent) : QObject(parent)
+{
+
+}
+
+void GrabFrameThread::setFrameResolution(int w, int h)
+{
+    qDebug() << tr("设置分辨率:%1*%2").arg(w).arg(h);
+    if(!g_cap.set(cv::CAP_PROP_FRAME_WIDTH,w)){
+        qDebug() << tr("设置帧宽失败");
+        emit signal_ErrGrabFrameThread(2); // 2 = failed to set the frame resolution
+    }
+    if(!g_cap.set(cv::CAP_PROP_FRAME_HEIGHT,h)){
+        qDebug() << tr("设置帧高失败");
+        emit signal_ErrGrabFrameThread(2); // 2 = failed to set the frame resolution
+    }
+
+    std::cout<<"size: "<<g_cap.get(cv::CAP_PROP_FRAME_HEIGHT)<<std::endl;
+}
+
+void GrabFrameThread::setParameter(float conf, float nms)
+{
+    qDebug() << tr("设置检测参数:%1*%2").arg(conf).arg(nms);
+    conf_thr = conf;
+    nms_thr = nms;
+}
+
+
+void GrabFrameThread::startDetect()
+{
+    qDebug() << tr("打开检测");
+    detect_flag = true;
+}
+
+void GrabFrameThread::closeDetect()
+{
+    qDebug() << tr("关闭检测");
+    detect_flag = false;
+}
+
+void GrabFrameThread::destroyEngine()
+{
+    qDebug() << tr("销毁engine");
+    context->destroy();
+    engine->destroy();
+    runtime->destroy();
+}
+
+void GrabFrameThread::setfp16(bool flag)
+{
+    usefp16 = flag;
+}
+void GrabFrameThread::setint8(bool flag)
+{
+    useint8 = flag;
+}
+
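+// openCamera opens the requested capture device and also selects the engine file that
+// matches the configured precision (FP16 / INT8 / FP32); if that engine is not found on
+// disk it is built from the ONNX model via get_trtengine() and then loaded again.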
+void GrabFrameThread::openCamera(int camID)
+{
+    qDebug() << tr("打开摄像头%1").arg(camID);
+    if(!g_cap.isOpened()) {
+        if(!g_cap.open(camID))
+        {
+            qDebug() << tr("打开摄像头失败");
+            emit signal_ErrGrabFrameThread(1); // 1 = failed to open the camera
+        }
+    } else {
+        qDebug() << tr("摄像头处于打开状态");
+    }
+
+
+    if(usefp16)
+        engine_path = "./weight_fp16.engine";
+    else if(useint8)
+        engine_path = "./weight_int8.engine";
+    else engine_path = "./weight_fp32.engine";
+    if(!LoadEngine(engine_path))
+    {
+        cout<<"Build engine to "<< engine_path <<endl;
+        get_trtengine();
+        cout << "Build engine done!"<<endl;
+        cout<<"Reload engine from "<< engine_path <<endl;
+        LoadEngine(engine_path);
+    }
+
+}
+
+void GrabFrameThread::closeCamera()
+{
+    qDebug() << tr("关闭摄像头");
+    if(g_cap.isOpened())
+        g_cap.release();
+}
+
+void GrabFrameThread::init()
+{
+    qDebug() << tr("抓帧线程初始化");
+}
+
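+// refreshFrame is driven by the GUI timer: it grabs a frame, optionally runs TensorRT
+// detection followed by od::TrackObstacle tracking, picks each track's color by majority
+// vote over its recent class history, draws the boxes and emits the result as a QImage.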
+void GrabFrameThread::refreshFrame()
+{
+    // Triggered by the main-thread timer timeout: display a new frame
+    cv::Mat frame;
+    if(g_cap.read(frame)){
+
+        // Debug: overwrite the camera frame with a fixed test image
+        cv::Mat readimage = cv::imread("/home/nvidia/红绿灯测试/002740.png");
+        cv::resize(readimage,frame,cv::Size(frame.cols,frame.rows));
+        if(detect_flag)
+        {
+            vector<Detection> results;
+            vector<Detection> results_track;
+            //=============== infer ===========
+            cv::Mat testimage = cv::imread("/home/nvidia/红绿灯测试/003018.png");
+
+            infer(testimage,results);
+
+            od::bbox_t bbox_t_90; // convert to the tracking input format
+            vector<od::bbox_t> outs_90;
+            for (int i = 0; i < results.size(); i++)
+            {
+                //------------- check whether the traffic light is horizontal: width=(x1-x2), height=(y1-y2) -----------
+                bbox_t_90.x = results.at(i).bbox[0];
+                bbox_t_90.y = results.at(i).bbox[1];
+                bbox_t_90.w = results.at(i).bbox[2];
+                bbox_t_90.h = results.at(i).bbox[3];
+                bbox_t_90.prob = results.at(i).conf;
+                bbox_t_90.obj_id = results.at(i).class_id;
+                outs_90.push_back(bbox_t_90);
+            }
+            vector<od::TrackingBox> track_result_90;
+            bool track_flag_90 = od::TrackObstacle(frame_count,trackers_90,outs_90,track_result_90);
+
+            for(unsigned int i=0;i < track_result_90.size(); i++)
+            {
+                Detection obstacle;
+                obstacle.bbox[0] = track_result_90[i].box.x;
+                obstacle.bbox[1] = track_result_90[i].box.y;
+                obstacle.bbox[2] = track_result_90[i].box.width;
+                obstacle.bbox[3] = track_result_90[i].box.height;
+
+                // Decide the output color by voting over the last 5 frames of class history
+                vector<int> class_history;
+                class_history = track_result_90[i].class_history;
+                if(class_history.size()>0)
+                {
+                    vector<int> color_num(3);
+                    for(int j=0;j<class_history.size();j++)
+                    {
+                        int class_id = class_history[j];
+                        color_num[class_id] += 1;
+                    }
+                    std::vector<int>::iterator biggest = std::max_element(std::begin(color_num),std::end(color_num));
+                    int maxindex = std::distance(std::begin(color_num),biggest);
+                    obstacle.class_id = maxindex;
+                }
+                else {obstacle.class_id = track_result_90[i].class_id;}
+                obstacle.conf = track_result_90[i].prob;
+                results_track.push_back(obstacle);
+
+                cv::resize(testimage,frame,cv::Size(frame.cols,frame.rows));
+                draw_rect(frame,results_track);
+                frame_count ++;
+            }
+        }
+        QImage image = cvmat_to_qimage(frame);
+        emit signal_refreshFrame(image);
+    }
+}
+
+QImage GrabFrameThread::cvmat_to_qimage(const cv::Mat &img)
+{
+    QImage image(img.data,img.cols,img.rows,img.step,QImage::Format_RGB888);
+    return image.rgbSwapped();
+}
+// Create the engine by parsing the ONNX model and building it with the TensorRT API.
+ICudaEngine* GrabFrameThread::createEngine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config)
+{
+
+    INetworkDefinition* network = builder->createNetworkV2(1U); // important: 1U (explicit batch); 0U causes problems here
+
+    IParser* parser = createParser(*network, gLogger);
+    parser->parseFromFile(onnx_path.c_str(), static_cast<int32_t>(ILogger::Severity::kWARNING));
+    // report any parsing errors
+    for (int32_t i = 0; i < parser->getNbErrors(); ++i) { std::cout << parser->getError(i)->desc() << std::endl; }
+    std::cout << "successfully parsed the onnx model" << std::endl;
+
+    // Build engine
+    builder->setMaxBatchSize(maxBatchSize);
+    config->setMaxWorkspaceSize(1 << 20);
+
+    if(usefp16)
+        config->setFlag(nvinfer1::BuilderFlag::kFP16); // enable FP16 precision
+    else if(useint8)
+    {
+        std::cout << "Your platform supports int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
+        assert(builder->platformHasFastInt8());
+        config->setFlag(BuilderFlag::kINT8);
+        Int8EntropyCalibrator2* calibrator = new Int8EntropyCalibrator2(1, INPUT_W, INPUT_H, "./imagedata", "int8calib.table", INPUT_BLOB_NAME);
+        config->setInt8Calibrator(calibrator);
+    }
+
+    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
+    std::cout << "successfully converted onnx to engine" << std::endl;
+
+    // clean up
+    network->destroy();
+    //parser->destroy();
+
+    return engine;
+}
+
+void GrabFrameThread::APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream)
+{
+
+    // Create builder
+    IBuilder* builder = createInferBuilder(gLogger);
+    IBuilderConfig* config = builder->createBuilderConfig();
+
+    // Create model to populate the network, then set the outputs and create an engine
+    ICudaEngine* engine = createEngine(maxBatchSize, builder, config);
+
+    assert(engine != nullptr);
+
+    // Serialize the engine
+    (*modelStream) = engine->serialize();
+    // Close everything down
+    engine->destroy();
+    builder->destroy();
+    config->destroy();
+
+}
+
+
+int GrabFrameThread::get_trtengine() {
+
+    IHostMemory* modelStream{ nullptr };
+    APIToModel(1, &modelStream);
+    assert(modelStream != nullptr);
+
+    std::ofstream p(engine_path, std::ios::binary);
+    if (!p)
+    {
+        std::cerr << "could not open plan output file" << std::endl;
+        return -1;
+    }
+    p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
+    modelStream->destroy();
+
+    return 0;
+
+}
+
+
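+// doInference copies one preprocessed batch to the GPU, enqueues the engine on a CUDA
+// stream and copies the raw output tensor back to the host; device buffers are allocated
+// and freed on every call.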
+void GrabFrameThread::doInference(IExecutionContext& context, float* input, float* output, int batchSize)
+{
+    const ICudaEngine& engine = context.getEngine();
+    // Pointers to input and output device buffers to pass to engine.
+    // Engine requires exactly IEngine::getNbBindings() number of buffers.
+    assert(engine.getNbBindings() == 2);
+    void* buffers[2];
+    // In order to bind the buffers, we need to know the names of the input and output tensors.
+    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
+    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
+
+    //std::cout<<inputIndex<<" "<<outputIndex<<std::endl;
+    //const int inputIndex = 0;
+    //const int outputIndex = 1;
+    // Create GPU buffers on device
+    CHECK(cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_H * INPUT_W * sizeof(float)));
+    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
+    // Create stream
+    cudaStream_t stream;
+    CHECK(cudaStreamCreate(&stream));
+    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
+    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
+    context.enqueue(batchSize, buffers, stream, nullptr);
+
+    //std::cout<<buffers[outputIndex+1]<<std::endl;
+    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
+    cudaStreamSynchronize(stream);
+    // Release stream and buffers
+    cudaStreamDestroy(stream);
+    CHECK(cudaFree(buffers[inputIndex]));
+    CHECK(cudaFree(buffers[outputIndex]));
+
+}
+
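+// Preprocessing note: the resize below is a plain stretch to INPUT_W x INPUT_H (no
+// letterboxing), and pixels are written as planar RGB floats scaled to [0,1].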
+// Pack the image into the flat, batched layout that the TensorRT input expects
+void GrabFrameThread::ProcessImage(cv::Mat image, float input_data[]) {
+    // Only one image is processed here; the result is a flat array of [batch*3*INPUT_W*INPUT_H]
+    // The code below is a quick-and-dirty implementation
+
+    cv::Mat resize_img;
+    cv::resize(image, resize_img, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
+    std::vector<cv::Mat> InputImage;
+
+    InputImage.push_back(resize_img);
+    int ImgCount = InputImage.size();
+
+    //float input_data[BatchSize * 3 * INPUT_H * INPUT_W];
+    for (int b = 0; b < ImgCount; b++) {
+        cv::Mat img = InputImage.at(b);
+        int w = img.cols;
+        int h = img.rows;
+        int i = 0;
+        for (int row = 0; row < h; ++row) {
+            uchar* uc_pixel = img.data + row * img.step;
+            for (int col = 0; col < INPUT_W; ++col) {
+                input_data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
+                input_data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
+                input_data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
+                uc_pixel += 3;
+                ++i;
+            }
+        }
+
+    }
+
+}
+
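+// The helpers below implement a plain greedy NMS: repeatedly keep the highest-confidence
+// detection and discard every remaining candidate whose IoU with it exceeds the threshold.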
+//********************************************** NMS code **********************************//
+float GrabFrameThread::iou(Bbox box1, Bbox box2) {
+
+    int x1 = max(box1.x, box2.x);
+    int y1 = max(box1.y, box2.y);
+    int x2 = min(box1.x + box1.w, box2.x + box2.w);
+    int y2 = min(box1.y + box1.h, box2.y + box2.h);
+    int w = max(0, x2 - x1);
+    int h = max(0, y2 - y1);
+    float over_area = w * h;
+    return over_area / (box1.w * box1.h + box2.w * box2.h - over_area);
+}
+
+int GrabFrameThread::get_max_index(vector<Detection> pre_detection) {
+    // Return the index of the detection with the highest confidence, or -1 if the list is empty
+    int index;
+    float conf;
+    if (pre_detection.size() > 0) {
+        index = 0;
+        conf = pre_detection.at(0).conf;
+        for (int i = 0; i < pre_detection.size(); i++) {
+            if (conf < pre_detection.at(i).conf) {
+                index = i;
+                conf = pre_detection.at(i).conf;
+            }
+        }
+        return index;
+    }
+    else {
+        return -1;
+    }
+
+}
+bool GrabFrameThread::judge_in_lst(int index, vector<int> index_lst) {
+    // Return true if index is contained in index_lst, false otherwise
+    if (index_lst.size() > 0) {
+        for (int i = 0; i < index_lst.size(); i++) {
+            if (index == index_lst.at(i)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+vector<int> GrabFrameThread::nms(vector<Detection> pre_detection, float iou_thr)
+{
+    /*
+    Return the indices (into pre_detection) of the boxes that should be kept
+    */
+    int index;
+    vector<Detection> pre_detection_new;
+    //Detection det_best;
+    Bbox box_best, box;
+    float iou_value;
+    vector<int> keep_index;
+    vector<int> del_index;
+    bool keep_bool;
+    bool del_bool;
+    int rr = 0;
+    int zz = 0;
+
+    if (pre_detection.size() > 0) {
+
+        pre_detection_new.clear();
+        // index the predictions
+        for (int i = 0; i < pre_detection.size(); i++) {
+            pre_detection.at(i).index = i;
+            pre_detection_new.push_back(pre_detection.at(i));
+        }
+        // iterate to collect the kept-box indices, relative to the input pre_detection
+        while (pre_detection_new.size() > 0) {
+            index = get_max_index(pre_detection_new);
+            if (index >= 0) {
+                keep_index.push_back(pre_detection_new.at(index).index); // keep this index
+
+                // update the current best box
+                box_best.x = pre_detection_new.at(index).bbox[0];
+                box_best.y = pre_detection_new.at(index).bbox[1];
+                box_best.w = pre_detection_new.at(index).bbox[2];
+                box_best.h = pre_detection_new.at(index).bbox[3];
+
+                for (int j = 0; j < pre_detection.size(); j++) {
+                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
+                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
+                    if ((!keep_bool) && (!del_bool)) { // only compute IoU for boxes not already kept or deleted
+                        box.x = pre_detection.at(j).bbox[0];
+                        box.y = pre_detection.at(j).bbox[1];
+                        box.w = pre_detection.at(j).bbox[2];
+                        box.h = pre_detection.at(j).bbox[3];
+                        iou_value = iou(box_best, box);
+                        if (iou_value > iou_thr) {
+                            del_index.push_back(j); // mark boxes whose IoU exceeds the threshold for deletion
+                        }
+                    }
+
+                }
+                // rebuild pre_detection_new from boxes that are neither kept nor deleted
+                pre_detection_new.clear();
+                for (int j = 0; j < pre_detection.size(); j++) {
+                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
+                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
+                    if ((!keep_bool) && (!del_bool)) {
+                        pre_detection_new.push_back(pre_detection.at(j));
+                    }
+                }
+            }
+        }
+    }
+
+    del_index.clear();
+    del_index.shrink_to_fit();
+    pre_detection_new.clear();
+    pre_detection_new.shrink_to_fit();
+
+    return keep_index;
+
+}
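+// postprocess decodes the raw network output: each anchor row is [cx, cy, w, h,
+// objectness, class scores...]; the reported confidence is objectness * best class score,
+// low-objectness rows are dropped and greedy NMS is applied to the rest.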
+void GrabFrameThread::postprocess(float* prob,vector<Detection> &results,float conf_thr=0.2,float nms_thr=0.4)
+{
+    /*
+    ##################### post-process the predictions of a single image #####################
+    prob is laid out as [x y w h score class-scores...]; e.g. with 80 classes --> (1, anchor_num, 85)
+
+    */
+
+    vector<Detection> pre_results;
+    vector<int> nms_keep_index;
+    bool keep_bool;
+    Detection pre_res;
+    float conf;
+    int tmp_idx;
+    float tmp_cls_score;
+    for (int i = 0; i < anchor_output_num; i++) {
+        tmp_idx = i * (cls_num + 5);
+        pre_res.bbox[0] = prob[tmp_idx + 0];
+        pre_res.bbox[1] = prob[tmp_idx + 1];
+        pre_res.bbox[2] = prob[tmp_idx + 2];
+        pre_res.bbox[3] = prob[tmp_idx + 3];
+        conf = prob[tmp_idx + 4]; // objectness confidence
+        tmp_cls_score = prob[tmp_idx + 5] * conf;
+        pre_res.class_id = 0;
+        pre_res.conf = tmp_cls_score;
+        for (int j = 1; j < cls_num; j++) {
+            tmp_idx = i * (cls_num + 5) + 5 + j; // index of the corresponding class score
+            if (tmp_cls_score < prob[tmp_idx] * conf)
+            {
+                tmp_cls_score = prob[tmp_idx] * conf;
+                pre_res.class_id = j;
+                pre_res.conf = tmp_cls_score;
+            }
+        }
+        if (conf >= conf_thr) {
+
+            pre_results.push_back(pre_res);
+        }
+
+    }
+
+    // apply NMS
+    nms_keep_index=nms(pre_results,nms_thr);
+
+    for (int i = 0; i < pre_results.size(); i++) {
+        keep_bool = judge_in_lst(i, nms_keep_index);
+        if (keep_bool) {
+            results.push_back(pre_results.at(i));
+        }
+
+    }
+
+    pre_results.clear();
+    pre_results.shrink_to_fit();
+    nms_keep_index.clear();
+    nms_keep_index.shrink_to_fit();
+
+}
+
+void GrabFrameThread::draw_rect(cv::Mat &image, vector<Detection> &results) {
+    /*
+    image: the frame to draw on
+
+    struct Detection {
+        float bbox[4];  // center_x center_y w h
+        float conf;     // confidence
+        int class_id;   // class id
+        int index;      // can be ignored
+    };
+
+    */
+
+    int w1 = image.cols;
+    int h1 = image.rows;
+    int w2 = INPUT_W;
+    int h2 = INPUT_H;
+
+    float ratio_w = float(w1)/float(w2);
+    float ratio_h = float(h1)/float(h2);
+
+    float x;
+    float y;
+    float w;
+    float h;
+
+    cv::Rect rect;
+    for (int i = 0; i < results.size(); i++) {
+
+        x = results.at(i).bbox[0] * ratio_w;
+        y = results.at(i).bbox[1] * ratio_h;
+        w = results.at(i).bbox[2] * ratio_w;
+        h = results.at(i).bbox[3] * ratio_h;
+
+        x = (int)(x - w / 2);
+        y = (int)(y - h / 2);
+        w = (int)w;
+        h = (int)h;
+
+        string info;
+        //info = "id:";
+        //info.append(to_string(results.at(i).class_id));
+        //info.append(classnames[results.at(i).class_id]);
+        //info.append(":");
+        info.append(to_string((int)(results.at(i).conf*100) ) );
+        info.append("%");
+        rect = cv::Rect(x, y, w, h);
+        if(results.at(i).class_id == 0){ // red light
+            cv::rectangle(image, rect, cv::Scalar(0, 0, 255), 1, 1, 0); // both rectangle corners are inclusive
+            cv::putText(image, info, cv::Point(x, y-5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 0, 255), 0.6, 1, false);
+        }else if(results.at(i).class_id == 1){ // green light
+            cv::rectangle(image, rect, cv::Scalar(0, 255, 0), 1, 1, 0); // both rectangle corners are inclusive
+            cv::putText(image, info, cv::Point(x, y-5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 0), 0.6, 1, false);
+        }else if(results.at(i).class_id == 2){ // yellow light
+            cv::rectangle(image, rect, cv::Scalar(0, 255, 255), 1, 1, 0); // both rectangle corners are inclusive
+            cv::putText(image, info, cv::Point(x, y-5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(0, 255, 255), 0.6, 1, false);
+        }else{
+            cv::rectangle(image, rect, cv::Scalar(255, 255, 255), 1, 1, 0); // both rectangle corners are inclusive
+            cv::putText(image, info, cv::Point(x, y-5), cv::FONT_HERSHEY_SIMPLEX, 0.3, cv::Scalar(255, 255, 255), 0.6, 1, false);
+        }
+
+        std::cout<<classnames[results.at(i).class_id]<<" "<<info<<std::endl;
+
+    }
+}
+
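+// LoadEngine reads a serialized engine from disk, deserializes it into the runtime,
+// engine and execution context members, and allocates page-locked host memory for the
+// output buffer; it returns false if the file is missing or deserialization fails.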
+bool GrabFrameThread::LoadEngine(const std::string engine_path){
+    // load the serialized engine from disk
+    char* trtModelStream{ nullptr };
+    size_t size{ 0 };
+    std::ifstream file(engine_path, std::ios::binary);
+    if(!file)
+    {
+        cout<<engine_path<<" not found!"<<endl;
+        return false;
+    }
+    if (file.good()) {
+        file.seekg(0, file.end);
+        size = file.tellg();
+        file.seekg(0, file.beg);
+        trtModelStream = new char[size];
+        assert(trtModelStream);
+        file.read(trtModelStream, size);
+        file.close();
+    }
+    // deserialize into an engine and create the execution context
+    runtime = createInferRuntime(gLogger);
+    assert(runtime != nullptr);
+    engine = runtime->deserializeCudaEngine(trtModelStream, size, nullptr);
+    //assert(engine != nullptr);
+    if(engine == nullptr)
+    {
+        delete[] trtModelStream;
+        return false;
+    }
+    context = engine->createExecutionContext();
+    assert(context != nullptr);
+    delete[] trtModelStream;
+
+    // allocate page-locked (pinned) host memory for the output buffer
+    CHECK(cudaHostAlloc((void **)&prob, OUTPUT_SIZE * sizeof(float), cudaHostAllocDefault));
+    return true;
+}
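+// infer runs the full per-frame pipeline: ProcessImage (preprocessing), doInference
+// (TensorRT execution into the pinned prob buffer) and postprocess (decoding + NMS),
+// printing the measured inference time.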
+void GrabFrameThread::infer(cv::Mat img,vector<Detection> &results) {
+
+    // preprocess the image into the fixed-size network input
+    auto start = std::chrono::system_clock::now(); // timing
+    static float data[3 * INPUT_H * INPUT_W];
+    ProcessImage(img, data);
+    auto end = std::chrono::system_clock::now();
+    //time_read_img = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_read_img;
+
+    //cout<<"read img time: "<<std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()<<"ms"<<endl;
+
+    // Run inference
+    start = std::chrono::system_clock::now(); // timing
+    //cout<<"doinference"<<endl;
+    doInference(*context, data, prob, 1);
+    end = std::chrono::system_clock::now();
+    //time_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_infer;
+    std::cout <<"doinference: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    postprocess(prob, results, conf_thr, nms_thr);
+    //cout << "ok" << endl;
+    //time_num++;
+}
+