- #include "NvInfer.h"
- #include "cuda_runtime_api.h"
- #include <fstream>
- #include <iostream>
- #include <map>
- #include <sstream>
- #include <vector>
- #include <chrono>
- #include <cmath>
- #include <cassert>
- #include <algorithm>
- #include <opencv2/core/core.hpp>
- #include <opencv2/highgui/highgui.hpp>
- #include <opencv2/opencv.hpp>
- #include "modulecomm.h"
- #include "xmlparam.h"
- #include "ivfault.h"
- #include "ivlog.h"
- #include "ivexit.h"
- #include "ivversion.h"
- #include "rawpic.pb.h"
- #include "lightarray.pb.h"
- #include "brainstate.pb.h"
- #include <QCoreApplication>
- #include <thread>
- #include "opencv2/imgcodecs/legacy/constants_c.h"
- #include "qmutex.h"
- #include "condition_variable"
- #include "imageBuffer.h"
- #include "detect_obstacle.h"
- // Header for ONNX-to-TensorRT conversion
- #include "NvOnnxParser.h"
- using namespace nvonnxparser;
- using namespace nvinfer1;
- using namespace std;
- // Global variables
- struct Detection {
- //center_x center_y w h
- float bbox[4];
- float conf; // bbox_conf * cls_conf
- int class_id;
- int index;
- };
- struct Bbox {
- int x;
- int y;
- int w;
- int h;
- };
- string config_file = "./yaml/trafficlight_config.yaml";
- string onnx_path = "./model/yolov5s_640_old+new.onnx"; //onnx path
- string engine_path = "./model/yolov5s_640_old+new.engine"; // engine path
- bool imshow_flag = true;
- bool ivlog_flag = false;
- float conf_thr = 0.5;
- float nms_thr = 0.4;
- bool calibrationstate = false;
- string calibration_yamlpath = "./yaml/camera_middle_640_360.yaml";
- bool cropstate = true;
- cv::Range crop_height = cv::Range(40,680);
- cv::Range crop_width = cv::Range(320,960);
- bool test_video = false;
- //string video_path = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink";
- string video_path = "/home/nvidia/code/modularization/src/detection/trafficlight_detection/data/camera_test3.mp4";
- // Whether tracking is enabled
- bool trackstate = true;
- void * gpbrain; // shared-memory handle for the traffic-light start signal
- string brainname = "brainstate";
- bool lightstart = false;
- void * gpcamera;
- string cameraname="image00";
- ConsumerProducerQueue<cv::Mat> * imageBuffer = new ConsumerProducerQueue<cv::Mat>(3,true);
- void * gpdetect;
- string detectname = "lightarray"; // detection result channel
- // stuff we know about the network and the input/output blobs
- static const int INPUT_H = 640;
- static const int INPUT_W = 640;
- static const int cls_num = 3;
- // Anchor counts for different input sizes: 960-->56700 | 640-->25200 | 416-->10647 | 320-->6300
- static const int anchor_output_num = 25200;
- static const int OUTPUT_SIZE = 1* anchor_output_num *(cls_num+5); //1000 * sizeof(Detection) / sizeof(float) + 1;
- const char* INPUT_BLOB_NAME = "images";
- const char* OUTPUT_BLOB_NAME = "output";
- IExecutionContext* context;
- ICudaEngine* engine;
- IRuntime* runtime;
- float* prob;
- iv::Ivfault *gfault = nullptr;
- iv::Ivlog *givlog = nullptr;
- cv::Mat camera_matrix,dist_coe,map1,map2; // calibration parameters
- float time_read_img = 0.0;
- float time_infer = 0.0;
- int time_num = 0;
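- // Abort immediately on any non-zero CUDA status code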
- #define CHECK(status) \
- do\
- {\
- auto ret = (status);\
- if (ret != 0)\
- {\
- std::cerr << "Cuda failure: " << ret << std::endl;\
- abort();\
- }\
- } while (0)
- //static Logger gLogger;
- // Logger required by the TensorRT API
- class Logger : public ILogger
- {
- void log(Severity severity, const char* msg) noexcept override
- {
- // suppress info-level messages
- if (severity <= Severity::kWARNING)
- std::cout << msg << std::endl;
- }
- } gLogger;
- // Create the engine by parsing the ONNX model
- ICudaEngine* createEngine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config)
- {
- INetworkDefinition* network = builder->createNetworkV2(1U); // note: must be 1U (explicit batch); 0U does not work here
- IParser* parser = createParser(*network, gLogger);
- parser->parseFromFile(onnx_path.c_str(), static_cast<int32_t>(ILogger::Severity::kWARNING));
- // Report any parsing errors
- for (int32_t i = 0; i < parser->getNbErrors(); ++i) { std::cout << parser->getError(i)->desc() << std::endl; }
- std::cout << "successfully parse the onnx model" << std::endl;
- // Build engine
- builder->setMaxBatchSize(maxBatchSize);
- config->setMaxWorkspaceSize(1 << 20);
- //config->setFlag(nvinfer1::BuilderFlag::kFP16); // set reduced-precision mode
- //config->setFlag(nvinfer1::BuilderFlag::kINT8);
- ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
- std::cout << "successfully convert onnx to engine!!! " << std::endl;
- // Clean up
- network->destroy();
- //parser->destroy();
- return engine;
- }
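- // Build an engine (via createEngine) and serialize it into modelStream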
- void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream)
- {
- // Create builder
- IBuilder* builder = createInferBuilder(gLogger);
- IBuilderConfig* config = builder->createBuilderConfig();
- // Create model to populate the network, then set the outputs and create an engine
- ICudaEngine* engine = createEngine(maxBatchSize, builder, config);
- assert(engine != nullptr);
- // Serialize the engine
- (*modelStream) = engine->serialize();
- // Close everything down
- engine->destroy();
- builder->destroy();
- config->destroy();
- }
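- // Convert the ONNX model to a serialized TensorRT engine and write it to engine_path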
- int get_trtengine() {
- IHostMemory* modelStream{ nullptr };
- APIToModel(1, &modelStream);
- assert(modelStream != nullptr);
- std::ofstream p(engine_path, std::ios::binary);
- if (!p)
- {
- std::cerr << "could not open plan output file" << std::endl;
- return -1;
- }
- p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
- modelStream->destroy();
- return 0;
- }
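- // Copy the input batch to the GPU, run inference on a CUDA stream, and copy the output back to the host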
- void doInference(IExecutionContext& context, float* input, float* output, int batchSize)
- {
- const ICudaEngine& engine = context.getEngine();
- // Pointers to input and output device buffers to pass to engine.
- // Engine requires exactly IEngine::getNbBindings() number of buffers.
- assert(engine.getNbBindings() == 2);
- void* buffers[2];
- // In order to bind the buffers, we need to know the names of the input and output tensors.
- // Note that indices are guaranteed to be less than IEngine::getNbBindings()
- const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
- const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
- //std::cout<<inputIndex<<" "<<outputIndex<<std::endl;
- //const int inputIndex = 0;
- //const int outputIndex = 1;
- // Create GPU buffers on device
- CHECK(cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_H * INPUT_W * sizeof(float)));
- CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
- // Create stream
- cudaStream_t stream;
- CHECK(cudaStreamCreate(&stream));
- // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
- CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
- context.enqueue(batchSize, buffers, stream, nullptr);
- //std::cout<<buffers[outputIndex+1]<<std::endl;
- CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
- cudaStreamSynchronize(stream);
- // Release stream and buffers
- cudaStreamDestroy(stream);
- CHECK(cudaFree(buffers[inputIndex]));
- CHECK(cudaFree(buffers[outputIndex]));
- }
- // Preprocess the image into a batched input flattened into one dimension, as TensorRT expects
- void ProcessImage(cv::Mat image, float input_data[]) {
- // Only a single image is handled; the result is a flat 1-D array of size [batch*3*INPUT_W*INPUT_H]
- // The code below is a quick-and-dirty shortcut
- cv::Mat resize_img ;
- cv::resize(image, resize_img, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
- std::vector<cv::Mat> InputImage;
- InputImage.push_back(resize_img);
- int ImgCount = InputImage.size();
- //float input_data[BatchSize * 3 * INPUT_H * INPUT_W];
- for (int b = 0; b < ImgCount; b++) {
- cv::Mat img = InputImage.at(b);
- int w = img.cols;
- int h = img.rows;
- int i = 0;
- for (int row = 0; row < h; ++row) {
- uchar* uc_pixel = img.data + row * img.step;
- for (int col = 0; col < INPUT_W; ++col) {
- input_data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
- input_data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
- input_data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
- uc_pixel += 3;
- ++i;
- }
- }
- }
- }
- //********************************************** NMS code **********************************//
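- // Intersection-over-union of two boxes given as x, y, w, h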
- float iou(Bbox box1, Bbox box2) {
- int x1 = max(box1.x, box2.x);
- int y1 = max(box1.y, box2.y);
- int x2 = min(box1.x + box1.w, box2.x + box2.w);
- int y2 = min(box1.y + box1.h, box2.y + box2.h);
- int w = max(0, x2 - x1);
- int h = max(0, y2 - y1);
- float over_area = w * h;
- return over_area / (box1.w * box1.h + box2.w * box2.h - over_area);
- }
- int get_max_index(vector<Detection> pre_detection) {
- // Find the detection with the highest confidence and return its index
- int index;
- float conf;
- if (pre_detection.size() > 0) {
- index = 0;
- conf = pre_detection.at(0).conf;
- for (int i = 0; i < pre_detection.size(); i++) {
- if (conf < pre_detection.at(i).conf) {
- index = i;
- conf = pre_detection.at(i).conf;
- }
- }
- return index;
- }
- else {
- return -1;
- }
- }
- bool judge_in_lst(int index, vector<int> index_lst) {
- // Return true if index is in index_lst, otherwise false
- if (index_lst.size() > 0) {
- for (int i = 0; i < index_lst.size(); i++) {
- if (index == index_lst.at(i)) {
- return true;
- }
- }
- }
- return false;
- }
- vector<int> nms(vector<Detection> pre_detection, float iou_thr)
- {
- /*
- Return the indices (into pre_detection) of the boxes that should be kept
- */
- int index;
- vector<Detection> pre_detection_new;
- //Detection det_best;
- Bbox box_best, box;
- float iou_value;
- vector<int> keep_index;
- vector<int> del_index;
- bool keep_bool;
- bool del_bool;
- int rr = 0;
- int zz = 0;
- if (pre_detection.size() > 0) {
- pre_detection_new.clear();
- // Attach an index to each prediction
- for (int i = 0; i < pre_detection.size(); i++) {
- pre_detection.at(i).index = i;
- pre_detection_new.push_back(pre_detection.at(i));
- }
- // Iterate to collect the indices of kept boxes (positions relative to the input pre_detection)
- while (pre_detection_new.size() > 0) {
- index = get_max_index(pre_detection_new);
- if (index >= 0) {
- keep_index.push_back(pre_detection_new.at(index).index); // record the kept index
- // update the current best box
- box_best.x = pre_detection_new.at(index).bbox[0];
- box_best.y = pre_detection_new.at(index).bbox[1];
- box_best.w = pre_detection_new.at(index).bbox[2];
- box_best.h = pre_detection_new.at(index).bbox[3];
- for (int j = 0; j < pre_detection.size(); j++) {
- keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
- del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
- if ((!keep_bool) && (!del_bool)) { // compute IoU only for boxes not already kept or deleted
- box.x = pre_detection.at(j).bbox[0];
- box.y = pre_detection.at(j).bbox[1];
- box.w = pre_detection.at(j).bbox[2];
- box.h = pre_detection.at(j).bbox[3];
- iou_value = iou(box_best, box);
- if (iou_value > iou_thr) {
- del_index.push_back(j); // mark boxes whose IoU exceeds the threshold for deletion
- }
- }
- }
- // rebuild pre_detection_new from the remaining boxes
- pre_detection_new.clear();
- for (int j = 0; j < pre_detection.size(); j++) {
- keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
- del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
- if ((!keep_bool) && (!del_bool)) {
- pre_detection_new.push_back(pre_detection.at(j));
- }
- }
- }
- }
- }
- del_index.clear();
- del_index.shrink_to_fit();
- pre_detection_new.clear();
- pre_detection_new.shrink_to_fit();
- return keep_index;
- }
- void postprocess(float* prob,vector<Detection> &results,float conf_thr = 0.2, float nms_thr = 0.4) {
- /*
- ##################### Post-process the predictions of a single image #####################
- prob layout: [x y w h objectness per-class-scores]; e.g. 80 classes --> (1, anchor_num, 85)
- */
- vector<Detection> pre_results;
- vector<int> nms_keep_index;
- bool keep_bool;
- Detection pre_res;
- float conf;
- int tmp_idx;
- float tmp_cls_score;
- for (int i = 0; i < anchor_output_num; i++) {
- tmp_idx = i * (cls_num + 5);
- pre_res.bbox[0] = prob[tmp_idx + 0];
- pre_res.bbox[1] = prob[tmp_idx + 1];
- pre_res.bbox[2] = prob[tmp_idx + 2];
- pre_res.bbox[3] = prob[tmp_idx + 3];
- conf = prob[tmp_idx + 4]; // objectness confidence
- tmp_cls_score = prob[tmp_idx + 5] * conf;
- pre_res.class_id = 0;
- pre_res.conf = tmp_cls_score;
- for (int j = 1; j < cls_num; j++) {
- tmp_idx = i * (cls_num + 5) + 5 + j; // index of the corresponding class score
- if (tmp_cls_score < prob[tmp_idx] * conf)
- {
- tmp_cls_score = prob[tmp_idx] * conf;
- pre_res.class_id = j;
- pre_res.conf = tmp_cls_score;
- }
- }
- if (conf >= conf_thr) {
- pre_results.push_back(pre_res);
- }
- }
- // apply NMS
- nms_keep_index=nms(pre_results,nms_thr);
- for (int i = 0; i < pre_results.size(); i++) {
- keep_bool = judge_in_lst(i, nms_keep_index);
- if (keep_bool) {
- results.push_back(pre_results.at(i));
- }
- }
- pre_results.clear();
- pre_results.shrink_to_fit();
- nms_keep_index.clear();
- nms_keep_index.shrink_to_fit();
- }
- cv::Mat draw_rect(cv::Mat image, vector<Detection> results) {
- /*
- image: the input image to draw on
- struct Detection {
- float bbox[4]; // center_x center_y w h
- float conf; // confidence
- int class_id; // class id
- int index; // can be ignored
- };
- */
- float x;
- float y;
- float w;
- float h;
- string info;
- cv::Rect rect;
- for (int i = 0; i < results.size(); i++) {
- x = results.at(i).bbox[0];
- y= results.at(i).bbox[1];
- w= results.at(i).bbox[2];
- h=results.at(i).bbox[3];
- x = (int)(x - w / 2);
- y = (int)(y - h / 2);
- w = (int)w;
- h = (int)h;
- info = "id:";
- info.append(to_string(results.at(i).class_id));
- info.append(" s:");
- info.append(to_string((int)(results.at(i).conf*100) ) );
- info.append("%");
- rect= cv::Rect(x, y, w, h);
- if(results.at(i).class_id == 0){ // red light
- cv::rectangle(image, rect, cv::Scalar(0, 0, 255), 2, 1, 0); // both corner points are included in the rectangle
- cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 0, 255), 0.6, 1, false);
- }else if(results.at(i).class_id == 1){ // green light
- cv::rectangle(image, rect, cv::Scalar(0, 255, 0), 2, 1, 0); // both corner points are included in the rectangle
- cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 0), 0.6, 1, false);
- }else if(results.at(i).class_id == 2){ // yellow light
- cv::rectangle(image, rect, cv::Scalar(0, 255, 255), 2, 1, 0); // both corner points are included in the rectangle
- cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 255), 0.6, 1, false);
- }else{
- cv::rectangle(image, rect, cv::Scalar(255, 255, 255), 2, 1, 0); // both corner points are included in the rectangle
- cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255, 255, 255), 0.6, 1, false);
- }
- }
- return image;
- }
- bool LoadEngine(const std::string engine_path){
- // Load the serialized engine from disk
- char* trtModelStream{ nullptr };
- size_t size{ 0 };
- std::ifstream file(engine_path, std::ios::binary);
- if(!file)
- {
- cout<<engine_path<<" not found!"<<endl;
- return false;
- }
- if (file.good()) {
- file.seekg(0, file.end);
- size = file.tellg();
- file.seekg(0, file.beg);
- trtModelStream = new char[size];
- assert(trtModelStream);
- file.read(trtModelStream, size);
- file.close();
- }
- // Deserialize into an engine and create the execution context
- runtime = createInferRuntime(gLogger);
- assert(runtime != nullptr);
- #ifdef UBUNTU_22_04
- engine = runtime->deserializeCudaEngine(trtModelStream, size);
- #else
- engine = runtime->deserializeCudaEngine(trtModelStream, size, nullptr);
- #endif
- //assert(engine != nullptr);
- if(engine == nullptr)
- return false;
- context = engine->createExecutionContext();
- assert(context != nullptr);
- delete[] trtModelStream;
- // Allocate page-locked (pinned) host memory for the output
- CHECK(cudaHostAlloc((void **)&prob, OUTPUT_SIZE * sizeof(float), cudaHostAllocDefault));
- return true;
- }
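- // Pack the detections into a Lightarray protobuf and publish it through the shared-memory channel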
- void shareLightMsg(vector<Detection> results)
- {
- iv::vision::Lightarray light_array; // result message sent to shared memory
- for (int i = 0; i < results.size(); i++)
- {
- float x = results.at(i).bbox[0];
- float y = results.at(i).bbox[1];
- float w = results.at(i).bbox[2];
- float h = results.at(i).bbox[3];
- /*---------------protobuf----------------*/
- iv::vision::Light *light = light_array.add_light();
- iv::vision::Center light_center;
- light_center.set_x(x);
- light_center.set_y(y);
- light->mutable_center()->CopyFrom(light_center);
- light->set_index(i+1);
- int light_type = 255;
- if (results.at(i).class_id == 0) // red
- light_type = 2;
- else if(results.at(i).class_id == 1) // green
- light_type = 1;
- else if(results.at(i).class_id == 2) // yellow
- light_type = 3;
- light->set_type(light_type);
- cout<<"light color: "<<light_type<<endl;
- if(ivlog_flag)
- givlog->verbose("light color: %d",light_type);
- }
- int size = light_array.ByteSize();
- char * strdata = new char[light_array.ByteSize()];
- if(light_array.SerializeToArray(strdata, size))
- {
- iv::modulecomm::ModuleSendMsg(gpdetect, strdata, size);
- }
- else
- {
- std::cout<<"light_array serialize error."<<std::endl;
- }
- light_array.Clear();
- delete[] strdata;
- /*--------------------test ParseFromArray-------------------*/
- // iv::vision::Lightarray light_array1;
- // light_array1.ParseFromArray(strdata,size);
- // cout<<"parsefromarray:"<<std::endl;
- // cout<<"light_size:"<<light_array1.light_size()<<endl;
- // for (int i=0;i<light_array1.light_size();i++) {
- // std::cout<<"index:"<<light_array1.light(i).index()<<" type:"
- // <<light_array1.light(i).type()
- // <<std::endl;
- // }
- }
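- // Run the full pipeline for one frame: preprocess, TensorRT inference, and post-processing into detections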
- void infer(cv::Mat img,vector<Detection> &results) {
- // Preprocess the image into the fixed-size network input
- auto start = std::chrono::system_clock::now(); // timing
- static float data[3 * INPUT_H * INPUT_W];
- ProcessImage(img, data);
- auto end = std::chrono::system_clock::now();
- //time_read_img = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_read_img;
- //cout<<"read img time: "<<std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()<<"ms"<<endl;
- //Run inference
- start = std::chrono::system_clock::now(); // timing
- //cout<<"doinference"<<endl;
- doInference(*context, data, prob, 1);
- end = std::chrono::system_clock::now();
- //time_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_infer;
- std::cout <<"doinference: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
- postprocess(prob, results, conf_thr, nms_thr);
- //cout << "ok" << endl;
- //time_num++;
- }
- void exitfunc()
- {
- return;
- }
- // Read frames from a video file
- void ReadFunc(int n)
- {
- cv::VideoCapture cap(video_path);
- if(!cap.isOpened())
- {
- cout<<"camera failed to open"<<endl;
- }
- while(1)
- {
- cv::Mat frame;
- // Needed when reading a video file; remove it when reading from a camera
- if(imageBuffer->isFull())
- {
- continue;
- }
- if(cap.read(frame))
- {
- if(calibrationstate)
- cv::remap(frame,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
- if(cropstate)
- frame = frame(crop_height,crop_width);
- imageBuffer->add(frame);
- }
- else
- {
- std::this_thread::sleep_for(std::chrono::milliseconds(1));
- }
- }
- }
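- // Shared-memory callback: decode an incoming rawpic message, optionally undistort and crop it, then queue it for inference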
- void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
- {
- if(nSize<1000)return;
- iv::vision::rawpic pic;
- if(false == pic.ParseFromArray(strdata,nSize))
- {
- std::cout<<"picview Listenpic fail."<<std::endl;
- return;
- }
- cv::Mat mat(pic.height(),pic.width(),pic.mattype());
- if(pic.type() == 1)
- memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
- else
- {
- // mat.release();
- std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
- mat = cv::imdecode(buff,cv::IMREAD_COLOR);
- }
- if(calibrationstate)
- cv::remap(mat,mat,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
- if(cropstate)
- mat = mat(crop_height,crop_width);
- imageBuffer->add(mat);
- mat.release();
- }
- // Receive the start signal from shared memory
- void Listensignal(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
- {
- if(nSize<1)return;
- iv::brain::brainstate brain_state;
- if(false == brain_state.ParseFromArray(strdata,nSize))
- {
- std::cout<<"Listen signal fail."<<std::endl;
- return;
- }
- cout<<"brain_state traficlightstart: "<<brain_state.mbtraficlightstart()<<endl;
- lightstart = brain_state.mbtraficlightstart();
- }
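- // Comparator for sorting tracking results by ascending id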
- bool comp(const od::TrackingBox &a, const od::TrackingBox &b) {
- return a.id < b.id;
- }
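- // Entry point: read the YAML config, set up the camera input, build or load the engine, register shared-memory channels, and run the detection loop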
- int main(int argc, char** argv)
- {
- showversion("yolov5s");
- QCoreApplication a(argc, argv);
- gfault = new iv::Ivfault("traffic_light_detection");
- givlog = new iv::Ivlog("traffic_light_detection");
- gfault->SetFaultState(0,0,"yolov5 initialize.");
- // if(argc==3)
- // {
- // test_video = (strcmp(argv[1], "true") == 0)?true:false;
- // video_path = argv[2];
- // }
- // if(argc==2)
- // {
- // test_video = (strcmp(argv[1], "true") == 0)?true:false;
- // }
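- // The config file is expected to provide the keys read below. A minimal sketch of the
- // layout (values are illustrative, not authoritative defaults):
- // model: { onnx_path: ..., engine_path: ... }
- // imshow_flag: "true"
- // ivlog_flag: "false"
- // test_video: { open: "false", video_path: ... }
- // lightstart: "false"
- // trackstate: "true"
- // conf_thr: 0.5
- // nms_thr: 0.4
- // camera:
- //   calibrationstate: "false"
- //   calibration_yamlpath: ./yaml/camera_middle_640_360.yaml
- //   cropstate: "true"
- //   crop_height: { start: 40, end: 680 }
- //   crop_width: { start: 320, end: 960 }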
- cv::FileStorage config(config_file, cv::FileStorage::READ);
- bool config_isOpened = config.isOpened();
- //const char* onnx_path_;
- if(config_isOpened)
- {
- onnx_path = string(config["model"]["onnx_path"]);
- engine_path = string(config["model"]["engine_path"]);
- imshow_flag = (string(config["imshow_flag"]) == "true");
- ivlog_flag = (string(config["ivlog_flag"]) == "true");
- test_video = (string(config["test_video"]["open"]) == "true");
- video_path = string(config["test_video"]["video_path"]);
- lightstart = (string(config["lightstart"]) == "true");
- trackstate = (string(config["trackstate"]) == "true");
- conf_thr = float(config["conf_thr"]);
- nms_thr = float(config["nms_thr"]);
- calibrationstate = (string(config["camera"]["calibrationstate"]) == "true");
- calibration_yamlpath = string(config["camera"]["calibration_yamlpath"]);
- cropstate = (string(config["camera"]["cropstate"]) == "true");
- crop_height = cv::Range(int(config["camera"]["crop_height"]["start"]),
- int(config["camera"]["crop_height"]["end"]));
- crop_width = cv::Range(int(config["camera"]["crop_width"]["start"]),
- int(config["camera"]["crop_width"]["end"]));
- }
- config.release();
- if(test_video)
- {
- std::thread * readthread = new std::thread(ReadFunc,1);
- }
- else
- gpcamera= iv::modulecomm::RegisterRecv(&cameraname[0],Listenpic);
- //================================== camera calib init ==========================
- if (calibrationstate)
- {
- cv::FileStorage calib_file(calibration_yamlpath, cv::FileStorage::READ);
- calib_file["cameraMatrix"]>>camera_matrix;
- calib_file["distCoeffs"]>>dist_coe;
- cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
- cv::Size imgsize=cv::Size(1280,720);
- cv::initUndistortRectifyMap(camera_matrix, dist_coe, R, camera_matrix,imgsize,CV_16SC2,map1,map2);
- }
- //==============================================================================
- if(!LoadEngine(engine_path))
- {
- cout<<"Build engine to "<< engine_path <<endl;
- get_trtengine();
- cout << "Build engine done!"<<endl;
- cout<<"Reload engine from "<< engine_path <<endl;
- LoadEngine(engine_path);
- }
- gpbrain = iv::modulecomm::RegisterRecv(&brainname[0],Listensignal);
- gpdetect = iv::modulecomm::RegisterSend(&detectname[0],10000,1);
- while(1)
- {
- if(lightstart)
- {
- std::cout<<"------start program------"<<std::endl;
- vector<KalmanTracker> trackers_90;
- KalmanTracker::kf_count = 0; // tracking id relies on this, so we have to reset it in each seq.
- int frame_count = 0;
- double waittime = (double)cv::getTickCount();
- while (1)
- {
- if(imageBuffer->isEmpty())
- {
- double waittotal = (double)cv::getTickCount() - waittime;
- double totaltime = waittotal/cv::getTickFrequency();
- // if(totaltime>10.0)
- // {
- // cout<<"Cant't get frame and quit"<<endl;
- // lightstart = false;
- // cv::destroyAllWindows();
- // std::cout<<"------end program------"<<std::endl;
- // break;
- // }
- cout<<"Wait for frame "<<totaltime<<"s"<<endl;
- if(ivlog_flag)
- givlog->verbose("Wait for frame %f s",totaltime);
- std::this_thread::sleep_for(std::chrono::milliseconds(10));
- continue;
- }
- auto start = std::chrono::system_clock::now(); // timing
- cv::Mat frame;
- imageBuffer->consume(frame);
- frame_count++;
- cv::Mat res_img; // result image
- vector<Detection> results;
- vector<Detection>results_track;
- //================================== infer ==========================
- infer(frame,results);
- //================================== track ==========================
- if (trackstate)
- {
- auto start_track = std::chrono::system_clock::now(); // timing
- od::bbox_t bbox_t_90; // convert detections to the tracker's input format
- vector<od::bbox_t> outs_90;
- for (int i = 0; i < results.size(); i++)
- {
- //------------- determine whether the traffic light is horizontal, width=(x1-x2), height=(y1-y2) -------------
- bbox_t_90.x = results.at(i).bbox[0];
- bbox_t_90.y = results.at(i).bbox[1];
- bbox_t_90.w = results.at(i).bbox[2];
- bbox_t_90.h = results.at(i).bbox[3];
- bbox_t_90.prob = results.at(i).conf;
- bbox_t_90.obj_id = results.at(i).class_id;
- outs_90.push_back(bbox_t_90);
- }
- vector<od::TrackingBox>track_result_90;
- bool track_flag_90 = od::TrackObstacle(frame_count,trackers_90,outs_90,track_result_90);
- //sort(track_result_90.begin(), track_result_90.end(), comp); // track ids are already ordered from largest to smallest
- for(unsigned int i=0;i < track_result_90.size(); i++)
- {
- Detection obstacle;
- obstacle.bbox[0] = track_result_90[i].box.x;
- obstacle.bbox[1] = track_result_90[i].box.y;
- obstacle.bbox[2] = track_result_90[i].box.width;
- obstacle.bbox[3] = track_result_90[i].box.height;
- //cout<<"11111: "<<track_result_90[i].id<<endl;
- // Decide the output color by voting over the last 5 frames of class history
- vector<int> class_history;
- class_history = track_result_90[i].class_history;
- if(class_history.size()>0)
- {
- vector<int> color_num(3);
- for(int j=0;j<class_history.size();j++)
- {
- int class_id = class_history[j];
- color_num[class_id] += 1;
- }
- std::vector<int>::iterator biggest = std::max_element(std::begin(color_num),std::end(color_num));
- int maxindex = std::distance(std::begin(color_num),biggest);
- obstacle.class_id = maxindex;
- }
- else {obstacle.class_id = track_result_90[i].class_id;}
- obstacle.conf = track_result_90[i].prob;
- results_track.push_back(obstacle);
- }
- auto end_track = std::chrono::system_clock::now(); // timing
- //std::cout <<"track: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end_track - start_track).count() << "ms" << std::endl;
- }
- //================================== track ==========================
- vector<Detection>results_final;
- results_final = (trackstate)?results_track:results;
- shareLightMsg(results_final);
- auto end = std::chrono::system_clock::now(); // timing
- std::cout <<"total time traffic: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
- // Display results; detections are in network-input coordinates, so resize the frame to the network input size before drawing
- cv::resize(frame, frame, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
- res_img=draw_rect(frame,results_final);
- std::cout<<"light number: "<<results_final.size()<<std::endl;
- if(ivlog_flag)
- givlog->verbose("light number: %d",results_final.size());
- results.clear();
- results_track.clear();
- results_final.clear();
- if (imshow_flag)
- {
- cv::namedWindow("Result",cv::WINDOW_NORMAL);
- cv::imshow("Result",frame);
- if(cv::waitKey(10) == 'q')
- {
- cv::destroyAllWindows();
- //yolo_context->destroy();
- //start_up = false;
- break;
- }
- if(cv::waitKey(1) == 's')
- cv::waitKey(0);
- }
- //writer << frame;
- waittime = (double)cv::getTickCount();
- if(!lightstart)
- {
- cv::destroyAllWindows();
- //yolo_context->destroy();
- //start_up = false;
- std::cout<<"------end program------"<<std::endl;
- break;
- }
- }
- }
- else
- {
- cout<<"Wait for lightstart==true"<<endl;
- if(ivlog_flag)
- givlog->verbose("Wait for lightstart==true");
- std::this_thread::sleep_for(std::chrono::milliseconds(10));
- }
- }
- // Destroy the engine
- context->destroy();
- engine->destroy();
- runtime->destroy();
- return 0;
- }
|