
add track

fujiankuan 1 year ago
parent commit 0bc98418c2
26 changed files with 3266 additions and 0 deletions
  1. src/detection/yolov5-trt-v2.0/calibrate_camera.py (+163 -0)
  2. src/detection/yolov5-trt-v2.0/calibrate_camera_new.py (+199 -0)
  3. src/detection/yolov5-trt-v2.0/include/Hungarian.h (+36 -0)
  4. src/detection/yolov5-trt-v2.0/include/KalmanTracker.h (+90 -0)
  5. src/detection/yolov5-trt-v2.0/include/detect_obstacle.h (+54 -0)
  6. src/detection/yolov5-trt-v2.0/include/imageBuffer.h (+82 -0)
  7. src/detection/yolov5-trt-v2.0/main.cpp (+701 -0)
  8. src/detection/yolov5-trt-v2.0/main_new.cpp (+866 -0)
  9. src/detection/yolov5-trt-v2.0/saveVideo.py (+40 -0)
  10. src/detection/yolov5-trt-v2.0/src/Hungarian.cpp (+398 -0)
  11. src/detection/yolov5-trt-v2.0/src/KalmanTracker.cpp (+106 -0)
  12. src/detection/yolov5-trt-v2.0/src/detect_obstacle.cpp (+260 -0)
  13. src/detection/yolov5-trt-v2.0/yaml/camera.yaml (+12 -0)
  14. src/detection/yolov5-trt-v2.0/yaml/camera147.yaml (+12 -0)
  15. src/detection/yolov5-trt-v2.0/yaml/camera_middle_640_360.yaml (+12 -0)
  16. src/detection/yolov5-trt-v2.0/yaml/init.yaml (+16 -0)
  17. src/detection/yolov5-trt-v2.0/yaml/pers_ori.yaml (+10 -0)
  18. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/camera.yaml (+12 -0)
  19. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/camera_ori.yaml (+12 -0)
  20. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/init.yaml (+16 -0)
  21. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers.yaml (+19 -0)
  22. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers3.yaml (+19 -0)
  23. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers_1.yaml (+19 -0)
  24. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers_2.yaml (+19 -0)
  25. src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers_ori.yaml (+10 -0)
  26. src/detection/yolov5-trt-v2.0/yolov5-trt-v2.0.pro (+83 -0)

+ 163 - 0
src/detection/yolov5-trt-v2.0/calibrate_camera.py

@@ -0,0 +1,163 @@
+#!/usr/bin/python3
+"""
+~~~~~~~~~~~~~~~~~~
+Camera calibration
+~~~~~~~~~~~~~~~~~~
+
+Usage:
+    python calibrate_camera.py \
+        -grid 9x6 \
+        -framestep 20 \
+        -o ./yaml \
+        --fisheye
+
+    (The input device and resolution are fixed in the script via a GStreamer pipeline on /dev/video0.)
+"""
+import argparse
+import os
+import numpy as np
+import yaml
+import cv2
+import sys
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    # parser.add_argument("-i", "--input", type=int, default=0,
+                        # help="input camera device")
+    parser.add_argument("-grid", "--grid", default="10x7", help="size of the grid (rows x cols)")
+    parser.add_argument("-framestep", type=int, default=20, help="use every nth frame in the video")
+    parser.add_argument("-o", "--output", default="./yaml",
+                        help="path to output yaml file")
+    # parser.add_argument("-resolution", "--resolution", default="640x480",
+    #                     help="resolution of the camera")
+    parser.add_argument("-fisheye", "--fisheye", default=True,action="store_true",
+                        help="set ture if this is a fisheye camera")
+
+    args = parser.parse_args()
+
+   
+    cap = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink"
+
+    if not os.path.exists(args.output):
+        os.mkdir(args.output)
+
+    
+    source = cv2.VideoCapture(cap)
+
+    W=1280
+    H=720
+    source.set(3, W)
+    source.set(4, H)
+
+    grid_size = tuple(int(x) for x in args.grid.split("x"))
+    grid_points = np.zeros((np.prod(grid_size), 3), np.float32)
+    grid_points[:, :2] = np.indices(grid_size).T.reshape(-1, 2)
+
+    objpoints = []  # 3d point in real world space
+    imgpoints = []  # 2d points in image plane
+
+    quit = False
+    do_calib = False
+    i = -1
+    while True:
+        i += 1
+        retcode, img = source.read()
+        if not retcode:
+            raise ValueError("cannot read frame from video")
+        H, W, _ = img.shape  # shape is (rows, cols, channels)
+        if i % args.framestep != 0:
+            continue
+
+        print("searching for chessboard corners in frame " + str(i) + "...")
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        found, corners = cv2.findChessboardCorners(
+            gray,
+            grid_size,
+            cv2.CALIB_CB_ADAPTIVE_THRESH +
+            cv2.CALIB_CB_NORMALIZE_IMAGE +
+            cv2.CALIB_CB_FILTER_QUADS
+        )
+        if found:
+            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01)
+            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
+            print("OK")
+            imgpoints.append(corners.reshape(1, -1, 2))
+            objpoints.append(grid_points.reshape(1, -1, 3))
+            cv2.drawChessboardCorners(img, grid_size, corners, found)
+        text1 = "press c to calibrate"
+        text2 = "press q to quit"
+        text3 = "device: {}".format(cap)
+        fontscale = 0.6
+        cv2.putText(img, text1, (20, 70), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
+        cv2.putText(img, text2, (20, 110), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
+        cv2.putText(img, text3, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
+        cv2.imshow("corners", img)
+        key = cv2.waitKey(1) & 0xFF
+        if key == ord("c"):
+            do_calib = True
+            break
+
+        elif key == ord("q"):
+            quit = True
+            break
+
+    if quit:
+        source.release()
+        cv2.destroyAllWindows()
+
+    if do_calib:
+        print("\nPerforming calibration...\n")
+        N_OK = len(objpoints)
+        if N_OK < 12:
+            print("Less than 12 corners detected, calibration failed")
+            return
+
+        K = np.zeros((3, 3))
+        D = np.zeros((4, 1))
+        rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
+        tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
+        calibration_flags = (cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC +
+                             cv2.fisheye.CALIB_CHECK_COND +
+                             cv2.fisheye.CALIB_FIX_SKEW)
+
+        # solve for the intrinsic matrix and distortion coefficients
+        if args.fisheye:
+            ret, K, D, rvecs, tvecs = cv2.fisheye.calibrate(
+                objpoints,
+                imgpoints,
+                (W, H),
+                K,
+                D,
+                rvecs,
+                tvecs,
+                calibration_flags,
+                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
+            )
+        else:
+            ret, K, D, rvecs, tvecs = cv2.calibrateCamera(
+                objpoints,
+                imgpoints,
+                (W, H),
+                None,
+                None)
+        if ret:
+            print(ret)
+            print(K)
+            data = {"dim": np.array([W, H]).tolist(), "K": K.tolist(), "D": D.tolist()}
+            fname = "./yaml/camera147.yaml"
+            print(fname)
+            with open(fname, "w") as f:
+                yaml.safe_dump(data, f)
+            print("successfully saved camera data")
+
+            cv2.putText(img, "Success!", (220 , 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
+        else:
+            cv2.putText(img, "Failed!", (220, 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
+
+        cv2.imshow("corners", img)
+        cv2.waitKey(0)
+
+
+if __name__ == "__main__":
+    main()

+ 199 - 0
src/detection/yolov5-trt-v2.0/calibrate_camera_new.py

@@ -0,0 +1,199 @@
+#!/usr/bin/python3
+"""
+~~~~~~~~~~~~~~~~~~
+Camera calibration
+~~~~~~~~~~~~~~~~~~
+
+Usage:
+    python calibrate_camera_new.py \
+        -grid 9x6 \
+        -framestep 20 \
+        -o ./yaml \
+        --fisheye
+
+    (The input device and resolution are fixed in the script via a GStreamer pipeline on /dev/video0.)
+"""
+import argparse
+import os
+import numpy as np
+import yaml
+import cv2
+import sys
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    # parser.add_argument("-i", "--input", type=int, default=0,
+                        # help="input camera device")
+    parser.add_argument("-grid", "--grid", default="10x7", help="size of the grid (rows x cols)")
+    parser.add_argument("-framestep", type=int, default=20, help="use every nth frame in the video")
+    parser.add_argument("-o", "--output", default="./yaml",
+                        help="path to output yaml file")
+    # parser.add_argument("-resolution", "--resolution", default="640x480",
+    #                     help="resolution of the camera")
+    parser.add_argument("-fisheye", "--fisheye", default=True,action="store_true",
+                        help="set ture if this is a fisheye camera")
+
+    args = parser.parse_args()
+
+   
+    cap = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink"
+
+    if not os.path.exists(args.output):
+        os.mkdir(args.output)
+
+    
+    source = cv2.VideoCapture(cap)
+
+    W=1280
+    H=720
+    source.set(3, W)
+    source.set(4, H)
+
+    grid_size = tuple(int(x) for x in args.grid.split("x"))
+    grid_points = np.zeros((np.prod(grid_size), 3), np.float32)
+    grid_points[:, :2] = np.indices(grid_size).T.reshape(-1, 2)
+
+    objpoints = []  # 3d point in real world space
+    imgpoints = []  # 2d points in image plane
+
+    quit = False
+    do_calib = False
+    i = -1
+    while True:
+        i += 1
+        retcode, img = source.read()
+        if not retcode:
+            raise ValueError("cannot read frame from video")
+        # img = img[120:480, 240:880]
+        # print(img.shape)
+        H, W, _ = img.shape  # shape is (rows, cols, channels)
+        if i % args.framestep != 0:
+            continue
+
+        print("searching for chessboard corners in frame " + str(i) + "...")
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+        found, corners = cv2.findChessboardCorners(
+            gray,
+            grid_size,
+            cv2.CALIB_CB_ADAPTIVE_THRESH +
+            cv2.CALIB_CB_NORMALIZE_IMAGE +
+            cv2.CALIB_CB_FILTER_QUADS
+        )
+        if found:
+            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.01)
+            cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)
+            print("OK")
+            imgpoints.append(corners.reshape(1, -1, 2))
+            objpoints.append(grid_points.reshape(1, -1, 3))
+            cv2.drawChessboardCorners(img, grid_size, corners, found)
+        text1 = "press c to calibrate"
+        text2 = "press q to quit"
+        text3 = "device: {}".format(cap)
+        fontscale = 0.6
+        cv2.putText(img, text1, (20, 70), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
+        cv2.putText(img, text2, (20, 110), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
+        cv2.putText(img, text3, (20, 30), cv2.FONT_HERSHEY_SIMPLEX, fontscale, (255, 200, 0), 2)
+        cv2.imshow("corners", img)
+
+        print("\nPerforming calibration...\n")
+        N_OK = len(objpoints)
+        if N_OK > 12:
+            print("More than 12 corners detected")
+
+            K = np.zeros((3, 3))
+            D = np.zeros((4, 1))
+            rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
+            tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
+            calibration_flags = (cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC +
+                                cv2.fisheye.CALIB_CHECK_COND +
+                                cv2.fisheye.CALIB_FIX_SKEW)
+
+            try:
+                ret, K, D, rvecs, tvecs = cv2.fisheye.calibrate(
+                    objpoints,
+                    imgpoints,
+                    (W, H),
+                    K,
+                    D,
+                    rvecs,
+                    tvecs,
+                    calibration_flags,
+                    (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-5)
+                )
+                do_calib = True  # calibration converged; results are saved below
+                break
+            except cv2.error:
+                objpoints = []
+                imgpoints = []
+                print("calibration failed, keep collecting frames (do not press C yet)")
+
+
+        key = cv2.waitKey(1) & 0xFF
+        if key == ord("c"):
+            do_calib = True
+            break
+
+        elif key == ord("q"):
+            quit = True
+            break
+
+    if quit:
+        source.release()
+        cv2.destroyAllWindows()
+
+    if do_calib:
+        print("\nPerforming calibration...\n")
+        N_OK = len(objpoints)
+        if N_OK < 12:
+            print("Less than 12 corners detected, calibration failed")
+            return
+
+        K = np.zeros((3, 3))
+        D = np.zeros((4, 1))
+        rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
+        tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for _ in range(N_OK)]
+        calibration_flags = (cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC +
+                             cv2.fisheye.CALIB_CHECK_COND +
+                             cv2.fisheye.CALIB_FIX_SKEW)
+
+        # solve for the intrinsic matrix and distortion coefficients
+        if args.fisheye:
+            ret, K, D, rvecs, tvecs = cv2.fisheye.calibrate(
+                objpoints,
+                imgpoints,
+                (W, H),
+                K,
+                D,
+                rvecs,
+                tvecs,
+                calibration_flags,
+                (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-5)
+            )
+
+        else:
+            ret, K, D, rvecs, tvecs = cv2.calibrateCamera(
+                objpoints,
+                imgpoints,
+                (W, H),
+                None,
+                None)
+        if ret:
+            print(ret)
+            print(K)
+            data = {"dim": np.array([W, H]).tolist(), "K": K.tolist(), "D": D.tolist()}
+            fname = "./yaml/camera147.yaml"
+            print(fname)
+            with open(fname, "w") as f:
+                yaml.safe_dump(data, f)
+            print("successfully saved camera data")
+
+            cv2.putText(img, "Success!", (220 , 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
+        else:
+            cv2.putText(img, "Failed!", (220, 240), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255), 2)
+
+        cv2.imshow("corners", img)
+        cv2.waitKey(0)
+
+
+if __name__ == "__main__":
+    main()

+ 36 - 0
src/detection/yolov5-trt-v2.0/include/Hungarian.h

@@ -0,0 +1,36 @@
+//
+// Created by lqx on 20-4-23.
+//
+
+#ifndef TRACK_SORT_HUNGARIAN_H_H
+#define TRACK_SORT_HUNGARIAN_H_H
+
+#include <iostream>
+#include <vector>
+
+using namespace std;
+
+class HungarianAlgorithm
+{
+public:
+    HungarianAlgorithm();
+    ~HungarianAlgorithm();
+    double Solve(vector<vector<double>>& DistMatrix, vector<int>& Assignment);
+
+private:
+    void assignmentoptimal(int *assignment, double *cost, double *distMatrix, int nOfRows, int nOfColumns);
+    void buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns);
+    void computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows);
+    void step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+                bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+    void step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+                bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+    void step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+               bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+    void step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+               bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col);
+    void step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+               bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+};
+
+#endif //TRACK_SORT_HUNGARIAN_H_H
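
Note: HungarianAlgorithm::Solve takes an n-by-m cost matrix and fills Assignment with, for each row, the index of the assigned column (or -1 when unassigned), returning the total assignment cost. A minimal usage sketch, not part of this commit (the cost values are illustrative; in a SORT-style tracker the cost is typically 1 - IoU):

    #include <cstdio>
    #include "Hungarian.h"

    int main() {
        // 3 tracks x 3 detections; low cost = good match
        vector<vector<double>> cost = {{0.1, 0.9, 0.8},
                                       {0.7, 0.2, 0.9},
                                       {0.8, 0.8, 0.3}};
        vector<int> assignment;  // assignment[row] = matched column, or -1
        HungarianAlgorithm solver;
        double total = solver.Solve(cost, assignment);
        for (size_t i = 0; i < assignment.size(); i++)
            printf("track %zu -> detection %d\n", i, assignment[i]);
        printf("total cost: %.2f\n", total);
        return 0;
    }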

+ 90 - 0
src/detection/yolov5-trt-v2.0/include/KalmanTracker.h

@@ -0,0 +1,90 @@
+//
+// Created by lqx on 20-4-23.
+//
+
+#ifndef TRACK_SORT_KALMANTRACKER_H
+#define TRACK_SORT_KALMANTRACKER_H
+
+///////////////////////////////////////////////////////////////////////////////
+// KalmanTracker.h: KalmanTracker Class Declaration
+
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+using namespace std;
+using namespace cv;
+
+#define StateType Rect_<float>
+
+
+// This class represents the internal state of an individual tracked object, observed as a bounding box.
+class KalmanTracker
+{
+public:
+    KalmanTracker()
+    {
+        init_kf(StateType());
+        m_time_since_update = 0;
+        m_hits = 0;
+        m_hit_streak = 0;
+        m_age = 0;
+        m_id = kf_count;
+        //kf_count++;
+    }
+    KalmanTracker(StateType initRect)
+    {
+        init_kf(initRect);
+        m_time_since_update = 0;
+        m_hits = 0;
+        m_hit_streak = 0;
+        m_age = 0;
+        m_id = kf_count;
+        //kf_count++;
+    }
+
+    KalmanTracker(StateType initRect, int classId,float prob)
+    {
+        init_kf(initRect);
+        m_time_since_update = 0;
+        m_hits = 0;
+        m_hit_streak = 0;
+        m_age = 0;
+        m_id = kf_count;
+        kf_count++;
+        m_class_id = classId;
+        m_prob = prob;
+    }
+
+    ~KalmanTracker()
+    {
+        m_history.clear();
+        m_class_history.clear();
+    }
+
+    StateType predict();
+    void update(StateType stateMat,int classId, float prob);
+
+    StateType get_state();
+    StateType get_rect_xysr(float cx, float cy, float s, float r);
+
+    static int kf_count;
+
+    int m_time_since_update;
+    int m_hits;
+    int m_hit_streak;
+    int m_age;
+    int m_id;
+    int m_class_id;
+    std::vector<int> m_class_history;
+    float m_prob;
+
+private:
+    void init_kf(StateType stateMat);
+
+    cv::KalmanFilter kf;
+    cv::Mat measurement;
+
+    std::vector<StateType> m_history;
+};
+
+#endif //TRACK_SORT_KALMANTRACKER_H
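
Note: this header is driven in a SORT-style predict/update cycle. A minimal sketch, not part of this commit (box coordinates are made up; build it together with src/KalmanTracker.cpp from this commit, which defines kf_count):

    #include "KalmanTracker.h"

    int main() {
        // Start a track from a first detection: (x, y, w, h), class id, confidence.
        KalmanTracker tracker(Rect_<float>(100, 100, 40, 80), 0, 0.9f);

        // Each frame: predict first, then correct with the matched detection.
        Rect_<float> predicted = tracker.predict();
        tracker.update(Rect_<float>(104, 102, 40, 80), 0, 0.85f);
        Rect_<float> current = tracker.get_state();

        // m_time_since_update grows while the track is unmatched; the caller
        // drops the tracker once it exceeds max_age, and m_hit_streak gates
        // when a track counts as confirmed (see min_hits in detect_obstacle.h).
        return 0;
    }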

+ 54 - 0
src/detection/yolov5-trt-v2.0/include/detect_obstacle.h

@@ -0,0 +1,54 @@
+#ifndef DETECT_TURNSTILE_H
+#define DETECT_TURNSTILE_H
+
+#include "Hungarian.h"
+#include "KalmanTracker.h"
+
+namespace od{
+
+const int COLOR_MAP[3][3]={{0, 0, 255},{0, 255, 0},{0,255,255}};
+const int max_age = 3;
+const int min_hits = 3;
+const double iouThreshold = 0.5;
+
+struct bbox_t {
+    unsigned int x, y, w, h;       // (x,y) - top-left corner, (w, h) - width & height of bounded box
+    float prob;                    // confidence - probability that the object was found correctly
+    unsigned int obj_id;           // class of object - from range [0, classes-1]
+    unsigned int track_id;         // tracking id for video (0 - untracked, 1 - inf - tracked object)
+    unsigned int frames_counter;   // counter of frames on which the object was detected
+    float x_3d, y_3d, z_3d;        // center of object (in Meters) if ZED 3D Camera is used
+};
+
+typedef struct TrackingBox
+{
+    int frame;
+    int id;
+    int class_id;
+    float prob;
+    Rect_<float> box;
+    vector<int> class_history;
+}TrackingBox;
+
+// yolo detection converted to DetectBox
+typedef struct DetectBox
+{
+    int class_id;
+    float prob;
+    Rect_<float> box;
+}DetectBox;
+
+//Computes IOU between two bounding boxes
+double GetIOU(Rect_<float> bb_test, Rect_<float> bb_gt);
+// draw the detection box and its label info
+void DrawBoxes(Mat &frame, vector<string> classes, int classId, int turnstileId,float conf, int left, int top, int right, int bottom);
+
+// draw detection results (image)
+void Drawer(Mat &frame, vector<bbox_t> outs, vector<string> classes);
+// draw detection results (video)
+void Drawer(Mat &frame, vector<od::TrackingBox> &track_result, vector<string> &classes);
+//tracking obstacle
+bool TrackObstacle(int frame_count,vector<KalmanTracker> &trackers,vector<bbox_t> &outs,vector<od::TrackingBox> &track_result);
+}
+
+#endif // DETECT_TURNSTILE_H
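
Note: TrackObstacle's implementation lives in src/detect_obstacle.cpp, which is not shown in this hunk. As a hedged sketch of how these declarations compose in a SORT-style association step (the helper name `associate` is hypothetical, not from this commit):

    #include "detect_obstacle.h"

    // For each tracker prediction, return the matched detection index or -1.
    vector<int> associate(vector<Rect_<float>> &predictions,
                          vector<od::DetectBox> &detections) {
        // Cost matrix: low cost = high overlap between prediction and detection.
        vector<vector<double>> cost(predictions.size(),
                                    vector<double>(detections.size()));
        for (size_t t = 0; t < predictions.size(); t++)
            for (size_t d = 0; d < detections.size(); d++)
                cost[t][d] = 1.0 - od::GetIOU(predictions[t], detections[d].box);
        vector<int> assignment;
        HungarianAlgorithm solver;
        solver.Solve(cost, assignment);
        // Reject matches below the IoU gate.
        for (size_t t = 0; t < assignment.size(); t++)
            if (assignment[t] != -1 &&
                1.0 - cost[t][assignment[t]] < od::iouThreshold)
                assignment[t] = -1;
        return assignment;
    }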

+ 82 - 0
src/detection/yolov5-trt-v2.0/include/imageBuffer.h

@@ -0,0 +1,82 @@
+#ifndef IMAGEBUFFER_H
+#define IMAGEBUFFER_H
+
+#include <opencv2/opencv.hpp>
+#include <mutex>
+#include <condition_variable>
+#include <queue>
+template<typename T>
+class ConsumerProducerQueue
+{
+
+public:
+    ConsumerProducerQueue(int mxsz,bool dropFrame) :
+            maxSize(mxsz),dropFrame(dropFrame)
+    { }
+
+    bool add(T request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        if(dropFrame && isFull())
+        {
+            //lock.unlock();
+            //return false;
+            cpq.pop();
+            cpq.push(request);
+            cond.notify_all();
+            return true;
+        }
+        else {
+            cond.wait(lock, [this]() { return !isFull(); });
+            cpq.push(request);
+            //lock.unlock();
+            cond.notify_all();
+            return true;
+        }
+    }
+    void consume(T &request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        cond.wait(lock, [this]()
+        { return !isEmpty(); });
+        request = cpq.front();
+        cpq.pop();
+        //lock.unlock();
+        cond.notify_all();
+
+    }
+
+    bool isFull() const
+    {
+        return cpq.size() >= maxSize;
+    }
+
+    bool isEmpty() const
+    {
+        return cpq.size() == 0;
+    }
+
+    int length() const
+    {
+        return cpq.size();
+    }
+
+    void clear()
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        while (!isEmpty())
+        {
+            cpq.pop();
+        }
+        lock.unlock();
+        cond.notify_all();
+    }
+
+private:
+    std::condition_variable cond;  // condition variable used to synchronize producer and consumer threads
+    std::mutex mutex;     // mutex guarding the queue against data races
+    std::queue<T> cpq;    // underlying FIFO container
+    int maxSize;
+    bool dropFrame;
+};
+#endif // IMAGEBUFFER_H
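
Note: main_new.cpp instantiates this queue as ConsumerProducerQueue<cv::Mat>(3, true). A minimal two-thread usage sketch, not part of this commit: with dropFrame=true the oldest frame is discarded when the queue is full, so the consumer always sees recent frames instead of a growing backlog.

    #include "imageBuffer.h"
    #include <thread>

    int main() {
        ConsumerProducerQueue<cv::Mat> buffer(3, true);

        std::thread producer([&] {
            cv::VideoCapture cap(0);        // device index is illustrative
            cv::Mat frame;
            while (cap.read(frame))
                buffer.add(frame.clone());  // clone: cap reuses its internal buffer
        });

        std::thread consumer([&] {
            cv::Mat frame;
            while (true) {
                buffer.consume(frame);      // blocks until a frame is available
                // ... run detection on frame ...
            }
        });

        producer.join();
        consumer.join();
        return 0;
    }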

+ 701 - 0
src/detection/yolov5-trt-v2.0/main.cpp

@@ -0,0 +1,701 @@
+#include "NvInfer.h"
+#include "cuda_runtime_api.h"
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <vector>
+#include <chrono>
+#include <cmath>
+#include <cassert>
+
+
+#include<opencv2/core/core.hpp>
+#include<opencv2/highgui/highgui.hpp>
+#include <opencv2/opencv.hpp>
+
+#include "modulecomm.h"
+#include "xmlparam.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include "rawpic.pb.cc"
+#include "lightarray.pb.h"
+#include "brainstate.pb.h"
+#include <QCoreApplication>
+#include <thread>
+#include "opencv2/imgcodecs/legacy/constants_c.h"
+#include "qmutex.h"
+#include "condition_variable"
+
+// ONNX-to-TensorRT conversion header
+#include "NvOnnxParser.h"
+using namespace nvonnxparser;
+
+const std::string engine_path = "/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/best-640.engine"; // engine path
+
+cv::Mat camera_matrix,dist_coe,map1,map2;
+float time_read_img = 0.0;
+float time_infer = 0.0;
+cv::Mat res_img; // result image
+int time_num = 0;
+
+float* prob;
+using namespace nvinfer1;
+
+IExecutionContext* context;
+ICudaEngine* engine;
+IRuntime* runtime;
+
+
+bool gbstate = true;
+void * gp;
+void * gpdetect;
+iv::Ivfault *gfault = nullptr;
+iv::Ivlog *givlog = nullptr;
+
+
+using namespace std;
+
+
+#define CHECK(status) \
+    do\
+    {\
+        auto ret = (status);\
+        if (ret != 0)\
+        {\
+            std::cerr << "Cuda failure: " << ret << std::endl;\
+            abort();\
+        }\
+    } while (0)
+struct  Detection {
+    //center_x center_y  w h
+    float bbox[4];
+    float conf;  // bbox_conf * cls_conf
+    int class_id;
+    int index;
+};
+
+vector<Detection> results;
+/*-----------------------protobuf-------------------------*/
+iv::vision::Lightarray light_array;
+
+// stuff we know about the network and the input/output blobs
+static const int INPUT_H = 640;
+static const int INPUT_W = 640;
+static const int cls_num = 4;
+// anchor count per input size: 960-->56700 | 640-->25200 | 416-->10647 | 320-->6300
+static const int anchor_output_num = 25200;
+static const int OUTPUT_SIZE = 1* anchor_output_num *(cls_num+5); //1000 * sizeof(Detection) / sizeof(float) + 1;
+
+
+const char* INPUT_BLOB_NAME = "images";
+const char* OUTPUT_BLOB_NAME = "output";
+
+
+
+//static Logger gLogger;
+
+// build the TensorRT logger
+class Logger : public ILogger
+{
+    void log(Severity severity, const char* msg) noexcept override
+    {
+        // suppress info-level messages
+        if (severity <= Severity::kWARNING)
+            std::cout << msg << std::endl;
+    }
+} gLogger;
+
+
+
+void doInference(IExecutionContext& context, float* input, float* output, int batchSize)
+{
+    const ICudaEngine& engine = context.getEngine();
+    // Pointers to input and output device buffers to pass to engine.
+    // Engine requires exactly IEngine::getNbBindings() number of buffers.
+    assert(engine.getNbBindings() == 2);
+    void* buffers[2];
+    // In order to bind the buffers, we need to know the names of the input and output tensors.
+    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
+    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
+
+    //std::cout<<inputIndex<<" "<<outputIndex<<std::endl;
+    //const int inputIndex = 0;
+    //const int outputIndex = 1;
+    // Create GPU buffers on device
+    cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_H * INPUT_W * sizeof(float));
+    cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float));
+    // Create stream
+    cudaStream_t stream;
+    CHECK(cudaStreamCreate(&stream));
+    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
+    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
+    context.enqueue(batchSize, buffers, stream, nullptr);
+
+    //std::cout<<buffers[outputIndex+1]<<std::endl;
+    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
+    cudaStreamSynchronize(stream);
+    // Release stream and buffers
+    cudaStreamDestroy(stream);
+    CHECK(cudaFree(buffers[inputIndex]));
+    CHECK(cudaFree(buffers[outputIndex]));
+
+}
+
+
+
+// pack the image into a batched input in the flat CHW layout TensorRT expects
+void ProcessImage(cv::Mat image, float input_data[]) {
+    // only one image is processed; the result is a flat [batch*3*INPUT_H*INPUT_W] array
+    // the code below takes a quick-and-dirty shortcut
+
+    cv::resize(image, image, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
+    std::vector<cv::Mat> InputImage;
+
+    InputImage.push_back(image);
+
+
+
+    int ImgCount = InputImage.size();
+
+    //float input_data[BatchSize * 3 * INPUT_H * INPUT_W];
+    for (int b = 0; b < ImgCount; b++) {
+        cv::Mat img = InputImage.at(b);
+        int w = img.cols;
+        int h = img.rows;
+        int i = 0;
+        for (int row = 0; row < h; ++row) {
+            uchar* uc_pixel = img.data + row * img.step;
+            for (int col = 0; col < INPUT_W; ++col) {
+                input_data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
+                input_data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
+                input_data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
+                uc_pixel += 3;
+                ++i;
+            }
+        }
+
+    }
+
+}
+
+
+
+
+//********************************************** NMS code **********************************//
+
+
+struct Bbox {
+    int x;
+    int y;
+    int w;
+    int h;
+};
+
+float iou(Bbox box1, Bbox box2) {
+
+    int x1 = max(box1.x, box2.x);
+    int y1 = max(box1.y, box2.y);
+    int x2 = min(box1.x + box1.w, box2.x + box2.w);
+    int y2 = min(box1.y + box1.h, box2.y + box2.h);
+    int w = max(0, x2 - x1);
+    int h = max(0, y2 - y1);
+    float over_area = w * h;
+    return over_area / (box1.w * box1.h + box2.w * box2.h - over_area);
+}
+
+int get_max_index(vector<Detection> pre_detection) {
+    // find the highest-confidence detection and return its index
+    int index;
+    float conf;
+    if (pre_detection.size() > 0) {
+        index = 0;
+        conf = pre_detection.at(0).conf;
+        for (int i = 0; i < pre_detection.size(); i++) {
+            if (conf < pre_detection.at(i).conf) {
+                index = i;
+                conf = pre_detection.at(i).conf;
+            }
+        }
+        return index;
+    }
+    else {
+        return -1;
+    }
+
+
+}
+bool judge_in_lst(int index, vector<int> index_lst) {
+    // return true if index is in index_lst, false otherwise
+    if (index_lst.size() > 0) {
+        for (int i = 0; i < index_lst.size(); i++) {
+            if (index == index_lst.at(i)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+vector<int> nms(vector<Detection> pre_detection, float iou_thr)
+{
+    /*
+    returns the indices (into pre_detection) of the boxes to keep
+
+    */
+    int index;
+    vector<Detection> pre_detection_new;
+    //Detection det_best;
+    Bbox box_best, box;
+    float iou_value;
+    vector<int> keep_index;
+    vector<int> del_index;
+    bool keep_bool;
+    bool del_bool;
+    int rr = 0;
+    int zz = 0;
+
+    if (pre_detection.size() > 0) {
+
+        pre_detection_new.clear();
+        // index every prediction
+        for (int i = 0; i < pre_detection.size(); i++) {
+            pre_detection.at(i).index = i;
+            pre_detection_new.push_back(pre_detection.at(i));
+        }
+        // iterate to collect kept-box indices (relative to the input pre_detection)
+        while (pre_detection_new.size() > 0) {
+            index = get_max_index(pre_detection_new);
+            if (index >= 0) {
+                keep_index.push_back(pre_detection_new.at(index).index); // record the kept index
+
+                // update the current best box
+                box_best.x = pre_detection_new.at(index).bbox[0];
+                box_best.y = pre_detection_new.at(index).bbox[1];
+                box_best.w = pre_detection_new.at(index).bbox[2];
+                box_best.h = pre_detection_new.at(index).bbox[3];
+
+                for (int j = 0; j < pre_detection.size(); j++) {
+                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
+                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
+                    if ((!keep_bool) && (!del_bool)) { // only compute IoU for boxes not already kept or deleted
+                        box.x = pre_detection.at(j).bbox[0];
+                        box.y = pre_detection.at(j).bbox[1];
+                        box.w = pre_detection.at(j).bbox[2];
+                        box.h = pre_detection.at(j).bbox[3];
+                        iou_value = iou(box_best, box);
+                        if (iou_value > iou_thr) {
+                            del_index.push_back(j); // mark boxes above the IoU threshold for deletion
+                        }
+                    }
+
+                }
+                // rebuild pre_detection_new from the remaining boxes
+                pre_detection_new.clear();
+                for (int j = 0; j < pre_detection.size(); j++) {
+                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
+                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
+                    if ((!keep_bool) && (!del_bool)) {
+                        pre_detection_new.push_back(pre_detection.at(j));
+                    }
+                }
+            }
+        }
+    }
+
+    del_index.clear();
+    del_index.shrink_to_fit();
+    pre_detection_new.clear();
+    pre_detection_new.shrink_to_fit();
+
+    return  keep_index;
+
+}
+
+
+
+
+
+vector<Detection>  postprocess(float* prob, float conf_thr = 0.2, float nms_thr = 0.4) {
+    /*
+    ################ post-process the predictions for one image ################
+    prob layout per anchor: [x y w h score per-class-scores], e.g. 80 classes --> (1, anchor_num, 85)
+
+    */
+
+    vector<Detection> pre_results;
+    vector<int> nms_keep_index;
+    vector<Detection> results;
+    bool keep_bool;
+    Detection pre_res;
+    float conf;
+    int tmp_idx;
+    float tmp_cls_score;
+    for (int i = 0; i < anchor_output_num; i++) {
+        tmp_idx = i * (cls_num + 5);
+        pre_res.bbox[0] = prob[tmp_idx + 0];
+        pre_res.bbox[1] = prob[tmp_idx + 1];
+        pre_res.bbox[2] = prob[tmp_idx + 2];
+        pre_res.bbox[3] = prob[tmp_idx + 3];
+        conf = prob[tmp_idx + 4];  // objectness confidence
+        tmp_cls_score = prob[tmp_idx + 5] * conf;
+        pre_res.class_id = 0;
+        pre_res.conf = tmp_cls_score;
+        for (int j = 1; j < cls_num; j++) {
+            tmp_idx = i * (cls_num + 5) + 5 + j; // index of the j-th class score
+            if (tmp_cls_score < prob[tmp_idx] * conf)
+            {
+                tmp_cls_score = prob[tmp_idx] * conf;
+                pre_res.class_id = j;
+                pre_res.conf = tmp_cls_score;
+            }
+        }
+        if (conf >= conf_thr) {
+
+            pre_results.push_back(pre_res);
+        }
+
+    }
+
+    // apply NMS
+    nms_keep_index=nms(pre_results,nms_thr);
+
+    for (int i = 0; i < pre_results.size(); i++) {
+        keep_bool = judge_in_lst(i, nms_keep_index);
+        if (keep_bool) {
+            results.push_back(pre_results.at(i));
+        }
+
+    }
+
+
+    pre_results.clear();
+    pre_results.shrink_to_fit();
+    nms_keep_index.clear();
+    nms_keep_index.shrink_to_fit();
+
+
+    return results;
+
+}
+
+cv::Mat draw_rect(cv::Mat image, vector<Detection> results) {
+    /*
+    image: the frame to draw on
+
+    struct  Detection {
+    float bbox[4];  //center_x center_y  w h
+    float conf;   // confidence
+    int class_id; // class id
+    int index;    // internal index, can be ignored
+    };
+
+    */
+
+    float x;
+    float y;
+    float w;
+    float h;
+    string info;
+
+    cv::Rect rect;
+    for (int i = 0; i < results.size(); i++) {
+
+        x = results.at(i).bbox[0];
+        y= results.at(i).bbox[1];
+        w= results.at(i).bbox[2];
+        h=results.at(i).bbox[3];
+
+        /*---------------protobuf----------------*/
+        iv::vision::Light *light = light_array.add_light();
+        light->set_index(i+1);
+        light->set_type(results.at(i).class_id+1);
+
+        /*----------------------------------------*/
+
+
+        x = (int)(x - w / 2);
+        y = (int)(y - h / 2);
+        w = (int)w;
+        h = (int)h;
+        info = "id:";
+        info.append(to_string(results.at(i).class_id));
+        info.append(" s:");
+        info.append(to_string((int)(results.at(i).conf*100)  )   );
+        info.append("%");
+        rect= cv::Rect(x, y, w, h);
+        if(results.at(i).class_id == 0){ // red light
+            cv::rectangle(image, rect, cv::Scalar(0, 0, 255), 2, 1, 0); // both rectangle corners are inclusive
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 0, 255), 0.6, 1, false);
+        }else if(results.at(i).class_id == 1){ // green light
+            cv::rectangle(image, rect, cv::Scalar(0, 255, 0), 2, 1, 0);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 0), 0.6, 1, false);
+        }else if(results.at(i).class_id == 2){ // yellow light
+            cv::rectangle(image, rect, cv::Scalar(0, 255, 255), 2, 1, 0);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 255), 0.6, 1, false);
+        }else{
+            cv::rectangle(image, rect, cv::Scalar(255, 255, 255), 2, 1, 0);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255, 255, 255), 0.6, 1, false);
+        }
+//        cv::rectangle(image, rect, cv::Scalar(0, 255, 0), 1, 1, 0);
+//        cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 0), 0.4, 1, false);
+
+    }
+
+
+    return image;
+
+}
+
+void LoadEngine(const std::string engine_path){
+    // load the serialized engine from disk
+    char* trtModelStream{ nullptr };
+    size_t size{ 0 };
+    std::ifstream file(engine_path, std::ios::binary);
+    if (file.good()) {
+        file.seekg(0, file.end);
+        size = file.tellg();
+        file.seekg(0, file.beg);
+        trtModelStream = new char[size];
+        assert(trtModelStream);
+        file.read(trtModelStream, size);
+        file.close();
+    }
+
+    // deserialize into an engine and create the execution context
+
+    runtime = createInferRuntime(gLogger);
+    assert(runtime != nullptr);
+    engine = runtime->deserializeCudaEngine(trtModelStream, size, nullptr);
+    assert(engine != nullptr);
+    context = engine->createExecutionContext();
+    assert(context != nullptr);
+    delete[] trtModelStream;
+
+    // allocate page-locked (pinned) memory on the host
+    CHECK(cudaHostAlloc((void **)&prob, OUTPUT_SIZE * sizeof(float), cudaHostAllocDefault));
+}
+
+void shareMsg(){
+    int size = light_array.ByteSize();
+    char * strdata = new char[light_array.ByteSize()];
+    if(light_array.SerializeToArray(strdata, size))
+    {
+        iv::modulecomm::ModuleSendMsg(gpdetect, strdata, size);
+    }
+    else
+    {
+        std::cout<<"light_array serialize error."<<std::endl;
+    }
+    light_array.Clear();
+    delete[] strdata;
+
+    /*--------------------test ParseFromArray-------------------*/
+//    iv::vision::Lightarray light_array1;
+//    light_array1.ParseFromArray(strdata,size);
+//    cout<<"parsefromarray:"<<std::endl;
+//    cout<<"light_size:"<<light_array1.light_size()<<endl;
+//    for (int i=0;i<light_array1.light_size();i++) {
+//        std::cout<<"index:"<<light_array1.light(i).index()<<" type:"
+//                <<light_array1.light(i).type()
+//                <<std::endl;
+
+//    }
+}
+
+void infer(cv::Mat img) {
+
+    // preprocess the image into the fixed-size network input
+
+    auto start = std::chrono::system_clock::now();  // start timer
+    static float data[3 * INPUT_H * INPUT_W];
+
+    ProcessImage(img, data);
+    auto end = std::chrono::system_clock::now();
+    time_read_img = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_read_img;
+
+//    cout<<"read img time: "<<time_read_img<<"ms"<<endl;
+    //Run inference
+    start = std::chrono::system_clock::now();  // start timer
+    doInference(*context, data, prob, 1);
+    cout<<"doinference"<<endl;
+    end = std::chrono::system_clock::now();
+    time_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_infer;
+    std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+
+    // post-process the output
+    //std::cout <<"prob="<<prob << std::endl;
+    results.clear();
+    results=postprocess(prob, 0.3,  0.4);
+
+    cv::resize(img, img, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
+    res_img=draw_rect(img,results);
+
+//    cv::imshow("streetLight", res_img);
+//    cv::waitKey(1);
+
+    cout << "ok" << endl;
+    time_num++;
+    shareMsg();
+
+}
+
+
+
+void exitfunc()
+{
+    return;
+}
+
+void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+
+    std::cout<<"one time"<<std::endl;
+    if(nSize<1000)return;
+    iv::vision::rawpic pic;
+
+    if(false == pic.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"picview Listenpic fail."<<std::endl;
+        return;
+    }
+
+    cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+    if(pic.type() == 1)
+        memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+    else
+    {
+       mat.release();
+       std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+       mat = cv::imdecode(buff,CV_LOAD_IMAGE_COLOR);
+    }
+
+    cv::Mat img_camera=mat;
+
+//    img_camera = img_camera(cv::Range(0,640), cv::Range(320,960));//height width
+//    img_camera = img_camera(cv::Range(240,600), cv::Range(320,960));
+    img_camera = img_camera(cv::Range(240,600), cv::Range(460,820));
+//    cv::remap(img_camera,img_camera,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+
+    infer(img_camera);
+
+    mat.release();
+}
+
+void ListenLight(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+
+    std::cout<<"one time"<<std::endl;
+    if(nSize<1000)return;
+    iv::vision::rawpic pic;
+    iv::brain::brainstate brain_state;
+
+    if(false == brain_state.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"brain_state Listen fail."<<std::endl;
+        return;
+    }
+
+    cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+    if(pic.type() == 1)
+        memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+    else
+    {
+       mat.release();
+       std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+       mat = cv::imdecode(buff,CV_LOAD_IMAGE_COLOR);
+    }
+
+    cv::Mat img_camera=mat;
+
+//    img_camera = img_camera(cv::Range(0,640), cv::Range(320,960));//height width
+//    img_camera = img_camera(cv::Range(240,600), cv::Range(320,960));
+    img_camera = img_camera(cv::Range(240,600), cv::Range(460,820));
+//    cv::remap(img_camera,img_camera,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+
+    infer(img_camera);
+
+    mat.release();
+}
+
+int main(int argc, char** argv)
+{
+    showversion("yolov5s");
+    QCoreApplication a(argc, argv);
+
+    gfault = new iv::Ivfault("lidar_cnn_segmentation");
+    givlog = new iv::Ivlog("lidar_cnn_segmentation");
+    gfault->SetFaultState(0,0,"cnn initialize.");
+
+
+//================================== camera calib init ==========================
+
+    cv::FileStorage calib_file("/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/yaml/camera_middle_640_360.yaml", cv::FileStorage::READ);
+    calib_file["cameraMatrix"]>>camera_matrix;
+    calib_file["distCoeffs"]>>dist_coe;
+    cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
+    cv::Size imgsize=cv::Size(640,360);
+    cv::initUndistortRectifyMap(camera_matrix, dist_coe, R, camera_matrix,imgsize,CV_16SC2,map1,map2);
+
+//==============================================================================
+
+    bool light_start = false;
+    while(light_start == false){
+        light_start = iv::modulecomm::RegisterRecv("lightStart",ListenLight);
+    }
+
+
+    LoadEngine(engine_path);
+    gp = iv::modulecomm::RegisterRecv("image00",Listenpic);
+    gpdetect = iv::modulecomm::RegisterSend("streetLight",10000000,1);
+
+//==============================================================================================
+//    cv::Mat img_test = cv::imread("/home/nvidia/modularization/src/detection/yolov5-trt/data/00002.jpg");
+//    infer(img_test);
+//    while(true){
+//        cv::imshow("streetLight", res_img);
+//        cv::waitKey(10);
+//    }
+//==========================================================================================
+    cv::VideoCapture cap("/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/data/phone_light1.mp4");
+    if(!cap.isOpened()){
+        std::cout<<"open failed"<<std::endl;
+        return -1;
+    }
+    cv::Mat frame;
+    cv::waitKey(1000);
+    while(true){
+        cap>>frame;
+        if(frame.empty()){
+            break;
+        }
+        //frame = frame(cv::Range(240,480), cv::Range(520,760));//height width
+        //frame = frame(cv::Range(0,640), cv::Range(320,960));//height width
+        //frame = frame(cv::Range(240,600), cv::Range(320,960));//height width
+
+        frame = frame(cv::Range(0,640), cv::Range(320,980));//height width
+ //        cv::remap(frame,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+        //infer(frame);
+        cv::imshow("streetLight", frame);
+        if (cv::waitKey(10) == 'q') break;
+    }
+    cv::destroyAllWindows();
+
+//========================================================================================
+//    cv::waitKey(5000);
+//    while(true){
+//        cv::imshow("streetLight", res_img);
+//        cv::waitKey(10);
+//    }
+    std::cout << "C++ 2engine" << " mean read img time =" << time_read_img / time_num << "ms\t" << " mean infer img time =" << time_infer / time_num << "ms" << std::endl;
+
+    // Destroy the engine
+    context->destroy();
+    engine->destroy();
+    runtime->destroy();
+
+    return 0;
+}
+
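
Note: postprocess above walks a flat output buffer of anchor_output_num rows, each (cls_num + 5) floats long. A worked sketch of the same index arithmetic, not part of this commit (the row index is arbitrary):

    // Layout per row: [cx, cy, w, h, objectness, cls0 ... cls{cls_num-1}]
    const int row    = 7;                  // any anchor index < anchor_output_num
    const int stride = cls_num + 5;        // 9 floats per row when cls_num == 4
    const int base   = row * stride;

    float cx  = prob[base + 0];
    float cy  = prob[base + 1];
    float w   = prob[base + 2];
    float h   = prob[base + 3];
    float obj = prob[base + 4];            // objectness confidence

    // Final score per class is objectness * class score; keep the best one.
    float best_score = obj * prob[base + 5];
    int   best_id    = 0;
    for (int j = 1; j < cls_num; j++) {
        float s = obj * prob[base + 5 + j];
        if (s > best_score) { best_score = s; best_id = j; }
    }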

+ 866 - 0
src/detection/yolov5-trt-v2.0/main_new.cpp

@@ -0,0 +1,866 @@
+#include "NvInfer.h"
+#include "cuda_runtime_api.h"
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <vector>
+#include <chrono>
+#include <cmath>
+#include <cassert>
+#include <algorithm>
+
+
+#include<opencv2/core/core.hpp>
+#include<opencv2/highgui/highgui.hpp>
+#include <opencv2/opencv.hpp>
+
+#include "modulecomm.h"
+#include "xmlparam.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include "rawpic.pb.h"
+#include "lightarray.pb.h"
+#include "brainstate.pb.h"
+#include <QCoreApplication>
+#include <thread>
+#include "opencv2/imgcodecs/legacy/constants_c.h"
+#include "qmutex.h"
+#include "condition_variable"
+#include "imageBuffer.h"
+#include "detect_obstacle.h"
+
+// ONNX-to-TensorRT conversion header
+#include "NvOnnxParser.h"
+using namespace nvonnxparser;
+using namespace nvinfer1;
+using namespace std;
+
+// global variables
+struct  Detection {
+    //center_x center_y  w h
+    float bbox[4];
+    float conf;  // bbox_conf * cls_conf
+    int class_id;
+    int index;
+};
+
+struct Bbox {
+    int x;
+    int y;
+    int w;
+    int h;
+};
+
+
+float conf_thr = 0.5;
+float nms_thr = 0.4;
+
+const bool calibrationstate = false;
+const bool cropstate = false;
+cv::Range crop_height = cv::Range(40,680);
+cv::Range crop_width = cv::Range(320,960);
+const bool trackstate = false;
+
+// stuff we know about the network and the input/output blobs
+static const int INPUT_H = 640;
+static const int INPUT_W = 640;
+static const int cls_num = 3;
+// anchor count per input size: 960-->56700 | 640-->25200 | 416-->10647 | 320-->6300
+static const int anchor_output_num = 25200;
+static const int OUTPUT_SIZE = 1* anchor_output_num *(cls_num+5); //1000 * sizeof(Detection) / sizeof(float) + 1;
+
+const char* INPUT_BLOB_NAME = "images";
+const char* OUTPUT_BLOB_NAME = "output";
+
+const char* onnx_path = "/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/model/yolov5s_640_old+new.onnx"; //onnx path
+const std::string engine_path = "/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/model/yolov5s_640_old+new.engine"; // engine path
+IExecutionContext* context;
+ICudaEngine* engine;
+IRuntime* runtime;
+float* prob;
+
+bool test_video = true;
+//string video_path = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink";
+string video_path = "/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/data/camera_test4.mp4";
+
+void * gpcamera;
+string cameraname="image00";
+ConsumerProducerQueue<cv::Mat> * imageBuffer =  new ConsumerProducerQueue<cv::Mat>(3,true);
+
+void * gpbrain;  // receives the traffic-light enable state
+string brainname = "brainstate";
+bool lightstart = true;
+
+void * gpdetect;
+string detectname = "lightarray";  //检测结果
+
+
+iv::Ivfault *gfault = nullptr;
+iv::Ivlog *givlog = nullptr;
+
+cv::Mat camera_matrix,dist_coe,map1,map2;  // calibration parameters
+float time_read_img = 0.0;
+float time_infer = 0.0;
+int time_num = 0;
+
+#define CHECK(status) \
+    do\
+    {\
+        auto ret = (status);\
+        if (ret != 0)\
+        {\
+            std::cerr << "Cuda failure: " << ret << std::endl;\
+            abort();\
+        }\
+    } while (0)
+
+//static Logger gLogger;
+// build the TensorRT logger
+class Logger : public ILogger
+{
+    void log(Severity severity, const char* msg) noexcept override
+    {
+        // suppress info-level messages
+        if (severity <= Severity::kWARNING)
+            std::cout << msg << std::endl;
+    }
+} gLogger;
+
+
+// Create the engine by parsing the ONNX model.
+ICudaEngine* createEngine(unsigned int maxBatchSize, IBuilder* builder, IBuilderConfig* config)
+{
+
+    INetworkDefinition* network = builder->createNetworkV2(1U); // important: must be 1U (explicit batch); 0U breaks parsing
+
+    IParser* parser = createParser(*network, gLogger);
+    parser->parseFromFile(onnx_path, static_cast<int32_t>(ILogger::Severity::kWARNING));
+    // report any parsing errors
+    for (int32_t i = 0; i < parser->getNbErrors(); ++i) { std::cout << parser->getError(i)->desc() << std::endl; }
+    std::cout << "successfully parse the onnx model" << std::endl;
+
+    // Build engine
+    builder->setMaxBatchSize(maxBatchSize);
+    config->setMaxWorkspaceSize(1 << 20);
+    //config->setFlag(nvinfer1::BuilderFlag::kFP16); // 设置精度计算
+    //config->setFlag(nvinfer1::BuilderFlag::kINT8);
+    ICudaEngine* engine = builder->buildEngineWithConfig(*network, *config);
+    std::cout << "successfully  convert onnx to  engine!!! " << std::endl;
+
+    // clean up
+    network->destroy();
+    //parser->destroy();
+
+    return engine;
+}
+
+void APIToModel(unsigned int maxBatchSize, IHostMemory** modelStream)
+{
+
+    // Create builder
+    IBuilder* builder = createInferBuilder(gLogger);
+    IBuilderConfig* config = builder->createBuilderConfig();
+
+    // Create model to populate the network, then set the outputs and create an engine
+    ICudaEngine* engine = createEngine(maxBatchSize, builder, config);
+
+    assert(engine != nullptr);
+
+
+
+    // Serialize the engine
+    (*modelStream) = engine->serialize();
+    // Close everything down
+    engine->destroy();
+    builder->destroy();
+    config->destroy();
+
+}
+
+
+int get_trtengine() {
+
+    IHostMemory* modelStream{ nullptr };
+    APIToModel(1, &modelStream);
+    assert(modelStream != nullptr);
+
+    std::ofstream p(engine_path, std::ios::binary);
+    if (!p)
+    {
+        std::cerr << "could not open plan output file" << std::endl;
+        return -1;
+    }
+    p.write(reinterpret_cast<const char*>(modelStream->data()), modelStream->size());
+    modelStream->destroy();
+
+    return 0;
+
+}
+
+
+void doInference(IExecutionContext& context, float* input, float* output, int batchSize)
+{
+    const ICudaEngine& engine = context.getEngine();
+    // Pointers to input and output device buffers to pass to engine.
+    // Engine requires exactly IEngine::getNbBindings() number of buffers.
+    assert(engine.getNbBindings() == 2);
+    void* buffers[2];
+    // In order to bind the buffers, we need to know the names of the input and output tensors.
+    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);
+    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
+
+    //std::cout<<inputIndex<<" "<<outputIndex<<std::endl;
+    //const int inputIndex = 0;
+    //const int outputIndex = 1;
+    // Create GPU buffers on device
+    cudaMalloc(&buffers[inputIndex], batchSize * 3 * INPUT_H * INPUT_W * sizeof(float));
+    cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float));
+    // Create stream
+    cudaStream_t stream;
+    CHECK(cudaStreamCreate(&stream));
+    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
+    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * 3 * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
+    context.enqueue(batchSize, buffers, stream, nullptr);
+
+    //std::cout<<buffers[outputIndex+1]<<std::endl;
+    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
+    cudaStreamSynchronize(stream);
+    // Release stream and buffers
+    cudaStreamDestroy(stream);
+    CHECK(cudaFree(buffers[inputIndex]));
+    CHECK(cudaFree(buffers[outputIndex]));
+
+}
+
+// pack the image into a batched input in the flat CHW layout TensorRT expects
+void ProcessImage(cv::Mat image, float input_data[]) {
+    // only one image is processed; the result is a flat [batch*3*INPUT_H*INPUT_W] array
+    // the code below takes a quick-and-dirty shortcut
+
+    cv::Mat resize_img ;
+    cv::resize(image, resize_img, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
+    std::vector<cv::Mat> InputImage;
+
+    InputImage.push_back(resize_img);
+    int ImgCount = InputImage.size();
+
+    //float input_data[BatchSize * 3 * INPUT_H * INPUT_W];
+    for (int b = 0; b < ImgCount; b++) {
+        cv::Mat img = InputImage.at(b);
+        int w = img.cols;
+        int h = img.rows;
+        int i = 0;
+        for (int row = 0; row < h; ++row) {
+            uchar* uc_pixel = img.data + row * img.step;
+            for (int col = 0; col < INPUT_W; ++col) {
+                input_data[b * 3 * INPUT_H * INPUT_W + i] = (float)uc_pixel[2] / 255.0;
+                input_data[b * 3 * INPUT_H * INPUT_W + i + INPUT_H * INPUT_W] = (float)uc_pixel[1] / 255.0;
+                input_data[b * 3 * INPUT_H * INPUT_W + i + 2 * INPUT_H * INPUT_W] = (float)uc_pixel[0] / 255.0;
+                uc_pixel += 3;
+                ++i;
+            }
+        }
+
+    }
+
+}
+
+//********************************************** NMS code **********************************//
+float iou(Bbox box1, Bbox box2) {
+
+    int x1 = max(box1.x, box2.x);
+    int y1 = max(box1.y, box2.y);
+    int x2 = min(box1.x + box1.w, box2.x + box2.w);
+    int y2 = min(box1.y + box1.h, box2.y + box2.h);
+    int w = max(0, x2 - x1);
+    int h = max(0, y2 - y1);
+    float over_area = w * h;
+    return over_area / (box1.w * box1.h + box2.w * box2.h - over_area);
+}
+
+int get_max_index(vector<Detection> pre_detection) {
+    // find the highest-confidence detection and return its index
+    int index;
+    float conf;
+    if (pre_detection.size() > 0) {
+        index = 0;
+        conf = pre_detection.at(0).conf;
+        for (int i = 0; i < pre_detection.size(); i++) {
+            if (conf < pre_detection.at(i).conf) {
+                index = i;
+                conf = pre_detection.at(i).conf;
+            }
+        }
+        return index;
+    }
+    else {
+        return -1;
+    }
+
+
+}
+bool judge_in_lst(int index, vector<int> index_lst) {
+    // return true if index is in index_lst, false otherwise
+    if (index_lst.size() > 0) {
+        for (int i = 0; i < index_lst.size(); i++) {
+            if (index == index_lst.at(i)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+vector<int> nms(vector<Detection> pre_detection, float iou_thr)
+{
+    /*
+    returns the indices (into pre_detection) of the boxes to keep
+
+    */
+    int index;
+    vector<Detection> pre_detection_new;
+    //Detection det_best;
+    Bbox box_best, box;
+    float iou_value;
+    vector<int> keep_index;
+    vector<int> del_index;
+    bool keep_bool;
+    bool del_bool;
+    int rr = 0;
+    int zz = 0;
+
+    if (pre_detection.size() > 0) {
+
+        pre_detection_new.clear();
+        // index every prediction
+        for (int i = 0; i < pre_detection.size(); i++) {
+            pre_detection.at(i).index = i;
+            pre_detection_new.push_back(pre_detection.at(i));
+        }
+        // iterate to collect kept-box indices (relative to the input pre_detection)
+        while (pre_detection_new.size() > 0) {
+            index = get_max_index(pre_detection_new);
+            if (index >= 0) {
+                keep_index.push_back(pre_detection_new.at(index).index); // record the kept index
+
+                // update the current best box
+                box_best.x = pre_detection_new.at(index).bbox[0];
+                box_best.y = pre_detection_new.at(index).bbox[1];
+                box_best.w = pre_detection_new.at(index).bbox[2];
+                box_best.h = pre_detection_new.at(index).bbox[3];
+
+                for (int j = 0; j < pre_detection.size(); j++) {
+                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
+                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
+                    if ((!keep_bool) && (!del_bool)) { // only compute IoU for boxes not already kept or deleted
+                        box.x = pre_detection.at(j).bbox[0];
+                        box.y = pre_detection.at(j).bbox[1];
+                        box.w = pre_detection.at(j).bbox[2];
+                        box.h = pre_detection.at(j).bbox[3];
+                        iou_value = iou(box_best, box);
+                        if (iou_value > iou_thr) {
+                            del_index.push_back(j); // mark boxes above the IoU threshold for deletion
+                        }
+                    }
+
+                }
+                // rebuild pre_detection_new from the remaining boxes
+                pre_detection_new.clear();
+                for (int j = 0; j < pre_detection.size(); j++) {
+                    keep_bool = judge_in_lst(pre_detection.at(j).index, keep_index);
+                    del_bool = judge_in_lst(pre_detection.at(j).index, del_index);
+                    if ((!keep_bool) && (!del_bool)) {
+                        pre_detection_new.push_back(pre_detection.at(j));
+                    }
+                }
+            }
+        }
+    }
+
+    del_index.clear();
+    del_index.shrink_to_fit();
+    pre_detection_new.clear();
+    pre_detection_new.shrink_to_fit();
+
+    return  keep_index;
+
+}
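+
+// Minimal usage sketch (illustrative only; the values are made up). Two heavily
+// overlapping detections go in, and only the higher-confidence one survives:
+//
+//     vector<Detection> dets(2);
+//     dets[0].bbox[0] = 100; dets[0].bbox[1] = 100; dets[0].bbox[2] = 40; dets[0].bbox[3] = 40; dets[0].conf = 0.9f;
+//     dets[1].bbox[0] = 105; dets[1].bbox[1] = 102; dets[1].bbox[2] = 40; dets[1].bbox[3] = 40; dets[1].conf = 0.6f;
+//     vector<int> keep = nms(dets, 0.4f);   // keep == {0}; box 1 is suppressed
+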
+void postprocess(float* prob,vector<Detection> &results,float conf_thr = 0.2, float nms_thr = 0.4) {
+    /*
+    Post-processes the predictions of a single image.
+    prob is laid out as [x y w h score per-class-scores], e.g. for 80 classes --> (1, anchor_num, 85)
+    */
+
+    vector<Detection> pre_results;
+    vector<int> nms_keep_index;
+    bool keep_bool;
+    Detection pre_res;
+    float conf;
+    int tmp_idx;
+    float tmp_cls_score;
+    for (int i = 0; i < anchor_output_num; i++) {
+        tmp_idx = i * (cls_num + 5);
+        pre_res.bbox[0] = prob[tmp_idx + 0];
+        pre_res.bbox[1] = prob[tmp_idx + 1];
+        pre_res.bbox[2] = prob[tmp_idx + 2];
+        pre_res.bbox[3] = prob[tmp_idx + 3];
+        conf = prob[tmp_idx + 4];  //objectness confidence
+        tmp_cls_score = prob[tmp_idx + 5] * conf;
+        pre_res.class_id = 0;
+        pre_res.conf = tmp_cls_score;
+        for (int j = 1; j < cls_num; j++) {
+            tmp_idx = i * (cls_num + 5) + 5 + j; //index of class j's score
+            if (tmp_cls_score < prob[tmp_idx] * conf)
+            {
+                tmp_cls_score = prob[tmp_idx] * conf;
+                pre_res.class_id = j;
+                pre_res.conf = tmp_cls_score;
+            }
+        }
+        if (conf >= conf_thr) { //filter on objectness
+            pre_results.push_back(pre_res);
+        }
+    }
+
+    //apply NMS
+    nms_keep_index=nms(pre_results,nms_thr);
+
+    for (int i = 0; i < pre_results.size(); i++) {
+        keep_bool = judge_in_lst(i, nms_keep_index);
+        if (keep_bool) {
+            results.push_back(pre_results.at(i));
+        }
+    }
+
+    pre_results.clear();
+    pre_results.shrink_to_fit();
+    nms_keep_index.clear();
+    nms_keep_index.shrink_to_fit();
+}
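+
+// Layout example (illustrative; the real cls_num is defined elsewhere in this file):
+// with cls_num = 3, each anchor occupies cls_num + 5 = 8 floats, so anchor i lives at
+//
+//     prob[8*i + 0..3]   -> cx, cy, w, h
+//     prob[8*i + 4]      -> objectness score
+//     prob[8*i + 5 + j]  -> raw score of class j; final confidence = prob[8*i + 5 + j] * prob[8*i + 4]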
+
+cv::Mat draw_rect(cv::Mat image, vector<Detection> results) {
+    /*
+    image: the image to draw on
+
+    struct  Detection {
+    float bbox[4];  //center_x center_y  w h
+    float conf;     //confidence
+    int class_id;   //class id
+    int index;      //can be ignored
+    };
+    */
+
+    float x;
+    float y;
+    float w;
+    float h;
+    string info;
+
+    cv::Rect rect;
+    for (int i = 0; i < results.size(); i++) {
+
+        x = results.at(i).bbox[0];
+        y = results.at(i).bbox[1];
+        w = results.at(i).bbox[2];
+        h = results.at(i).bbox[3];
+
+        x = (int)(x - w / 2);
+        y = (int)(y - h / 2);
+        w = (int)w;
+        h = (int)h;
+        info = "id:";
+        info.append(to_string(results.at(i).class_id));
+        info.append(" s:");
+        info.append(to_string((int)(results.at(i).conf*100)));
+        info.append("%");
+        rect = cv::Rect(x, y, w, h); //cv::rectangle treats both vertices as inside the rectangle
+        if(results.at(i).class_id == 0){ // red light
+            cv::rectangle(image, rect, cv::Scalar(0, 0, 255), 2);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 0, 255), 1);
+        }else if(results.at(i).class_id == 1){ // green light
+            cv::rectangle(image, rect, cv::Scalar(0, 255, 0), 2);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 0), 1);
+        }else if(results.at(i).class_id == 2){ // yellow light
+            cv::rectangle(image, rect, cv::Scalar(0, 255, 255), 2);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(0, 255, 255), 1);
+        }else{
+            cv::rectangle(image, rect, cv::Scalar(255, 255, 255), 2);
+            cv::putText(image, info, cv::Point(x, y), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255, 255, 255), 1);
+        }
+    }
+    return image;
+}
+
+bool LoadEngine(const std::string engine_path){
+    //load the serialized TensorRT engine from disk
+    char* trtModelStream{ nullptr };
+    size_t size{ 0 };
+    std::ifstream file(engine_path, std::ios::binary);
+    if(!file)
+    {
+        cout<<engine_path<<" not found!"<<endl;
+        return false;
+    }
+    if (file.good()) {
+        file.seekg(0, file.end);
+        size = file.tellg();
+        file.seekg(0, file.beg);
+        trtModelStream = new char[size];
+        assert(trtModelStream);
+        file.read(trtModelStream, size);
+        file.close();
+    }
+    //deserialize into an engine and create the execution context
+    runtime = createInferRuntime(gLogger);
+    assert(runtime != nullptr);
+    engine = runtime->deserializeCudaEngine(trtModelStream, size, nullptr);
+    //assert(engine != nullptr);
+    if(engine == nullptr)
+        return false;
+    context = engine->createExecutionContext();
+    assert(context != nullptr);
+    delete[] trtModelStream;
+
+    //allocate page-locked (pinned) host memory for the network output
+    CHECK(cudaHostAlloc((void **)&prob, OUTPUT_SIZE * sizeof(float), cudaHostAllocDefault));
+    return true;
+}
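+
+// Note: the pinned buffer is held for the lifetime of the process. If the engine
+// were ever torn down and reloaded, the matching cleanup (sketch) would be:
+//
+//     CHECK(cudaFreeHost(prob));   // release the cudaHostAlloc'd output buffer
+//     context->destroy();          // then destroy in reverse order of creation
+//     engine->destroy();
+//     runtime->destroy();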
+
+void shareLightMsg(vector<Detection> results)
+{
+    iv::vision::Lightarray light_array;   //publish the results to shared memory
+    for (int i = 0; i < results.size(); i++)
+    {
+        float x = results.at(i).bbox[0];
+        float y = results.at(i).bbox[1];
+        float w = results.at(i).bbox[2];
+        float h = results.at(i).bbox[3];
+        /*---------------protobuf----------------*/
+        iv::vision::Light *light = light_array.add_light();
+        iv::vision::Center light_center;
+        light_center.set_x(x);
+        light_center.set_y(y);
+        light->mutable_center()->CopyFrom(light_center);
+        light->set_index(i+1);
+        light->set_type(results.at(i).class_id+1);
+    }
+
+    int size = light_array.ByteSize();
+    char * strdata = new char[size];
+    if(light_array.SerializeToArray(strdata, size))
+    {
+        iv::modulecomm::ModuleSendMsg(gpdetect, strdata, size);
+    }
+    else
+    {
+        std::cout<<"light_array serialize error."<<std::endl;
+    }
+    light_array.Clear();
+    delete[] strdata;
+
+    /*--------------------test ParseFromArray-------------------*/
+//    iv::vision::Lightarray light_array1;
+//    light_array1.ParseFromArray(strdata,size);
+//    cout<<"parsefromarray:"<<std::endl;
+//    cout<<"light_size:"<<light_array1.light_size()<<endl;
+//    for (int i=0;i<light_array1.light_size();i++) {
+//        std::cout<<"index:"<<light_array1.light(i).index()<<" type:"
+//                <<light_array1.light(i).type()
+//                <<std::endl;
+
+//    }
+
+}
+
+void infer(cv::Mat img,vector<Detection> &results) {
+
+    //preprocess the image into the fixed-size network input
+    auto start = std::chrono::system_clock::now();  //timing
+    static float data[3 * INPUT_H * INPUT_W];
+    ProcessImage(img, data);
+    auto end = std::chrono::system_clock::now();
+    //time_read_img = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_read_img;
+
+    //cout<<"read img time: "<<std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count()<<"ms"<<endl;
+
+    //Run inference
+    start = std::chrono::system_clock::now();  //timing
+    //cout<<"doinference"<<endl;
+    doInference(*context, data, prob, 1);
+    end = std::chrono::system_clock::now();
+    //time_infer = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() + time_infer;
+    std::cout <<"doinference: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    postprocess(prob, results, conf_thr,  nms_thr);
+    //cout << "ok" << endl;
+    //time_num++;
+}
+
+
+void exitfunc()
+{
+    return;
+}
+
+//read frames from a video file
+void ReadFunc(int n)
+{
+    cv::VideoCapture cap(video_path);
+    if(!cap.isOpened())
+    {
+        cout<<"video failed to open"<<endl;
+    }
+    while(1)
+    {
+        cv::Mat frame;
+        //needed when reading from a video file; drop this check when reading a live camera
+        if(imageBuffer->isFull())
+        {
+            continue;
+        }
+        if(cap.read(frame))
+        {
+            if(calibrationstate)
+                cv::remap(frame,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+            if(cropstate)
+                frame = frame(crop_height,crop_width);
+            imageBuffer->add(frame);
+        }
+        else
+        {
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
+        }
+    }
+}
+void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    if(nSize<1000)return;
+    iv::vision::rawpic pic;
+    if(false == pic.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"picview Listenpic fail."<<std::endl;
+        return;
+    }
+    cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+    if(pic.type() == 1)
+        memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+    else
+    {
+        //     mat.release();
+        std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+        mat = cv::imdecode(buff,cv::IMREAD_COLOR);
+    }
+    cv::Mat frame;
+    if(calibrationstate)
+        cv::remap(mat,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+    else
+        frame = mat;
+    if(cropstate)
+        frame = frame(crop_height,crop_width);
+    imageBuffer->add(frame);
+}
+
+//receive the start signal from shared memory
+void Listensignal(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    if(nSize<1)return;
+    iv::brain::brainstate brain_state;
+    if(false == brain_state.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"Listen signal fail."<<std::endl;
+        return;
+    }
+
+    cout<<"brain_state traficlightstart: "<<brain_state.mbtraficlightstart()<<endl;
+
+    lightstart = brain_state.mbtraficlightstart();
+}
+
+int main(int argc, char** argv)
+{
+    showversion("yolov5s");
+    QCoreApplication a(argc, argv);
+
+    gfault = new iv::Ivfault("traffic_light_detection");
+    givlog = new iv::Ivlog("traffic_light_detection");
+    gfault->SetFaultState(0,0,"yolov5 initialize.");
+
+    if(argc==3)
+    {
+        test_video = (strcmp(argv[1], "true") == 0);
+        video_path = argv[2];
+    }
+    if(argc==2)
+    {
+        test_video = (strcmp(argv[1], "true") == 0);
+    }
+
+    if(test_video)
+        std::thread * readthread = new std::thread(ReadFunc,1);
+    else
+        gpcamera= iv::modulecomm::RegisterRecv(&cameraname[0],Listenpic);
+
+//================================== camera calib init ==========================
+    if (calibrationstate)
+    {
+        cv::FileStorage calib_file("/home/nvidia/code/modularization/src/detection/yolov5-trt-v2.0/yaml/camera_middle_640_360.yaml", cv::FileStorage::READ);
+        calib_file["cameraMatrix"]>>camera_matrix;
+        calib_file["distCoeffs"]>>dist_coe;
+        cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
+        cv::Size imgsize=cv::Size(1280,720);
+        cv::initUndistortRectifyMap(camera_matrix, dist_coe, R, camera_matrix,imgsize,CV_16SC2,map1,map2);
+    }
+//==============================================================================
+    if(!LoadEngine(engine_path))
+    {
+        cout<<"Build engine to "<< engine_path <<endl;
+        get_trtengine();
+        cout << "Build engine done!"<<endl;
+        cout<<"Reload engine from "<< engine_path <<endl;
+        LoadEngine(engine_path);
+    }
+    gpbrain = iv::modulecomm::RegisterRecv(&brainname[0],Listensignal);
+    gpdetect = iv::modulecomm::RegisterSend(&detectname[0],10000,1);
+    while(1)
+    {
+        if(lightstart)
+        {
+            std::cout<<"------start program------"<<std::endl;
+            vector<KalmanTracker> trackers_90;
+            KalmanTracker::kf_count = 0; // tracking id relies on this, so we have to reset it in each seq.
+            int frame_count = 0;
+            double waittime = (double)cv::getTickCount();
+            while (1)
+            {
+                if(imageBuffer->isEmpty())
+                {
+                    double waittotal = (double)cv::getTickCount() - waittime;
+                    double totaltime = waittotal/cv::getTickFrequency();
+                    if(totaltime>10.0)
+                    {
+                        cout<<"Cant't get frame and quit"<<endl;
+                        lightstart = false;
+                        cv::destroyAllWindows();
+                        std::cout<<"------end program------"<<std::endl;
+                        break;
+                    }
+                    cout<<"Wait for frame "<<totaltime<<"s"<<endl;
+                    continue;
+                }
+                auto start = std::chrono::system_clock::now();  //timing
+                cv::Mat frame;
+                imageBuffer->consume(frame);
+
+                frame_count++;
+
+                cv::Mat res_img; // result image
+                vector<Detection> results;
+                vector<Detection>results_track;
+                infer(frame,results);
+
+//================================== track ==========================
+                if (trackstate)
+                {
+                    auto start_track = std::chrono::system_clock::now();  //timing
+                    od::bbox_t bbox_t_90;   //convert detections to the tracker's box format
+                    vector<od::bbox_t> outs_90;
+                    for (int i = 0; i < results.size(); i++)
+                    {
+                        bbox_t_90.x = results.at(i).bbox[0];
+                        bbox_t_90.y = results.at(i).bbox[1];
+                        bbox_t_90.w = results.at(i).bbox[2];
+                        bbox_t_90.h = results.at(i).bbox[3];
+                        bbox_t_90.prob = results.at(i).conf;
+                        bbox_t_90.obj_id = results.at(i).class_id;
+                        outs_90.push_back(bbox_t_90);
+                    }
+                    vector<od::TrackingBox>track_result_90;
+                    bool track_flag_90 = od::TrackObstacle(frame_count,trackers_90,outs_90,track_result_90);
+                    for(unsigned int i=0;i < track_result_90.size(); i++)
+                    {
+                        Detection obstacle;
+                        obstacle.bbox[0] = track_result_90[i].box.x;
+                        obstacle.bbox[1] = track_result_90[i].box.y;
+                        obstacle.bbox[2] = track_result_90[i].box.width;
+                        obstacle.bbox[3] = track_result_90[i].box.height;
+                        //decide the displayed color by majority vote over the last 5 frames
+                        vector<int> class_history;
+                        class_history = track_result_90[i].class_history;
+                        if(class_history.size()>0)
+                        {
+                            vector<int> color_num(3);
+                            for(int j=0;j<class_history.size();j++)
+                            {
+                                int class_id = class_history[j];
+                                color_num[class_id] += 1;
+                            }
+                            std::vector<int>::iterator biggest = std::max_element(std::begin(color_num),std::end(color_num));
+                            int maxindex = std::distance(std::begin(color_num),biggest);
+                            obstacle.class_id = maxindex;
+                        }
+                        else {obstacle.class_id = track_result_90[i].class_id;}
+                        obstacle.conf = track_result_90[i].prob;
+                        results_track.push_back(obstacle);
+                    }
+                    auto end_track = std::chrono::system_clock::now();  //timing
+                    //std::cout <<"track: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end_track - start_track).count() << "ms" << std::endl;
+
+                }
+//================================== track ==========================
+
+                vector<Detection>results_final;
+                results_final = (trackstate)?results_track:results;
+                shareLightMsg(results_final);
+                auto end = std::chrono::system_clock::now();  //timing
+                std::cout <<"total time traffic: "<< std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+
+                //display the detection results
+                cv::resize(frame, frame, cv::Size(INPUT_W, INPUT_H), 0, 0, cv::INTER_LINEAR);
+                res_img=draw_rect(frame,results_final);
+
+                results.clear();
+                results_track.clear();
+                results_final.clear();
+
+                cv::namedWindow("Result",cv::WINDOW_NORMAL);
+                cv::imshow("Result",frame);
+                if(cv::waitKey(10) == 'q')
+                {
+                    cv::destroyAllWindows();
+                    //yolo_context->destroy();
+                    //start_up = false;
+                    break;
+                }
+                if(cv::waitKey(1) == 's')
+                    cv::waitKey(0);
+                //writer << frame;
+                //waittime = (double)cv::getTickCount();
+                if(!lightstart)
+                {
+                    cv::destroyAllWindows();
+                    //yolo_context->destroy();
+                    //start_up = false;
+                    std::cout<<"------end program------"<<std::endl;
+                    break;
+                }
+            }
+        }
+        //else cout<<"Wait for lightstart==true"<<endl;
+    }
+
+    // Destroy the engine
+    context->destroy();
+    engine->destroy();
+    runtime->destroy();
+
+    return 0;
+}
+

+ 40 - 0
src/detection/yolov5-trt-v2.0/saveVideo.py

@@ -0,0 +1,40 @@
+import cv2
+import os
+import argparse
+import time
+
+def arg_parser():
+    parser = argparse.ArgumentParser(description='Save Image')
+    parser.add_argument('--savedir', type=str, default='./Image',help='directory to save images')
+    return parser.parse_args()
+
+if __name__ == '__main__':
+	args = arg_parser()
+	savedir = args.savedir
+	if not os.path.exists(savedir):
+		os.makedirs(savedir)
+	#read the Sensing camera on the Miivii device via GStreamer
+	cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink")
+	#cap = cv2.VideoCapture('/home/vanessa/lqx/PycharmProjects/fire-detect-yolov4-master/yolov4_turnstile/20201231145638.avi')
+	image_nums_start = 0
+	while True:
+		ret,frame = cap.read()
+		if not ret:
+			print("Camera read failed !!!")
+			break
+		#image_name = '{:s}.jpg'.format('{:d}'.format(image_nums_start).zfill(4))
+		image_name = str(time.time()) + '.jpg'
+		save_path = os.path.join(savedir,image_name)
+		###frame sampling: save every 10th frame
+		#frame = cv2.flip(frame,0)
+		#frame = cv2.flip(frame,1)
+		if image_nums_start % 10 == 0:
+			cv2.imwrite(save_path,frame)
+		image_nums_start += 1
+		image1=frame[40:680,320:960]
+		print("shape: ",image1.shape)
+		cv2.imshow('frame',image1)
+		if cv2.waitKey(1) & 0xFF==ord('q'):
+			break
+	cap.release()
+	cv2.destroyWindow('frame')

+ 398 - 0
src/detection/yolov5-trt-v2.0/src/Hungarian.cpp

@@ -0,0 +1,398 @@
+//
+// Created by lqx on 20-4-23.
+//
+///////////////////////////////////////////////////////////////////////////////
+// Hungarian.cpp: Implementation file for Class HungarianAlgorithm.
+//
+// This is a C++ wrapper with slight modification of a hungarian algorithm implementation by Markus Buehren.
+// The original implementation is a few mex-functions for use in MATLAB, found here:
+// http://www.mathworks.com/matlabcentral/fileexchange/6543-functions-for-the-rectangular-assignment-problem
+//
+// Both this code and the original code are published under the BSD license.
+// by Cong Ma, 2016
+//
+
+#include <math.h>
+#include <cfloat>
+#include "Hungarian.h"
+
+
+HungarianAlgorithm::HungarianAlgorithm(){}
+HungarianAlgorithm::~HungarianAlgorithm(){}
+
+
+//********************************************************//
+// A single function wrapper for solving assignment problem.
+//********************************************************//
+double HungarianAlgorithm::Solve(vector<vector<double>>& DistMatrix, vector<int>& Assignment)
+{
+    unsigned int nRows = DistMatrix.size();
+    unsigned int nCols = DistMatrix[0].size();
+
+    double *distMatrixIn = new double[nRows * nCols];
+    int *assignment = new int[nRows];
+    double cost = 0.0;
+
+    // Fill in the distMatrixIn. Mind the index is "i + nRows * j".
+    // Here the cost matrix of size MxN is defined as a double precision array of N*M elements.
+    // The solving functions expect the matrix in column-major (MATLAB-internal) order
+    // (i.e. the matrix [1 2; 3 4] will be stored as a vector [1 3 2 4], NOT [1 2 3 4]).
+    for (unsigned int i = 0; i < nRows; i++)
+        for (unsigned int j = 0; j < nCols; j++)
+            distMatrixIn[i + nRows * j] = DistMatrix[i][j];
+
+    // call solving function
+    assignmentoptimal(assignment, &cost, distMatrixIn, nRows, nCols);
+
+    Assignment.clear();
+    for (unsigned int r = 0; r < nRows; r++)
+        Assignment.push_back(assignment[r]);
+
+    delete[] distMatrixIn;
+    delete[] assignment;
+    return cost;
+}
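+
+// Minimal usage sketch (illustrative; the cost values are made up):
+//
+//     vector<vector<double>> cost = { {1, 2},     // row 0 (e.g. tracker 0) vs columns 0,1
+//                                     {2, 1} };   // row 1 vs columns 0,1
+//     vector<int> assignment;
+//     HungarianAlgorithm algo;
+//     double total = algo.Solve(cost, assignment); // assignment == {0, 1}, total == 2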
+
+
+//********************************************************//
+// Solve optimal solution for assignment problem using Munkres algorithm, also known as Hungarian Algorithm.
+//********************************************************//
+void HungarianAlgorithm::assignmentoptimal(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
+{
+    double *distMatrix, *distMatrixTemp, *distMatrixEnd, *columnEnd, value, minValue;
+    bool *coveredColumns, *coveredRows, *starMatrix, *newStarMatrix, *primeMatrix;
+    int nOfElements, minDim, row, col;
+
+    /* initialization */
+    *cost = 0;
+    for (row = 0; row<nOfRows; row++)
+        assignment[row] = -1;
+
+    /* generate working copy of distance Matrix */
+    /* check if all matrix elements are positive */
+    nOfElements = nOfRows * nOfColumns;
+    distMatrix = (double *)malloc(nOfElements * sizeof(double));
+    distMatrixEnd = distMatrix + nOfElements;
+
+    for (row = 0; row<nOfElements; row++)
+    {
+        value = distMatrixIn[row];
+        if (value < 0)
+            cerr << "All matrix elements have to be non-negative." << endl;
+        distMatrix[row] = value;
+    }
+
+
+    /* memory allocation */
+    coveredColumns = (bool *)calloc(nOfColumns, sizeof(bool));
+    coveredRows = (bool *)calloc(nOfRows, sizeof(bool));
+    starMatrix = (bool *)calloc(nOfElements, sizeof(bool));
+    primeMatrix = (bool *)calloc(nOfElements, sizeof(bool));
+    newStarMatrix = (bool *)calloc(nOfElements, sizeof(bool)); /* used in step4 */
+
+    /* preliminary steps */
+    if (nOfRows <= nOfColumns)
+    {
+        minDim = nOfRows;
+
+        for (row = 0; row<nOfRows; row++)
+        {
+            /* find the smallest element in the row */
+            distMatrixTemp = distMatrix + row;
+            minValue = *distMatrixTemp;
+            distMatrixTemp += nOfRows;
+            while (distMatrixTemp < distMatrixEnd)
+            {
+                value = *distMatrixTemp;
+                if (value < minValue)
+                    minValue = value;
+                distMatrixTemp += nOfRows;
+            }
+
+            /* subtract the smallest element from each element of the row */
+            distMatrixTemp = distMatrix + row;
+            while (distMatrixTemp < distMatrixEnd)
+            {
+                *distMatrixTemp -= minValue;
+                distMatrixTemp += nOfRows;
+            }
+        }
+
+        /* Steps 1 and 2a */
+        for (row = 0; row<nOfRows; row++)
+            for (col = 0; col<nOfColumns; col++)
+                if (fabs(distMatrix[row + nOfRows*col]) < DBL_EPSILON)
+                    if (!coveredColumns[col])
+                    {
+                        starMatrix[row + nOfRows*col] = true;
+                        coveredColumns[col] = true;
+                        break;
+                    }
+    }
+    else /* if(nOfRows > nOfColumns) */
+    {
+        minDim = nOfColumns;
+
+        for (col = 0; col<nOfColumns; col++)
+        {
+            /* find the smallest element in the column */
+            distMatrixTemp = distMatrix + nOfRows*col;
+            columnEnd = distMatrixTemp + nOfRows;
+
+            minValue = *distMatrixTemp++;
+            while (distMatrixTemp < columnEnd)
+            {
+                value = *distMatrixTemp++;
+                if (value < minValue)
+                    minValue = value;
+            }
+
+            /* subtract the smallest element from each element of the column */
+            distMatrixTemp = distMatrix + nOfRows*col;
+            while (distMatrixTemp < columnEnd)
+                *distMatrixTemp++ -= minValue;
+        }
+
+        /* Steps 1 and 2a */
+        for (col = 0; col<nOfColumns; col++)
+            for (row = 0; row<nOfRows; row++)
+                if (fabs(distMatrix[row + nOfRows*col]) < DBL_EPSILON)
+                    if (!coveredRows[row])
+                    {
+                        starMatrix[row + nOfRows*col] = true;
+                        coveredColumns[col] = true;
+                        coveredRows[row] = true;
+                        break;
+                    }
+        for (row = 0; row<nOfRows; row++)
+            coveredRows[row] = false;
+
+    }
+
+    /* move to step 2b */
+    step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+
+    /* compute cost and remove invalid assignments */
+    computeassignmentcost(assignment, cost, distMatrixIn, nOfRows);
+
+    /* free allocated memory */
+    free(distMatrix);
+    free(coveredColumns);
+    free(coveredRows);
+    free(starMatrix);
+    free(primeMatrix);
+    free(newStarMatrix);
+
+    return;
+}
+
+/********************************************************/
+void HungarianAlgorithm::buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns)
+{
+    int row, col;
+
+    for (row = 0; row<nOfRows; row++)
+        for (col = 0; col<nOfColumns; col++)
+            if (starMatrix[row + nOfRows*col])
+            {
+#ifdef ONE_INDEXING
+                assignment[row] = col + 1; /* MATLAB-Indexing */
+#else
+                assignment[row] = col;
+#endif
+                break;
+            }
+}
+
+/********************************************************/
+void HungarianAlgorithm::computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows)
+{
+    int row, col;
+
+    for (row = 0; row<nOfRows; row++)
+    {
+        col = assignment[row];
+        if (col >= 0)
+            *cost += distMatrix[row + nOfRows*col];
+    }
+}
+
+/********************************************************/
+void HungarianAlgorithm::step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    bool *starMatrixTemp, *columnEnd;
+    int col;
+
+    /* cover every column containing a starred zero */
+    for (col = 0; col<nOfColumns; col++)
+    {
+        starMatrixTemp = starMatrix + nOfRows*col;
+        columnEnd = starMatrixTemp + nOfRows;
+        while (starMatrixTemp < columnEnd){
+            if (*starMatrixTemp++)
+            {
+                coveredColumns[col] = true;
+                break;
+            }
+        }
+    }
+
+    /* move to step 3 */
+    step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+void HungarianAlgorithm::step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    int col, nOfCoveredColumns;
+
+    /* count covered columns */
+    nOfCoveredColumns = 0;
+    for (col = 0; col<nOfColumns; col++)
+        if (coveredColumns[col])
+            nOfCoveredColumns++;
+
+    if (nOfCoveredColumns == minDim)
+    {
+        /* algorithm finished */
+        buildassignmentvector(assignment, starMatrix, nOfRows, nOfColumns);
+    }
+    else
+    {
+        /* move to step 3 */
+        step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+    }
+
+}
+
+/********************************************************/
+void HungarianAlgorithm::step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    bool zerosFound;
+    int row, col, starCol;
+
+    zerosFound = true;
+    while (zerosFound)
+    {
+        zerosFound = false;
+        for (col = 0; col<nOfColumns; col++)
+            if (!coveredColumns[col])
+                for (row = 0; row<nOfRows; row++)
+                    if ((!coveredRows[row]) && (fabs(distMatrix[row + nOfRows*col]) < DBL_EPSILON))
+                    {
+                        /* prime zero */
+                        primeMatrix[row + nOfRows*col] = true;
+
+                        /* find starred zero in current row */
+                        for (starCol = 0; starCol<nOfColumns; starCol++)
+                            if (starMatrix[row + nOfRows*starCol])
+                                break;
+
+                        if (starCol == nOfColumns) /* no starred zero found */
+                        {
+                            /* move to step 4 */
+                            step4(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim, row, col);
+                            return;
+                        }
+                        else
+                        {
+                            coveredRows[row] = true;
+                            coveredColumns[starCol] = false;
+                            zerosFound = true;
+                            break;
+                        }
+                    }
+    }
+
+    /* move to step 5 */
+    step5(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+void HungarianAlgorithm::step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col)
+{
+    int n, starRow, starCol, primeRow, primeCol;
+    int nOfElements = nOfRows*nOfColumns;
+
+    /* generate temporary copy of starMatrix */
+    for (n = 0; n<nOfElements; n++)
+        newStarMatrix[n] = starMatrix[n];
+
+    /* star current zero */
+    newStarMatrix[row + nOfRows*col] = true;
+
+    /* find starred zero in current column */
+    starCol = col;
+    for (starRow = 0; starRow<nOfRows; starRow++)
+        if (starMatrix[starRow + nOfRows*starCol])
+            break;
+
+    while (starRow<nOfRows)
+    {
+        /* unstar the starred zero */
+        newStarMatrix[starRow + nOfRows*starCol] = false;
+
+        /* find primed zero in current row */
+        primeRow = starRow;
+        for (primeCol = 0; primeCol<nOfColumns; primeCol++)
+            if (primeMatrix[primeRow + nOfRows*primeCol])
+                break;
+
+        /* star the primed zero */
+        newStarMatrix[primeRow + nOfRows*primeCol] = true;
+
+        /* find starred zero in current column */
+        starCol = primeCol;
+        for (starRow = 0; starRow<nOfRows; starRow++)
+            if (starMatrix[starRow + nOfRows*starCol])
+                break;
+    }
+
+    /* use temporary copy as new starMatrix */
+    /* delete all primes, uncover all rows */
+    for (n = 0; n<nOfElements; n++)
+    {
+        primeMatrix[n] = false;
+        starMatrix[n] = newStarMatrix[n];
+    }
+    for (n = 0; n<nOfRows; n++)
+        coveredRows[n] = false;
+
+    /* move to step 2a */
+    step2a(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+void HungarianAlgorithm::step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    double h, value;
+    int row, col;
+
+    /* find smallest uncovered element h */
+    h = DBL_MAX;
+    for (row = 0; row<nOfRows; row++)
+        if (!coveredRows[row])
+            for (col = 0; col<nOfColumns; col++)
+                if (!coveredColumns[col])
+                {
+                    value = distMatrix[row + nOfRows*col];
+                    if (value < h)
+                        h = value;
+                }
+
+    /* add h to each covered row */
+    for (row = 0; row<nOfRows; row++)
+        if (coveredRows[row])
+            for (col = 0; col<nOfColumns; col++)
+                distMatrix[row + nOfRows*col] += h;
+
+    /* subtract h from each uncovered column */
+    for (col = 0; col<nOfColumns; col++)
+        if (!coveredColumns[col])
+            for (row = 0; row<nOfRows; row++)
+                distMatrix[row + nOfRows*col] -= h;
+
+    /* move to step 3 */
+    step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+

+ 106 - 0
src/detection/yolov5-trt-v2.0/src/KalmanTracker.cpp

@@ -0,0 +1,106 @@
+//
+// Created by lqx on 20-4-23.
+//
+
+///////////////////////////////////////////////////////////////////////////////
+// KalmanTracker.cpp: KalmanTracker Class Implementation Declaration
+
+#include "KalmanTracker.h"
+#include "detect_obstacle.h"
+#include <iostream>
+
+int KalmanTracker::kf_count = 0;
+
+// initialize Kalman filter
+void KalmanTracker::init_kf(StateType stateMat)
+{
+    int stateNum = 7;
+    int measureNum = 4;
+    kf = KalmanFilter(stateNum, measureNum, 0);
+
+    measurement = Mat::zeros(measureNum, 1, CV_32F);
+
+    // constant-velocity motion model on the state [cx, cy, s, r, vx, vy, vs];
+    // the aspect ratio r is modelled as constant
+    kf.transitionMatrix = (Mat_<float>(stateNum, stateNum) <<
+            1, 0, 0, 0, 1, 0, 0,
+            0, 1, 0, 0, 0, 1, 0,
+            0, 0, 1, 0, 0, 0, 1,
+            0, 0, 0, 1, 0, 0, 0,
+            0, 0, 0, 0, 1, 0, 0,
+            0, 0, 0, 0, 0, 1, 0,
+            0, 0, 0, 0, 0, 0, 1);
+
+    setIdentity(kf.measurementMatrix);
+    setIdentity(kf.processNoiseCov, Scalar::all(1e-2));
+    setIdentity(kf.measurementNoiseCov, Scalar::all(1e-1));
+    setIdentity(kf.errorCovPost, Scalar::all(1));
+
+    // initialize state vector with bounding box in [cx,cy,s,r] style
+    kf.statePost.at<float>(0, 0) = stateMat.x + stateMat.width / 2;
+    kf.statePost.at<float>(1, 0) = stateMat.y + stateMat.height / 2;
+    kf.statePost.at<float>(2, 0) = stateMat.area();
+    kf.statePost.at<float>(3, 0) = stateMat.width / stateMat.height;
+}
+
+// Predict the estimated bounding box.
+StateType KalmanTracker::predict()
+{
+    // predict
+    Mat p = kf.predict();
+    m_age += 1;
+
+    if (m_time_since_update > od::max_age)
+        m_hit_streak = 0;
+    m_time_since_update += 1;
+    StateType predictBox = get_rect_xysr(p.at<float>(0, 0), p.at<float>(1, 0), p.at<float>(2, 0), p.at<float>(3, 0));
+    m_history.push_back(predictBox);
+    return m_history.back();
+}
+
+// Update the state vector with observed bounding box.
+void KalmanTracker::update(StateType stateMat, int classId, float prob)
+{
+    m_time_since_update = 0;
+    m_history.clear();
+    m_hits += 1;
+    m_hit_streak += 1;
+    m_class_id = classId;
+
+    //keep only the detection classes from the most recent 5 frames
+    if (m_class_history.size() >= 5)
+        m_class_history.erase(m_class_history.begin());
+    m_class_history.push_back(classId);
+
+    m_prob = prob;
+    // measurement
+    measurement.at<float>(0, 0) = stateMat.x + stateMat.width / 2;
+    measurement.at<float>(1, 0) = stateMat.y + stateMat.height / 2;
+    measurement.at<float>(2, 0) = stateMat.area();
+    measurement.at<float>(3, 0) = stateMat.width / stateMat.height;
+
+    // update
+    kf.correct(measurement);
+}
+
+// Return the current state vector
+StateType KalmanTracker::get_state()
+{
+    Mat s = kf.statePost;
+    return get_rect_xysr(s.at<float>(0, 0), s.at<float>(1, 0), s.at<float>(2, 0), s.at<float>(3, 0));
+}
+
+
+// Convert bounding box from [cx,cy,s,r] to [x,y,w,h] style.
+StateType KalmanTracker::get_rect_xysr(float cx, float cy, float s, float r)
+{
+    float w = sqrt(s * r);
+    float h = s / w;
+    float x = (cx - w / 2);
+    float y = (cy - h / 2);
+
+    if (x < 0 && cx > 0)
+        x = 0;
+    if (y < 0 && cy > 0)
+        y = 0;
+
+    return StateType(x, y, w, h);
+}
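+
+// Worked example (illustrative): a 40x20 box at top-left (100, 50) becomes
+// cx = 120, cy = 60, s = 800 (area), r = 2.0 (aspect w/h); inverting,
+// w = sqrt(s * r) = sqrt(1600) = 40 and h = s / w = 20 recover the original box.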

+ 260 - 0
src/detection/yolov5-trt-v2.0/src/detect_obstacle.cpp

@@ -0,0 +1,260 @@
+
+#include <set>
+#include "detect_obstacle.h"
+
+namespace od{
+// Computes IOU between two bounding boxes
+double GetIOU(Rect_<float> bb_test, Rect_<float> bb_gt)
+{
+    float in = (bb_test & bb_gt).area();
+    float un = bb_test.area() + bb_gt.area() - in;
+
+    if (un < DBL_EPSILON)
+        return 0;
+
+    return (double)(in / un);
+}
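+
+// Quick check (illustrative): two 10x10 boxes offset by 5 px in x overlap in a
+// 5x10 strip, so the IOU is 50 / (100 + 100 - 50) = 1/3:
+//
+//     GetIOU(Rect_<float>(0, 0, 10, 10), Rect_<float>(5, 0, 10, 10)); // ~0.3333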
+
+
+//tracking obstacle
+bool TrackObstacle(int frame_count,vector<KalmanTracker> &trackers,vector<bbox_t> &outs,vector<od::TrackingBox> &track_result)
+{
+    // variables used in the for-loop
+    vector<Rect_<float>> predictedBoxes;
+    vector<vector<double>> iouMatrix;
+    vector<int> assignment;
+    set<int> unmatchedDetections;
+    set<int> unmatchedTrajectories;
+    set<int> allItems;
+    set<int> matchedItems;
+    vector<cv::Point> matchedPairs;
+    unsigned int trkNum = 0;
+    unsigned int detNum = 0;
+    vector<od::DetectBox> detect_outs;
+    //bbox_t to Detect_box
+    for(unsigned int i=0;i<outs.size();i++)
+    {
+        od::DetectBox detect_temp;
+        detect_temp.class_id = outs[i].obj_id;
+        detect_temp.prob = outs[i].prob;
+        float tpx = outs[i].x;
+        float tpy = outs[i].y;
+        float tpw = outs[i].w;
+        float tph = outs[i].h;
+        //detect_temp.box = Rect_<float>(Point_<float>(tpx, tpy),Point_<float>(tpx + tpw, tpy + tph));
+        detect_temp.box = Rect_<float>(tpx,tpy,tpw,tph);
+        detect_outs.push_back(detect_temp);
+    }
+    //tracking
+    if (trackers.size() == 0) // the first frame met
+    {
+        // initialize kalman trackers using first detections.
+        for (unsigned int i = 0; i < outs.size(); i++)
+        {
+            KalmanTracker trk = KalmanTracker(detect_outs[i].box,
+                                              detect_outs[i].class_id,
+                                              detect_outs[i].prob);
+            trackers.push_back(trk);
+        }
+        return false;
+    }
+    ///////////////////////////////////////
+    // 3.1. get predicted locations from existing trackers.
+    predictedBoxes.clear();
+
+    for (auto it = trackers.begin(); it != trackers.end();)
+    {
+        Rect_<float> pBox = (*it).predict();
+        if (pBox.x >= 0 && pBox.y >= 0)
+        {
+            predictedBoxes.push_back(pBox);
+            it++;
+        }
+        else
+        {
+
+            cerr << "Box invalid at frame: " << frame_count <<" id "<<(*it).m_id+1<<endl;
+            it = trackers.erase(it);
+
+        }
+    }
+
+    if (trackers.size() == 0 || detect_outs.size() == 0) return false;
+
+    ///////////////////////////////////////
+    // 3.2. associate detections to tracked object (both represented as bounding boxes)
+    // dets : detFrameData[fi]
+    trkNum = predictedBoxes.size();
+    detNum = outs.size();
+
+    iouMatrix.clear();
+    iouMatrix.resize(trkNum, vector<double>(detNum, 0));
+
+    for (unsigned int i = 0; i < trkNum; i++) // compute iou matrix as a distance matrix
+    {
+        for (unsigned int j = 0; j < detNum; j++)
+        {
+            // use 1-iou because the hungarian algorithm computes a minimum-cost assignment.
+            iouMatrix[i][j] = 1 - GetIOU(predictedBoxes[i], detect_outs[j].box);
+        }
+    }
+    // solve the assignment problem using hungarian algorithm.
+    // the resulting assignment is [track(prediction) : detection], with len=preNum
+    HungarianAlgorithm HungAlgo;
+    assignment.clear();
+    HungAlgo.Solve(iouMatrix, assignment);
+
+    // find matches, unmatched_detections and unmatched_predictions
+    unmatchedTrajectories.clear();
+    unmatchedDetections.clear();
+    allItems.clear();
+    matchedItems.clear();
+    if (detNum > trkNum) //	there are unmatched detections
+    {
+        for (unsigned int n = 0; n < detNum; n++)
+            allItems.insert(n);
+
+        for (unsigned int i = 0; i < trkNum; ++i)
+            matchedItems.insert(assignment[i]);
+
+        set_difference(allItems.begin(), allItems.end(),
+                       matchedItems.begin(), matchedItems.end(),
+                       insert_iterator<set<int>>(unmatchedDetections, unmatchedDetections.begin()));
+    }
+    else if (detNum < trkNum) // there are unmatched trajectory/predictions
+    {
+        for (unsigned int i = 0; i < trkNum; ++i)
+            if (assignment[i] == -1) // unassigned label will be set as -1 in the assignment algorithm
+                unmatchedTrajectories.insert(i);
+    }
+    // filter out matched with low IOU
+    matchedPairs.clear();
+    for (unsigned int i = 0; i < trkNum; ++i)
+    {
+        if (assignment[i] == -1) // pass over invalid values
+            continue;
+        if (1 - iouMatrix[i][assignment[i]] < od::iouThreshold)
+        {
+            unmatchedTrajectories.insert(i);
+            unmatchedDetections.insert(assignment[i]);
+        }
+        else
+            matchedPairs.push_back(cv::Point(i, assignment[i]));
+    }
+    ///////////////////////////////////////
+    // 3.3. updating trackers
+    // update matched trackers with assigned detections.
+    // each prediction is corresponding to a tracker
+    int detIdx, trkIdx;
+    for (unsigned int i = 0; i < matchedPairs.size(); i++)
+    {
+        trkIdx = matchedPairs[i].x;
+        detIdx = matchedPairs[i].y;
+        trackers[trkIdx].update(detect_outs[detIdx].box,
+                                detect_outs[detIdx].class_id,
+                                detect_outs[detIdx].prob);
+    }
+    // create and initialise new trackers for unmatched detections
+    for (auto umd : unmatchedDetections)
+    {
+        KalmanTracker tracker = KalmanTracker(detect_outs[umd].box,
+                                              detect_outs[umd].class_id,
+                                              detect_outs[umd].prob);
+        trackers.push_back(tracker);
+    }
+
+#if 0
+    //get unique trackers,merg same trackers
+    unsigned int trackers_num = trackers.size();
+    iouMatrix.clear();
+    iouMatrix.resize(trackers_num, vector<double>(trackers_num, 0));
+    for (unsigned int i = 0; i < trackers_num; i++) // compute iou matrix as a distance matrix
+    {
+        for (unsigned int j = 0; j < trackers_num; j++)
+        {
+            // use 1-iou because the hungarian algorithm computes a minimum-cost assignment.
+            if(j==i)
+                iouMatrix[i][j] = 1;
+            else
+                iouMatrix[i][j] = 1 - GetIOU(trackers[i].get_state(), trackers[j].get_state());
+        }
+    }
+    // solve the assignment problem using hungarian algorithm.
+    // the resulting assignment is [track(prediction) : detection], with len=preNum
+    assignment.clear();
+    HungAlgo.Solve(iouMatrix, assignment);
+    // filter out matched with low IOU
+    matchedPairs.clear();
+    for (unsigned int i = 0; i < trackers_num; ++i)
+    {
+        if (assignment[i] == -1) // pass over invalid values
+            continue;
+        if (iouMatrix[i][assignment[i]] < od::iouThreshold)
+        {
+            matchedPairs.push_back(cv::Point(i, assignment[i]));
+        }
+    }
+    int index1,index2;
+    vector<int> delete_index;
+    for (unsigned int i = 0; i < matchedPairs.size(); i++)
+    {
+        index1 = matchedPairs[i].x;
+        index2 = matchedPairs[i].y;
+        if(index1 >= index2)
+            continue;
+        if((trackers[index1].m_id > trackers[index2].m_id) && (trackers[index1].m_class_history.size()>0))
+        {
+            trackers[index1].m_id = trackers[index2].m_id;
+            trackers[index1].m_class_history.insert(trackers[index1].m_class_history.begin(),
+                                                    trackers[index2].m_class_history.begin(),trackers[index2].m_class_history.end());
+            delete_index.push_back(index2);
+        }
+        else if((trackers[index2].m_id > trackers[index1].m_id) && (trackers[index2].m_class_history.size()>0))
+        {
+            trackers[index2].m_id = trackers[index1].m_id;
+            trackers[index2].m_class_history.insert(trackers[index2].m_class_history.begin(),
+                                                    trackers[index1].m_class_history.begin(),trackers[index1].m_class_history.end());
+            delete_index.push_back(index1);
+
+        }
+    }
+    for(unsigned int i = 0; i < delete_index.size(); i++)
+    {
+        int idx = delete_index[i] - i;
+        trackers.erase(trackers.begin() + idx);
+    }
+#endif
+
+    // get trackers' output
+    track_result.clear();
+    for (auto it = trackers.begin(); it != trackers.end();)
+    {
+        if (((*it).m_time_since_update <= od::max_age) &&
+                ((*it).m_hit_streak >= od::min_hits || frame_count <= od::min_hits))
+        {
+            od::TrackingBox res;
+            res.box = (*it).get_state();
+            res.id = (*it).m_id + 1;
+            res.frame = frame_count;
+            res.class_id = (*it).m_class_id;
+            res.prob = (*it).m_prob;
+            res.class_history = (*it).m_class_history;
+            track_result.push_back(res);
+            it++;
+        }
+        else
+            it ++;
+        //remove dead tracklet
+        if(it != trackers.end() && (*it).m_time_since_update > od::max_age)
+        {
+            it = trackers.erase(it);
+        }
+    }
+    if(track_result.size()>0)
+        return true;
+    else return false;
+}
+}
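+
+// Per-frame usage sketch (illustrative; mirrors the call site in main_new.cpp):
+//
+//     static vector<KalmanTracker> trackers;    // persists across frames
+//     vector<od::bbox_t> dets = /* current-frame detections */;
+//     vector<od::TrackingBox> tracks;
+//     if (od::TrackObstacle(frame_count, trackers, dets, tracks)) {
+//         // tracks carry stable ids plus the recent class history;
+//         // od::max_age, od::min_hits and od::iouThreshold tune pruning/matching
+//     }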
+
+
+

+ 12 - 0
src/detection/yolov5-trt-v2.0/yaml/camera.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [329.8960137714725, 0.0, 306.98655702722937, 0.0, 329.8476481330821, 147.92102146061933, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 4
+    dt: f
+    data: [0.06540082256039308, 0.003679070986049775, 0.02795206257493852, 0.019310323874426313]

+ 12 - 0
src/detection/yolov5-trt-v2.0/yaml/camera147.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [329.8960137714725, 0.0, 306.98655702722937, 0.0, 329.8476481330821, 147.92102146061933, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 4
+    dt: f
+    data: [0.06540082256039308, 0.003679070986049775, 0.02795206257493852, 0.019310323874426313]

+ 12 - 0
src/detection/yolov5-trt-v2.0/yaml/camera_middle_640_360.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [380.0899898274016, 0.0, 312.50422695333214,0.0, 378.9971350621845, 158.00203854195047, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 5
+    dt: f
+    data: [-0.31123247928669645, 0.14133965413035393, -0.0030934980146547883, -0.0005549080413969106,-0.03447171176210059]

+ 16 - 0
src/detection/yolov5-trt-v2.0/yaml/init.yaml

@@ -0,0 +1,16 @@
+source:
+- 0
+
+src:
+- [140, 370]
+- [220, 298]
+- [410, 292]
+- [478, 360]
+dst:
+- [125, 371]
+- [125, 218]
+- [484, 218]
+- [484, 370]
+
+meters: 2.0
+pixels: 640

+ 10 - 0
src/detection/yolov5-trt-v2.0/yaml/pers_ori.yaml

@@ -0,0 +1,10 @@
+%YAML:1.0
+
+
+board_left_top: [267,188]
+
+board_left_bottom: [174,251]
+
+board_right_top: [347,189]
+
+board_right_bottom: [447,257]

+ 12 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/camera.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [329.8960137714725, 0.0, 306.98655702722937, 0.0, 329.8476481330821, 147.92102146061933, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 4
+    dt: f
+    data: [0.06540082256039308, 0.003679070986049775, 0.02795206257493852, 0.019310323874426313]

+ 12 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/camera_ori.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [283.2175041932327, 0.0, 618.5248476440728, 0.0, 282.8089524514217, 386.63584727262344, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 4
+    dt: f
+    data: [0.1362060349662609,-0.027120543730038277,0.16273833878800492,-0.16242892318961671]

+ 16 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/init.yaml

@@ -0,0 +1,16 @@
+source:
+- 0
+
+src:
+- [140, 370]
+- [220, 298]
+- [410, 292]
+- [478, 360]
+dst:
+- [125, 371]
+- [125, 218]
+- [484, 218]
+- [484, 370]
+
+meters: 2.0
+pixels: 640

+ 19 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers.yaml

@@ -0,0 +1,19 @@
+%YAML:1.0
+
+
+board_left_top: [255,189]
+
+board_left_bottom: [193,232]
+
+board_right_top: [342,189]
+
+board_right_bottom: [403,232]
+
+
+board_left_top_dst: [569,409]
+
+board_left_bottom_dst: [569,504]
+
+board_right_top_dst: [635,409]
+
+board_right_bottom_dst: [635,504]

+ 19 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers3.yaml

@@ -0,0 +1,19 @@
+%YAML:1.0
+
+
+board_left_top: [569,459]
+
+board_left_bottom: [509,504]
+
+board_right_top: [635,459]
+
+board_right_bottom: [696,504]
+
+
+board_left_top_dst: [569,409]
+
+board_left_bottom_dst: [569,504]
+
+board_right_top_dst: [635,409]
+
+board_right_bottom_dst: [635,504]

+ 19 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers_1.yaml

@@ -0,0 +1,19 @@
+%YAML:1.0
+
+
+board_left_top: [569,459]
+
+board_left_bottom: [509,504]
+
+board_right_top: [635,459]
+
+board_right_bottom: [696,504]
+
+
+board_left_top_dst: [509,459]
+
+board_left_bottom_dst: [509,550]
+
+board_right_top_dst: [635,459]
+
+board_right_bottom_dst: [635,550]

+ 19 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers_2.yaml

@@ -0,0 +1,19 @@
+%YAML:1.0
+
+
+board_left_top: [126,276]
+
+board_left_bottom: [33,312]
+
+board_right_top: [206,276]
+
+board_right_bottom: [273,312]
+
+
+board_left_top_dst: [200,100]
+
+board_left_bottom_dst: [200,800]
+
+board_right_top_dst: [900,100]
+
+board_right_bottom_dst: [900,800]

+ 10 - 0
src/detection/yolov5-trt-v2.0/yaml_bak_zhengke/pers_ori.yaml

@@ -0,0 +1,10 @@
+%YAML:1.0
+
+
+board_left_top: [283,352]
+
+board_left_bottom: [269,406]
+
+board_right_top: [353,352]
+
+board_right_bottom: [363,406]

+ 83 - 0
src/detection/yolov5-trt-v2.0/yolov5-trt-v2.0.pro

@@ -0,0 +1,83 @@
+QT -= gui
+
+CONFIG += c++11 console
+CONFIG -= app_bundle
+
+# The following define makes your compiler emit warnings if you use
+# any Qt feature that has been marked deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+
+# You can also make your code fail to compile if it uses deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
+
+# Default rules for deployment.
+qnx: target.path = /tmp/$${TARGET}/bin
+else: unix:!android: target.path = /opt/$${TARGET}/bin
+!isEmpty(target.path): INSTALLS += target
+
+
+INCLUDEPATH += yolov5-trt-v2.0/include \
+               include
+
+SOURCES += main_new.cpp \
+           src/Hungarian.cpp \
+           src/KalmanTracker.cpp \
+           src/detect_obstacle.cpp\
+           ../../include/msgtype/rawpic.pb.cc \
+           ../../include/msgtype/lightarray.pb.cc \
+           ../../include/msgtype/brainstate.pb.cc
+
+
+INCLUDEPATH += /usr/src/tensorrt/include \
+               /usr/include/aarch64-linux-gnu \
+               /usr/include/opencv4 \
+               /usr/local/cuda/include \
+               /usr/src/tensorrt/samples/common \
+               /usr/local/include \
+               /usr/include/opencv2 \
+               /usr/include/opencv \
+
+
+LIBS += /usr/lib/aarch64-linux-gnu/libopencv_highgui.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_core.so    \
+        /usr/lib/aarch64-linux-gnu/libopencv_imgproc.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_imgcodecs.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_videoio.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_video.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_calib3d.so \
+
+LIBS += /usr/local/cuda/lib64/libcudart.so \
+        /usr/lib/aarch64-linux-gnu/libnvparsers.so \
+        /usr/lib/aarch64-linux-gnu/libnvinfer.so \
+        /usr/lib/aarch64-linux-gnu/libnvonnxparser.so \
+        /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so
+
+LIBS += -L"/usr/local/lib" -lprotobuf
+LIBS += -L/usr/lib/aarch64-linux-gnu -lstdc++fs
+
+
+
+INCLUDEPATH += ../../../include
+LIBS +=  ../../../bin/libxmlparam.so \
+         ../../../bin/libivlog.so \
+         ../../../bin/libivfault.so \
+         ../../../bin/libmodulecomm.so
+
+
+!include(../../../include/common.pri ) {
+    error( "Couldn't find the common.pri file!" )
+}
+
+!include(../../../include/ivprotobuf.pri ) {
+    error( "Couldn't find the ivprotobuf.pri file!" )
+}
+!include(../../../include/ivopencv.pri ) {
+    error( "Couldn't find the ivopencv.pri file!" )
+}
+INCLUDEPATH += $$PWD/../../include/msgtype
+LIBS += -L/usr/lib/aarch64-linux-gnu/ -lglog
+