
C++ TensorRT yolov8

2024/10/28 19:13:24  Source: https://blog.csdn.net/lw112190/article/details/143225616

Contents

Results

Project

Code

Download


Results

(Demo video: C++ TensorRT yolov8)

Project

Include directories

Library directories

Additional dependencies (a build sanity-check snippet follows the dependency list below)

cublas.lib
cublasLt.lib
cuda.lib
cudadevrt.lib
cudart.lib
cudart_static.lib
cudnn.lib
cudnn64_8.lib
cudnn_adv_infer.lib
cudnn_adv_infer64_8.lib
cudnn_adv_train.lib
cudnn_adv_train64_8.lib
cudnn_cnn_infer.lib
cudnn_cnn_infer64_8.lib
cudnn_cnn_train.lib
cudnn_cnn_train64_8.lib
cudnn_ops_infer.lib
cudnn_ops_infer64_8.lib
cudnn_ops_train.lib
cudnn_ops_train64_8.lib
cufft.lib
cufftw.lib
cufilt.lib
curand.lib
cusolver.lib
cusolverMg.lib
cusparse.lib
nppc.lib
nppial.lib
nppicc.lib
nppidei.lib
nppif.lib
nppig.lib
nppim.lib
nppist.lib
nppisu.lib
nppitc.lib
npps.lib
nvblas.lib
nvJitLink.lib
nvJitLink_static.lib
nvjpeg.lib
nvml.lib
nvptxcompiler_static.lib
nvrtc-builtins_static.lib
nvrtc.lib
nvrtc_static.lib
OpenCL.lib
nvinfer.lib
nvinfer_dispatch.lib
nvinfer_lean.lib
nvinfer_plugin.lib
nvinfer_vc_plugin.lib
nvonnxparser.lib
nvparsers.lib
opencv_world481.lib
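
If the include directories, library directories and the dependency list above are wired up correctly, a short console program can confirm it before touching the YOLOv8 code. This is a hypothetical sanity check, not part of the original project; it only reads version macros from the CUDA, TensorRT and OpenCV headers and calls cudaRuntimeGetVersion, so it should build with the exact settings listed above (assuming TensorRT 8.x).

#include <iostream>
#include <cuda_runtime.h>
#include <opencv2/core.hpp>
#include "NvInfer.h"

int main() {
    int cudaVer = 0;
    cudaRuntimeGetVersion(&cudaVer);                       // resolves from cudart.lib
    std::cout << "CUDA runtime: " << cudaVer << std::endl;
    std::cout << "TensorRT: " << NV_TENSORRT_MAJOR << "."  // version macros from the TensorRT headers
              << NV_TENSORRT_MINOR << "." << NV_TENSORRT_PATCH << std::endl;
    std::cout << "OpenCV: " << CV_VERSION << std::endl;    // version macro from the OpenCV headers
    return 0;
}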

Code

#define _CRT_SECURE_NO_DEPRECATE

#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <fstream>
#include <numeric>
#include <cstring>          // memcpy
#include <cuda_runtime.h>   // cudaMalloc / cudaMemcpy / cudaFree
#include "NvInfer.h"

std::vector<std::string> labels;
float score_threshold = 0.3f;
float nms_threshold = 0.5f;
int input_h = 640;
int input_w = 640;

std::string lable_path = "model/lable.txt";
std::string engin_path = "model/yolov8n.engine";
std::string video_path = "test/VID_2K.mp4";

// NvinferStruct, Logger and detresult are small helper types that ship with the
// downloadable source; a minimal sketch is given after main().
NvinferStruct* p = nullptr;
Logger logger;

int w = 0;
int h = 0;
float x_factor = 0;
float y_factor = 0;
std::vector<float> input_image;

float* output_data = nullptr;
size_t output_size = 0;

double preprocessTime = 0;
double inferTime = 0;
double postprocessTime = 0;
double totalTime = 0;
double detFps = 0;

int init() {

    std::ifstream lable_file(lable_path);
    if (!lable_file.is_open())
    {
        std::cerr << "Error opening file: " << lable_path << std::endl;
        return -1;
    }
    std::string line;
    while (std::getline(lable_file, line))
    {
        if (!line.empty())
        {
            labels.push_back(line);
        }
    }
    lable_file.close();

    // Read the engine file in binary mode
    std::ifstream engin_file(engin_path.data(), std::ios::binary);
    if (!engin_file.good()) {
        std::cerr << "Failed to open engine file: " << engin_path << std::endl;
        return -1;
    }
    size_t size = 0;
    engin_file.seekg(0, engin_file.end);    // move the read pointer to the end of the file
    size = engin_file.tellg();              // its position is now the file size in bytes
    engin_file.seekg(0, engin_file.beg);    // move the read pointer back to the beginning
    char* modelStream = new char[size];
    engin_file.read(modelStream, size);
    engin_file.close();                     // close the file

    // Create the inference wrapper struct and initialize its members
    p = new NvinferStruct();

    // Create the runtime used to deserialize the engine
    p->runtime = nvinfer1::createInferRuntime(logger);

    // Deserialize the engine
    p->engine = p->runtime->deserializeCudaEngine(modelStream, size);

    // Create the execution context
    p->context = p->engine->createExecutionContext();
    int numNode = p->engine->getNbBindings();

    // Allocate one GPU data buffer pointer per binding
    p->dataBuffer = new void* [numNode];

    delete[] modelStream;

    for (int i = 0; i < numNode; i++) {
        nvinfer1::Dims dims = p->engine->getBindingDimensions(i);
        nvinfer1::DataType type = p->engine->getBindingDataType(i);
        std::vector<int> shape(dims.d, dims.d + dims.nbDims);
        size_t size = std::accumulate(dims.d, dims.d + dims.nbDims, 1, std::multiplies<size_t>());
        switch (type)
        {
        case nvinfer1::DataType::kINT32:
        case nvinfer1::DataType::kFLOAT: size *= 4; break;  // 4 bytes per element
        case nvinfer1::DataType::kHALF: size *= 2; break;   // 2 bytes per element
        case nvinfer1::DataType::kBOOL:
        case nvinfer1::DataType::kINT8:
        default: break;                                     // 1 byte per element
        }
        cudaMalloc(&(p->dataBuffer[i]), size);
    }

    output_size = 1 * (labels.size() + 4) * 8400;
    output_data = new float[output_size];

    return 0;
}

void preprocess(cv::Mat& frame) {
    // Preprocessing: pad to a square canvas, resize to the network input size,
    // convert BGR->RGB, scale to [0,1] and repack the pixels as planar (CHW) floats.
    w = frame.cols;
    h = frame.rows;
    int max = std::max(h, w);
    cv::Mat image = cv::Mat::zeros(cv::Size(max, max), CV_8UC3);
    cv::Rect roi(0, 0, w, h);
    frame.copyTo(image(roi));
    x_factor = image.cols / static_cast<float>(input_w);
    y_factor = image.rows / static_cast<float>(input_h);
    cv::resize(image, image, cv::Size(input_w, input_h));
    cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
    std::vector<cv::Mat> rgbChannels(3);
    cv::split(image, rgbChannels);
    for (int c = 0; c < 3; c++)
    {
        rgbChannels[c].convertTo(rgbChannels[c], CV_32FC1, 1.0 / 255.0);
    }
    int image_area = input_h * input_w;
    input_image.clear();
    input_image.resize(3 * image_area);
    size_t single_chn_size = image_area * sizeof(float);
    memcpy(input_image.data(), (float*)rgbChannels[0].data, single_chn_size);
    memcpy(input_image.data() + image_area, (float*)rgbChannels[1].data, single_chn_size);
    memcpy(input_image.data() + image_area * 2, (float*)rgbChannels[2].data, single_chn_size);

    cudaMemcpy(p->dataBuffer[0], input_image.data(), input_image.size() * sizeof(float), cudaMemcpyHostToDevice);
}

void postprocess(std::vector<detresult>& detectionResult) {

    // Copy the raw output (1 x (num_classes + 4) x 8400) back to the host; each
    // column is one candidate box, so the matrix is transposed below.
    cudaMemcpy(output_data, p->dataBuffer[1], output_size * sizeof(float), cudaMemcpyDeviceToHost);

    cv::Mat dout(static_cast<int>(labels.size()) + 4, 8400, CV_32F, output_data);
    cv::Mat det_output = dout.t();

    std::vector<cv::Rect> boxes;
    std::vector<int> classIds;
    std::vector<float> confidences;

    for (int i = 0; i < det_output.rows; i++)
    {
        cv::Mat classes_scores = det_output.row(i).colRange(4, static_cast<int>(labels.size()) + 4);
        cv::Point classIdPoint;
        double score;
        cv::minMaxLoc(classes_scores, 0, &score, 0, &classIdPoint);

        if (score > score_threshold)
        {
            float cx = det_output.at<float>(i, 0);
            float cy = det_output.at<float>(i, 1);
            float ow = det_output.at<float>(i, 2);
            float oh = det_output.at<float>(i, 3);
            int x = static_cast<int>((cx - 0.5 * ow) * x_factor);
            int y = static_cast<int>((cy - 0.5 * oh) * y_factor);
            int width = static_cast<int>(ow * x_factor);
            int height = static_cast<int>(oh * y_factor);

            // Clamp the box to the original image bounds
            if (x < 0) x = 0;
            if (y < 0) y = 0;

            if (x > w) x = w;
            if (y > h) y = h;

            if (x + width > w) width = w - x;
            if (y + height > h) height = h - y;

            cv::Rect box;
            box.x = x;
            box.y = y;
            box.width = width;
            box.height = height;

            boxes.push_back(box);
            classIds.push_back(classIdPoint.x);
            confidences.push_back(score);
        }
    }

    std::vector<int> indexes;
    cv::dnn::NMSBoxes(boxes, confidences, score_threshold, nms_threshold, indexes);


    for (size_t i = 0; i < indexes.size(); i++)
    {
        int index = indexes[i];
        detresult box(labels[classIds[index]], classIds[index], confidences[index], boxes[index]);
        detectionResult.push_back(box);
    }

}

void draw(cv::Mat& frame, std::vector<detresult>& detectionResult) {

    for (size_t i = 0; i < detectionResult.size(); ++i)
    {
        detresult box = detectionResult[i];
        cv::rectangle(frame, box.rect, cv::Scalar(0, 0, 255), 2);
        std::string label = box.className + ":" + cv::format("%.2f", box.confidence);
        putText(frame, label, cv::Point(box.rect.x, box.rect.y - 5), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
    }

    // Overlay timing and FPS information
    putText(frame, "preprocessTime:" + std::to_string(preprocessTime * 1000) + "ms", cv::Point(10, 30), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
    putText(frame, "inferTime:" + std::to_string(inferTime * 1000) + "ms", cv::Point(10, 70), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
    putText(frame, "postprocessTime:" + std::to_string(postprocessTime * 1000) + "ms", cv::Point(10, 110), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
    putText(frame, "totalTime:" + std::to_string(totalTime * 1000) + "ms", cv::Point(10, 150), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);
    putText(frame, "detFps:" + std::to_string(detFps), cv::Point(10, 190), cv::FONT_HERSHEY_SIMPLEX, 1, cv::Scalar(0, 255, 0), 2);

    cv::imshow("detresult", frame);

}

void destroy() {

    delete[] output_data;
    // Free the device buffers before releasing the buffer-pointer array itself.
    int numNode = p->engine->getNbBindings();
    for (int i = 0; i < numNode; i++) {
        cudaFree(p->dataBuffer[i]);
    }
    delete[] p->dataBuffer;
    p->context->destroy();
    p->engine->destroy();
    p->runtime->destroy();
    delete p;
}

int main()
{
    if (init() != 0) return -1;

    //cv::Mat frame = cv::imread(img_path);
    //cv::imshow("1.jpg", frame);
    //cv::waitKey(0);

    cv::VideoCapture capture(video_path);
    // Make sure the video opened successfully
    if (!capture.isOpened())
    {
        std::cout << "Failed to open the video file" << std::endl;
        return -1;
    }

    double fps = capture.get(cv::CAP_PROP_FPS);
    int width = static_cast<int>(capture.get(cv::CAP_PROP_FRAME_WIDTH));
    int height = static_cast<int>(capture.get(cv::CAP_PROP_FRAME_HEIGHT));

    cv::Mat frame;
    while (true)
    {
        bool success = capture.read(frame); // read one frame
        // Stop when no more frames can be read
        if (!success)
        {
            std::cout << "End of video" << std::endl;
            break;
        }

        double start = (double)cv::getTickCount();
        preprocess(frame);
        preprocessTime = ((double)cv::getTickCount() - start) / cv::getTickFrequency();

        // Inference
        start = (double)cv::getTickCount();
        p->context->executeV2(p->dataBuffer);
        inferTime = ((double)cv::getTickCount() - start) / cv::getTickFrequency();

        // Postprocessing
        start = (double)cv::getTickCount();
        std::vector<detresult> detectionResult;
        postprocess(detectionResult);
        postprocessTime = ((double)cv::getTickCount() - start) / cv::getTickFrequency();

        totalTime = preprocessTime + inferTime + postprocessTime;
        detFps = (1 / (totalTime));

        // Draw and display
        cv::namedWindow("detresult", cv::WINDOW_NORMAL); // cv::WINDOW_NORMAL lets the user resize the window
        cv::resizeWindow("detresult", width / 2, height / 2); // set the window width and height
        draw(frame, detectionResult);

        if (cv::waitKey(1) == 27) // press ESC to exit the loop
        {
            break;
        }
    }

    destroy();

    cv::destroyAllWindows();
    getchar();

    return 0;
}
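
The listing above relies on three small helper types, NvinferStruct, Logger and detresult, which come from the downloadable source rather than from TensorRT or OpenCV. A minimal sketch of what they need to provide is shown here as an assumption, to make the listing self-contained; the definitions shipped with the download may differ in detail.

// Aggregates the TensorRT objects and the per-binding device buffers.
struct NvinferStruct {
    nvinfer1::IRuntime* runtime = nullptr;
    nvinfer1::ICudaEngine* engine = nullptr;
    nvinfer1::IExecutionContext* context = nullptr;
    void** dataBuffer = nullptr;   // one device pointer per engine binding
};

// Minimal TensorRT logger: print warnings and errors only (TensorRT 8.x signature).
class Logger : public nvinfer1::ILogger {
    void log(Severity severity, const char* msg) noexcept override {
        if (severity <= Severity::kWARNING)
            std::cout << msg << std::endl;
    }
};

// One detection: class name, class id, confidence and box in original-image coordinates.
struct detresult {
    std::string className;
    int classId = 0;
    float confidence = 0.f;
    cv::Rect rect;
    detresult(const std::string& name, int id, float conf, const cv::Rect& r)
        : className(name), classId(id), confidence(conf), rect(r) {}
};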


Download

Source code download
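
The code expects a serialized engine at model/yolov8n.engine, presumably included in the source download. If you need to regenerate it from an exported ONNX model, a hedged sketch using the TensorRT builder and ONNX parser could look like the following (nvinfer.lib and nvonnxparser.lib are already in the dependency list; the function name and the ONNX path are assumptions, and the snippet targets TensorRT 8.x):

#include <fstream>
#include <iostream>
#include "NvInfer.h"
#include "NvOnnxParser.h"

// Build an engine from an ONNX model and write the serialized result to enginePath.
int buildEngine(const char* onnxPath, const char* enginePath, nvinfer1::ILogger& logger) {
    auto builder = nvinfer1::createInferBuilder(logger);
    const auto flags = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
    auto network = builder->createNetworkV2(flags);
    auto parser = nvonnxparser::createParser(*network, logger);
    if (!parser->parseFromFile(onnxPath, static_cast<int>(nvinfer1::ILogger::Severity::kWARNING))) {
        std::cerr << "Failed to parse " << onnxPath << std::endl;
        return -1;
    }
    auto config = builder->createBuilderConfig();
    config->setMemoryPoolLimit(nvinfer1::MemoryPoolType::kWORKSPACE, 1ULL << 30); // 1 GB workspace
    // config->setFlag(nvinfer1::BuilderFlag::kFP16);   // optional, if the GPU supports FP16
    auto serialized = builder->buildSerializedNetwork(*network, *config);         // IHostMemory*
    std::ofstream out(enginePath, std::ios::binary);
    out.write(static_cast<const char*>(serialized->data()), serialized->size());
    return 0;                                            // (cleanup omitted for brevity)
}

Calling buildEngine("yolov8n.onnx", "model/yolov8n.engine", logger) once is enough; the same result can also be produced with NVIDIA's trtexec tool.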
