Environment Setup

  • opencv
  • onnxruntime-gpu
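
OpenCV can be installed from the system package manager or built from source. The GPU build of onnxruntime ships as a prebuilt archive (onnxruntime-linux-x64-gpu-&lt;version&gt;.tgz) on the project's GitHub releases page; this article uses 1.12.1, extracted to a local directory that the CMake configuration below points at. Note that the GPU package also requires a compatible CUDA and cuDNN installation on the machine.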

Modifying CMakeLists.txt

cmake_minimum_required(VERSION 3.0.0)
project(demo VERSION 0.1.0)

# point this at your extracted onnxruntime package
set(ONNXRUNTIME_DIR /home/yp/onnx/onnxruntime-linux-x64-gpu-1.12.1)
message(STATUS "ONNXRUNTIME_DIR: ${ONNXRUNTIME_DIR}")

include(CTest)
include_directories(${PROJECT_SOURCE_DIR}/include)
enable_testing()

find_package(OpenCV REQUIRED)

add_executable(demo ${PROJECT_SOURCE_DIR}/main.cpp ${PROJECT_SOURCE_DIR}/Helpers.cpp)

target_link_libraries(demo ${OpenCV_LIBS})
target_include_directories(demo PRIVATE "${ONNXRUNTIME_DIR}/include")
target_link_libraries(demo "${ONNXRUNTIME_DIR}/lib/libonnxruntime.so")

set(CPACK_PROJECT_NAME ${PROJECT_NAME})
set(CPACK_PROJECT_VERSION ${PROJECT_VERSION})
include(CPack)

After making these changes, run the following commands to build:

mkdir build
cd build
cmake ..
make
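
One runtime note: libonnxruntime.so is linked by absolute path rather than installed system-wide, so the dynamic loader may fail to find it when the binary is launched. If that happens, add ${ONNXRUNTIME_DIR}/lib to LD_LIBRARY_PATH, or set an rpath in CMakeLists.txt.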

Code Walkthrough

Session Initialization

Ort::Env env;                         // ORT environment; must outlive the session
Ort::Session session(nullptr);        // no default ctor, so start with a null session and assign later
Ort::SessionOptions sessionOptions;   // default-constructed, valid options object
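
Constructing the session from nullptr is the usual pattern when it lives as a class member: Ort::Session has no default constructor, so a null session is declared first and the real one is assigned once the options are ready. The environment can also be given an explicit logging level and identifier; a minimal sketch (the tag "demo" is an arbitrary choice):

// equivalent environment with warning-level logging and a log identifier
Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "demo");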

Using the GPU

std::vector<std::string> availableProviders = Ort::GetAvailableProviders();
auto cudaAvailable = std::find(availableProviders.begin(), availableProviders.end(), "CUDAExecutionProvider");
OrtCUDAProviderOptions cudaOption;

if (isGPU && (cudaAvailable == availableProviders.end()))
{
    std::cout << "GPU is not supported by your ONNXRuntime build. Falling back to CPU." << std::endl;
    std::cout << "Inference device: CPU" << std::endl;
}
else if (isGPU && (cudaAvailable != availableProviders.end()))
{
    std::cout << "Inference device: GPU" << std::endl;
    sessionOptions.AppendExecutionProvider_CUDA(cudaOption);
}
else
{
    std::cout << "Inference device: CPU" << std::endl;
}
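
OrtCUDAProviderOptions also carries a device_id field, so on a multi-GPU machine a specific card can be selected before the provider is appended; a sketch, assuming the first device:

// optional: pin inference to CUDA device 0 before appending the provider
cudaOption.device_id = 0;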

Creating the Session

<< "create session. " << endl;
session = Ort::Session(env, modelPath, sessionOptions);
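
Session construction throws Ort::Exception when the model file is missing or malformed, so it is worth guarding; a minimal sketch, assuming modelPath points at the ResNet model from the repo linked below:

try
{
    session = Ort::Session(env, modelPath, sessionOptions);
}
catch (const Ort::Exception& e)
{
    std::cerr << "failed to create session: " << e.what() << std::endl;
    return 1;
}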

Defining Tensors

// define shapes (NCHW layout for the input)
const std::array<int64_t, 4> inputShape = {1, numChannels, height, width};
const std::array<int64_t, 2> outputShape = {1, numClasses};

// define the arrays that back the tensors
std::array<float, numInputElements> input;
std::array<float, numClasses> results;

// define Tensor
auto memory_info = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
auto inputTensor = Ort::Value::CreateTensor<float>(memory_info, input.data(), input.size(), inputShape.data(), inputShape.size());
auto outputTensor = Ort::Value::CreateTensor<float>(memory_info, results.data(), results.size(), outputShape.data(), outputShape.size());
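
Two things are worth noting here. First, CreateTensor does not copy: the tensors are views over input and results, so both arrays must stay alive until Run completes, and input must be filled with the preprocessed image data beforehand. Second, std::array needs compile-time sizes, so the dimensions are constants; for the ResNet model used here the standard ImageNet values would look like the sketch below (an assumption, so verify against the exported model):

// assumed ImageNet-style dimensions; check them against your exported ONNX model
constexpr int64_t numChannels = 3;
constexpr int64_t height = 224;
constexpr int64_t width = 224;
constexpr int64_t numClasses = 1000;
constexpr size_t numInputElements = numChannels * height * width;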

Running Inference

session.Run(Ort::RunOptions{nullptr}, inputNames.data(), &inputTensor, 1, outputNames.data(), &outputTensor, 1);
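
Run expects arrays of null-terminated tensor names matching the graph. With onnxruntime 1.12 they can be queried through the allocator API (or simply hard-coded to match the exported model); after Run returns, results already holds the class scores, so the prediction reduces to an argmax. A sketch (needs &lt;algorithm&gt; and &lt;iterator&gt;):

Ort::AllocatorWithDefaultOptions allocator;
char* inputName = session.GetInputName(0, allocator);    // free later via the allocator
char* outputName = session.GetOutputName(0, allocator);
std::vector<const char*> inputNames{inputName};
std::vector<const char*> outputNames{outputName};

// ... session.Run(...) as above ...

// the index of the largest score is the predicted class
auto maxIt = std::max_element(results.begin(), results.end());
int predictedClass = static_cast<int>(std::distance(results.begin(), maxIt));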

CPU vs. GPU Speed Comparison

GPU

(screenshot: GPU inference timing)

CPU

(screenshot: CPU inference timing)
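
To reproduce a comparison like this, the simplest approach is to time the Run call with std::chrono; note that the first GPU run includes CUDA initialization and warm-up, so timing a later iteration is more representative. A sketch:

auto start = std::chrono::high_resolution_clock::now();   // needs <chrono>
session.Run(Ort::RunOptions{nullptr}, inputNames.data(), &inputTensor, 1, outputNames.data(), &outputTensor, 1);
auto end = std::chrono::high_resolution_clock::now();
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
std::cout << "inference took " << ms << " ms" << std::endl;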

Source Code

https://github.com/SonwYang/onnxruntime_resnet