编译为so之后调用报错No EGL Display nvbufsurftransform: Could not get EGL display connection
nvidia@ubuntu:~/jetson_dec_5.0.2/build$ ./main No EGL Display nvbufsurftransform: Could not get EGL display connection 'nvidia_rtsp_create' loaded successfully! 'nvidia_rtsp_read' loaded successfully! 'nvidia_rtsp_destroy' loaded successfully! username:admin password:ahsg123456 url:rtsp://192.168.1.225:554/Streaming/Channels/101 host:192.168.1.225 port:554 /home/nvidia/jetson_dec_5.0.2/media/MediaReader/RtspReader/rtsp_client.cpp249 OPTIONS rtsp://192.168.1.225:554/Streaming/Channels/101 RTSP/1.0 CSeq: 1 User-Agent: simple-rtsp-client
/home/nvidia/jetson_dec_5.0.2/media/MediaReader/RtspReader/rtsp_client.cpp169 RTSP/1.0 200 OK CSeq: 1 Public: OPTIONS, DESCRIBE, GET_PARAMETER, PAUSE, PLAY, SETUP, SET_PARAMETER, TEARDOWN Date: Thu, Dec 12 2024 16:17:46 GMT
/home/nvidia/jetson_dec_5.0.2/media/MediaReader/RtspReader/rtsp_client.cpp287 DESCRIBE rtsp://192.168.1.225:554/Streaming/Channels/101 RTSP/1.0 CSeq: 2 User-Agent: simple-rtsp-client Accept: application/sdp
/home/nvidia/jetson_dec_5.0.2/media/MediaReader/RtspReader/rtsp_client.cpp169 RTSP/1.0 401 Unauthorized CSeq: 2 WWW-Authenticate: Digest realm="IP Camera(F8024)", nonce="e1a544e8bba7259300761b8b429f9bf4", stale="FALSE" Date: Thu, Dec 12 2024 16:17:46 GMT
/home/nvidia/jetson_dec_5.0.2/media/MediaReader/RtspReader/rtsp_client.cpp287 DESCRIBE rtsp://192.168.1.225:554/Streaming/Channels/101 RTSP/1.0 CSeq: 3 User-Agent: simple-rtsp-client Accept: application/sdp Authorization: Digest username="admin", realm="IP Camera(F8024)", nonce="e1a544e8bba7259300761b8b429f9bf4", uri="rtsp://192.168.1.225:554/Streaming/Channels/101", response="00000000000000000000000000000000"
这个不影响运行
这个不影响运行
就是取不到流,认证不了
这个不影响运行 返回401,res是00000000000000000
你用这个试试,你这个好像是老款的海康把,我没试过,新款的海康我测试没遇到过 https://github.com/BreakingY/simple-rtsp-client
你用这个试试,你这个好像是老款的海康把,我没试过,新款的海康我测试没遇到过 https://github.com/BreakingY/simple-rtsp-client
我直接跑源码没问题的,就是编译为so就不行
#include "nvidia_rtsp.h"
你用这个试试,你这个好像是老款的海康把,我没试过,新款的海康我测试没遇到过 https://github.com/BreakingY/simple-rtsp-client
我直接跑源码没问题的,就是编译为so就不行
‘’‘
#include "JetsonDec.h"
#include "h264.h"
#include "MediaInterface.h"
#include "rtsp_client_proxy.h"
#ifdef Status
#undef Status
#endif
#include <opencv2/opencv.hpp>
#include <fstream>
#include <iostream>
#include <vector>
#include <unistd.h>
#include <sys/time.h>
#include <memory>
bool run_flag = true;
struct nvidia_rtspContext {
bool initialized;
bool rtsp_initialized; // 每个上下文有自己的RTSP初始化标志
cv::Mat current_frame;
cv::Mat rgbImage; // 当前解码的图像
std::mutex rgbImage_mutex; // 用于保护rgbImage的互斥锁
bool frame_ready; // 是否有解码帧准备好
unsigned char* jetson_addr_ = nullptr;
int width_, height_, fps_;
bool initialized_decoder_ = false;
uint32_t decoder_pixfmt_;
std::unique_ptr<JetsonDec> jetson_dec_obj_; // 解码器对象
std::unique_ptr<RtspClientProxy> rtsp_client_proxy_; // RTSP客户端代理
};
class Wrapper : public JetsonDecListner, public MediaDataListner {
public:
explicit Wrapper(const char* rtsp_url);
~Wrapper();
void InitializeDecoder();
cv::Mat GetFrame(); // 获取当前解码后的帧
private:
void OnJetsonDecData(unsigned char* data, int data_len, uint64_t timestamp);
void OnVideoData(VideoData data);
void OnAudioData(AudioData data);
void MediaOverhandle();
std::unique_ptr<nvidia_rtspContext> context_;
};
Wrapper::Wrapper(const char* rtsp_url) {
context_ = std::make_unique<nvidia_rtspContext>();
// 初始化 RTSP 客户端代理
context_->rtsp_client_proxy_ = std::make_unique<RtspClientProxy>(rtsp_url);
context_->rtsp_client_proxy_->ProbeVideoFps();
context_->rtsp_client_proxy_->GetVideoCon(context_->width_, context_->height_, context_->fps_);
context_->rtsp_client_proxy_->SetDataListner(static_cast<MediaDataListner *>(this), [this]() {
return this->MediaOverhandle();
});
}
Wrapper::~Wrapper() {
// 释放解码器地址内存
if (context_->jetson_addr_) {
free(context_->jetson_addr_);
context_->jetson_addr_ = nullptr;
}
printf("~Wrapper\n");
}
void Wrapper::InitializeDecoder() {
if (!context_->initialized_decoder_) {
enum VideoType video_type_ = context_->rtsp_client_proxy_->GetVideoType();
if (video_type_ == VIDEO_NONE) {
printf("only support H264/H265\n");
exit(1);
}
context_->decoder_pixfmt_ = (video_type_ == VIDEO_H264) ? V4L2_PIX_FMT_H264 : V4L2_PIX_FMT_H265;
context_->jetson_addr_ = (unsigned char*)malloc(context_->width_ * context_->height_ * 4);
if (!context_->jetson_addr_) {
printf("Memory allocation failed for jetson_addr\n");
exit(1);
}
printf("width:%d height:%d fps:%d\n", context_->width_, context_->height_, context_->fps_);
context_->jetson_dec_obj_ = std::make_unique<JetsonDec>(context_->decoder_pixfmt_, context_->width_, context_->height_, context_->jetson_addr_);
context_->jetson_dec_obj_->SetDecCallBack(static_cast<JetsonDecListner*>(this));
context_->initialized_decoder_ = true;
}
}
void Wrapper::OnVideoData(VideoData data) {
InitializeDecoder();
int try_cnt = 0;
while (context_->jetson_dec_obj_->GetQueueSize() > 5 && try_cnt < 4) {
usleep(1000 * 10);
try_cnt++;
printf("GetQueueSize:%d\n", context_->jetson_dec_obj_->GetQueueSize());
}
struct timeval time_now;
gettimeofday(&time_now, NULL);
uint64_t timestamp = 1000 * (time_now.tv_sec) + (time_now.tv_usec) / 1000;
context_->jetson_dec_obj_->AddEsData((unsigned char*)data.data, data.data_len, timestamp);
}
void Wrapper::OnAudioData(AudioData data) {
printf("OnAudioData\n");
}
void Wrapper::MediaOverhandle() {
printf("MediaOverhandle....\n");
run_flag = false;
}
void Wrapper::OnJetsonDecData(unsigned char* data, int data_len, uint64_t timestamp) {
static int64_t frames_ = 0;
static uint64_t total_ = 0;
static const int n = 100;
struct timeval time_dec;
gettimeofday(&time_dec, NULL);
uint64_t time_stamp = 1000 * (time_dec.tv_sec) + (time_dec.tv_usec) / 1000;
frames_++;
long long delay = time_stamp - timestamp;
if (frames_ > n) {
total_ += time_stamp - timestamp;
}
if (frames_ % 20 == 0) {
printf("delay:%lld avg:%llu\n", delay, static_cast<unsigned long long>(total_ / (frames_ - n)));
}
// 使用 OpenCV 显示解码后的数据
cv::Mat yuvImage(context_->height_ + context_->height_ / 2, context_->width_, CV_8UC1, context_->jetson_addr_);
cv::Mat bgrImage;
// 转换 YUV 到 BGR
cv::cvtColor(yuvImage, bgrImage, cv::COLOR_YUV2BGR_NV12);
// 将解码后的帧存储到 rgbImage
{
std::lock_guard<std::mutex> lock(context_->rgbImage_mutex);
context_->rgbImage = bgrImage.clone();
}
}
cv::Mat Wrapper::GetFrame() {
std::lock_guard<std::mutex> lock(context_->rgbImage_mutex);
return context_->rgbImage.clone(); // 返回当前帧的副本
}
// 创建 RTSP 流处理实例
extern "C" void* nvidia_rtsp_create(const char* rtsp) {
return new Wrapper(rtsp);
}
// 读取一帧图像
extern "C" std::tuple<bool, cv::Mat> nvidia_rtsp_read(void* handle) {
Wrapper* wrapper = static_cast<Wrapper*>(handle);
if (wrapper) {
cv::Mat frame = wrapper->GetFrame();
if (!frame.empty()) {
return std::make_tuple(true, frame);
}
}
return std::make_tuple(false, cv::Mat());
}
// 销毁 RTSP 流处理实例
extern "C" void nvidia_rtsp_destroy(void* handle) {
Wrapper* wrapper = static_cast<Wrapper*>(handle);
if (wrapper) {
delete wrapper;
}
}
// 主函数
int main(int argc, char** argv) {
const char* rtsp = "rtsp://admin:[email protected]:554/Streaming/Channels/101";
void* handle = nvidia_rtsp_create(rtsp);
while (run_flag) {
auto [success, frame] = nvidia_rtsp_read(handle);
if (success && !frame.empty()) {
// 处理帧 (例如显示)
cv::imshow("Frame", frame);
if (cv::waitKey(1) == 'q') break;
}
}
nvidia_rtsp_destroy(handle);
return 0;
}
‘’‘
这样可以
’‘’
#include <iostream>
#include <dlfcn.h>
#include <opencv2/opencv.hpp>
#include <tuple>
#include <unistd.h> // 用于 sleep 函数
// 定义函数指针类型
typedef void* (*CreateFunc)(const char*);
typedef std::tuple<bool, cv::Mat> (*ReadFunc)(void*);
typedef void (*DestroyFunc)(void*);
int main() {
// 加载共享库
void* handle = dlopen("./libnvidia_rtsp.so", RTLD_LAZY);
if (!handle) {
std::cerr << "Cannot open library: " << dlerror() << '\n';
return 1;
}
// 获取函数地址
CreateFunc create = (CreateFunc)dlsym(handle, "nvidia_rtsp_create");
const char* dlsym_error = dlerror();
if (dlsym_error) {
std::cerr << "Cannot load symbol 'nvidia_rtsp_create': " << dlsym_error << '\n';
dlclose(handle);
return 1;
} else {
std::cout << "'nvidia_rtsp_create' loaded successfully!" << std::endl;
}
ReadFunc read = (ReadFunc)dlsym(handle, "nvidia_rtsp_read");
dlsym_error = dlerror();
if (dlsym_error) {
std::cerr << "Cannot load symbol 'nvidia_rtsp_read': " << dlsym_error << '\n';
dlclose(handle);
return 1;
} else {
std::cout << "'nvidia_rtsp_read' loaded successfully!" << std::endl;
}
DestroyFunc destroy = (DestroyFunc)dlsym(handle, "nvidia_rtsp_destroy");
dlsym_error = dlerror();
if (dlsym_error) {
std::cerr << "Cannot load symbol 'nvidia_rtsp_destroy': " << dlsym_error << '\n';
dlclose(handle);
return 1;
} else {
std::cout << "'nvidia_rtsp_destroy' loaded successfully!" << std::endl;
}
// RTSP 流地址
const char* rtsp_url = "rtsp://admin:[email protected]:554/Streaming/Channels/101"; // 修改为你的RTSP URL
// 创建 RTSP 流
void* ctx = create(rtsp_url);
if (!ctx) {
std::cerr << "Failed to create RTSP context" << std::endl;
dlclose(handle);
return 1;
}
std::cout << "RTSP context created successfully!" << std::endl;
while (true) {
auto [success, frame] = read(ctx);
if (success && !frame.empty()) {
std::cout << "Frame read successfully!" << std::endl;
cv::imshow("Stream", frame);
if (cv::waitKey(1) == 'q') break;
}
}
// 销毁窗口
cv::destroyAllWindows();
// 销毁 RTSP 流并关闭共享库
destroy(ctx);
std::cout << "RTSP context destroyed." << std::endl;
dlclose(handle);
std::cout << "Shared library closed." << std::endl;
return 0;
}
’‘’
我这样调用就不行了
你把项目中的所有头文件都引入都到你的工程试试
你把项目中的所有头文件都引入都到你的工程试试
引入到so还是main啊
cmake_minimum_required(VERSION 3.10)
project(JETSONTEST)
# 设置C++标准和CUDA
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# 查找CUDA和OpenCV
find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED COMPONENTS core imgproc highgui)
# 设置包含路径
include_directories(
${CUDA_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
./include
./include/libjpeg-8b
/usr/include/libdrm
./common/algorithm/cuda
/usr/local/cuda-11.4/targets/aarch64-linux/include/
media/MediaCommon
media/MediaReader
media/MediaReader/FileReader
media/MediaReader/RtspReader
media/MediaReader/RtspReader/rtp
media/MediaReader/RtspReader/3rdparty
media/Bitstream/h264/include
media/Bitstream/h265/include
)
# 设置CUDA源文件
set(CUDA_SOURCES
./common/algorithm/cuda/NvAnalysis.cu # 加入 CUDA 文件
# 其他需要的 CUDA 文件
)
# 设置源文件
aux_source_directory(./src SRC)
aux_source_directory(./common/classes COMMON)
aux_source_directory(./media/MediaCommon MEDIACOMMON)
aux_source_directory(./media/MediaReader/FileReader MEDIAREADER)
aux_source_directory(./media/MediaReader/RtspReader MEDIARTSP)
aux_source_directory(./media/MediaReader/RtspReader/rtp MEDIARTP)
aux_source_directory(./media/MediaReader/RtspReader/3rdparty MEDIA3RD)
aux_source_directory(./media/Bitstream/h264/source H264)
aux_source_directory(./media/Bitstream/h265/source H265)
# 链接目录
link_directories(
/usr/lib/aarch64-linux-gnu/tegra
/lib/aarch64-linux-gnu
/usr/local/lib
/usr/lib
/usr/local/cuda/lib64
)
# 添加编译选项
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive -g -std=c++14")
# 设置输出路径
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
# 设置构建类型为 Release
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
# 创建可执行文件 demo
add_executable(demo
video_decode_main.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 链接 OpenCV 库和其他依赖
target_link_libraries(demo
${CUDA_LIBRARIES} # CUDA 库
pthread
v4l2
EGL
GLESv2
X11
nvbufsurface
nvbufsurftransform
nvjpeg
nvosd
drm
cuda
cudart
avutil
avformat
avcodec
${OpenCV_LIBS}
)
# 创建共享库 nvidia_rtsp,确保包含所有源文件
add_library(nvidia_rtsp SHARED
video_decode_main.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 链接 nvidia_rtsp 所需的库,确保CUDA库放在前面
target_link_libraries(nvidia_rtsp
${CUDA_LIBRARIES} # CUDA 库
pthread
v4l2
EGL
GLESv2
X11
nvbufsurface
nvbufsurftransform
nvjpeg
nvosd
drm
cuda
cudart
avutil
avformat
avcodec
${OpenCV_LIBS}
)
# 创建可执行文件 main
add_executable(main main.cpp)
# 链接 OpenCV 库和其他依赖
target_link_libraries(main
${OpenCV_LIBS}
dl # 添加 -ldl 以链接 libdl 库
)
我的cmake
你把项目中的所有头文件都引入都到你的工程试试
方便给个邮箱不,我把代码发给您看看,方便吗
target_link_libraries(main
${OpenCV_LIBS}
dl # 添加 -ldl 以链接 libdl 库
)
这里,不需要连接nvidia_rtsp吗? 还有
add_library(nvidia_rtsp SHARED
video_decode_main.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
这里就不需要包含video_decode_main.cpp了把,创建动态库,不需要video_decode_main.cpp 你根据你的项目结构,改一下cmake应该就可以了把,如果实在不行,就不用动态库了把
target_link_libraries(main ${OpenCV_LIBS} dl # 添加 -ldl 以链接 libdl 库 )这里,不需要连接nvidia_rtsp吗? 还有
add_library(nvidia_rtsp SHARED video_decode_main.cpp ${CUDA_SOURCES} # 加入 CUDA 文件 ${SRC} ${COMMON} ${MEDIACOMMON} ${MEDIAREADER} ${MEDIARTSP} ${MEDIARTP} ${MEDIA3RD} ${H264} ${H265} )这里就不需要包含video_decode_main.cpp了把,创建动态库,不需要video_decode_main.cpp 你根据你的项目结构,改一下cmake应该就可以了把,如果实在不行,就不用动态库了把
cmake_minimum_required(VERSION 3.10)
project(JETSONTEST)
# 设置C++标准和CUDA
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# 查找CUDA和OpenCV
find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED COMPONENTS core imgproc highgui)
# 设置包含路径
include_directories(
${CUDA_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
./include
./include/libjpeg-8b
/usr/include/libdrm
./common/algorithm/cuda
/usr/local/cuda-11.4/targets/aarch64-linux/include/
media/MediaCommon
media/MediaReader
media/MediaReader/FileReader
media/MediaReader/RtspReader
media/MediaReader/RtspReader/rtp
media/MediaReader/RtspReader/3rdparty
media/Bitstream/h264/include
media/Bitstream/h265/include
)
# 设置CUDA源文件
set(CUDA_SOURCES
./common/algorithm/cuda/NvAnalysis.cu # 加入 CUDA 文件
# 其他需要的 CUDA 文件
)
# 设置源文件
aux_source_directory(./src SRC)
aux_source_directory(./common/classes COMMON)
aux_source_directory(./media/MediaCommon MEDIACOMMON)
aux_source_directory(./media/MediaReader/FileReader MEDIAREADER)
aux_source_directory(./media/MediaReader/RtspReader MEDIARTSP)
aux_source_directory(./media/MediaReader/RtspReader/rtp MEDIARTP)
aux_source_directory(./media/MediaReader/RtspReader/3rdparty MEDIA3RD)
aux_source_directory(./media/Bitstream/h264/source H264)
aux_source_directory(./media/Bitstream/h265/source H265)
# 链接目录
link_directories(
/usr/lib/aarch64-linux-gnu/tegra
/lib/aarch64-linux-gnu
/usr/local/lib
/usr/lib
/usr/local/cuda/lib64
)
# 添加编译选项
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive -g -std=c++14")
# 设置输出路径
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
# 设置构建类型为 Release
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
# 创建可执行文件 demo
add_executable(demo
video_decode_main.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 链接 OpenCV 库和其他依赖
target_link_libraries(demo
${CUDA_LIBRARIES} # CUDA 库
pthread
v4l2
EGL
GLESv2
X11
nvbufsurface
nvbufsurftransform
nvjpeg
nvosd
drm
cuda
cudart
avutil
avformat
avcodec
${OpenCV_LIBS}
)
# 创建共享库 nvidia_rtsp,确保包含所有源文件
add_library(nvidia_rtsp SHARED
nvidia_rtsp.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 链接 nvidia_rtsp 所需的库,确保CUDA库放在前面
target_link_libraries(nvidia_rtsp
${CUDA_LIBRARIES} # CUDA 库
pthread
v4l2
EGL
GLESv2
X11
nvbufsurface
nvbufsurftransform
nvjpeg
nvosd
drm
cuda
cudart
avutil
avformat
avcodec
${OpenCV_LIBS}
)
# 创建可执行文件 main
add_executable(main main.cpp)
# 链接 OpenCV 库和其他依赖
target_link_libraries(main
${OpenCV_LIBS}
nvidia_rtsp
dl # 添加 -ldl 以链接 libdl 库
)
我是编译好给别人调用的
target_link_libraries(main ${OpenCV_LIBS} dl # 添加 -ldl 以链接 libdl 库 )这里,不需要连接nvidia_rtsp吗? 还有
add_library(nvidia_rtsp SHARED video_decode_main.cpp ${CUDA_SOURCES} # 加入 CUDA 文件 ${SRC} ${COMMON} ${MEDIACOMMON} ${MEDIAREADER} ${MEDIARTSP} ${MEDIARTP} ${MEDIA3RD} ${H264} ${H265} )这里就不需要包含video_decode_main.cpp了把,创建动态库,不需要video_decode_main.cpp 你根据你的项目结构,改一下cmake应该就可以了把,如果实在不行,就不用动态库了把
cmake_minimum_required(VERSION 3.10) project(JETSONTEST) # 设置C++标准和CUDA set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) # 查找CUDA和OpenCV find_package(CUDA REQUIRED) find_package(OpenCV REQUIRED COMPONENTS core imgproc highgui) # 设置包含路径 include_directories( ${CUDA_INCLUDE_DIRS} ${OpenCV_INCLUDE_DIRS} ./include ./include/libjpeg-8b /usr/include/libdrm ./common/algorithm/cuda /usr/local/cuda-11.4/targets/aarch64-linux/include/ media/MediaCommon media/MediaReader media/MediaReader/FileReader media/MediaReader/RtspReader media/MediaReader/RtspReader/rtp media/MediaReader/RtspReader/3rdparty media/Bitstream/h264/include media/Bitstream/h265/include ) # 设置CUDA源文件 set(CUDA_SOURCES ./common/algorithm/cuda/NvAnalysis.cu # 加入 CUDA 文件 # 其他需要的 CUDA 文件 ) # 设置源文件 aux_source_directory(./src SRC) aux_source_directory(./common/classes COMMON) aux_source_directory(./media/MediaCommon MEDIACOMMON) aux_source_directory(./media/MediaReader/FileReader MEDIAREADER) aux_source_directory(./media/MediaReader/RtspReader MEDIARTSP) aux_source_directory(./media/MediaReader/RtspReader/rtp MEDIARTP) aux_source_directory(./media/MediaReader/RtspReader/3rdparty MEDIA3RD) aux_source_directory(./media/Bitstream/h264/source H264) aux_source_directory(./media/Bitstream/h265/source H265) # 链接目录 link_directories( /usr/lib/aarch64-linux-gnu/tegra /lib/aarch64-linux-gnu /usr/local/lib /usr/lib /usr/local/cuda/lib64 ) # 添加编译选项 set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive -g -std=c++14") # 设置输出路径 set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}) # 设置构建类型为 Release if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE Release) endif() # 创建可执行文件 demo add_executable(demo video_decode_main.cpp ${CUDA_SOURCES} # 加入 CUDA 文件 ${SRC} ${COMMON} ${MEDIACOMMON} ${MEDIAREADER} ${MEDIARTSP} ${MEDIARTP} ${MEDIA3RD} ${H264} ${H265} ) # 链接 OpenCV 库和其他依赖 target_link_libraries(demo ${CUDA_LIBRARIES} # CUDA 库 pthread v4l2 EGL GLESv2 X11 nvbufsurface nvbufsurftransform nvjpeg nvosd drm cuda cudart avutil avformat avcodec ${OpenCV_LIBS} ) # 创建共享库 nvidia_rtsp,确保包含所有源文件 add_library(nvidia_rtsp SHARED nvidia_rtsp.cpp ${CUDA_SOURCES} # 加入 CUDA 文件 ${SRC} ${COMMON} ${MEDIACOMMON} ${MEDIAREADER} ${MEDIARTSP} ${MEDIARTP} ${MEDIA3RD} ${H264} ${H265} ) # 链接 nvidia_rtsp 所需的库,确保CUDA库放在前面 target_link_libraries(nvidia_rtsp ${CUDA_LIBRARIES} # CUDA 库 pthread v4l2 EGL GLESv2 X11 nvbufsurface nvbufsurftransform nvjpeg nvosd drm cuda cudart avutil avformat avcodec ${OpenCV_LIBS} ) # 创建可执行文件 main add_executable(main main.cpp) # 链接 OpenCV 库和其他依赖 target_link_libraries(main ${OpenCV_LIBS} nvidia_rtsp dl # 添加 -ldl 以链接 libdl 库 )我是编译好给别人调用的
这样还是不行
我这样修改就可以调用so了,但是我不是很明白为什么还要重新给main再链接一遍库和cpp,不是应在封装so的时候链接过了吗
cmake_minimum_required(VERSION 3.10)
project(JETSONTEST)
# 设置C++标准和CUDA
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
# 查找CUDA和OpenCV
find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED COMPONENTS core imgproc highgui)
# 设置包含路径
include_directories(
${CUDA_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
./include
./include/libjpeg-8b
/usr/include/libdrm
./common/algorithm/cuda
/usr/local/cuda-11.4/targets/aarch64-linux/include/
media/MediaCommon
media/MediaReader
media/MediaReader/FileReader
media/MediaReader/RtspReader
media/MediaReader/RtspReader/rtp
media/MediaReader/RtspReader/3rdparty
media/Bitstream/h264/include
media/Bitstream/h265/include
)
# 设置CUDA源文件
set(CUDA_SOURCES
./common/algorithm/cuda/NvAnalysis.cu # 加入 CUDA 文件
# 其他需要的 CUDA 文件
)
# 设置源文件
aux_source_directory(./src SRC)
aux_source_directory(./common/classes COMMON)
aux_source_directory(./media/MediaCommon MEDIACOMMON)
aux_source_directory(./media/MediaReader/FileReader MEDIAREADER)
aux_source_directory(./media/MediaReader/RtspReader MEDIARTSP)
aux_source_directory(./media/MediaReader/RtspReader/rtp MEDIARTP)
aux_source_directory(./media/MediaReader/RtspReader/3rdparty MEDIA3RD)
aux_source_directory(./media/Bitstream/h264/source H264)
aux_source_directory(./media/Bitstream/h265/source H265)
# 链接目录
link_directories(
/usr/lib/aarch64-linux-gnu/tegra
/lib/aarch64-linux-gnu
/usr/local/lib
/usr/lib
/usr/local/cuda/lib64
)
# 添加编译选项
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fpermissive -g -std=c++14")
# 设置输出路径
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR})
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
# 设置构建类型为 Release
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
# 创建共享库 nvidia_rtsp,确保包含所有源文件
add_library(nvidia_rtsp SHARED
nvidia_rtsp.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 将所有依赖项链接到 nvidia_rtsp 共享库
target_link_libraries(nvidia_rtsp
${CUDA_LIBRARIES} # CUDA 库
pthread
v4l2
EGL
GLESv2
X11
nvbufsurface
nvbufsurftransform
nvjpeg
nvosd
drm
cuda
cudart
avutil
avformat
avcodec
${OpenCV_LIBS}
)
# 创建可执行文件 demo
add_executable(demo
video_decode_main.cpp
${CUDA_SOURCES} # 加入 CUDA 文件
${SRC}
${COMMON}
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 链接 demo 依赖,使用 nvidia_rtsp 库来传递依赖
target_link_libraries(demo
nvidia_rtsp # 使用 nvidia_rtsp 库来传递依赖
)
# 创建可执行文件 main
add_executable(main
main.cpp
${MEDIACOMMON}
${MEDIAREADER}
${MEDIARTSP}
${MEDIARTP}
${MEDIA3RD}
${H264}
${H265}
)
# 链接 main,直接链接 nvidia_rtsp,这会自动传递依赖
target_link_libraries(main
nvidia_rtsp # 直接链接 nvidia_rtsp 库,避免重新链接所有依赖
)