flutter-webrtc
feat(videoFrame): add onframe callback feature
- [x] Android
- [x] iOS
- [x] Other formats
- [x] Check the usage of resources
- [x] Frame capture invokes this method
release: 108
@cloudwebrtc
Hello, sorry to bother you. May I ask if there is a way to verify that the generated formats (I420, RGBA) are correct?
RGBA should be directly renderable. For YUV, you could write the frame to a file from both the native side and Flutter, then compare the contents?
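A minimal Dart-side sketch of that dump-and-compare idea, assuming the onFrame callback added in this PR; the file names and the native-side dump are placeholders:

import 'dart:io';

import 'package:path_provider/path_provider.dart';

// Write the raw frame bytes exactly as received, with no conversion, so the
// file can be diffed against a dump written by the native side
// (e.g. frame_native.yuv). RTCVideoFrame comes from this PR's onFrame callback.
Future<void> dumpFrameForComparison(RTCVideoFrame frame) async {
  final dir = await getTemporaryDirectory();
  final file = File('${dir.path}/frame_flutter.yuv');
  await file.writeAsBytes(frame.data);
  print('Wrote ${frame.data.length} bytes (${frame.width}x${frame.height}) to ${file.path}');
}

// Compare the two dumps byte for byte, e.g. `cmp frame_native.yuv frame_flutter.yuv`,
// or in Dart:
Future<bool> filesIdentical(String a, String b) async {
  final ba = await File(a).readAsBytes();
  final bb = await File(b).readAsBytes();
  if (ba.length != bb.length) return false;
  for (var i = 0; i < ba.length; i++) {
    if (ba[i] != bb[i]) return false;
  }
  return true;
}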
Hi @cloudwebrtc, I've finished this.
However, there may be an issue: the YUV420 (NV12) data can't be rotated on the native side (it seems to require a third-party library, e.g. libyuv), and the image generated from the YUV420 (NV12) data lacks color (it comes out in black and white only). I'm not entirely sure whether this is actually a problem.
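If native-side rotation via libyuv isn't wired up yet, one workaround is rotating the NV12 buffer on the Dart side. A rough, unoptimized sketch of a 90° clockwise rotation, assuming a tightly packed buffer with no row padding:

import 'dart:typed_data';

/// Rotates a tightly packed NV12 buffer 90° clockwise.
/// The output has width == [height] and height == [width].
Uint8List rotateNv12Cw90(Uint8List src, int width, int height) {
  final ySize = width * height;
  final dst = Uint8List(src.length);
  final dstWidth = height; // dimensions swap after rotation
  // Y plane: source pixel (x, y) moves to column (height - 1 - y), row x.
  for (var y = 0; y < height; y++) {
    for (var x = 0; x < width; x++) {
      dst[x * dstWidth + (height - 1 - y)] = src[y * width + x];
    }
  }
  // Interleaved UV plane: rotate the grid of 2-byte UV pairs the same way.
  final uvW = width ~/ 2, uvH = height ~/ 2, dstUvW = height ~/ 2;
  for (var y = 0; y < uvH; y++) {
    for (var x = 0; x < uvW; x++) {
      final srcI = ySize + (y * uvW + x) * 2;
      final dstI = ySize + (x * dstUvW + (uvH - 1 - y)) * 2;
      dst[dstI] = src[srcI];         // U
      dst[dstI + 1] = src[srcI + 1]; // V
    }
  }
  return dst;
}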
Here's the Flutter test code:
localRenderer.onFrame = (RTCVideoFrame frame) async {
  String format = frame.format.getStringValue();
  final directory = await getTemporaryDirectory();
  final filePath = '${directory.path}/frame.jpeg';
  // MJPEG: already encoded, write it out directly
  if (frame.format == RTCVideoFrameFormat.KMJPEG) {
    File file = File(filePath);
    await file.writeAsBytes(frame.data);
    OpenFile.open(filePath);
  }
  // RGBA: drop the alpha channel, then encode as JPEG
  if (frame.format == RTCVideoFrameFormat.KRGBA) {
    Uint8List rgbData = rgbaToRgb(frame.data); // see the sketch below
    image.Image img = image.Image.fromBytes(
        width: frame.width, height: frame.height, bytes: rgbData.buffer);
    File(filePath).writeAsBytesSync(image.encodeJpg(img));
    await OpenFile.open(filePath);
  }
  // I420 / NV12: convert to RGB, then encode as JPEG
  if (frame.format == RTCVideoFrameFormat.KI420) {
    Uint8List nv12Data = Uint8List.fromList(frame.data);
    Uint8List rgbData = convertNV12ToRGB(nv12Data, frame.width, frame.height);
    image.Image img = image.Image.fromBytes(
        width: frame.width, height: frame.height, bytes: rgbData.buffer);
    File(filePath).writeAsBytesSync(image.encodeJpg(img));
    await OpenFile.open(filePath);
  }
};
localRenderer.initialize(exportFrame: ExportFrame(enabledExportFrame: true, frameCount: 100, format: RTCVideoFrameFormat.KI420));
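(The rgbaToRgb helper isn't shown above; a minimal sketch, assuming tightly packed RGBA frame data, would simply drop every fourth byte:)

import 'dart:typed_data';

/// Hypothetical helper: drops the alpha channel from tightly packed RGBA data.
Uint8List rgbaToRgb(Uint8List rgba) {
  final pixels = rgba.length ~/ 4;
  final rgb = Uint8List(pixels * 3);
  for (var i = 0; i < pixels; i++) {
    rgb[i * 3] = rgba[i * 4];         // R
    rgb[i * 3 + 1] = rgba[i * 4 + 1]; // G
    rgb[i * 3 + 2] = rgba[i * 4 + 2]; // B
  }
  return rgb;
}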
And the NV12 → RGB conversion helper used above:

Uint8List convertNV12ToRGB(Uint8List nv12Data, int width, int height) {
  final int imageSize = width * height;
  // NV12: a full-size Y plane followed by an interleaved UV plane whose rows
  // are `width` bytes wide (width/2 U/V pairs per row).
  final int uvStride = width;
  Uint8List rgbData = Uint8List(imageSize * 3);
  int rgbIndex = 0;
  for (int y = 0; y < height; y++) {
    int yOffset = y * width;
    int uvOffset = imageSize + (y ~/ 2) * uvStride;
    for (int x = 0; x < width; x++) {
      int yIndex = yOffset + x;
      int subUvIndex = uvOffset + (x ~/ 2) * 2;
      int yValue = nv12Data[yIndex];
      int uValue = nv12Data[subUvIndex];
      int vValue = nv12Data[subUvIndex + 1];
      // Compute the RGB values (BT.601 limited-range coefficients)
      int c = yValue - 16;
      int d = uValue - 128;
      int e = vValue - 128;
      int r = (298 * c + 409 * e + 128) >> 8;
      int g = (298 * c - 100 * d - 208 * e + 128) >> 8;
      int b = (298 * c + 516 * d + 128) >> 8;
      // Clamp the RGB values to the 0-255 range
      r = r.clamp(0, 255);
      g = g.clamp(0, 255);
      b = b.clamp(0, 255);
      // Store the RGB values in the result array
      rgbData[rgbIndex++] = r;
      rgbData[rgbIndex++] = g;
      rgbData[rgbIndex++] = b;
    }
  }
  return rgbData;
}
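On the missing color: it may be worth checking whether the buffer delivered under KI420 is actually planar I420 rather than semi-planar NV12. Reading planar chroma as interleaved UV pairs typically yields a mostly grey image. A hypothetical planar I420 variant of the helper above, for comparison:

import 'dart:typed_data';

/// Same math as convertNV12ToRGB, but reads planar I420 chroma:
/// a full-size Y plane, then a U plane and a V plane of (width/2) x (height/2).
Uint8List convertI420ToRGB(Uint8List i420Data, int width, int height) {
  final ySize = width * height;
  final uOffset = ySize;
  final vOffset = ySize + ySize ~/ 4;
  final rgb = Uint8List(ySize * 3);
  var rgbIndex = 0;
  for (var y = 0; y < height; y++) {
    for (var x = 0; x < width; x++) {
      final chromaIndex = (y ~/ 2) * (width ~/ 2) + (x ~/ 2);
      final c = i420Data[y * width + x] - 16;
      final d = i420Data[uOffset + chromaIndex] - 128;
      final e = i420Data[vOffset + chromaIndex] - 128;
      rgb[rgbIndex++] = ((298 * c + 409 * e + 128) >> 8).clamp(0, 255);
      rgb[rgbIndex++] = ((298 * c - 100 * d - 208 * e + 128) >> 8).clamp(0, 255);
      rgb[rgbIndex++] = ((298 * c + 516 * d + 128) >> 8).clamp(0, 255);
    }
  }
  return rgb;
}

If the colors come back with this variant, the frame is planar I420 and the NV12 parsing was the culprit; if both stay grey, the chroma data is probably not laid out the way either converter expects.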
@binvb Can you please bring your fork up to date with the latest changes? @cloudwebrtc Can we have this in the next release, please?
Any updates? This functionality would be interesting to use with ML Kit.
@cloudwebrtc Is there any plan to merge this PR?
@binvb I don't know if this will help, but I ran into the same color issue and resolved it with the code below.

webrtc::VideoFrame VideoCapturer::MatToVideoFrame(const cv::Mat& mat) {
  cv::Mat yuv;
  cv::cvtColor(mat, yuv, cv::COLOR_BGR2YUV_I420);
  int width = mat.cols;
  int height = mat.rows;

  rtc::scoped_refptr<webrtc::I420Buffer> buffer = webrtc::I420Buffer::Create(width, height);
  memcpy(buffer->MutableDataY(), yuv.data, width * height);
  memcpy(buffer->MutableDataU(), yuv.data + width * height, width * height / 4);
  memcpy(buffer->MutableDataV(), yuv.data + width * height + width * height / 4, width * height / 4);

  webrtc::VideoFrame frame = webrtc::VideoFrame::Builder()
      .set_video_frame_buffer(buffer)
      .set_rotation(webrtc::kVideoRotation_0)
      .set_timestamp_us(0)
      .set_id(0)
      .build();
  return frame;
}

cv::Mat VideoCapturer::VideoFrameToMat(const webrtc::VideoFrame& frame) {
  rtc::scoped_refptr<webrtc::I420BufferInterface> buffer = frame.video_frame_buffer()->ToI420();
  int width = buffer->width();
  int height = buffer->height();

  cv::Mat mat(height, width, CV_8UC3);
  cv::Mat yuv(height + height / 2, width, CV_8UC1);
  memcpy(yuv.data, buffer->DataY(), width * height);
  memcpy(yuv.data + width * height, buffer->DataU(), width * height / 4);
  memcpy(yuv.data + width * height + width * height / 4, buffer->DataV(), width * height / 4);
  cv::cvtColor(yuv, mat, cv::COLOR_YUV2BGR_I420);

  auto now = std::time(nullptr);
  std::tm tm;
  localtime_r(&now, &tm);
  std::ostringstream oss;
  oss << std::put_time(&tm, "%Y-%m-%d %H:%M:%S");

  int fontFace = cv::FONT_HERSHEY_SIMPLEX;
  double fontScale = 1;
  int thickness = 1;
  cv::Point textOrg(10, mat.rows - 10);
  cv::putText(mat, oss.str(), textOrg, fontFace, fontScale, cv::Scalar(33, 37, 43), thickness);
  return mat;
}

This code exports a frame to an OpenCV cv::Mat rather than to an image file, but it handles the frames the same way as your image code, so I hope it helps.