This original article was first published on the WeChat official account: 字节流动 (ByteFlow).
This article uses FFmpeg to decode the video stream of an MP4 file, then uses libswscale to convert the decoded YUV frames into RGBA frames, and finally renders them with ANativeWindow.
FFmpeg Video Decoding
Referring to Dr. Lei Xiaohua (雷霄骅)'s audio/video processing flow diagram, the overall pipeline for this section is: (MP4 file) protocol handling -> demuxing -> video decoding -> scaling or pixel-format conversion -> rendering.
The FFmpeg structs you need to know:
- AVFormatContext: the demuxing (container) context; holds the file name, audio/video streams, duration, bit rate, and other information;
- AVCodecContext: the codec context, required for both encoding and decoding; holds the codec type, video width/height, audio channel count and sample rate, and other information;
- AVCodec: stores information about an encoder/decoder;
- AVStream: stores information about an audio or video stream;
- AVPacket: stores encoded (compressed) audio or video data;
- AVFrame: stores decoded (raw) audio or video data;
Video decoding procedure:
```cpp
//1. Create the demuxing (format) context
m_AVFormatContext = avformat_alloc_context();

//2. Open the input file and demux it
if(avformat_open_input(&m_AVFormatContext, m_Url, NULL, NULL) != 0) {
    LOGCATE("DecoderBase::InitFFDecoder avformat_open_input fail.");
    break;
}

//3. Read the audio/video stream information
if(avformat_find_stream_info(m_AVFormatContext, NULL) < 0) {
    LOGCATE("DecoderBase::InitFFDecoder avformat_find_stream_info fail.");
    break;
}

//4. Find the index of the audio/video stream
for(int i = 0; i < m_AVFormatContext->nb_streams; i++) {
    if(m_AVFormatContext->streams[i]->codecpar->codec_type == m_MediaType) {
        m_StreamIndex = i;
        break;
    }
}

if(m_StreamIndex == -1) {
    LOGCATE("DecoderBase::InitFFDecoder Fail to find stream index.");
    break;
}

//5. Get the decoder parameters
AVCodecParameters *codecParameters = m_AVFormatContext->streams[m_StreamIndex]->codecpar;

//6. Find the decoder by codec_id
m_AVCodec = avcodec_find_decoder(codecParameters->codec_id);
if(m_AVCodec == nullptr) {
    LOGCATE("DecoderBase::InitFFDecoder avcodec_find_decoder fail.");
    break;
}

//7. Create the decoder context
m_AVCodecContext = avcodec_alloc_context3(m_AVCodec);
if(avcodec_parameters_to_context(m_AVCodecContext, codecParameters) < 0) {
    LOGCATE("DecoderBase::InitFFDecoder avcodec_parameters_to_context fail.");
    break;
}

//8. Open the decoder
int result = avcodec_open2(m_AVCodecContext, m_AVCodec, NULL);
if(result < 0) {
    LOGCATE("DecoderBase::InitFFDecoder avcodec_open2 fail. result=%d", result);
    break;
}

//9. Allocate the structs that hold the encoded and decoded data
m_Packet = av_packet_alloc(); // AVPacket holds the encoded (compressed) data
m_Frame = av_frame_alloc();   // AVFrame holds the decoded data

//10. Decoding loop
while (av_read_frame(m_AVFormatContext, m_Packet) >= 0) { // read one packet
    if (m_Packet->stream_index == m_StreamIndex) {
        if (avcodec_send_packet(m_AVCodecContext, m_Packet) != 0) { // feed the decoder
            return -1;
        }
        while (avcodec_receive_frame(m_AVCodecContext, m_Frame) == 0) {
            // m_Frame now holds one decoded frame; do the format conversion here,
            // then render it. ANativeWindow rendering is covered in the next section.
        }
    }
    av_packet_unref(m_Packet); // release the packet reference to avoid a memory leak
}

//11. Release resources when decoding is finished
if(m_Frame != nullptr) {
    av_frame_free(&m_Frame);
    m_Frame = nullptr;
}

if(m_Packet != nullptr) {
    av_packet_free(&m_Packet);
    m_Packet = nullptr;
}

if(m_AVCodecContext != nullptr) {
    avcodec_close(m_AVCodecContext);
    avcodec_free_context(&m_AVCodecContext);
    m_AVCodecContext = nullptr;
    m_AVCodec = nullptr;
}

if(m_AVFormatContext != nullptr) {
    avformat_close_input(&m_AVFormatContext);
    avformat_free_context(m_AVFormatContext);
    m_AVFormatContext = nullptr;
}
```
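The loop above stops as soon as `av_read_frame` fails, but the decoder may still be holding a few buffered frames. A minimal sketch (not part of the original flow) of draining those remaining frames at end of stream, using the same member names as above:

```cpp
// End-of-stream draining: send a NULL packet to put the decoder into draining mode,
// then receive until it reports AVERROR_EOF.
avcodec_send_packet(m_AVCodecContext, nullptr);
while (avcodec_receive_frame(m_AVCodecContext, m_Frame) == 0) {
    // Handle these last buffered frames exactly like the ones in the loop above.
}
// Optional: reset the decoder's internal state if you plan to seek and reuse it.
avcodec_flush_buffers(m_AVCodecContext);
```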
Rendering Decoded Frames with ANativeWindow
Every operating system defines its own window system, and ANativeWindow is Android's native window. The Surface object handed down from the Java layer is, on the native side, backed by a concrete implementation of ANativeWindow, so an ANativeWindow essentially represents a block of screen buffer.
To render a frame of image, we only need to write the pixel data into the screen buffer that the ANativeWindow represents, as sketched below.
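A minimal sketch of that idea, assuming `window` was already obtained (for example via `ANativeWindow_fromSurface`, shown later) and configured as RGBA_8888; it simply fills the whole buffer with opaque red:

```cpp
#include <android/native_window.h>
#include <cstdint>

// Lock the window, write pixels into its buffer, then post it to the display.
void FillWindowWithRed(ANativeWindow *window) {
    ANativeWindow_Buffer buffer;
    if (ANativeWindow_lock(window, &buffer, nullptr) != 0) return;

    auto *pixels = static_cast<uint32_t *>(buffer.bits);
    for (int y = 0; y < buffer.height; ++y) {
        for (int x = 0; x < buffer.width; ++x) {
            // RGBA_8888 bytes are R, G, B, A; as a little-endian uint32 that is 0xAABBGGRR.
            pixels[y * buffer.stride + x] = 0xFF0000FF; // opaque red
        }
    }

    ANativeWindow_unlockAndPost(window); // submit the buffer to the screen
}
```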
The pixel formats ANativeWindow accepts (excerpt from the Android headers):

```cpp
enum {
    // NOTE: these values must match the values from graphics/common/x.x/types.hal

    /** Red: 8 bits, Green: 8 bits, Blue: 8 bits, Alpha: 8 bits. **/
    WINDOW_FORMAT_RGBA_8888 = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,

    /** Red: 8 bits, Green: 8 bits, Blue: 8 bits, Unused: 8 bits. **/
    WINDOW_FORMAT_RGBX_8888 = AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM,

    /** Red: 5 bits, Green: 6 bits, Blue: 5 bits. **/
    WINDOW_FORMAT_RGB_565 = AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM,
};
```
Note that ANativeWindow only supports RGB-type image data, so we also need the libswscale library to convert the decoded YUV data to RGB.
Converting the image format with libswscale involves the following steps:
```cpp
//1. Allocate a buffer to hold the RGBA image
m_VideoWidth = m_AVCodecContext->width;
m_VideoHeight = m_AVCodecContext->height;
m_RGBAFrame = av_frame_alloc();

// Compute the required buffer size
int bufferSize = av_image_get_buffer_size(AV_PIX_FMT_RGBA, m_VideoWidth, m_VideoHeight, 1);

// Allocate the buffer and bind it to m_RGBAFrame
m_FrameBuffer = (uint8_t *) av_malloc(bufferSize * sizeof(uint8_t));
av_image_fill_arrays(m_RGBAFrame->data, m_RGBAFrame->linesize,
                     m_FrameBuffer, AV_PIX_FMT_RGBA, m_VideoWidth, m_VideoHeight, 1);

//2. Create the conversion context
// The destination size must match the RGBA buffer allocated above.
m_SwsContext = sws_getContext(m_VideoWidth, m_VideoHeight, m_AVCodecContext->pix_fmt,
                              m_VideoWidth, m_VideoHeight, AV_PIX_FMT_RGBA,
                              SWS_FAST_BILINEAR, NULL, NULL, NULL);

//3. Convert one frame
sws_scale(m_SwsContext, frame->data, frame->linesize, 0,
          m_VideoHeight, m_RGBAFrame->data, m_RGBAFrame->linesize);

//4. Release resources
if(m_RGBAFrame != nullptr) {
    av_frame_free(&m_RGBAFrame);
    m_RGBAFrame = nullptr;
}

if(m_FrameBuffer != nullptr) {
    av_free(m_FrameBuffer); // allocated with av_malloc, so release with av_free
    m_FrameBuffer = nullptr;
}

if(m_SwsContext != nullptr) {
    sws_freeContext(m_SwsContext);
    m_SwsContext = nullptr;
}
```
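To show where step 3 sits in practice, here is a minimal sketch of the per-frame path inside the decoding loop from the previous section, reusing the members set up above; `RenderRGBAFrame` is a hypothetical name standing in for the ANativeWindow rendering shown next:

```cpp
// For every decoded frame: convert YUV -> RGBA, then hand it to the renderer.
while (avcodec_receive_frame(m_AVCodecContext, m_Frame) == 0) {
    sws_scale(m_SwsContext, m_Frame->data, m_Frame->linesize, 0,
              m_VideoHeight, m_RGBAFrame->data, m_RGBAFrame->linesize);
    RenderRGBAFrame(m_RGBAFrame, m_VideoWidth, m_VideoHeight); // hypothetical render call
}
```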
Now that we have the image in RGBA format, we can render it with ANativeWindow.
```cpp
//1. Get an ANativeWindow from the Surface object passed down by the Java-layer SurfaceView
m_NativeWindow = ANativeWindow_fromSurface(env, surface);

//2. Set the buffer size and input pixel format
ANativeWindow_setBuffersGeometry(m_NativeWindow, m_VideoWidth, m_VideoHeight, WINDOW_FORMAT_RGBA_8888);

//3. Render
ANativeWindow_Buffer m_NativeWindowBuffer;

// Lock the window and get a pointer to its screen buffer
ANativeWindow_lock(m_NativeWindow, &m_NativeWindowBuffer, nullptr);
uint8_t *dstBuffer = static_cast<uint8_t *>(m_NativeWindowBuffer.bits);

int srcLineSize = m_RGBAFrame->linesize[0];        // source stride (bytes per row)
int dstLineSize = m_NativeWindowBuffer.stride * 4; // destination stride of the RGBA window buffer

for (int i = 0; i < m_VideoHeight; ++i) {
    // Copy the image row by row
    memcpy(dstBuffer + i * dstLineSize, m_FrameBuffer + i * srcLineSize, srcLineSize);
}

// Unlock the window and post the buffer to the display
ANativeWindow_unlockAndPost(m_NativeWindow);

//4. Release the ANativeWindow
if(m_NativeWindow)
    ANativeWindow_release(m_NativeWindow);
```
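For completeness, here is a hedged sketch of how `env` and `surface` typically reach the native code: the Java layer passes a Surface (for example from a SurfaceHolder callback) through a JNI method. The class, method, and helper names below are hypothetical, not from the original article:

```cpp
#include <jni.h>
#include <android/native_window_jni.h>

// Hypothetical JNI entry point that receives the Surface from Java.
extern "C" JNIEXPORT void JNICALL
Java_com_example_player_NativePlayer_nativeSetSurface(JNIEnv *env, jobject /* thiz */, jobject surface) {
    // ANativeWindow_fromSurface acquires a reference; release it with ANativeWindow_release when done.
    ANativeWindow *window = ANativeWindow_fromSurface(env, surface);
    // Hand `window` to the decode/render thread here.
}
```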
That is the complete process of decoding and playing video with FFmpeg + ANativeWindow.
References
https://www.gaodaima.com/leixiaohua1020
Contact & Discussion
For questions or technical discussion, you can add me on WeChat: Byte-Flow