FFmpeg hard decoding does not actually use FFmpeg's own decoders; instead, FFmpeg looks up the hardware decoder provided by the system. On Android this means calling the system decoder middleware, MediaCodec, through reflection. There are many articles about FFmpeg hard decoding online, and the official FFmpeg demos are also well documented. However, almost all of this public material covers playing video files (AVCC format); very little covers hard decoding AnnexB video streams.
As is well known, before decoding with MediaCodec on Android you must configure the video stream parameters through MediaFormat, or the decoder will not start correctly. In FFmpeg, when hard decoding from a video file, FFmpeg can automatically read the extradata containing SPS, PPS, and other information from the file header, and set AVCodecContext->extradata and AVCodecContext->extradata_size so that avcodec_open2 can open the hardware decoder correctly. The extradata stores the same information that MediaFormat carries: SPS, PPS, width, height, and so on. An AnnexB video stream, however, has no file header from which to read the video parameters into extradata, so avcodec_open2 fails, just as MediaCodec fails in Java when MediaFormat is not configured first.
1. Hard decoding
Although an AnnexB video stream has no file header, frames in the stream carry NALUs containing the SPS and PPS information. Before opening the decoder, we can pull one frame's AVPacket with av_read_frame, extract the extradata from that AVPacket using the extract_extradata bitstream filter, and populate AVCodecContext with it so that avcodec_open2 succeeds:
int extract_extradata(AVCodecContext *pCodecCtx, AVPacket *packet, uint8_t **extradata_dest, int *extradata_size_dest)
{
const AVBitStreamFilter *bsf;
int ret;
if( (bsf = av_bsf_get_by_name("extract_extradata")) == NULL)
{
LOGD("failed to get extract_extradata bsf\n");
return 0;
}
printf("\nfound bsf\n");
AVBSFContext *bsf_context;
if( (ret=av_bsf_alloc(bsf, &bsf_context) ) < 0)
{
LOGD("failed to alloc bsf contextx\n");
return 0;
}
printf("alloced bsf context\n");
if( (ret=avcodec_parameters_from_context(bsf_context->par_in, pCodecCtx) ) < 0)
{
LOGD("failed to copy parameters from contextx\n");
av_bsf_free(&bsf_context);
return 0;
}
printf("copied bsf params\n");
if( (ret = av_bsf_init(bsf_context)) < 0 )
{
LOGD("failed to init bsf contextx\n");
av_bsf_free(&bsf_context);
return 0;
}
printf("initialized bsf context\n");
AVPacket *packet_ref = av_packet_alloc();
if(av_packet_ref(packet_ref, packet) < 0 )
{
LOGD("failed to ref packet\n");
av_bsf_free(&bsf_context);
return 0;
}
// make sure refs are used correctly
// this probably resets packet
if((ret = av_bsf_send_packet(bsf_context, packet_ref)) < 0)
{
LOGD("failed to send packet to bsf\n");
av_packet_unref(packet_ref);
av_bsf_free(&bsf_context);
return 0;
}
printf("sent packet to bsf\n");
int done=0;
while (ret >= 0 && !done) / /! h->decoder_ctx->extradata)
{
int extradata_size;
uint8_t *extradata;
ret = av_bsf_receive_packet(bsf_context, packet_ref);
if (ret < 0)
{
if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
{
LOGD("bsf error, not eagain or eof\n");
av_packet_free(&packet_ref);
av_bsf_free(&bsf_context);
return 0;
}
continue;
}
extradata = av_packet_get_side_data(packet_ref, AV_PKT_DATA_NEW_EXTRADATA, &extradata_size);
if (extradata)
{
LOGD("got extradata, %d size!\n", extradata_size);
done = 1;
*extradata_dest = (uint8_t *) av_mallocz(extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(*extradata_dest, extradata, extradata_size);
*extradata_size_dest = extradata_size;
}
// release every received packet, whether or not it carried extradata
av_packet_unref(packet_ref);
}
av_packet_free(&packet_ref);
av_bsf_free(&bsf_context);
return done;
}
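To show how this helper fits in, here is a minimal sketch of wiring it up before opening the hardware decoder. The names pFormatCtx, pCodec, and video_index are assumptions, not from the original code: read packets until one yields extradata, install it on the codec context, then open the decoder.
AVPacket packet;
uint8_t *extradata = NULL;
int extradata_size = 0;
while (av_read_frame(pFormatCtx, &packet) >= 0) {
    int got = 0;
    if (packet.stream_index == video_index)
        got = extract_extradata(pCodecCtx, &packet, &extradata, &extradata_size);
    av_packet_unref(&packet);
    if (got) {
        // the codec context takes ownership of the av_mallocz'd, padded buffer
        pCodecCtx->extradata = extradata;
        pCodecCtx->extradata_size = extradata_size;
        break;
    }
}
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
    LOGD("avcodec_open2 failed\n");
}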
2. Capturing JPEG
If soft decoding yields YUV420P, you can pass it to the MJPEG encoder by declaring the input format as AV_PIX_FMT_YUVJ420P directly. Note that YUVJ420P is not the same as YUV420P: YUVJ420P uses the full JPEG color_range, which is what JPEG encoding expects (see stackoverflow.com/a/33939577). Hard decoding, however, yields NV12 rather than YUV420P, and feeding NV12 data to the encoder while declaring AV_PIX_FMT_YUVJ420P crashes during encoding. So before encoding the JPEG, convert the frame to AV_PIX_FMT_YUVJ420P with sws_scale.
AVFrame *pFrameYUVJ420;
if (pix_fmt != AV_PIX_FMT_YUVJ420P) {
pFrameYUVJ420 = av_frame_alloc();
if (pFrameYUVJ420 == NULL) {
LOGD("Could not allocate video frame: pFrameYUVJ420.");
return -1;
}
// Determine required buffer size and allocate buffer
// The data in the buffer is used for encoding; the format is YUVJ420P
int numBytes = av_image_get_buffer_size(AV_PIX_FMT_YUVJ420P, pFrame->width, pFrame->height,
1);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
av_image_fill_arrays(pFrameYUVJ420->data, pFrameYUVJ420->linesize, buffer,
AV_PIX_FMT_YUVJ420P,
pFrame->width, pFrame->height, 1);
// Since the decoded frame format is not YUVJ420, format conversion is required before encoding
struct SwsContext *sws_ctx = sws_getContext(pFrame->width,
pFrame->height,
pix_fmt,
pFrame->width,
pFrame->height,
AV_PIX_FMT_YUVJ420P,
SWS_BILINEAR,
NULL, NULL, NULL);
// Format conversion
sws_scale(sws_ctx, (uint8_t const *const *) pFrame->data,
pFrame->linesize, 0, pFrame->height,
pFrameYUVJ420->data, pFrameYUVJ420->linesize);
pFrameYUVJ420->format = AV_PIX_FMT_YUVJ420P;
pFrameYUVJ420->width = pFrame->width;
pFrameYUVJ420->height = pFrame->height;
av_frame_free(&pFrame); // releases the frame's buffers and the AVFrame itself
} else {
pFrameYUVJ420 = pFrame;
}
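With the frame converted, encoding it to a JPEG is straightforward. A minimal sketch follows (error handling trimmed; the variable names jpegCodec/jpegCtx/pkt are assumptions):
const AVCodec *jpegCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
AVCodecContext *jpegCtx = avcodec_alloc_context3(jpegCodec);
jpegCtx->pix_fmt = AV_PIX_FMT_YUVJ420P;   // full-range input, as discussed above
jpegCtx->color_range = AVCOL_RANGE_JPEG;
jpegCtx->width = pFrameYUVJ420->width;
jpegCtx->height = pFrameYUVJ420->height;
jpegCtx->time_base = (AVRational){1, 25}; // required by the encoder; value is irrelevant for one image
if (avcodec_open2(jpegCtx, jpegCodec, NULL) >= 0) {
    AVPacket *pkt = av_packet_alloc();
    if (avcodec_send_frame(jpegCtx, pFrameYUVJ420) >= 0 &&
        avcodec_receive_packet(jpegCtx, pkt) >= 0) {
        // pkt->data / pkt->size now hold a complete JPEG image; write them to a file
    }
    av_packet_free(&pkt);
}
avcodec_free_context(&jpegCtx);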
3. OpenGL rendering
Soft decoding produces YUV420P while hard decoding produces NV12, and the two interleave their data differently, so the number of textures to create differs and so does the fragment shader: YUV420P requires 3 textures, NV12 only 2. Since a video player typically supports both hard and soft decoding, the shader below supports multiple formats.
Vertex shader and fragment shader:
// Vertex shader GLSL
#define GET_STR(x) #x
static const char *vertexShader = GET_STR(
attribute vec4 aPosition; // Vertex coordinates
attribute vec2 aTexCoord; // Texture vertex coordinates
varying vec2 vTexCoord; // Output material coordinates
void main(){
vTexCoord = vec2(aTexCoord.x, 1.0 - aTexCoord.y);
gl_Position = aPosition;
});

// Fragment shader GLSL
static const char *fragYUV420P = GET_STR(
precision mediump float; // precision
varying vec2 vTexCoord; // Coordinates passed by the vertex shader
uniform sampler2D yTexture; // Y plane texture (single-channel luminance)
uniform sampler2D uTexture;
uniform sampler2D vTexture;
uniform int u_ImgType;// 1:RGBA, 2:NV21, 3:NV12, 4:I420
void main() {
if (u_ImgType == 1) //RGBA
{
gl_FragColor = texture2D(yTexture, vTexCoord);
}
else if(u_ImgType == 2) //NV21
{
vec3 yuv;
vec3 rgb;
yuv.r = texture2D(yTexture,vTexCoord).r;
yuv.g = texture2D(uTexture,vTexCoord).a - 0.5;
yuv.b = texture2D(uTexture,vTexCoord).r - 0.5;
rgb = mat3(1.0, 1.0, 1.0,
0.0, -0.39465, 2.03211,
1.13983, -0.58060, 0.0) * yuv;
// Outputs pixel colors
gl_FragColor = vec4(rgb,1.0);
}
else if(u_ImgType == 3) //NV12
{
vec3 yuv;
vec3 rgb;
yuv.r = texture2D(yTexture,vTexCoord).r;
yuv.g = texture2D(uTexture,vTexCoord).r - 0.5;
yuv.b = texture2D(uTexture,vTexCoord).a - 0.5;
rgb = mat3(1.0, 1.0, 1.0,
0.0, -0.39465, 2.03211,
1.13983, -0.58060, 0.0) * yuv;
// Outputs pixel colors
gl_FragColor = vec4(rgb,1.0);
}
else if(u_ImgType == 4) //I420
{
vec3 yuv;
vec3 rgb;
yuv.r = texture2D(yTexture,vTexCoord).r;
yuv.g = texture2D(uTexture,vTexCoord).r - 0.5;
yuv.b = texture2D(vTexture,vTexCoord).r - 0.5;
rgb = mat3(1.0, 1.0, 1.0,
0.0, -0.39465, 2.03211,
1.13983, -0.58060, 0.0) * yuv;
// Outputs pixel colors
gl_FragColor = vec4(rgb,1.0);
}
else
{
gl_FragColor = vec4(1.0);
}
});
Uploading the texture data:
switch (pCodecCtx->pix_fmt) {
case AV_PIX_FMT_RGBA:
{
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texts[0]);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pFrame->data[0]);
}
break;
case AV_PIX_FMT_NV21:
case AV_PIX_FMT_NV12:
{
// Activate layer 1 texture and bind it to the created OpengL texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,texts[0]);
// Replace the texture content
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, pFrame->data[0]);
//update UV plane data
glActiveTexture(GL_TEXTURE0+1);
glBindTexture(GL_TEXTURE_2D, texts[1]);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width/2, height/2, GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE, pFrame->data[1]);
}
break;
case AV_PIX_FMT_YUV420P:
{
// Activate layer 1 texture and bind it to the created OpengL texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,texts[0]);
// Replace the texture content
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_LUMINANCE, GL_UNSIGNED_BYTE, pFrame->data[0]);
// Activate the layer 2 texture, bind to the created OpengL texture
glActiveTexture(GL_TEXTURE0+1);
glBindTexture(GL_TEXTURE_2D,texts[1]);
// Replace the texture content
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width/2, height/2, GL_LUMINANCE, GL_UNSIGNED_BYTE, pFrame->data[1]);
// Activate layer 3 texture and bind it to the created OpengL texture
glActiveTexture(GL_TEXTURE0+2);
glBindTexture(GL_TEXTURE_2D,texts[2]);
// Replace the texture content
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width/2, height/2, GL_LUMINANCE, GL_UNSIGNED_BYTE, pFrame->data[2]);
}
break;
}
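The glTexSubImage2D calls above only update textures that already exist; the storage must be allocated once at setup with glTexImage2D. Here is a sketch for the three-plane YUV420P case, an assumption of how the textures in texts[] might be created (for NV12/NV21, the second texture would instead use GL_LUMINANCE_ALPHA at width/2 x height/2):
GLuint texts[3];
glGenTextures(3, texts);
for (int i = 0; i < 3; i++) {
    // Y plane is full size; U and V planes are half size in each dimension
    int w = (i == 0) ? width : width / 2;
    int h = (i == 0) ? height : height / 2;
    glBindTexture(GL_TEXTURE_2D, texts[i]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, w, h, 0,
                 GL_LUMINANCE, GL_UNSIGNED_BYTE, NULL);
}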
4. Encoding to MP4
If the resulting MP4 file cannot be played, or plays only in VLC while phones, PCs, and Macs cannot even load its thumbnail, the MP4 file header is probably wrong. The key is to set extradata and extradata_size on the output stream's AVStream->codecpar, and to set AV_CODEC_FLAG_GLOBAL_HEADER. When the input comes from a file, extradata and extradata_size can be copied directly from the AVCodecContext into AVStream->codecpar; for an AnnexB stream they must first be extracted from an AVPacket as described in section 1 and then copied into AVStream->codecpar.
AVStream *in_stream = ifmt_ctx_v->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
videoindex_v=i;
if (!out_stream) {
LOGD("Failed allocating output stream");
ret = AVERROR_UNKNOWN;
goto end;
}
videoindex_out=out_stream->index;
// Copy the settings of AVCodecContext
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
if (ret < 0) {
LOGD("Failed to copy context from input to output stream codec context");
goto end;
}
// extradata is needed so the muxer can write a correct file header
out_stream->codecpar->extradata = (uint8_t *) av_mallocz(pCodecCtx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(out_stream->codecpar->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
out_stream->codecpar->extradata_size = pCodecCtx->extradata_size;
LOGD("got extradata, %d size!\n", out_stream->codecpar->extradata_size);
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
LOGD( "AV_CODEC_FLAG_GLOBAL_HEADER");
}
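With the output stream configured, the usual muxing sequence applies. A compressed sketch (out_filename and the packet loop are placeholders, not from the original code):
if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
    LOGD("Could not open output file");
    goto end;
}
if (avformat_write_header(ofmt_ctx, NULL) < 0) { // the muxer uses codecpar->extradata set above
    LOGD("Error occurred when writing header");
    goto end;
}
// ... for each packet: rescale pts/dts/duration to out_stream->time_base,
// set pkt.stream_index = videoindex_out, then:
// av_interleaved_write_frame(ofmt_ctx, &pkt);
av_write_trailer(ofmt_ctx);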
References:
blog.csdn.net/yue_huang/a…
blog.csdn.net/special00/a…
github.com/bmegli/hard…
github.com/bmegli/hard…
github.com/githubhaoha…
stackoverflow.com/a/33939577
blog.csdn.net/Kennethdroi…
www.jianshu.com/p/65d926ba1…
qincji.gitee.io/2021/02/01/…