The entire project is available in the release at github.com/ximikang/ff…
Steps to generate thumbnails (an overall skeleton is sketched below):
- Decode the video with FFmpeg
- Convert the frame format
- Take frames from the video stream based on the number of thumbnails
- Use OpenCV to create a canvas and generate the thumbnail
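Before going through each step, here is a rough skeleton of how they fit together. This is only an illustrative sketch, not the project's actual driver code: the function name generateThumbnail and the output file "thumbnail.jpg" are made up, while makeThumbnail is the helper defined later in this article.

```cpp
#include <vector>
#include <opencv2/opencv.hpp>

// Defined later in the article: pastes the collected frames onto a white canvas.
cv::Mat makeThumbnail(std::vector<cv::Mat> vImage, unsigned int rowNums, unsigned int colNums);

// Hypothetical driver showing how the steps fit together.
int generateThumbnail(unsigned int rowNums, unsigned int colNums)
{
    std::vector<cv::Mat> vImage;
    // 1. Open the container with FFmpeg and locate the video stream.
    // 2. Seek through the stream and decode one frame per time step.
    // 3. Convert each decoded YUV frame to a BGR cv::Mat with sws_scale.
    //    Steps 1-3 fill vImage and are shown in the sections below.
    // 4. Paste the frames onto a canvas and write the result to disk.
    cv::Mat sheet = makeThumbnail(vImage, rowNums, colNums);
    cv::imwrite("thumbnail.jpg", sheet);
    return 0;
}
```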
FFmpeg decodes the video
Take frames from the video stream based on the number of thumbnails
- Get the time interval between images
// Read the media file and the header information from the container format
AVFormatContext* pFormatContext = avformat_alloc_context();
if (!pFormatContext) {
    logging("ERROR could not allocate memory for format context");
    return -1;
}
if (avformat_open_input(&pFormatContext, inputFilePath.string().c_str(), NULL, NULL) != 0) {
    logging("ERROR could not open media file");
}
logging("format %s, duration %lld us, bit_rate %lld", pFormatContext->iformat->name, pFormatContext->duration, pFormatContext->bit_rate);
cout << "Video duration: " << pFormatContext->duration / 1000.0 / 1000.0 << "s" << endl;
int64_t video_duration = pFormatContext->duration;
int sum_count = rowNums * colNums;
// Time interval (in ms) to jump between thumbnails
int64_t time_step = video_duration / sum_count / 1000;
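The snippets that follow use video_stream_index, pCodecContext, pFrame, and pPacket, which are set up before this point. A minimal sketch of the usual FFmpeg setup for them (error checks omitted; this is my reconstruction, not necessarily the project's exact code):

```cpp
// Find the video stream and open its decoder (typical FFmpeg boilerplate)
avformat_find_stream_info(pFormatContext, NULL);
int video_stream_index = av_find_best_stream(pFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
AVCodecParameters* pCodecParams = pFormatContext->streams[video_stream_index]->codecpar;
const AVCodec* pCodec = avcodec_find_decoder(pCodecParams->codec_id);
AVCodecContext* pCodecContext = avcodec_alloc_context3(pCodec);
avcodec_parameters_to_context(pCodecContext, pCodecParams);
avcodec_open2(pCodecContext, pCodec, NULL);
AVFrame* pFrame = av_frame_alloc();
AVPacket* pPacket = av_packet_alloc();
```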
- Set the seek time to obtain video packets at different positions
for (int i = 0; i < sum_count; ++i) {
    cv::Mat tempImage;
    // Read one image per time interval and store it in vImage
    while (av_read_frame(pFormatContext, pPacket) >= 0) {
        if (pPacket->stream_index == video_stream_index) {
            response = decode_packet_2mat(pPacket, pCodecContext, pFrame, tempImage); // returns 0 when a frame was decoded
        }
        av_packet_unref(pPacket); // release the packet buffer
        if (response == 0) // successfully decoded a frame
            break;
        if (response < 0)
            continue;
    }
    vImage.push_back(tempImage);
    // Seek to the next time point
    av_seek_frame(pFormatContext, -1, ((double)time_step / (double)1000) * AV_TIME_BASE * (double)(i + 1) + (double)pFormatContext->start_time, AVSEEK_FLAG_BACKWARD);
}
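One caveat the loop glosses over: after av_seek_frame() the decoder may still hold frames buffered from the previous position, so it is common to flush its internal buffers after each seek. This is not shown in the project snippet; a rough sketch, where target_ts stands for the timestamp expression used in the loop above:

```cpp
// Seek to the nearest keyframe before target_ts, then drop buffered frames
av_seek_frame(pFormatContext, -1, target_ts, AVSEEK_FLAG_BACKWARD);
avcodec_flush_buffers(pCodecContext);
```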
Obtaining the Frame: at a fixed time point, the Packet read at that position may not decode into a Frame, so the result of each decode has to be checked. If no Frame is obtained, the next Packet is read, and so on until a Frame is decoded.
static int decode_packet_2mat(AVPacket* pPacket, AVCodecContext* pCodecContext, AVFrame* pFrame, cv::Mat& image) {
    // Send the compressed packet to the decoder
    int response = avcodec_send_packet(pCodecContext, pPacket);
    if (response < 0) {
        logging("Error while sending a packet to the decoder");
        return response;
    }
    while (response >= 0) {
        // Receive decoded data from the decoder
        response = avcodec_receive_frame(pCodecContext, pFrame);
        if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
            // No frame available from this packet (or end of stream reached)
            logging("AVERROR(EAGAIN) or AVERROR_EOF");
            break;
        }
        else if (response < 0) {
            logging("Error while receiving a frame from the decoder");
            return response;
        }
        if (response >= 0) {
            // Got a frame: convert it to a cv::Mat
            image = frame2Mat(pFrame, pCodecContext->pix_fmt);
        }
        return 0;
    }
    // No frame was decoded from this packet; the caller reads the next one
    return response;
}
Frame format conversion
The Frame obtained from the video stream is in a YUV pixel format, while the subsequent operations use OpenCV, so a format conversion is needed. First, FFmpeg's SwsContext is used to convert the extracted frames from YUV to BGR; then the raw data of the BGR frame is read from memory and turned into OpenCV's Mat type.
cv::Mat frame2Mat(AVFrame* pFrame, AVPixelFormat pPixFormat)
{
    // Allocate a BGR frame and the buffer that backs it
    AVFrame* pRGBFrame = av_frame_alloc();
    uint8_t* out_buffer = new uint8_t[avpicture_get_size(AV_PIX_FMT_BGR24, pFrame->width, pFrame->height)];
    avpicture_fill((AVPicture*)pRGBFrame, out_buffer, AV_PIX_FMT_BGR24, pFrame->width, pFrame->height);
    SwsContext* rgbSwsContext = sws_getContext(pFrame->width, pFrame->height, pPixFormat, pFrame->width, pFrame->height, AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
    if (!rgbSwsContext) {
        logging("Error could not create frame to rgbframe sws context");
        exit(-1);
    }
    if (sws_scale(rgbSwsContext, pFrame->data, pFrame->linesize, 0, pFrame->height, pRGBFrame->data, pRGBFrame->linesize) < 0) {
        logging("Error could not sws to rgb frame");
        exit(-1);
    }
    // Copy the converted BGR data into an OpenCV Mat that owns its memory
    cv::Mat mRGB(cv::Size(pFrame->width, pFrame->height), CV_8UC3);
    memcpy(mRGB.data, pRGBFrame->data[0], pFrame->width * pFrame->height * 3);
    delete[] out_buffer;
    av_free(pRGBFrame);
    sws_freeContext(rgbSwsContext);
    return mRGB;
}
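As a side note, avpicture_get_size and avpicture_fill are deprecated in newer FFmpeg releases; the same buffer setup can be done with the helpers in libavutil/imgutils.h. A minimal sketch using the same variable names as above (not the project's code):

```cpp
#include <libavutil/imgutils.h>

// Equivalent buffer setup with the non-deprecated image helpers (align = 1)
int bufSize = av_image_get_buffer_size(AV_PIX_FMT_BGR24, pFrame->width, pFrame->height, 1);
uint8_t* out_buffer = new uint8_t[bufSize];
av_image_fill_arrays(pRGBFrame->data, pRGBFrame->linesize, out_buffer,
                     AV_PIX_FMT_BGR24, pFrame->width, pFrame->height, 1);
```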
Use OpenCV to create a canvas and generate the thumbnail
Draw a white canvas based on the required canvas size parameters and fill it with the extracted frames.
cv::Mat makeThumbnail(vector<cv::Mat> vImage, const unsigned int rowNums, const unsigned int colNums)
{
    // Check that the number of images matches the grid size
    if (vImage.size() != rowNums * colNums) {
        logging("Error image size not equal input size");
        logging("vImage length: %d, rowNums: %d, colNums: %d", (int)vImage.size(), rowNums, colNums);
        exit(-1);
    }
    int interval = 100; // margin between images, in pixels
    int height = vImage[0].size().height * rowNums + interval * (rowNums + 1);
    int width = vImage[0].size().width * colNums + interval * (colNums + 1);
    logging("thumbnail size: %d * %d", height, width);
    cv::Mat thumbnail(cv::Size(width, height), CV_8UC3);
    thumbnail.setTo(cv::Scalar::all(255)); // white canvas
    // Fill the canvas with the extracted frames
    for (int i = 0; i < rowNums; ++i) {
        for (int j = 0; j < colNums; ++j) {
            int no = i * colNums + j; // row-major index into vImage
            int widthOffset = (vImage[0].size().width + interval) * j + interval;
            int heightOffset = (vImage[0].size().height + interval) * i + interval;
            vImage[no].copyTo(thumbnail(cv::Rect(widthOffset, heightOffset, vImage[0].size().width, vImage[0].size().height)));
        }
    }
    return thumbnail;
}
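For completeness, a hypothetical call site: the 3x3 grid and the output file name "thumbnail.jpg" are placeholders for illustration, and vImage is assumed to hold the frames collected in the decode loop above.

```cpp
// Compose the collected frames into a 3x3 sheet and write it to disk
cv::Mat sheet = makeThumbnail(vImage, 3, 3);
cv::imwrite("thumbnail.jpg", sheet);
```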
Final result
Welcome to visit my personal blog: ximikang.icu