
Preface

In “Simple understanding of iOS CVPixelBuffer (Middle)”, we learned about the difference between the RGB and YUV color spaces and the related background knowledge, and finally went through the kCVPixelFormatType variants in CVPixelBuffer. In this article, we continue with some format conversions in CVPixelBuffer.

RGB and YUV format conversion

In many scenarios we need to convert between different color spaces to solve specific engineering problems. The conversion formulas are as follows:

YUV -> RGB

R = Y + 1.13983 * V
G = Y - 0.39465 * U - 0.58060 * V
B = Y + 2.03211 * U
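To make this concrete, here is a minimal per-pixel sketch in C of the YUV -> RGB formula above. The function and helper names are my own; it treats Y as 0-255 and assumes U and V already have the usual 128 offset removed. Real code should use a vectorized library such as libyuv instead.

// Clamp a float into the valid 0-255 byte range (illustrative helper)
static inline uint8_t clamp_byte(float v) {
    return (uint8_t)(v < 0.0f ? 0.0f : (v > 255.0f ? 255.0f : v));
}

// One pixel of YUV -> RGB, mirroring the formula above.
// u and v are centered values (stored byte minus 128).
static void yuv_to_rgb(uint8_t y, float u, float v,
                       uint8_t *r, uint8_t *g, uint8_t *b) {
    *r = clamp_byte(y + 1.13983f * v);
    *g = clamp_byte(y - 0.39465f * u - 0.58060f * v);
    *b = clamp_byte(y + 2.03211f * u);
}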

RGB -> YUV

Y = 0.299 * R + 0.587 * G + 0.114 * B
U = -0.14713 * R - 0.28886 * G + 0.436 * B
V = 0.615 * R - 0.51499 * G - 0.10001 * B
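And the reverse direction, under the same assumptions (again just an illustrative sketch with names of my own choosing):

// One pixel of RGB -> YUV, mirroring the formula above.
// The returned u and v are centered around 0; add 128 before storing as bytes.
static void rgb_to_yuv(uint8_t r, uint8_t g, uint8_t b,
                       uint8_t *y, float *u, float *v) {
    *y = (uint8_t)(0.299f * r + 0.587f * g + 0.114f * b);
    *u = -0.14713f * r - 0.28886f * g + 0.436f * b;
    *v = 0.615f * r - 0.51499f * g - 0.10001f * b;
}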

Common format conversion in iOS

On iOS, conversions between RGB and YUV are usually done with the open-source libyuv library. Accessing the official repository from mainland China may require a proxy; a domestic mirror of libyuv is also available.
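Depending on how libyuv is integrated (source drop-in, CocoaPods, or a prebuilt framework), the import typically looks something like the line below; the exact path is an assumption and varies by setup.

#import "libyuv.h" // umbrella header of the libyuv project; may be <libyuv/libyuv.h> in a framework build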

Converting CVPixelBuffer on iOS is somewhat involved: before operating on a buffer you must lock it with CVPixelBufferLockBaseAddress, and after processing you must unlock it with CVPixelBufferUnlockBaseAddress.
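The basic pattern is shown below (a minimal sketch; the processing step is a placeholder). When you only read from the buffer, pass kCVPixelBufferLock_ReadOnly instead of 0 to both calls.

CVPixelBufferLockBaseAddress(pixelBuffer, 0);       // lock before touching the memory
void *baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
// ... read from / write to baseAddress here ...
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);     // always unlock when done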

The following methods are ones I have collected from projects and day-to-day development; they are for reference only.

NV12 to I420

The core of the conversion is the NV12ToI420 method from the open-source libyuv library:

/// NV12 to I420
+ (CVPixelBufferRef)I420PixelBufferWithNV12:(CVImageBufferRef)cvpixelBufferRef {
    CVPixelBufferLockBaseAddress(cvpixelBufferRef, 0);
    // Image width (pixels)
    size_t pixelWidth = CVPixelBufferGetWidth(cvpixelBufferRef);
    // Image height (pixels)
    size_t pixelHeight = CVPixelBufferGetHeight(cvpixelBufferRef);
    // Get the Y-plane data of the CVPixelBufferRef
    const uint8_t *y_frame = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(cvpixelBufferRef, 0);
    // Get the UV-plane data of the CVPixelBufferRef
    const uint8_t *uv_frame = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(cvpixelBufferRef, 1);
    // Y stride
    size_t plane1_stride = CVPixelBufferGetBytesPerRowOfPlane(cvpixelBufferRef, 0);
    // UV stride
    size_t plane2_stride = CVPixelBufferGetBytesPerRowOfPlane(cvpixelBufferRef, 1);
    // Size of the I420 frame in bytes: full-size Y plane plus quarter-size U and V planes
    size_t frame_size = pixelWidth * pixelHeight * 3 / 2;
    uint8_t *buffer = (uint8_t *)malloc(frame_size);
    uint8_t *dst_u = buffer + pixelWidth * pixelHeight;
    uint8_t *dst_v = dst_u + pixelWidth * pixelHeight / 4;
    int ret = NV12ToI420(y_frame, (int)plane1_stride,
                         uv_frame, (int)plane2_stride,
                         buffer, (int)pixelWidth,
                         dst_u, (int)pixelWidth / 2,
                         dst_v, (int)pixelWidth / 2,
                         (int)pixelWidth, (int)pixelHeight);
    if (ret) {
        CVPixelBufferUnlockBaseAddress(cvpixelBufferRef, 0);
        free(buffer);
        return NULL;
    }
    NSDictionary *pixelAttributes = @{(id)kCVPixelBufferIOSurfacePropertiesKey : @{}};
    CVPixelBufferRef pixelBuffer = NULL;
    CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                          pixelWidth, pixelHeight,
                                          kCVPixelFormatType_420YpCbCr8Planar,
                                          (__bridge CFDictionaryRef)(pixelAttributes),
                                          &pixelBuffer);
    if (result != kCVReturnSuccess) {
        NSLog(@"Unable to create cvpixelbuffer %d", result);
        CVPixelBufferUnlockBaseAddress(cvpixelBufferRef, 0);
        free(buffer);
        return NULL;
    }
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    size_t yd = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
    size_t ud = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
    size_t vd = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 2);
    unsigned char *dsty = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    unsigned char *dstu = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
    unsigned char *dstv = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 2);
    // Copy each plane row by row so the destination stride is respected
    unsigned char *srcy = buffer;
    for (unsigned int rIdx = 0; rIdx < pixelHeight; ++rIdx, srcy += pixelWidth, dsty += yd) {
        memcpy(dsty, srcy, pixelWidth);
    }
    unsigned char *srcu = buffer + pixelHeight * pixelWidth;
    for (unsigned int rIdx = 0; rIdx < pixelHeight / 2; ++rIdx, srcu += pixelWidth / 2, dstu += ud) {
        memcpy(dstu, srcu, pixelWidth / 2);
    }
    unsigned char *srcv = buffer + pixelHeight * pixelWidth * 5 / 4;
    for (unsigned int rIdx = 0; rIdx < pixelHeight / 2; ++rIdx, srcv += pixelWidth / 2, dstv += vd) {
        memcpy(dstv, srcv, pixelWidth / 2);
    }
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    CVPixelBufferUnlockBaseAddress(cvpixelBufferRef, 0);
    free(buffer);
    return pixelBuffer; // caller is responsible for CVPixelBufferRelease
}
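A typical call site is a capture callback, where the NV12 buffer comes out of a CMSampleBuffer. A sketch is below; ConvertTool is a placeholder name for whatever utility class ends up holding these methods.

- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection {
    CVImageBufferRef nv12Buffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferRef i420Buffer = [ConvertTool I420PixelBufferWithNV12:nv12Buffer];
    if (i420Buffer) {
        // ... consume the I420 buffer ...
        CVPixelBufferRelease(i420Buffer); // the method returns a +1 buffer from CVPixelBufferCreate
    }
}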

NV12 to BGRA

The core NV12ToARGB method also comes from libyuv. Note that what libyuv calls "ARGB" is the B, G, R, A byte order in memory on little-endian devices, which is exactly the layout kCVPixelFormatType_32BGRA expects; that is why NV12ToARGB pairs with a 32BGRA buffer:

/// NV12 to BGRA
+ (CVPixelBufferRef)RGBAPixelBufferWithNV12:(CVImageBufferRef)pixelBufferNV12 {
    CVPixelBufferLockBaseAddress(pixelBufferNV12, 0);
    // Image width (pixels)
    size_t pixelWidth = CVPixelBufferGetWidth(pixelBufferNV12);
    // Image height (pixels)
    size_t pixelHeight = CVPixelBufferGetHeight(pixelBufferNV12);
    // Y stride
    size_t src_stride_y = CVPixelBufferGetBytesPerRowOfPlane(pixelBufferNV12, 0);
    // UV stride
    size_t src_stride_uv = CVPixelBufferGetBytesPerRowOfPlane(pixelBufferNV12, 1);
    // Get the Y-plane data of the CVImageBufferRef
    uint8_t *src_y = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBufferNV12, 0);
    // Get the UV-plane data of the CVImageBufferRef
    uint8_t *src_uv = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBufferNV12, 1);
    // Create an empty 32BGRA CVPixelBufferRef
    NSDictionary *pixelAttributes = @{(id)kCVPixelBufferIOSurfacePropertiesKey : @{}};
    CVPixelBufferRef pixelBufferRGBA = NULL;
    CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                          pixelWidth, pixelHeight,
                                          kCVPixelFormatType_32BGRA,
                                          (__bridge CFDictionaryRef)pixelAttributes,
                                          &pixelBufferRGBA);
    if (result != kCVReturnSuccess) {
        NSLog(@"Unable to create cvpixelbuffer %d", result);
        CVPixelBufferUnlockBaseAddress(pixelBufferNV12, 0);
        return NULL;
    }
    result = CVPixelBufferLockBaseAddress(pixelBufferRGBA, 0);
    if (result != kCVReturnSuccess) {
        NSLog(@"Failed to lock base address: %d", result);
        CFRelease(pixelBufferRGBA);
        CVPixelBufferUnlockBaseAddress(pixelBufferNV12, 0);
        return NULL;
    }
    // Get the base address of the newly created RGB buffer
    uint8_t *rgb_data = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBufferRGBA);
    // Let libyuv write into rgb_data, converting NV12 to BGRA
    size_t bgraStride = CVPixelBufferGetBytesPerRowOfPlane(pixelBufferRGBA, 0);
    int ret = NV12ToARGB(src_y, (int)src_stride_y,
                         src_uv, (int)src_stride_uv,
                         rgb_data, (int)bgraStride,
                         (int)pixelWidth, (int)pixelHeight);
    if (ret) {
        NSLog(@"Error converting NV12 VideoFrame to BGRA: %d", ret);
        CVPixelBufferUnlockBaseAddress(pixelBufferRGBA, 0);
        CVPixelBufferUnlockBaseAddress(pixelBufferNV12, 0);
        CFRelease(pixelBufferRGBA);
        return NULL;
    }
    CVPixelBufferUnlockBaseAddress(pixelBufferRGBA, 0);
    CVPixelBufferUnlockBaseAddress(pixelBufferNV12, 0);
    return pixelBufferRGBA; // caller is responsible for CVPixelBufferRelease
}
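Since the method assumes a biplanar NV12 input, a defensive check before converting can save debugging time. The snippet below is a sketch, not part of the original method:

OSType format = CVPixelBufferGetPixelFormatType(pixelBufferNV12);
if (format != kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange &&
    format != kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
    NSLog(@"Expected an NV12 pixel buffer, got format %u", (unsigned)format);
    return NULL;
}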

CVPixelBufferRef to UIImage

The following method converts a video frame into a single image. It is suitable for occasional snapshots; calling it frequently may cause memory problems, since each call creates a new CIContext.

/// buffer to image
+ (UIImage *)convert:(CVPixelBufferRef)pixelBuffer {
    // Wrap the pixel buffer in a CIImage (no pixel copy yet)
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
    // Creating a new CIContext on every call is the expensive part (see the note below)
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    // Render the full buffer into a CGImage
    CGImageRef videoImage = [temporaryContext createCGImage:ciImage
             fromRect:CGRectMake(0, 0, CVPixelBufferGetWidth(pixelBuffer), CVPixelBufferGetHeight(pixelBuffer))];
    UIImage *uiImage = [UIImage imageWithCGImage:videoImage];
    CGImageRelease(videoImage);
    return uiImage;
}
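If you do need to call this often, the usual mitigation is to reuse a single CIContext instead of creating a temporary one per frame, since context creation is the expensive part. A minimal sketch follows; the sharedContext name is my own:

+ (CIContext *)sharedContext {
    static CIContext *context = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        context = [CIContext contextWithOptions:nil];
    });
    return context;
}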


CGImageRef to CVPixelBufferRef

The following method converts a single image into a CVPixelBuffer (useful when you need to turn a frame into a buffer for adding subtitles, beauty effects, stickers, and so on).

/// image to buffer
+ (CVPixelBufferRef)pixelBufferFromCGImage:(CGImageRef)image {
    NSDictionary *options = @{
        (NSString *)kCVPixelBufferCGImageCompatibilityKey : @YES,
        (NSString *)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES,
        (NSString *)kCVPixelBufferIOSurfacePropertiesKey : [NSDictionary dictionary]
    };
    CVPixelBufferRef pxbuffer = NULL;
    CGFloat frameWidth = CGImageGetWidth(image);
    CGFloat frameHeight = CGImageGetHeight(image);
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault,
                                          frameWidth, frameHeight,
                                          kCVPixelFormatType_32BGRA,
                                          (__bridge CFDictionaryRef)options,
                                          &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    // kCGBitmapByteOrder32Little is needed so the bytes land in B, G, R, A order,
    // matching the kCVPixelFormatType_32BGRA buffer created above
    CGContextRef context = CGBitmapContextCreate(pxdata,
                                                 frameWidth, frameHeight, 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer),
                                                 rgbColorSpace,
                                                 (CGBitmapInfo)(kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Little));
    NSParameterAssert(context);
    CGContextConcatCTM(context, CGAffineTransformIdentity);
    // Draw the CGImage straight into the pixel buffer's memory
    CGContextDrawImage(context, CGRectMake(0, 0, frameWidth, frameHeight), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
    return pxbuffer; // caller is responsible for CVPixelBufferRelease
}
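For example, feeding a UIImage (say, a watermark) through this method might look like the sketch below. The asset name and the ConvertTool class are placeholders, and the caller owns the returned buffer:

UIImage *watermark = [UIImage imageNamed:@"watermark"]; // hypothetical asset name
CVPixelBufferRef pxbuffer = [ConvertTool pixelBufferFromCGImage:watermark.CGImage];
if (pxbuffer) {
    // ... hand the buffer to e.g. an AVAssetWriterInputPixelBufferAdaptor ...
    CVPixelBufferRelease(pxbuffer);
}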

Buffer Data to UIImage

The following method converts raw memory data into an image (it reads the stored buffer at the given memory address and generates an image; in effect, the address points to an NV12 buffer).

// NV12 to image
+ (UIImage *)YUVtoUIImage:(int)w h:(int)h buffer:(unsigned char *)buffer {
    // YUV (NV12) --> CVPixelBuffer --> CIImage --> UIImage conversion
    NSDictionary *pixelAttributes = @{(NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{}};
    CVPixelBufferRef pixelBuffer = NULL;
    CVReturn result = CVPixelBufferCreate(kCFAllocatorDefault,
                                          w, h,
                                          kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
                                          (__bridge CFDictionaryRef)(pixelAttributes),
                                          &pixelBuffer);
    if (result != kCVReturnSuccess) {
        NSLog(@"Unable to create cvpixelbuffer %d", result);
        return nil;
    }
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    void *yDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    // y_ch0 is the Y plane of the YUV (NV12) data
    unsigned char *y_ch0 = buffer;
    // y_ch1 is the UV plane of the YUV (NV12) data
    unsigned char *y_ch1 = buffer + w * h;
    memcpy(yDestPlane, y_ch0, w * h);
    void *uvDestPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
    memcpy(uvDestPlane, y_ch1, w * h / 2);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    // CIImage conversion
    CIImage *coreImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext createCGImage:coreImage
                                                   fromRect:CGRectMake(0, 0, w, h)];
    UIImage *finalImage = [[UIImage alloc] initWithCGImage:videoImage];
    CVPixelBufferRelease(pixelBuffer);
    CGImageRelease(videoImage);
    return finalImage;
}
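One caveat with the memcpy calls above: they assume each destination plane's bytes-per-row equals the image width, which IOSurface-backed buffers do not guarantee. A stride-aware copy of the Y plane would look like the sketch below (the UV plane needs the same treatment over h/2 rows):

size_t yStride = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
uint8_t *yDest = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
for (int row = 0; row < h; row++) {
    memcpy(yDest + row * yStride, buffer + row * w, w);
}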

Format conversion on iOS mixes C, Objective-C, and C++ APIs, so many of these methods look verbose and take some groundwork and continued study to digest. There are also many ways to draw images through OpenGL, which are harder to understand, so as a beginner I take notes, stay patient, organize the common methods, and wrap them into a utility class, leaving the deeper material for when the time is right.

Reference

Understand the sampling and format of YUV