The CIDetector.h header in CoreImage declares four recognition types:
/* Face recognition */
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeFace NS_AVAILABLE(10_7, 5_0);
/* Rectangle edge recognition */
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeRectangle NS_AVAILABLE(10_10, 8_0);
/* Qr code recognition */
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeQRCode NS_AVAILABLE(10_10, 8_0);
/* Text recognition */
#if __OBJC2__
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeText NS_AVAILABLE(10_11, 9_0);
#endif
Copy the code
Then, CIDetectorTypeRectangle is used to detect the rectangular edges of an image. The resulting effect is shown at the bottom of the linked Demo.
Part code:
- Initialize a high-precision recognizer
// High precision edge recognizer
// Shared high-accuracy rectangle-edge detector, created once and reused.
- (CIDetector *)highAccuracyRectangleDetector
{
    static CIDetector *sharedDetector = nil;
    static dispatch_once_t once;
    dispatch_once(&once, ^{
        // CIDetectorAccuracyHigh trades speed for better edge detection.
        NSDictionary *options = @{CIDetectorAccuracy : CIDetectorAccuracyHigh};
        sharedDetector = [CIDetector detectorOfType:CIDetectorTypeRectangle
                                            context:nil
                                            options:options];
    });
    return sharedDetector;
}
Copy the code
- Call the camera to capture the camera image
// Configure an AVCaptureSession that streams 32BGRA video frames to this
// object (the AVCaptureVideoDataOutputSampleBufferDelegate) for edge detection.
NSArray *possibleDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
AVCaptureDevice *device = [possibleDevices firstObject];
if (!device) return;

_imageDedectionConfidence = 0.0;

AVCaptureSession *session = [[AVCaptureSession alloc] init];
self.captureSession = session;
[session beginConfiguration];
self.captureDevice = device;

NSError *error = nil;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
// Bug fix: the original ignored `error` and added the input unconditionally.
if (!input) {
    [session commitConfiguration];
    return;
}
session.sessionPreset = AVCaptureSessionPresetPhoto;
if ([session canAddInput:input]) {
    [session addInput:input];
}

AVCaptureVideoDataOutput *dataOutput = [[AVCaptureVideoDataOutput alloc] init];
[dataOutput setAlwaysDiscardsLateVideoFrames:YES];
// 32BGRA matches the pixel format CIImage expects from a CVPixelBuffer.
[dataOutput setVideoSettings:@{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)}];
// NOTE(review): delivering frames on the main queue keeps UI drawing simple,
// but a serial background queue would avoid stalling the main thread.
[dataOutput setSampleBufferDelegate:self queue:dispatch_get_main_queue()];
if ([session canAddOutput:dataOutput]) {
    [session addOutput:dataOutput];
}

self.stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
if ([session canAddOutput:self.stillImageOutput]) {
    [session addOutput:self.stillImageOutput];
}

AVCaptureConnection *connection = [dataOutput.connections firstObject];
[connection setVideoOrientation:AVCaptureVideoOrientationPortrait];

// Bug fix: -beginConfiguration was never balanced with -commitConfiguration
// in the visible code, so the batched changes were never applied atomically.
// (Remove this line if the enclosing method already commits later.)
[session commitConfiguration];
Copy the code
- You also need a container that displays captured images
// Build the GL-backed preview surface: camera frames rendered through Core
// Image are drawn into a GLKView via an EAGLContext/CIContext pair.
self.context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
GLKView *view = [[GLKView alloc] initWithFrame:self.bounds];
view.autoresizingMask = UIViewAutoresizingFlexibleWidth | UIViewAutoresizingFlexibleHeight;
view.translatesAutoresizingMaskIntoConstraints = YES;
view.context = self.context;
// Scale factor 1.0 keeps the drawable at point resolution (cheaper rendering).
view.contentScaleFactor = 1.0f;
view.drawableDepthFormat = GLKViewDrawableDepthFormat24;
// Insert below other subviews so overlays (e.g. the edge layer) stay on top.
[self insertSubview:view atIndex:0];
_glkView = view;
// NOTE(review): the renderbuffer is created before EAGLContext is made
// current below — confirm the intended GL context is current at this point.
glGenRenderbuffers(1, &_renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, _renderBuffer);
// The image will be drawn into _coreImageContext
_coreImageContext = [CIContext contextWithEAGLContext:self.context];
[EAGLContext setCurrentContext:self.context];
Copy the code
- follow
AVCaptureVideoDataOutputSampleBufferDelegate
The agent, after capturing the image, calls the following methods
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
Copy the code
Call the CIDetector to run detection and pick the largest quadrilateral feature:
// Get the CIImage from the buffer
// Get the CIImage from the sample buffer delivered by the video data output.
CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)CMSampleBufferGetImageBuffer(sampleBuffer);
CIImage *image = [CIImage imageWithCVPixelBuffer:pixelBuffer];
// Run the shared high-accuracy rectangle detector over the frame.
NSArray <CIFeature *>*features = [[self highAccuracyRectangleDetector] featuresInImage:image];
// Keep only the largest quadrilateral among the detected features
// (helper defined elsewhere in this class).
_borderDetectLastRectangleFeature = [self biggestRectangleInRectangles:features];
Copy the code
- Use after identifying edges
CAShapeLayer
Draws and displays the edges
// Draw the edge detection layer
// Draw the detected quadrilateral over the preview. The even-odd fill rule
// dims everything outside the quad while the quad itself is stroked white.
- (void)drawBorderDetectRectWithImageRect:(CGRect)imageRect topLeft:(CGPoint)topLeft topRight:(CGPoint)topRight bottomLeft:(CGPoint)bottomLeft bottomRight:(CGPoint)bottomRight
{
    // Lazily create the overlay layer on first use.
    if (!_rectOverlay) {
        _rectOverlay = [CAShapeLayer layer];
        _rectOverlay.fillRule = kCAFillRuleEvenOdd;
        _rectOverlay.fillColor = [UIColor colorWithRed:73/255.0 green:130/255.0 blue:180/255.0 alpha:0.4].CGColor;
        _rectOverlay.strokeColor = [UIColor whiteColor].CGColor;
        _rectOverlay.lineWidth = 5.0f;
    }
    if (!_rectOverlay.superlayer) {
        self.layer.masksToBounds = YES;
        [self.layer addSublayer:_rectOverlay];
    }
    // Convert the image-space coordinates into the UIKit coordinate system
    // (helper defined elsewhere in this class).
    TransformCIFeatureRect featureRect = [self transfromRealRectWithImageRect:imageRect topLeft:topLeft topRight:topRight bottomLeft:bottomLeft bottomRight:bottomRight];
    // Path tracing the detected quadrilateral.
    UIBezierPath *path = [UIBezierPath new];
    [path moveToPoint:featureRect.topLeft];
    [path addLineToPoint:featureRect.topRight];
    [path addLineToPoint:featureRect.bottomRight];
    [path addLineToPoint:featureRect.bottomLeft];
    [path closePath];
    // Background mask path covering the whole view (plus a 5pt margin so the
    // stroke is never clipped at the edges). Bug fix: the original listing had
    // the CGRectMake arguments garbled ('.' where ',' belongs).
    UIBezierPath *rectPath = [UIBezierPath bezierPathWithRect:CGRectMake(-5,
                                                                         -5,
                                                                         self.frame.size.width + 10,
                                                                         self.frame.size.height + 10)];
    [rectPath setUsesEvenOddFillRule:YES];
    [rectPath appendPath:path];
    _rectOverlay.path = rectPath.CGPath;
}
Copy the code
Now you can see the effect of real-time identification
- Finally, after the photo is taken, a filter perspective-corrects the detected irregular quadrilateral into an upright rectangle:
/// Convert any quadrilateral into a square
/// Apply CIPerspectiveCorrection so the region bounded by the detected
/// quadrilateral is remapped to an upright rectangle.
- (CIImage *)correctPerspectiveForImage:(CIImage *)image withFeatures:(CIRectangleFeature *)rectangleFeature
{
    // The filter takes the four corners of the source quad as CIVectors.
    NSDictionary *parameters = @{
        @"inputTopLeft"     : [CIVector vectorWithCGPoint:rectangleFeature.topLeft],
        @"inputTopRight"    : [CIVector vectorWithCGPoint:rectangleFeature.topRight],
        @"inputBottomLeft"  : [CIVector vectorWithCGPoint:rectangleFeature.bottomLeft],
        @"inputBottomRight" : [CIVector vectorWithCGPoint:rectangleFeature.bottomRight],
    };
    return [image imageByApplyingFilter:@"CIPerspectiveCorrection" withInputParameters:parameters];
}
Copy the code
// TODO: After an edge is identified, you can manually set the edge range
Demo address: GitHub portal: https://github.com/madaoCN/MADRectDetect — if it helps, please give it a Star.