I added a new iOS 8 Photo Extension to my existing photo editing app. My app has quite a complex filter pipeline and needs to keep multiple textures in memory at a time. How can I keep the extension within its memory limits while still running this pipeline?
Here is how you apply two consecutive convolution kernels in Core Image, cropping the "intermediate result" between them back to a finite extent:
/// Builds the filter graph for this filter: a vertical-edge 3x3 convolution,
/// then a horizontal-edge 3x3 convolution, then a color inversion.
///
/// Convolution filters produce an image of (potentially) infinite extent, so
/// the result of each pass is cropped back to the input image's extent before
/// it is fed to the next filter. Note this crops to the input's *actual*
/// extent — the extent's origin is not assumed to be (0, 0).
///
/// @return The composed CIImage, or nil if any filter in the chain fails.
- (CIImage *)outputImage {
    const double g = self.inputIntensity.doubleValue;

    // Crop each convolution result back to the original image's extent.
    // (The previous version forced the origin to CGPointZero and also applied
    // the crop twice — once via -imageByCroppingToRect: and once via a CICrop
    // filter with the same rect; a single crop to the real extent suffices.)
    const CGRect extent = self.inputImage.extent;

    // Vertical-edge kernel (horizontal gradient), scaled by the intensity.
    const CGFloat weights_v[] = { -1*g, 0*g, 1*g,
                                  -1*g, 0*g, 1*g,
                                  -1*g, 0*g, 1*g };
    CIImage *result = [CIFilter filterWithName:@"CIConvolution3X3" keysAndValues:
                       kCIInputImageKey, self.inputImage,
                       @"inputWeights", [CIVector vectorWithValues:weights_v count:9],
                       @"inputBias", @1.0,  // bias keeps zero-gradient areas at mid-level
                       nil].outputImage;
    result = [result imageByCroppingToRect:extent];

    // Horizontal-edge kernel (vertical gradient), scaled by the intensity.
    const CGFloat weights_h[] = { -1*g, -1*g, -1*g,
                                   0*g,  0*g,  0*g,
                                   1*g,  1*g,  1*g };
    result = [CIFilter filterWithName:@"CIConvolution3X3" keysAndValues:
              kCIInputImageKey, result,
              @"inputWeights", [CIVector vectorWithValues:weights_h count:9],
              @"inputBias", @1.0,
              nil].outputImage;
    result = [result imageByCroppingToRect:extent];

    // Invert so edges read as dark lines on a light background.
    result = [CIFilter filterWithName:@"CIColorInvert" keysAndValues:
              kCIInputImageKey, result,
              nil].outputImage;
    return result;
}