Learning macOS Development (Part 17.2: Core Image)
As I've gradually dug deeper, my understanding of the individual frameworks is as follows (a quick conversion sketch follows the list):
- NSImage/UIImage in Cocoa: UI-level image classes; a single NSImage can hold multiple representations
- CGImage in Core Graphics: Quartz at its core, drawing through a CGContext; a CGImage corresponds to exactly one bitmap layer
- CIImage in Core Image: mainly for effects and filters, including scaling and cropping; the recognition features have moved into the Vision framework
- Vision: powerful analysis built on Core ML machine learning, including detection of faces, bodies, poses, objects, and so on
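To make those relationships concrete, here is a minimal sketch that round-trips an image through all three types (the file name input.png is a placeholder of mine):

```swift
import Cocoa
import CoreImage

if let nsImage = NSImage(byReferencingFile: "input.png"),
   // NSImage -> CGImage: ask the UI-level image for a single bitmap
   let cgImage = nsImage.cgImage(forProposedRect: nil, context: nil, hints: nil) {
    // CGImage -> CIImage: wrap the bitmap as a filter-friendly recipe
    let ciImage = CIImage(cgImage: cgImage)
    // CIImage -> CGImage again: rendering requires a CIContext
    let context = CIContext()
    if let rendered = context.createCGImage(ciImage, from: ciImage.extent) {
        print("round-tripped image: \(rendered.width)x\(rendered.height)")
    }
}
```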
With that, let's get to the point and look at a few Core Image scenarios.
Applying a Chroma Key Effect
This is the classic green-screen keying; smart keying without a fixed key color needs the Vision framework, which comes later.
Two steps:
- Key out the chroma color with a CIColorCube filter
- Composite onto a background with the CISourceOverCompositing filter
This time we'll implement it as a command-line Swift script:
```swift
import Cocoa
import CoreGraphics
import CoreImage

run()

func run() {
    let foregroundImage = NSImage(byReferencingFile: "./assets/sample.jpg")
    guard let bitmapRep = foregroundImage?.representations.first as? NSBitmapImageRep else {
        return
    }
    let foregroundCIImage = CIImage(bitmapImageRep: bitmapRep)

    // Key out hues between 0.32 and 0.4 (the green range)
    let chromaCIFilter = chromaKeyFilter(fromHue: 0.32, toHue: 0.4)
    chromaCIFilter?.setValue(foregroundCIImage, forKey: kCIInputImageKey)
    guard let sourceCIImageWithoutBackground = chromaCIFilter?.outputImage else {
        print("background removal FAILED")
        return
    }

    let outputRep = NSBitmapImageRep(ciImage: sourceCIImageWithoutBackground)
    if let data = outputRep.representation(using: .png, properties: [:]) {
        do { try data.write(to: URL(fileURLWithPath: "output.png")) } catch { print(error) }
    }
}

func getHue(red: CGFloat, green: CGFloat, blue: CGFloat) -> CGFloat {
    let color = NSColor(red: red, green: green, blue: blue, alpha: 1)
    var hue: CGFloat = 0
    color.getHue(&hue, saturation: nil, brightness: nil, alpha: nil)
    return hue
}

// Build a CIColorCube filter that maps every color whose hue falls in
// (fromHue, toHue) to transparent, and leaves all other colors untouched
func chromaKeyFilter(fromHue: CGFloat, toHue: CGFloat) -> CIFilter? {
    let size = 64
    var cubeRGB = [Float]()
    for z in 0 ..< size {
        let blue = CGFloat(z) / CGFloat(size - 1)
        for y in 0 ..< size {
            let green = CGFloat(y) / CGFloat(size - 1)
            for x in 0 ..< size {
                let red = CGFloat(x) / CGFloat(size - 1)
                let hue = getHue(red: red, green: green, blue: blue)
                let alpha: CGFloat = (hue >= fromHue && hue <= toHue) ? 0 : 1
                // Premultiplied RGBA, as CIColorCube expects
                cubeRGB.append(Float(red * alpha))
                cubeRGB.append(Float(green * alpha))
                cubeRGB.append(Float(blue * alpha))
                cubeRGB.append(Float(alpha))
            }
        }
    }
    // Copy the cube safely instead of taking a dangling pointer to the array
    let data = cubeRGB.withUnsafeBufferPointer { Data(buffer: $0) }
    let colorCubeFilter = CIFilter(name: "CIColorCube",
                                   parameters: ["inputCubeDimension": size,
                                                "inputCubeData": data])
    return colorCubeFilter
}
```
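Since this is plain top-level Swift, you can run it directly with `swift main.swift` (assuming the file is saved as main.swift) instead of building an app target.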
The result:

Next, still inside run(), composite over the background image:
```swift
let backgroundImage = NSImage(byReferencingFile: "./assets/back.jpg")
guard let backgroundBitmapRep = backgroundImage?.representations.first as? NSBitmapImageRep else {
    return
}
let backgroundCIImage = CIImage(bitmapImageRep: backgroundBitmapRep)

guard let compositor = CIFilter(name: "CISourceOverCompositing") else {
    print("create CISourceOverCompositing FAILED")
    return
}
compositor.setValue(sourceCIImageWithoutBackground, forKey: kCIInputImageKey)
compositor.setValue(backgroundCIImage, forKey: kCIInputBackgroundImageKey)
guard let compositedCIImage = compositor.outputImage else {
    print("compositing FAILED")
    return
}

// Save the phase-two result
let compositedRep = NSBitmapImageRep(ciImage: compositedCIImage)
if let data = compositedRep.representation(using: .png, properties: [:]) {
    do { try data.write(to: URL(fileURLWithPath: "phase2.png")) } catch { print(error) }
}
```
Compared side by side, Core Graphics' color masking doesn't even come close.
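For reference, this is roughly what the Core Graphics approach looks like: a minimal sketch using CGImage's copy(maskingColorComponents:), which can only knock a per-channel color range out to transparent with a hard edge (the component values here are illustrative, not tuned):

```swift
import Cocoa

// Mask out greens with Core Graphics: the array holds min/max pairs per channel
// (0...255 for an 8-bit image); requires a source image without an alpha channel
if let nsImage = NSImage(byReferencingFile: "./assets/sample.jpg"),
   let cgImage = nsImage.cgImage(forProposedRect: nil, context: nil, hints: nil),
   let masked = cgImage.copy(maskingColorComponents: [0, 120, 160, 255, 0, 120]) {
    print("masked image: \(masked.width)x\(masked.height)")
}
```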
Now adjust the scale and the compositing position:
```swift
// sourceCIImageWithoutBackground was bound with guard-let, so it is a constant;
// bind the adjusted image to a new variable and hand that to the compositor instead
var adjustedCIImage = scaleFilter(sourceCIImageWithoutBackground, aspectRatio: 1.0, scale: 0.35)
adjustedCIImage = perspectiveFilter(adjustedCIImage,
                                    pixelsWide: backgroundBitmapRep.pixelsWide,
                                    pixelsHigh: backgroundBitmapRep.pixelsHigh)

// Re-position the (scaled) image so it sits centered in the background;
// with an axis-aligned rectangle as the target quad this is effectively a translation
func perspectiveFilter(_ input: CIImage, pixelsWide: Int, pixelsHigh: Int) -> CIImage {
    let filter = CIFilter(name: "CIPerspectiveTransform")!
    let w = Float(input.extent.size.width)
    let h = Float(input.extent.size.height)
    let centerX = Float(pixelsWide) / 2
    let centerY = Float(pixelsHigh) / 2
    print("\(w)x\(h)")
    print("center: \(centerX), \(centerY)")
    filter.setValue(CIVector(x: CGFloat(centerX - w / 2), y: CGFloat(centerY + h / 2)), forKey: "inputTopLeft")
    filter.setValue(CIVector(x: CGFloat(centerX + w / 2), y: CGFloat(centerY + h / 2)), forKey: "inputTopRight")
    filter.setValue(CIVector(x: CGFloat(centerX - w / 2), y: CGFloat(centerY - h / 2)), forKey: "inputBottomLeft")
    filter.setValue(CIVector(x: CGFloat(centerX + w / 2), y: CGFloat(centerY - h / 2)), forKey: "inputBottomRight")
    filter.setValue(input, forKey: kCIInputImageKey)
    return filter.outputImage!
}

func scaleFilter(_ input: CIImage, aspectRatio: Double, scale: Double) -> CIImage {
    let scaleFilter = CIFilter(name: "CILanczosScaleTransform")!
    scaleFilter.setValue(input, forKey: kCIInputImageKey)
    scaleFilter.setValue(scale, forKey: kCIInputScaleKey)
    scaleFilter.setValue(aspectRatio, forKey: kCIInputAspectRatioKey)
    return scaleFilter.outputImage!
}
```

Finally, multiple filters can be chained by feeding one filter's outputImage into the next filter's inputImage. A CIImage is just a lazy recipe, so nothing is processed until a single CIContext render executes the whole chain in one pass, which avoids intermediate buffers.
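A minimal sketch of that chaining pattern (the specific filters here are just for illustration):

```swift
import Cocoa
import CoreImage

// Chain two filters: each outputImage is lazy, so no pixels are processed yet
let input = CIImage(color: .red).cropped(to: CGRect(x: 0, y: 0, width: 256, height: 256))

let blur = CIFilter(name: "CIGaussianBlur", parameters: [kCIInputImageKey: input,
                                                         kCIInputRadiusKey: 4.0])!
let mono = CIFilter(name: "CIPhotoEffectMono", parameters: [kCIInputImageKey: blur.outputImage!])!

// One render executes the whole chain in a single pass
let context = CIContext()
if let rendered = context.createCGImage(mono.outputImage!, from: input.extent) {
    print("rendered \(rendered.width)x\(rendered.height)")
}
```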