I can't find a way to do this, but this code shows the concept of what I'm trying to do. I need to draw a system image into a Shape.
struct MyNoOp: Shape {
    var drawInfo: DrawInfo
    var iconName: String

    init(_ drawInfo: DrawInfo, _ iconName: String) {
        self.drawInfo = drawInfo
        self.iconName = iconName
    }

    func path(in rect: CGRect) -> Path {
        var path = Path()
        path.addPath(Image(systemName: iconName).path(in: rect))
        return path
    }
}
I would suggest that you do not want to create a path from an Image (which is a "view" type), but rather from an NSImage/UIImage. So here is a routine to create a CGPath from a system symbol, using the Vision framework's "detect contours" request on an NSImage/UIImage:
// CGPath+Contour.swift
//
// Created by Robert Ryan on 4/20/24.

#if os(macOS)
import AppKit.NSImage

typealias ImageType = NSImage
typealias Weight = NSFont.Weight
typealias Scale = NSImage.SymbolScale
#else
import UIKit.UIImage

typealias ImageType = UIImage
typealias Weight = UIImage.SymbolWeight
typealias Scale = UIImage.SymbolScale
#endif

import Vision

extension CGPath {
    /// Create a `CGPath` tracing the contours of a system symbol.
    ///
    /// The symbol is rendered black-on-white and then handed to Vision's contour detection.
    static func contourPath(
        forSystemName systemName: String,
        pointSize: CGFloat,
        weight: Weight = .regular,
        scale: Scale = .small
    ) throws -> CGPath {
        guard
            let image = ImageType.system(
                named: systemName,
                pointSize: pointSize,
                weight: weight,
                scale: scale,
                foreground: .black
            )?
            .withBackground(.white),
            let cgImage = image.cgImage
        else {
            throw ContourError.imageNotFound
        }

        return try cgImage.contourPath(in: CGRect(origin: .zero, size: image.size))
    }
}
extension CGImage {
    /// Detect the contours in this image and return them as a single `CGPath`,
    /// scaled from Vision's normalized coordinates into `bounds` (flipping the y-axis).
    func contourPath(
        in bounds: CGRect? = nil,
        contrastAdjustment: Float = 2,
        maximumImageDimension: Int? = nil
    ) throws -> CGPath {
        let bounds = bounds ?? CGRect(x: 0, y: 0, width: width, height: height)

        let contourRequest = VNDetectContoursRequest()
        contourRequest.maximumImageDimension = maximumImageDimension ?? Int(max(bounds.width, bounds.height))
        contourRequest.contrastAdjustment = contrastAdjustment

        let requestHandler = VNImageRequestHandler(cgImage: self, options: [:])
        try requestHandler.perform([contourRequest])

        // Vision returns a path in normalized, lower-left-origin coordinates;
        // scale and flip it into the requested bounds.
        var transform = CGAffineTransform(translationX: bounds.minX, y: bounds.height + bounds.minY)
            .scaledBy(x: bounds.width, y: -bounds.height)

        guard let path = contourRequest.results?.first?.normalizedPath.mutableCopy(using: &transform) else {
            throw ContourError.noContour
        }

        return path
    }
}

enum ContourError: Error {
    case imageNotFound
    case noContour
}
#if os(macOS)

private extension NSImage {
    var cgImage: CGImage? {
        var rect = CGRect(origin: .zero, size: size)
        return cgImage(forProposedRect: &rect, context: nil, hints: nil)
    }

    static func createImage(size: NSSize, drawingHandler: (NSGraphicsContext) -> Void) -> NSImage? {
        let image = NSImage(size: size)

        image.lockFocus()
        defer { image.unlockFocus() }

        guard let context = NSGraphicsContext.current else {
            return nil
        }

        drawingHandler(context)
        return image
    }

    static func system(
        named systemName: String,
        pointSize: CGFloat,
        weight: NSFont.Weight = .regular,
        scale: SymbolScale = .medium,
        foreground: NSColor
    ) -> NSImage? {
        let configuration = Self.SymbolConfiguration(pointSize: pointSize, weight: weight, scale: scale)
            .applying(.init(paletteColors: [foreground]))

        return NSImage(systemSymbolName: systemName, accessibilityDescription: nil)?
            .withSymbolConfiguration(configuration)
    }

    func withBackground(_ color: NSColor) -> NSImage? {
        NSImage.createImage(size: size) { _ in
            let rect = CGRect(origin: .zero, size: size)
            color.setFill()
            NSBezierPath(rect: rect).fill()
            draw(in: rect)
        }
    }
}
#else

extension UIImage {
    static func system(
        named systemName: String,
        pointSize: CGFloat,
        weight: SymbolWeight = .regular,
        scale: SymbolScale = .default,
        foreground: UIColor
    ) -> UIImage? {
        UIImage(
            systemName: systemName,
            withConfiguration: UIImage.SymbolConfiguration(pointSize: pointSize, weight: weight, scale: scale)
        )?
        .withTintColor(foreground, renderingMode: .alwaysOriginal)
    }

    func withBackground(_ background: UIColor) -> UIImage? {
        let format = UIGraphicsImageRendererFormat()
        format.scale = scale

        let rect = CGRect(origin: .zero, size: size)

        return UIGraphicsImageRenderer(bounds: rect, format: format).image { _ in
            background.setFill()
            UIBezierPath(rect: rect).fill()
            draw(in: rect)
        }
    }
}

#endif
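As an aside, the CGImage extension above is not specific to SF Symbols. Here is a minimal sketch (my addition, not part of the original file; the function name is arbitrary) of running the same contour detection against any sufficiently high-contrast bitmap, assuming the extension above is in scope:

// Trace the contours of an arbitrary high-contrast CGImage. Raising
// `contrastAdjustment` above its default of 2 may help when the source
// is not pure black-on-white.
func contour(of cgImage: CGImage) throws -> CGPath {
    try cgImage.contourPath(contrastAdjustment: 3)
}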
Now you can use that CGPath routine any way you want, e.g.:
struct ContentView: View {
    var body: some View {
        VStack {
            Text("Hello, world!")

            if let cgPath = try? CGPath.contourPath(forSystemName: "trash.circle", pointSize: 256) {
                Path(cgPath)
                    .stroke(Color.red, lineWidth: 3)
                    .frame(
                        // size the frame so the path has the same inset on each side
                        width: cgPath.boundingBoxOfPath.maxX + cgPath.boundingBoxOfPath.minX,
                        height: cgPath.boundingBoxOfPath.maxY + cgPath.boundingBoxOfPath.minY
                    )
            }
        }
        .padding()
    }
}
That handles both the macOS and iOS renditions.
Note that contour detection does not appear to treat a change of alpha as an edge, so I render the system symbol as a high-contrast image (black on white) and then scan that for contours.
That yields the stroked outline of the symbol on macOS, and likewise on iOS.
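If you wanted to tie this back to your original Shape-based concept, here is a minimal sketch (my addition; SymbolShape is just a hypothetical name) of a Shape that wraps an already computed contour path and scales it to whatever rect SwiftUI proposes:

import SwiftUI

/// Renders a previously computed contour path, scaled to fit the proposed rect.
/// The contour is computed once, outside of `path(in:)`, because the Vision
/// request is comparatively expensive.
struct SymbolShape: Shape {
    let cgPath: CGPath

    func path(in rect: CGRect) -> Path {
        let box = cgPath.boundingBoxOfPath
        guard box.width > 0, box.height > 0 else { return Path() }

        // Scale uniformly to fit `rect`, then shift the path's origin to the rect's origin.
        let scale = min(rect.width / box.width, rect.height / box.height)
        var transform = CGAffineTransform(translationX: rect.minX - box.minX * scale,
                                          y: rect.minY - box.minY * scale)
            .scaledBy(x: scale, y: scale)

        guard let scaled = cgPath.copy(using: &transform) else { return Path() }
        return Path(scaled)
    }
}

With that, SymbolShape(cgPath: cgPath).stroke(Color.red, lineWidth: 3) behaves like any other Shape, taking whatever size its container offers.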
I threw this together, and there are tons of refinements I might contemplate, but hopefully, this is enough to get you going.
For the sake of future readers, we must recognize that it is an inherently inefficient and imprecise technique to take what is, essentially, a vector asset, render it to a bitmap, and then detect contours/edges within that image.
Generally, if I want a vector-based system image, I will pull the SVG from the SF Symbols app and work from that. This avoids the runtime computational overhead and is far more precise. The OP could not do that in their particular use case, but consider this a disclaimer on the above code.