I'm playing around with images in SwiftUI by making a simple book cover view:
VStack {
    AsyncImage(url: URL(string: coverURL)) { phase in
        switch phase {
        case .failure:
            Image(systemName: "photo")
                .font(.largeTitle)
        case .success(let image):
            image.resizable()
        default:
            ProgressView()
        }
    }
    .scaledToFit()
    .frame(height: 300)
    .cornerRadius(10)
    .padding()

    VStack {
        Text(title)
            .font(.title)
            .foregroundStyle(.white)

        if let author = author {
            Text(author)
                .font(.subheadline)
                .foregroundStyle(.white)
        }
    }
    .padding()
}
.padding(.top, geometry.safeAreaInsets.top)
.frame(maxWidth: .infinity)
.background(
    AsyncImage(url: URL(string: coverURL)) { phase in
        switch phase {
        case .failure:
            Color.gray
        case .success(let image):
            image.resizable()
        default:
            Color.gray
        }
    }
    .blur(radius: 30, opaque: true)
)
The problem is that the text doesn't contrast enough when the image is light. Ideally the text should be either white or black depending on the background image. I've found suggestions to calculate the average color of an image and base the text's color on that, but those work on UIImage, not Image. Blend mode (.blendMode(.difference)) also didn't produce the best results.
How can I change the text color (title and author in this example) based on the background image in SwiftUI?
One approach is to calculate the luminance, or perceived brightness, of the area behind the text's bounding box. We can do this with Core Image:
import SwiftUI
import CoreImage
import CoreImage.CIFilterBuiltins

extension UIImage {
    /// Average color of the given rect, expressed in pixel coordinates of the underlying CGImage.
    func averageColor(in rect: CGRect) -> UIColor? {
        guard let cgImage = self.cgImage else { return nil }
        // CGImage.cropping(to:) intersects the rect with the image bounds for us.
        guard let cropped = cgImage.cropping(to: rect) else { return nil }
        let ciImage = CIImage(cgImage: cropped)
        let context = CIContext(options: [.workingColorSpace: kCFNull!])

        // CIAreaAverage reduces the cropped region to a single 1x1 pixel.
        let filter = CIFilter.areaAverage()
        filter.inputImage = ciImage
        filter.extent = ciImage.extent
        guard let outputImage = filter.outputImage else { return nil }

        var bitmap = [UInt8](repeating: 0, count: 4)
        context.render(outputImage,
                       toBitmap: &bitmap,
                       rowBytes: 4,
                       bounds: CGRect(x: 0, y: 0, width: 1, height: 1),
                       format: .RGBA8,
                       colorSpace: nil)

        return UIColor(red: CGFloat(bitmap[0]) / 255.0,
                       green: CGFloat(bitmap[1]) / 255.0,
                       blue: CGFloat(bitmap[2]) / 255.0,
                       alpha: 1.0)
    }
}
extension UIColor {
    var isLight: Bool {
        var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 0
        getRed(&r, green: &g, blue: &b, alpha: &a)
        // Rec. 601 luma coefficients as a measure of perceived brightness
        let luminance = 0.299 * r + 0.587 * g + 0.114 * b
        return luminance > 0.5
    }

    var contrastingTextColor: Color {
        isLight ? .black : .white
    }
}
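As a quick sanity check on those helpers (a throwaway snippet, not part of the views below), averaging a solid-color image picks the text color you'd expect:
// Hypothetical sanity check: render a solid-color UIImage and
// confirm averageColor / isLight / contrastingTextColor behave as expected.
func solidImage(_ color: UIColor, size: CGSize = CGSize(width: 10, height: 10)) -> UIImage {
    UIGraphicsImageRenderer(size: size).image { ctx in
        color.setFill()
        ctx.fill(CGRect(origin: .zero, size: size))
    }
}

let lightAvg = solidImage(.white).averageColor(in: CGRect(x: 0, y: 0, width: 10, height: 10))
let darkAvg = solidImage(UIColor(white: 0.1, alpha: 1)).averageColor(in: CGRect(x: 0, y: 0, width: 10, height: 10))

print(lightAvg?.isLight ?? false)   // true  -> contrastingTextColor is .black
print(darkAvg?.isLight ?? false)    // false -> contrastingTextColor is .white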
Here's an example using that:
struct ContentView: View {
    let image: UIImage = UIImage(named: "dark")!
    let text: String = "Hello Contrast"

    @State private var textColor: Color = .white

    var body: some View {
        GeometryReader { geo in
            ZStack {
                Image(uiImage: image)
                    .resizable()
                    .scaledToFit()
                Text(text)
                    .font(.system(size: 48, weight: .bold))
                    .background(
                        GeometryReader { textGeo in
                            Color.clear
                                .onAppear {
                                    updateTextColor(textFrame: textGeo.frame(in: .global),
                                                    imageFrame: geo.frame(in: .global),
                                                    img: image)
                                }
                        }
                    )
                    .foregroundColor(textColor)
            }
        }
    }

    private func updateTextColor(textFrame: CGRect, imageFrame: CGRect, img: UIImage) {
        // Map the label's rect into image pixel coordinates.
        // (This simple version assumes the displayed image fills imageFrame;
        // the AsyncImage version below accounts for aspect-fit letterboxing.)
        let scaleX = img.size.width / imageFrame.width
        let scaleY = img.size.height / imageFrame.height
        let rectInImage = CGRect(x: (textFrame.minX - imageFrame.minX) * scaleX,
                                 y: (textFrame.minY - imageFrame.minY) * scaleY,
                                 width: textFrame.width * scaleX,
                                 height: textFrame.height * scaleY)

        if let avg = img.averageColor(in: rectInImage) {
            textColor = avg.contrastingTextColor
        }
    }
}
Using this "light" image (the white flower) and this "dark" image (the violet flower), the text comes out black and white respectively.
Note: you will likely come across many images where this doesn't give you exactly what you're hoping for...
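For those borderline cases, one option (just a sketch, reusing the text and textColor from the example above) is to keep the computed color but pair it with a soft shadow in the opposite color, so either choice stays legible:
// Sketch: pair the computed color with a shadow in the opposite color,
// so busy or mid-brightness backgrounds stay readable either way.
Text(text)
    .font(.system(size: 48, weight: .bold))
    .foregroundColor(textColor)
    .shadow(color: textColor == .black ? .white : .black, radius: 4)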
Edit
It's not too tough to implement this with AsyncImage... use the same UIImage and UIColor extensions from above, then add:
extension Image {
    /// Renders this SwiftUI Image into a UIImage via a hosting controller.
    func asUIImage() -> UIImage? {
        let controller = UIHostingController(rootView: self)
        controller.view.layoutIfNeeded()

        let targetSize = controller.view.intrinsicContentSize
        controller.view.bounds = CGRect(origin: .zero, size: targetSize)
        controller.view.backgroundColor = .clear

        // Render at 1x so pixel coordinates match the reported size.
        let format = UIGraphicsImageRendererFormat()
        format.scale = 1.0

        let renderer = UIGraphicsImageRenderer(size: targetSize, format: format)
        return renderer.image { _ in
            controller.view.drawHierarchy(in: controller.view.bounds, afterScreenUpdates: true)
        }
    }
}
to convert a SwiftUI Image to a UIImage.
I don't work with SwiftUI, so it took a little trial and error to get things working. We can't set the text color until we have a downloaded image, and we want to make sure we're sampling the portion of the image that will be covered by the text's bounding box.
Here's an updated example View, using the URLs of the two sample images from this answer:
struct AsyncContentView: View {
    let text: String = "Hello\nAsync\nContrast"

    // Light image (white flower) results in black text
    let imageUrl = URL(string: "https://i.sstatic.net/t8UtOgyf.jpg")!
    // Dark image (violet flower) results in white text
    //let imageUrl = URL(string: "https://i.sstatic.net/Hl9n7BLO.jpg")!

    @State private var loadedUIImage: UIImage?
    @State private var textColor: Color = .white
    @State private var textFrame: CGRect?
    @State private var imageFrame: CGRect?

    var body: some View {
        GeometryReader { geo in
            ZStack {
                AsyncImage(url: imageUrl) { phase in
                    switch phase {
                    case .empty:
                        ProgressView()
                    case .success(let image):
                        GeometryReader { imageGeo in
                            image
                                .resizable()
                                .scaledToFit()
                                .frame(maxWidth: .infinity, maxHeight: .infinity)
                                .onAppear {
                                    loadedUIImage = image.asUIImage()
                                    imageFrame = imageGeo.frame(in: .global)
                                    if let uiImage = loadedUIImage, let tFrame = textFrame, let iFrame = imageFrame {
                                        updateTextColor(textFrame: tFrame,
                                                        imageFrame: iFrame,
                                                        img: uiImage)
                                    }
                                }
                                .onChange(of: imageGeo.frame(in: .global)) { oldFrame, newFrame in
                                    imageFrame = newFrame
                                    if let uiImage = loadedUIImage, let tFrame = textFrame {
                                        updateTextColor(textFrame: tFrame,
                                                        imageFrame: newFrame,
                                                        img: uiImage)
                                    }
                                }
                        }
                    case .failure:
                        Image(systemName: "exclamationmark.triangle.fill")
                            .resizable()
                            .scaledToFit()
                            .frame(maxWidth: .infinity, maxHeight: .infinity)
                    @unknown default:
                        EmptyView()
                    }
                }
                Text(text)
                    .font(.system(size: 64, weight: .bold))
                    .multilineTextAlignment(.center)
                    .foregroundColor(textColor)
                    .background(
                        GeometryReader { textGeo in
                            Color.clear
                                .onAppear {
                                    textFrame = textGeo.frame(in: .global)
                                }
                                .onChange(of: textGeo.frame(in: .global)) { oldFrame, newFrame in
                                    textFrame = newFrame
                                    if let uiImage = loadedUIImage, let iFrame = imageFrame {
                                        updateTextColor(textFrame: newFrame,
                                                        imageFrame: iFrame,
                                                        img: uiImage)
                                    }
                                }
                        }
                    )
            }
        }
    }

    private func updateTextColor(textFrame: CGRect, imageFrame: CGRect, img: UIImage) {
        // Calculate the size of the aspect-fit image within its frame
        let imageAspect = img.size.width / img.size.height
        let frameAspect = imageFrame.width / imageFrame.height

        var scaledWidth: CGFloat
        var scaledHeight: CGFloat
        var offsetX: CGFloat = 0
        var offsetY: CGFloat = 0

        if imageAspect > frameAspect {
            // Image is wider: fits to width, height is letterboxed
            scaledWidth = imageFrame.width
            scaledHeight = imageFrame.width / imageAspect
            offsetY = (imageFrame.height - scaledHeight) / 2
        } else {
            // Image is taller: fits to height, width is letterboxed
            scaledHeight = imageFrame.height
            scaledWidth = imageFrame.height * imageAspect
            offsetX = (imageFrame.width - scaledWidth) / 2
        }

        // Map the text bounds into the scaled image's coordinates
        let scaleX = img.size.width / scaledWidth
        let scaleY = img.size.height / scaledHeight
        let rectInImage = CGRect(x: (textFrame.minX - imageFrame.minX - offsetX) * scaleX,
                                 y: (textFrame.minY - imageFrame.minY - offsetY) * scaleY,
                                 width: textFrame.width * scaleX,
                                 height: textFrame.height * scaleY)

        if let avg = img.averageColor(in: rectInImage) {
            textColor = avg.contrastingTextColor
        }
    }
}
The results are the same, with slight layout changes (multi-line text and a vertically centered image).