I am working on a SwiftUI app that displays an AVCaptureVideoPreviewLayer and also implements the AVCaptureVideoDataOutputSampleBufferDelegate protocol to perform custom logic in captureOutput(_:didOutput:from:). The custom logic was working and updating my view as expected until I implemented the video preview layer; now only the preview layer updates within the view. Both adding the preview layer and updating the published variable happen inside calls to DispatchQueue.main.async. Is this appropriate?
I also suspect I may need to implement some logic within the updateUIViewController(_:context:) function of the UIViewControllerRepresentable struct I am using to display the video preview layer in my view. The [docs][1] for this function are not very helpful; can anyone provide tips on how it should be used?
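From other examples, my understanding is that makeUIViewController(context:) creates the hosted controller once, while updateUIViewController(_:context:) is called again on every SwiftUI render so current SwiftUI state can be pushed down into UIKit. A minimal sketch of that pattern, with hypothetical names (not my code):

import SwiftUI
import UIKit

// Hypothetical controller owning a label we want to drive from SwiftUI
final class LabelViewController: UIViewController {
    let label = UILabel()

    override func viewDidLoad() {
        super.viewDidLoad()
        label.frame = view.bounds
        view.addSubview(label)
    }
}

struct HostedLabel: UIViewControllerRepresentable {
    var text: String // state owned by the SwiftUI parent

    func makeUIViewController(context: Context) -> LabelViewController {
        LabelViewController() // created once per representable lifetime
    }

    func updateUIViewController(_ controller: LabelViewController, context: Context) {
        controller.label.text = text // re-applied whenever SwiftUI re-renders
    }
}

Here is the relevant part of my current code: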
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    ...
    // AVCaptureSession configuration entered, input added, establish preview layer:
    // Currently working on DispatchQueue(label: "VideoStreamSetupQueue")
    layer = AVCaptureVideoPreviewLayer(session: session)
    ...
    DispatchQueue.main.async {
        self.view.layer.addSublayer(self.layer)
    }

    // Establish output for luminosity calculation
    let videoOutput = AVCaptureVideoDataOutput()
    guard session.canAddOutput(videoOutput) else {
        print("Error creating video output")
        return
    }
    videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoStreamForCaptureOutputQueue"))
    session.addOutput(videoOutput)
    session.sessionPreset = .medium
    session.commitConfiguration()
    session.startRunning()
    ...

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // custom logic to calculate luminosity
        DispatchQueue.main.async {
            print(luminosity) // value changes as expected
            self.luminosityReading = luminosity // view not updated with printed value
        }
    }
Establishing a UIViewControllerRepresentable to display the video preview layer in a SwiftUI view:
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
    func makeUIViewController(context: Context) -> some UIViewController {
        return VideoStream()
    }

    func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
        // video preview layer works as expected
        // text unrelated to this struct (see below) is not updating
    }
}
Creating the view:
struct ContentView: View {
    @StateObject var videoStream = VideoStream()

    var body: some View {
        VStack {
            HostedVideoPreviewLayer()
            Text(String(format: "%.2f Lux", videoStream.luminosityReading))
                .font(.largeTitle)
                .padding()
        }
    }
}
Minimal Reproducible Example:
import Foundation
import UIKit
import AVKit
import AVFoundation
import SwiftUI
struct ContentView: View {
    @StateObject var videoStream = VideoStream()

    var body: some View {
        VStack {
            HostedVideoPreviewLayer()
            Text(String(format: "%.2f Lux", videoStream.luminosityReading))
                .font(.largeTitle)
                .padding()
        }
    }
}
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    private let session = AVCaptureSession()
    private let queue = DispatchQueue(label: "VideoStreamSetupQueue")
    private var layer = AVCaptureVideoPreviewLayer()
    var screenRect: CGRect!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Run authorization and session setup once, on the dedicated setup queue
        queue.async {
            self.authorizeCapture()
        }
    }
    func authorizeCapture() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized: // The user has previously granted access to the camera.
            beginCapture()
        case .notDetermined: // The user has not yet been asked for camera access.
            // Suspend the setup queue until the user responds to the prompt
            queue.suspend()
            AVCaptureDevice.requestAccess(for: .video) { granted in
                if granted {
                    self.beginCapture()
                }
                // Resume even when access is denied so the queue is not left suspended
                self.queue.resume()
            }
        default:
            return
        }
    }
    func beginCapture() {
        session.beginConfiguration()

        // Add the default video device as input
        guard
            let videoDevice = AVCaptureDevice.default(for: .video),
            let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
            session.canAddInput(videoDeviceInput)
        else {
            print("Camera selection failed")
            return
        }
        session.addInput(videoDeviceInput)

        // Establish preview layer
        screenRect = UIScreen.main.bounds
        layer = AVCaptureVideoPreviewLayer(session: session)
        layer.frame = CGRect(x: 0, y: 0, width: screenRect.size.width, height: 300)
        layer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        layer.connection?.videoOrientation = .portrait
        DispatchQueue.main.async {
            self.view.layer.addSublayer(self.layer)
        }

        // Establish output for luminosity calculation
        let videoOutput = AVCaptureVideoDataOutput()
        guard session.canAddOutput(videoOutput) else {
            print("Error creating video output")
            return
        }
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoStreamForCaptureOutputQueue"))
        session.addOutput(videoOutput)
        session.sessionPreset = .medium
        session.commitConfiguration()
        session.startRunning()
    }

    // From: https://stackoverflow.com/questions/41921326/how-to-get-light-value-from-avfoundation/46842115#46842115
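    // As I understand the linked answer, this applies the reflected-light meter
    // relation: illuminance ≈ (K * N²) / (t * S), where N is the f-number,
    // t the exposure time in seconds, S the ISO speed, and K an empirical
    // calibration constant (50 below).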
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Retrieving EXIF data of camera frame buffer
        let rawMetadata = CMCopyDictionaryOfAttachments(allocator: nil, target: sampleBuffer, attachmentMode: CMAttachmentMode(kCMAttachmentMode_ShouldPropagate))
        let metadata = CFDictionaryCreateMutableCopy(nil, 0, rawMetadata) as NSMutableDictionary
        let exifData = metadata.value(forKey: "{Exif}") as? NSMutableDictionary
        let FNumber: Double = exifData?["FNumber"] as! Double
        let ExposureTime: Double = exifData?["ExposureTime"] as! Double
        let ISOSpeedRatingsArray = exifData!["ISOSpeedRatings"] as? NSArray
        let ISOSpeedRatings: Double = ISOSpeedRatingsArray![0] as! Double
        let CalibrationConstant: Double = 50

        // Calculating the luminosity
        let luminosity: Double = (CalibrationConstant * FNumber * FNumber) / (ExposureTime * ISOSpeedRatings)
        DispatchQueue.main.async {
            print(luminosity) // value changes as expected
            self.luminosityReading = luminosity // view not updated with recent value
        }
    }
    override func willTransition(to newCollection: UITraitCollection, with coordinator: UIViewControllerTransitionCoordinator) {
        screenRect = UIScreen.main.bounds
        layer.frame = CGRect(x: 0, y: 0, width: screenRect.size.width, height: screenRect.size.height)

        switch UIDevice.current.orientation {
        // Home button on top
        case UIDeviceOrientation.portraitUpsideDown:
            layer.connection?.videoOrientation = .portraitUpsideDown
        // Home button on right
        case UIDeviceOrientation.landscapeLeft:
            layer.connection?.videoOrientation = .landscapeRight
        // Home button on left
        case UIDeviceOrientation.landscapeRight:
            layer.connection?.videoOrientation = .landscapeLeft
        // Home button at bottom
        case UIDeviceOrientation.portrait:
            layer.connection?.videoOrientation = .portrait
        default:
            break
        }
    }
}
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
    func makeUIViewController(context: Context) -> some UIViewController {
        return VideoStream()
    }

    func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
        // video preview layer works as expected
        // text unrelated to this struct is not updating
    }
}
[1]: https://developer.apple.com/documentation/swiftui/uiviewcontrollerrepresentable/updateuiviewcontroller(_:context:)
My working solution instead passes the AVCaptureSession created in VideoStream as a parameter to a custom view VideoPreviewHolder. I use a state object to ensure the session is available (if not, a progress indicator is displayed) and then display the preview layer. I hope this may be useful to others:
class VideoPreview: UIView {
    private var session: AVCaptureSession!

    init(runningSession session: AVCaptureSession) {
        super.init(frame: .zero)
        self.session = session
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    // Back this view with an AVCaptureVideoPreviewLayer instead of a plain CALayer
    override class var layerClass: AnyClass {
        AVCaptureVideoPreviewLayer.self
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    override func didMoveToSuperview() {
        super.didMoveToSuperview()
        if self.superview != nil {
            self.videoPreviewLayer.session = self.session
            self.videoPreviewLayer.videoGravity = .resizeAspect
        }
    }
}
struct VideoPreviewHolder: UIViewRepresentable {
    public var runningSession: AVCaptureSession

    typealias UIViewType = VideoPreview

    func makeUIView(context: Context) -> VideoPreview {
        VideoPreview(runningSession: runningSession)
    }

    func updateUIView(_ uiView: VideoPreview, context: Context) {
    }
}
struct ContentView: View {
    @StateObject var videoStream = VideoStream() // this class definition is in the original question body

    var body: some View {
        if !videoStream.cameraAccess {
            // request access
        } else {
            NavigationView {
                VStack {
                    if videoStream.session != nil {
                        VideoPreviewHolder(runningSession: videoStream.session)
                            .frame(minWidth: 0, idealWidth: .infinity, maxWidth: .infinity, minHeight: 0, idealHeight: .infinity, maxHeight: .infinity, alignment: .center)
                    } else {
                        ProgressView()
                    }
                    ...
                }
            }
        }
    }
}
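Note: ContentView above assumes VideoStream now publishes its authorization state and exposes the session, neither of which appears in the class from the question. A rough sketch of the assumed shape (the property wrappers and types are my guess, not code from the question):

class VideoStream: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    @Published var cameraAccess = false        // assumed: set from the AVCaptureDevice authorization callback
    @Published var session: AVCaptureSession!  // assumed: nil until configuration completes, so ContentView shows a ProgressView
    // ... authorization, session configuration, and captureOutput(_:didOutput:from:)
    // as in the question, minus the preview-layer code, which now lives in VideoPreview
}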