I have a simple SwiftUI app using the SwiftUI life cycle, and I am trying to use AVFoundation to take a photo automatically. Eventually I will trigger this based on conditions or a timer, but for this example I'd just like to take a photo on startup and display it (NOT a preview layer). I do not want any user action to be required.
I clearly don't understand the proper setup and capture.
import SwiftUI
import AVFoundation

struct ContentView: View {
    let dataStore = DataStore.shared
    @State private var captureSession = AVCaptureSession()
    @State private var backCamera: AVCaptureDevice?
    @State private var frontCamera: AVCaptureDevice?
    @State private var currentCamera: AVCaptureDevice?
    @State private var photoOutput: AVCapturePhotoOutput?
    @State private var capturedImage: UIImage?

    var body: some View {
        VStack {
            Text("Take a Photo Automatically")
                .padding()
            ZStack {
                RoundedRectangle(cornerRadius: 0)
                    .stroke(Color.blue, lineWidth: 4)
                    .frame(width: 320, height: 240, alignment: .center)
                Image(uiImage: dataStore.capturedImage)
            }
            Spacer()
        }
        .onAppear {
            if UIImagePickerController.isSourceTypeAvailable(.camera) {
                self.setupCaptureSession()
                self.setupDevices()
                self.setupInputOutput()
                self.startRunningCaptureSession()
            } else {
                print("No Camera is Available")
            }
        }
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }//setupCaptureSession

    func setupDevices() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }//if else
        }//for in
        currentCamera = frontCamera
    }//setupDevices

    func setupInputOutput() {
        do {
            //you only get here if there is a camera ( ! ok )
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: { (success, error) in
            })
            captureSession.addOutput(photoOutput!)
            captureSession.commitConfiguration()
        } catch {
            print("Error creating AVCaptureDeviceInput:", error)
        }
    }//setupInputOutput

    func startRunningCaptureSession() {
        let settings = AVCapturePhotoSettings()
        captureSession.startRunning()
        photoOutput?.capturePhoto(with: settings, delegate: PhotoDelegate())
    }//startRunningCaptureSession
}//struct

class PhotoDelegate: NSObject, AVCapturePhotoCaptureDelegate {
    let dataStore = DataStore.shared
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else {
            return
        }
        dataStore.capturedImage = image
    }
}//photo delegate

class DataStore {
    static let shared = DataStore()
    private init() {}
    @Published var capturedImage: UIImage = UIImage()
}//dataStore
Any guidance would be appreciated. Xcode 12.5.1, iOS 14.5.
Later edit. After correcting my approach with the changes provided by Philip Dukhov, I still had a problem: the images were still very dark, to the point that objects were unrecognizable. After a lot of trial and error, it seems that the camera needs a very small amount of time to set up before the capture. While it does not seem like sound programming practice, I set up a slight delay before the capture. As little as 0.1 seconds seems to be enough. Starting the capture is now:
func startRunningCaptureSession() {
    let settings = AVCapturePhotoSettings()
    captureSession.startRunning()
    //don't know why this is needed - but it works. Low number of tests at 0.1 all work
    DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
        self.photoOutput?.capturePhoto(with: settings, delegate: self)
    }
}//startRunningCaptureSession
If you know a better way to do this - please let me know.
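In the meantime, one direction I'm experimenting with is sketched below: instead of a hard-coded delay, capture once the session itself reports that it is running, and call startRunning() off the main thread, since Apple documents it as a blocking call. This is only a sketch, not a verified fix - the first frames may still be under-exposed while the camera meters, so the dark-image issue may need a settling delay regardless:

func startRunningCaptureSession() {
    //Capture only once the session reports that it is actually running.
    //For a one-shot capture like this, hold the returned token and remove
    //the observer after the photo has been taken.
    NotificationCenter.default.addObserver(
        forName: AVCaptureSession.didStartRunningNotification,
        object: captureSession,
        queue: .main
    ) { [weak self] _ in
        guard let self = self else { return }
        self.photoOutput?.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
    }
    //startRunning() blocks while the hardware spins up, so keep it off the main thread
    DispatchQueue.global(qos: .userInitiated).async {
        self.captureSession.startRunning()
    }
}//startRunningCaptureSession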
The main problem is that you create a PhotoDelegate but do not store it. In iOS, the delegate object is usually stored as a weak reference to prevent a circular reference / retain cycle, so your delegate is deallocated before the capture ever finishes. There is a second problem: DataStore declares @Published but does not conform to ObservableObject, and the view never observes it, so even a successful capture would not refresh the Image.
You can fix the first problem by simply creating another property in your view, but instead I suggest you create a model class. If you're doing something unrelated to the view itself, that's a sign that you're better off moving it to some other place, such as an ObservableObject. You can also make the model your delegate, so you don't have to create a separate object and use a singleton: needing one here is another sign that something is off.
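To make the failure concrete, here is the offending line from your code: nothing else retains the inline delegate, so it dies before any callback can fire (illustration of the bug, not a fix):

//AVCapturePhotoOutput holds its delegate weakly, and nothing else retains
//this PhotoDelegate, so it is gone before didFinishProcessingPhoto can run:
photoOutput?.capturePhoto(with: settings, delegate: PhotoDelegate())

Moving the capture code into an ObservableObject model that is also the delegate fixes both issues: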
class CaptureModel: NSObject, ObservableObject {
    let captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var currentCamera: AVCaptureDevice?
    @Published var capturedImage: UIImage?

    override init() {
        super.init()
        setupCaptureSession()
        setupDevices()
        setupInputOutput()
    }

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }//setupCaptureSession

    func setupDevices() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }//if else
        }//for in
        currentCamera = frontCamera
    }//setupDevices

    func setupInputOutput() {
        do {
            //you only get here if there is a camera ( ! ok )
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: { (success, error) in
            })
            captureSession.addOutput(photoOutput!)
            captureSession.commitConfiguration()
        } catch {
            print("Error creating AVCaptureDeviceInput:", error)
        }
    }//setupInputOutput

    func startRunningCaptureSession() {
        let settings = AVCapturePhotoSettings()
        captureSession.startRunning()
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }//startRunningCaptureSession
    func stopRunningCaptureSession() {
        captureSession.stopRunning()
    }//stopRunningCaptureSession
}

extension CaptureModel: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else {
            return
        }
        capturedImage = image
    }
}
struct ContentView: View {
    @StateObject var model = CaptureModel()

    var body: some View {
        VStack {
            Text("Take a Photo Automatically")
                .padding()
            ZStack {
                RoundedRectangle(cornerRadius: 0)
                    .stroke(Color.blue, lineWidth: 4)
                    .frame(width: 320, height: 240, alignment: .center)
                model.capturedImage.map { capturedImage in
                    Image(uiImage: capturedImage)
                }
            }
            Spacer()
        }
        .onAppear {
            if UIImagePickerController.isSourceTypeAvailable(.camera) {
                model.startRunningCaptureSession()
            } else {
                print("No Camera is Available")
            }
        }
        .onDisappear {
            model.stopRunningCaptureSession()
        }
    }
}//struct
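One more detail worth checking: none of this will produce an image unless the app actually has camera permission. Info.plist needs an NSCameraUsageDescription entry, and you can request access explicitly before starting the session. A sketch, assuming you call it from onAppear where model is in scope:

//Ask for camera permission up front and start the session only if granted.
//Requires an NSCameraUsageDescription entry in Info.plist.
AVCaptureDevice.requestAccess(for: .video) { granted in
    guard granted else {
        print("Camera access denied")
        return
    }
    //The completion handler may run on an arbitrary queue
    DispatchQueue.main.async {
        model.startRunningCaptureSession()
    }
}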