ios, coreml, resnet, xcode13, apple-vision

Xcode 13 / SwiftUI: Object detection app for iOS


I am building an object detection app using SwiftUI/Xcode, and for image classification I used Resnet50. But there is an error: [init() is deprecated][1]. My code is below.

How do I solve this issue? I am a beginner, so please keep it simple.

    // camera
    let captureSession = AVCaptureSession()

    guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
    guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(input)
    
    captureSession.startRunning()
    
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame
    
    
    // The camera is now created!
    
    view.addSubview(belowView)
    
    belowView.clipsToBounds = true
    belowView.layer.cornerRadius = 15.0
    belowView.layer.maskedCorners = [.layerMaxXMinYCorner, .layerMinXMinYCorner]
    
    
    let  dataOutput = AVCaptureVideoDataOutput()
    dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureSession.addOutput(dataOutput)
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    guard let model = try? VNCoreMLModel(for: model) else { return } // error: cannot find 'model' in scope
    let request = VNCoreMLRequest(model: model) { (finishedReq, err) in

        guard let results = finishedReq.results as? [VNClassificationObservation] else {return}
        guard let firstObservation = results.first else {return}
        
        let name: String = firstObservation.identifier
        let acc: Int = Int(firstObservation.confidence * 100)
        
        DispatchQueue.main.async {
            self.objectNameLabel.text = name
            self.accuracyLabel.text = "Accuracy: \(acc)%"
            
        }
    }
    
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    
    
}

}


Solution

  • You now need to pass a model configuration (`MLModelConfiguration`) when you initialize the model. Something like

    static func createImageClassifier() -> VNCoreMLModel {
        // Use a default model configuration.
        let defaultConfig = MLModelConfiguration()
    
        // Create an instance of the image classifier's wrapper class.
        let imageClassifierWrapper = try? Resnet50(configuration: defaultConfig)
    
        guard let imageClassifier = imageClassifierWrapper else {
            fatalError("App failed to create an image classifier model instance.")
        }
    
        // Get the underlying model instance.
        let imageClassifierModel = imageClassifier.model
    
        // Create a Vision instance using the image classifier's model instance.
        guard let imageClassifierVisionModel = try? VNCoreMLModel(for: imageClassifierModel) else {
            fatalError("App failed to create a `VNCoreMLModel` instance.")
        }
    
        return imageClassifierVisionModel
    }
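
    With that helper in place, build the Vision model once and reuse it inside `captureOutput` instead of the `model` that was out of scope. A minimal sketch, assuming the helper lives in the same view controller (the property name `visionModel` is just an example; the labels are the ones from your code):

    // Built once and reused for every frame; `visionModel` is an example name.
    static let visionModel: VNCoreMLModel = createImageClassifier()

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

        let request = VNCoreMLRequest(model: Self.visionModel) { (finishedReq, err) in
            guard let results = finishedReq.results as? [VNClassificationObservation],
                  let firstObservation = results.first else { return }

            DispatchQueue.main.async {
                self.objectNameLabel.text = firstObservation.identifier
                self.accuracyLabel.text = "Accuracy: \(Int(firstObservation.confidence * 100))%"
            }
        }

        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }

    Creating the model once like this also avoids rebuilding Resnet50 for every camera frame.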
    

    Apple has really good sample code that gets you started.

    https://developer.apple.com/documentation/vision/classifying_images_with_vision_and_core_ml