My app imports video that the user selects through the system file picker, which I then load as an AVAsset:
@IBAction func handleImportVideoButton(_ sender: Any) {
    let documentPicker = UIDocumentPickerViewController(forOpeningContentTypes: [.movie], asCopy: true)
    documentPicker.delegate = self
    present(documentPicker, animated: true)
}

// UIDocumentPickerDelegate callback.
func documentPicker(_ controller: UIDocumentPickerViewController, didPickDocumentsAt urls: [URL]) {
    guard let url = urls.first else {
        return
    }
    model.recordedVideoSource = AVAsset(url: url)
}
How do I then convert this AVAsset into CMSampleBuffer frames? The end goal is to convert the CMSampleBuffer frames into CGImages so I can then perform machine learning analysis on each image frame.
This is untested, but it should give you the gist of how to go about it:
// Read from the asset you stored in the picker callback, not an empty one.
guard let asset = model.recordedVideoSource else {
    return
}
let reader = try AVAssetReader(asset: asset)

guard let track = asset.tracks(withMediaType: .video).first else {
    return
}

// Ask for decompressed BGRA frames so each sample buffer carries a pixel buffer.
let outputSettings: [String: Any] = [
    kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
]
let trackOutput = AVAssetReaderTrackOutput(track: track, outputSettings: outputSettings)

reader.add(trackOutput)
reader.startReading()

// Iterate over all sample buffers; copyNextSampleBuffer() returns nil at the end.
while let sample = trackOutput.copyNextSampleBuffer() {
    // Process each CMSampleBuffer here.
}
// Optionally check reader.status and reader.error afterwards to detect failures.
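Then, to get from each CMSampleBuffer to a CGImage, one option is to pull out the underlying CVImageBuffer and render it through Core Image. A minimal sketch, assuming the frames were decoded with the BGRA output settings above (cgImage(from:) is just a hypothetical helper name):

import AVFoundation
import CoreImage

// Create the context once; it is expensive to construct per frame.
let ciContext = CIContext()

// Converts a decoded sample buffer into a CGImage, or nil if the
// buffer carries no pixel data.
func cgImage(from sampleBuffer: CMSampleBuffer) -> CGImage? {
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return nil
    }
    let ciImage = CIImage(cvImageBuffer: imageBuffer)
    return ciContext.createCGImage(ciImage, from: ciImage.extent)
}

Call it from inside the read loop, e.g. if let frame = cgImage(from: sample) { /* run your analysis on frame */ }.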