iOS demo app. #557

Closed · wants to merge 1 commit
[Some files in this pull request are not shown below: large diffs are not rendered by default, and several files could not be displayed.]

@@ -0,0 +1,18 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

import SwiftUI

@main
struct App: SwiftUI.App {
  var body: some Scene {
    WindowGroup {
      ContentView()
    }
  }
}
@@ -0,0 +1,130 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

import AVFoundation
import SwiftUI

enum CameraControllerError: Error {
  case authorization(String)
  case capture(String)
  case setup(String)
}

class CameraController: NSObject, ObservableObject, AVCapturePhotoCaptureDelegate {
  let captureSession = AVCaptureSession()
  private var photoOutput = AVCapturePhotoOutput()
  private var timer: Timer?
  private var callback: ((Result<UIImage, Error>) -> Void)?

  /// Requests camera access, configures the capture session, and then captures
  /// a photo every `interval` seconds, delivering each frame (or error) to
  /// `callback` on the main queue.
  func startCapturing(withTimeInterval interval: TimeInterval,
                      callback: @escaping (Result<UIImage, Error>) -> Void) {
    authorize { error in
      if let error {
        DispatchQueue.main.async {
          callback(.failure(error))
        }
        return
      }
      self.setup { error in
        if let error {
          DispatchQueue.main.async {
            callback(.failure(error))
          }
          return
        }
        self.captureSession.startRunning()
        DispatchQueue.main.async {
          self.callback = callback
          self.timer = Timer.scheduledTimer(withTimeInterval: interval, repeats: true) { _ in
            self.photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
          }
        }
      }
    }
  }

  private func authorize(_ completion: @escaping (Error?) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
      DispatchQueue.global(qos: .userInitiated).async {
        completion(nil)
      }
    case .notDetermined:
      AVCaptureDevice.requestAccess(for: .video) { granted in
        DispatchQueue.global(qos: .userInitiated).async {
          if granted {
            completion(nil)
          } else {
            completion(CameraControllerError.authorization("Camera access denied"))
          }
        }
      }
    default:
      DispatchQueue.global(qos: .userInitiated).async {
        completion(CameraControllerError.authorization("Camera access denied"))
      }
    }
  }

  private func setup(_ callback: (Error?) -> Void) {
    guard let videoCaptureDevice = AVCaptureDevice.default(for: .video)
    else {
      callback(CameraControllerError.setup("Cannot get video capture device"))
      return
    }
    let videoInput: AVCaptureDeviceInput
    do {
      videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
    } catch {
      callback(CameraControllerError.setup("Cannot set up video input: \(error)"))
      return
    }
    if captureSession.canAddInput(videoInput) {
      captureSession.addInput(videoInput)
    } else {
      callback(CameraControllerError.setup("Cannot add video input"))
      return
    }
    if captureSession.canAddOutput(photoOutput) {
      captureSession.addOutput(photoOutput)
    } else {
      callback(CameraControllerError.setup("Cannot add photo output"))
      return
    }
    callback(nil)
  }

  func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    guard let callback = self.callback else {
      print("No image capturing callback set")
      return
    }
    if let error {
      callback(.failure(CameraControllerError.capture("Image capture error: \(error)")))
      return
    }
    guard let imageData = photo.fileDataRepresentation(),
          let image = UIImage(data: imageData),
          let cgImage = image.cgImage
    else {
      callback(.failure(CameraControllerError.capture("Couldn't get image data")))
      return
    }
    // Map the device orientation to an image orientation so the photo is
    // upright regardless of how the phone is held.
    var orientation = UIImage.Orientation.up
    switch UIDevice.current.orientation {
    case .portrait:
      orientation = .right
    case .portraitUpsideDown:
      orientation = .left
    case .landscapeRight:
      orientation = .down
    default:
      break
    }
    callback(.success(UIImage(cgImage: cgImage, scale: image.scale, orientation: orientation)))
  }
}
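
Note that capturing requires an NSCameraUsageDescription entry in the app's Info.plist; without it, iOS terminates the app as soon as the camera is accessed. A minimal sketch of driving the controller outside the SwiftUI layer is shown below; the FrameLogger type and its logging are illustrative only and not part of this PR.

// A minimal usage sketch, assuming NSCameraUsageDescription is set in
// Info.plist. FrameLogger is a hypothetical consumer for illustration.
import UIKit

final class FrameLogger {
  private let camera = CameraController()

  func start() {
    camera.startCapturing(withTimeInterval: 1.0) { result in
      switch result {
      case .success(let image):
        print("Captured frame: \(Int(image.size.width))x\(Int(image.size.height))")
      case .failure(let error):
        print("Capture failed: \(error)")
      }
    }
  }
}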
@@ -0,0 +1,36 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

import AVFoundation
import SwiftUI

struct CameraPreview: UIViewRepresentable {
  let captureSession: AVCaptureSession

  func makeUIView(context: Context) -> UIView {
    let view = CameraView(frame: UIScreen.main.bounds)
    view.videoPreviewLayer?.session = captureSession
    return view
  }

  func updateUIView(_ uiView: UIView, context: Context) {
    if let view = uiView as? CameraView {
      view.videoPreviewLayer?.frame = uiView.bounds
    }
  }
}

final class CameraView: UIView {
  override class var layerClass: AnyClass {
    return AVCaptureVideoPreviewLayer.self
  }

  var videoPreviewLayer: AVCaptureVideoPreviewLayer? {
    return layer as? AVCaptureVideoPreviewLayer
  }
}
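
Because CameraView overrides layerClass, AVCaptureVideoPreviewLayer backs the view directly and resizes with it. If the full-screen preview should crop rather than letterbox the camera feed, the layer's videoGravity can be set when the view is created; a hypothetical variant (not part of this PR) is sketched below.

// Hypothetical variant of CameraPreview that crops the feed to fill the
// screen instead of letterboxing it; not part of this PR.
import AVFoundation
import SwiftUI

struct FillCameraPreview: UIViewRepresentable {
  let captureSession: AVCaptureSession

  func makeUIView(context: Context) -> UIView {
    let view = CameraView(frame: UIScreen.main.bounds)
    view.videoPreviewLayer?.session = captureSession
    view.videoPreviewLayer?.videoGravity = .resizeAspectFill  // crop to fill
    return view
  }

  func updateUIView(_ uiView: UIView, context: Context) {
    (uiView as? CameraView)?.videoPreviewLayer?.frame = uiView.bounds
  }
}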
@@ -0,0 +1,76 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

import ImageClassification
import MobileNetClassifier
import SwiftUI

enum Mode: String, CaseIterable {
  case xnnpack = "XNNPACK"
  case benchmark = "Benchmark"
  case coreML = "Core ML"
}

class ClassificationController: ObservableObject {
  @AppStorage("mode") var mode: Mode = .benchmark
  @Published var classifications: [Classification] = []
  @Published var elapsedTime: TimeInterval = 0.0
  @Published var isRunning = false

  private let queue = DispatchQueue(label: "org.pytorch.executorch.demo", qos: .userInitiated)
  private var classifier: ImageClassification?
  private var currentMode: Mode = .benchmark

  func classify(_ image: UIImage) {
    guard !isRunning else {
      print("Dropping frame")
      return
    }
    isRunning = true

    if currentMode != mode {
      currentMode = mode
      classifier = nil
    }
    queue.async {
      var classifications: [Classification] = []
      var elapsedTime: TimeInterval = -1
      do {
        if self.classifier == nil {
          self.classifier = try self.createClassifier(for: self.currentMode)
        }
        let startTime = CFAbsoluteTimeGetCurrent()
        classifications = try self.classifier?.classify(image: image) ?? []
        elapsedTime = (CFAbsoluteTimeGetCurrent() - startTime) * 1000
      } catch {
        print("Error classifying image: \(error)")
      }
      DispatchQueue.main.async {
        self.classifications = classifications
        self.elapsedTime = elapsedTime
        self.isRunning = false
      }
    }
  }

  private func createClassifier(for mode: Mode) throws -> ImageClassification? {
    let modelFileName: String
    switch mode {
    case .xnnpack:
      modelFileName = "mv3_xnnpack_fp32"
    case .benchmark:
      modelFileName = "mv3"
    case .coreML:
      modelFileName = "mv3_coreml"
    }
    guard let modelFilePath = Bundle.main.path(forResource: modelFileName, ofType: "pte"),
          let labelsFilePath = Bundle.main.path(forResource: "imagenet_classes", ofType: "txt")
    else { return nil }
    return try MobileNetClassifier(modelFilePath: modelFilePath, labelsFilePath: labelsFilePath)
  }
}
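
ImageClassification and MobileNetClassifier come from local packages whose sources are not rendered on this page. Inferred only from the call sites above, the interfaces presumably look roughly like the sketch below; the field names on Classification are assumptions, not the PR's actual definitions.

// Hypothetical sketch of the interfaces used above, inferred from the call
// sites only; the real packages in this PR may differ.
import UIKit

public struct Classification {
  public let label: String      // assumed field name
  public let confidence: Float  // assumed field name
}

public protocol ImageClassification {
  // Matches `classifier?.classify(image:)` above.
  func classify(image: UIImage) throws -> [Classification]
}

public class MobileNetClassifier: ImageClassification {
  // Matches `MobileNetClassifier(modelFilePath:labelsFilePath:)` above.
  public init(modelFilePath: String, labelsFilePath: String) throws {
    // Load the .pte model and the label list here.
  }

  public func classify(image: UIImage) throws -> [Classification] {
    return []  // placeholder
  }
}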
@@ -0,0 +1,67 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

import AVFoundation
import SwiftUI

struct ContentView: View {
  @StateObject private var cameraController = CameraController()
  @StateObject private var classificationController = ClassificationController()

  var body: some View {
    ZStack {
      cameraPreview
      controlPanel
    }
  }

  private var cameraPreview: some View {
    CameraPreview(captureSession: cameraController.captureSession)
      .aspectRatio(contentMode: .fill)
      .edgesIgnoringSafeArea(.all)
      .onAppear(perform: startCapturing)
      .onDisappear(perform: stopCapturing)
  }

  private var controlPanel: some View {
    VStack(spacing: 0) {
      TopBar(title: "ExecuTorch Demo")
      ClassificationLabelView(controller: classificationController)
      Spacer()
      ClassificationTimeView(controller: classificationController)
      ModeSelector(controller: classificationController)
    }
  }

  private func startCapturing() {
    UIApplication.shared.isIdleTimerDisabled = true
    cameraController.startCapturing(withTimeInterval: 1.0) { result in
      switch result {
      case .success(let image):
        self.classificationController.classify(image)
      case .failure(let error):
        self.handleError(error)
      }
    }
  }

  private func stopCapturing() {
    UIApplication.shared.isIdleTimerDisabled = false
  }

  private func handleError(_ error: Error) {
    stopCapturing()
    print(error)
  }
}

struct ContentView_Previews: PreviewProvider {
  static var previews: some View {
    ContentView()
  }
}
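
TopBar, ClassificationLabelView, ClassificationTimeView, and ModeSelector are defined in files that are not rendered on this page. The placeholders below are hypothetical, sketched only so ContentView can be read (and compiled) in isolation; the real views in the PR will differ.

// Hypothetical placeholders for the subviews referenced above; the real
// implementations ship elsewhere in this PR.
import SwiftUI

struct TopBar: View {
  let title: String
  var body: some View {
    Text(title).font(.headline).padding()
  }
}

struct ClassificationLabelView: View {
  @ObservedObject var controller: ClassificationController
  var body: some View {
    // `label` is an assumed field of Classification.
    Text(controller.classifications.first?.label ?? "")
  }
}

struct ClassificationTimeView: View {
  @ObservedObject var controller: ClassificationController
  var body: some View {
    Text(String(format: "%.2f ms", controller.elapsedTime))
  }
}

struct ModeSelector: View {
  @ObservedObject var controller: ClassificationController
  var body: some View {
    Picker("Mode", selection: $controller.mode) {
      ForEach(Mode.allCases, id: \.self) { mode in
        Text(mode.rawValue).tag(mode)
      }
    }
    .pickerStyle(.segmented)
    .padding()
  }
}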