general: fixes, log updates

This commit is contained in:
Michael Freno
2026-01-16 01:07:35 -05:00
parent e5646a192f
commit 4ae8d77dab
5 changed files with 345 additions and 259 deletions

View File

@@ -55,38 +55,38 @@ class EnforceModeService: ObservableObject {
// If settings say it's enabled AND camera is authorized, mark as enabled
if settingsEnabled && cameraService.isCameraAuthorized {
isEnforceModeEnabled = true
print("✓ Enforce mode initialized as enabled (camera authorized)")
logDebug("✓ Enforce mode initialized as enabled (camera authorized)")
} else {
isEnforceModeEnabled = false
print("🔒 Enforce mode initialized as disabled")
logDebug("🔒 Enforce mode initialized as disabled")
}
}
func enableEnforceMode() async {
print("🔒 enableEnforceMode called")
logDebug("🔒 enableEnforceMode called")
guard !isEnforceModeEnabled else {
print("⚠️ Enforce mode already enabled")
logError("⚠️ Enforce mode already enabled")
return
}
let cameraService = CameraAccessService.shared
if !cameraService.isCameraAuthorized {
do {
print("🔒 Requesting camera permission...")
logDebug("🔒 Requesting camera permission...")
try await cameraService.requestCameraAccess()
} catch {
print("⚠️ Failed to get camera permission: \(error.localizedDescription)")
logError("⚠️ Failed to get camera permission: \(error.localizedDescription)")
return
}
}
guard cameraService.isCameraAuthorized else {
print("❌ Camera permission denied")
logError("❌ Camera permission denied")
return
}
isEnforceModeEnabled = true
print("✓ Enforce mode enabled (camera will activate before lookaway reminders)")
logDebug("✓ Enforce mode enabled (camera will activate before lookaway reminders)")
}
func disableEnforceMode() {
@@ -95,7 +95,7 @@ class EnforceModeService: ObservableObject {
stopCamera()
isEnforceModeEnabled = false
userCompliedWithBreak = false
print("✓ Enforce mode disabled")
logDebug("✓ Enforce mode disabled")
}
func setTimerEngine(_ engine: TimerEngine) {
@@ -118,23 +118,23 @@ class EnforceModeService: ObservableObject {
guard isEnforceModeEnabled else { return }
guard !isCameraActive else { return }
print("👁️ Starting camera for lookaway reminder (T-\(secondsRemaining)s)")
logDebug("👁️ Starting camera for lookaway reminder (T-\(secondsRemaining)s)")
do {
try await eyeTrackingService.startEyeTracking()
isCameraActive = true
lastFaceDetectionTime = Date() // Reset grace period
startFaceDetectionTimer()
print("✓ Camera active")
logDebug("✓ Camera active")
} catch {
print("⚠️ Failed to start camera: \(error.localizedDescription)")
logError("⚠️ Failed to start camera: \(error.localizedDescription)")
}
}
func stopCamera() {
guard isCameraActive else { return }
print("👁️ Stopping camera")
logDebug("👁️ Stopping camera")
eyeTrackingService.stopEyeTracking()
isCameraActive = false
userCompliedWithBreak = false
@@ -191,7 +191,7 @@ class EnforceModeService: ObservableObject {
// If person has not been detected for too long, temporarily disable enforce mode
if timeSinceLastDetection > faceDetectionTimeout {
print(
logDebug(
"⏰ Person not detected for \(faceDetectionTimeout)s. Temporarily disabling enforce mode."
)
disableEnforceMode()
@@ -210,7 +210,7 @@ class EnforceModeService: ObservableObject {
guard isEnforceModeEnabled else { return }
guard !isCameraActive else { return }
print("🧪 Starting test mode")
logDebug("🧪 Starting test mode")
isTestMode = true
do {
@@ -218,9 +218,9 @@ class EnforceModeService: ObservableObject {
isCameraActive = true
lastFaceDetectionTime = Date() // Reset grace period
startFaceDetectionTimer()
print("✓ Test mode camera active")
logDebug("✓ Test mode camera active")
} catch {
print("⚠️ Failed to start test mode camera: \(error.localizedDescription)")
logError("⚠️ Failed to start test mode camera: \(error.localizedDescription)")
isTestMode = false
}
}
@@ -228,7 +228,7 @@ class EnforceModeService: ObservableObject {
func stopTestMode() {
guard isTestMode else { return }
print("🧪 Stopping test mode")
logDebug("🧪 Stopping test mode")
stopCamera()
isTestMode = false
}

View File

@@ -14,11 +14,11 @@
// - Efficient contour detection with union-find
//
import CoreImage
import Vision
import Accelerate
import CoreImage
import ImageIO
import UniformTypeIdentifiers
import Vision
struct PupilPosition: Equatable, Sendable {
let x: CGFloat
@@ -37,13 +37,13 @@ final class PupilCalibration: @unchecked Sendable {
private let targetFrames = 20
private var thresholdsLeft: [Int] = []
private var thresholdsRight: [Int] = []
var isComplete: Bool {
lock.lock()
defer { lock.unlock() }
return thresholdsLeft.count >= targetFrames && thresholdsRight.count >= targetFrames
}
func threshold(forSide side: Int) -> Int {
lock.lock()
defer { lock.unlock() }
@@ -51,7 +51,7 @@ final class PupilCalibration: @unchecked Sendable {
guard !thresholds.isEmpty else { return 50 }
return thresholds.reduce(0, +) / thresholds.count
}
func evaluate(eyeData: UnsafePointer<UInt8>, width: Int, height: Int, side: Int) {
let bestThreshold = findBestThreshold(eyeData: eyeData, width: width, height: height)
lock.lock()
@@ -62,16 +62,16 @@ final class PupilCalibration: @unchecked Sendable {
thresholdsRight.append(bestThreshold)
}
}
private func findBestThreshold(eyeData: UnsafePointer<UInt8>, width: Int, height: Int) -> Int {
let averageIrisSize = 0.48
var bestThreshold = 50
var bestDiff = Double.greatestFiniteMagnitude
let bufferSize = width * height
let tempBuffer = UnsafeMutablePointer<UInt8>.allocate(capacity: bufferSize)
defer { tempBuffer.deallocate() }
for threshold in stride(from: 5, to: 100, by: 5) {
PupilDetector.imageProcessingOptimized(
input: eyeData,
@@ -87,18 +87,18 @@ final class PupilCalibration: @unchecked Sendable {
bestThreshold = threshold
}
}
return bestThreshold
}
private static func irisSize(data: UnsafePointer<UInt8>, width: Int, height: Int) -> Double {
let margin = 5
guard width > margin * 2, height > margin * 2 else { return 0 }
var blackCount = 0
let innerWidth = width - margin * 2
let innerHeight = height - margin * 2
for y in margin..<(height - margin) {
let rowStart = y * width + margin
for x in 0..<innerWidth {
@@ -107,11 +107,11 @@ final class PupilCalibration: @unchecked Sendable {
}
}
}
let totalCount = innerWidth * innerHeight
return totalCount > 0 ? Double(blackCount) / Double(totalCount) : 0
}
func reset() {
lock.lock()
defer { lock.unlock() }
@@ -126,7 +126,7 @@ struct PupilDetectorMetrics: Sendable {
var averageProcessingTimeMs: Double = 0
var frameCount: Int = 0
var processedFrameCount: Int = 0
mutating func recordProcessingTime(_ ms: Double) {
lastProcessingTimeMs = ms
processedFrameCount += 1
@@ -136,50 +136,52 @@ struct PupilDetectorMetrics: Sendable {
}
final class PupilDetector: @unchecked Sendable {
// MARK: - Thread Safety
// MARK: - Thread Safety
private static let lock = NSLock()
// MARK: - Configuration
static var enableDebugImageSaving = false
static var enablePerformanceLogging = false
static var frameSkipCount = 10 // Process every Nth frame
// MARK: - State (protected by lock)
private static var _debugImageCounter = 0
private static var _frameCounter = 0
private static var _lastPupilPositions: (left: PupilPosition?, right: PupilPosition?) = (nil, nil)
private static var _lastPupilPositions: (left: PupilPosition?, right: PupilPosition?) = (
nil, nil
)
private static var _metrics = PupilDetectorMetrics()
static let calibration = PupilCalibration()
// MARK: - Convenience Properties
private static var debugImageCounter: Int {
get { _debugImageCounter }
set { _debugImageCounter = newValue }
}
private static var frameCounter: Int {
get { _frameCounter }
set { _frameCounter = newValue }
}
private static var lastPupilPositions: (left: PupilPosition?, right: PupilPosition?) {
get { _lastPupilPositions }
set { _lastPupilPositions = newValue }
}
private static var metrics: PupilDetectorMetrics {
get { _metrics }
set { _metrics = newValue }
}
// MARK: - Precomputed Tables
private static let spatialWeightsLUT: [[Float]] = {
let d = 10
let radius = d / 2
@@ -187,13 +189,14 @@ final class PupilDetector: @unchecked Sendable {
var weights = [[Float]](repeating: [Float](repeating: 0, count: d), count: d)
for dy in 0..<d {
for dx in 0..<d {
let dist = sqrt(Float((dy - radius) * (dy - radius) + (dx - radius) * (dx - radius)))
let dist = sqrt(
Float((dy - radius) * (dy - radius) + (dx - radius) * (dx - radius)))
weights[dy][dx] = exp(-dist * dist / (2 * sigmaSpace * sigmaSpace))
}
}
return weights
}()
private static let colorWeightsLUT: [Float] = {
let sigmaColor: Float = 15.0
var lut = [Float](repeating: 0, count: 256)
@@ -203,18 +206,18 @@ final class PupilDetector: @unchecked Sendable {
}
return lut
}()
// MARK: - Reusable Buffers
private static var grayscaleBuffer: UnsafeMutablePointer<UInt8>?
private static var grayscaleBufferSize = 0
private static var eyeBuffer: UnsafeMutablePointer<UInt8>?
private static var eyeBufferSize = 0
private static var tempBuffer: UnsafeMutablePointer<UInt8>?
private static var tempBufferSize = 0
// MARK: - Public API
/// Detects pupil position with frame skipping for performance
/// Returns cached result on skipped frames
nonisolated static func detectPupil(
@@ -225,7 +228,7 @@ final class PupilDetector: @unchecked Sendable {
side: Int = 0,
threshold: Int? = nil
) -> (pupilPosition: PupilPosition, eyeRegion: EyeRegion)? {
// Frame skipping - return cached result
if frameCounter % frameSkipCount != 0 {
let cachedPosition = side == 0 ? lastPupilPositions.left : lastPupilPositions.right
@@ -242,68 +245,77 @@ final class PupilDetector: @unchecked Sendable {
}
return nil
}
let startTime = CFAbsoluteTimeGetCurrent()
defer {
if enablePerformanceLogging {
let elapsed = (CFAbsoluteTimeGetCurrent() - startTime) * 1000
metrics.recordProcessingTime(elapsed)
if metrics.processedFrameCount % 30 == 0 {
print("👁 PupilDetector: \(String(format: "%.2f", elapsed))ms (avg: \(String(format: "%.2f", metrics.averageProcessingTimeMs))ms)")
print(
"👁 PupilDetector: \(String(format: "%.2f", elapsed))ms (avg: \(String(format: "%.2f", metrics.averageProcessingTimeMs))ms)"
)
}
}
}
// Step 1: Convert Vision landmarks to pixel coordinates
let eyePoints = landmarksToPixelCoordinates(
landmarks: eyeLandmarks,
faceBoundingBox: faceBoundingBox,
imageSize: imageSize
)
guard eyePoints.count >= 6 else { return nil }
// Step 2: Create eye region bounding box with margin
guard let eyeRegion = createEyeRegion(from: eyePoints, imageSize: imageSize) else {
return nil
}
let frameWidth = CVPixelBufferGetWidth(pixelBuffer)
let frameHeight = CVPixelBufferGetHeight(pixelBuffer)
let frameSize = frameWidth * frameHeight
// Step 3: Ensure buffers are allocated
ensureBufferCapacity(frameSize: frameSize, eyeSize: Int(eyeRegion.frame.width * eyeRegion.frame.height))
ensureBufferCapacity(
frameSize: frameSize, eyeSize: Int(eyeRegion.frame.width * eyeRegion.frame.height))
guard let grayBuffer = grayscaleBuffer,
let eyeBuf = eyeBuffer,
let tmpBuf = tempBuffer else {
let eyeBuf = eyeBuffer,
let tmpBuf = tempBuffer
else {
return nil
}
// Step 4: Extract grayscale data using vImage
guard extractGrayscaleDataOptimized(from: pixelBuffer, to: grayBuffer, width: frameWidth, height: frameHeight) else {
guard
extractGrayscaleDataOptimized(
from: pixelBuffer, to: grayBuffer, width: frameWidth, height: frameHeight)
else {
return nil
}
// Step 5: Isolate eye with polygon mask
let eyeWidth = Int(eyeRegion.frame.width)
let eyeHeight = Int(eyeRegion.frame.height)
// Early exit for tiny regions (less than 10x10 pixels)
guard eyeWidth >= 10, eyeHeight >= 10 else { return nil }
guard isolateEyeWithMaskOptimized(
frameData: grayBuffer,
frameWidth: frameWidth,
frameHeight: frameHeight,
eyePoints: eyePoints,
region: eyeRegion,
output: eyeBuf
) else {
guard
isolateEyeWithMaskOptimized(
frameData: grayBuffer,
frameWidth: frameWidth,
frameHeight: frameHeight,
eyePoints: eyePoints,
region: eyeRegion,
output: eyeBuf
)
else {
return nil
}
// Step 6: Get threshold (from calibration or parameter)
let effectiveThreshold: Int
if let manualThreshold = threshold {
@@ -314,7 +326,7 @@ final class PupilDetector: @unchecked Sendable {
calibration.evaluate(eyeData: eyeBuf, width: eyeWidth, height: eyeHeight, side: side)
effectiveThreshold = calibration.threshold(forSide: side)
}
// Step 7: Process image (bilateral filter + erosion + threshold)
imageProcessingOptimized(
input: eyeBuf,
@@ -323,43 +335,47 @@ final class PupilDetector: @unchecked Sendable {
height: eyeHeight,
threshold: effectiveThreshold
)
// Debug: Save processed images if enabled
if enableDebugImageSaving && debugImageCounter < 10 {
saveDebugImage(data: tmpBuf, width: eyeWidth, height: eyeHeight, name: "processed_eye_\(debugImageCounter)")
saveDebugImage(
data: tmpBuf, width: eyeWidth, height: eyeHeight,
name: "processed_eye_\(debugImageCounter)")
debugImageCounter += 1
}
// Step 8: Find contours and compute centroid
guard let (centroidX, centroidY) = findPupilFromContoursOptimized(
data: tmpBuf,
width: eyeWidth,
height: eyeHeight
) else {
guard
let (centroidX, centroidY) = findPupilFromContoursOptimized(
data: tmpBuf,
width: eyeWidth,
height: eyeHeight
)
else {
return nil
}
let pupilPosition = PupilPosition(x: CGFloat(centroidX), y: CGFloat(centroidY))
// Cache result
if side == 0 {
lastPupilPositions.left = pupilPosition
} else {
lastPupilPositions.right = pupilPosition
}
return (pupilPosition, eyeRegion)
}
// MARK: - Buffer Management
private static func ensureBufferCapacity(frameSize: Int, eyeSize: Int) {
if grayscaleBufferSize < frameSize {
grayscaleBuffer?.deallocate()
grayscaleBuffer = UnsafeMutablePointer<UInt8>.allocate(capacity: frameSize)
grayscaleBufferSize = frameSize
}
let requiredEyeSize = max(eyeSize, 10000) // Minimum size for safety
if eyeBufferSize < requiredEyeSize {
eyeBuffer?.deallocate()
@@ -369,9 +385,9 @@ final class PupilDetector: @unchecked Sendable {
eyeBufferSize = requiredEyeSize
}
}
// MARK: - Optimized Grayscale Conversion (vImage)
private static func extractGrayscaleDataOptimized(
from pixelBuffer: CVPixelBuffer,
to output: UnsafeMutablePointer<UInt8>,
@@ -380,38 +396,38 @@ final class PupilDetector: @unchecked Sendable {
) -> Bool {
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let pixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
switch pixelFormat {
case kCVPixelFormatType_32BGRA:
guard let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer) else { return false }
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
var srcBuffer = vImage_Buffer(
data: baseAddress,
height: vImagePixelCount(height),
width: vImagePixelCount(width),
rowBytes: bytesPerRow
)
var dstBuffer = vImage_Buffer(
data: output,
height: vImagePixelCount(height),
width: vImagePixelCount(width),
rowBytes: width
)
// BGRA to Planar8 grayscale using luminance coefficients
// Y = 0.299*R + 0.587*G + 0.114*B
let matrix: [Int16] = [
28, // B coefficient (0.114 * 256 29, adjusted)
28, // B coefficient (0.114 * 256 29, adjusted)
150, // G coefficient (0.587 * 256 150)
77, // R coefficient (0.299 * 256 77)
0 // A coefficient
77, // R coefficient (0.299 * 256 77)
0, // A coefficient
]
let divisor: Int32 = 256
let error = vImageMatrixMultiply_ARGB8888ToPlanar8(
&srcBuffer,
&dstBuffer,
@@ -421,27 +437,30 @@ final class PupilDetector: @unchecked Sendable {
0,
vImage_Flags(kvImageNoFlags)
)
return error == kvImageNoError
case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
guard let yPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0) else { return false }
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange:
guard let yPlane = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0) else {
return false
}
let yBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
let yBuffer = yPlane.assumingMemoryBound(to: UInt8.self)
// Direct copy of Y plane (already grayscale)
for y in 0..<height {
memcpy(output.advanced(by: y * width), yBuffer.advanced(by: y * yBytesPerRow), width)
memcpy(
output.advanced(by: y * width), yBuffer.advanced(by: y * yBytesPerRow), width)
}
return true
default:
// Fallback to manual conversion
guard let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer) else { return false }
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let buffer = baseAddress.assumingMemoryBound(to: UInt8.self)
for y in 0..<height {
for x in 0..<width {
let offset = y * bytesPerRow + x * 4
@@ -454,9 +473,9 @@ final class PupilDetector: @unchecked Sendable {
return true
}
}
// MARK: - Optimized Eye Isolation
private static func isolateEyeWithMaskOptimized(
frameData: UnsafePointer<UInt8>,
frameWidth: Int,
@@ -469,57 +488,60 @@ final class PupilDetector: @unchecked Sendable {
let minY = Int(region.frame.origin.y)
let eyeWidth = Int(region.frame.width)
let eyeHeight = Int(region.frame.height)
guard eyeWidth > 0, eyeHeight > 0 else { return false }
// Initialize to white (masked out)
memset(output, 255, eyeWidth * eyeHeight)
// Convert eye points to local coordinates
let localPoints = eyePoints.map { point in
(x: Float(point.x) - Float(minX), y: Float(point.y) - Float(minY))
}
// Precompute edge data for faster point-in-polygon
let edges = (0..<localPoints.count).map { i in
let p1 = localPoints[i]
let p2 = localPoints[(i + 1) % localPoints.count]
return (x1: p1.x, y1: p1.y, x2: p2.x, y2: p2.y)
}
for y in 0..<eyeHeight {
let py = Float(y)
for x in 0..<eyeWidth {
let px = Float(x)
if pointInPolygonFast(px: px, py: py, edges: edges) {
let frameX = minX + x
let frameY = minY + y
if frameX >= 0, frameX < frameWidth, frameY >= 0, frameY < frameHeight {
output[y * eyeWidth + x] = frameData[frameY * frameWidth + frameX]
}
}
}
}
return true
}
@inline(__always)
private static func pointInPolygonFast(px: Float, py: Float, edges: [(x1: Float, y1: Float, x2: Float, y2: Float)]) -> Bool {
@inline(__always)
private static func pointInPolygonFast(
px: Float, py: Float, edges: [(x1: Float, y1: Float, x2: Float, y2: Float)]
) -> Bool {
var inside = false
for edge in edges {
if ((edge.y1 > py) != (edge.y2 > py)) &&
(px < (edge.x2 - edge.x1) * (py - edge.y1) / (edge.y2 - edge.y1) + edge.x1) {
if ((edge.y1 > py) != (edge.y2 > py))
&& (px < (edge.x2 - edge.x1) * (py - edge.y1) / (edge.y2 - edge.y1) + edge.x1)
{
inside = !inside
}
}
return inside
}
// MARK: - Optimized Image Processing
static func imageProcessingOptimized(
input: UnsafePointer<UInt8>,
output: UnsafeMutablePointer<UInt8>,
@@ -529,23 +551,24 @@ final class PupilDetector: @unchecked Sendable {
) {
let size = width * height
guard size > 0 else { return }
// Use a working buffer for intermediate results
let workBuffer = UnsafeMutablePointer<UInt8>.allocate(capacity: size)
defer { workBuffer.deallocate() }
// 1. Fast Gaussian blur using vImage (replaces expensive bilateral filter)
gaussianBlurOptimized(input: input, output: workBuffer, width: width, height: height)
// 2. Erosion with vImage (3 iterations)
erodeOptimized(input: workBuffer, output: output, width: width, height: height, iterations: 3)
erodeOptimized(
input: workBuffer, output: output, width: width, height: height, iterations: 3)
// 3. Simple binary threshold (no vDSP overhead for small buffers)
for i in 0..<size {
output[i] = output[i] > UInt8(threshold) ? 255 : 0
}
}
private static func gaussianBlurOptimized(
input: UnsafePointer<UInt8>,
output: UnsafeMutablePointer<UInt8>,
@@ -554,24 +577,24 @@ final class PupilDetector: @unchecked Sendable {
) {
// Use a more appropriate convolution for performance
// Using vImageTentConvolve_Planar8 with optimized parameters
var srcBuffer = vImage_Buffer(
data: UnsafeMutableRawPointer(mutating: input),
height: vImagePixelCount(height),
width: vImagePixelCount(width),
rowBytes: width
)
var dstBuffer = vImage_Buffer(
data: UnsafeMutableRawPointer(output),
height: vImagePixelCount(height),
width: vImagePixelCount(width),
rowBytes: width
)
// Kernel size that provides good blur with minimal computational overhead
let kernelSize: UInt32 = 5
vImageTentConvolve_Planar8(
&srcBuffer,
&dstBuffer,
@@ -583,7 +606,7 @@ final class PupilDetector: @unchecked Sendable {
vImage_Flags(kvImageEdgeExtend)
)
}
private static func erodeOptimized(
input: UnsafePointer<UInt8>,
output: UnsafeMutablePointer<UInt8>,
@@ -595,52 +618,54 @@ final class PupilDetector: @unchecked Sendable {
memcpy(output, input, width * height)
return
}
// Copy input to output first so we can use output as working buffer
memcpy(output, input, width * height)
var srcBuffer = vImage_Buffer(
data: UnsafeMutableRawPointer(output),
height: vImagePixelCount(height),
width: vImagePixelCount(width),
rowBytes: width
)
// Allocate temp buffer for ping-pong
let tempData = UnsafeMutablePointer<UInt8>.allocate(capacity: width * height)
defer { tempData.deallocate() }
var dstBuffer = vImage_Buffer(
data: UnsafeMutableRawPointer(tempData),
height: vImagePixelCount(height),
width: vImagePixelCount(width),
rowBytes: width
)
// 3x3 erosion kernel (all ones)
let kernel: [UInt8] = [
1, 1, 1,
1, 1, 1,
1, 1, 1
1, 1, 1,
]
for i in 0..<iterations {
if i % 2 == 0 {
vImageErode_Planar8(&srcBuffer, &dstBuffer, 0, 0, kernel, 3, 3, vImage_Flags(kvImageNoFlags))
vImageErode_Planar8(
&srcBuffer, &dstBuffer, 0, 0, kernel, 3, 3, vImage_Flags(kvImageNoFlags))
} else {
vImageErode_Planar8(&dstBuffer, &srcBuffer, 0, 0, kernel, 3, 3, vImage_Flags(kvImageNoFlags))
vImageErode_Planar8(
&dstBuffer, &srcBuffer, 0, 0, kernel, 3, 3, vImage_Flags(kvImageNoFlags))
}
}
// If odd iterations, result is in dstBuffer (tempData), copy to output
if iterations % 2 == 1 {
memcpy(output, tempData, width * height)
}
// If even iterations, result is already in srcBuffer (output)
}
// MARK: - Optimized Contour Detection
/// Optimized centroid-of-dark-pixels approach - much faster than union-find
/// Returns the centroid of the largest dark region
private static func findPupilFromContoursOptimized(
@@ -648,25 +673,25 @@ final class PupilDetector: @unchecked Sendable {
width: Int,
height: Int
) -> (x: Double, y: Double)? {
// Optimized approach: find centroid of all black pixels with early exit
// This works well for pupil detection since the pupil is the main dark blob
// Use a more efficient approach that doesn't iterate through entire image
var sumX: Int = 0
var sumY: Int = 0
var count: Int = 0
// Early exit if we already know this isn't going to be useful
let threshold = UInt8(5) // Only consider pixels that are quite dark
// Process in chunks for better cache performance
let chunkSize = 16
var rowsProcessed = 0
while rowsProcessed < height {
let endRow = min(rowsProcessed + chunkSize, height)
for y in rowsProcessed..<endRow {
let rowOffset = y * width
for x in 0..<width {
@@ -678,117 +703,128 @@ final class PupilDetector: @unchecked Sendable {
}
}
}
rowsProcessed = endRow
// Early exit if we've found enough pixels for a reasonable estimate
if count > 25 { // Early termination condition
break
}
}
guard count > 10 else { return nil } // Need minimum pixels for valid pupil
return (
x: Double(sumX) / Double(count),
y: Double(sumY) / Double(count)
)
}
// MARK: - Helper Methods
private static func landmarksToPixelCoordinates(
landmarks: VNFaceLandmarkRegion2D,
faceBoundingBox: CGRect,
imageSize: CGSize
) -> [CGPoint] {
return landmarks.normalizedPoints.map { point in
let imageX = (faceBoundingBox.origin.x + point.x * faceBoundingBox.width) * imageSize.width
let imageY = (faceBoundingBox.origin.y + point.y * faceBoundingBox.height) * imageSize.height
let imageX =
(faceBoundingBox.origin.x + point.x * faceBoundingBox.width) * imageSize.width
let imageY =
(faceBoundingBox.origin.y + point.y * faceBoundingBox.height) * imageSize.height
return CGPoint(x: imageX, y: imageY)
}
}
private static func createEyeRegion(from points: [CGPoint], imageSize: CGSize) -> EyeRegion? {
guard !points.isEmpty else { return nil }
let margin: CGFloat = 5
var minX = CGFloat.greatestFiniteMagnitude
var maxX = -CGFloat.greatestFiniteMagnitude
var minY = CGFloat.greatestFiniteMagnitude
var maxY = -CGFloat.greatestFiniteMagnitude
for point in points {
minX = min(minX, point.x)
maxX = max(maxX, point.x)
minY = min(minY, point.y)
maxY = max(maxY, point.y)
}
minX -= margin
maxX += margin
minY -= margin
maxY += margin
let clampedMinX = max(0, minX)
let clampedMaxX = min(imageSize.width, maxX)
let clampedMinY = max(0, minY)
let clampedMaxY = min(imageSize.height, maxY)
let frame = CGRect(
x: clampedMinX,
y: clampedMinY,
width: clampedMaxX - clampedMinX,
height: clampedMaxY - clampedMinY
)
let center = CGPoint(x: frame.width / 2, y: frame.height / 2)
let origin = CGPoint(x: clampedMinX, y: clampedMinY)
return EyeRegion(frame: frame, center: center, origin: origin)
}
// MARK: - Debug Helpers
private static func saveDebugImage(data: UnsafePointer<UInt8>, width: Int, height: Int, name: String) {
private static func saveDebugImage(
data: UnsafePointer<UInt8>, width: Int, height: Int, name: String
) {
guard let cgImage = createCGImage(from: data, width: width, height: height) else { return }
let url = URL(fileURLWithPath: "/tmp/\(name).png")
guard let destination = CGImageDestinationCreateWithURL(url as CFURL, UTType.png.identifier as CFString, 1, nil) else { return }
guard
let destination = CGImageDestinationCreateWithURL(
url as CFURL, UTType.png.identifier as CFString, 1, nil)
else { return }
CGImageDestinationAddImage(destination, cgImage, nil)
CGImageDestinationFinalize(destination)
print("💾 Saved debug image: \(url.path)")
}
private static func createCGImage(from data: UnsafePointer<UInt8>, width: Int, height: Int) -> CGImage? {
private static func createCGImage(from data: UnsafePointer<UInt8>, width: Int, height: Int)
-> CGImage?
{
let mutableData = UnsafeMutablePointer<UInt8>.allocate(capacity: width * height)
defer { mutableData.deallocate() }
memcpy(mutableData, data, width * height)
guard let context = CGContext(
data: mutableData,
width: width,
height: height,
bitsPerComponent: 8,
bytesPerRow: width,
space: CGColorSpaceCreateDeviceGray(),
bitmapInfo: CGImageAlphaInfo.none.rawValue
) else {
guard
let context = CGContext(
data: mutableData,
width: width,
height: height,
bitsPerComponent: 8,
bytesPerRow: width,
space: CGColorSpaceCreateDeviceGray(),
bitmapInfo: CGImageAlphaInfo.none.rawValue
)
else {
return nil
}
return context.makeImage()
}
/// Clean up allocated buffers (call on app termination if needed)
static func cleanup() {
grayscaleBuffer?.deallocate()
grayscaleBuffer = nil
grayscaleBufferSize = 0
eyeBuffer?.deallocate()
eyeBuffer = nil
tempBuffer?.deallocate()
tempBuffer = nil
eyeBufferSize = 0

View File

@@ -7,224 +7,231 @@
import SwiftUI
import XCTest
@testable import Gaze
@MainActor
final class OnboardingNavigationTests: XCTestCase {
var testEnv: TestEnvironment!
override func setUp() async throws {
var settings = AppSettings.defaults
settings.hasCompletedOnboarding = false
testEnv = TestEnvironment(settings: settings)
}
override func tearDown() async throws {
testEnv = nil
}
// MARK: - Navigation Tests
func testOnboardingStartsAtWelcomePage() {
// Use real SettingsManager for view initialization test since @Bindable requires concrete type
let onboarding = OnboardingContainerView(settingsManager: SettingsManager.shared)
// Verify initial state
XCTAssertFalse(testEnv.settingsManager.settings.hasCompletedOnboarding)
}
func testNavigationForwardThroughAllPages() async throws {
var settings = testEnv.settingsManager.settings
// Simulate moving through pages
let pages = [
"Welcome", // 0
"LookAway", // 1
"Blink", // 2
"Posture", // 3
"General", // 4
"Completion" // 5
"Welcome", // 0
"LookAway", // 1
"Blink", // 2
"Posture", // 3
"General", // 4
"Completion", // 5
]
for (index, pageName) in pages.enumerated() {
// Verify we can track page progression
XCTAssertEqual(index, index, "Should be on page \(index): \(pageName)")
}
}
func testNavigationBackward() {
// Start from page 3 (Posture)
var currentPage = 3
// Navigate backward
currentPage -= 1
XCTAssertEqual(currentPage, 2, "Should navigate back to Blink page")
currentPage -= 1
XCTAssertEqual(currentPage, 1, "Should navigate back to LookAway page")
currentPage -= 1
XCTAssertEqual(currentPage, 0, "Should navigate back to Welcome page")
}
func testCannotNavigateBackFromWelcome() {
let currentPage = 0
// Should not be able to go below 0
XCTAssertEqual(currentPage, 0, "Should stay on Welcome page")
}
func testSettingsPersistDuringNavigation() {
// Configure lookaway timer
var config = testEnv.settingsManager.settings.lookAwayTimer
config.enabled = true
config.intervalSeconds = 1200
testEnv.settingsManager.updateTimerConfiguration(for: .lookAway, configuration: config)
// Verify settings persisted
let retrieved = testEnv.settingsManager.timerConfiguration(for: .lookAway)
XCTAssertTrue(retrieved.enabled)
XCTAssertEqual(retrieved.intervalSeconds, 1200)
// Configure blink timer
var blinkConfig = testEnv.settingsManager.settings.blinkTimer
blinkConfig.enabled = false
blinkConfig.intervalSeconds = 300
testEnv.settingsManager.updateTimerConfiguration(for: .blink, configuration: blinkConfig)
// Verify both settings persist
let lookAway = testEnv.settingsManager.timerConfiguration(for: .lookAway)
let blink = testEnv.settingsManager.timerConfiguration(for: .blink)
XCTAssertTrue(lookAway.enabled)
XCTAssertEqual(lookAway.intervalSeconds, 1200)
XCTAssertFalse(blink.enabled)
XCTAssertEqual(blink.intervalSeconds, 300)
}
func testOnboardingCompletion() {
// Start with onboarding incomplete
XCTAssertFalse(testEnv.settingsManager.settings.hasCompletedOnboarding)
// Complete onboarding
testEnv.settingsManager.settings.hasCompletedOnboarding = true
// Verify completion
XCTAssertTrue(testEnv.settingsManager.settings.hasCompletedOnboarding)
}
func testAllTimersConfiguredDuringOnboarding() {
// Configure all three built-in timers
var lookAwayConfig = testEnv.settingsManager.settings.lookAwayTimer
lookAwayConfig.enabled = true
lookAwayConfig.intervalSeconds = 1200
testEnv.settingsManager.updateTimerConfiguration(for: .lookAway, configuration: lookAwayConfig)
testEnv.settingsManager.updateTimerConfiguration(
for: .lookAway, configuration: lookAwayConfig)
var blinkConfig = testEnv.settingsManager.settings.blinkTimer
blinkConfig.enabled = true
blinkConfig.intervalSeconds = 300
testEnv.settingsManager.updateTimerConfiguration(for: .blink, configuration: blinkConfig)
var postureConfig = testEnv.settingsManager.settings.postureTimer
postureConfig.enabled = true
postureConfig.intervalSeconds = 1800
testEnv.settingsManager.updateTimerConfiguration(for: .posture, configuration: postureConfig)
testEnv.settingsManager.updateTimerConfiguration(
for: .posture, configuration: postureConfig)
// Verify all configurations
let allConfigs = testEnv.settingsManager.allTimerConfigurations()
XCTAssertEqual(allConfigs[.lookAway]?.intervalSeconds, 1200)
XCTAssertEqual(allConfigs[.blink]?.intervalSeconds, 300)
XCTAssertEqual(allConfigs[.posture]?.intervalSeconds, 1800)
XCTAssertTrue(allConfigs[.lookAway]?.enabled ?? false)
XCTAssertTrue(allConfigs[.blink]?.enabled ?? false)
XCTAssertTrue(allConfigs[.posture]?.enabled ?? false)
}
func testNavigationWithPartialConfiguration() {
// Configure only some timers
var lookAwayConfig = testEnv.settingsManager.settings.lookAwayTimer
lookAwayConfig.enabled = true
testEnv.settingsManager.updateTimerConfiguration(for: .lookAway, configuration: lookAwayConfig)
testEnv.settingsManager.updateTimerConfiguration(
for: .lookAway, configuration: lookAwayConfig)
var blinkConfig = testEnv.settingsManager.settings.blinkTimer
blinkConfig.enabled = false
testEnv.settingsManager.updateTimerConfiguration(for: .blink, configuration: blinkConfig)
// Should still be able to complete onboarding
testEnv.settingsManager.settings.hasCompletedOnboarding = true
XCTAssertTrue(testEnv.settingsManager.settings.hasCompletedOnboarding)
}
func testGeneralSettingsConfigurationDuringOnboarding() {
// Configure general settings
testEnv.settingsManager.settings.playSounds = true
testEnv.settingsManager.settings.launchAtLogin = true
XCTAssertTrue(testEnv.settingsManager.settings.playSounds)
XCTAssertTrue(testEnv.settingsManager.settings.launchAtLogin)
}
func testOnboardingFlowFromStartToFinish() {
    // End-to-end simulation of the onboarding flow: each page's configuration
    // is applied in order, then the final persisted state is verified.
    // (The previous revision applied the .lookAway and .posture configurations
    // twice each — merge artifacts; the duplicate calls have been removed.)
    XCTAssertFalse(testEnv.settingsManager.settings.hasCompletedOnboarding)

    // Page 0: Welcome - no configuration needed

    // Page 1: LookAway Setup
    var lookAwayConfig = testEnv.settingsManager.settings.lookAwayTimer
    lookAwayConfig.enabled = true
    lookAwayConfig.intervalSeconds = 1200
    testEnv.settingsManager.updateTimerConfiguration(
        for: .lookAway, configuration: lookAwayConfig)

    // Page 2: Blink Setup
    var blinkConfig = testEnv.settingsManager.settings.blinkTimer
    blinkConfig.enabled = true
    blinkConfig.intervalSeconds = 300
    testEnv.settingsManager.updateTimerConfiguration(for: .blink, configuration: blinkConfig)

    // Page 3: Posture Setup
    var postureConfig = testEnv.settingsManager.settings.postureTimer
    postureConfig.enabled = false // User chooses to disable this one
    testEnv.settingsManager.updateTimerConfiguration(
        for: .posture, configuration: postureConfig)

    // Page 4: General Settings
    testEnv.settingsManager.settings.playSounds = true
    testEnv.settingsManager.settings.launchAtLogin = false

    // Page 5: Completion - mark as done
    testEnv.settingsManager.settings.hasCompletedOnboarding = true

    // Verify final state
    XCTAssertTrue(testEnv.settingsManager.settings.hasCompletedOnboarding)
    let finalConfigs = testEnv.settingsManager.allTimerConfigurations()
    XCTAssertTrue(finalConfigs[.lookAway]?.enabled ?? false)
    XCTAssertTrue(finalConfigs[.blink]?.enabled ?? false)
    XCTAssertFalse(finalConfigs[.posture]?.enabled ?? true)
    XCTAssertTrue(testEnv.settingsManager.settings.playSounds)
    XCTAssertFalse(testEnv.settingsManager.settings.launchAtLogin)
}
func testNavigatingBackPreservesSettings() {
// Configure on page 1
var lookAwayConfig = testEnv.settingsManager.settings.lookAwayTimer
lookAwayConfig.intervalSeconds = 1500
testEnv.settingsManager.updateTimerConfiguration(for: .lookAway, configuration: lookAwayConfig)
testEnv.settingsManager.updateTimerConfiguration(
for: .lookAway, configuration: lookAwayConfig)
// Move forward to page 2
var blinkConfig = testEnv.settingsManager.settings.blinkTimer
blinkConfig.intervalSeconds = 250
testEnv.settingsManager.updateTimerConfiguration(for: .blink, configuration: blinkConfig)
// Navigate back to page 1
// Verify lookaway settings still exist
let lookAway = testEnv.settingsManager.timerConfiguration(for: .lookAway)
XCTAssertEqual(lookAway.intervalSeconds, 1500)
// Navigate forward again to page 2
// Verify blink settings still exist
let blink = testEnv.settingsManager.timerConfiguration(for: .blink)

View File

@@ -32,7 +32,7 @@ final class ExampleUITests: XCTestCase {
// For example:
// XCTAssertEqual(app.windows.count, 1)
// XCTAssertTrue(app.buttons["Start"].exists)
XCTAssertTrue(true, "UI testing example - this would verify UI elements")
}
@@ -43,4 +43,5 @@ final class ExampleUITests: XCTestCase {
XCUIApplication().launch()
}
}
}
}

42
run
View File

@@ -107,6 +107,32 @@ print_errors() {
echo "================================================================================"
}
# Pretty prints diagnostic warnings from output (LSP and compiler warnings)
print_warnings() {
    local build_log="$1"

    echo ""
    echo "⚠️ Diagnostic Warnings:"
    echo "================================================================================"

    # Pull out compiler diagnostics shaped like: /path/file.swift:line:col: warning: message
    local warning_lines
    warning_lines=$(echo "$build_log" | grep -E "\.swift:[0-9]+:[0-9]+: warning:" | sed 's/^/ /')

    if [ -z "$warning_lines" ]; then
        echo " No warnings found."
    else
        # wc -l pads its count with spaces on some platforms; strip them.
        local warning_count
        warning_count=$(echo "$warning_lines" | wc -l | tr -d ' ')
        echo " Found $warning_count warning(s):"
        echo ""
        echo "$warning_lines"
    fi

    echo "================================================================================"
}
# Launches the built application
launch_app() {
local build_dir
@@ -235,6 +261,11 @@ case "$ACTION" in
if [ $? -eq 0 ]; then
handle_build_success
echo "💡 The app is located at: build/Debug/Gaze.app"
# Show warnings in verbose mode
if [ "$VERBOSE" = true ]; then
print_warnings "$COMMAND_OUTPUT"
fi
else
echo "❌ Build failed!"
print_errors "$COMMAND_OUTPUT" "Build"
@@ -248,6 +279,11 @@ case "$ACTION" in
if [ $? -eq 0 ]; then
echo "✅ Tests passed!"
# Show warnings in verbose mode
if [ "$VERBOSE" = true ]; then
print_warnings "$COMMAND_OUTPUT"
fi
else
echo "❌ Tests failed!"
print_errors "$COMMAND_OUTPUT" "Test"
@@ -264,6 +300,12 @@ case "$ACTION" in
if [ $? -eq 0 ]; then
handle_build_success
# Show warnings in verbose mode
if [ "$VERBOSE" = true ]; then
print_warnings "$COMMAND_OUTPUT"
fi
launch_app
else
echo "❌ Build failed!"