new macOS version

This commit is contained in:
2026-04-01 16:10:30 -05:00
parent 56673078f5
commit 483e3c1d00
13 changed files with 2302 additions and 319 deletions

View File

@@ -1,5 +1,7 @@
// FloatingHUDView.swift Content for the always-on-top focus HUD panel // FloatingHUDView.swift Content for the always-on-top focus HUD panel
// All notifications (friction, nudges, resume) render here not in system notifications.
import AppKit
import SwiftUI import SwiftUI
struct FloatingHUDView: View { struct FloatingHUDView: View {
@@ -19,23 +21,24 @@ struct FloatingHUDView: View {
.animation(.spring(duration: 0.3), value: session.isExecuting) .animation(.spring(duration: 0.3), value: session.isExecuting)
.animation(.spring(duration: 0.3), value: session.executorOutput?.title) .animation(.spring(duration: 0.3), value: session.executorOutput?.title)
.animation(.spring(duration: 0.3), value: session.monitoringError) .animation(.spring(duration: 0.3), value: session.monitoringError)
.animation(.spring(duration: 0.3), value: session.nudgeMessage)
.animation(.spring(duration: 0.3), value: session.showingResumeCard)
} }
// MARK: - Header // MARK: - Header
private var header: some View { private var header: some View {
HStack(spacing: 8) { HStack(spacing: 8) {
Image(systemName: "eye.fill") Image(systemName: session.isSessionActive ? "eye.fill" : "eye")
.foregroundStyle(.blue) .foregroundStyle(session.isSessionActive ? .blue : .secondary)
.font(.caption) .font(.caption)
Text(session.activeTask?.title ?? "Focus Session") Text(session.activeTask?.title ?? (session.isSessionActive ? "Focus Session" : "Argus Monitoring"))
.font(.caption.bold()) .font(.caption.bold())
.lineLimit(1) .lineLimit(1)
Spacer() Spacer()
// Pulse dot green when capturing, orange when executing
if session.isExecuting { if session.isExecuting {
Image(systemName: "bolt.fill") Image(systemName: "bolt.fill")
.font(.caption2) .font(.caption2)
@@ -55,24 +58,24 @@ struct FloatingHUDView: View {
.padding(.vertical, 10) .padding(.vertical, 10)
} }
// MARK: - Content // MARK: - Content (priority order)
@ViewBuilder @ViewBuilder
private var content: some View { private var content: some View {
// Error / warning banner shown above all other content when monitoring has a problem // 1. Error / warning banner
if let error = session.monitoringError { if let error = session.monitoringError {
MonitoringErrorBanner(message: error) MonitoringErrorBanner(message: error)
.transition(.move(edge: .top).combined(with: .opacity)) .transition(.move(edge: .top).combined(with: .opacity))
} }
// Executor output sticky card (highest priority persists until dismissed) // 2. Executor output sticky card (highest priority persists until dismissed)
if let output = session.executorOutput { if let output = session.executorOutput {
ExecutorOutputCard(title: output.title, content: output.content) { ExecutorOutputCard(title: output.title, content: output.content) {
session.executorOutput = nil session.executorOutput = nil
} }
.transition(.move(edge: .top).combined(with: .opacity)) .transition(.move(edge: .top).combined(with: .opacity))
} }
// Executing spinner // 3. Executing spinner
else if session.isExecuting { else if session.isExecuting {
HStack(spacing: 10) { HStack(spacing: 10) {
ProgressView() ProgressView()
@@ -84,34 +87,196 @@ struct FloatingHUDView: View {
.padding(14) .padding(14)
.transition(.opacity) .transition(.opacity)
} }
// Proactive friction card // 4. Resume card (shown in HUD, not as system overlay)
else if session.showingResumeCard, let card = session.resumeCard {
ResumeCardView(card: card)
.transition(.move(edge: .top).combined(with: .opacity))
}
// 5. Proactive friction / session action card
else if let card = session.proactiveCard { else if let card = session.proactiveCard {
HUDCardView(card: card) HUDCardView(card: card)
.transition(.move(edge: .top).combined(with: .opacity)) .transition(.move(edge: .top).combined(with: .opacity))
} }
// Latest VLM summary (idle state) // 6. Nudge card (amber, shown in HUD instead of system notification)
else if let nudge = session.nudgeMessage {
NudgeCardView(message: nudge)
.transition(.move(edge: .top).combined(with: .opacity))
}
// 7. Idle state latest VLM summary
else if session.monitoringError == nil { else if session.monitoringError == nil {
VStack(alignment: .leading, spacing: 4) { IdleSummaryView()
.transition(.opacity)
}
}
}
// MARK: - Nudge Card (amber replaces UNUserNotificationCenter)
private struct NudgeCardView: View {
let message: String
@Environment(SessionManager.self) private var session
var body: some View {
VStack(alignment: .leading, spacing: 8) {
HStack(alignment: .top, spacing: 8) {
Image(systemName: "lightbulb.fill")
.foregroundStyle(.orange)
.font(.caption)
Text(message)
.font(.caption)
.foregroundStyle(.primary)
.fixedSize(horizontal: false, vertical: true)
.lineLimit(4)
Spacer(minLength: 0)
Button { session.dismissNudge() } label: {
Image(systemName: "xmark")
.font(.caption2.bold())
.foregroundStyle(.secondary)
}
.buttonStyle(.plain)
}
}
.padding(12)
.background(Color.orange.opacity(0.08))
.overlay(
Rectangle()
.frame(width: 3)
.foregroundStyle(Color.orange),
alignment: .leading
)
}
}
// MARK: - Resume Card (warm welcome-back in HUD)
private struct ResumeCardView: View {
let card: ResumeCard
@Environment(SessionManager.self) private var session
var body: some View {
VStack(alignment: .leading, spacing: 8) {
HStack(spacing: 8) {
Image(systemName: "arrow.counterclockwise.circle.fill")
.foregroundStyle(.blue)
.font(.caption)
Text(card.welcomeBack)
.font(.caption.bold())
.foregroundStyle(.blue)
Spacer()
Button { session.showingResumeCard = false } label: {
Image(systemName: "xmark")
.font(.caption2.bold())
.foregroundStyle(.secondary)
}
.buttonStyle(.plain)
}
Text(card.youWereDoing)
.font(.caption)
.foregroundStyle(.primary)
.fixedSize(horizontal: false, vertical: true)
Text(card.nextStep)
.font(.caption)
.foregroundStyle(.secondary)
.fixedSize(horizontal: false, vertical: true)
Text(card.motivation)
.font(.caption.italic())
.foregroundStyle(.blue.opacity(0.8))
.fixedSize(horizontal: false, vertical: true)
Button("Got it — let's go") {
session.showingResumeCard = false
}
.font(.caption.bold())
.foregroundStyle(.white)
.padding(.horizontal, 12)
.padding(.vertical, 5)
.background(Color.blue)
.clipShape(.rect(cornerRadius: 6))
.buttonStyle(.plain)
}
.padding(14)
.background(Color.blue.opacity(0.07))
}
}
// MARK: - Idle Summary View
private struct IdleSummaryView: View {
@Environment(SessionManager.self) private var session
var body: some View {
VStack(alignment: .leading, spacing: 8) {
// Step progress only when session is active with steps
if session.isSessionActive && session.totalSteps > 0 {
HStack(spacing: 6) {
Image(systemName: "checklist")
.font(.caption2)
.foregroundStyle(.blue)
Text("Step \(min(session.completedCount + 1, session.totalSteps))/\(session.totalSteps): \(session.currentStep?.title ?? "In progress")")
.font(.caption)
.foregroundStyle(.blue)
.lineLimit(1)
}
Divider()
}
// Inferred task
if let task = session.latestInferredTask, !task.isEmpty { if let task = session.latestInferredTask, !task.isEmpty {
VStack(alignment: .leading, spacing: 2) {
Text("DOING NOW")
.font(.system(size: 9, weight: .semibold))
.foregroundStyle(.secondary)
.tracking(0.5)
Text(task) Text(task)
.font(.caption.bold()) .font(.caption.bold())
.foregroundStyle(.primary) .foregroundStyle(.primary)
.fixedSize(horizontal: false, vertical: true) .fixedSize(horizontal: false, vertical: true)
.lineLimit(2) .lineLimit(2)
} }
}
// App badge + VLM summary
HStack(alignment: .top, spacing: 6) {
if let app = session.latestAppName, !app.isEmpty {
Text(app)
.font(.system(size: 9, weight: .medium))
.foregroundStyle(.purple)
.padding(.horizontal, 5)
.padding(.vertical, 2)
.background(Color.purple.opacity(0.1))
.clipShape(.capsule)
.lineLimit(1)
}
Text(session.latestVlmSummary ?? "Monitoring your screen…") Text(session.latestVlmSummary ?? "Monitoring your screen…")
.font(.caption) .font(.caption)
.foregroundStyle(.secondary) .foregroundStyle(.secondary)
.fixedSize(horizontal: false, vertical: true) .fixedSize(horizontal: false, vertical: true)
.lineLimit(3) .lineLimit(3)
} }
.padding(14)
.transition(.opacity) // Distraction count badge
if session.isSessionActive && session.distractionCount > 0 {
HStack(spacing: 4) {
Image(systemName: "exclamationmark.triangle")
.font(.system(size: 9))
.foregroundStyle(.orange)
Text("\(session.distractionCount) distraction\(session.distractionCount == 1 ? "" : "s") this session")
.font(.system(size: 9))
.foregroundStyle(.orange)
} }
} }
}
.padding(14)
}
} }
// MARK: - HUD Card (friction + proposed actions) // MARK: - HUD Card (friction + proposed actions / session actions)
private struct HUDCardView: View { private struct HUDCardView: View {
let card: ProactiveCard let card: ProactiveCard
@@ -145,7 +310,6 @@ private struct HUDCardView: View {
.buttonStyle(.plain) .buttonStyle(.plain)
} }
// Action buttons
actionButtons actionButtons
} }
.padding(14) .padding(14)
@@ -161,19 +325,10 @@ private struct HUDCardView: View {
Button { Button {
session.approveProactiveCard(actionIndex: index) session.approveProactiveCard(actionIndex: index)
} label: { } label: {
VStack(alignment: .leading, spacing: 2) {
Text(action.label) Text(action.label)
.font(.caption.bold()) .font(.caption.bold())
.lineLimit(2) .lineLimit(2)
.multilineTextAlignment(.leading) .multilineTextAlignment(.leading)
if let details = action.details, !details.isEmpty {
Text(details)
.font(.caption2)
.foregroundStyle(.purple.opacity(0.7))
.lineLimit(2)
.multilineTextAlignment(.leading)
}
}
.frame(maxWidth: .infinity, alignment: .leading) .frame(maxWidth: .infinity, alignment: .leading)
.padding(.horizontal, 10) .padding(.horizontal, 10)
.padding(.vertical, 6) .padding(.vertical, 6)
@@ -186,7 +341,7 @@ private struct HUDCardView: View {
notNowButton notNowButton
} }
case .sessionAction(let type, _, _, _, _): case .sessionAction(let type, _, _, _, _, _):
VStack(alignment: .leading, spacing: 6) { VStack(alignment: .leading, spacing: 6) {
Button { Button {
session.approveProactiveCard(actionIndex: 0) session.approveProactiveCard(actionIndex: 0)
@@ -201,6 +356,26 @@ private struct HUDCardView: View {
} }
.buttonStyle(.plain) .buttonStyle(.plain)
.foregroundStyle(.purple) .foregroundStyle(.purple)
notNowButton
}
case .appSwitchLoop:
VStack(alignment: .leading, spacing: 6) {
Button {
session.approveProactiveCard(actionIndex: 0)
} label: {
Text("Help me with this")
.font(.caption.bold())
.frame(maxWidth: .infinity, alignment: .leading)
.padding(.horizontal, 10)
.padding(.vertical, 6)
.background(Color.purple.opacity(0.12))
.clipShape(.rect(cornerRadius: 8))
}
.buttonStyle(.plain)
.foregroundStyle(.purple)
notNowButton notNowButton
} }
@@ -222,7 +397,7 @@ private struct HUDCardView: View {
case "resume": return "Resume session" case "resume": return "Resume session"
case "switch": return "Switch to this task" case "switch": return "Switch to this task"
case "complete": return "Mark complete" case "complete": return "Mark complete"
case "start_new": return "Start focus session" case "start_new": return "Create task + start focus session"
default: return "OK" default: return "OK"
} }
} }
@@ -233,7 +408,7 @@ private struct HUDCardView: View {
return description ?? "I noticed something that might be slowing you down." return description ?? "I noticed something that might be slowing you down."
case .appSwitchLoop(let apps, let count): case .appSwitchLoop(let apps, let count):
return "You've switched between \(apps.joined(separator: "")) \(count)× — are you stuck?" return "You've switched between \(apps.joined(separator: "")) \(count)× — are you stuck?"
case .sessionAction(_, _, let checkpoint, let reason, _): case .sessionAction(_, _, let checkpoint, let reason, _, _):
if !checkpoint.isEmpty { return "Left off: \(checkpoint)" } if !checkpoint.isEmpty { return "Left off: \(checkpoint)" }
return reason.isEmpty ? "Argus noticed a session change." : reason return reason.isEmpty ? "Argus noticed a session change." : reason
} }
@@ -293,6 +468,13 @@ private struct ExecutorOutputCard: View {
let content: String let content: String
let onDismiss: () -> Void let onDismiss: () -> Void
@State private var copied = false
private var maxScrollHeight: CGFloat {
let screenHeight = NSScreen.main?.visibleFrame.height ?? 800
return max(120, screenHeight - 157)
}
var body: some View { var body: some View {
VStack(alignment: .leading, spacing: 8) { VStack(alignment: .leading, spacing: 8) {
HStack(spacing: 6) { HStack(spacing: 6) {
@@ -316,16 +498,34 @@ private struct ExecutorOutputCard: View {
Text(content) Text(content)
.font(.caption) .font(.caption)
.foregroundStyle(.primary) .foregroundStyle(.primary)
.textSelection(.enabled)
.fixedSize(horizontal: false, vertical: true) .fixedSize(horizontal: false, vertical: true)
.frame(maxWidth: .infinity, alignment: .leading) .frame(maxWidth: .infinity, alignment: .leading)
} }
.frame(maxHeight: 120) .frame(maxHeight: maxScrollHeight)
HStack {
Button("Dismiss") { onDismiss() } Button("Dismiss") { onDismiss() }
.font(.caption) .font(.caption)
.foregroundStyle(.secondary) .foregroundStyle(.secondary)
.buttonStyle(.plain) .buttonStyle(.plain)
.frame(maxWidth: .infinity, alignment: .trailing) Spacer()
Button {
NSPasteboard.general.clearContents()
NSPasteboard.general.setString(content, forType: .string)
copied = true
Task {
try? await Task.sleep(for: .seconds(2))
copied = false
}
} label: {
Label(copied ? "Copied!" : "Copy", systemImage: copied ? "checkmark" : "doc.on.doc")
.font(.caption.bold())
.foregroundStyle(copied ? AnyShapeStyle(.secondary) : AnyShapeStyle(Color.green))
}
.buttonStyle(.plain)
.animation(.easeInOut(duration: 0.15), value: copied)
}
} }
.padding(14) .padding(14)
.background(Color.green.opacity(0.07)) .background(Color.green.opacity(0.07))

View File

@@ -32,6 +32,11 @@ final class FloatingPanel: NSPanel {
// Don't activate the app when clicked (user keeps focus on their work) // Don't activate the app when clicked (user keeps focus on their work)
becomesKeyOnlyIfNeeded = true becomesKeyOnlyIfNeeded = true
} }
// Allow the panel to become key so buttons inside it can receive clicks.
// Combined with .nonactivatingPanel, this lets buttons work without
// stealing focus from the user's active app.
override var canBecomeKey: Bool { true }
} }
// MARK: - Controller // MARK: - Controller
@@ -49,14 +54,20 @@ final class FloatingPanelController {
let p = FloatingPanel() let p = FloatingPanel()
let hud = FloatingHUDView() let hud = FloatingHUDView()
.environment(session) .environment(session)
p.contentView = NSHostingView(rootView: hud)
// Position: top-right of the main screen, just below the menu bar // NSHostingController gives proper preferredContentSize tracking so the
// panel auto-resizes as SwiftUI content grows or shrinks.
let controller = NSHostingController(rootView: hud)
p.contentViewController = controller
// Position: top-right of the main screen, just below the menu bar.
// Anchor the top edge so the panel grows downward as content expands.
if let screen = NSScreen.main { if let screen = NSScreen.main {
let margin: CGFloat = 16 let margin: CGFloat = 16
let x = screen.visibleFrame.maxX - 320 - margin let x = screen.visibleFrame.maxX - 320 - margin
let y = screen.visibleFrame.maxY - 160 - margin // Place top edge just below the menu bar
p.setFrameOrigin(NSPoint(x: x, y: y)) let topY = screen.visibleFrame.maxY - margin
p.setFrameTopLeftPoint(NSPoint(x: x, y: topY))
} else { } else {
p.center() p.center()
} }

View File

@@ -1,5 +1,5 @@
// GeminiVLMClient.swift Native Swift Gemini Vision API client // GeminiVLMClient.swift Native Swift Gemini Vision API client + Agentic Executor
// Ports the Python argus VLM analysis (vlm.py) directly into Swift. // Ports the Python argus VLM analysis (vlm.py) and executor (executor.py) into Swift.
// No subprocess required: screenshots go straight from ScreenCaptureKit Gemini UI. // No subprocess required: screenshots go straight from ScreenCaptureKit Gemini UI.
import Foundation import Foundation
@@ -7,36 +7,81 @@ import Foundation
struct GeminiVLMClient { struct GeminiVLMClient {
private static let apiBase = "https://generativelanguage.googleapis.com/v1beta/models" private static let apiBase = "https://generativelanguage.googleapis.com/v1beta/models"
private static let model = "gemini-3.1-pro-preview" private static let analysisModel = "gemini-3-flash-preview"
private static let executorModel = "gemini-3-flash-preview"
let apiKey: String let apiKey: String
// MARK: - Public // MARK: - Files API Upload
/// Upload a single JPEG frame to the Gemini Files API.
/// Returns the file URI which can be reused in subsequent VLM requests,
/// avoiding redundant base64 re-encoding of frames already seen by the model.
func uploadFrame(_ data: Data) async throws -> String {
let urlStr = "https://generativelanguage.googleapis.com/upload/v1beta/files?uploadType=multipart&key=\(apiKey)"
guard let url = URL(string: urlStr) else { throw URLError(.badURL) }
let boundary = "frameboundary-\(UUID().uuidString.prefix(16))"
var body = Data()
let meta = "{\"file\":{\"display_name\":\"frame\"}}"
body.append("--\(boundary)\r\nContent-Type: application/json; charset=UTF-8\r\n\r\n\(meta)\r\n".data(using: .utf8)!)
body.append("--\(boundary)\r\nContent-Type: image/jpeg\r\n\r\n".data(using: .utf8)!)
body.append(data)
body.append("\r\n--\(boundary)--\r\n".data(using: .utf8)!)
var request = URLRequest(url: url)
request.httpMethod = "POST"
request.setValue("multipart/related; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")
request.httpBody = body
request.timeoutInterval = 30
let (responseData, response) = try await URLSession.shared.data(for: request)
if let http = response as? HTTPURLResponse, http.statusCode != 200 {
let msg = String(data: responseData, encoding: .utf8) ?? "HTTP \(http.statusCode)"
print("[GeminiFiles] Upload failed \(http.statusCode): \(msg.prefix(200))")
throw URLError(.badServerResponse)
}
guard let json = try JSONSerialization.jsonObject(with: responseData) as? [String: Any],
let file = json["file"] as? [String: Any],
let uri = file["uri"] as? String
else {
let raw = String(data: responseData, encoding: .utf8) ?? ""
print("[GeminiFiles] Unexpected upload response: \(raw.prefix(200))")
throw URLError(.cannotParseResponse)
}
print("[GeminiFiles] Uploaded \(data.count / 1024)KB → \(uri.suffix(20))")
return uri
}
// MARK: - VLM Analysis
/// Analyze a sequence of JPEG frames and return a structured distraction analysis. /// Analyze a sequence of JPEG frames and return a structured distraction analysis.
/// - Parameters: /// Pass `fileUris` (parallel to `frames`) to use Gemini Files API URIs for frames that
/// - frames: JPEG screenshot frames, oldest first, newest last. /// were already uploaded avoids re-sending base64 for the 3 frames carried over from
/// - taskTitle: Current task title (empty if no session). /// the previous rolling-window call. Nil entries fall back to inline base64.
/// - taskGoal: Task description / goal.
/// - steps: Active step list for the current task.
/// - windowTitle: Frontmost app name from NSWorkspace.
/// - recentSummaries: Rolling summaries from previous analyses (temporal context).
func analyze( func analyze(
frames: [Data], frames: [Data],
fileUris: [String?] = [],
taskTitle: String, taskTitle: String,
taskGoal: String, taskGoal: String,
steps: [Step], steps: [Step],
windowTitle: String, windowTitle: String,
recentSummaries: [String] historyContext: String,
sessionContext: String,
lastOutputContext: String,
executionContext: String
) async throws -> DistractionAnalysisResponse { ) async throws -> DistractionAnalysisResponse {
let prompt = buildPrompt( let prompt = buildPrompt(
taskTitle: taskTitle, taskTitle: taskTitle,
taskGoal: taskGoal, taskGoal: taskGoal,
steps: steps, steps: steps,
windowTitle: windowTitle, windowTitle: windowTitle,
recentSummaries: recentSummaries historyContext: historyContext,
sessionContext: sessionContext,
lastOutputContext: lastOutputContext,
executionContext: executionContext
) )
let raw = try await callGemini(prompt: prompt, frames: frames) let raw = try await callGemini(prompt: prompt, frames: frames, fileUris: fileUris, maxOutputTokens: 1024)
return try parseResponse(raw) return try parseResponse(raw)
} }
@@ -47,11 +92,14 @@ struct GeminiVLMClient {
taskGoal: String, taskGoal: String,
steps: [Step], steps: [Step],
windowTitle: String, windowTitle: String,
recentSummaries: [String] historyContext: String,
sessionContext: String,
lastOutputContext: String,
executionContext: String
) -> String { ) -> String {
let stepsText: String let stepsText: String
if steps.isEmpty { if steps.isEmpty {
stepsText = " (no steps defined)" stepsText = " (no steps)"
} else { } else {
stepsText = steps.map { s in stepsText = steps.map { s in
let marker: String let marker: String
@@ -67,14 +115,12 @@ struct GeminiVLMClient {
}.joined(separator: "\n") }.joined(separator: "\n")
} }
let historyText: String let sessionSection = sessionContext.isEmpty
if recentSummaries.isEmpty { ? "(no open sessions — suggest start_new if user is actively working on something)"
historyText = " (no previous frames)" : sessionContext
} else {
historyText = recentSummaries.enumerated() let prevSection = lastOutputContext.isEmpty ? "" : "\n\(lastOutputContext)"
.map { i, s in " [frame \(i + 1)] \(s)" } let execSection = executionContext.isEmpty ? "" : "\n\(executionContext)"
.joined(separator: "\n")
}
return """ return """
You are a proactive focus assistant analyzing a TIME SEQUENCE of screenshots. You are a proactive focus assistant analyzing a TIME SEQUENCE of screenshots.
@@ -82,142 +128,460 @@ struct GeminiVLMClient {
## How to read the screenshots ## How to read the screenshots
You receive screenshots in chronological order (oldest first, newest last). You receive screenshots in chronological order (oldest first, newest last).
Each frame is ~5 seconds apart. This means: You receive ~4 frames spanning ~20 seconds (one frame every 5 seconds). This means:
- 2 unchanged frames = ~10 seconds idle significant. - 2 unchanged frames = 10+ seconds idle. That's significant.
- 3 unchanged frames = ~15 seconds idle user is stuck or distracted. - 3+ unchanged frames = 15-20 seconds idle. The user is stuck or distracted.
- If ALL frames are identical, the user has been idle for 15+ seconds — flag it. - If ALL frames are identical, the user has been idle for 20 seconds — definitely flag it.
- If the user wrote code/text and then 2+ frames show no changes, they are STUCK NOW.
Do NOT wait for many frames to flag problems. React fast.
Your PRIMARY signal is the DIFFERENCES between consecutive frames. Your PRIMARY signal is the DIFFERENCES between consecutive frames.
Where the screen CHANGED = where attention is. Static areas = ignore. Where the screen CHANGED = where the user's ATTENTION is.
Where the screen is STATIC = background noise. Ignore it.
Diff signals and what they mean: Diff signals and what they mean:
- New text appearing / cursor advancing → user is actively typing (this IS their task) - New text appearing / cursor advancing → user is actively typing (THIS is their task)
- Window or tab switch → context change, could be reference or distraction - Window or tab switch → context change, could be reference or distraction
- Same content, no pixel changes → stalled, idle, or reading - Same content, no pixel changes → stalled, idle, or reading
- Repeated switching between same 2-3 apps → repetitive loop (manual data transfer) - Repeated switching between same 2-3 apps → repetitive loop (manual data transfer)
- Scroll position change → reading or browsing
- Error message that APPEARED between frames → user just triggered it, relevant - Error message that APPEARED between frames → user just triggered it, relevant
- Error message already in ALL frames → stale, ignore - Error message that was ALREADY THERE in all frames → stale, ignore it
CRITICAL — looking at something ≠ working on something: CRITICAL — looking at something ≠ working on something:
- User switches to browser/another app and just LOOKS → distraction or quick reference. - User switches to browser/another app and just LOOKS → distraction or quick reference.
- User switches and starts TYPING/EDITING → might be a new task. - User switches and starts TYPING/EDITING → might be a new task.
- If the user has an active session and switches away WITHOUT typing in the new app, - If the user has an active session and switches away WITHOUT typing in the new app,
they are DISTRACTED from their session, not starting a new task. they are DISTRACTED from their session, not starting a new task.
- Only infer a new task when there is clear evidence of productive work (typing, editing,
cursor movement between frames) in the new context.
- A single app switch is NEVER enough to infer a new task. Wait for active work. - A single app switch is NEVER enough to infer a new task. Wait for active work.
## Current task context ## Current state: \(taskTitle.isEmpty ? "MONITORING MODE (no active focus session)" : "FOCUS SESSION on \"\(taskTitle)\"")
Task: \(taskTitle.isEmpty ? "(no active task)" : taskTitle) \(taskTitle.isEmpty ? "" : "Task: \(taskTitle)\nGoal: \(taskGoal.isEmpty ? taskTitle : taskGoal)\nSteps:\n\(stepsText)")
Goal: \(taskGoal.isEmpty ? taskTitle : taskGoal)
Steps:
\(stepsText)
Window title (OS): \(windowTitle.isEmpty ? "(unknown)" : windowTitle) Window title (OS): \(windowTitle.isEmpty ? "(unknown)" : windowTitle)
## Recent screen history (for temporal context) \(taskTitle.isEmpty ? """
\(historyText) You are in MONITORING MODE — no focus session is active.
Rules for monitoring mode:
- NEVER send notification type "nudge". Nudges are only for active focus sessions.
- Instead, suggest session_action: start_new or resume if the user is actively working.
- If the user is browsing, idle, or doing casual stuff, set notification type "none".
- Do NOT nag the user about incomplete tasks. Only suggest sessions when you see ACTIVE WORK.
""" : """
IMPORTANT — Do NOT force-fit everything to the current task:
- The current task is what the user WAS working on. They may have MOVED ON.
- If the screen shows UNRELATED work (different app, different topic, different file),
the user is NOT on this task. Set on_task: false.
- If the user has been doing unrelated work for multiple frames, suggest
session_action: complete (they're done) or session_action: start_new (new work).
- Do NOT interpret browsing YouTube, checking email, or working on a different project
as "related to" the current task just because a session is active.
- Your job is to OBSERVE what the user is doing, not to anchor to the current task.
""")
## What to output ## Open sessions and tasks from backend (use EXACT IDs below)
\(sessionSection)
Analyze the screenshots and return JSON with EXACTLY this structure (no extra fields, no markdown): Session & task matching rules:
- A session matches ONLY if the user is actively EDITING the session's last_file.
Being in the same app (e.g. VS Code) is NOT enough — must be typing/editing the specific file.
- If the session's file IS being actively edited → session_action: resume with EXACT session_id.
- If the user moved to a different open session's file → session_action: switch with EXACT session_id.
- If the session's task appears DONE → session_action: complete with EXACT session_id.
Completion = the task's GOAL is visibly achieved on screen, NOT "all steps checked off."
Steps are AI-generated approximations. A commit, successful build, or "fixed" message
means the task is done regardless of how many steps are still marked pending.
- If the user is working on something matching an UNSTARTED TASK (listed above with task_id),
output session_action: start_new with task_id set to that task's ID. This starts a session
linked to the existing task instead of creating a new one.
- If the user is working on something that matches NO existing session or task,
output session_action: start_new with session_id: null AND task_id: null.
- NEVER invent IDs. Use only the IDs listed above or null.
\(prevSection)\(execSection)
## Recent screen history (temporal context)
\(historyContext)
## What to analyze
1. INFERRED TASK: What is the user working on right now? Base this on where pixels changed.
2. CHECKPOINT: What specific progress did the user make across these frames?
3. STEP COMPLETION — be AGGRESSIVE about marking steps done:
- Steps are AI-generated APPROXIMATIONS, not a rigid checklist.
- The user might solve the entire task in fewer steps than listed.
- If the screen shows the task's GOAL is achieved (e.g., code compiles, commit succeeded,
file is saved, output looks correct), mark ALL remaining steps as done via steps_completed.
- Look for completion signals: "committed", "fixed", "done", "success", green checkmarks,
successful build output, "pushed", merged PR, closed issue.
- A single action (like an AI agent fixing a bug) can complete multiple steps at once.
- When in doubt about whether a step is done, CHECK THE SCREEN — if the end result is
visible and correct, the intermediate steps don't matter.
4. TASK/SESSION COMPLETION — detect when the WHOLE task is done:
- If you can see the task's goal is achieved on screen, output session_action: complete.
- Do NOT wait for all steps to be individually checked off. Steps are suggestions.
- Completion signals: successful commit/push, "fixed", moving on to unrelated work,
closing the relevant files, terminal showing success.
- If an AI agent (like Claude Code) just solved the problem and committed, the task is DONE.
5. FRICTION DETECTION: Is the user stuck in any of these patterns?
- REPETITIVE_LOOP: Switching between same 2-3 windows (copying data manually)
- STALLED: No meaningful pixel changes across 2+ frames, OR user wrote then deleted/undid
(write-then-delete = struggle, NOT "refining")
- TEDIOUS_MANUAL: Doing automatable work (filling forms, transcribing, copying by hand)
- CONTEXT_OVERHEAD: Many windows open, visibly searching across them
- TASK_RESUMPTION: User just returned to a task from earlier
IMPORTANT signals to catch IMMEDIATELY:
- User wrote code/text then deleted it → STUCK. Flag stalled.
- User switching between source doc and target file repeatedly → TEDIOUS_MANUAL.
Flag it on the SECOND switch. Don't wait.
6. NOTIFICATION: Decide what to show the user:
- "none" — user is productively working
- "nudge" — user is idle/distracted, set message to a short reminder
- "friction" — user is stuck and an AI agent can take a concrete action
ONLY use "friction" when proposed_actions has a specific, executable task with a target
7. PROPOSED ACTION (only when notification.type = "friction"):
The "details" field is the executor agent's full instruction:
Bad: "Extract data from the document"
Good: "User is copying table values from a PDF into markdown. Extract the table from the PDF
(visible in screenshots), format as a markdown table matching the style already in the
file, and append to report.md. The user has been writing plain text tables match that style."
Respond ONLY with JSON (no markdown fences):
{ {
"on_task": true, "on_task": true,
"current_step_id": "step UUID or null", "current_step_id": "step UUID or null",
"inferred_task": "what the user is actually working on based on screen diffs", "inferred_task": "what the user is actually working on, based on screen diffs",
"checkpoint_note_update": "what specifically changed across these frames", "checkpoint_note_update": "what changed across these frames specifically",
"steps_completed": [], "steps_completed": [],
"friction": { "friction": {
"type": "repetitive_loop | stalled | tedious_manual | context_overhead | task_resumption | none", "type": "repetitive_loop | stalled | tedious_manual | context_overhead | task_resumption | none",
"confidence": 0.0, "confidence": 0.0,
"description": "what the user is struggling with", "description": "what the user is struggling with, based on diff evidence",
"proposed_actions": [ "proposed_actions": [
{ {
"label": "specific verb phrase: what to do", "label": "specific verb phrase the user can approve with one tap",
"action_type": "auto_extract | brain_dump | other", "details": "Natural language spec: (1) what to do, (2) where to look in screenshots, (3) EXACT format matching what the user already wrote, (4) target file. Concrete enough for an agent to execute without asking questions."
"details": "natural language spec for what action to take"
} }
], ],
"source_context": "filename or app name, or null", "source_context": "filename if visible, or app name",
"target_context": "filename or app name, or null" "target_context": "filename if visible, or app name"
}, },
"session_action": { "session_action": {
"type": "none", "type": "resume | switch | complete | start_new | none",
"session_id": null, "session_id": "uuid of matching session, or null for start_new/none",
"reason": "" "task_id": "uuid of matching unstarted task (for start_new only), or null",
"reason": "why this session action is suggested"
},
"notification": {
"type": "none | nudge | friction",
"message": "nudge text if type=nudge, null otherwise"
}, },
"intent": "skimming | engaged | unclear | null", "intent": "skimming | engaged | unclear | null",
"distraction_type": "app_switch | browsing | idle | null", "distraction_type": "app_switch | browsing | idle | null",
"app_name": "primary visible application", "app_name": "primary visible application",
"confidence": 0.8, "confidence": 0.8,
"gentle_nudge": "short nudge message if distracted but no friction action applies, otherwise null",
"vlm_summary": "1-sentence description of what CHANGED across the frames (not what is static)" "vlm_summary": "1-sentence description of what CHANGED across the frames (not what is static)"
} }
FRICTION DETECTION rules:
- REPETITIVE_LOOP: Switching between same 2-3 windows (copying data manually)
- STALLED: No meaningful pixel changes across 2+ frames; or user wrote then deleted
- TEDIOUS_MANUAL: Doing automatable work (filling forms, transcribing, copying by hand)
- CONTEXT_OVERHEAD: Many windows open, visibly searching across them
- TASK_RESUMPTION: User just returned to a task they were working on earlier
If friction confidence < 0.5, set type to "none".
Only set gentle_nudge when user is off-task AND no actionable friction applies.
""" """
} }
// MARK: - Action Executor // MARK: - Agentic Executor (ported from executor.py)
/// Execute a user-approved proactive action and return a plain-text result. /// Execute a user-approved proactive action using a multi-step agent loop
/// with Gemini function calling. Returns the final output/summary.
func executeAction( func executeAction(
label: String, label: String,
actionType: String,
details: String, details: String,
screenshot: Data? frames: [Data],
onToolCall: (@Sendable (String, String) -> Void)? = nil
) async throws -> String { ) async throws -> String {
let taskInstruction: String let systemPrompt = """
switch actionType { You are a productivity assistant executing a task the user approved.
case "auto_extract": Action: "\(label)"
taskInstruction = "Extract the relevant data from the screenshot and present it concisely as plain text." Spec: \(details.isEmpty ? "(none provided)" : details)
case "brain_dump":
taskInstruction = "Format this as a short brain-dump note the user should add to their task list." INSTRUCTIONS:
default: 1. For BINARY files (PDFs, images, etc.): use your VISION. Read content directly
taskInstruction = "Provide 23 concrete next steps the user can take right now." from the screenshots — this is your most reliable source for non-text files.
} 2. For TEXT files (code, markdown, configs, txt): use read_file to get exact content.
let prompt = """ 3. If you need a file but only know the filename (not the path), FIND IT FIRST:
You are a productivity assistant. The user approved this action: "\(label)" - run_command("mdfind -name 'filename'") — fast macOS Spotlight search
Details: \(details.isEmpty ? "(none)" : details) - run_command("lsof -c AppName | grep filename") — find what file an app has open
\(taskInstruction) Do NOT guess paths. Search first.
Be specific and brief (35 sentences max). No markdown, no preamble, plain text only. 4. Choose the right output method:
- write_file(): For existing text files where the modification is clear and the
file location is known — code files (cpp, py, js, etc.), markdown, configs.
Read the file first, then write the updated version.
NEVER create new files. NEVER write to files you haven't read first.
- output(): For everything else — extracted data from PDFs/images, content for
binary targets (docx, ppt, forms, websites), or when you're unsure where to
put the result. User will review and copy/paste.
5. Use run_command to compile, test, or search for files. Never to write files.
6. Do NOT hallucinate content. If you can't read something, say so.
7. Call done() with a summary when the action is complete.
""" """
let frames: [Data] = screenshot.map { [$0] } ?? []
return try await callGemini(prompt: prompt, frames: frames)
}
// MARK: - Gemini REST API Call // Build initial user message with screenshots
var userParts: [[String: Any]] = []
private func callGemini(prompt: String, frames: [Data]) async throws -> String {
let urlStr = "\(Self.apiBase)/\(Self.model):generateContent?key=\(apiKey)"
guard let url = URL(string: urlStr) else { throw URLError(.badURL) }
// Build content parts: label + image for each frame, then instruction
var parts: [[String: Any]] = []
let total = frames.count
for (i, frame) in frames.enumerated() { for (i, frame) in frames.enumerated() {
parts.append(["text": "[Screenshot \(i + 1)/\(total)\((total - i) * 5)s ago]"]) userParts.append(["text": "[Screenshot \(i + 1)/\(frames.count)]"])
parts.append([ userParts.append([
"inlineData": [ "inlineData": [
"mimeType": "image/jpeg", "mimeType": "image/jpeg",
"data": frame.base64EncodedString() "data": frame.base64EncodedString()
] ]
]) ])
} }
parts.append(["text": "Analyze this screenshot sequence now. Reply with ONLY valid JSON — no markdown, no code fences."]) userParts.append(["text": "Execute the action now. Use the tools available to you."])
var messages: [[String: Any]] = [
["role": "user", "parts": userParts]
]
let maxSteps = 10
var filesRead: Set<String> = []
var outputResult: String?
var doneSummary: String?
for step in 0..<maxSteps {
print("[Executor] Step \(step + 1)/\(maxSteps)")
let responseData = try await callGeminiWithTools(
systemPrompt: systemPrompt,
messages: messages,
maxOutputTokens: 4096
)
// Parse response
guard let json = try JSONSerialization.jsonObject(with: responseData) as? [String: Any],
let candidates = json["candidates"] as? [[String: Any]],
let first = candidates.first,
let content = first["content"] as? [String: Any],
let parts = content["parts"] as? [[String: Any]]
else {
let raw = String(data: responseData, encoding: .utf8) ?? ""
print("[Executor] Unexpected response: \(raw.prefix(300))")
break
}
// Check for text response (model is done)
if let textPart = parts.first(where: { $0["text"] != nil }),
let text = textPart["text"] as? String,
parts.allSatisfy({ $0["functionCall"] == nil }) {
// Model responded with text, no function calls it's done
return doneSummary ?? outputResult ?? text
}
// Append model's response to conversation
messages.append(["role": "model", "parts": parts])
// Process function calls
var functionResponses: [[String: Any]] = []
for part in parts {
guard let funcCall = part["functionCall"] as? [String: Any],
let name = funcCall["name"] as? String,
let args = funcCall["args"] as? [String: Any]
else { continue }
let result: String
print("[Executor] → \(name)(\(args))")
onToolCall?(name, "\(args)")
switch name {
case "read_file":
let path = args["path"] as? String ?? ""
result = executeReadFile(path: path)
filesRead.insert(path)
case "write_file":
let path = args["path"] as? String ?? ""
let fileContent = args["content"] as? String ?? ""
if !filesRead.contains(path) {
result = "ERROR: You must read_file('\(path)') before writing to it."
} else {
result = executeWriteFile(path: path, content: fileContent)
}
case "run_command":
let command = args["command"] as? String ?? args["shell_command"] as? String ?? ""
result = await executeRunCommand(command: command)
case "output":
let title = args["title"] as? String ?? label
let content = args["content"] as? String ?? ""
outputResult = content.isEmpty ? title : content
result = "Displayed to user: \(title)"
case "done":
let summary = args["summary"] as? String ?? "Action completed."
doneSummary = summary
// Return immediately agent is done
return outputResult ?? summary
default:
result = "Unknown tool: \(name)"
}
print("[Executor] ← \(result.prefix(200))")
functionResponses.append([
"functionResponse": [
"name": name,
"response": ["content": result]
]
])
}
// Feed tool results back to the model
if !functionResponses.isEmpty {
messages.append(["role": "user", "parts": functionResponses])
}
}
// Hit step limit
return outputResult ?? doneSummary ?? "Action completed (reached step limit)."
}
// MARK: - Tool Implementations
/// Tool: read a UTF-8 text file and return its contents (or an ERROR string).
/// - Parameter path: File path; a leading "~" is expanded to the user's home.
/// - Returns: File text (capped at 50k characters), or an "ERROR: …" message
///   the model can react to — tool results are strings, never thrown errors.
nonisolated private func executeReadFile(path: String) -> String {
    let resolved = (path as NSString).expandingTildeInPath
    let fm = FileManager.default
    if !fm.fileExists(atPath: resolved) {
        return "ERROR: File not found: \(path)"
    }
    if !fm.isReadableFile(atPath: resolved) {
        return "ERROR: Cannot read file: \(path)"
    }
    let text: String
    do {
        text = try String(contentsOfFile: resolved, encoding: .utf8)
    } catch {
        return "ERROR: \(error.localizedDescription)"
    }
    // Cap very large files so a single read cannot blow up the prompt budget.
    guard text.count > 50_000 else { return text }
    return String(text.prefix(50_000)) + "\n\n[TRUNCATED — file is \(text.count) characters]"
}
/// Tool: overwrite an existing UTF-8 text file with new content.
/// - Note: Refuses to create files that do not already exist — the agent is
///   only allowed to edit files it could have read first.
nonisolated private func executeWriteFile(path: String, content: String) -> String {
    let resolved = (path as NSString).expandingTildeInPath
    guard FileManager.default.fileExists(atPath: resolved) else {
        return "ERROR: File does not exist: \(path). Cannot create new files."
    }
    do {
        try content.write(toFile: resolved, atomically: true, encoding: .utf8)
    } catch {
        return "ERROR: \(error.localizedDescription)"
    }
    return "OK — wrote \(content.count) characters to \(path)"
}
/// Tool: run a shell command via zsh with a 30-second timeout.
/// - Returns: Combined stdout/stderr text (stderr prefixed, exit code appended
///   when non-zero, output capped at 10k characters), or an "ERROR: …" string.
///
/// Fixes over the previous version:
/// - `hasResumed` is now guarded by a lock. The timeout fires on a global queue
///   while `terminationHandler` fires on the process's private thread, so the
///   unsynchronized flag was a data race that could double-resume the continuation.
/// - Both pipes are drained on background threads *while the process runs*.
///   Reading only after termination deadlocks once a command writes more than
///   the ~64 KB pipe buffer — the child blocks on write, never terminates, and
///   the command spuriously "times out".
nonisolated private func executeRunCommand(command: String) async -> String {
    // Safety: block obviously destructive commands before they reach the shell.
    let dangerous = ["rm -rf /", "rm -rf ~", "mkfs", "dd if=", "> /dev/"]
    for d in dangerous where command.contains(d) {
        return "ERROR: Blocked dangerous command."
    }
    return await withCheckedContinuation { continuation in
        let process = Process()
        process.executableURL = URL(fileURLWithPath: "/bin/zsh")
        process.arguments = ["-c", command]
        let stdout = Pipe()
        let stderr = Pipe()
        process.standardOutput = stdout
        process.standardError = stderr
        // Resume the continuation exactly once, from whichever path wins
        // (timeout, termination, or launch failure).
        let lock = NSLock()
        var hasResumed = false
        func resumeOnce(_ value: String) {
            lock.lock()
            let alreadyResumed = hasResumed
            hasResumed = true
            lock.unlock()
            if !alreadyResumed { continuation.resume(returning: value) }
        }
        // Drain both pipes concurrently so the child never blocks on a full buffer.
        let drainGroup = DispatchGroup()
        var outData = Data()
        var errData = Data()
        // Timeout after 30 seconds.
        let timeoutWork = DispatchWorkItem {
            process.terminate()
            resumeOnce("ERROR: Command timed out after 30s.")
        }
        process.terminationHandler = { proc in
            timeoutWork.cancel()
            // Wait until both drains complete before assembling the result.
            drainGroup.notify(queue: .global()) {
                let out = String(data: outData, encoding: .utf8) ?? ""
                let err = String(data: errData, encoding: .utf8) ?? ""
                var result = ""
                if !out.isEmpty { result += out }
                if !err.isEmpty { result += (result.isEmpty ? "" : "\n") + "STDERR: " + err }
                if result.isEmpty { result = "(no output)" }
                if result.count > 10_000 {
                    result = String(result.prefix(10_000)) + "\n\n[TRUNCATED]"
                }
                if proc.terminationStatus != 0 {
                    result += "\n(exit code: \(proc.terminationStatus))"
                }
                resumeOnce(result)
            }
        }
        do {
            try process.run()
        } catch {
            timeoutWork.cancel()
            resumeOnce("ERROR: \(error.localizedDescription)")
            return
        }
        drainGroup.enter()
        DispatchQueue.global().async {
            outData = stdout.fileHandleForReading.readDataToEndOfFile()
            drainGroup.leave()
        }
        drainGroup.enter()
        DispatchQueue.global().async {
            errData = stderr.fileHandleForReading.readDataToEndOfFile()
            drainGroup.leave()
        }
        DispatchQueue.global().asyncAfter(deadline: .now() + 30, execute: timeoutWork)
    }
}
// MARK: - Gemini API: Analysis (no tools)
private func callGemini(
prompt: String,
frames: [Data],
fileUris: [String?] = [],
finalInstruction: String = "Analyze this screenshot sequence now. Reply with ONLY valid JSON — no markdown, no code fences.",
maxOutputTokens: Int = 1024
) async throws -> String {
let urlStr = "\(Self.apiBase)/\(Self.analysisModel):generateContent?key=\(apiKey)"
guard let url = URL(string: urlStr) else { throw URLError(.badURL) }
var parts: [[String: Any]] = []
let total = frames.count
var inlineCount = 0
var uriCount = 0
for (i, frame) in frames.enumerated() {
let age = (total - i) * 5 // approximate seconds ago
parts.append(["text": "[Screenshot \(i + 1)/\(total)\(age)s ago]"])
let uri = i < fileUris.count ? fileUris[i] : nil
if let uri {
// Use Files API URI no re-upload of this frame's bytes
parts.append(["fileData": ["mimeType": "image/jpeg", "fileUri": uri]])
uriCount += 1
} else {
// Fallback to inline base64 (newest frame, or upload not yet complete)
parts.append(["inlineData": ["mimeType": "image/jpeg", "data": frame.base64EncodedString()]])
inlineCount += 1
}
}
print("[GeminiVLM] Sending \(uriCount) URI frames + \(inlineCount) inline frames")
parts.append(["text": finalInstruction])
let body: [String: Any] = [ let body: [String: Any] = [
"systemInstruction": ["parts": [["text": prompt]]], "systemInstruction": ["parts": [["text": prompt]]],
"contents": [["parts": parts]], "contents": [["parts": parts]],
"generationConfig": [ "generationConfig": [
"temperature": 0.2, "temperature": 0.2,
"maxOutputTokens": 1024 "maxOutputTokens": maxOutputTokens
] ]
] ]
@@ -247,15 +611,134 @@ struct GeminiVLMClient {
throw URLError(.cannotParseResponse) throw URLError(.cannotParseResponse)
} }
if let reason = first["finishReason"] as? String, reason != "STOP" {
print("[GeminiVLM] finishReason=\(reason) — response may be truncated")
}
print("[GeminiVLM] Response (\(text.count) chars): \(text.prefix(200))") print("[GeminiVLM] Response (\(text.count) chars): \(text.prefix(200))")
return text return text
} }
// MARK: - Gemini API: Executor (with function calling)
/// Gemini function calling tool declarations for the agentic executor.
/// Five tools: read_file, write_file, run_command, output, done — all with
/// flat, string-typed parameter schemas.
private var executorTools: [[String: Any]] {
    // Local builder so each declaration is a single readable call instead of a
    // hand-written nested dictionary literal.
    func declaration(
        _ name: String,
        _ description: String,
        properties: [(key: String, description: String)],
        required: [String]
    ) -> [String: Any] {
        var props: [String: Any] = [:]
        for p in properties {
            props[p.key] = ["type": "string", "description": p.description]
        }
        return [
            "name": name,
            "description": description,
            "parameters": [
                "type": "object",
                "properties": props,
                "required": required
            ]
        ]
    }
    return [[
        "functionDeclarations": [
            declaration(
                "read_file",
                "Read a plain text file. Returns the file contents as a string.",
                properties: [("path", "Absolute file path to read")],
                required: ["path"]
            ),
            declaration(
                "write_file",
                "Write content to an existing plain text file. You MUST call read_file on this path first. Cannot create new files.",
                properties: [
                    ("path", "Absolute file path (must already exist)"),
                    ("content", "Full file content to write")
                ],
                required: ["path", "content"]
            ),
            declaration(
                "run_command",
                "Execute a shell command and return stdout/stderr. Use for compilation, testing, file discovery (mdfind, lsof). Do not use to write files.",
                properties: [("command", "Shell command to execute")],
                required: ["command"]
            ),
            declaration(
                "output",
                "Display content to the user in a sticky note card. Use for extracted data from PDFs/images, content for binary targets, or when unsure where to put results.",
                properties: [
                    ("title", "Card title"),
                    ("content", "Content to display")
                ],
                required: ["title", "content"]
            ),
            declaration(
                "done",
                "Signal that the action is complete. Always call this when finished.",
                properties: [("summary", "Brief summary of what was done")],
                required: ["summary"]
            )
        ]
    ]]
}
/// Call Gemini with function calling enabled. Returns raw response Data.
/// - Parameters:
///   - systemPrompt: System instruction text for the executor persona.
///   - messages: Full conversation so far (user/model turns, tool responses).
///   - maxOutputTokens: Generation cap forwarded to the API.
/// - Throws: `URLError.badURL` for a malformed endpoint, `URLError.badServerResponse`
///   on non-200 replies (after one automatic retry on 429 rate limiting).
private func callGeminiWithTools(
    systemPrompt: String,
    messages: [[String: Any]],
    maxOutputTokens: Int = 4096
) async throws -> Data {
    guard let url = URL(string: "\(Self.apiBase)/\(Self.executorModel):generateContent?key=\(apiKey)") else {
        throw URLError(.badURL)
    }
    let body: [String: Any] = [
        "systemInstruction": ["parts": [["text": systemPrompt]]],
        "tools": executorTools,
        "contents": messages,
        "generationConfig": [
            "temperature": 0.2,
            "maxOutputTokens": maxOutputTokens
        ]
    ]
    var request = URLRequest(url: url)
    request.httpMethod = "POST"
    request.setValue("application/json", forHTTPHeaderField: "Content-Type")
    request.httpBody = try JSONSerialization.data(withJSONObject: body)
    request.timeoutInterval = 120
    let (data, response) = try await URLSession.shared.data(for: request)
    // Non-HTTP responses fall through as success, matching prior behavior.
    let status = (response as? HTTPURLResponse)?.statusCode ?? 200
    if status == 429 {
        // Rate limited — wait and retry once.
        print("[Executor] Rate limited (429) — retrying in 5s")
        try await Task.sleep(for: .seconds(5))
        let (retryData, retryResponse) = try await URLSession.shared.data(for: request)
        let retryStatus = (retryResponse as? HTTPURLResponse)?.statusCode ?? 200
        guard retryStatus == 200 else {
            let msg = String(data: retryData, encoding: .utf8) ?? "HTTP \(retryStatus)"
            print("[Executor] Retry failed: \(msg)")
            throw URLError(.badServerResponse)
        }
        return retryData
    }
    guard status == 200 else {
        let msg = String(data: data, encoding: .utf8) ?? "HTTP \(status)"
        print("[Executor] API error \(status): \(msg)")
        throw URLError(.badServerResponse)
    }
    return data
}
// MARK: - Response Parsing // MARK: - Response Parsing
private func parseResponse(_ text: String) throws -> DistractionAnalysisResponse { private func parseResponse(_ text: String) throws -> DistractionAnalysisResponse {
var cleaned = text.trimmingCharacters(in: .whitespacesAndNewlines) var cleaned = text.trimmingCharacters(in: .whitespacesAndNewlines)
// Strip ```json ... ``` or ``` ... ``` fences
if cleaned.hasPrefix("```") { if cleaned.hasPrefix("```") {
let lines = cleaned.components(separatedBy: "\n") let lines = cleaned.components(separatedBy: "\n")
cleaned = lines.dropFirst().joined(separator: "\n") cleaned = lines.dropFirst().joined(separator: "\n")
@@ -264,15 +747,57 @@ struct GeminiVLMClient {
} }
cleaned = cleaned.trimmingCharacters(in: .whitespacesAndNewlines) cleaned = cleaned.trimmingCharacters(in: .whitespacesAndNewlines)
} }
// Find JSON object boundaries robustly
guard let start = cleaned.firstIndex(of: "{"), guard let start = cleaned.firstIndex(of: "{") else {
let end = cleaned.lastIndex(of: "}") else {
throw URLError(.cannotParseResponse) throw URLError(.cannotParseResponse)
} }
guard let end = cleaned.lastIndex(of: "}") else {
print("[GeminiVLM] Truncated JSON — attempting partial field extraction")
return partialFallback(from: String(cleaned[start...]))
}
let jsonStr = String(cleaned[start...end]) let jsonStr = String(cleaned[start...end])
guard let jsonData = jsonStr.data(using: .utf8) else { guard let jsonData = jsonStr.data(using: .utf8) else {
throw URLError(.cannotParseResponse) throw URLError(.cannotParseResponse)
} }
do {
return try JSONDecoder().decode(DistractionAnalysisResponse.self, from: jsonData) return try JSONDecoder().decode(DistractionAnalysisResponse.self, from: jsonData)
} catch {
print("[GeminiVLM] Decode error: \(error) — attempting partial field extraction")
return partialFallback(from: jsonStr)
}
}
/// Best-effort recovery when the VLM's JSON is truncated or fails to decode.
/// Extracts the few fields that regex can still find and returns a conservative
/// response (confidence 0.0, all structured fields nil) so monitoring keeps
/// running instead of throwing away the whole analysis.
private func partialFallback(from jsonText: String) -> DistractionAnalysisResponse {
    // Whitespace-tolerant check, consistent with the regex extraction below.
    // The old exact-substring match missed variants like `"on_task" : false`.
    // Default to on-task (true) when the field is absent or true.
    let onTask = jsonText.range(
        of: #""on_task"\s*:\s*false"#,
        options: .regularExpression
    ) == nil
    let inferredTask = regexExtract(#""inferred_task"\s*:\s*"((?:[^"\\]|\\.)*)""#, from: jsonText)
    let vlmSummary = regexExtract(#""vlm_summary"\s*:\s*"((?:[^"\\]|\\.)*)""#, from: jsonText)
    let appName = regexExtract(#""app_name"\s*:\s*"((?:[^"\\]|\\.)*)""#, from: jsonText)
    print("[GeminiVLM] Partial recovery on_task=\(onTask) task=\(inferredTask ?? "nil")")
    return DistractionAnalysisResponse(
        onTask: onTask,
        currentStepId: nil,
        inferredTask: inferredTask,
        checkpointNoteUpdate: nil,
        stepsCompleted: [],
        friction: nil,
        sessionAction: nil,
        notification: nil,
        intent: nil,
        distractionType: nil,
        appName: appName,
        confidence: 0.0,
        vlmSummary: vlmSummary
    )
}
/// Returns capture group 1 of `pattern` matched against `text`, or nil when the
/// pattern is invalid or does not match. Used by `partialFallback` to salvage
/// individual string fields out of truncated/malformed VLM JSON.
private func regexExtract(_ pattern: String, from text: String) -> String? {
guard let regex = try? NSRegularExpression(pattern: pattern),
let match = regex.firstMatch(in: text, range: NSRange(text.startIndex..., in: text)),
// Bridge group 1's NSRange back to a String.Index range before slicing.
let range = Range(match.range(at: 1), in: text)
else { return nil }
return String(text[range])
} }
} }

189
HistoryBuffer.swift Normal file
View File

@@ -0,0 +1,189 @@
// HistoryBuffer.swift Two-tier rolling history for VLM temporal context
// Ports Python argus buffer.py: image tier (recent frames) + text tier (older summaries).
// The VLM sees recent images directly AND gets text context for events 30-60s ago.
import Foundation
/// A single buffered screenshot frame with its VLM summary.
struct BufferEntry: Sendable {
let imageData: Data // JPEG bytes of the captured frame
var summary: String // VLM-generated summary; empty until analysis completes
let timestamp: Date // capture time; used for age labels and fileUri matching
var fileUri: String? // Gemini Files API URI (set async after upload; nil = use inline)
}
/// A text-only summary from an older analysis (images already evicted).
struct TextEntry: Sendable {
let summary: String
let timestamp: Date
}
/// Two-tier rolling buffer that provides temporal context to the VLM.
///
/// - **Image tier:** the most recent frames (JPEG + summary + timestamp),
///   sent to the model as actual images.
/// - **Text tier:** summaries of frames that aged out of the image tier,
///   sent as plain-text context.
/// - **Last output:** the previous VLM JSON result, for self-refinement.
/// - **Last execution:** the latest executor action summary, so the same
///   friction is not immediately re-flagged.
///
/// Only accessed from `SessionManager` on the main actor — no concurrent access.
@MainActor
final class HistoryBuffer {
    private let imageMaxLen: Int
    private let textMaxLen: Int

    /// Recent frames sent as images to the VLM.
    private(set) var images: [BufferEntry] = []

    /// Older summaries sent as text context.
    private(set) var textHistory: [TextEntry] = []

    /// Full VLM JSON output from the last analysis (for self-refinement).
    private(set) var lastOutput: String = ""

    /// Summary of the last executor action (prevents re-flagging the same friction).
    private(set) var lastExecution: String = ""

    /// Number of VLM analyses since `lastExecution` was set; cleared after 3.
    private var executionAge: Int = 0

    init(imageMaxLen: Int = 4, textMaxLen: Int = 12) {
        self.imageMaxLen = imageMaxLen
        self.textMaxLen = textMaxLen
    }

    // MARK: - Push / Update

    /// Appends a new frame to the image tier. When the tier is full, the oldest
    /// frame is evicted first and its summary (if non-empty) is promoted to the
    /// text tier, which itself drops its oldest entries past `textMaxLen`.
    func push(imageData: Data, summary: String = "") {
        if images.count >= imageMaxLen {
            let evicted = images.removeFirst()
            if !evicted.summary.isEmpty {
                textHistory.append(TextEntry(summary: evicted.summary, timestamp: evicted.timestamp))
                while textHistory.count > textMaxLen {
                    textHistory.removeFirst()
                }
            }
        }
        images.append(BufferEntry(imageData: imageData, summary: summary, timestamp: Date()))
    }

    /// Replaces the summary on the newest frame (called once the VLM returns).
    func updateLastSummary(_ summary: String) {
        guard let lastIndex = images.indices.last else { return }
        images[lastIndex].summary = summary
    }

    /// Records the Gemini Files API URI for the frame captured at `ts`.
    /// Runs after the async upload finishes — safe because pushes are ~5s apart,
    /// so a <1s timestamp tolerance uniquely identifies the frame.
    func updateFileUri(_ uri: String, forTimestamp ts: Date) {
        if let idx = images.firstIndex(where: { abs($0.timestamp.timeIntervalSince(ts)) < 1.0 }) {
            images[idx].fileUri = uri
        }
    }

    /// Stores the full VLM JSON output for self-refinement on the next call.
    func setLastOutput(_ json: String) {
        lastOutput = json
    }

    /// Stores an executor action summary; auto-cleared after 3 VLM iterations.
    func setLastExecution(_ summary: String) {
        lastExecution = summary
        executionAge = 0
    }

    /// Ages the execution context by one VLM iteration; clears it after 3.
    func tickExecutionAge() {
        guard !lastExecution.isEmpty else { return }
        executionAge += 1
        if executionAge >= 3 {
            lastExecution = ""
            executionAge = 0
        }
    }

    /// All buffered JPEG frames, oldest first (for sending to the VLM as images).
    var frameData: [Data] {
        images.map(\.imageData)
    }

    /// Files API URIs parallel to `frameData`; nil means "send that frame inline".
    var fileUris: [String?] {
        images.map(\.fileUri)
    }

    /// Non-empty summaries of the buffered frames, oldest first.
    var recentSummaries: [String] {
        images.filter { !$0.summary.isEmpty }.map(\.summary)
    }

    /// Resets every tier and all cached context (e.g., on session end).
    func clear() {
        images = []
        textHistory = []
        lastOutput = ""
        lastExecution = ""
        executionAge = 0
    }

    // MARK: - Prompt Formatting

    /// Builds the temporal-context section of the VLM prompt: older text-only
    /// summaries first, then labels for the images that accompany the request.
    func formatForPrompt() -> String {
        var lines: [String] = []
        // Older text-only context (summaries whose images were evicted).
        if !textHistory.isEmpty {
            lines.append("Older context (text only, no images):")
            lines.append(contentsOf: textHistory.map { entry in
                let age = Int(Date().timeIntervalSince(entry.timestamp))
                return " - [\(age)s ago] \(entry.summary)"
            })
            lines.append("")
        }
        // Labels for the actual images sent alongside this prompt.
        if !images.isEmpty {
            let total = images.count
            lines.append("Recent screenshots (\(total) frames, newest last):")
            for (i, entry) in images.enumerated() {
                let age = Int(Date().timeIntervalSince(entry.timestamp))
                let isCurrent = (i == images.count - 1)
                let label = " - Screenshot \(i + 1)/\(total): [\(isCurrent ? "now" : "\(age)s ago")]"
                lines.append(entry.summary.isEmpty ? label : "\(label) \(entry.summary)")
            }
        }
        return lines.isEmpty ? "(no previous context)" : lines.joined(separator: "\n")
    }

    /// Formats the previous VLM output for self-refinement, or "" when absent.
    func formatLastOutput() -> String {
        guard !lastOutput.isEmpty else { return "" }
        return """
        Your previous analysis (refine or correct this based on new evidence):
        \(lastOutput)
        If your previous analysis was wrong or incomplete, correct it now. If it was accurate, build on it.
        """
    }

    /// Formats the recent executor-action context, or "" when absent.
    func formatLastExecution() -> String {
        guard !lastExecution.isEmpty else { return "" }
        return """
        IMPORTANT — An AI agent just completed an action for the user:
        \(lastExecution)
        This task is DONE. Do not re-flag the same friction. Look for what the user does NEXT.
        """
    }
}

View File

@@ -8,6 +8,7 @@
/* Begin PBXBuildFile section */ /* Begin PBXBuildFile section */
FF341F642F7932FA00B5716A /* GeminiVLMClient.swift in Sources */ = {isa = PBXBuildFile; fileRef = FF341F632F7932FA00B5716A /* GeminiVLMClient.swift */; }; FF341F642F7932FA00B5716A /* GeminiVLMClient.swift in Sources */ = {isa = PBXBuildFile; fileRef = FF341F632F7932FA00B5716A /* GeminiVLMClient.swift */; };
FF341F662F793A0000B5716A /* HistoryBuffer.swift in Sources */ = {isa = PBXBuildFile; fileRef = FF341F652F793A0000B5716A /* HistoryBuffer.swift */; };
FF935B1E2F78A83100ED3330 /* SpeakerKit in Frameworks */ = {isa = PBXBuildFile; productRef = FF935B1D2F78A83100ED3330 /* SpeakerKit */; }; FF935B1E2F78A83100ED3330 /* SpeakerKit in Frameworks */ = {isa = PBXBuildFile; productRef = FF935B1D2F78A83100ED3330 /* SpeakerKit */; };
FF935B202F78A83100ED3330 /* TTSKit in Frameworks */ = {isa = PBXBuildFile; productRef = FF935B1F2F78A83100ED3330 /* TTSKit */; }; FF935B202F78A83100ED3330 /* TTSKit in Frameworks */ = {isa = PBXBuildFile; productRef = FF935B1F2F78A83100ED3330 /* TTSKit */; };
FF935B222F78A83100ED3330 /* WhisperKit in Frameworks */ = {isa = PBXBuildFile; productRef = FF935B212F78A83100ED3330 /* WhisperKit */; }; FF935B222F78A83100ED3330 /* WhisperKit in Frameworks */ = {isa = PBXBuildFile; productRef = FF935B212F78A83100ED3330 /* WhisperKit */; };
@@ -18,6 +19,7 @@
/* Begin PBXFileReference section */ /* Begin PBXFileReference section */
FF3296C22F785B3300C734EB /* LockInBro.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = LockInBro.app; sourceTree = BUILT_PRODUCTS_DIR; }; FF3296C22F785B3300C734EB /* LockInBro.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = LockInBro.app; sourceTree = BUILT_PRODUCTS_DIR; };
FF341F632F7932FA00B5716A /* GeminiVLMClient.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GeminiVLMClient.swift; sourceTree = "<group>"; }; FF341F632F7932FA00B5716A /* GeminiVLMClient.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GeminiVLMClient.swift; sourceTree = "<group>"; };
FF341F652F793A0000B5716A /* HistoryBuffer.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HistoryBuffer.swift; sourceTree = "<group>"; };
FF935B232F78D0AA00ED3330 /* FloatingPanel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FloatingPanel.swift; sourceTree = "<group>"; }; FF935B232F78D0AA00ED3330 /* FloatingPanel.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FloatingPanel.swift; sourceTree = "<group>"; };
FF935B252F78D0BF00ED3330 /* FloatingHUDView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FloatingHUDView.swift; sourceTree = "<group>"; }; FF935B252F78D0BF00ED3330 /* FloatingHUDView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FloatingHUDView.swift; sourceTree = "<group>"; };
/* End PBXFileReference section */ /* End PBXFileReference section */
@@ -52,6 +54,7 @@
FF935B232F78D0AA00ED3330 /* FloatingPanel.swift */, FF935B232F78D0AA00ED3330 /* FloatingPanel.swift */,
FF935B252F78D0BF00ED3330 /* FloatingHUDView.swift */, FF935B252F78D0BF00ED3330 /* FloatingHUDView.swift */,
FF341F632F7932FA00B5716A /* GeminiVLMClient.swift */, FF341F632F7932FA00B5716A /* GeminiVLMClient.swift */,
FF341F652F793A0000B5716A /* HistoryBuffer.swift */,
); );
sourceTree = "<group>"; sourceTree = "<group>";
}; };
@@ -146,6 +149,7 @@
FF935B262F78D0BF00ED3330 /* FloatingHUDView.swift in Sources */, FF935B262F78D0BF00ED3330 /* FloatingHUDView.swift in Sources */,
FF341F642F7932FA00B5716A /* GeminiVLMClient.swift in Sources */, FF341F642F7932FA00B5716A /* GeminiVLMClient.swift in Sources */,
FF935B242F78D0AA00ED3330 /* FloatingPanel.swift in Sources */, FF935B242F78D0AA00ED3330 /* FloatingPanel.swift in Sources */,
FF341F662F793A0000B5716A /* HistoryBuffer.swift in Sources */,
); );
runOnlyForDeploymentPostprocessing = 0; runOnlyForDeploymentPostprocessing = 0;
}; };
@@ -277,12 +281,10 @@
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
CODE_SIGN_ENTITLEMENTS = LockInBro/LockInBro.entitlements; CODE_SIGN_ENTITLEMENTS = LockInBro/LockInBro.entitlements;
CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_IDENTITY = "Apple Development";
"CODE_SIGN_IDENTITY[sdk=macosx*]" = "Apple Development"; CODE_SIGN_STYLE = Automatic;
CODE_SIGN_STYLE = Manual;
COMBINE_HIDPI_IMAGES = YES; COMBINE_HIDPI_IMAGES = YES;
CURRENT_PROJECT_VERSION = 1; CURRENT_PROJECT_VERSION = 1;
DEVELOPMENT_TEAM = ""; DEVELOPMENT_TEAM = YK2DB9NT3S;
"DEVELOPMENT_TEAM[sdk=macosx*]" = YK2DB9NT3S;
ENABLE_APP_SANDBOX = NO; ENABLE_APP_SANDBOX = NO;
ENABLE_PREVIEWS = YES; ENABLE_PREVIEWS = YES;
ENABLE_USER_SELECTED_FILES = readonly; ENABLE_USER_SELECTED_FILES = readonly;
@@ -299,7 +301,6 @@
PRODUCT_BUNDLE_IDENTIFIER = com.adipu.LockInBro; PRODUCT_BUNDLE_IDENTIFIER = com.adipu.LockInBro;
PRODUCT_NAME = "$(TARGET_NAME)"; PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE_SPECIFIER = ""; PROVISIONING_PROFILE_SPECIFIER = "";
"PROVISIONING_PROFILE_SPECIFIER[sdk=macosx*]" = "Joy Zhuo";
REGISTER_APP_GROUPS = YES; REGISTER_APP_GROUPS = YES;
STRING_CATALOG_GENERATE_SYMBOLS = YES; STRING_CATALOG_GENERATE_SYMBOLS = YES;
SWIFT_APPROACHABLE_CONCURRENCY = YES; SWIFT_APPROACHABLE_CONCURRENCY = YES;
@@ -317,12 +318,10 @@
ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor;
CODE_SIGN_ENTITLEMENTS = LockInBro/LockInBro.entitlements; CODE_SIGN_ENTITLEMENTS = LockInBro/LockInBro.entitlements;
CODE_SIGN_IDENTITY = "Apple Development"; CODE_SIGN_IDENTITY = "Apple Development";
"CODE_SIGN_IDENTITY[sdk=macosx*]" = "Apple Development"; CODE_SIGN_STYLE = Automatic;
CODE_SIGN_STYLE = Manual;
COMBINE_HIDPI_IMAGES = YES; COMBINE_HIDPI_IMAGES = YES;
CURRENT_PROJECT_VERSION = 1; CURRENT_PROJECT_VERSION = 1;
DEVELOPMENT_TEAM = ""; DEVELOPMENT_TEAM = YK2DB9NT3S;
"DEVELOPMENT_TEAM[sdk=macosx*]" = YK2DB9NT3S;
ENABLE_APP_SANDBOX = NO; ENABLE_APP_SANDBOX = NO;
ENABLE_PREVIEWS = YES; ENABLE_PREVIEWS = YES;
ENABLE_USER_SELECTED_FILES = readonly; ENABLE_USER_SELECTED_FILES = readonly;
@@ -339,7 +338,6 @@
PRODUCT_BUNDLE_IDENTIFIER = com.adipu.LockInBro; PRODUCT_BUNDLE_IDENTIFIER = com.adipu.LockInBro;
PRODUCT_NAME = "$(TARGET_NAME)"; PRODUCT_NAME = "$(TARGET_NAME)";
PROVISIONING_PROFILE_SPECIFIER = ""; PROVISIONING_PROFILE_SPECIFIER = "";
"PROVISIONING_PROFILE_SPECIFIER[sdk=macosx*]" = "Joy Zhuo";
REGISTER_APP_GROUPS = YES; REGISTER_APP_GROUPS = YES;
STRING_CATALOG_GENERATE_SYMBOLS = YES; STRING_CATALOG_GENERATE_SYMBOLS = YES;
SWIFT_APPROACHABLE_CONCURRENCY = YES; SWIFT_APPROACHABLE_CONCURRENCY = YES;

View File

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>SchemeUserState</key>
<dict>
<key>LockInBro.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>0</integer>
</dict>
</dict>
</dict>
</plist>

View File

@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>SchemeUserState</key>
<dict>
<key>LockInBro.xcscheme_^#shared#^_</key>
<dict>
<key>orderHint</key>
<integer>0</integer>
</dict>
</dict>
</dict>
</plist>

View File

@@ -265,6 +265,26 @@ final class APIClient {
// MARK: - Sessions // MARK: - Sessions
/// Returns all active + interrupted sessions (for VLM session context).
func getOpenSessions() async throws -> [OpenSession] {
do {
let data = try await req("/sessions/open")
return try decode([OpenSession].self, from: data)
} catch NetworkError.httpError(404, _) {
return []
}
}
/// Create a task detected by the VLM from screen analysis.
func createVLMTask(title: String) async throws -> AppTask {
let body = try JSONSerialization.data(withJSONObject: [
"title": title,
"source": "vlm_detected"
])
let data = try await req("/tasks", method: "POST", body: body)
return try decode(AppTask.self, from: data)
}
/// Returns the currently active session, or nil if none (404). /// Returns the currently active session, or nil if none (404).
func getActiveSession() async throws -> FocusSession? { func getActiveSession() async throws -> FocusSession? {
do { do {
@@ -318,6 +338,28 @@ final class APIClient {
_ = try await req("/sessions/\(sessionId)/checkpoint", method: "POST", body: body) _ = try await req("/sessions/\(sessionId)/checkpoint", method: "POST", body: body)
} }
// MARK: - Nudge (cross-device)
/// Send a focus-session nudge via the backend push pipeline to all signed-in devices.
func sendNudge(
sessionId: String,
title: String,
body: String,
nudgeNumber: Int,
lastStep: String?,
nextStep: String?
) async throws {
var dict: [String: Any] = [
"title": title,
"body": body,
"nudge_number": nudgeNumber,
]
if let ls = lastStep { dict["last_step"] = ls }
if let ns = nextStep { dict["next_step"] = ns }
let bodyData = try JSONSerialization.data(withJSONObject: dict)
_ = try await req("/sessions/\(sessionId)/nudge", method: "POST", body: bodyData)
}
// MARK: - App Activity // MARK: - App Activity
func appActivity( func appActivity(
@@ -352,14 +394,16 @@ final class APIClient {
if let stepId = result.currentStepId { payload["current_step_id"] = stepId } if let stepId = result.currentStepId { payload["current_step_id"] = stepId }
if let note = result.checkpointNoteUpdate { payload["checkpoint_note_update"] = note } if let note = result.checkpointNoteUpdate { payload["checkpoint_note_update"] = note }
if let app = result.appName { payload["app_name"] = app } if let app = result.appName { payload["app_name"] = app }
if let nudge = result.gentleNudge { payload["gentle_nudge"] = nudge } if let notif = result.notification {
payload["notification"] = ["type": notif.type, "message": notif.message as Any]
}
if let friction = result.friction { if let friction = result.friction {
payload["friction"] = [ payload["friction"] = [
"type": friction.type, "type": friction.type,
"confidence": friction.confidence, "confidence": friction.confidence,
"description": friction.description as Any, "description": friction.description as Any,
"proposed_actions": friction.proposedActions.map { "proposed_actions": friction.proposedActions.map {
["label": $0.label, "action_type": $0.actionType, "details": $0.details as Any] ["label": $0.label, "details": $0.details as Any]
}, },
] ]
} }

View File

@@ -83,7 +83,7 @@ struct FocusSessionView: View {
} }
// Latest nudge // Latest nudge
if let nudge = session.lastNudge { if let nudge = session.nudgeMessage {
NudgeCard(message: nudge) NudgeCard(message: nudge)
} }
@@ -401,7 +401,7 @@ private struct ProactiveCardView: View {
return description ?? "I noticed something that might be slowing you down." return description ?? "I noticed something that might be slowing you down."
case .appSwitchLoop(let apps, let count): case .appSwitchLoop(let apps, let count):
return "You've switched between \(apps.joined(separator: "")) \(count)× in a row — are you stuck?" return "You've switched between \(apps.joined(separator: "")) \(count)× in a row — are you stuck?"
case .sessionAction(_, _, let checkpoint, let reason, _): case .sessionAction(_, _, let checkpoint, let reason, _, _):
return checkpoint.isEmpty ? reason : "Left off: \(checkpoint)" return checkpoint.isEmpty ? reason : "Left off: \(checkpoint)"
} }
} }

View File

@@ -2,10 +2,6 @@
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0"> <plist version="1.0">
<dict> <dict>
<key>com.apple.developer.applesignin</key>
<array>
<string>Default</string>
</array>
<key>com.apple.security.app-sandbox</key> <key>com.apple.security.app-sandbox</key>
<false/> <false/>
</dict> </dict>

View File

@@ -18,6 +18,11 @@ struct MenuBarView: View {
Divider() Divider()
// Settings
settingsSection
Divider()
// Bottom // Bottom
HStack { HStack {
Text(auth.currentUser?.displayName ?? auth.currentUser?.email ?? "LockInBro") Text(auth.currentUser?.displayName ?? auth.currentUser?.email ?? "LockInBro")
@@ -120,6 +125,31 @@ struct MenuBarView: View {
} }
.padding(.vertical, 4) .padding(.vertical, 4)
} }
private var settingsSection: some View {
HStack(spacing: 8) {
Image(systemName: "bell.badge")
.foregroundStyle(.secondary)
.frame(width: 16)
Text("Nudge after")
.font(.caption)
.foregroundStyle(.secondary)
Spacer()
Picker("", selection: Binding(
get: { Int(session.distractionThresholdSeconds) },
set: { session.distractionThresholdSeconds = TimeInterval($0) }
)) {
Text("1 min").tag(60)
Text("2 min").tag(120)
Text("3 min").tag(180)
Text("5 min").tag(300)
}
.pickerStyle(.menu)
.frame(width: 80)
.font(.caption)
}
.padding(.horizontal, 12)
.padding(.vertical, 6)
}
} }
// MARK: - Menu Bar Button // MARK: - Menu Bar Button

View File

@@ -208,14 +208,8 @@ struct StepPlanResponse: Codable {
/// A single action the proactive agent can take on the user's behalf. /// A single action the proactive agent can take on the user's behalf.
struct ProposedAction: Codable { struct ProposedAction: Codable {
let label: String // e.g. "Extract all 14 events" let label: String // e.g. "Extract all 14 events into transcript.md"
let actionType: String // e.g. "auto_extract", "brain_dump" let details: String? // Executor instruction spec (not shown as UI text)
let details: String?
enum CodingKeys: String, CodingKey {
case label, details
case actionType = "action_type"
}
} }
/// Friction pattern detected by the upgraded Argus VLM prompt. /// Friction pattern detected by the upgraded Argus VLM prompt.
@@ -244,14 +238,25 @@ struct SessionAction: Codable {
/// resume | switch | complete | start_new | none /// resume | switch | complete | start_new | none
let type: String let type: String
let sessionId: String? let sessionId: String?
/// When start_new matches an existing task, the VLM sets this to the task's ID.
let taskId: String?
let reason: String? let reason: String?
enum CodingKeys: String, CodingKey { enum CodingKeys: String, CodingKey {
case type, reason case type, reason
case sessionId = "session_id" case sessionId = "session_id"
case taskId = "task_id"
} }
} }
/// VLM-decided notification intent — replaces the old `gentle_nudge` field.
/// The model itself decides whether anything should be surfaced to the user.
struct VLMNotification: Codable {
    /// Notification kind: "none" | "nudge" | "friction".
    let type: String
    /// Nudge text; populated when type == "nudge", nil otherwise.
    let message: String?
}
struct DistractionAnalysisResponse: Codable { struct DistractionAnalysisResponse: Codable {
let onTask: Bool let onTask: Bool
let currentStepId: String? let currentStepId: String?
@@ -260,12 +265,13 @@ struct DistractionAnalysisResponse: Codable {
let stepsCompleted: [String] let stepsCompleted: [String]
// Upgraded Argus prompt fields (nil when backend uses legacy prompt) // Upgraded Argus prompt fields (nil when backend uses legacy prompt)
let friction: FrictionInfo? let friction: FrictionInfo?
let sessionAction: SessionAction? // new argus: session lifecycle suggestions let sessionAction: SessionAction?
/// VLM explicitly decides what to show: none | nudge | friction
let notification: VLMNotification?
let intent: String? // skimming | engaged | unclear | null let intent: String? // skimming | engaged | unclear | null
let distractionType: String? let distractionType: String?
let appName: String? let appName: String?
let confidence: Double let confidence: Double
let gentleNudge: String?
let vlmSummary: String? let vlmSummary: String?
enum CodingKeys: String, CodingKey { enum CodingKeys: String, CodingKey {
@@ -274,12 +280,11 @@ struct DistractionAnalysisResponse: Codable {
case inferredTask = "inferred_task" case inferredTask = "inferred_task"
case checkpointNoteUpdate = "checkpoint_note_update" case checkpointNoteUpdate = "checkpoint_note_update"
case stepsCompleted = "steps_completed" case stepsCompleted = "steps_completed"
case friction, intent case friction, notification, intent
case sessionAction = "session_action" case sessionAction = "session_action"
case distractionType = "distraction_type" case distractionType = "distraction_type"
case appName = "app_name" case appName = "app_name"
case confidence case confidence
case gentleNudge = "gentle_nudge"
case vlmSummary = "vlm_summary" case vlmSummary = "vlm_summary"
} }
} }
@@ -347,11 +352,14 @@ struct ProactiveCard: Identifiable {
/// Heuristic app-switch loop detected by NSWorkspace observer (fallback when VLM hasn't returned friction yet). /// Heuristic app-switch loop detected by NSWorkspace observer (fallback when VLM hasn't returned friction yet).
case appSwitchLoop(apps: [String], switchCount: Int) case appSwitchLoop(apps: [String], switchCount: Int)
/// VLM suggests a session lifecycle action (new argus: resume, switch, complete, start_new). /// VLM suggests a session lifecycle action (new argus: resume, switch, complete, start_new).
case sessionAction(type: String, taskTitle: String, checkpoint: String, reason: String, sessionId: String?) /// taskId: if start_new matches an existing unstarted task, this is set so we link instead of creating.
case sessionAction(type: String, taskTitle: String, checkpoint: String, reason: String, sessionId: String?, taskId: String?)
} }
let id = UUID() let id = UUID()
let source: Source let source: Source
/// For start_new: an existing task from the database that matches the inferred work.
var matchedTask: AppTask?
/// Human-readable title for the card header. /// Human-readable title for the card header.
var title: String { var title: String {
@@ -366,7 +374,7 @@ struct ProactiveCard: Identifiable {
} }
case .appSwitchLoop: case .appSwitchLoop:
return "Repetitive Pattern Detected" return "Repetitive Pattern Detected"
case .sessionAction(let type, let taskTitle, _, _, _): case .sessionAction(let type, let taskTitle, _, _, _, _):
switch type { switch type {
case "resume": return "Resume: \(taskTitle)" case "resume": return "Resume: \(taskTitle)"
case "switch": return "Switch to: \(taskTitle)" case "switch": return "Switch to: \(taskTitle)"
@@ -390,7 +398,7 @@ struct ProactiveCard: Identifiable {
} }
case .appSwitchLoop: case .appSwitchLoop:
return "arrow.triangle.2.circlepath" return "arrow.triangle.2.circlepath"
case .sessionAction(let type, _, _, _, _): case .sessionAction(let type, _, _, _, _, _):
switch type { switch type {
case "resume": return "arrow.counterclockwise.circle" case "resume": return "arrow.counterclockwise.circle"
case "switch": return "arrow.left.arrow.right" case "switch": return "arrow.left.arrow.right"
@@ -402,6 +410,45 @@ struct ProactiveCard: Identifiable {
} }
} }
// MARK: - Open Sessions (GET /sessions/open for VLM session context)
/// Minimal task payload embedded in an open-session response.
struct OpenSessionTask: Codable {
    /// Task title.
    let title: String
    /// Optional longer-form goal text; nil when the task has none.
    let goal: String?
}
/// Last-known working state saved with an open session, used to describe
/// where the user left off. All fields are optional — absent when the
/// backend has no checkpoint data for that dimension.
struct OpenSessionCheckpoint: Codable {
    /// App that was frontmost at checkpoint time.
    let activeApp: String?
    /// File the user was working in, if known.
    let activeFile: String?
    /// ID of the plan step in progress at checkpoint time.
    let currentStepId: String?
    /// Short summary of the last action taken.
    let lastActionSummary: String?
    // Maps the backend's snake_case JSON keys to Swift camelCase.
    enum CodingKeys: String, CodingKey {
        case activeApp = "active_app"
        case activeFile = "active_file"
        case currentStepId = "current_step_id"
        case lastActionSummary = "last_action_summary"
    }
}
/// A session returned by GET /sessions/open — either currently active or
/// interrupted — supplied to the VLM as session context.
struct OpenSession: Identifiable, Codable {
    /// Backend session ID.
    let id: String
    /// ID of the linked task, if the session is attached to one.
    let taskId: String?
    /// Embedded task summary, if the session is attached to one.
    let task: OpenSessionTask?
    let status: String // active | interrupted
    /// Platform the session was started on (as reported by the backend).
    let platform: String
    /// Session start timestamp (string as sent by the backend).
    let startedAt: String
    /// Session end timestamp; nil while the session remains open.
    let endedAt: String?
    /// Last saved working state, if any.
    let checkpoint: OpenSessionCheckpoint?
    // Maps the backend's snake_case JSON keys to Swift camelCase.
    enum CodingKeys: String, CodingKey {
        case id, task, status, platform, checkpoint
        case taskId = "task_id"
        case startedAt = "started_at"
        case endedAt = "ended_at"
    }
}
// MARK: - API Error // MARK: - API Error
struct APIErrorResponse: Codable { struct APIErrorResponse: Codable {

File diff suppressed because it is too large Load Diff