This commit is contained in:
2026-03-29 06:57:34 -04:00
commit 37503231b3
31 changed files with 3444 additions and 0 deletions

0
app/services/__init__.py Normal file
View File

18
app/services/db.py Normal file
View File

@@ -0,0 +1,18 @@
import asyncpg
from app.config import settings
# Process-wide connection pool, created lazily by get_pool() on first use.
pool: asyncpg.Pool | None = None
async def get_pool() -> asyncpg.Pool:
    """Lazily create and return the process-wide asyncpg connection pool.

    NOTE(review): not guarded against concurrent first calls — two coroutines
    racing here could each create a pool; confirm startup serializes this.
    """
    global pool
    if pool is not None:
        return pool
    pool = await asyncpg.create_pool(settings.DATABASE_URL, min_size=2, max_size=10)
    return pool
async def close_pool():
    """Close the shared pool (if any) and reset it so get_pool() can recreate it."""
    global pool
    if not pool:
        return
    await pool.close()
    pool = None

View File

@@ -0,0 +1,49 @@
import asyncio
import httpx
from app.config import settings
# Base URL for the Hex REST API (v1).
HEX_API_BASE = "https://app.hex.tech/api/v1"
# Auth/content headers sent with every Hex API call.
HEADERS = {
    "Authorization": f"Bearer {settings.HEX_API_TOKEN}",
    "Content-Type": "application/json",
}
# Maps a friendly notebook key (used by callers) to its Hex project ID.
NOTEBOOKS = {
    "distraction_patterns": settings.HEX_NB_DISTRACTIONS,
    "focus_trends": settings.HEX_NB_FOCUS_TRENDS,
    "weekly_report": settings.HEX_NB_WEEKLY_REPORT,
}
async def run_notebook(notebook_key: str, user_id: str) -> dict:
    """Trigger a Hex notebook run for a user and block until it finishes.

    Returns {"status": "COMPLETED", "elapsed": ...} on success.
    Raises ValueError for an unknown notebook key, RuntimeError when the run
    ends in a failure state, and TimeoutError after ~60s of polling.
    """
    project_id = NOTEBOOKS.get(notebook_key)
    if not project_id:
        raise ValueError(f"Unknown notebook: {notebook_key}")
    async with httpx.AsyncClient(timeout=60) as http:
        # Kick off the run: POST /projects/{projectId}/runs
        trigger = await http.post(
            f"{HEX_API_BASE}/projects/{project_id}/runs",
            headers=HEADERS,
            json={"inputParams": {"user_id": user_id}},
        )
        trigger.raise_for_status()
        run_id = trigger.json()["runId"]
        # Poll GET /projects/{projectId}/runs/{runId} every 2s, up to 30 tries.
        run_url = f"{HEX_API_BASE}/projects/{project_id}/runs/{run_id}"
        for _attempt in range(30):
            poll = await http.get(run_url, headers=HEADERS)
            poll.raise_for_status()
            body = poll.json()
            state = body["status"]
            if state == "COMPLETED":
                return {"status": "COMPLETED", "elapsed": body.get("elapsedTime")}
            if state in ("ERRORED", "KILLED", "UNABLE_TO_ALLOCATE_KERNEL"):
                raise RuntimeError(f"Hex run failed: {state}")
            await asyncio.sleep(2)
    raise TimeoutError("Hex notebook run timed out")

351
app/services/llm.py Normal file
View File

@@ -0,0 +1,351 @@
import base64
import json
import logging
from app.config import settings
logger = logging.getLogger(__name__)
# ── Provider setup: prefer Anthropic, fall back to Gemini ──
# Exactly one provider is initialized at import time, based on which API key
# is present in settings. _provider stays None when neither is configured;
# calls then fail fast in _check_provider(). The SDK imports are deliberately
# conditional so only the configured provider's package must be installed.
_provider: str | None = None
if settings.ANTHROPIC_API_KEY:
    import anthropic

    _anthropic_client = anthropic.Anthropic(api_key=settings.ANTHROPIC_API_KEY)
    _provider = "anthropic"
    _model = "claude-sonnet-4-20250514"
    logger.info("LLM provider: Anthropic (Claude)")
elif settings.GEMINI_API_KEY:
    from google import genai
    from google.genai import types as genai_types

    _gemini_client = genai.Client(api_key=settings.GEMINI_API_KEY)
    _provider = "gemini"
    _model = "gemini-3.1-pro-preview"
    logger.info("LLM provider: Google (Gemini)")
def _parse_json(text: str) -> dict | list:
import re
text = text.strip()
# Strip markdown code fences
if text.startswith("```"):
text = text.split("\n", 1)[1]
text = text.rsplit("```", 1)[0]
# Find the first { or [ and last } or ]
start = -1
for i, c in enumerate(text):
if c in "{[":
start = i
break
if start == -1:
raise ValueError(f"No JSON found in LLM response: {text[:200]}")
end = max(text.rfind("}"), text.rfind("]"))
if end == -1:
raise ValueError(f"No closing bracket in LLM response: {text[:200]}")
json_str = text[start:end + 1]
# Strip // comments (Gemini sometimes adds these)
json_str = re.sub(r'//[^\n]*', '', json_str)
# Strip trailing commas before } or ]
json_str = re.sub(r',\s*([}\]])', r'\1', json_str)
return json.loads(json_str)
def _check_provider():
    """Raise RuntimeError when no LLM provider was configured at import time."""
    if _provider:
        return
    raise RuntimeError("No LLM API key configured. Set ANTHROPIC_API_KEY or GEMINI_API_KEY in .env")
async def _text_completion(system: str, user_content: str, max_tokens: int = 1024) -> str:
    """Run a plain-text completion through the active provider.

    The system prompt is prepended to the user message for Anthropic and
    passed as a system_instruction for Gemini. Returns the raw response text.
    """
    _check_provider()
    if _provider != "anthropic":
        # Gemini path (the only other provider _check_provider allows).
        result = _gemini_client.models.generate_content(
            model=_model,
            config={"system_instruction": system},
            contents=user_content,
        )
        return result.text
    result = _anthropic_client.messages.create(
        model=_model,
        max_tokens=max_tokens,
        messages=[{"role": "user", "content": f"{system}\n\n{user_content}"}],
    )
    return result.content[0].text
async def _vision_completion(system: str, image_bytes: bytes, user_text: str, max_tokens: int = 512) -> str:
    """Run an image+text completion through the active provider.

    The image is sent as JPEG. The system prompt rides with the user text for
    Anthropic and as a system_instruction for Gemini.
    """
    _check_provider()
    if _provider == "anthropic":
        encoded = base64.b64encode(image_bytes).decode()
        image_part = {
            "type": "image",
            "source": {"type": "base64", "media_type": "image/jpeg", "data": encoded},
        }
        text_part = {"type": "text", "text": f"{system}\n\n{user_text}"}
        result = _anthropic_client.messages.create(
            model=_model,
            max_tokens=max_tokens,
            messages=[{"role": "user", "content": [image_part, text_part]}],
        )
        return result.content[0].text
    result = _gemini_client.models.generate_content(
        model=_model,
        config={"system_instruction": system},
        contents=[
            genai_types.Part.from_bytes(data=image_bytes, mime_type="image/jpeg"),
            user_text,
        ],
    )
    return result.text
# ── Public API (unchanged signatures) ──
async def parse_brain_dump(raw_text: str, timezone: str) -> dict:
    """Turn a free-form brain dump into structured tasks with ADHD-friendly steps.

    Args:
        raw_text: Unstructured user text to parse.
        timezone: User's timezone string, interpolated into the prompt so the
            model can reason about deadlines. Note the date itself uses
            server-local datetime.now() — presumably acceptable; verify.

    Returns:
        Parsed JSON dict with "parsed_tasks" and "unparseable_fragments" keys,
        following the schema embedded in the prompt below.
    """
    from datetime import datetime
    system = f"""You are a task parser and ADHD-friendly planner.
Extract structured tasks from this brain dump, then break each task into
concrete, actionable steps someone with ADHD can start immediately.
Today's date: {datetime.now().strftime("%Y-%m-%d")}
User's timezone: {timezone}
Task extraction rules:
- Be generous with deadlines — infer from context.
- If no deadline is obvious, set priority to 0 (unset).
- Unrelated items stay as separate top-level tasks.
Step rules (applied to every task's subtasks array):
- Each step should be 5-15 minutes, specific enough to start without decision paralysis.
- First step should be the EASIEST to reduce activation energy.
- Steps explicitly mentioned in the brain dump have "suggested": false.
- Then ADD 1-3 additional steps the user likely needs but didn't mention, with "suggested": true.
Examples: "gather materials", "review before sending", "set a reminder", "test it works".
- Keep step titles short and action-oriented.
- Every task should have at least 2 steps total.
Respond ONLY with JSON, no other text.
Example:
{{
"parsed_tasks": [{{
"title": "concise task title",
"description": "any extra detail from the dump",
"deadline": "ISO 8601 or null",
"priority": "0-4 integer (0=unset, 1=low, 2=med, 3=high, 4=urgent)",
"estimated_minutes": "total for all steps or null",
"tags": ["work", "personal", "health", "errands", etc.],
"subtasks": [
{{"title": "step from the dump", "description": null, "deadline": null, "estimated_minutes": 10, "suggested": false}},
{{"title": "AI-suggested next step", "description": null, "deadline": null, "estimated_minutes": 5, "suggested": true}}
]
}}],
"unparseable_fragments": ["text that couldn't be parsed into tasks"]
}}"""
    text = await _text_completion(system, f"Brain dump:\n{raw_text}", max_tokens=2048)
    return _parse_json(text)
async def generate_step_plan(task_title: str, task_description: str | None, estimated_minutes: int | None) -> list:
    """Break one task into concrete 5-15 minute steps via the LLM.

    Args:
        task_title: Title of the task to plan.
        task_description: Optional detail; "N/A" is substituted when absent.
        estimated_minutes: Target total time; "unknown" is sent when falsy.

    Returns:
        Parsed JSON list of step dicts (sort_order, title, description,
        estimated_minutes) per the schema embedded in the prompt.
    """
    est = f"{estimated_minutes} minutes" if estimated_minutes else "unknown"
    system = f"""You are an ADHD-friendly task planner.
Break this task into concrete steps of 5-15 minutes each.
Each step should be specific enough that someone with ADHD
can start immediately without decision paralysis.
Rules:
- First step should be the EASIEST (reduce activation energy)
- Steps should be independently completable
- Include time estimates per step
- Total estimated time should roughly match the task estimate
- No step longer than 15 minutes
Respond ONLY with JSON array:
[{{
"sort_order": 1,
"title": "specific action description",
"description": "additional detail if needed",
"estimated_minutes": number
}}]"""
    text = await _text_completion(system, f"Task: {task_title}\nDescription: {task_description or 'N/A'}\nEstimated total: {est}")
    return _parse_json(text)
async def analyze_screenshot(
    screenshot_bytes: bytes,
    window_title: str,
    task_context: dict,
    recent_summaries: list[str] | None = None,
) -> dict:
    """Legacy server-side VLM analysis. Upgraded with friction detection prompt."""
    # Render the task's steps as one prompt line each.
    step_lines = []
    for step in task_context.get("steps", []):
        cp = f' checkpoint_note="{step["checkpoint_note"]}"' if step.get("checkpoint_note") else ""
        step_lines.append(f' - [{step["status"]}] {step["sort_order"]}. {step["title"]} (id={step["id"]}){cp}\n')
    steps_text = "".join(step_lines)
    # Render recent summaries with "Ns ago" labels, 5s apart, oldest first.
    history_lines = []
    if recent_summaries:
        count = len(recent_summaries)
        for idx, summary in enumerate(recent_summaries):
            history_lines.append(f" - [{(count - idx) * 5}s ago] {summary}\n")
    history_text = "".join(history_lines)
    system = f"""You are a proactive focus assistant analyzing a user's screen.
The user's current task and step progress:
Task: {task_context.get("task_title", "")}
Goal: {task_context.get("task_goal", "")}
Steps:
{steps_text} Window title reported by OS: {window_title}
{"Recent screen history:" + chr(10) + history_text if history_text else ""}
Analyze the current screenshot. Determine:
1. TASK STATUS: Is the user working on their task? Which step? Any steps completed?
2. CHECKPOINT: What specific within-step progress have they made?
3. FRICTION DETECTION: Is the user stuck in any of these patterns?
- REPETITIVE_LOOP: Switching between same 2-3 windows (copying data manually)
- STALLED: Same screen region with minimal changes for extended time
- TEDIOUS_MANUAL: Doing automatable work (filling forms, organizing files, transcribing)
- CONTEXT_OVERHEAD: Many windows open, visibly searching across them
- TASK_RESUMPTION: User just returned to a task they were working on earlier
4. INTENT: If viewing informational content, is the user SKIMMING, ENGAGED, or UNCLEAR?
5. PROPOSED ACTION: If friction detected, suggest a specific action the AI could take.
Respond ONLY with JSON:
{{
"on_task": boolean,
"current_step_id": "step UUID or null",
"checkpoint_note_update": "within-step progress or null",
"steps_completed": ["UUIDs"],
"friction": {{
"type": "repetitive_loop | stalled | tedious_manual | context_overhead | task_resumption | none",
"confidence": 0.0-1.0,
"description": "what the user is struggling with or null",
"proposed_actions": [
{{"label": "action description", "action_type": "auto_extract | brain_dump", "details": "specifics"}}
],
"source_context": "what info to extract from or null",
"target_context": "where to put it or null"
}},
"intent": "skimming | engaged | unclear | null",
"distraction_type": "app_switch | browsing | idle | null",
"app_name": "primary visible application",
"confidence": 0.0-1.0,
"gentle_nudge": "nudge if distracted and no friction action applies, null otherwise",
"vlm_summary": "1-sentence factual description of screen"
}}"""
    text = await _vision_completion(system, screenshot_bytes, "Analyze this screenshot.")
    return _parse_json(text)
async def generate_resume_card(
    task_title: str,
    goal: str | None,
    current_step_title: str | None,
    checkpoint_note: str | None,
    completed_count: int,
    total_count: int,
    next_step_title: str | None,
    minutes_away: int,
    attention_score: int | None,
) -> dict:
    """Generate a short "welcome back" card for a user returning to a task.

    All inputs are rendered into the prompt; None values become "N/A".
    Note: an attention_score of 0 also renders as "N/A" (falsy `or`).

    Returns:
        Parsed JSON dict with welcome_back / you_were_doing / next_step /
        motivation keys, per the schema embedded in the prompt.
    """
    system = """Generate a brief, encouraging context-resume card for
someone with ADHD returning to their task.
Be warm, specific, and action-oriented. No shame. No generic platitudes.
Use the checkpoint_note to give hyper-specific context about where they left off.
Respond ONLY with JSON:
{
"welcome_back": "short friendly greeting (max 8 words)",
"you_were_doing": "1 sentence referencing checkpoint_note specifically",
"next_step": "concrete next action with time estimate",
"motivation": "1 sentence encouragement (ADHD-friendly, no shame)"
}"""
    user_content = f"""Inputs:
- Task: {task_title}
- Overall goal: {goal or "N/A"}
- Current step: {current_step_title or "N/A"}
- Current step checkpoint_note: {checkpoint_note or "N/A"}
- Steps completed: {completed_count} of {total_count}
- Next step after current: {next_step_title or "N/A"}
- Time away: {minutes_away} minutes
- Attention score before leaving: {attention_score or "N/A"}"""
    text = await _text_completion(system, user_content, max_tokens=256)
    return _parse_json(text)
async def generate_app_activity_nudge(
    app_name: str,
    duration_seconds: int,
    task_title: str,
    current_step_title: str | None,
    checkpoint_note: str | None,
) -> str:
    """Produce a single plain-text nudge for a user who drifted to a non-work app.

    Returns the stripped nudge text (no JSON parsing — the prompt asks for
    plain text only).
    """
    # Prefer whole minutes; fall back to seconds for very short drifts.
    minutes = duration_seconds // 60
    if minutes > 0:
        duration_text = f"{minutes} minute{'s' if minutes != 1 else ''}"
    else:
        duration_text = f"{duration_seconds} seconds"
    system = """Generate a single gentle, non-judgmental nudge for someone with ADHD
who drifted to a non-work app during a focus session.
Reference their specific progress to make returning easier.
No shame. Keep it under 30 words.
Respond with ONLY the nudge text, no JSON, no quotes."""
    user_content = f"""Context:
- Distraction app: {app_name}
- Time spent: {duration_text}
- Current task: {task_title}
- Current step: {current_step_title or "N/A"}
- Progress so far: {checkpoint_note or "N/A"}"""
    nudge = await _text_completion(system, user_content, max_tokens=100)
    return nudge.strip()
async def suggest_work_apps(task_title: str, task_description: str | None) -> dict:
    """Ask the LLM which Apple app best fits a task.

    Returns:
        Parsed JSON dict with "suggested_app_scheme" (a URL scheme) and
        "suggested_app_name" keys.
    """
    system = """Given this task, suggest which Apple apps the user likely needs.
Return the most likely single app as the primary suggestion.
Respond ONLY with JSON:
{
"suggested_app_scheme": "URL scheme (e.g. mobilenotes://, x-apple-pages://, com.google.docs://)",
"suggested_app_name": "human-readable name (e.g. Notes, Pages, Google Docs)"
}"""
    text = await _text_completion(system, f"Task: {task_title}\nDescription: {task_description or 'N/A'}", max_tokens=100)
    return _parse_json(text)
async def prioritize_tasks(tasks_json: list, timezone: str) -> list:
    """Rank a list of tasks with the LLM using ADHD-friendly heuristics.

    Args:
        tasks_json: JSON-serializable list of task dicts, sent verbatim.
        timezone: User's timezone string for deadline reasoning (note the
            "Current time" uses server-local datetime.now()).

    Returns:
        Parsed JSON list of {task_id, recommended_priority, reason} dicts.
    """
    from datetime import datetime
    system = """You are an ADHD-friendly task prioritizer.
Consider: deadlines, estimated effort, task dependencies,
and the user's energy patterns.
Rules:
- Hard deadlines always take top priority
- Front-load quick wins (<15min) for momentum
- Group errands together
- Deprioritize tasks with no deadline and low urgency
Respond ONLY with JSON array:
[{
"task_id": "uuid",
"recommended_priority": 1-4,
"reason": "1-sentence explanation"
}]"""
    user_content = f"""Input: {json.dumps(tasks_json)}
Current time: {datetime.now().isoformat()}
User's timezone: {timezone}"""
    text = await _text_completion(system, user_content, max_tokens=512)
    return _parse_json(text)

283
app/services/push.py Normal file
View File

@@ -0,0 +1,283 @@
"""APNs push notification service.
Uses HTTP/2 APNs provider API with .p8 auth key (token-based auth).
Falls back to logging if APNS_KEY_ID / APNS_TEAM_ID / APNS_P8_PATH are not configured.
Required .env vars:
APNS_KEY_ID — 10-char key ID from Apple Developer portal
APNS_TEAM_ID — 10-char team ID from Apple Developer portal
APNS_P8_PATH — absolute path to the AuthKey_XXXXXXXXXX.p8 file
APNS_SANDBOX — True for development/TestFlight, False (default) for production
"""
import base64
import json
import logging
import time
import httpx
from cryptography.hazmat.primitives.asymmetric.ec import ECDSA
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from app.config import settings
from app.services.db import get_pool
logger = logging.getLogger(__name__)
# Cache the provider JWT — valid for 60 min, refresh 5 min early
_apns_token: str | None = None   # most recently minted provider JWT
_apns_token_exp: float = 0.0     # epoch seconds at which the cached JWT expires
_private_key = None              # lazily loaded .p8 signing key (see _make_apns_jwt)
def _b64(data: bytes) -> str:
return base64.urlsafe_b64encode(data).rstrip(b"=").decode()
def _apns_configured() -> bool:
    """True when all three APNs credentials are present in settings."""
    credentials = (settings.APNS_KEY_ID, settings.APNS_TEAM_ID, settings.APNS_P8_PATH)
    return all(credentials)
def _make_apns_jwt() -> str:
    """Return a cached ES256 provider JWT for APNs, minting a new one when stale.

    The .p8 private key is loaded once and cached; tokens are reused for
    55 minutes (APNs allows 60) to avoid re-signing on every push.
    """
    from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature

    global _apns_token, _apns_token_exp, _private_key
    now = time.time()
    # Serve the cached token while it is still fresh.
    if _apns_token and now < _apns_token_exp:
        return _apns_token
    if _private_key is None:
        with open(settings.APNS_P8_PATH, "rb") as f:
            _private_key = load_pem_private_key(f.read(), password=None)
    header = _b64(json.dumps({"alg": "ES256", "kid": settings.APNS_KEY_ID}).encode())
    payload = _b64(json.dumps({"iss": settings.APNS_TEAM_ID, "iat": int(now)}).encode())
    msg = f"{header}.{payload}".encode()
    # BUG FIX: cryptography's sign() returns an ASN.1/DER-encoded ECDSA
    # signature, but JWS ES256 (RFC 7518 §3.4) requires the raw 64-byte
    # r||s concatenation — APNs rejects DER-signed tokens as invalid.
    der_sig = _private_key.sign(msg, ECDSA(hashes.SHA256()))
    r, s = decode_dss_signature(der_sig)
    sig = _b64(r.to_bytes(32, "big") + s.to_bytes(32, "big"))
    token = f"{header}.{payload}.{sig}"
    _apns_token = token
    _apns_token_exp = now + 3300  # 55-minute lifetime (APNs tokens last 60 min)
    return token
async def _send_apns(device_token: str, aps_payload: dict, push_type: str = "alert") -> bool:
    """POST one push to APNs over HTTP/2.

    Args:
        device_token: Hex APNs device token.
        aps_payload: Full payload dict (including the "aps" key).
        push_type: APNs push type header ("alert" or "liveactivity").

    Returns:
        True on HTTP 200. A 410 means the token is dead, so it is removed
        from the DB. All other failures (and transport errors) are logged
        and return False — sends are best-effort.
    """
    host = "api.sandbox.push.apple.com" if settings.APNS_SANDBOX else "api.push.apple.com"
    url = f"https://{host}/3/device/{device_token}"
    topic = settings.APPLE_BUNDLE_ID
    if push_type == "liveactivity":
        # Live Activity pushes require the dedicated topic suffix.
        topic += ".push-type.liveactivity"
    headers = {
        "authorization": f"bearer {_make_apns_jwt()}",
        "apns-topic": topic,
        "apns-push-type": push_type,
        "apns-priority": "10",
    }
    try:
        async with httpx.AsyncClient(http2=True) as client:
            resp = await client.post(url, json=aps_payload, headers=headers, timeout=10.0)
            # Debug logging (was print): keep tokens/payloads out of stdout
            # and under logging config control.
            logger.debug("APNs request: url=%s payload=%s", url, json.dumps(aps_payload))
            logger.debug(
                "APNs response: %s http_version=%s token=…%s body=%s",
                resp.status_code, resp.http_version, device_token[-8:], resp.text,
            )
            if resp.status_code == 200:
                return True
            if resp.status_code == 410:
                # Token is dead — device uninstalled or revoked push. Remove from DB.
                logger.warning(f"APNs 410 Unregistered for token …{device_token[-8:]}, removing from DB")
                await _remove_device_token(device_token)
                return False
            logger.error(f"APNs {resp.status_code} for token …{device_token[-8:]}: {resp.text}")
            return False
    except Exception as exc:
        logger.error(f"APNs request failed: {exc}")
        return False
async def _remove_device_token(device_token: str):
    """Remove a dead APNs token from all users.

    Rebuilds each matching user's device_tokens jsonb array without the
    entry whose "token" field equals the dead token. The @> containment
    predicate limits the UPDATE to rows that actually hold the token.
    """
    pool = await get_pool()
    await pool.execute(
        """UPDATE users SET device_tokens = (
        SELECT COALESCE(jsonb_agg(t), '[]'::jsonb)
        FROM jsonb_array_elements(device_tokens) t
        WHERE t->>'token' != $1
        ) WHERE device_tokens @> $2::jsonb""",
        device_token,
        json.dumps([{"token": device_token}]),
    )
# ── Public API ────────────────────────────────────────────────────────────────
async def get_device_tokens(user_id: str, platform: str | None = None) -> list[dict]:
    """Return a user's registered device-token dicts.

    When *platform* is given, keeps only entries whose "platform" value
    starts with it (prefix match, so "liveactivity" matches
    "liveactivity_update_..." variants too).
    """
    pool = await get_pool()
    row = await pool.fetchrow(
        "SELECT device_tokens FROM users WHERE id = $1::uuid", user_id
    )
    if not row or not row["device_tokens"]:
        return []
    raw = row["device_tokens"]
    # jsonb may arrive as a str or an already-decoded list depending on codecs.
    tokens = json.loads(raw) if isinstance(raw, str) else raw
    if not platform:
        return tokens
    return [t for t in tokens if t.get("platform", "").startswith(platform)]
async def register_device_token(user_id: str, platform: str, token: str):
    """Register (or replace) a device token for a user/platform.

    Any existing entry for the same platform value is dropped from the jsonb
    array, then the new {platform, token} entry is appended — so each exact
    platform string keeps at most one token.
    """
    pool = await get_pool()
    await pool.execute(
        """UPDATE users SET device_tokens = (
        SELECT COALESCE(jsonb_agg(t), '[]'::jsonb)
        FROM jsonb_array_elements(device_tokens) t
        WHERE t->>'platform' != $2
        ) || $3::jsonb
        WHERE id = $1::uuid""",
        user_id,
        platform,
        json.dumps([{"platform": platform, "token": token}]),
    )
async def send_push(user_id: str, platform: str, aps_payload: dict):
    """Send an APNs push to all registered tokens for a user/platform.

    No-op when the user has no matching tokens. When APNs credentials are
    not configured, logs a stub line per token instead of sending.
    """
    tokens = await get_device_tokens(user_id, platform)
    # Debug logging (was print): avoid dumping raw device tokens to stdout.
    logger.debug(
        "send_push → user=%s platform=%s token_count=%d configured=%s",
        user_id, platform, len(tokens), _apns_configured(),
    )
    if not tokens:
        return
    if not _apns_configured():
        for t in tokens:
            logger.info(
                f"[APNs STUB] platform={t['platform']} token=…{t['token'][-8:]} payload={aps_payload}"
            )
        return
    for t in tokens:
        await _send_apns(t["token"], aps_payload)
async def send_task_added(user_id: str, task_title: str, step_count: int = 0):
    """Notify the user on iPhone and iPad that a new task was added."""
    plural = "" if step_count == 1 else "s"
    payload = {
        "aps": {
            "alert": {"title": task_title, "subtitle": f"{step_count} subtask{plural}"},
            "sound": "default",
        }
    }
    for platform in ("iphone", "ipad"):
        await send_push(user_id, platform, payload)
async def send_activity_update(user_id: str, task_title: str, task_id=None, started_at: int | None = None):
    """Send ActivityKit push to update Live Activity on all devices with current step progress."""
    tokens = await get_device_tokens(user_id, "liveactivity_update_")
    if not tokens:
        return
    progress = await _get_step_progress(task_id)
    content_state = _build_content_state(task_title, started_at or int(time.time()), progress)
    if not _apns_configured():
        # No APNs credentials — log the would-be state per token.
        for t in tokens:
            logger.info(f"[ActivityKit STUB] token=…{t['token'][-8:]} state={content_state}")
        return
    payload = {"aps": {"timestamp": int(time.time()), "content-state": content_state, "event": "update"}}
    for t in tokens:
        await _send_apns(t["token"], payload, push_type="liveactivity")
async def send_activity_end(user_id: str, task_title: str = "Session ended", task_id=None):
    """Send ActivityKit push-to-end using per-activity update tokens."""
    tokens = await get_device_tokens(user_id, "liveactivity_update_")
    if not tokens:
        return
    ended_at = int(time.time())
    progress = await _get_step_progress(task_id)
    aps = {
        "timestamp": ended_at,
        "event": "end",
        "content-state": _build_content_state(task_title, ended_at, progress),
        "dismissal-date": ended_at,
    }
    payload = {"aps": aps}
    if not _apns_configured():
        for t in tokens:
            logger.info(f"[ActivityKit END STUB] token=...{t['token'][-8:]}")
        return
    for t in tokens:
        await _send_apns(t["token"], payload, push_type="liveactivity")
async def _get_step_progress(task_id) -> dict:
    """Fetch step progress for a task: completed count, total count, current step title."""
    if not task_id:
        return {"stepsCompleted": 0, "stepsTotal": 0, "currentStepTitle": None, "lastCompletedStepTitle": None}
    pool = await get_pool()
    rows = await pool.fetch(
        "SELECT title, status FROM steps WHERE task_id = $1 ORDER BY sort_order", task_id
    )
    done_titles = [r["title"] for r in rows if r["status"] == "done"]
    pending_titles = [r["title"] for r in rows if r["status"] in ("in_progress", "pending")]
    return {
        "stepsCompleted": len(done_titles),
        "stepsTotal": len(rows),
        # First not-yet-done step in sort order is "current".
        "currentStepTitle": pending_titles[0] if pending_titles else None,
        # Last done step in sort order.
        "lastCompletedStepTitle": done_titles[-1] if done_titles else None,
    }
def _build_content_state(task_title: str, started_at: int, step_progress: dict) -> dict:
state = {
"taskTitle": task_title,
"startedAt": started_at,
"stepsCompleted": step_progress["stepsCompleted"],
"stepsTotal": step_progress["stepsTotal"],
}
if step_progress["currentStepTitle"]:
state["currentStepTitle"] = step_progress["currentStepTitle"]
if step_progress.get("lastCompletedStepTitle"):
state["lastCompletedStepTitle"] = step_progress["lastCompletedStepTitle"]
return state
async def send_activity_start(user_id: str, task_title: str, task_id=None):
    """Send ActivityKit push-to-start to all liveactivity tokens."""
    tokens = await get_device_tokens(user_id, "liveactivity")
    if not tokens:
        return
    started_at = int(time.time())
    progress = await _get_step_progress(task_id)
    payload = {
        "aps": {
            "timestamp": started_at,
            "event": "start",
            "content-state": _build_content_state(task_title, started_at, progress),
            "attributes-type": "FocusSessionAttributes",
            "attributes": {"sessionType": "Focus"},
            "alert": {"title": "Focus Session Started", "body": task_title},
        }
    }
    if not _apns_configured():
        for t in tokens:
            logger.info(f"[ActivityKit START STUB] token=...{t['token'][-8:]} start payload={payload}")
        return
    for t in tokens:
        await _send_apns(t["token"], payload, push_type="liveactivity")