diff --git a/agent_cli/cli.py b/agent_cli/cli.py index 3fd52ea49b..f50c0f7c35 100644 --- a/agent_cli/cli.py +++ b/agent_cli/cli.py @@ -1,6 +1,6 @@ from __future__ import annotations -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path import typer @@ -77,7 +77,7 @@ async def run( # Setup transcript path (always write transcript) if transcript is None: - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") transcript = Path(f"/tmp/adgn-agent-transcript-{timestamp}.jsonl") console.print(f"[dim]Writing transcript to: {transcript}[/dim]") diff --git a/cluster/k8s/inventree/token-provisioner/provision.py b/cluster/k8s/inventree/token-provisioner/provision.py index d614460bd6..61fe2c77ae 100644 --- a/cluster/k8s/inventree/token-provisioner/provision.py +++ b/cluster/k8s/inventree/token-provisioner/provision.py @@ -67,7 +67,7 @@ def needs_renewal(token: dict) -> bool: print("Token has no expiry — skipping renewal.") return False expiry = datetime.date.fromisoformat(expiry_str) - days_remaining = (expiry - datetime.date.today()).days + days_remaining = (expiry - datetime.datetime.now(tz=datetime.UTC).date()).days print(f"Token '{TOKEN_NAME}' expires {expiry_str} ({days_remaining} days remaining).") return days_remaining < RENEW_DAYS_BEFORE diff --git a/debug/spice_lag/analyze.py b/debug/spice_lag/analyze.py index a51f7e356b..45233eb356 100644 --- a/debug/spice_lag/analyze.py +++ b/debug/spice_lag/analyze.py @@ -89,7 +89,7 @@ def _cache_path(frame_file: Path) -> Path: def _parse_clock(clock_str: str) -> datetime.datetime: """Parse HH:MM:SS.mmm clock string.""" padded = clock_str + "000" if len(clock_str.rsplit(".", maxsplit=1)[-1]) == 3 else clock_str - return datetime.datetime.strptime(padded, "%H:%M:%S.%f") + return datetime.datetime.strptime(padded, "%H:%M:%S.%f").replace(tzinfo=datetime.UTC) def analyze_pixeldiff( diff --git a/debug/spice_lag/record.py 
b/debug/spice_lag/record.py index e002cfd774..7b251a3fa8 100644 --- a/debug/spice_lag/record.py +++ b/debug/spice_lag/record.py @@ -89,7 +89,7 @@ def stop_screencast(bus: Gio.DBusConnection) -> None: def _now_str() -> str: """Current wall-clock time as HH:MM:SS.mmm.""" - now = datetime.datetime.now() + now = datetime.datetime.now(tz=datetime.UTC) return now.strftime("%H:%M:%S.") + f"{now.microsecond // 1000:03d}" diff --git a/devinfra/claude/hook_daemon/session_start/handler.py b/devinfra/claude/hook_daemon/session_start/handler.py index e3388eef23..55d0a00fe7 100644 --- a/devinfra/claude/hook_daemon/session_start/handler.py +++ b/devinfra/claude/hook_daemon/session_start/handler.py @@ -11,7 +11,7 @@ import logging import logging.handlers from dataclasses import dataclass -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path import anyio @@ -529,7 +529,7 @@ async def run_session( bazelisk.install_wrapper(paths) # Generate timestamp - hook_timestamp = datetime.now() + hook_timestamp = datetime.now(tz=UTC) timestamp_file = paths.session_dir / "session-hook-last-run" timestamp_file.write_text(f"{hook_timestamp.isoformat()}\n") logger.info("Session start hook timestamp: %s", hook_timestamp.isoformat()) diff --git a/finance/reconcile/cli.py b/finance/reconcile/cli.py index 8aa8ba9cf0..31edc3c862 100644 --- a/finance/reconcile/cli.py +++ b/finance/reconcile/cli.py @@ -101,7 +101,7 @@ def add_external_to_gnucash(external_transaction, book, account_of_interest, ext currency = book.get_table().lookup("ISO4217", "CHF") tx.SetCurrency(currency) tx.SetDescription(external_transaction.description) - tx.SetNotes(f"Imported at {datetime.datetime.now()}") + tx.SetNotes(f"Imported at {datetime.datetime.now(tz=datetime.UTC)}") split_in_splitwise = gnucash.Split(book) split_in_splitwise.SetParent(tx) @@ -162,7 +162,11 @@ def main(_): # 'start_date' sets date at which mapping starts if "start_date" in reconcile_config: - start_date = 
datetime.datetime.strptime(reconcile_config["start_date"], "%Y-%m-%d").date() + start_date = ( + datetime.datetime.strptime(reconcile_config["start_date"], "%Y-%m-%d") + .replace(tzinfo=datetime.UTC) + .date() + ) external_transaction_by_external_id = { external_id: external_transaction diff --git a/finance/reconcile/splitwise_lib.py b/finance/reconcile/splitwise_lib.py index 91c6b4f906..df929c7373 100644 --- a/finance/reconcile/splitwise_lib.py +++ b/finance/reconcile/splitwise_lib.py @@ -110,7 +110,7 @@ def load_splitwise_expenses(splitwise_group_id) -> dict[str, external_system.Ext # We are not involved. continue - dt = datetime.datetime.strptime(expense.date, "%Y-%m-%dT%H:%M:%SZ").date() + dt = datetime.datetime.strptime(expense.date, "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=datetime.UTC).date() expenses[str(expense.id)] = external_system.ExternalExpense( id=str(expense.id), description=((expense.description or "") + (expense.notes or "")), diff --git a/gmail_archiver/planners/aliexpress.py b/gmail_archiver/planners/aliexpress.py index f2d7a210c9..5359cc4d53 100644 --- a/gmail_archiver/planners/aliexpress.py +++ b/gmail_archiver/planners/aliexpress.py @@ -1,225 +1,225 @@ -"""Planner for managing AliExpress order notification emails.""" - -import re -from collections import defaultdict -from datetime import UTC, datetime, timedelta -from enum import StrEnum - -from pydantic import BaseModel - -from gmail_archiver.gmail_api_models import SystemLabel -from gmail_archiver.inbox import GmailInbox -from gmail_archiver.models import Email -from gmail_archiver.plan import Plan - -LABEL_ALIEXPRESS_AUTO_CLEANED = "gmail-archiver/aliexpress-auto-cleaned" - - -class AliExpressStatus(StrEnum): - """Status of an AliExpress order based on email subject.""" - - CONFIRMED = "confirmed" - READY_TO_SHIP = "ready_to_ship" - SHIPPED = "shipped" - IN_TRANSIT = "in_transit" - CLEARED_CUSTOMS = "cleared_customs" - IN_COUNTRY = "in_country" - AT_DELIVERY_CENTER = "at_delivery_center" - 
OUT_FOR_DELIVERY = "out_for_delivery" - DELIVERED = "delivered" - DELIVERY_UPDATE = "delivery_update" - AWAITING_CONFIRMATION = "awaiting_confirmation" - FEEDBACK_REQUEST = "feedback_request" - CLOSED = "closed" - DELAYED_COUPON = "delayed_coupon" - - -STATES_WITH_DEADLINE = {AliExpressStatus.AWAITING_CONFIRMATION, AliExpressStatus.FEEDBACK_REQUEST} - - -class AliExpressEmail(BaseModel): - """Parsed AliExpress email data.""" - - order_id: str | None - status: AliExpressStatus - confirmation_deadline: datetime | None = None - - @classmethod - def display_columns(cls) -> list[tuple[str, str]]: - return [("order_id", "Order ID"), ("status", "Status")] - - @classmethod - def hide_subject(cls) -> bool: - # Order ID + Status fully capture AliExpress subject content - return True - - def format_column(self, key: str) -> str: - if key == "order_id": - return self.order_id or "" - if key == "status": - return self.status.value - return "" - - -# Regex patterns for subject parsing -ORDER_ID_PATTERN = re.compile(r"Order (\d+):") -STATUS_PATTERNS = [ - ("delivered", AliExpressStatus.DELIVERED), - ("out for delivery", AliExpressStatus.OUT_FOR_DELIVERY), - ("at delivery center", AliExpressStatus.AT_DELIVERY_CENTER), - ("in your country", AliExpressStatus.IN_COUNTRY), - ("cleared customs", AliExpressStatus.CLEARED_CUSTOMS), - ("package in transit", AliExpressStatus.IN_TRANSIT), - ("order shipped", AliExpressStatus.SHIPPED), - ("ready to ship", AliExpressStatus.READY_TO_SHIP), - ("order confirmed", AliExpressStatus.CONFIRMED), - ("delivery update", AliExpressStatus.DELIVERY_UPDATE), - ("awaiting confirmation", AliExpressStatus.AWAITING_CONFIRMATION), - ("how did it go", AliExpressStatus.FEEDBACK_REQUEST), - ("is closed", AliExpressStatus.CLOSED), - ("delayed delivery coupon", AliExpressStatus.DELAYED_COUPON), -] - - -class AliExpressParseError(Exception): - """Raised when an AliExpress email cannot be parsed.""" - - -def parse_aliexpress_subject(subject: str) -> AliExpressEmail: - 
"""Parse order ID and status from AliExpress email subject. - - Raises AliExpressParseError if the status cannot be determined. - """ - # Extract order ID - order_match = ORDER_ID_PATTERN.search(subject) - order_id = order_match.group(1) if order_match else None - - # Determine status - subject_lower = subject.lower() - for pattern_text, status in STATUS_PATTERNS: - if pattern_text in subject_lower: - return AliExpressEmail(order_id=order_id, status=status) - - raise AliExpressParseError(f"Unrecognized AliExpress email subject: {subject}") - - -DISPUTE_WINDOW_DAYS = 15 # AliExpress allows 15 days after confirmation to dispute - -# Pattern for "Delivered DD/MM/YYYY" in email body -DELIVERED_DATE_PATTERN = re.compile(r"Delivered\s+(\d{1,2})/(\d{1,2})/(\d{4})") - - -def extract_delivered_date(body: str) -> datetime | None: - """Extract delivered date from AliExpress email body.""" - if match := DELIVERED_DATE_PATTERN.search(body): - day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3)) - try: - return datetime(year, month, day) - except ValueError: - return None - return None - - -def compute_deadline(message: Email) -> datetime | None: - """Compute deadline from delivered date + 15 days. - - Falls back to email received date + 30 days if delivered date not found. - TODO: Would be nice to match with actual delivery dates from other emails in the order. 
- """ - if delivered := extract_delivered_date(message.get_text()): - return delivered + timedelta(days=DISPUTE_WINDOW_DAYS) - - # Fallback: internal_date + 30 days - return message.internal_date.replace(tzinfo=None) + timedelta(days=30) - - -def parse_aliexpress(message: Email, *, should_compute_deadline: bool = False) -> AliExpressEmail: - """Parse AliExpress email, optionally computing deadline from received date.""" - parsed = parse_aliexpress_subject(message.subject) - - if should_compute_deadline and parsed.status in STATES_WITH_DEADLINE: - parsed.confirmation_deadline = compute_deadline(message) - - return parsed - - -class AliExpressPlanner: - """Manages AliExpress order notification emails. - - Logic: - - Groups emails by order ID - - For terminal states (delivered, closed, feedback_request): archive all - - For awaiting_confirmation: keep if deadline not passed, else archive - - For in-progress states: keep only latest per order, archive older - """ - - name = "AliExpress orders" - - def plan(self, inbox: GmailInbox) -> Plan: - plan = Plan(planner=self) - - # Fetch AliExpress transaction emails in inbox (specific sender, not all aliexpress) - messages = inbox.fetch_messages("from:transaction@notice.aliexpress.com label:INBOX") - - # Group by order ID - by_order: dict[str | None, list[Email]] = defaultdict(list) - parsed_cache: dict[str, AliExpressEmail] = {} - unparseable: list[Email] = [] - - for msg in messages: - try: - parsed = parse_aliexpress_subject(msg.subject) - parsed_cache[msg.id] = parsed - by_order[parsed.order_id].append(msg) - except AliExpressParseError: - unparseable.append(msg) - - now = datetime.now(UTC) - - # Report unparseable emails but don't take action - if unparseable: - plan.add_message(f"Skipping {len(unparseable)} emails with unrecognized status:") - for msg in unparseable: - plan.add_message(f" - {msg.subject[:60]}...") - - for order_emails in by_order.values(): - # Sort by date, newest first - sorted_emails = 
sorted(order_emails, key=lambda m: m.internal_date, reverse=True) - latest = sorted_emails[0] - latest_parsed = parsed_cache[latest.id] - - # For states with deadlines, check if deadline passed → archive all - if latest_parsed.status in STATES_WITH_DEADLINE: - full_parsed = parse_aliexpress(latest, should_compute_deadline=True) - - if full_parsed.confirmation_deadline and full_parsed.confirmation_deadline < now: - # Deadline passed - archive all emails for this order - for msg in sorted_emails: - plan.add_action( - message=msg, - labels_to_add=[LABEL_ALIEXPRESS_AUTO_CLEANED], - labels_to_remove=[SystemLabel.INBOX], - reason=f"Deadline passed: {full_parsed.confirmation_deadline}", - custom_data=parsed_cache[msg.id], - ) - continue - - # Default: keep only latest, archive older - plan.add_action( - message=latest, - labels_to_add=[], - labels_to_remove=[], - reason=f"Latest ({latest_parsed.status})", - custom_data=latest_parsed, - ) - - for msg in sorted_emails[1:]: - plan.add_action( - message=msg, - labels_to_add=[LABEL_ALIEXPRESS_AUTO_CLEANED], - labels_to_remove=[SystemLabel.INBOX], - reason="Older (keeping latest only)", - custom_data=parsed_cache[msg.id], - ) - - return plan +"""Planner for managing AliExpress order notification emails.""" + +import re +from collections import defaultdict +from datetime import UTC, datetime, timedelta +from enum import StrEnum + +from pydantic import BaseModel + +from gmail_archiver.gmail_api_models import SystemLabel +from gmail_archiver.inbox import GmailInbox +from gmail_archiver.models import Email +from gmail_archiver.plan import Plan + +LABEL_ALIEXPRESS_AUTO_CLEANED = "gmail-archiver/aliexpress-auto-cleaned" + + +class AliExpressStatus(StrEnum): + """Status of an AliExpress order based on email subject.""" + + CONFIRMED = "confirmed" + READY_TO_SHIP = "ready_to_ship" + SHIPPED = "shipped" + IN_TRANSIT = "in_transit" + CLEARED_CUSTOMS = "cleared_customs" + IN_COUNTRY = "in_country" + AT_DELIVERY_CENTER = 
"at_delivery_center" + OUT_FOR_DELIVERY = "out_for_delivery" + DELIVERED = "delivered" + DELIVERY_UPDATE = "delivery_update" + AWAITING_CONFIRMATION = "awaiting_confirmation" + FEEDBACK_REQUEST = "feedback_request" + CLOSED = "closed" + DELAYED_COUPON = "delayed_coupon" + + +STATES_WITH_DEADLINE = {AliExpressStatus.AWAITING_CONFIRMATION, AliExpressStatus.FEEDBACK_REQUEST} + + +class AliExpressEmail(BaseModel): + """Parsed AliExpress email data.""" + + order_id: str | None + status: AliExpressStatus + confirmation_deadline: datetime | None = None + + @classmethod + def display_columns(cls) -> list[tuple[str, str]]: + return [("order_id", "Order ID"), ("status", "Status")] + + @classmethod + def hide_subject(cls) -> bool: + # Order ID + Status fully capture AliExpress subject content + return True + + def format_column(self, key: str) -> str: + if key == "order_id": + return self.order_id or "" + if key == "status": + return self.status.value + return "" + + +# Regex patterns for subject parsing +ORDER_ID_PATTERN = re.compile(r"Order (\d+):") +STATUS_PATTERNS = [ + ("delivered", AliExpressStatus.DELIVERED), + ("out for delivery", AliExpressStatus.OUT_FOR_DELIVERY), + ("at delivery center", AliExpressStatus.AT_DELIVERY_CENTER), + ("in your country", AliExpressStatus.IN_COUNTRY), + ("cleared customs", AliExpressStatus.CLEARED_CUSTOMS), + ("package in transit", AliExpressStatus.IN_TRANSIT), + ("order shipped", AliExpressStatus.SHIPPED), + ("ready to ship", AliExpressStatus.READY_TO_SHIP), + ("order confirmed", AliExpressStatus.CONFIRMED), + ("delivery update", AliExpressStatus.DELIVERY_UPDATE), + ("awaiting confirmation", AliExpressStatus.AWAITING_CONFIRMATION), + ("how did it go", AliExpressStatus.FEEDBACK_REQUEST), + ("is closed", AliExpressStatus.CLOSED), + ("delayed delivery coupon", AliExpressStatus.DELAYED_COUPON), +] + + +class AliExpressParseError(Exception): + """Raised when an AliExpress email cannot be parsed.""" + + +def parse_aliexpress_subject(subject: 
str) -> AliExpressEmail: + """Parse order ID and status from AliExpress email subject. + + Raises AliExpressParseError if the status cannot be determined. + """ + # Extract order ID + order_match = ORDER_ID_PATTERN.search(subject) + order_id = order_match.group(1) if order_match else None + + # Determine status + subject_lower = subject.lower() + for pattern_text, status in STATUS_PATTERNS: + if pattern_text in subject_lower: + return AliExpressEmail(order_id=order_id, status=status) + + raise AliExpressParseError(f"Unrecognized AliExpress email subject: {subject}") + + +DISPUTE_WINDOW_DAYS = 15 # AliExpress allows 15 days after confirmation to dispute + +# Pattern for "Delivered DD/MM/YYYY" in email body +DELIVERED_DATE_PATTERN = re.compile(r"Delivered\s+(\d{1,2})/(\d{1,2})/(\d{4})") + + +def extract_delivered_date(body: str) -> datetime | None: + """Extract delivered date from AliExpress email body.""" + if match := DELIVERED_DATE_PATTERN.search(body): + day, month, year = int(match.group(1)), int(match.group(2)), int(match.group(3)) + try: + return datetime(year, month, day, tzinfo=UTC) + except ValueError: + return None + return None + + +def compute_deadline(message: Email) -> datetime | None: + """Compute deadline from delivered date + 15 days. + + Falls back to email received date + 30 days if delivered date not found. + TODO: Would be nice to match with actual delivery dates from other emails in the order. 
+ """ + if delivered := extract_delivered_date(message.get_text()): + return delivered + timedelta(days=DISPUTE_WINDOW_DAYS) + + # Fallback: internal_date + 30 days (keep timezone-aware) + return message.internal_date + timedelta(days=30) + + +def parse_aliexpress(message: Email, should_compute_deadline: bool = False) -> AliExpressEmail: + """Parse AliExpress email, optionally computing deadline from received date.""" + parsed = parse_aliexpress_subject(message.subject) + + if should_compute_deadline and parsed.status in STATES_WITH_DEADLINE: + parsed.confirmation_deadline = compute_deadline(message) + + return parsed + + +class AliExpressPlanner: + """Manages AliExpress order notification emails. + + Logic: + - Groups emails by order ID + - For terminal states (delivered, closed, feedback_request): archive all + - For awaiting_confirmation: keep if deadline not passed, else archive + - For in-progress states: keep only latest per order, archive older + """ + + name = "AliExpress orders" + + def plan(self, inbox: GmailInbox) -> Plan: + plan = Plan(planner=self) + + # Fetch AliExpress transaction emails in inbox (specific sender, not all aliexpress) + messages = inbox.fetch_messages("from:transaction@notice.aliexpress.com label:INBOX") + + # Group by order ID + by_order: dict[str | None, list[Email]] = defaultdict(list) + parsed_cache: dict[str, AliExpressEmail] = {} + unparseable: list[Email] = [] + + for msg in messages: + try: + parsed = parse_aliexpress_subject(msg.subject) + parsed_cache[msg.id] = parsed + by_order[parsed.order_id].append(msg) + except AliExpressParseError: + unparseable.append(msg) + + now = datetime.now(UTC) + + # Report unparseable emails but don't take action + if unparseable: + plan.add_message(f"Skipping {len(unparseable)} emails with unrecognized status:") + for msg in unparseable: + plan.add_message(f" - {msg.subject[:60]}...") + + for order_emails in by_order.values(): + # Sort by date, newest first + sorted_emails = 
sorted(order_emails, key=lambda m: m.internal_date, reverse=True) + latest = sorted_emails[0] + latest_parsed = parsed_cache[latest.id] + + # For states with deadlines, check if deadline passed → archive all + if latest_parsed.status in STATES_WITH_DEADLINE: + full_parsed = parse_aliexpress(latest, should_compute_deadline=True) + + if full_parsed.confirmation_deadline and full_parsed.confirmation_deadline < now: + # Deadline passed - archive all emails for this order + for msg in sorted_emails: + plan.add_action( + message=msg, + labels_to_add=[LABEL_ALIEXPRESS_AUTO_CLEANED], + labels_to_remove=[SystemLabel.INBOX], + reason=f"Deadline passed: {full_parsed.confirmation_deadline}", + custom_data=parsed_cache[msg.id], + ) + continue + + # Default: keep only latest, archive older + plan.add_action( + message=latest, + labels_to_add=[], + labels_to_remove=[], + reason=f"Latest ({latest_parsed.status})", + custom_data=latest_parsed, + ) + + for msg in sorted_emails[1:]: + plan.add_action( + message=msg, + labels_to_add=[LABEL_ALIEXPRESS_AUTO_CLEANED], + labels_to_remove=[SystemLabel.INBOX], + reason="Older (keeping latest only)", + custom_data=parsed_cache[msg.id], + ) + + return plan diff --git a/gmail_archiver/planners/anthem_reimbursement.py b/gmail_archiver/planners/anthem_reimbursement.py index 0e9da469e5..fc98921257 100644 --- a/gmail_archiver/planners/anthem_reimbursement.py +++ b/gmail_archiver/planners/anthem_reimbursement.py @@ -39,7 +39,7 @@ def parse_anthem_reimbursement(email: Email) -> AnthemReimbursement: care_dt = None if m := re.search(r"Date of care:\s*([0-9]{1,2}/[0-9]{1,2}/[0-9]{2})", text): with contextlib.suppress(ValueError): - care_dt = datetime.strptime(m.group(1), "%m/%d/%y") + care_dt = datetime.strptime(m.group(1), "%m/%d/%y").replace(tzinfo=UTC) # Parse amounts (remove commas from numbers like "8,676.65") amount_you_pay_decimal = None diff --git a/gmail_archiver/planners/square.py b/gmail_archiver/planners/square.py index 
5006f25f82..a941c91127 100644 --- a/gmail_archiver/planners/square.py +++ b/gmail_archiver/planners/square.py @@ -2,7 +2,7 @@ import contextlib import re -from datetime import datetime +from datetime import UTC, datetime from decimal import Decimal from pydantic import BaseModel @@ -63,7 +63,7 @@ def parse_square(email: Email) -> SquareReceipt: # Parse transaction datetime - expected format: "Mar 16 2023 at 10:26 AM" with contextlib.suppress(ValueError): - transaction_dt = datetime.strptime(m.group(5).strip(), "%b %d %Y at %I:%M %p") + transaction_dt = datetime.strptime(m.group(5).strip(), "%b %d %Y at %I:%M %p").replace(tzinfo=UTC) return SquareReceipt( merchant_name=merchant_name, diff --git a/gmail_archiver/planners/test_anthropic.py b/gmail_archiver/planners/test_anthropic.py index 28e20135e3..176a10f798 100644 --- a/gmail_archiver/planners/test_anthropic.py +++ b/gmail_archiver/planners/test_anthropic.py @@ -1,6 +1,6 @@ """Tests for Anthropic receipt parser.""" -from datetime import datetime +from datetime import UTC, datetime from decimal import Decimal import pytest @@ -43,7 +43,7 @@ def test_parse_full_receipt(self, make_email): assert isinstance(receipt, AnthropicReceipt) assert receipt.amount == Decimal("90.28") - assert receipt.charge_date == datetime(2025, 12, 15) + assert receipt.charge_date == datetime(2025, 12, 15, tzinfo=UTC) assert receipt.invoice_number == "OKBBHMMB-0145" assert receipt.receipt_number == "2554-1935-9612" @@ -54,7 +54,7 @@ def test_parse_minimal_receipt(self, make_email): assert isinstance(receipt, AnthropicReceipt) assert receipt.amount == Decimal("45.00") - assert receipt.charge_date == datetime(2025, 1, 1) + assert receipt.charge_date == datetime(2025, 1, 1, tzinfo=UTC) assert receipt.invoice_number is None assert receipt.receipt_number is None @@ -84,9 +84,9 @@ def test_parse_multiple_amounts_uses_first(self, make_email): @pytest.mark.parametrize( ("date_str", "expected_date"), [ - ("Paid January 15, 2025", datetime(2025, 1, 
15)), - ("Paid February 1, 2025", datetime(2025, 2, 1)), - ("Paid December 31, 2024", datetime(2024, 12, 31)), + ("Paid January 15, 2025", datetime(2025, 1, 15, tzinfo=UTC)), + ("Paid February 1, 2025", datetime(2025, 2, 1, tzinfo=UTC)), + ("Paid December 31, 2024", datetime(2024, 12, 31, tzinfo=UTC)), ], ) def test_parse_date_formats(self, make_email, date_str, expected_date): diff --git a/gmail_archiver/planners/test_square.py b/gmail_archiver/planners/test_square.py index aa67fe18b5..30a6f9e330 100644 --- a/gmail_archiver/planners/test_square.py +++ b/gmail_archiver/planners/test_square.py @@ -1,6 +1,6 @@ """Tests for Square receipt parser.""" -from datetime import datetime +from datetime import UTC, datetime from decimal import Decimal import pytest_bazel @@ -34,8 +34,8 @@ def test_parse_full_receipt(self, make_email): assert result.amount == Decimal("27.78") assert result.card_type == "Visa" assert result.card_last4 == "6915" - assert result.transaction_datetime == datetime(2023, 3, 16, 10, 26) - assert result.email_date == datetime(2023, 3, 16, 17, 31, 44) + assert result.transaction_datetime == datetime(2023, 3, 16, 10, 26, tzinfo=UTC) + assert result.email_date == datetime(2023, 3, 16, 17, 31, 44, tzinfo=UTC) def test_parse_different_card_type(self, make_email): email = make_email( diff --git a/gmail_archiver/planners/usps.py b/gmail_archiver/planners/usps.py index a8565d014c..c5905d98b9 100644 --- a/gmail_archiver/planners/usps.py +++ b/gmail_archiver/planners/usps.py @@ -33,7 +33,7 @@ def parse_usps(email: Email) -> USPSDelivery | None: date_str = match.group(1) # e.g., "Thursday, December 18, 2025" try: - expected_delivery_date = datetime.strptime(date_str, "%A, %B %d, %Y") + expected_delivery_date = datetime.strptime(date_str, "%A, %B %d, %Y").replace(tzinfo=UTC) except ValueError: return None diff --git a/llm/hook_logging.py b/llm/hook_logging.py index 309dc42d54..de54b730d5 100644 --- a/llm/hook_logging.py +++ b/llm/hook_logging.py @@ -2,7 +2,7 @@ 
import json import logging -from datetime import datetime +from datetime import UTC, datetime from typing import Any, NewType from uuid import UUID @@ -24,7 +24,7 @@ def __init__(self, hook_name: str, session_id: SessionID, invocation_id: Invocat def format(self, record: logging.LogRecord) -> str: """Format log record as JSON.""" log_entry: dict[str, Any] = { - "timestamp": datetime.fromtimestamp(record.created).isoformat(), + "timestamp": datetime.fromtimestamp(record.created, tz=UTC).isoformat(), "level": record.levelname, "hook_name": self.hook_name, "session_id": str(self.session_id), diff --git a/llm/html/llm_html/test_token.py b/llm/html/llm_html/test_token.py index 4a348acaa4..dbaf986fbb 100644 --- a/llm/html/llm_html/test_token.py +++ b/llm/html/llm_html/test_token.py @@ -5,7 +5,7 @@ deterministic. """ -from datetime import datetime +from datetime import UTC, datetime import pytest import pytest_bazel @@ -44,7 +44,7 @@ def token_scheme(): @pytest.fixture def fresh_valid_token(token_scheme): """Generate a fresh *valid* token and return the scheme instance & token.""" - prefix, bits = token_scheme.make_token(datetime.now()) + prefix, bits = token_scheme.make_token(datetime.now(tz=UTC)) return prefix + "".join(bits) diff --git a/llm/mcp/habitify/cli.py b/llm/mcp/habitify/cli.py index 93c209913d..d58d26e329 100644 --- a/llm/mcp/habitify/cli.py +++ b/llm/mcp/habitify/cli.py @@ -8,7 +8,7 @@ import signal import subprocess import sys -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Literal @@ -121,7 +121,7 @@ def mcp( except KeyboardInterrupt: err_console.print("\n[yellow]Keyboard interrupt received.[/] Shutting down...") except Exception as e: - logger.error(f"Error running server: {e}", exc_info=True) + logger.exception("Error running server: %s", e) err_console.print(f"[bold red]Error:[/] {e!s}") raise typer.Exit(code=1) @@ -331,7 +331,7 @@ async def _log_async( elif date: formatted_date = 
datetime.fromisoformat(date).strftime("%B %d, %Y") else: - formatted_date = datetime.now().strftime("%B %d, %Y") + formatted_date = datetime.now(tz=UTC).strftime("%B %d, %Y") # Success message with color based on status status_color = get_status_color(status) diff --git a/llm/mcp/habitify/test_habitify_client.py b/llm/mcp/habitify/test_habitify_client.py index 0018bed624..91fda76d13 100644 --- a/llm/mcp/habitify/test_habitify_client.py +++ b/llm/mcp/habitify/test_habitify_client.py @@ -67,7 +67,7 @@ async def test_get_areas(client, mock_async_response, patch_client_method): async def test_get_journal(client, mock_async_response, patch_client_method): - today = datetime.date.today().isoformat() + today = datetime.datetime.now(tz=datetime.UTC).date().isoformat() mock_resp = mock_async_response("get_journal.yaml") with patch_client_method("get", return_value=mock_resp) as mock_get: @@ -84,7 +84,7 @@ async def test_get_journal(client, mock_async_response, patch_client_method): async def test_get_journal_filtered(client, mock_async_response, patch_client_method): - today = datetime.date.today().isoformat() + today = datetime.datetime.now(tz=datetime.UTC).date().isoformat() mock_resp = mock_async_response("get_journal_filtered.yaml") with patch_client_method("get", return_value=mock_resp) as mock_get: diff --git a/llm/mcp/habitify/utils/date_utils.py b/llm/mcp/habitify/utils/date_utils.py index e698fb42ca..8416d4bcf2 100644 --- a/llm/mcp/habitify/utils/date_utils.py +++ b/llm/mcp/habitify/utils/date_utils.py @@ -18,14 +18,14 @@ def parse_date(date_string: str | None = None) -> datetime.datetime: datetime object """ if not date_string: - return datetime.datetime.now() + return datetime.datetime.now(tz=datetime.UTC) try: return datetime.datetime.fromisoformat(date_string) except ValueError: try: # Try to parse as YYYY-MM-DD - return datetime.datetime.strptime(date_string, "%Y-%m-%d") + return datetime.datetime.strptime(date_string, "%Y-%m-%d").replace(tzinfo=datetime.UTC) 
except ValueError: raise ValueError(f"Invalid date format: {date_string}. Please use YYYY-MM-DD.") @@ -41,7 +41,7 @@ def _normalize_date(date: str | datetime.date | datetime.datetime | None = None) Normalized datetime object """ if date is None: - return datetime.datetime.now() + return datetime.datetime.now(tz=datetime.UTC) if isinstance(date, datetime.datetime): return date @@ -96,7 +96,7 @@ def format_date_for_api(date: str | datetime.date | datetime.datetime | None = N # Special case for YYYY-MM-DD string format - optimize to avoid parsing if isinstance(date, str) and len(date.split("-")) == 3: try: - datetime.datetime.strptime(date, "%Y-%m-%d") + datetime.datetime.strptime(date, "%Y-%m-%d").replace(tzinfo=datetime.UTC) return f"{date}T00:00:00+00:00" except ValueError: pass diff --git a/llm/ultra_long_cot/ultra_long_cot_o4.py b/llm/ultra_long_cot/ultra_long_cot_o4.py index b405d648c5..c597ea844a 100644 --- a/llm/ultra_long_cot/ultra_long_cot_o4.py +++ b/llm/ultra_long_cot/ultra_long_cot_o4.py @@ -8,7 +8,7 @@ import os import sys -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any, Literal @@ -58,7 +58,7 @@ def __init__(self): self.total_output_tokens = 0 self.total_reasoning_tokens = 0 self.total_cost = 0.0 - self.start_time = datetime.now() + self.start_time = datetime.now(tz=UTC) def add_usage(self, usage): """Track token usage from API response""" @@ -76,7 +76,7 @@ def add_usage(self, usage): def print_stats(self): """Print usage statistics""" - duration = (datetime.now() - self.start_time).seconds + duration = (datetime.now(tz=UTC) - self.start_time).seconds print("\nšŸ“Š Session Statistics:") print(f"Duration: {duration}s") print(f"Input tokens: {self.total_input_tokens:,}") @@ -139,7 +139,7 @@ def main(): logs_dir.mkdir(exist_ok=True) # Create session log file - session_id = datetime.now().strftime("%Y%m%d_%H%M%S") + session_id = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") log_file = 
logs_dir / f"session_{session_id}.jsonl" print(f"\nšŸ“ Logging to: {log_file}") diff --git a/props/cli/cmd_db.py b/props/cli/cmd_db.py index 97220158fd..10a86a32cd 100644 --- a/props/cli/cmd_db.py +++ b/props/cli/cmd_db.py @@ -4,7 +4,7 @@ import gzip import subprocess -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path import typer @@ -118,7 +118,7 @@ def cmd_db_backup(output: Path | None = BACKUP_OUTPUT_OPT, plain: bool = BACKUP_ if output is None: backup_dir = get_default_backup_dir() backup_dir.mkdir(parents=True, exist_ok=True) - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") suffix = ".sql" if plain else ".sql.gz" output = backup_dir / f"props_backup_{timestamp}{suffix}" @@ -206,7 +206,7 @@ def cmd_db_list_backups() -> None: for backup in backups: stat = backup.stat() size_mb = stat.st_size / (1024 * 1024) - created = datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S") + created = datetime.fromtimestamp(stat.st_mtime, tz=UTC).strftime("%Y-%m-%d %H:%M:%S") table.add_row(backup.name, f"{size_mb:.1f} MB", created) console.print(table) diff --git a/props/core/gepa/gepa_adapter.py b/props/core/gepa/gepa_adapter.py index e10fc582e7..b174b4cb36 100644 --- a/props/core/gepa/gepa_adapter.py +++ b/props/core/gepa/gepa_adapter.py @@ -39,7 +39,7 @@ import tempfile from collections.abc import Mapping, Sequence from dataclasses import dataclass -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any from uuid import UUID @@ -187,10 +187,10 @@ def __init__( # Set up proposal logging if reflection_model provided if reflection_model: - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") log_file = run_dir / f"gepa_proposals_{timestamp}.jsonl" self._setup_proposal_logging(log_file) - logger.info(f"GEPA proposal logging enabled: 
{log_file.absolute()}") + logger.info("GEPA proposal logging enabled: %s", log_file.absolute()) else: # No reflection model - GEPA will use default proposal mechanism self.propose_new_texts = None @@ -310,7 +310,7 @@ def propose_new_texts( f.write( json.dumps( { - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(tz=UTC).isoformat(), "call_id": call_count, "component": name, "type": "input", @@ -337,7 +337,7 @@ def propose_new_texts( f.write( json.dumps( { - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(tz=UTC).isoformat(), "call_id": call_count, "component": name, "type": "output", @@ -521,7 +521,7 @@ async def load_datasets(db: Database) -> tuple[list[Example], list[Example]]: trainset = get_examples_for_split(session, Split.TRAIN) valset = get_examples_for_split(session, Split.VALID) - logger.info(f"Loaded {len(trainset)} training examples, {len(valset)} validation examples") + logger.info("Loaded %s training examples, %s validation examples", len(trainset), len(valset)) return trainset, valset @@ -559,18 +559,18 @@ def _log_run_statistics(critic_model: str, grader_model: str, db: Database) -> N # Log critic statistics total_critic = sum(count for _, count in critic_status_counts) if total_critic > 0: - logger.info(f"Critic run statistics (model={critic_model}, total={total_critic}):") + logger.info("Critic run statistics (model=%s, total=%s):", critic_model, total_critic) for status, count in sorted(critic_status_counts): - logger.info(f" {status}: {count} ({count / total_critic:.1%})") + logger.info(" %s: %s (%.1f%%)", status, count, count / total_critic * 100) else: logger.info("No critic runs found") # Log grader statistics total_grader = sum(count for _, count in grader_status_counts) if total_grader > 0: - logger.info(f"Grader run statistics (model={grader_model}, total={total_grader}):") + logger.info("Grader run statistics (model=%s, total=%s):", grader_model, total_grader) for status, count in 
sorted(grader_status_counts): - logger.info(f" {status}: {count} ({count / total_grader:.1%})") + logger.info(" %s: %s (%.1f%%)", status, count, count / total_grader * 100) else: logger.info("No grader runs found") @@ -631,18 +631,18 @@ async def optimize_with_gepa( """ _gepa_not_implemented() logger.info("Starting GEPA optimization") - logger.info(f"Reflection model: {reflection_model}") - logger.info(f"Max metric calls: {max_metric_calls}") - logger.info(f"Minibatch size: {minibatch_size}") - logger.info(f"Initial prompt length: {len(initial_prompt)} chars") - logger.info(f"Warm start: {warm_start}") + logger.info("Reflection model: %s", reflection_model) + logger.info("Max metric calls: %s", max_metric_calls) + logger.info("Minibatch size: %s", minibatch_size) + logger.info("Initial prompt length: %s chars", len(initial_prompt)) + logger.info("Warm start: %s", warm_start) if seed is not None: - logger.info(f"Random seed: {seed}") + logger.info("Random seed: %s", seed) # Load datasets (always uses critic scopes from database) logger.info("Loading datasets...") trainset, valset = await load_datasets(db) - logger.info(f"Loaded {len(trainset)} training examples, {len(valset)} validation examples") + logger.info("Loaded %s training examples, %s validation examples", len(trainset), len(valset)) # Prepare run directory with optional warm-start checkpoint run_dir = None @@ -659,7 +659,9 @@ async def optimize_with_gepa( with checkpoint_path.open("wb") as f: pickle.dump(historical_state, f) logger.info( - f"Saved historical state with {len(historical_state['program_candidates'])} prompts to {checkpoint_path}" + "Saved historical state with %s prompts to %s", + len(historical_state["program_candidates"]), + checkpoint_path, ) run_dir = temp_dir else: @@ -668,10 +670,10 @@ async def optimize_with_gepa( # If no run_dir yet (no warm start or no historical data), create one if run_dir is None: run_dir = tempfile.mkdtemp(prefix="gepa_run_") - logger.info(f"Created run 
directory: {run_dir}") + logger.info("Created run directory: %s", run_dir) # Create adapter - logger.info(f"Creating CriticAdapter with max_parallelism={max_parallelism}") + logger.info("Creating CriticAdapter with max_parallelism=%s", max_parallelism) adapter = CriticAdapter( critic_client, grader_client, @@ -683,7 +685,7 @@ async def optimize_with_gepa( ) # Run optimization (reflection_lm accepts model string directly) - logger.info(f"Starting GEPA evolutionary search (merge={'enabled' if use_merge else 'disabled'})...") + logger.info("Starting GEPA evolutionary search (merge=%s)...", "enabled" if use_merge else "disabled") result: GEPAResult[CriticOutput, Any] = gepa.optimize( seed_candidate={"system_prompt": initial_prompt}, trainset=trainset, @@ -702,7 +704,7 @@ async def optimize_with_gepa( optimized_prompt = result.candidates[result.best_idx]["system_prompt"] best_score = result.val_aggregate_scores[result.best_idx] - logger.info(f"GEPA optimization complete. Best score: {best_score:.3f}, Metric calls: {result.total_metric_calls}") + logger.info("GEPA optimization complete. 
Best score: %.3f, Metric calls: %s", best_score, result.total_metric_calls) # Log run statistics (critic/grader status breakdown) _log_run_statistics(critic_client.model, grader_client.model, db) diff --git a/ruff.toml b/ruff.toml index 3eefa5d50a..4caa4f8be2 100644 --- a/ruff.toml +++ b/ruff.toml @@ -73,6 +73,7 @@ select = [ "TRY401", # redundant exception object in logging.exception "TRY203", # useless try-except (just re-raises) "TRY400", # logging.error in except → logging.exception + "DTZ", # datetime timezone "RUF" # ruff-specific ] # Keep these off globally; handled case-by-case or via constants diff --git a/skills/proxmox_vm/vm_interact.py b/skills/proxmox_vm/vm_interact.py index db15e34051..973d293919 100644 --- a/skills/proxmox_vm/vm_interact.py +++ b/skills/proxmox_vm/vm_interact.py @@ -8,7 +8,7 @@ import sys import tempfile import time -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Annotated @@ -150,7 +150,7 @@ def do_sendkey(vmid: int, key: str) -> None: def do_screenshot(vmid: int, delay: float) -> Path: remote_ppm = f"/tmp/vm{vmid}-screenshot.ppm" cache_dir = get_cache_dir(vmid) - timestamp = datetime.now().strftime("%Y%m%d-%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d-%H%M%S") local_png = cache_dir / f"{timestamp}.png" qemu_monitor(vmid, f"screendump {remote_ppm}") diff --git a/sysrw/run_eval.py b/sysrw/run_eval.py index 68231cee6d..6d4da985b3 100644 --- a/sysrw/run_eval.py +++ b/sysrw/run_eval.py @@ -8,7 +8,7 @@ import sys from collections import Counter from contextlib import suppress -from datetime import datetime +from datetime import UTC, datetime from importlib import resources from pathlib import Path from typing import Any, cast @@ -493,7 +493,7 @@ async def run_eval( # Caller provided a final directory — use it directly (no nesting) out_dir = base_out else: - ts = datetime.now().strftime("%Y%m%d_%H%M%S") + ts = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") base 
= DEFAULT_BASE # Default layout: runs/ for variants; runs/baseline- for baseline out_dir = base / f"baseline-{ts}" if template_path.name == "current_effective_template.txt" else base / f"{ts}" diff --git a/tana/export/convert.py b/tana/export/convert.py index 5111b03175..c8416dc731 100644 --- a/tana/export/convert.py +++ b/tana/export/convert.py @@ -4,7 +4,7 @@ import html from contextlib import contextmanager from dataclasses import dataclass, field -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from tana.domain.constants import ( @@ -31,7 +31,7 @@ def _journal_headline(name: str) -> str: # pattern: YYYY-MM-DD - Weekday try: date_str = name.split(" ", maxsplit=1)[0] # "2025-05-06" - dt = datetime.strptime(date_str, "%Y-%m-%d") + dt = datetime.strptime(date_str, "%Y-%m-%d").replace(tzinfo=UTC) # TODO: detect day more robustly return dt.strftime("%a, %b %-d") # "Tue, May 6" except (ValueError, IndexError): diff --git a/trilium/search_hack.py b/trilium/search_hack.py index 415bf095d3..3aee849e04 100644 --- a/trilium/search_hack.py +++ b/trilium/search_hack.py @@ -144,7 +144,10 @@ def index(): print(content) print("----") - embeddings["notes"][note_id] = {"string": content, "datetime": datetime.datetime.now().isoformat()} + embeddings["notes"][note_id] = { + "string": content, + "datetime": datetime.datetime.now(tz=datetime.UTC).isoformat(), + } strings = {note["string"] for note in embeddings["notes"].values()} not_embedded = strings - set(embeddings["strings"].keys()) diff --git a/wt/e2e/test_github_pr_display_variants.py b/wt/e2e/test_github_pr_display_variants.py index fa391ec085..6843ba6f26 100644 --- a/wt/e2e/test_github_pr_display_variants.py +++ b/wt/e2e/test_github_pr_display_variants.py @@ -6,7 +6,7 @@ import socket import uuid from dataclasses import dataclass -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from pathlib import Path from typing import Any @@ -147,7 +147,7 
@@ def test_github_pr_variants(variant, expects, github_pr_env: "GithubPrEnv"): state=PRState.OPEN if variant == "open_mergeable" else PRState.CLOSED, draft=False, mergeable=variant in {"open_mergeable", "merged"}, - merged_at=None if variant != "merged" else datetime.now().isoformat(), + merged_at=None if variant != "merged" else datetime.now(tz=UTC).isoformat(), additions=10 if variant == "open_mergeable" else 3 diff --git a/wt/integration/test_integration_cli_output_format.py b/wt/integration/test_integration_cli_output_format.py index 5ac8ff5139..53f5168ee2 100644 --- a/wt/integration/test_integration_cli_output_format.py +++ b/wt/integration/test_integration_cli_output_format.py @@ -1,6 +1,6 @@ """Integration tests that verify actual CLI output formatting.""" -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from unittest.mock import patch @@ -45,7 +45,7 @@ def test_status_table_rendering(self, mock_get_status, cli_runner_with_env, buil branch_name="master", dirty_files_lower_bound=1, untracked_files_lower_bound=1, - last_updated_at=datetime.now(), + last_updated_at=datetime.now(tz=UTC), commit_info=commit_info, ahead_count=2, behind_count=0, @@ -59,7 +59,7 @@ def test_status_table_rendering(self, mock_get_status, cli_runner_with_env, buil branch_name="feature/test", dirty_files_lower_bound=0, untracked_files_lower_bound=0, - last_updated_at=datetime.now(), + last_updated_at=datetime.now(tz=UTC), commit_info=commit_info, ahead_count=1, behind_count=0, @@ -95,7 +95,7 @@ def test_status_unknown_when_not_cached(self, mock_get_status, cli_runner_with_e branch_name="test/test1", dirty_files_lower_bound=0, untracked_files_lower_bound=0, - last_updated_at=datetime.now(), + last_updated_at=datetime.now(tz=UTC), commit_info=commit_info, ahead_count=0, behind_count=0, diff --git a/wt/server/handlers/status_handler.py b/wt/server/handlers/status_handler.py index 1833d6b790..8054395710 100644 --- 
a/wt/server/handlers/status_handler.py +++ b/wt/server/handlers/status_handler.py @@ -3,7 +3,7 @@ import asyncio import logging import time -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from pathlib import Path from wt.server.git_manager import GitManager, NoSuchRefError @@ -94,7 +94,7 @@ async def _compute_worktree_status( branch_name=branch_name or "", dirty_files_lower_bound=0, untracked_files_lower_bound=0, - last_updated_at=datetime.now(), + last_updated_at=datetime.now(tz=UTC), is_cached=False, cache_age_ms=None, is_stale=False, @@ -121,7 +121,7 @@ async def _compute_worktree_status( else: dirty_count, untracked_count = 0, 0 cache_age_ms = None - last_updated_at = datetime.now() + last_updated_at = datetime.now(tz=UTC) task = asyncio.create_task(gs_client.update_working_status()) task.add_done_callback(_log_task_exception) diff --git a/wt/server/types.py b/wt/server/types.py index 54a346a022..7166a8b4a5 100644 --- a/wt/server/types.py +++ b/wt/server/types.py @@ -1,7 +1,7 @@ from __future__ import annotations from dataclasses import dataclass, field -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from wt.shared.protocol import WorktreeID @@ -22,7 +22,7 @@ class GitWorkingStatus: def _now() -> datetime: - return datetime.now() + return datetime.now(tz=UTC) @dataclass(frozen=True, slots=True) diff --git a/wt/server/wt_server.py b/wt/server/wt_server.py index e2b5f0b116..e8289cc8f1 100644 --- a/wt/server/wt_server.py +++ b/wt/server/wt_server.py @@ -19,7 +19,7 @@ import subprocess import time import uuid -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any @@ -363,7 +363,7 @@ def _cancel_periodic_discovery(self) -> None: async def handle_client_request(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None: """Handle a client request using JSON-RPC 2.0 protocol.""" - start_time = datetime.now() + 
start_time = datetime.now(tz=UTC) try: # Read request line @@ -428,7 +428,7 @@ async def _handle_ping_request(self, request: Request, start_time: datetime) -> async def _handle_shutdown_request(self, request: Request, start_time: datetime | None = None) -> Response: """Handle shutdown JSON-RPC method.""" logger.info("Received shutdown request") - self._shutdown_task = asyncio.create_task(self.stop()) + self.shutdown_task = asyncio.create_task(self.stop()) return self._create_success_response("shutting down", request.id) async def _ensure_git_watcher(self, worktree_info: DiscoveredWorktree) -> None: @@ -602,7 +602,7 @@ async def run_daemon(config) -> None: # Signal handling def signal_handler(): logger.info("Received shutdown signal") - daemon._shutdown_task = asyncio.create_task(daemon.stop()) + daemon.shutdown_task = asyncio.create_task(daemon.stop()) signal.signal(signal.SIGTERM, lambda s, f: signal_handler()) signal.signal(signal.SIGINT, lambda s, f: signal_handler()) diff --git a/wt/shared/protocol.py b/wt/shared/protocol.py index 5f765db248..5af3f6033b 100644 --- a/wt/shared/protocol.py +++ b/wt/shared/protocol.py @@ -8,7 +8,7 @@ import json import uuid -from datetime import datetime +from datetime import UTC, datetime from enum import IntEnum, StrEnum from pathlib import Path from typing import Annotated, Literal, NewType, cast @@ -270,11 +270,11 @@ class Collector[T](BaseModel): def ok(self, value: T) -> Collector[T]: """Record a success, preserving last_error.""" - return Collector(last_ok=SourceOk(at=datetime.now(), value=value), last_error=self.last_error) + return Collector(last_ok=SourceOk(at=datetime.now(tz=UTC), value=value), last_error=self.last_error) def error(self, err: str) -> Collector[T]: """Record an error, preserving last_ok.""" - return Collector(last_ok=self.last_ok, last_error=SourceError(at=datetime.now(), error=err)) + return Collector(last_ok=self.last_ok, last_error=SourceError(at=datetime.now(tz=UTC), error=err)) def exception(self, 
exc: BaseException) -> Collector[T]: """Record an exception as an error, preserving last_ok. diff --git a/wt/testing/conftest.py b/wt/testing/conftest.py index 7b9782e2eb..fa1707184e 100644 --- a/wt/testing/conftest.py +++ b/wt/testing/conftest.py @@ -7,7 +7,7 @@ import socket import subprocess import uuid -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from pathlib import Path from unittest.mock import Mock @@ -251,7 +251,7 @@ def sample_status_result(sample_commit_info): behind_count=0, pr_info=PRInfoDisabled(), commit_info=sample_commit_info, - last_updated_at=datetime.now(), + last_updated_at=datetime.now(tz=UTC), dirty_files_lower_bound=0, untracked_files_lower_bound=0, ) diff --git a/x/agent_server/presets.py b/x/agent_server/presets.py index 03f6e227f1..46deff58f9 100644 --- a/x/agent_server/presets.py +++ b/x/agent_server/presets.py @@ -2,7 +2,7 @@ import logging import os -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import TYPE_CHECKING @@ -58,10 +58,13 @@ def load_presets_from_dir(root: Path) -> dict[str, AgentPreset]: preset = AgentPreset.model_validate(data) stat = p.stat() # Fail fast on OS errors preset.file_path = str(p) - preset.modified_at = datetime.fromtimestamp(stat.st_mtime).isoformat() + preset.modified_at = datetime.fromtimestamp(stat.st_mtime, tz=UTC).isoformat() if preset.name in out: logger.warning( - f"Preset name collision: '{preset.name}' from {p} overrides preset from {out[preset.name].file_path}" + "Preset name collision: '%s' from %s overrides preset from %s", + preset.name, + p, + out[preset.name].file_path, ) out[preset.name] = preset return out @@ -99,7 +102,10 @@ def discover_presets(*, override_dir: str | Path | None = None) -> dict[str, Age out[name] = preset else: logger.warning( - f"Preset name collision: '{name}' from {preset.file_path} skipped (already loaded from {out[name].file_path})" + "Preset name collision: '%s' from %s 
skipped (already loaded from %s)", + name, + preset.file_path, + out[name].file_path, ) # Always include a built-in default if none present if "default" not in out: diff --git a/x/claude_linter/cli.py b/x/claude_linter/cli.py new file mode 100644 index 0000000000..10acf26188 --- /dev/null +++ b/x/claude_linter/cli.py @@ -0,0 +1,245 @@ +import datetime +import json +import sys +import tempfile +from pathlib import Path + +import click +import platformdirs +from pydantic import ValidationError + +from llm.claude_code_api import EditToolCall, MultiEditToolCall, WriteToolCall +from x.claude_linter.config import get_merged_config +from x.claude_linter.models import HookRequest, LinterHookResponse +from x.claude_linter.precommit_runner import PreCommitRunner + + +def get_cache_dir() -> Path: + """Get the cache directory for claude-linter. + + Uses platformdirs to respect XDG_CACHE_HOME on Linux. + """ + return Path(platformdirs.user_cache_dir("claude-linter")) + + +def evaluate_pre(req: HookRequest) -> LinterHookResponse: + # Pre-write hook evaluation - early bailout + if not isinstance(req.tool_call, WriteToolCall): + # Return empty response to let normal permission flow continue + return LinterHookResponse() + + tool_call = req.tool_call + if tool_call.content is None: + # Return empty response to let normal permission flow continue + return LinterHookResponse() + + # Run hooks on temp file + with tempfile.NamedTemporaryFile("w", delete=False, suffix=tool_call.file_path.suffix) as tmp: + tmp.write(tool_call.content) + tmp_path = tmp.name + + try: + # Get config for fixing + config = get_merged_config([str(tool_call.file_path.parent)], fix=True) + runner = PreCommitRunner(config) + + # First run: with fixes to see if issues are fixable + original_content = Path(tmp_path).read_text() + ret1, out1, err1 = runner.run([tmp_path], cwd=tool_call.file_path.parent) + fixed_content = Path(tmp_path).read_text() + + # If content didn't change + if original_content == 
fixed_content: + if ret1 != 0: + # Had violations but none were fixable + return _block_with_reason(out1, err1) + # No violations at all - let normal permission flow continue + return LinterHookResponse() + + # Content changed, check if pre-commit is satisfied with the fixed version + _ret2, out2, err2 = runner.run([tmp_path], cwd=tool_call.file_path.parent) + fixed_again_content = Path(tmp_path).read_text() + + if fixed_content == fixed_again_content: + # All violations were fixable - let normal permission flow continue + return LinterHookResponse() + # Pre-commit keeps changing things - non-fixable violations found + return _block_with_reason(out2, err2) + + finally: + Path(tmp_path).unlink() + + +def _block_with_reason(stdout: str, stderr: str) -> LinterHookResponse: + """Create a block response with formatted error output.""" + reason = f"Pre-write check failed with non-fixable errors:\nOutput:\n{stdout}\nError:\n{stderr}" + return LinterHookResponse(decision="block", reason=reason) + + +def evaluate_post(req: HookRequest) -> LinterHookResponse: + # Post-write hook evaluation + if not isinstance(req.tool_call, (WriteToolCall, EditToolCall, MultiEditToolCall)): + return LinterHookResponse() + file_path = req.tool_call.file_path + if not file_path.exists(): + return LinterHookResponse() + + original = file_path.read_text() + + # For Edit/MultiEdit, only check violations without fixing + if isinstance(req.tool_call, (EditToolCall, MultiEditToolCall)): + # Get config without fix flag for Edit/MultiEdit + config = get_merged_config([file_path], fix=False) + runner = PreCommitRunner(config) + + # Run check-only (no fixes) + ret, out, _err = runner.run([file_path], cwd=file_path.parent) + + if ret != 0: + # There are violations - report them + return LinterHookResponse( + decision="block", + reason=( + f"FYI: Your edit was applied successfully, but the file now has linting violations:\n{out}\n\n" + "This is just a notification - your changes have been saved." 
+ ), + ) + # No violations + return LinterHookResponse() + # Write tool - keep original behavior with autofixes + config = get_merged_config([file_path], fix=True) + runner = PreCommitRunner(config) + + # First run: apply autofixes + _ret1, _out1, _err1 = runner.run([file_path], cwd=file_path.parent) + content_after_fixes = file_path.read_text() + + if content_after_fixes == original: + return LinterHookResponse() + return LinterHookResponse(decision="block", reason="FYI: Auto-fixes were applied") + + +@click.group() +@click.version_option() +def cli() -> None: + """Claude Linter CLI.""" + + +@cli.command("check") +@click.option("--files", "-f", multiple=True, type=click.Path(exists=True)) +def check(files: tuple[str, ...]) -> None: + """Run checks on given files or all in current directory.""" + paths = list(files) if files else [str(Path.cwd())] + config = get_merged_config(paths) + runner = PreCommitRunner(config) + runner.run(paths) + sys.exit(0) + + +# Hook commands have been removed - use claude-linter-v2 instead + + +@cli.command("clean") +@click.option("--dry-run", is_flag=True, help="Show what would be deleted without deleting") +@click.option("--older-than", type=int, default=7, help="Delete logs older than N days (default: 7)") +def clean(dry_run: bool, older_than: int) -> None: + """Clean up old log files.""" + log_dir = get_cache_dir() + if not log_dir.exists(): + click.echo("No log directory found") + return + + cutoff_date = datetime.datetime.now(tz=datetime.UTC) - datetime.timedelta(days=older_than) + deleted_count = 0 + total_size = 0 + + # Clean both hook-*.json and debug-*.log files + for pattern in ["hook-*.json", "debug-*.log"]: + for log_file in log_dir.glob(pattern): + # Extract timestamp from filename + try: + # Format: {type}-{iso_timestamp}.{ext} + timestamp_str = log_file.stem.split("-", 1)[1] + file_time = datetime.datetime.fromisoformat(timestamp_str) + + if file_time < cutoff_date: + size = log_file.stat().st_size + total_size += size 
+ + if dry_run: + click.echo(f"Would delete: {log_file.name} ({size} bytes)") + else: + log_file.unlink() + + deleted_count += 1 + except (IndexError, ValueError): + # Skip files with unexpected format + continue + + if dry_run: + click.echo(f"\nWould delete {deleted_count} files ({total_size} bytes)") + else: + click.echo(f"Deleted {deleted_count} files ({total_size} bytes)") + + +@cli.command("hook") +def unified_hook() -> None: + """Unified hook command that routes based on hook_event_name in JSON input.""" + # Create log directory + log_dir = get_cache_dir() + log_dir.mkdir(parents=True, exist_ok=True) + + # Read input + input_json = sys.stdin.read() + + # Try to parse JSON for logging and routing + try: + input_data = json.loads(input_json) + except json.JSONDecodeError: + click.echo("Error: Invalid JSON input", err=True) + sys.exit(1) + + # Parse request to get hook event name + try: + req = HookRequest.model_validate_json(input_json) + except (ValidationError, json.JSONDecodeError) as e: + click.echo(f"Error parsing hook request: {e}", err=True) + sys.exit(1) + + # Route based on hook_event_name + if not req.hook_event_name: + click.echo("Error: hook_event_name not provided", err=True) + sys.exit(1) + + # Create event-specific log file + hook_type = req.hook_event_name.lower().replace("tooluse", "") # "pre" or "post" + log_file = log_dir / f"hook-{hook_type}-{datetime.datetime.now(tz=datetime.UTC).isoformat()}.json" + + # Log input + log_data = { + "timestamp": datetime.datetime.now(tz=datetime.UTC).isoformat(), + "hook_type": hook_type, + "hook_event_name": req.hook_event_name, + "input": input_data, + } + + # Route to appropriate handler + if req.hook_event_name == "PreToolUse": + decision = evaluate_pre(req) + elif req.hook_event_name == "PostToolUse": + decision = evaluate_post(req) + else: + # For other events (Notification, Stop, SubagentStop), return empty response + decision = LinterHookResponse() + + # Handle output + output_json = 
decision.model_dump_json(by_alias=True, exclude_none=True) + print(output_json, file=sys.stdout) + log_data["output"] = json.loads(output_json) + + # Log exit code + log_data["exit_code"] = 0 + with Path(log_file).open("w") as f: + json.dump(log_data, f, indent=2) + + sys.exit(0) diff --git a/x/claude_linter/precommit_runner.py b/x/claude_linter/precommit_runner.py new file mode 100644 index 0000000000..2c72b4052d --- /dev/null +++ b/x/claude_linter/precommit_runner.py @@ -0,0 +1,148 @@ +import datetime +import os +import subprocess +import tempfile +from collections.abc import Sequence +from pathlib import Path +from typing import Any + +import platformdirs +import yaml + +from util.bazel.subprocess import run_python_module + + +class PreCommitRunner: + """Runner for pre-commit hooks based on provided config. + + This simplified version works both in and out of git repositories + by using pre-commit's native capabilities. + """ + + def __init__(self, config: dict[str, Any]) -> None: + self.config = config + + def run(self, paths: Sequence[str | Path], cwd: str | Path | None = None) -> tuple[int, str, str]: + """Run pre-commit hooks on specified paths. 
+ + Args: + paths: List of file paths to check + cwd: Working directory (defaults to current directory) + + Returns: + Tuple of (return_code, stdout, stderr) + """ + # Ensure cwd is set + current_working_dir = Path(cwd) if cwd else Path.cwd() + + # Only create debug logs if explicitly requested via environment variable + debug_enabled = os.environ.get("CLAUDE_LINTER_DEBUG", "").lower() in ("1", "true", "yes") + log_file = None + if debug_enabled: + # Create log file using platformdirs (respects XDG_CACHE_HOME) + log_dir = Path(platformdirs.user_cache_dir("claude-linter")) + log_dir.mkdir(parents=True, exist_ok=True) + log_file = log_dir / f"debug-{datetime.datetime.now(tz=datetime.UTC).isoformat()}.log" + + # Create a temporary git repo to make pre-commit happy + with tempfile.TemporaryDirectory() as tmpdir: + tmpdir_path = Path(tmpdir) + + # Initialize a git repo + subprocess.run(["git", "init"], cwd=tmpdir, capture_output=True, check=False) + subprocess.run( + ["git", "config", "user.email", "test@example.com"], cwd=tmpdir, capture_output=True, check=False + ) + subprocess.run(["git", "config", "user.name", "Test User"], cwd=tmpdir, capture_output=True, check=False) + + # Copy files to temp repo, preserving full path structure + temp_paths = [] + for path_input in paths: + path = Path(path_input) + # Get absolute path + abs_path = path.absolute() + + # Find the git root of the original file (if in a git repo) + try: + git_root_result = subprocess.run( + ["git", "rev-parse", "--show-toplevel"], + cwd=str(abs_path.parent), + capture_output=True, + text=True, + check=True, + ) + original_git_root = Path(git_root_result.stdout.strip()) + # Preserve the full path relative to the original git root + rel_path = abs_path.relative_to(original_git_root) + except (subprocess.CalledProcessError, ValueError): + # Not in a git repo or can't determine relative path + # Fall back to using the full absolute path structure + rel_path = Path(*abs_path.parts[1:]) # Skip the root '/' 
+ + temp_file = tmpdir_path / rel_path + temp_file.parent.mkdir(parents=True, exist_ok=True) + temp_file.write_text(path.read_text()) + temp_paths.append(str(rel_path)) + + # Always use the config as-is (always fix=True) + config = self.config + + # Write config file + config_path = tmpdir_path / ".pre-commit-config.yaml" + config_text = yaml.dump(config) + config_path.write_text(config_text) + + # Write debug info to log if enabled + if log_file: + with log_file.open("w") as f: + f.write("=== Claude Linter Debug Log ===\n") + f.write(f"Time: {datetime.datetime.now(tz=datetime.UTC)}\n") + f.write(f"Working dir: {current_working_dir}\n") + f.write(f"Temp dir: {tmpdir}\n") + f.write(f"Paths: {paths}\n") + f.write(f"Temp paths: {temp_paths}\n") + f.write(f"\n--- Config ---\n{config_text}\n") + + # Write file contents + f.write("\n--- File contents ---\n") + for temp_path in temp_paths: + file_path = tmpdir_path / temp_path + f.write(f"\n{temp_path}:\n") + if file_path.exists(): + f.write(file_path.read_text()) + else: + f.write("(file does not exist)\n") + + # Stage files + subprocess.run(["git", "add", *temp_paths], cwd=tmpdir, capture_output=True, check=False) + + # Run pre-commit via python -m for Bazel sandbox compatibility + result = run_python_module( + "pre_commit", + "run", + "--all-files", # Safe since we're in a temp dir with only our files + "--verbose", + cwd=tmpdir, + capture_output=True, + text=True, + check=False, + ) + + # Append results to log if enabled + if log_file: + with log_file.open("a") as f: + f.write("\n--- Pre-commit command ---\n") + f.write(f"Command: {result.args}\n") + f.write(f"Return code: {result.returncode}\n") + f.write(f"\n--- Stdout ---\n{result.stdout}\n") + f.write(f"\n--- Stderr ---\n{result.stderr}\n") + + # Copy modified files back + for i, rel_path_str in enumerate(temp_paths): + temp_file = tmpdir_path / rel_path_str + if temp_file.exists(): + # Copy back to original location + original_path = Path(paths[i]) + 
original_path.write_text(temp_file.read_text()) + + return result.returncode, result.stdout, result.stderr diff --git a/x/claude_linter_v2/cli.py b/x/claude_linter_v2/cli.py index 185713d824..f9b9f0538f 100644 --- a/x/claude_linter_v2/cli.py +++ b/x/claude_linter_v2/cli.py @@ -9,7 +9,7 @@ import shutil import subprocess import sys -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from pathlib import Path from uuid import UUID @@ -49,7 +49,7 @@ def parse_expiry_duration(duration_str: str) -> datetime: seconds = parse_duration(duration_str) if seconds is None: raise click.ClickException(f"Invalid duration format: {duration_str}\nValid formats: 30m, 2h, 1d, 1h30m, etc.") - return datetime.now() + timedelta(seconds=seconds) + return datetime.now(tz=UTC) + timedelta(seconds=seconds) @click.group(invoke_without_command=True) diff --git a/x/claude_linter_v2/hooks/handler.py b/x/claude_linter_v2/hooks/handler.py index 3e491b9854..7f1e943ac3 100644 --- a/x/claude_linter_v2/hooks/handler.py +++ b/x/claude_linter_v2/hooks/handler.py @@ -3,7 +3,7 @@ import contextlib import json import logging -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any @@ -138,7 +138,7 @@ def _log_hook_call( """Log detailed hook information to session log file.""" log_file = self.log_dir / f"{session_id}.log" - timestamp = datetime.now().isoformat() + timestamp = datetime.now(tz=UTC).isoformat() # Build log entry log_entry = { @@ -167,7 +167,7 @@ def _log_decision(self, session_id: SessionID, decision_point: str, details: dic """Log a specific decision point.""" log_file = self.log_dir / f"{session_id}.log" - log_entry = {"timestamp": datetime.now().isoformat(), "decision_point": decision_point, "details": details} + log_entry = {"timestamp": datetime.now(tz=UTC).isoformat(), "decision_point": decision_point, "details": details} with log_file.open("a") as f: f.write(f"DECISION: 
{json.dumps(log_entry)}\n") @@ -457,7 +457,7 @@ def _check_access_control(self, request: PreToolUseRequest, session_id: SessionI "old_string": tool_call.old_string if isinstance(tool_call, EditToolCall) else None, "command": tool_call.command if isinstance(tool_call, BashToolCall) else None, } - context = PredicateContext(tool=tool_call.tool_name, args=args, session_id=session_id, timestamp=datetime.now()) + context = PredicateContext(tool=tool_call.tool_name, args=args, session_id=session_id, timestamp=datetime.now(tz=UTC)) return self.rule_engine.evaluate_access(context, session_id) diff --git a/x/claude_linter_v2/session/manager.py b/x/claude_linter_v2/session/manager.py index 061d24f6bd..65213fa64a 100644 --- a/x/claude_linter_v2/session/manager.py +++ b/x/claude_linter_v2/session/manager.py @@ -2,7 +2,7 @@ import json import logging -from datetime import datetime +from datetime import UTC, datetime from enum import StrEnum from pathlib import Path @@ -73,7 +73,7 @@ def _load_session(self, session_id: SessionID) -> SessionData: logger.exception(f"Failed to load session {session_id}") # Return default session data - return SessionData(id=session_id, created=datetime.now()) + return SessionData(id=session_id, created=datetime.now(tz=UTC)) def _save_session(self, session_id: SessionID, session_data: SessionData) -> None: """Save a single session to disk.""" @@ -92,7 +92,7 @@ def track_session(self, session_id: SessionID, working_dir: Path) -> None: working_dir: Current working directory for the session """ session_data = self._load_session(session_id) - session_data.last_seen = datetime.now() + session_data.last_seen = datetime.now(tz=UTC) session_data.directory = working_dir.resolve() self._save_session(session_id, session_data) @@ -125,7 +125,7 @@ def add_rule( directory = directory or Path.cwd() directory_str = str(directory.resolve()) - rule = Rule(predicate=predicate, action=action, created=datetime.now(), expires=expires) + rule = Rule(predicate=predicate, 
action=action, created=datetime.now(tz=UTC), expires=expires) affected = 0 @@ -179,7 +179,7 @@ def get_session_rules(self, session_id: SessionID) -> list[Rule]: session_data = self._load_session(session_id) # Filter out expired rules - now = datetime.now() + now = datetime.now(tz=UTC) active_rules = [] for rule in session_data.rules: diff --git a/x/claude_linter_v2/session/state.py b/x/claude_linter_v2/session/state.py index ebba528ea3..60eb8dc0cf 100644 --- a/x/claude_linter_v2/session/state.py +++ b/x/claude_linter_v2/session/state.py @@ -1,7 +1,7 @@ """Session state management for Claude Linter v2.""" from dataclasses import dataclass, field -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from pydantic import BaseModel, ConfigDict @@ -49,12 +49,12 @@ class SessionState: def touch_file(self, file_path: Path) -> None: """Mark a file as touched in this session.""" self.touched_files.add(file_path) - self.last_seen = datetime.now() + self.last_seen = datetime.now(tz=UTC) def add_warning(self, warning: str) -> None: """Add a warning to show in post-hook.""" self.pending_warnings.append(warning) - self.last_seen = datetime.now() + self.last_seen = datetime.now(tz=UTC) def consume_warnings(self) -> list[str]: """Get and clear pending warnings.""" @@ -69,12 +69,12 @@ def clear_touched_files(self) -> None: def add_rule(self, rule: Rule) -> None: """Add a session-specific rule.""" self.rules.append(rule) - self.last_seen = datetime.now() + self.last_seen = datetime.now(tz=UTC) def set_notification_id(self, notification_id: int) -> None: """Set the current notification ID.""" self.notification_id = notification_id - self.last_seen = datetime.now() + self.last_seen = datetime.now(tz=UTC) def clear_notification_id(self) -> None: """Clear the notification ID.""" @@ -82,4 +82,4 @@ def clear_notification_id(self) -> None: def update_last_seen(self) -> None: """Update the last seen timestamp.""" - self.last_seen = datetime.now() + 
self.last_seen = datetime.now(tz=UTC) diff --git a/x/claude_linter_v2/session/violations.py b/x/claude_linter_v2/session/violations.py index a691edf24c..b9b500db5c 100644 --- a/x/claude_linter_v2/session/violations.py +++ b/x/claude_linter_v2/session/violations.py @@ -3,7 +3,7 @@ from __future__ import annotations import logging -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import TYPE_CHECKING, Any @@ -48,7 +48,7 @@ def add_violation( "message": message, "severity": severity, "rule": rule, - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(tz=UTC).isoformat(), "fixed": False, } diff --git a/x/claude_linter_v2/test_mcp_tools.py b/x/claude_linter_v2/test_mcp_tools.py index 2b0b402534..476c3e291a 100644 --- a/x/claude_linter_v2/test_mcp_tools.py +++ b/x/claude_linter_v2/test_mcp_tools.py @@ -1,7 +1,7 @@ """Unit tests for MCP tool handling in claude-linter-v2.""" import json -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any from unittest.mock import MagicMock, patch @@ -267,7 +267,7 @@ def test_mcp_tool_logging(self, handler, session_id, tmp_path): tool_input={ "event": "user_action", "properties": {"action": "click", "target": "button"}, - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(tz=UTC).isoformat(), }, tool_result={"tracked": True}, ) diff --git a/x/claude_linter_v2/test_multiline_predicates.py b/x/claude_linter_v2/test_multiline_predicates.py index 62b8850902..8006e4a1ed 100644 --- a/x/claude_linter_v2/test_multiline_predicates.py +++ b/x/claude_linter_v2/test_multiline_predicates.py @@ -1,6 +1,6 @@ """Test multiline predicate evaluation.""" -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from uuid import UUID import pytest @@ -29,7 +29,7 @@ def context(self): tool="Bash", args={"file_path": "/home/user/test.py", "content": "print('hello')", "command": 
"grep -r pattern"}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) def test_simple_multiline_function(self, evaluator, context): @@ -45,7 +45,7 @@ def check_bash(ctx): tool="Edit", args={"file_path": "/home/user/test.py", "content": "print('hello')", "command": "grep -r pattern"}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, context_edit) is False @@ -97,19 +97,19 @@ def is_safe_pipeline(ctx): tool="Bash", args={"command": "grep -r pattern | wc -l"}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, context_safe) is True # Unsafe command context_unsafe = PredicateContext( - tool="Bash", args={"command": "rm -rf /"}, session_id=TEST_SESSION_ID, timestamp=datetime.now() + tool="Bash", args={"command": "rm -rf /"}, session_id=TEST_SESSION_ID, timestamp=datetime.now(tz=UTC) ) assert evaluator.evaluate(predicate, context_unsafe) is False # Unknown flag context_bad_flag = PredicateContext( - tool="Bash", args={"command": "grep -X pattern"}, session_id=TEST_SESSION_ID, timestamp=datetime.now() + tool="Bash", args={"command": "grep -X pattern"}, session_id=TEST_SESSION_ID, timestamp=datetime.now(tz=UTC) ) assert evaluator.evaluate(predicate, context_bad_flag) is False @@ -146,7 +146,7 @@ def check_broker_limits(ctx): tool="mcp_broker_place_order", args={"amount": 100, "margin_multiplier": 2}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, broker_context) is True @@ -155,7 +155,7 @@ def check_broker_limits(ctx): tool="mcp_broker_place_order", args={"amount": 1000, "margin_multiplier": 2}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, broker_context_high_amount) is False @@ -164,7 +164,7 @@ def 
check_broker_limits(ctx): tool="mcp_broker_place_order", args={"amount": 100, "margin_multiplier": 10}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, broker_context_high_margin) is False @@ -173,7 +173,7 @@ def check_broker_limits(ctx): tool="mcp_broker_withdraw", args={"amount": 100, "margin_multiplier": 2}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, broker_context_forbidden) is False @@ -194,12 +194,15 @@ def check_test_file(ctx): tool="Edit", args={"file_path": "/home/user/test_foo.py"}, session_id=TEST_SESSION_ID, - timestamp=datetime.now(), + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, context_edit_test) is True context_edit_non_test = PredicateContext( - tool="Edit", args={"file_path": "/home/user/foo.py"}, session_id=TEST_SESSION_ID, timestamp=datetime.now() + tool="Edit", + args={"file_path": "/home/user/foo.py"}, + session_id=TEST_SESSION_ID, + timestamp=datetime.now(tz=UTC), ) assert evaluator.evaluate(predicate, context_edit_non_test) is False @@ -208,18 +211,18 @@ def test_imports_and_modules(self, evaluator, context): predicate = """ import json import re -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta import pytest_bazel def check_recent_activity(ctx): # Check if activity is within last hour - now = datetime.now() + now = datetime.now(tz=UTC) one_hour_ago = now - timedelta(hours=1) return ctx.timestamp > one_hour_ago """ # Recent timestamp context_recent = PredicateContext( - tool="Bash", args={"command": "test"}, session_id=TEST_SESSION_ID, timestamp=datetime.now() + tool="Bash", args={"command": "test"}, session_id=TEST_SESSION_ID, timestamp=datetime.now(tz=UTC) ) assert evaluator.evaluate(predicate, context_recent) is True @@ -228,7 +231,7 @@ def check_recent_activity(ctx): tool="Bash", args={"command": "test"}, 
session_id=TEST_SESSION_ID, - timestamp=datetime.now() - timedelta(hours=2), + timestamp=datetime.now(tz=UTC) - timedelta(hours=2), ) assert evaluator.evaluate(predicate, context_old) is False @@ -275,7 +278,7 @@ def safe_git_commands(ctx): # Test with git command context_git = PredicateContext( - tool="Bash", args={"command": "git status"}, session_id=TEST_SESSION_ID, timestamp=datetime.now() + tool="Bash", args={"command": "git status"}, session_id=TEST_SESSION_ID, timestamp=datetime.now(tz=UTC) ) assert evaluator.evaluate(predicate_git, context_git) is True diff --git a/x/cotrl/llm_rl_experiment.py b/x/cotrl/llm_rl_experiment.py index ab9b83b465..65364d0eb0 100644 --- a/x/cotrl/llm_rl_experiment.py +++ b/x/cotrl/llm_rl_experiment.py @@ -5,7 +5,7 @@ import os from collections import defaultdict from dataclasses import dataclass -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any, Literal @@ -154,7 +154,7 @@ class ExperimentLogger: """Handles online logging of experiment data.""" def __init__(self, experiment_name: str): - self.timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + self.timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") self.log_dir = Path(f"logs/{experiment_name}_{self.timestamp}") self.log_dir.mkdir(parents=True, exist_ok=True) @@ -164,7 +164,7 @@ def __init__(self, experiment_name: str): self.summary_log = self.log_dir / "summary.json" # Track experiment start time - self.start_time = datetime.now() + self.start_time = datetime.now(tz=UTC) async def log_episode(self, model: str, env_name: str, run_num: int, episode_num: int, episode: Episode): """Log episode data as it completes.""" @@ -207,7 +207,7 @@ async def log_summary(self, all_runs: list[Run]): """Log experiment summary.""" summary = SummaryData( experiment_start=self.start_time, - duration_seconds=(datetime.now() - self.start_time).total_seconds(), + duration_seconds=(datetime.now(tz=UTC) - 
self.start_time).total_seconds(), models=MODELS, environments=ENVIRONMENTS, episodes_per_run=EPISODES_PER_RUN, @@ -463,7 +463,7 @@ def plot_results(all_runs: list[Run]): plt.tight_layout() # Save plot - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") plt.savefig(f"llm_rl_results_{timestamp}.png", dpi=300, bbox_inches="tight") plt.savefig(f"llm_rl_results_{timestamp}.pdf", bbox_inches="tight") print(f"\nPlots saved as llm_rl_results_{timestamp}.png/.pdf") @@ -520,7 +520,7 @@ async def run_with_semaphore(model, env_name, run_num, exp_id): all_runs.append(result) # Save raw results with full trajectories - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") results_file = f"llm_rl_results_{timestamp}.json" trajectories_file = f"llm_rl_trajectories_{timestamp}.jsonl" @@ -588,7 +588,7 @@ async def run_with_semaphore(model, env_name, run_num, exp_id): plot_results(all_runs) # Save plots to log directory too - plot_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + plot_timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") plt.savefig(logger.log_dir / f"llm_rl_results_{plot_timestamp}.png", dpi=300, bbox_inches="tight") plt.savefig(logger.log_dir / f"llm_rl_results_{plot_timestamp}.pdf", bbox_inches="tight") diff --git a/x/ember_evals/runner.py b/x/ember_evals/runner.py index a13f7479e5..4106973d8a 100644 --- a/x/ember_evals/runner.py +++ b/x/ember_evals/runner.py @@ -132,7 +132,7 @@ def write_values_file(path: Path, payload: Mapping[str, object]) -> None: def make_base_run_id(explicit: str | None) -> str: if explicit: return sanitize_for_k8s(explicit, "eval") - timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S") + timestamp = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d-%H%M%S") return f"eval-{timestamp}" @@ -141,7 +141,7 @@ async def git_output(*args: str) -> str: async def compute_image_tag() -> str: - 
timestamp = datetime.datetime.utcnow().strftime("%Y%m%d%H%M%S") + timestamp = datetime.datetime.now(tz=datetime.UTC).strftime("%Y%m%d%H%M%S") try: short_sha = await git_output("git", "rev-parse", "--short", "HEAD") except CommandError: @@ -235,7 +235,7 @@ async def execute_run_async(request: EvalRunRequest) -> EvalRunMetadata: suite_version=request.suite.version, labels=request.labels, secrets=request.secrets, - started_at=datetime.datetime.utcnow().isoformat() + "Z", + started_at=datetime.datetime.now(tz=datetime.UTC).isoformat().replace("+00:00", "Z"), status="deploying", ) @@ -295,7 +295,7 @@ async def execute_run_async(request: EvalRunRequest) -> EvalRunMetadata: print("[ember-eval] No scenarios provided; skipping scenario execution.") metadata.status = "ready" - metadata.ready_at = datetime.datetime.utcnow().isoformat() + "Z" + metadata.ready_at = datetime.datetime.now(tz=datetime.UTC).isoformat().replace("+00:00", "Z") write_artifact(artifact_dir / "metadata.json", metadata.model_dump()) if transcript.events: write_artifact(artifact_dir / "matrix_transcript.json", transcript.model_dump()) @@ -306,7 +306,7 @@ return metadata except Exception as exc: metadata.status = "failed" - metadata.failed_at = datetime.datetime.utcnow().isoformat() + "Z" + metadata.failed_at = datetime.datetime.now(tz=datetime.UTC).isoformat().replace("+00:00", "Z") metadata.error = str(exc) if transcript.events: write_artifact(artifact_dir / "matrix_transcript.json", transcript.model_dump()) diff --git a/x/ember_evals/scenarios/regression.py b/x/ember_evals/scenarios/regression.py index ecfb603d98..e48df12c13 100644 --- a/x/ember_evals/scenarios/regression.py +++ b/x/ember_evals/scenarios/regression.py @@ -1,7 +1,7 @@ from __future__ import annotations import re -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from ember.evals import gitea as gitea_helpers @@ -57,10 +57,10 @@ async def run(self) -> None: if not
re.fullmatch(pattern, body): self.fail(f"Response '{body}' did not match required ISO format") try: - parsed = datetime.strptime(body, "%Y-%m-%d").date() + parsed = datetime.strptime(body, "%Y-%m-%d").replace(tzinfo=UTC).date() except ValueError: self.fail(f"Response '{body}' is not a valid calendar date") - today = datetime.utcnow().date() + today = datetime.now(tz=UTC).date() if abs((parsed - today).days) > 1: self.fail(f"Date {body} outside tolerance of 1 day (today={today.isoformat()})") self.record(self.ok(description="Validated ISO 8601 response", body=body)) diff --git a/x/gatelet/server/app.py b/x/gatelet/server/app.py index 7d4d3848ed..90e85f60c1 100644 --- a/x/gatelet/server/app.py +++ b/x/gatelet/server/app.py @@ -1,7 +1,7 @@ """FastAPI application for Gatelet server.""" import logging -from datetime import datetime +from datetime import UTC, datetime from fastapi import Cookie, Depends, FastAPI, Request, status from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse @@ -73,7 +73,7 @@ async def root(request: Request, session: str | None = Cookie(default=None), csr async with get_db_session(request) as db_session: stmt = select(AdminSession).where(AdminSession.session_token == session) admin_session = (await db_session.execute(stmt)).scalar_one_or_none() - if admin_session and admin_session.expires_at > datetime.now(): + if admin_session and admin_session.expires_at > datetime.now(tz=UTC): return RedirectResponse("/admin/", status_code=302) token, signed = csrf_protect.generate_csrf_tokens() diff --git a/x/gatelet/server/auth/handlers.py b/x/gatelet/server/auth/handlers.py index 9b659c78da..bc2a3cd9d5 100644 --- a/x/gatelet/server/auth/handlers.py +++ b/x/gatelet/server/auth/handlers.py @@ -2,7 +2,7 @@ import logging from collections.abc import Callable -from datetime import datetime +from datetime import UTC, datetime from enum import StrEnum from typing import Protocol from urllib.parse import urlencode @@ -166,7 +166,7 @@ async def 
session_auth(session_token: str, db_session: AsyncSession, settings: S raise AuthHandlerError # Extend session if needed - now = datetime.now() + now = datetime.now(tz=UTC) session.last_activity_at = now new_exp = now + settings.auth.challenge_response.session_extension max_exp = session.created_at + settings.auth.challenge_response.session_max_duration @@ -186,7 +186,7 @@ async def admin_auth(session_token: str, db_session: AsyncSession) -> AdminAuthC """Authenticate using admin session token.""" query = select(AdminSession).where(AdminSession.session_token == session_token) admin_session = (await db_session.execute(query)).scalar_one_or_none() - if not admin_session or admin_session.expires_at <= datetime.now(): + if not admin_session or admin_session.expires_at <= datetime.now(tz=UTC): raise AuthHandlerError return AdminAuthContext(admin_session) diff --git a/x/gatelet/server/auth/test_handlers.py b/x/gatelet/server/auth/test_handlers.py index 92b4a2c0cd..8c46d51721 100644 --- a/x/gatelet/server/auth/test_handlers.py +++ b/x/gatelet/server/auth/test_handlers.py @@ -1,7 +1,7 @@ """Tests for authentication handlers.""" import uuid -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from urllib.parse import parse_qs, urlparse import pytest @@ -64,7 +64,9 @@ async def test_key_path_auth_valid(db_session: AsyncSession, test_settings: Sett unique_id = uuid.uuid4().hex[:8] key = AuthKey( - key_value=f"valid-test-key-{unique_id}", description=f"Valid test key {unique_id}", created_at=datetime.now() + key_value=f"valid-test-key-{unique_id}", + description=f"Valid test key {unique_id}", + created_at=datetime.now(tz=UTC), ) db_session.add(key) await db_session.flush() @@ -89,7 +91,9 @@ async def test_session_auth_valid(db_session: AsyncSession, test_settings: Setti # Use unique values for key and session unique_id = uuid.uuid4().hex[:8] key = AuthKey( - key_value=f"valid-test-key-{unique_id}", description=f"Valid test key {unique_id}", 
created_at=datetime.now() + key_value=f"valid-test-key-{unique_id}", + description=f"Valid test key {unique_id}", + created_at=datetime.now(tz=UTC), ) db_session.add(key) await db_session.flush() @@ -97,9 +101,9 @@ async def test_session_auth_valid(db_session: AsyncSession, test_settings: Setti session = AuthCRSession( session_token=f"valid-test-session-{unique_id}", auth_key_id=key.id, - created_at=datetime.now(), - expires_at=datetime.now() + timedelta(hours=1), - last_activity_at=datetime.now(), + created_at=datetime.now(tz=UTC), + expires_at=datetime.now(tz=UTC) + timedelta(hours=1), + last_activity_at=datetime.now(tz=UTC), ) original_activity_time = session.last_activity_at db_session.add(session) @@ -126,7 +130,9 @@ async def test_session_auth_expired(db_session: AsyncSession, test_settings: Set """Test session_auth with expired session.""" # Use unique values unique_id = uuid.uuid4().hex[:8] - key = AuthKey(key_value=f"test-key-{unique_id}", description=f"Test key {unique_id}", created_at=datetime.now()) + key = AuthKey( + key_value=f"test-key-{unique_id}", description=f"Test key {unique_id}", created_at=datetime.now(tz=UTC) + ) db_session.add(key) await db_session.flush() @@ -134,9 +140,9 @@ async def test_session_auth_expired(db_session: AsyncSession, test_settings: Set session = AuthCRSession( session_token=f"expired-test-session-{unique_id}", auth_key_id=key.id, - created_at=datetime.now() - timedelta(hours=2), - expires_at=datetime.now() - timedelta(hours=1), - last_activity_at=datetime.now() - timedelta(hours=2), + created_at=datetime.now(tz=UTC) - timedelta(hours=2), + expires_at=datetime.now(tz=UTC) - timedelta(hours=1), + last_activity_at=datetime.now(tz=UTC) - timedelta(hours=2), ) db_session.add(session) await db_session.flush() diff --git a/x/gatelet/server/auth/test_key_auth.py b/x/gatelet/server/auth/test_key_auth.py index 4a8266da41..4a43c04c8d 100644 --- a/x/gatelet/server/auth/test_key_auth.py +++ b/x/gatelet/server/auth/test_key_auth.py @@ 
-1,7 +1,7 @@ """Tests for key-in-path authentication.""" import uuid -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta import pytest import pytest_bazel @@ -20,7 +20,9 @@ async def test_validate_valid_key(db_session: AsyncSession): # Create a valid key with unique value unique_id = uuid.uuid4().hex[:8] key = AuthKey( - key_value=f"valid-test-key-{unique_id}", description=f"Valid test key {unique_id}", created_at=datetime.now() + key_value=f"valid-test-key-{unique_id}", + description=f"Valid test key {unique_id}", + created_at=datetime.now(tz=UTC), ) key = await persist(db_session, key) @@ -43,8 +45,8 @@ async def test_validate_revoked_key(db_session: AsyncSession): key = AuthKey( key_value=f"revoked-test-key-{unique_id}", description=f"Revoked test key {unique_id}", - created_at=datetime.now(), - revoked_at=datetime.now(), + created_at=datetime.now(tz=UTC), + revoked_at=datetime.now(tz=UTC), ) key = await persist(db_session, key) @@ -57,7 +59,7 @@ async def test_validate_expired_key(db_session: AsyncSession): """Test validating an expired key.""" # Create a key that was created beyond the validity period with unique value unique_id = uuid.uuid4().hex[:8] - created_at = datetime.now() - TEST_KEY_VALIDITY - timedelta(days=1) + created_at = datetime.now(tz=UTC) - TEST_KEY_VALIDITY - timedelta(days=1) key = AuthKey( key_value=f"expired-test-key-{unique_id}", description=f"Expired test key {unique_id}", created_at=created_at diff --git a/x/gatelet/server/auth/test_key_path_auth.py b/x/gatelet/server/auth/test_key_path_auth.py index 6b12384de0..cd7e19a684 100644 --- a/x/gatelet/server/auth/test_key_path_auth.py +++ b/x/gatelet/server/auth/test_key_path_auth.py @@ -2,7 +2,7 @@ import logging import uuid -from datetime import datetime +from datetime import UTC, datetime import pytest import pytest_bazel @@ -23,7 +23,7 @@ async def test_key_path_auth_success(db_session: AsyncSession, test_settings: Se unique_id = uuid.uuid4().hex[:8] 
key_value = f"test-key-{unique_id}" - key = AuthKey(key_value=key_value, description=f"Test key {unique_id}", created_at=datetime.now()) + key = AuthKey(key_value=key_value, description=f"Test key {unique_id}", created_at=datetime.now(tz=UTC)) # Add and commit db_session.add(key) diff --git a/x/gatelet/server/conftest.py b/x/gatelet/server/conftest.py index 594d825856..125a068794 100644 --- a/x/gatelet/server/conftest.py +++ b/x/gatelet/server/conftest.py @@ -10,7 +10,7 @@ from collections.abc import AsyncGenerator, Generator from contextlib import asynccontextmanager from dataclasses import dataclass -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from pathlib import Path from uuid import uuid4 @@ -198,7 +198,7 @@ async def test_auth_key(db_session: AsyncSession) -> AuthKey: unique_id = uuid4().hex[:8] key = AuthKey( - key_value=f"test-key-{unique_id}", description=f"Test auth key {unique_id}", created_at=datetime.now() + key_value=f"test-key-{unique_id}", description=f"Test auth key {unique_id}", created_at=datetime.now(tz=UTC) ) return await persist(db_session, key) @@ -211,9 +211,9 @@ async def test_auth_session(db_session: AsyncSession, test_auth_key: AuthKey) -> session = AuthCRSession( session_token=f"test-session-{unique_id}", auth_key_id=test_auth_key.id, - created_at=datetime.now(), - expires_at=datetime.now() + timedelta(hours=1), - last_activity_at=datetime.now(), + created_at=datetime.now(tz=UTC), + expires_at=datetime.now(tz=UTC) + timedelta(hours=1), + last_activity_at=datetime.now(tz=UTC), ) return await persist(db_session, session) @@ -223,10 +223,10 @@ async def _stub_data(monkeypatch): """Stub external data fetchers for all tests.""" async def _states(*_args, **_kwargs): - return [{"entity_id": "sensor.test", "state": "on", "last_changed": datetime(2020, 1, 1)}] + return [{"entity_id": "sensor.test", "state": "on", "last_changed": datetime(2020, 1, 1, tzinfo=UTC)}] async def _payloads(*_args, **_kwargs): - 
return [PayloadSummary(id=1, integration_name="test", received_at=datetime(2020, 1, 1))] + return [PayloadSummary(id=1, integration_name="test", received_at=datetime(2020, 1, 1, tzinfo=UTC))] monkeypatch.setattr("x.gatelet.server.endpoints.homeassistant.fetch_states", _states) monkeypatch.setattr("x.gatelet.server.endpoints.webhook_view.get_latest_payloads", _payloads) diff --git a/x/gatelet/server/endpoints/admin.py b/x/gatelet/server/endpoints/admin.py index 5821678e3e..47f145a7e7 100644 --- a/x/gatelet/server/endpoints/admin.py +++ b/x/gatelet/server/endpoints/admin.py @@ -2,7 +2,7 @@ import asyncio import uuid -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from pathlib import Path from typing import Annotated @@ -36,7 +36,7 @@ async def _get_admin_session( stmt = select(AdminSession).where(AdminSession.session_token == session_token) admin_session = (await db_session.execute(stmt)).scalar_one_or_none() - if not admin_session or admin_session.expires_at <= datetime.now(): + if not admin_session or admin_session.expires_at <= datetime.now(tz=UTC): raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED) return admin_session @@ -68,7 +68,9 @@ async def login( ) session = AdminSession( - session_token=uuid.uuid4().hex, created_at=datetime.now(), expires_at=datetime.now() + SESSION_DURATION + session_token=uuid.uuid4().hex, + created_at=datetime.now(tz=UTC), + expires_at=datetime.now(tz=UTC) + SESSION_DURATION, ) db_session.add(session) # Commit before redirect so the session is visible to the next request. 
@@ -131,7 +133,7 @@ async def create_key( description: Annotated[str, Form()] = "", ) -> HTMLResponse: await csrf_protect.validate_csrf(request) - key = AuthKey(key_value=uuid.uuid4().hex, description=description or None, created_at=datetime.now()) + key = AuthKey(key_value=uuid.uuid4().hex, description=description or None, created_at=datetime.now(tz=UTC)) db_session.add(key) await db_session.flush() token, signed = csrf_protect.generate_csrf_tokens() @@ -150,7 +152,7 @@ async def revoke_key( stmt = select(AuthKey).where(AuthKey.id == key_id) key = (await db_session.execute(stmt)).scalar_one_or_none() if key and not key.revoked_at: - key.revoked_at = datetime.now() + key.revoked_at = datetime.now(tz=UTC) await db_session.flush() return RedirectResponse("/admin/keys/", status_code=302) diff --git a/x/gatelet/server/endpoints/challenge.py b/x/gatelet/server/endpoints/challenge.py index c0b913db0d..a90290654d 100644 --- a/x/gatelet/server/endpoints/challenge.py +++ b/x/gatelet/server/endpoints/challenge.py @@ -5,7 +5,7 @@ import hashlib import inspect import uuid -from datetime import datetime +from datetime import UTC, datetime from fastapi import APIRouter, Request from fastapi.responses import HTMLResponse, RedirectResponse @@ -57,7 +57,7 @@ async def _validate_key(key_id: int, db_session: AsyncSession, settings: Setting async def _new_challenge(key: AuthKey, db_session: AsyncSession, settings: Settings): nonce_value = uuid.uuid4().hex nonce = AuthNonce( - nonce_value=nonce_value, expires_at=datetime.now() + settings.auth.challenge_response.nonce_validity + nonce_value=nonce_value, expires_at=datetime.now(tz=UTC) + settings.auth.challenge_response.nonce_validity ) db_session.add(nonce) await db_session.flush() @@ -113,14 +113,14 @@ async def answer_challenge( if not nonce or not nonce.is_valid: return await _render_new_challenge(request, key, db_session, "Invalid or expired challenge", settings) - nonce.used_at = datetime.now() + nonce.used_at = 
datetime.now(tz=UTC) await db_session.flush() correct_idx = compute_correct_option(key.key_value, nonce_value, settings.auth.challenge_response.num_options) if answer != str(correct_idx): return await _render_new_challenge(request, key, db_session, "Incorrect answer", settings) - now = datetime.now() + now = datetime.now(tz=UTC) session = AuthCRSession( session_token=uuid.uuid4().hex, auth_key_id=key.id, diff --git a/x/gatelet/server/endpoints/test_challenge.py b/x/gatelet/server/endpoints/test_challenge.py index 29e0ad7157..0c454a3a05 100644 --- a/x/gatelet/server/endpoints/test_challenge.py +++ b/x/gatelet/server/endpoints/test_challenge.py @@ -1,7 +1,7 @@ """Tests for challenge-response authentication endpoints.""" import html -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from http import HTTPStatus import pytest_bazel @@ -41,7 +41,7 @@ async def test_answer_challenge_success( async def test_session_extension(client: AsyncClient, db_session: AsyncSession, test_auth_session: AuthCRSession): - original_exp = datetime.now() + timedelta(seconds=1) + original_exp = datetime.now(tz=UTC) + timedelta(seconds=1) test_auth_session.expires_at = original_exp await db_session.flush() await client.get(f"/s/{test_auth_session.session_token}/") diff --git a/x/gatelet/server/models.py b/x/gatelet/server/models.py index c8f5892024..7a958069dd 100644 --- a/x/gatelet/server/models.py +++ b/x/gatelet/server/models.py @@ -1,6 +1,6 @@ """SQLAlchemy models for Gatelet.""" -from datetime import datetime, timedelta +from datetime import UTC, datetime, timedelta from typing import Any from sqlalchemy import JSON, Boolean, Column, DateTime, ForeignKey, Integer, String, create_engine, func @@ -70,7 +70,7 @@ def is_valid(self, validity_period: timedelta) -> bool: True if key is valid, False otherwise """ expiration_time = self.created_at + validity_period - return self.revoked_at is None and datetime.now() < expiration_time + return self.revoked_at 
is None and datetime.now(tz=UTC) < expiration_time class AuthCRSession(Base): @@ -94,7 +94,7 @@ class AuthCRSession(Base): @property def is_valid(self) -> bool: """Check if session is currently valid.""" - return self.expires_at > datetime.now() + return self.expires_at > datetime.now(tz=UTC) class AuthNonce(Base): @@ -111,7 +111,7 @@ class AuthNonce(Base): @property def is_valid(self) -> bool: """Check if nonce is valid (not used and not expired).""" - return self.used_at is None and datetime.now() < self.expires_at + return self.used_at is None and datetime.now(tz=UTC) < self.expires_at @property def is_used(self) -> bool: diff --git a/x/skype_history/skype_history.py b/x/skype_history/skype_history.py index 5d81edaf25..ee385848f8 100644 --- a/x/skype_history/skype_history.py +++ b/x/skype_history/skype_history.py @@ -22,7 +22,7 @@ def main() -> None: continue print(row[3]) else: - timestamp = datetime.datetime.fromtimestamp(row[2]) + timestamp = datetime.datetime.fromtimestamp(row[2], tz=datetime.UTC) print(f"{row[1]} {timestamp}: {row[3]}") conn.close()