diff --git a/airlock/app.py b/airlock/app.py index e13dd7f63e..0e0ebd2442 100644 --- a/airlock/app.py +++ b/airlock/app.py @@ -130,12 +130,12 @@ async def auth_config() -> dict[str, str]: @app.get("/api/actions") async def list_actions(status: str | None = None, limit: int = 100, offset: int = 0) -> list[Action]: status_enum = ActionStatus(status) if status else None - return await gate._req_storage.list_actions(status_enum, limit=limit, offset=offset) + return await gate.req_storage.list_actions(status_enum, limit=limit, offset=offset) @app.get("/api/actions/{session_key}/{action_seq}") async def get_action(session_key: str, action_seq: int) -> Action: key = ActionKey(session_key=session_key, action_seq=action_seq) - action = await gate._req_storage.get_action(key) + action = await gate.req_storage.get_action(key) if action is None: raise HTTPException(status_code=404, detail="Action not found") return action diff --git a/airlock/oauth/k8s_client.py b/airlock/oauth/k8s_client.py index ada64bb07c..93dcc8ede1 100644 --- a/airlock/oauth/k8s_client.py +++ b/airlock/oauth/k8s_client.py @@ -52,11 +52,11 @@ async def write_token( try: await self._api.read_namespaced_secret(secret_name, namespace) await self._api.replace_namespaced_secret(secret_name, namespace, secret) - logger.info(f"Updated secret {namespace}/{secret_name}") + logger.info("Updated secret %s/%s", namespace, secret_name) except ApiException as e: if e.status == 404: await self._api.create_namespaced_secret(namespace, secret) - logger.info(f"Created secret {namespace}/{secret_name}") + logger.info("Created secret %s/%s", namespace, secret_name) else: raise @@ -68,7 +68,7 @@ async def delete_orphaned_secrets(self, namespace: str, known_names: frozenset[s name = secret.metadata.name if name not in known_names: await self._api.delete_namespaced_secret(name, namespace) - logger.info(f"Deleted orphaned secret {namespace}/{name}") + logger.info("Deleted orphaned secret %s/%s", namespace, name) async def 
read_token(self, secret_name: str, namespace: str) -> TokenData | None: try: diff --git a/airlock/oauth/refresh.py b/airlock/oauth/refresh.py index 2a3ac789b9..ca041a1397 100644 --- a/airlock/oauth/refresh.py +++ b/airlock/oauth/refresh.py @@ -27,7 +27,7 @@ async def token_refresh_loop( continue if not provider.needs_refresh(token): continue - logger.info(f"Refreshing token for {name} (expires {token.expires_at})") + logger.info("Refreshing token for %s (expires %s)", name, token.expires_at) new_token = await provider.refresh_tokens(token.refresh_token) await k8s_store.write_token( provider.config.refresh_secret.name, @@ -42,9 +42,9 @@ async def token_refresh_loop( annotations=provider.config.access_secret.annotations or None, fields=ACCESS_TOKEN_FIELDS, ) - logger.info(f"Refreshed token for {name} (new expiry {new_token.expires_at})") + logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at) except Exception: - logger.exception(f"Failed to refresh token for {name}") + logger.exception("Failed to refresh token for %s", name) try: await k8s_store.delete_orphaned_secrets(target_namespace, known_secret_names) except Exception: diff --git a/airlock/oauth/routes.py b/airlock/oauth/routes.py index e4aaac4ab7..6327f8ef94 100644 --- a/airlock/oauth/routes.py +++ b/airlock/oauth/routes.py @@ -92,7 +92,7 @@ async def callback_get(provider_name: str, request: Request) -> RedirectResponse annotations=provider.config.access_secret.annotations or None, fields=ACCESS_TOKEN_FIELDS, ) - logger.info(f"Stored tokens for {provider_name} (expires {token.expires_at})") + logger.info("Stored tokens for %s (expires %s)", provider_name, token.expires_at) return RedirectResponse("/#/oauth") @router.post("/callback/{provider_name}") @@ -117,7 +117,7 @@ async def callback_post(provider_name: str, body: _PlaidCallbackBody) -> Redirec annotations=provider.config.access_secret.annotations or None, fields=ACCESS_TOKEN_FIELDS, ) - logger.info(f"Stored Plaid tokens for 
{provider_name}") + logger.info("Stored Plaid tokens for %s", provider_name) return RedirectResponse("/#/oauth", status_code=303) return router diff --git a/devinfra/claude/auth_proxy/proxy.py b/devinfra/claude/auth_proxy/proxy.py index 0becab4b5a..ba29a546ea 100644 --- a/devinfra/claude/auth_proxy/proxy.py +++ b/devinfra/claude/auth_proxy/proxy.py @@ -88,7 +88,7 @@ def __init__(self, listen_port: int, max_workers: int = 100): self._upstream_url: str | None = None self._creds_lock = threading.Lock() self.server_socket: socket.socket | None = None - self._running = False + self.running = False self._thread: threading.Thread | None = None self._executor: ThreadPoolExecutor | None = None self._connections: list[socket.socket] = [] @@ -118,7 +118,7 @@ def start(self) -> None: self.server_socket.settimeout(0.5) self._executor = ThreadPoolExecutor(max_workers=self.max_workers, thread_name_prefix="proxy") - self._running = True + self.running = True self._thread = threading.Thread(target=self._serve, daemon=True) self._thread.start() @@ -126,7 +126,7 @@ def start(self) -> None: def stop(self) -> None: """Stop the proxy server.""" - self._running = False + self.running = False if self._thread: self._thread.join(timeout=2) if self._executor: @@ -140,7 +140,7 @@ def stop(self) -> None: def _serve(self) -> None: """Main server loop.""" - while self._running: + while self.running: try: client_sock, _ = self.server_socket.accept() # type: ignore[union-attr] self._connections.append(client_sock) @@ -309,7 +309,7 @@ def __init__(self, sock_path: Path, remote_target: str, max_workers: int = 100): self._upstream_url: str | None = None self._creds_lock = threading.Lock() self.server_socket: socket.socket | None = None - self._running = False + self.running = False self._thread: threading.Thread | None = None self._executor: ThreadPoolExecutor | None = None self._connections: list[socket.socket] = [] @@ -341,7 +341,7 @@ def start(self) -> None: self.server_socket.settimeout(0.5) 
self._executor = ThreadPoolExecutor(max_workers=self.max_workers, thread_name_prefix="uds-proxy") - self._running = True + self.running = True self._thread = threading.Thread(target=self._serve, daemon=True) self._thread.start() @@ -349,7 +349,7 @@ def start(self) -> None: def stop(self) -> None: """Stop the UDS proxy server.""" - self._running = False + self.running = False if self._thread: self._thread.join(timeout=2) if self._executor: @@ -365,7 +365,7 @@ def stop(self) -> None: def _serve(self) -> None: """Main server loop.""" - while self._running: + while self.running: try: client_sock, _ = self.server_socket.accept() # type: ignore[union-attr] self._connections.append(client_sock) diff --git a/devinfra/claude/auth_proxy/setup.py b/devinfra/claude/auth_proxy/setup.py index d53d0a5b12..53b3c6370d 100644 --- a/devinfra/claude/auth_proxy/setup.py +++ b/devinfra/claude/auth_proxy/setup.py @@ -347,7 +347,7 @@ async def setup_auth_proxy( # Create combined CA bundle (for tools like uv that use SSL_CERT_FILE) _create_combined_ca_bundle(paths) - status = (f"running (port {port})" if proxy._running else "configured") if proxy is not None else "uds-only" + status = (f"running (port {port})" if proxy.running else "configured") if proxy is not None else "uds-only" ca_status = "custom CA" if combined_ca.exists() else "system" logger.info("Auth proxy setup complete") diff --git a/editor_agent/host/cli.py b/editor_agent/host/cli.py index b24fd71f2b..dda91afa64 100644 --- a/editor_agent/host/cli.py +++ b/editor_agent/host/cli.py @@ -39,7 +39,7 @@ MODEL_OPT = typer.Option(DEFAULT_MODEL, "--model", help="Model name (OPENAI_MODEL)") NETWORK_OPT = typer.Option(_ENV_NETWORK, "--network", help="Docker network (ADGN_EDITOR_DOCKER_NETWORK)") MAX_TURNS_OPT = typer.Option(40, "--max-turns", help="Maximum agent turns before abort") -VERBOSE_OPT = typer.Option(False, "--verbose", "-v", help="Show agent actions in real-time") +VERBOSE_OPT = typer.Option(default=False, help="Show agent 
actions in real-time") @app.callback(invoke_without_command=True) diff --git a/git_commit_ai/cli.py b/git_commit_ai/cli.py index 1d3c5f357b..1f7f1c5dd9 100644 --- a/git_commit_ai/cli.py +++ b/git_commit_ai/cli.py @@ -108,12 +108,12 @@ async def commit( timeout_secs: int | None = typer.Option( None, "--timeout-secs", help="Maximum seconds for the AI request; 0 disables timeout" ), - stage_all: bool = typer.Option(False, "-a", "--all", help="Stage all tracked changes"), - no_verify: bool = typer.Option(False, "--no-verify", help="Skip pre-commit hooks"), - amend: bool = typer.Option(False, "--amend", help="Amend previous commit"), - accept_ai: bool = typer.Option(False, "--accept-ai", help="Commit with AI message, skip editor"), - verbose: bool = typer.Option(False, "-v", help="Verbose git commit"), - debug: bool = typer.Option(False, "--debug", help="Show logger output"), + stage_all: bool = typer.Option(default=False, help="Stage all tracked changes"), + no_verify: bool = typer.Option(default=False, help="Skip pre-commit hooks"), + amend: bool = typer.Option(default=False, help="Amend previous commit"), + accept_ai: bool = typer.Option(default=False, help="Commit with AI message, skip editor"), + verbose: bool = typer.Option(default=False, help="Verbose git commit"), + debug: bool = typer.Option(default=False, help="Show logger output"), ): """Run the git-commit-ai process.""" repo = pygit2.Repository(get_build_workspace_directory()) diff --git a/homeassistant/proxy/config.py b/homeassistant/proxy/config.py index 0fad82a2c4..8bbcd592b7 100644 --- a/homeassistant/proxy/config.py +++ b/homeassistant/proxy/config.py @@ -76,7 +76,7 @@ class Settings(BaseModel): @classmethod def from_file(cls, path: Path) -> "Settings": - logger.info(f"Loading settings from {path.absolute()}") + logger.info("Loading settings from %s", path.absolute()) with path.open() as f: data = yaml.safe_load(f) if not isinstance(data, dict): diff --git a/homeassistant/proxy/policy.py 
b/homeassistant/proxy/policy.py index f12ab838bb..e28f71bd3a 100644 --- a/homeassistant/proxy/policy.py +++ b/homeassistant/proxy/policy.py @@ -74,11 +74,11 @@ async def _connection_loop(self) -> None: self._connected.clear() return except (CannotConnect, ConnectionFailed, NotConnected, OSError) as exc: - logger.warning(f"HA connection lost: {exc}. Reconnecting in {backoff:.1f}s") + logger.warning("HA connection lost: %s. Reconnecting in %.1fs", exc, backoff) except asyncio.CancelledError: raise except Exception: - logger.exception(f"Unexpected error in HA connection loop. Reconnecting in {backoff:.1f}s") + logger.exception("Unexpected error in HA connection loop. Reconnecting in %.1fs", backoff) finally: self._connected.clear() if self._client is not None: @@ -96,7 +96,7 @@ async def _ensure_entities(self) -> dict[str, EntityInfo]: self._entities_time = now except (ConnectionError, NotConnected, CannotConnect, ConnectionFailed) as exc: if self._entities is not None: - logger.warning(f"Registry refresh failed ({exc}), serving stale cache") + logger.warning("Registry refresh failed (%s), serving stale cache", exc) else: raise return self._entities @@ -126,7 +126,7 @@ async def _fetch_registry(self) -> dict[str, EntityInfo]: area_id = device_area.get(device_id) registry[entity_id] = EntityInfo(entity_id=entity_id, device_id=device_id, area_id=area_id) - logger.info(f"Fetched registry: {len(registry)} entities") + logger.info("Fetched registry: %d entities", len(registry)) return registry def _get_entity(self, entities: dict[str, EntityInfo], entity_id: str) -> EntityInfo: diff --git a/inop/engine/optimizer.py b/inop/engine/optimizer.py index 2a90388eb7..84d0bef87d 100644 --- a/inop/engine/optimizer.py +++ b/inop/engine/optimizer.py @@ -525,10 +525,10 @@ def main() -> None: seed_tasks = [t for t in all_tasks if t.type == task_type_enum.value] if not seed_tasks: - logger.error(f"No tasks found with type '{task_type_enum.value}' in {seeds_path}") + logger.error("No 
tasks found with type '%s' in %s", task_type_enum.value, seeds_path) sys.exit(1) - logger.info(f"Loaded {len(seed_tasks)} {task_type_enum.value} tasks from {len(all_tasks)} total tasks") + logger.info("Loaded %d %s tasks from %d total tasks", len(seed_tasks), task_type_enum.value, len(all_tasks)) # Load grading criteria from YAML logger.info("Loading grading criteria") diff --git a/inop/runners/containerized_claude.py b/inop/runners/containerized_claude.py index 1b0abd770a..522225d13c 100644 --- a/inop/runners/containerized_claude.py +++ b/inop/runners/containerized_claude.py @@ -403,7 +403,8 @@ async def _run_setup_script(self, script_path: str, script_type: str, log_prefix cmd_args = [str(setup_script), c.id, self.task_id, str(self._output_dir)] script_stat = await asyncio.to_thread(setup_script.stat) self._logger.info( - f"Running {script_type.lower()} script", + "%s script running", + script_type.lower(), script=str(setup_script), container_id=c.id, task_id=self.task_id, @@ -445,13 +446,14 @@ async def _run_setup_script(self, script_path: str, script_type: str, log_prefix if exit_code != 0: self._logger.error( - f"{script_type} script failed - CONTAINER LEFT RUNNING FOR DEBUG", + "%s script failed - CONTAINER LEFT RUNNING FOR DEBUG", + script_type, container_id=c.id, exit_code=exit_code, debug_hint=f"Run: docker logs {c.id}", ) raise RuntimeError(f"{script_type} script failed with exit code {exit_code}") - self._logger.info(f"{script_type} script completed successfully", container_id=c.id) + self._logger.info("%s script completed successfully", script_type, container_id=c.id) async def _run_pre_task_always_setup(self): """Run always pre-task setup script (runs before every task).""" diff --git a/inventree_utils/beautifier/upload_lcsc_images.py b/inventree_utils/beautifier/upload_lcsc_images.py index 3727c2014f..b30143c583 100644 --- a/inventree_utils/beautifier/upload_lcsc_images.py +++ b/inventree_utils/beautifier/upload_lcsc_images.py @@ -63,7 +63,7 @@ def 
upload_lcsc_images(api: InvenTreeAPI): # Gather LCSC from single supplier sp_lcsc = [sp for sp in all_supplier_parts if sp.part == p.pk and sp.supplier == lcsc.pk] if len(sp_lcsc) != 1: - log.info(f"Skip, {len(sp_lcsc)} LCSC SupplierParts.") + log.info("Skip, %s LCSC SupplierParts.", len(sp_lcsc)) continue lcsc_from_supplier = sp_lcsc[0].SKU @@ -71,7 +71,8 @@ def upload_lcsc_images(api: InvenTreeAPI): if lcsc_from_link and lcsc_from_supplier: # If both are present, assert they match if lcsc_from_link != lcsc_from_supplier: - raise ValueError(f"Conflicting LCSC IDs: {lcsc_from_link=} != {lcsc_from_supplier=}", log._context) + msg = f"Conflicting LCSC IDs: {lcsc_from_link=} != {lcsc_from_supplier=}" + raise ValueError(msg, log._context) # Both match => use either one lcsc_id = lcsc_from_link elif lcsc_from_link or lcsc_from_supplier: diff --git a/llm/claude_code_api.py b/llm/claude_code_api.py index af6b371211..1ab6ac8e2b 100644 --- a/llm/claude_code_api.py +++ b/llm/claude_code_api.py @@ -267,7 +267,7 @@ class BaseResponse(CamelCaseModel): """ # continue_ needs explicit alias since to_camel("continue_") -> "continue_" not "continue" - continue_: bool = Field(True, alias="continue") + continue_: bool = Field(default=True, alias="continue") stop_reason: str | None = Field(None, description="Message shown to USER when continue is false") suppress_output: bool | None = None diff --git a/llm/claude_hook.py b/llm/claude_hook.py index d36af8aac9..494a23aa18 100644 --- a/llm/claude_hook.py +++ b/llm/claude_hook.py @@ -150,7 +150,7 @@ def entrypoint(cls) -> None: except Exception: # Log the exception - logger.error("Hook execution failed", exc_info=True) + logger.exception("Hook execution failed") raise _emit_and_exit(response) diff --git a/llm/html/llm_html/server.py b/llm/html/llm_html/server.py index 7aa83adc9e..445945afa8 100755 --- a/llm/html/llm_html/server.py +++ b/llm/html/llm_html/server.py @@ -90,7 +90,7 @@ def load_page_titles(): else: raise ValueError(f"Missing 
required 'title' in frontmatter for {page}.md") except Exception: - logger.exception(f"Error loading title for {page}.md") + logger.exception("Error loading title for %s.md", page) raise @@ -116,9 +116,9 @@ def handle_page_rendering_error(error: Exception, page_name: str = "page") -> No HTTPException: Always raises with appropriate status code """ if isinstance(error, FileNotFoundError): - logger.error(f"{page_name} not found") + logger.error("%s not found", page_name) raise HTTPException(status_code=404, detail="Document not found") - logger.error(f"Error rendering {page_name}: {error}") + logger.error("Error rendering %s: %s", page_name, error) raise HTTPException(status_code=500, detail="Internal server error") @@ -216,7 +216,7 @@ async def analyze_page_tokens( tokens = count_tokens_for_models(final_markdown) return {"page": page_id, "title": title, "url": url, **tokens} except Exception: - logger.exception(f"Error analyzing {page_id} page") + logger.exception("Error analyzing %s page", page_id) return None @@ -300,11 +300,11 @@ async def verify_token(request: Request, token: str = ""): ts.verify_token(token) result = {"status": "success", "message": "Token is valid ✅"} - logger.info(f"Token verification succeeded for: {token[:20]}...") + logger.info("Token verification succeeded for: %s...", token[:20]) except VerificationError as exc: result = {"status": "failed", "errors": exc.issues} issues_str = " | ".join(f"✗ {issue}" for issue in exc.issues) - logger.exception(f"Token verification FAILED: {issues_str}") + logger.exception("Token verification FAILED: %s", issues_str) except FileNotFoundError: logger.exception("index.md not found for token verification") result = {"status": "failed", "errors": ["Source document not found"]} @@ -323,7 +323,7 @@ def main(): host = os.environ.get("HOST", "0.0.0.0") port = int(os.environ.get("PORT", "9000")) - logger.info(f"Starting FastAPI server on http://{host}:{port}") + logger.info("Starting FastAPI server on 
http://%s:%s", host, port) uvicorn.run(app, host=host, port=port, log_config=None) # None to use our logging config diff --git a/llm/mcp/habitify/api_reference/collect_references.py b/llm/mcp/habitify/api_reference/collect_references.py index a9f4ad6967..44fa941691 100755 --- a/llm/mcp/habitify/api_reference/collect_references.py +++ b/llm/mcp/habitify/api_reference/collect_references.py @@ -157,13 +157,13 @@ def _make_request_and_save( Raises: SystemExit: If expected_status is specified and doesn't match actual status """ - logger.info(f"Making request: {name} ({method} {endpoint})") + logger.info("Making request: %s (%s %s)", name, method, endpoint) response = self.client.request(method=method, url=endpoint, params=params, json=json_data) # If expected status is provided, validate it if response.status_code != expected_status: - logger.error(f"Expected status {expected_status} but got {response.status_code}") + logger.error("Expected status %s but got %s", expected_status, response.status_code) sys.exit(1) # Create reference data structure @@ -187,12 +187,12 @@ def _make_request_and_save( # Save to file in YAML format path = REFERENCE_DIR / f"{name.lower().replace(' ', '_')}.yaml" if path.exists(): - logger.warning(f"Overwriting existing file: {path}") + logger.warning("Overwriting existing file: %s", path) with path.open("w") as f: yaml.dump(reference, f, sort_keys=False, indent=2, default_flow_style=False) - logger.info(f"Saved reference example to {path}") + logger.info("Saved reference example to %s", path) return response.json() @@ -224,7 +224,7 @@ def collect_references(self) -> None: # Use the first habit for further API calls habit = habits[0] habit_id = habit["id"] - logger.info(f"Using habit with ID: {habit_id} and masked name: {self._mask_name(habit['name'])}") + logger.info("Using habit with ID: %s and masked name: %s", habit_id, self._mask_name(habit["name"])) # Get details for a specific habit by ID self._make_request_and_save( diff --git 
a/llm/mcp/habitify/cli.py b/llm/mcp/habitify/cli.py index 93c209913d..c0cdb7f773 100644 --- a/llm/mcp/habitify/cli.py +++ b/llm/mcp/habitify/cli.py @@ -8,7 +8,7 @@ import signal import subprocess import sys -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Literal @@ -65,8 +65,8 @@ def mcp( api_key: str | None = typer.Option( None, "--api-key", "-k", help="Habitify API key (overrides environment variable)" ), - quiet: bool = typer.Option(False, "--quiet", "-q", help="Disable debug output"), - debug: bool = typer.Option(False, "--debug", "-d", help="Enable debug logging"), + quiet: bool = typer.Option(default=False, help="Disable debug output"), + debug: bool = typer.Option(default=False, help="Enable debug logging"), ) -> None: """Start the Habitify MCP server with the specified transport.""" # Configure logging @@ -121,7 +121,7 @@ def mcp( except KeyboardInterrupt: err_console.print("\n[yellow]Keyboard interrupt received.[/] Shutting down...") except Exception as e: - logger.error(f"Error running server: {e}", exc_info=True) + logger.exception("Error running server") err_console.print(f"[bold red]Error:[/] {e!s}") raise typer.Exit(code=1) @@ -172,7 +172,7 @@ def install( @app.command("list") def list_habits( - include_archived: bool = typer.Option(False, "--include-archived", "-a", help="Include archived habits"), + include_archived: bool = typer.Option(default=False, help="Include archived habits"), api_key: str | None = typer.Option( None, "--api-key", "-k", help="Habitify API key (overrides environment variable)" ), @@ -331,7 +331,7 @@ async def _log_async( elif date: formatted_date = datetime.fromisoformat(date).strftime("%B %d, %Y") else: - formatted_date = datetime.now().strftime("%B %d, %Y") + formatted_date = datetime.now(tz=UTC).strftime("%B %d, %Y") # Success message with color based on status status_color = get_status_color(status) diff --git a/llm/mcp/habitify/examples/mcp_dev_runner.py 
b/llm/mcp/habitify/examples/mcp_dev_runner.py index 0b91122627..4e96faa654 100644 --- a/llm/mcp/habitify/examples/mcp_dev_runner.py +++ b/llm/mcp/habitify/examples/mcp_dev_runner.py @@ -72,7 +72,7 @@ def main() -> int: temp_filename = temp.name temp.write(SERVER_TEMPLATE) - logger.info(f"Created temporary server file: {temp_filename}") + logger.info("Created temporary server file: %s", temp_filename) try: # Build the command @@ -103,9 +103,9 @@ def main() -> int: # Clean up the temporary file try: Path(temp_filename).unlink() - logger.info(f"Removed temporary server file: {temp_filename}") + logger.info("Removed temporary server file: %s", temp_filename) except Exception as e: - logger.warning(f"Failed to remove temporary file: {e}") + logger.warning("Failed to remove temporary file: %s", e) if __name__ == "__main__": diff --git a/llm/mcp/habitify/examples/run_server.py b/llm/mcp/habitify/examples/run_server.py index a2ecfbab37..c9e26426a2 100644 --- a/llm/mcp/habitify/examples/run_server.py +++ b/llm/mcp/habitify/examples/run_server.py @@ -56,15 +56,15 @@ def main() -> int: logger.info("Starting server with stdio transport...") server.run(transport="stdio") else: - logger.info(f"Starting server with SSE transport on port {args.port}...") + logger.info("Starting server with SSE transport on port %s...", args.port) server.run(transport="sse", port=args.port) return 0 except KeyboardInterrupt: logger.info("Server stopped by keyboard interrupt") return 0 - except Exception as e: - logger.error(f"Error running server: {e}", exc_info=True) + except Exception: + logger.exception("Error running server") return 1 diff --git a/llm/mcp/habitify/habitify_client.py b/llm/mcp/habitify/habitify_client.py index db0f978453..64be6554e7 100644 --- a/llm/mcp/habitify/habitify_client.py +++ b/llm/mcp/habitify/habitify_client.py @@ -331,7 +331,7 @@ def _handle_error(self, error: Exception) -> HabitifyError: except Exception as json_error: # Instead of silently setting data to None, log 
what happened logger.warning( - f"Failed to parse error response JSON: {json_error}. Response text: {response.text[:100]}..." + "Failed to parse error response JSON: %s. Response text: %s...", json_error, response.text[:100] ) # Check for common errors with helpful messages diff --git a/mcp_infra/compositor/mount.py b/mcp_infra/compositor/mount.py index 8f946176e7..285597cd9a 100644 --- a/mcp_infra/compositor/mount.py +++ b/mcp_infra/compositor/mount.py @@ -196,7 +196,7 @@ async def setup_inproc( _ = child_client.initialize_result self._state_data = _MountActive(stack=stack, proxy=proxy, child_client=child_client) except Exception as e: - logger.warning(f"Failed to get initialize result for '{self._prefix}': {e}") + logger.warning("Failed to get initialize result for '%s': %s", self._prefix, e) self._state_data = _MountFailed(exception=e, stack=stack) # Don't raise - mount is registered but failed @@ -245,7 +245,7 @@ async def setup_external( _ = base_client.initialize_result self._state_data = _MountActive(stack=stack, proxy=proxy, child_client=base_client) except Exception as e: - logger.warning(f"Failed to get initialize result for '{self._prefix}': {e}") + logger.warning("Failed to get initialize result for '%s': %s", self._prefix, e) self._state_data = _MountFailed(exception=e, stack=stack) # Don't raise - mount is registered but failed @@ -268,8 +268,8 @@ async def cleanup(self) -> None: # Defensive check: warn if already closing if isinstance(self._state_data, _MountPending): logger.warning( - f"Cleaning up mount '{self._prefix}' that was never initialized " - "(state: PENDING). This is safe but unexpected." + "Cleaning up mount '%s' that was never initialized (state: PENDING). 
This is safe but unexpected.", + self._prefix, ) # Get stack to close (from Active or Failed state) @@ -281,6 +281,6 @@ async def cleanup(self) -> None: try: await stack.aclose() except Exception as e: - logger.exception(f"Error during cleanup for '{self._prefix}'", exc_info=e) + logger.exception("Error during cleanup for '%s'", self._prefix, exc_info=e) self._state_data = _MountClosed() diff --git a/mcp_infra/compositor/notifications_buffer.py b/mcp_infra/compositor/notifications_buffer.py index d5c0dd962c..4c428becf5 100644 --- a/mcp_infra/compositor/notifications_buffer.py +++ b/mcp_infra/compositor/notifications_buffer.py @@ -36,10 +36,10 @@ def __init__(self, owner: NotificationsBuffer) -> None: # Override with narrower types than base MessageHandler (which accepts Any) async def on_resource_updated(self, message: mcp_types.ResourceUpdatedNotification) -> None: - await self._buffer._on_updated(message) + await self._buffer.on_updated(message) async def on_resource_list_changed(self, message: mcp_types.ResourceListChangedNotification) -> None: - await self._buffer._on_list_changed(message) + await self._buffer.on_list_changed(message) class NotificationsBuffer: @@ -77,13 +77,13 @@ def poll(self) -> NotificationsBatch: self._servers.clear() return batch - async def _on_updated(self, message: mcp_types.ResourceUpdatedNotification) -> None: + async def on_updated(self, message: mcp_types.ResourceUpdatedNotification) -> None: # Add URI to the server's update set uri_str = str(message.params.uri) server = await self._derive_server(uri_str) self._servers.setdefault(server, _ServerNoticeAccumulator()).updated.add(uri_str) - async def _on_list_changed(self, message: mcp_types.ResourceListChangedNotification) -> None: + async def on_list_changed(self, message: mcp_types.ResourceListChangedNotification) -> None: # Attribute origin using compositor-captured child notifications when available names = list(self._compositor.pop_recent_resource_list_changes()) for name in 
names: @@ -91,7 +91,7 @@ async def _on_list_changed(self, message: mcp_types.ResourceListChangedNotificat async def _derive_server(self, uri: str) -> str: """Derive origin server from resource URI using all compositor mounts.""" - mount_names = await self._compositor._mount_names() + mount_names = await self._compositor.mount_names() return derive_origin_server(uri, mount_names) async def _on_resource_listener(self, name: str, uri: str) -> None: diff --git a/mcp_infra/compositor/resources_server.py b/mcp_infra/compositor/resources_server.py index ac5fc14043..8481f112f6 100644 --- a/mcp_infra/compositor/resources_server.py +++ b/mcp_infra/compositor/resources_server.py @@ -750,8 +750,8 @@ async def _broadcast_subs_updated(self) -> None: async def _present_servers(self) -> set[str]: # Include all mounted servers, including in-proc mounts without typed specs. - # Use compositor._mount_names() directly; do not swallow errors. - names = await self._compositor._mount_names() + # Use compositor.mount_names() directly; do not swallow errors. 
+ names = await self._compositor.mount_names() return set(names) def _get_or_create_sub(self, server: str, uri: str) -> SubscriptionRecord: diff --git a/mcp_infra/compositor/server.py b/mcp_infra/compositor/server.py index 4d36d6af60..c6289b115f 100644 --- a/mcp_infra/compositor/server.py +++ b/mcp_infra/compositor/server.py @@ -54,13 +54,13 @@ def __init__(self, compositor: BaseCompositor, server_prefix: MCPMountPrefix) -> self._server_prefix = server_prefix async def on_resource_list_changed(self, message: mcp_types.ResourceListChangedNotification) -> None: - self._compositor._pending_resource_list_changes.add(self._server_prefix) + self._compositor.pending_resource_list_changes.add(self._server_prefix) # No forwarding here; child client handles forwarding via proxy - await self._compositor._notify_resource_list_change(self._server_prefix) + await self._compositor.notify_resource_list_change(self._server_prefix) async def on_resource_updated(self, message: mcp_types.ResourceUpdatedNotification) -> None: # Forward to listeners with origin attribution - await self._compositor._notify_resource_updated(self._server_prefix, str(message.params.uri)) + await self._compositor.notify_resource_updated(self._server_prefix, str(message.params.uri)) class CompositorState(Enum): @@ -130,7 +130,7 @@ def __init__( self._mount_listeners: list[Callable[[MCPMountPrefix, MountEvent], Awaitable[None] | None]] = [] # Resource change tracking - self._pending_resource_list_changes: set[MCPMountPrefix] = set() + self.pending_resource_list_changes: set[MCPMountPrefix] = set() self._resource_list_change_listeners: list[Callable[[MCPMountPrefix], Awaitable[None] | None]] = [] self._resource_updated_listeners: list[Callable[[MCPMountPrefix, str], Awaitable[None] | None]] = [] @@ -157,7 +157,7 @@ def add_resource_list_change_listener(self, cb: Callable[[MCPMountPrefix], Await """ self._resource_list_change_listeners.append(cb) - async def _notify_resource_list_change(self, prefix: 
MCPMountPrefix) -> None: + async def notify_resource_list_change(self, prefix: MCPMountPrefix) -> None: for cb in list(self._resource_list_change_listeners): res = cb(prefix) if asyncio.iscoroutine(res): @@ -171,7 +171,7 @@ def add_resource_updated_listener(self, cb: Callable[[MCPMountPrefix, str], Awai """ self._resource_updated_listeners.append(cb) - async def _notify_resource_updated(self, prefix: MCPMountPrefix, uri: str) -> None: + async def notify_resource_updated(self, prefix: MCPMountPrefix, uri: str) -> None: for cb in list(self._resource_updated_listeners): res = cb(prefix, uri) if asyncio.iscoroutine(res): @@ -181,8 +181,8 @@ async def _notify_resource_updated(self, prefix: MCPMountPrefix, uri: str) -> No def pop_recent_resource_list_changes(self) -> list[MCPMountPrefix]: """Return and clear servers that recently reported resource list changes.""" - names = sorted(self._pending_resource_list_changes) - self._pending_resource_list_changes.clear() + names = sorted(self.pending_resource_list_changes) + self.pending_resource_list_changes.clear() return names async def server_entries(self) -> dict[MCPMountPrefix, ServerEntry]: @@ -474,7 +474,7 @@ async def unmount_server(self, prefix: MCPMountPrefix, *, _allow_pinned: bool = # Defensive check: warn if mount is in unexpected state if not mount.is_active and not mount.is_failed: logger.warning( - f"Unmounting server '{prefix}' in unexpected state: {mount.state.name}. Cleanup will proceed anyway." + "Unmounting server '%s' in unexpected state: %s. 
Cleanup will proceed anyway.", prefix, mount.state.name ) # Cleanup (exception-safe, idempotent) @@ -482,7 +482,7 @@ async def unmount_server(self, prefix: MCPMountPrefix, *, _allow_pinned: bool = try: await mount.cleanup() except Exception as e: - logger.exception(f"Error cleaning up mount '{prefix}' (server will still be unmounted)", exc_info=e) + logger.exception("Error cleaning up mount '%s' (server will still be unmounted)", prefix, exc_info=e) # Remove from dict (always, even if cleanup failed) async with self._mount_lock: @@ -572,14 +572,15 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): await self.unmount_server(name, _allow_pinned=True) except Exception as e: exceptions.append(e) - logger.exception(f"Failed to unmount pinned server '{name}' during exit", exc_info=e) + logger.exception("Failed to unmount pinned server '%s' during exit", name, exc_info=e) finally: async with self._state_lock: self._state = CompositorState.CLOSED # Raise collected exceptions as a group if exceptions: - raise ExceptionGroup("Failed to unmount one or more servers during compositor exit", exceptions) + msg = "Failed to unmount one or more servers during compositor exit" + raise ExceptionGroup(msg, exceptions) return False # Don't suppress exceptions @@ -606,11 +607,12 @@ async def close(self): await self.unmount_server(name) except Exception as e: exceptions.append(e) - logger.exception(f"Failed to unmount server '{name}' during cleanup", exc_info=e) + logger.exception("Failed to unmount server '%s' during cleanup", name, exc_info=e) # Raise collected exceptions as a group if exceptions: - raise ExceptionGroup("Failed to unmount one or more non-pinned servers", exceptions) + msg = "Failed to unmount one or more non-pinned servers" + raise ExceptionGroup(msg, exceptions) def __del__(self): """Warn if compositor is garbage collected without proper cleanup. 
@@ -651,7 +653,7 @@ def __del__(self): print(msg, file=sys.stderr) # ---- Internals ---------------------------------------------------------- - async def _mount_names(self) -> list[str]: + async def mount_names(self) -> list[str]: async with self._mount_lock: return list(self._mounts.keys()) diff --git a/mcp_infra/exec/docker/container_session.py b/mcp_infra/exec/docker/container_session.py index 8e81130d96..53e070d0ad 100644 --- a/mcp_infra/exec/docker/container_session.py +++ b/mcp_infra/exec/docker/container_session.py @@ -224,9 +224,9 @@ async def _create_and_start_container(client: aiodocker.Docker, opts: ContainerO # Container created but start failed - clean it up before re-raising try: await container.delete(force=True) - logger.debug(f"Cleaned up failed container {container_id}") + logger.debug("Cleaned up failed container %s", container_id) except Exception: - logger.exception(f"Failed to cleanup container {container_id}") + logger.exception("Failed to cleanup container %s", container_id) raise @@ -261,14 +261,14 @@ async def scoped_container(client: aiodocker.Docker, opts: ContainerOptions): status = info["State"]["Status"] if status == "running": await container.kill() - logger.debug(f"Container {container_id} killed") + logger.debug("Container %s killed", container_id) else: - logger.debug(f"Container {container_id} already stopped (status: {status})") + logger.debug("Container %s already stopped (status: %s)", container_id, status) await container.delete(force=True) - logger.debug(f"Container {container_id} cleaned up successfully") + logger.debug("Container %s cleaned up successfully", container_id) except Exception: - logger.exception(f"Container cleanup failed for {container_id}") + logger.exception("Container cleanup failed for %s", container_id) # ---- Lifespan factory (per-session container) ---- @@ -382,10 +382,10 @@ def _normalize_docker_logs_to_bytes(logs) -> bytes: elif isinstance(chunk, str): result.extend(chunk.encode("utf-8")) else: - logger.warning(f"Unexpected chunk 
type in logs list: {type(chunk)}") + logger.warning("Unexpected chunk type in logs list: %s", type(chunk)) return bytes(result) - logger.warning(f"Unexpected logs type: {type(logs)}, returning empty bytes") + logger.warning("Unexpected logs type: %s, returning empty bytes", type(logs)) return b"" @@ -398,7 +398,7 @@ async def _collect_from_exec_stream(stream, stdout_buf: bytearray, stderr_buf: b data = chunk.data if not isinstance(data, bytes): - logger.warning(f"Expected bytes from exec stream, got {type(data)}, converting") + logger.warning("Expected bytes from exec stream, got %s, converting", type(data)) data = data.encode("utf-8") if isinstance(data, str) else bytes(data) if chunk.stream == STREAM_TYPE_STDOUT: @@ -406,7 +406,7 @@ async def _collect_from_exec_stream(stream, stdout_buf: bytearray, stderr_buf: b elif chunk.stream == STREAM_TYPE_STDERR: stderr_buf.extend(data) else: - logger.warning(f"Unknown stream type {chunk.stream}, defaulting to stdout") + logger.warning("Unknown stream type %s, defaulting to stdout", chunk.stream) stdout_buf.extend(data) @@ -432,7 +432,7 @@ async def run_session_container( if container_id is None: raise RuntimeError("No per-session container available") - logger.debug(f"Executing command in container {container_id[:12]}: {cmd!r} (timeout_ms={input.timeout_ms})") + logger.debug("Executing command in container %s: %r (timeout_ms=%s)", container_id[:12], cmd, input.timeout_ms) docker_client = s.docker_client container_instance = await docker_client.containers.get(container_id) @@ -471,7 +471,7 @@ async def run_session_container( if timed_out: # External timeout - kill the exec process, not the container - logger.debug(f"Command timed out after {input.timeout_ms}ms in container {container_id[:12]}") + logger.debug("Command timed out after %sms in container %s", input.timeout_ms, container_id[:12]) exit_code = None # Get the PID of the exec process and kill it @@ -484,7 +484,7 @@ async def run_session_container( # Drain the 
stream to ensure the kill command completes while await kill_stream.read_out() is not None: pass - logger.debug(f"Killed exec process PID {pid} in container {container_id[:12]}") + logger.debug("Killed exec process PID %s in container %s", pid, container_id[:12]) else: raise RuntimeError(f"Could not get PID for timed-out exec in container {container_id[:12]}") else: diff --git a/props/agents/critic_dev/grading.py b/props/agents/critic_dev/grading.py index c625f96473..a4468a6187 100644 --- a/props/agents/critic_dev/grading.py +++ b/props/agents/critic_dev/grading.py @@ -107,7 +107,7 @@ async def wait_until_graded( return status if last_pending_count != status.pending_count: - logger.debug(f"Waiting for grading: {status.pending_count} edges pending") + logger.debug("Waiting for grading: %s edges pending", status.pending_count) last_pending_count = status.pending_count await asyncio.sleep(poll_interval_seconds) diff --git a/props/agents/critic_dev/loop.py b/props/agents/critic_dev/loop.py index eecd94b4ef..8b7680d84a 100644 --- a/props/agents/critic_dev/loop.py +++ b/props/agents/critic_dev/loop.py @@ -129,11 +129,11 @@ async def start_critic(args: RunCriticRequest) -> StartCriticResponse: After calling this, use wait_until_critic_completed to wait for the critic to finish, then use wait_until_graded to wait for grading results. 
""" - logger.info(f"Starting critic: definition={args.definition_id}, example={args.example}") + logger.info("Starting critic: definition=%s, example=%s", args.definition_id, args.example) resp = await http_client.post("/api/runs/critic", json=args.model_dump(mode="json")) resp.raise_for_status() response = StartCriticResponse.model_validate(resp.json()) - logger.info(f"Critic started: {response.critic_run_id}") + logger.info("Critic started: %s", response.critic_run_id) return response @provider.tool @@ -145,7 +145,7 @@ async def wait_until_critic_completed(args: WaitUntilCriticCompletedArgs) -> Cri Raises TimeoutError if the critic does not complete within timeout_seconds. """ - logger.info(f"Waiting for critic to complete: {args.critic_run_id}") + logger.info("Waiting for critic to complete: %s", args.critic_run_id) deadline = time.monotonic() + args.timeout_seconds poll_interval = 5.0 @@ -155,7 +155,7 @@ async def wait_until_critic_completed(args: WaitUntilCriticCompletedArgs) -> Cri if run is None: raise ValueError(f"Critic run {args.critic_run_id} not found") if run.status != AgentRunStatus.IN_PROGRESS: - logger.info(f"Critic completed: {args.critic_run_id}, status={run.status}") + logger.info("Critic completed: %s, status=%s", args.critic_run_id, run.status) return CriticRunStatus( critic_run_id=args.critic_run_id, status=run.status, container_exit_code=run.container_exit_code ) @@ -170,11 +170,11 @@ async def wait_until_graded_tool(args: WaitUntilGradedToolArgs) -> GradingStatus Polls the database directly until grading is complete or timeout. The critic run must have already completed (use wait_until_critic_completed first). 
""" - logger.info(f"Waiting for grading: {args.critic_run_id}") + logger.info("Waiting for grading: %s", args.critic_run_id) response = await wait_until_graded( args.critic_run_id, db, timeout_seconds=args.timeout_seconds, poll_interval_seconds=5 ) - logger.info(f"Grading complete: total_credit={response.total_credit}, max_credit={response.max_credit}") + logger.info("Grading complete: total_credit=%s, max_credit=%s", response.total_credit, response.max_credit) return response @provider.tool diff --git a/props/agents/critic_dev/main.py b/props/agents/critic_dev/main.py index c5ce0cfabf..fe142d722a 100644 --- a/props/agents/critic_dev/main.py +++ b/props/agents/critic_dev/main.py @@ -306,9 +306,10 @@ def on_before_sample(self) -> LoopDecision: if isinstance(result, TerminationSuccess): logger.info( - f"Critic developer terminating: " - f"definition '{result.definition_id}' with {result.total_credit:.1f} credit " - f"beats baseline avg {result.baseline_avg:.1f}" + "Critic developer terminating: definition '%s' with %.1f credit beats baseline avg %.1f", + result.definition_id, + result.total_credit, + result.baseline_avg, ) return Abort() diff --git a/props/agents/critic_dev/optimize/test_e2e.py b/props/agents/critic_dev/optimize/test_e2e.py index ef2df456ce..bb6fd07285 100644 --- a/props/agents/critic_dev/optimize/test_e2e.py +++ b/props/agents/critic_dev/optimize/test_e2e.py @@ -121,7 +121,7 @@ async def test_optimizer_orchestrates_critic( Uses multi-model FakeOpenAIServer to route optimizer and critic to different mocks. 
""" snapshot_slug = test_snapshot - logger.info(f"Running orchestration test with snapshot: {snapshot_slug}") + logger.info("Running orchestration test with snapshot: %s", snapshot_slug) # Mutable container filled after image push but before mock runs digests: dict[str, str] = {} @@ -142,13 +142,13 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: ) ) critic_run_id = start_response.critic_run_id - logger.info(f"Orchestration optimizer got critic_run_id: {critic_run_id}") + logger.info("Orchestration optimizer got critic_run_id: %s", critic_run_id) # Wait for critic container to finish completed: CriticRunStatus = yield from m.wait_until_critic_completed_roundtrip( critic_run_id, timeout_seconds=120 ) - logger.info(f"Critic completed: status={completed.status}") + logger.info("Critic completed: status=%s", completed.status) # Wait for grading (polls database directly inside container) grading_response: GradingStatusResponse = yield from m.wait_until_graded_roundtrip( @@ -157,7 +157,7 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: total_credit = grading_response.total_credit or 0.0 max_credit = grading_response.max_credit or 0 recall = total_credit / max_credit if max_credit > 0 else 0.0 - logger.info(f"Orchestration optimizer got grading: total_credit={total_credit}, recall={recall:.2%}") + logger.info("Orchestration optimizer got grading: total_credit=%s, recall=%.2f%%", total_credit, recall * 100) # Report success yield m.report_success() @@ -223,7 +223,7 @@ def grader_mock(m: GraderMock) -> PlayGen: timeout_seconds=120, ) - logger.info(f"Orchestration test: critic-dev optimizer completed with run_id={run_id}") + logger.info("Orchestration test: critic-dev optimizer completed with run_id=%s", run_id) # Verify optimizer run status with synced_db.session() as session: @@ -254,7 +254,7 @@ def grader_mock(m: GraderMock) -> PlayGen: with synced_db.session() as session: for crid in critic_run_ids: edges = 
session.query(GradingEdge).filter_by(critique_run_id=crid).all() - logger.info(f"Critic {crid} has {len(edges)} grading edges") + logger.info("Critic %s has %s grading edges", crid, len(edges)) # The critic mock creates 1 issue, and fill_remaining creates edges for each GT occurrence assert len(edges) >= 0, "Grading edges should be created" diff --git a/props/agents/critic_dev/recipes/test_build_critic_e2e.py b/props/agents/critic_dev/recipes/test_build_critic_e2e.py index bf3df2dd76..372e6d7cc6 100644 --- a/props/agents/critic_dev/recipes/test_build_critic_e2e.py +++ b/props/agents/critic_dev/recipes/test_build_critic_e2e.py @@ -63,7 +63,7 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: assert_that(result, exited_successfully()) stdout = result.stdout if isinstance(result.stdout, str) else result.stdout.truncated_text new_digest = stdout.strip().split("\n")[-1] - logger.info(f"build_critic.sh produced digest: {new_digest}") + logger.info("build_critic.sh produced digest: %s", new_digest) assert new_digest.startswith("sha256:"), f"Expected sha256 digest, got: {new_digest!r}" # Start the custom critic image @@ -78,17 +78,17 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: ) ) critic_run_id = start_output.critic_run_id - logger.info(f"Custom critic run: {critic_run_id}") + logger.info("Custom critic run: %s", critic_run_id) # Wait for critic to finish completed: CriticRunStatus = yield from m.wait_until_critic_completed_roundtrip( critic_run_id, timeout_seconds=120 ) - logger.info(f"Critic completed: status={completed.status}") + logger.info("Critic completed: status=%s", completed.status) # Wait for grading wait_output: GradingStatusResponse = yield from m.wait_until_graded_roundtrip(critic_run_id, timeout_seconds=60) - logger.info(f"Grading complete: total_credit={wait_output.total_credit}") + logger.info("Grading complete: total_credit=%s", wait_output.total_credit) yield m.report_success() diff --git a/props/agents/critic_dev/recipes/test_recipes.py 
b/props/agents/critic_dev/recipes/test_recipes.py index 1a40ad69e2..3633541dc0 100644 --- a/props/agents/critic_dev/recipes/test_recipes.py +++ b/props/agents/critic_dev/recipes/test_recipes.py @@ -69,21 +69,21 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: assert any(s["slug"] == snapshot for s in gt_data["train_snapshots"]) assert len(gt_data["true_positives"]) >= 1 assert all(tp["num_occurrences"] >= 1 for tp in gt_data["true_positives"]) - logger.info(f"ground_truth: {gt_data}") + logger.info("ground_truth: %s", gt_data) ex_data = yield from _run_recipe(m, "examples_and_scopes") assert len(ex_data["train_examples"]) >= 1 assert all(e["recall_denominator"] >= 0 for e in ex_data["train_examples"]) - logger.info(f"examples_and_scopes: {ex_data}") + logger.info("examples_and_scopes: %s", ex_data) rm_data = yield from _run_recipe(m, "recall_metrics") assert len(rm_data["leaderboard"]) >= 1, f"Expected non-empty leaderboard: {rm_data}" assert all(row["recall_denominator"] >= 1 for row in rm_data["leaderboard"]) - logger.info(f"recall_metrics: {rm_data}") + logger.info("recall_metrics: %s", rm_data) ra_data = yield from _run_recipe(m, "run_analysis", snapshot) assert len(ra_data["recent_runs"]) >= 1, f"Expected non-empty runs: {ra_data}" - logger.info(f"run_analysis: {ra_data}") + logger.info("run_analysis: %s", ra_data) yield m.report_failure("Recipe verification done — exiting via report_failure") diff --git a/props/agents/critic_dev/test_e2e.py b/props/agents/critic_dev/test_e2e.py index f7406f603a..e693247729 100644 --- a/props/agents/critic_dev/test_e2e.py +++ b/props/agents/critic_dev/test_e2e.py @@ -91,17 +91,17 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: ) ) critic_run_id = start_output.critic_run_id - logger.info(f"PO got critic_run_id: {critic_run_id}") + logger.info("PO got critic_run_id: %s", critic_run_id) # Wait for critic container to finish completed: CriticRunStatus = yield from m.wait_until_critic_completed_roundtrip( critic_run_id, 
timeout_seconds=120 ) - logger.info(f"Critic completed: status={completed.status}") + logger.info("Critic completed: status=%s", completed.status) # Wait for grading (polls database directly inside container) wait_output: GradingStatusResponse = yield from m.wait_until_graded_roundtrip(critic_run_id, timeout_seconds=60) - logger.info(f"PO got grading: total_credit={wait_output.total_credit}") + logger.info("PO got grading: total_credit=%s", wait_output.total_credit) # Report success yield m.report_success() @@ -117,7 +117,7 @@ def critic_mock(m: CriticMock) -> PlayGen: assert "critic" in system_text.lower(), ( f"Expected system message to mention 'critic'. Got: {system_text[:200]}..." ) - logger.info(f"Critic received system message ({len(system_text)} chars)") + logger.info("Critic received system message (%s chars)", len(system_text)) # Submit zero issues yield m.submit(issues_count=0, summary="Critic completed") @@ -268,7 +268,7 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: assert_that(result, exited_successfully()) stdout = result.stdout if isinstance(result.stdout, str) else result.stdout.truncated_text new_digest = stdout.strip().split("\n")[-1] # Last line is the digest - logger.info(f"Custom image digest: {new_digest}") + logger.info("Custom image digest: %s", new_digest) # Start the custom critic image (non-blocking) example = WholeSnapshotExample(kind=ExampleKind.WHOLE_SNAPSHOT, snapshot_slug=snapshot_slug) @@ -282,17 +282,17 @@ def optimizer_mock(m: CriticDevMock) -> PlayGen: ) ) critic_run_id = start_output.critic_run_id - logger.info(f"Custom critic run: {critic_run_id}") + logger.info("Custom critic run: %s", critic_run_id) # Wait for critic container to finish completed: CriticRunStatus = yield from m.wait_until_critic_completed_roundtrip( critic_run_id, timeout_seconds=120 ) - logger.info(f"Critic completed: status={completed.status}") + logger.info("Critic completed: status=%s", completed.status) # Wait for grading to complete wait_output: 
GradingStatusResponse = yield from m.wait_until_graded_roundtrip(critic_run_id, timeout_seconds=60) - logger.info(f"Grading complete: total_credit={wait_output.total_credit}") + logger.info("Grading complete: total_credit=%s", wait_output.total_credit) yield m.report_success() @@ -381,8 +381,8 @@ def mock(m: CriticMock) -> PlayGen: ) stdout = result.stdout if hasattr(result, "stdout") else "" stderr = result.stderr if hasattr(result, "stderr") else "" - logger.info(f"Critic push attempt stdout: {stdout}") - logger.info(f"Critic push attempt stderr: {stderr}") + logger.info("Critic push attempt stdout: %s", stdout) + logger.info("Critic push attempt stderr: %s", stderr) # Submit zero issues (expected behavior: push failed, critic still completes) yield m.submit(issues_count=0, summary="Push attempt completed (expected to fail)") diff --git a/props/agents/grader/main.py b/props/agents/grader/main.py index 079a3c396b..45ef86dce4 100644 --- a/props/agents/grader/main.py +++ b/props/agents/grader/main.py @@ -125,7 +125,7 @@ def notification_callback( if notification.snapshot_slug != self.snapshot_slug: return # Not for us - logger.debug(f"Notification for {self.snapshot_slug}: {notification.operation} {notification.item.table}") + logger.debug("Notification for %s: %s %s", self.snapshot_slug, notification.operation, notification.item.table) self.notification_queue.append(notification) self.wake_event.set() @@ -489,7 +489,7 @@ async def _run_grader_loop(snapshot_slug: SnapshotSlug, system_prompt: str, db: listener_conn = await db.config.asyncpg_connect() await listener_conn.add_listener(GRADING_PENDING_CHANNEL, state.notification_callback) - logger.info(f"Listening on channel '{GRADING_PENDING_CHANNEL}' for {snapshot_slug}") + logger.info("Listening on channel '%s' for %s", GRADING_PENDING_CHANNEL, snapshot_slug) try: while True: diff --git a/props/agents/grader/test_e2e.py b/props/agents/grader/test_e2e.py index 4ebbf071cf..537adf8bd8 100644 --- 
a/props/agents/grader/test_e2e.py +++ b/props/agents/grader/test_e2e.py @@ -102,7 +102,7 @@ def mock(m: GraderMock) -> PlayGen: ) session.commit() - logger.info(f"Created critic run {critic_run_id} with reported issue") + logger.info("Created critic run %s with reported issue", critic_run_id) # Precondition: verify grading_pending has rows before starting grader drift = get_drift(test_snapshot, db) diff --git a/props/agents/grader/test_e2e_clustering.py b/props/agents/grader/test_e2e_clustering.py index a2003b120c..b59c05d95a 100644 --- a/props/agents/grader/test_e2e_clustering.py +++ b/props/agents/grader/test_e2e_clustering.py @@ -109,7 +109,7 @@ def mock(m: GraderMock) -> PlayGen: ) session.commit() - logger.info(f"Created 2 critic runs: {critic_1_id}, {critic_2_id}") + logger.info("Created 2 critic runs: %s, %s", critic_1_id, critic_2_id) # Precondition: verify grading_pending has rows assert get_drift(test_snapshot, db).grading, "grading_pending should have rows" diff --git a/props/agents/grader/test_e2e_sleep_wake.py b/props/agents/grader/test_e2e_sleep_wake.py index 220fd116ae..6a8d02f62e 100644 --- a/props/agents/grader/test_e2e_sleep_wake.py +++ b/props/agents/grader/test_e2e_sleep_wake.py @@ -148,7 +148,7 @@ def mock(m: GraderMock) -> PlayGen: ) assert tp_edge_1 is not None, "No TP edge with credit>0 for round 1" assert tp_edge_1.credit == pytest.approx(0.1) - logger.info(f"Round 1 TP edge verified: credit={tp_edge_1.credit}") + logger.info("Round 1 TP edge verified: credit=%s", tp_edge_1.credit) # --- Insert critic-2 while grader is sleeping (triggers pg_notify) --- critic_2_id = uuid4() diff --git a/props/backend/app.py b/props/backend/app.py index 264ff8369e..61666b6fed 100644 --- a/props/backend/app.py +++ b/props/backend/app.py @@ -67,14 +67,14 @@ class BackendDeps: def _sync_all_specimens(db: Database) -> None: """Scan /specimens/ directory and sync each specimen to the database.""" if not SPECIMENS_DIR.exists(): - logger.warning(f"Specimens 
directory {SPECIMENS_DIR} not found, skipping sync") + logger.warning("Specimens directory %s not found, skipping sync", SPECIMENS_DIR) return synced = 0 for data_yaml in sorted(SPECIMENS_DIR.rglob("specimen_data.yaml")): code_tar = data_yaml.parent / "specimen_code.tar" if not code_tar.exists(): - logger.warning(f"Missing code tar for {data_yaml}, skipping") + logger.warning("Missing code tar for %s, skipping", data_yaml) continue bundle = SpecimenBundle.from_paths(code_tar, data_yaml) with db.session() as session: @@ -82,7 +82,7 @@ def _sync_all_specimens(db: Database) -> None: session.commit() synced += 1 - logger.info(f"Synced {synced} specimens from {SPECIMENS_DIR}") + logger.info("Synced %s specimens from %s", synced, SPECIMENS_DIR) # Refresh the materialized view (reads committed specimen data) with db.session() as session: @@ -107,7 +107,7 @@ async def lifespan(app: FastAPI) -> AsyncIterator[None]: with db.session() as session: stats = sync_model_metadata_with_session(session, deps.config) if stats.added or stats.deleted: - logger.info(f"Model metadata synced: +{stats.added} added, -{stats.deleted} deleted") + logger.info("Model metadata synced: +%s added, -%s deleted", stats.added, stats.deleted) if deps.config.auto_sync_specimens: _sync_all_specimens(db) @@ -132,15 +132,15 @@ async def lifespan(app: FastAPI) -> AsyncIterator[None]: registry=app.state.registry, db_config=db_config, model=deps.grader_model, db=db ) await app.state.grader_supervisor.start() - logger.info(f"Grader supervisor started (model: {deps.grader_model})") + logger.info("Grader supervisor started (model: %s)", deps.grader_model) else: app.state.grader_supervisor = None logger.info("Grader supervisor disabled (grader_model not set in config)") admin_token = db_config.basic_auth_token protocol = "https" if deps.port == 443 else "http" - logger.info(f"Admin token: {admin_token}") - logger.info(f"Admin URL: {protocol}://{deps.host}:{deps.port}/?token={admin_token}") + logger.info("Admin 
token: %s", admin_token) + logger.info("Admin URL: %s://%s:%s/?token=%s", protocol, deps.host, deps.port, admin_token) logger.info("Props backend ready") yield diff --git a/props/backend/auth.py b/props/backend/auth.py index 626ab1de57..1470bf7c70 100644 --- a/props/backend/auth.py +++ b/props/backend/auth.py @@ -107,7 +107,7 @@ def extract_agent_run_id_from_username(username: str) -> UUID | None: try: return UUID(username[len(prefix) :]) except ValueError: - logger.warning(f"Invalid UUID in agent username: {username}") + logger.warning("Invalid UUID in agent username: %s", username) return None @@ -130,7 +130,7 @@ def validate_postgres_credentials(username: str, password: str, db_config: Datab ): pass except psycopg.OperationalError as e: - logger.warning(f"Postgres auth failed for user {username}: {e}") + logger.warning("Postgres auth failed for user %s: %s", username, e) raise HTTPException(status_code=401, detail="Invalid credentials") return agent_run_id diff --git a/props/backend/routes/registry.py b/props/backend/routes/registry.py index 004453671f..1ceb50e225 100644 --- a/props/backend/routes/registry.py +++ b/props/backend/routes/registry.py @@ -116,17 +116,17 @@ async def _extract_image_metadata(manifest_body: bytes, repository: str) -> _Ima display_name: str | None = labels.get("org.opencontainers.image.title") if base_digest: - logger.info(f"Extracted base_digest from annotation: {base_digest}") + logger.info("Extracted base_digest from annotation: %s", base_digest) if display_name: - logger.info(f"Extracted display_name from annotation: {display_name}") + logger.info("Extracted display_name from annotation: %s", display_name) return _ImageMetadata(base_digest=base_digest, display_name=display_name) except (httpx.RequestError, json.JSONDecodeError) as e: - logger.warning(f"Error fetching/parsing config blob: {e}") + logger.warning("Error fetching/parsing config blob: %s", e) return _ImageMetadata(base_digest=None, display_name=None) except 
(json.JSONDecodeError, KeyError) as e: - logger.warning(f"Error parsing manifest for image metadata extraction: {e}") + logger.warning("Error parsing manifest for image metadata extraction: %s", e) return _ImageMetadata(base_digest=None, display_name=None) @@ -145,7 +145,7 @@ async def _record_manifest_push( with db.session() as session: existing = session.get(AgentDefinition, digest) if existing: - logger.info(f"Agent definition {digest} already exists, skipping") + logger.info("Agent definition %s already exists, skipping", digest) return metadata = await _extract_image_metadata(manifest_body, repository) @@ -160,9 +160,13 @@ async def _record_manifest_push( session.commit() logger.info( - f"Recorded agent definition: {repository}@{digest} " - f"(type={agent_type}, created_by={agent_run_id}, " - f"base={metadata.base_digest or 'none'}, display_name={metadata.display_name or 'none'})" + "Recorded agent definition: %s@%s (type=%s, created_by=%s, base=%s, display_name=%s)", + repository, + digest, + agent_type, + agent_run_id, + metadata.base_digest or "none", + metadata.display_name or "none", ) @@ -222,7 +226,7 @@ async def put_manifest(request: Request, repo: str, ref: str, admin_db: AdminDb, {"channel": GRADER_DEFINITION_CHANGED_CHANNEL, "payload": notification.model_dump_json()}, ) session.commit() - logger.info(f"Notified grader definition changed: {repo}:{ref} -> {manifest_digest}") + logger.info("Notified grader definition changed: %s:%s -> %s", repo, ref, manifest_digest) return response diff --git a/props/backend/routes/runs.py b/props/backend/routes/runs.py index e078757b55..9e5cda684b 100644 --- a/props/backend/routes/runs.py +++ b/props/backend/routes/runs.py @@ -600,7 +600,7 @@ async def _run_validation_batch(job: ValidationJob, registry: AgentRegistry, db: async def run_one(example: ExampleSpec) -> bool: """Run critic for one example. 
Returns True on success.""" try: - logger.info(f"[Job {job.job_id}] Running critic on {example.snapshot_slug}") + logger.info("[Job %s] Running critic on %s", job.job_id, example.snapshot_slug) critic_run_id = await registry.run_critic( image=image, example=example, @@ -619,14 +619,14 @@ async def run_one(example: ExampleSpec) -> bool: or critic_run.container_exit_code != 0 ): status = critic_run.status if critic_run else "not found" - logger.warning(f"[Job {job.job_id}] Critic failed with status {status}") + logger.warning("[Job %s] Critic failed with status %s", job.job_id, status) return False - logger.info(f"[Job {job.job_id}] Critic exited: {critic_run_id}") + logger.info("[Job %s] Critic exited: %s", job.job_id, critic_run_id) return True except Exception: - logger.exception(f"[Job {job.job_id}] Error processing {example.snapshot_slug}") + logger.exception("[Job %s] Error processing %s", job.job_id, example.snapshot_slug) return False # Run all examples in parallel @@ -640,10 +640,10 @@ async def run_one(example: ExampleSpec) -> bool: job.failed += 1 job.status = JobStatus.COMPLETED - logger.info(f"[Job {job.job_id}] Finished: {job.completed} completed, {job.failed} failed") + logger.info("[Job %s] Finished: %s completed, %s failed", job.job_id, job.completed, job.failed) except Exception: - logger.exception(f"[Job {job.job_id}] Batch failed") + logger.exception("[Job %s] Batch failed", job.job_id) job.status = JobStatus.FAILED diff --git a/props/core/gepa/gepa_adapter.py b/props/core/gepa/gepa_adapter.py index e10fc582e7..b174b4cb36 100644 --- a/props/core/gepa/gepa_adapter.py +++ b/props/core/gepa/gepa_adapter.py @@ -39,7 +39,7 @@ import tempfile from collections.abc import Mapping, Sequence from dataclasses import dataclass -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import Any from uuid import UUID @@ -187,10 +187,10 @@ def __init__( # Set up proposal logging if reflection_model provided if 
reflection_model: - timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + timestamp = datetime.now(tz=UTC).strftime("%Y%m%d_%H%M%S") log_file = run_dir / f"gepa_proposals_{timestamp}.jsonl" self._setup_proposal_logging(log_file) - logger.info(f"GEPA proposal logging enabled: {log_file.absolute()}") + logger.info("GEPA proposal logging enabled: %s", log_file.absolute()) else: # No reflection model - GEPA will use default proposal mechanism self.propose_new_texts = None @@ -310,7 +310,7 @@ def propose_new_texts( f.write( json.dumps( { - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(tz=UTC).isoformat(), "call_id": call_count, "component": name, "type": "input", @@ -337,7 +337,7 @@ def propose_new_texts( f.write( json.dumps( { - "timestamp": datetime.now().isoformat(), + "timestamp": datetime.now(tz=UTC).isoformat(), "call_id": call_count, "component": name, "type": "output", @@ -521,7 +521,7 @@ async def load_datasets(db: Database) -> tuple[list[Example], list[Example]]: trainset = get_examples_for_split(session, Split.TRAIN) valset = get_examples_for_split(session, Split.VALID) - logger.info(f"Loaded {len(trainset)} training examples, {len(valset)} validation examples") + logger.info("Loaded %s training examples, %s validation examples", len(trainset), len(valset)) return trainset, valset @@ -559,18 +559,18 @@ def _log_run_statistics(critic_model: str, grader_model: str, db: Database) -> N # Log critic statistics total_critic = sum(count for _, count in critic_status_counts) if total_critic > 0: - logger.info(f"Critic run statistics (model={critic_model}, total={total_critic}):") + logger.info("Critic run statistics (model=%s, total=%s):", critic_model, total_critic) for status, count in sorted(critic_status_counts): - logger.info(f" {status}: {count} ({count / total_critic:.1%})") + logger.info(" %s: %s (%.1f%%)", status, count, count / total_critic * 100) else: logger.info("No critic runs found") # Log grader statistics total_grader = 
sum(count for _, count in grader_status_counts) if total_grader > 0: - logger.info(f"Grader run statistics (model={grader_model}, total={total_grader}):") + logger.info("Grader run statistics (model=%s, total=%s):", grader_model, total_grader) for status, count in sorted(grader_status_counts): - logger.info(f" {status}: {count} ({count / total_grader:.1%})") + logger.info(" %s: %s (%.1f%%)", status, count, count / total_grader * 100) else: logger.info("No grader runs found") @@ -631,18 +631,18 @@ async def optimize_with_gepa( """ _gepa_not_implemented() logger.info("Starting GEPA optimization") - logger.info(f"Reflection model: {reflection_model}") - logger.info(f"Max metric calls: {max_metric_calls}") - logger.info(f"Minibatch size: {minibatch_size}") - logger.info(f"Initial prompt length: {len(initial_prompt)} chars") - logger.info(f"Warm start: {warm_start}") + logger.info("Reflection model: %s", reflection_model) + logger.info("Max metric calls: %s", max_metric_calls) + logger.info("Minibatch size: %s", minibatch_size) + logger.info("Initial prompt length: %s chars", len(initial_prompt)) + logger.info("Warm start: %s", warm_start) if seed is not None: - logger.info(f"Random seed: {seed}") + logger.info("Random seed: %s", seed) # Load datasets (always uses critic scopes from database) logger.info("Loading datasets...") trainset, valset = await load_datasets(db) - logger.info(f"Loaded {len(trainset)} training examples, {len(valset)} validation examples") + logger.info("Loaded %s training examples, %s validation examples", len(trainset), len(valset)) # Prepare run directory with optional warm-start checkpoint run_dir = None @@ -659,7 +659,9 @@ async def optimize_with_gepa( with checkpoint_path.open("wb") as f: pickle.dump(historical_state, f) logger.info( - f"Saved historical state with {len(historical_state['program_candidates'])} prompts to {checkpoint_path}" + "Saved historical state with %s prompts to %s", + len(historical_state["program_candidates"]), + 
checkpoint_path, ) run_dir = temp_dir else: @@ -668,10 +670,10 @@ async def optimize_with_gepa( # If no run_dir yet (no warm start or no historical data), create one if run_dir is None: run_dir = tempfile.mkdtemp(prefix="gepa_run_") - logger.info(f"Created run directory: {run_dir}") + logger.info("Created run directory: %s", run_dir) # Create adapter - logger.info(f"Creating CriticAdapter with max_parallelism={max_parallelism}") + logger.info("Creating CriticAdapter with max_parallelism=%s", max_parallelism) adapter = CriticAdapter( critic_client, grader_client, @@ -683,7 +685,7 @@ async def optimize_with_gepa( ) # Run optimization (reflection_lm accepts model string directly) - logger.info(f"Starting GEPA evolutionary search (merge={'enabled' if use_merge else 'disabled'})...") + logger.info("Starting GEPA evolutionary search (merge=%s)...", "enabled" if use_merge else "disabled") result: GEPAResult[CriticOutput, Any] = gepa.optimize( seed_candidate={"system_prompt": initial_prompt}, trainset=trainset, @@ -702,7 +704,7 @@ async def optimize_with_gepa( optimized_prompt = result.candidates[result.best_idx]["system_prompt"] best_score = result.val_aggregate_scores[result.best_idx] - logger.info(f"GEPA optimization complete. Best score: {best_score:.3f}, Metric calls: {result.total_metric_calls}") + logger.info("GEPA optimization complete. Best score: %.3f, Metric calls: %s", best_score, result.total_metric_calls) # Log run statistics (critic/grader status breakdown) _log_run_statistics(critic_client.model, grader_client.model, db) diff --git a/props/db/database.py b/props/db/database.py index e3f5a88c99..4f4fd07ee7 100644 --- a/props/db/database.py +++ b/props/db/database.py @@ -63,45 +63,45 @@ def per_request(cls, config: DatabaseConfig) -> Database: call dispose() when done. 
""" db = cls.__new__(cls) - db._config = config - db._engine = create_engine(config.url, echo=False, poolclass=NullPool) + db.config = config + db.engine = create_engine(config.url, echo=False, poolclass=NullPool) - @event.listens_for(db._engine, "checkout") + @event.listens_for(db.engine, "checkout") def _register_composite_types( dbapi_connection: Any, connection_record: ConnectionPoolEntry, connection_proxy: Any ) -> None: try: register_composite("stats_with_ci", dbapi_connection, globally=False) except Exception as e: - logger.debug(f"Could not register stats_with_ci composite type: {e}") + logger.debug("Could not register stats_with_ci composite type: %s", e) - db._scoped_factory = scoped_session(sessionmaker(bind=db._engine)) + db.scoped_factory = scoped_session(sessionmaker(bind=db.engine)) return db def __init__(self, config: DatabaseConfig) -> None: - self._config = config + self.config = config url = config.url - logger.info(f"Connecting to database: {config.host}:{config.port}/{config.database}") - self._engine: Engine = create_engine(url, echo=False, poolclass=NullPool) + logger.info("Connecting to database: %s:%s/%s", config.host, config.port, config.database) + self.engine: Engine = create_engine(url, echo=False, poolclass=NullPool) # Register composite type adapter on each checkout. # NullPool creates a fresh connection per checkout, so this runs on every use. 
- @event.listens_for(self._engine, "checkout") + @event.listens_for(self.engine, "checkout") def _register_composite_types( dbapi_connection: Any, connection_record: ConnectionPoolEntry, connection_proxy: Any ) -> None: try: register_composite("stats_with_ci", dbapi_connection, globally=False) except Exception as e: - logger.debug(f"Could not register stats_with_ci composite type: {e}") + logger.debug("Could not register stats_with_ci composite type: %s", e) self._verify_connection() - self._scoped_factory = scoped_session(sessionmaker(bind=self._engine)) + self.scoped_factory = scoped_session(sessionmaker(bind=self.engine)) def _verify_connection(self, timeout_secs: int = 2) -> None: test_engine = create_engine( - self._engine.url.render_as_string(hide_password=False), + self.engine.url.render_as_string(hide_password=False), echo=False, connect_args={"connect_timeout": timeout_secs}, ) @@ -118,7 +118,7 @@ def session(self) -> Iterator[Session]: Commits on successful exit, rolls back on exception. 
""" - session = self._scoped_factory() + session = self.scoped_factory() try: yield session session.commit() @@ -126,21 +126,13 @@ def session(self) -> Iterator[Session]: session.rollback() raise finally: - self._scoped_factory.remove() + self.scoped_factory.remove() def recreate(self) -> None: """Recreate database from scratch (drop all + schema via Alembic).""" - recreate_database(self._engine) + recreate_database(self.engine) def dispose(self) -> None: """Dispose engine and session factory.""" - self._scoped_factory.remove() - self._engine.dispose() - - @property - def config(self) -> DatabaseConfig: - return self._config - - @property - def engine(self) -> Engine: - return self._engine + self.scoped_factory.remove() + self.engine.dispose() diff --git a/props/db/sync/model_metadata.py b/props/db/sync/model_metadata.py index 2948b4f120..aa8380bb47 100644 --- a/props/db/sync/model_metadata.py +++ b/props/db/sync/model_metadata.py @@ -55,7 +55,7 @@ def sync_model_metadata_with_session(session: Session, config: PropsConfig | Non ) # Full sync: make DB exactly match source - logger.info(f"Syncing model_metadata table (source: {len(source_models)} models)...") + logger.info("Syncing model_metadata table (source: %s models)...", len(source_models)) db_models = {m.model_id: m for m in session.query(ModelMetadata).all()} source_model_ids = set(source_models.keys()) @@ -67,7 +67,7 @@ def sync_model_metadata_with_session(session: Session, config: PropsConfig | Non # Delete orphaned models (in DB but not in source) for model_id in db_model_ids - source_model_ids: - logger.info(f" Deleting orphaned model: {model_id}") + logger.info(" Deleting orphaned model: %s", model_id) session.delete(db_models[model_id]) deleted += 1 @@ -76,7 +76,7 @@ def sync_model_metadata_with_session(session: Session, config: PropsConfig | Non is_new = model_id not in db_model_ids session.merge(model_meta) if is_new: - logger.debug(f" Adding model: {model_id}") + logger.debug(" Adding model: %s", 
model_id) added += 1 else: # Note: merge() updates if changed; count all as updated for stats @@ -85,6 +85,10 @@ def sync_model_metadata_with_session(session: Session, config: PropsConfig | Non session.flush() logger.info( - f"Model metadata synced: +{added} added, ~{updated} updated, -{deleted} deleted, ={len(source_models)} total" + "Model metadata synced: +%s added, ~%s updated, -%s deleted, =%s total", + added, + updated, + deleted, + len(source_models), ) return SyncStats(added=added, updated=updated, deleted=deleted, total=len(source_models)) diff --git a/props/db/sync/sync.py b/props/db/sync/sync.py index ad9b8f5611..b8886d5fff 100644 --- a/props/db/sync/sync.py +++ b/props/db/sync/sync.py @@ -146,7 +146,7 @@ def sync_snapshot_files_to_db(session: Session, slug: SnapshotSlug, archive_byte deleted = len(orphaned) total = len(seen_keys) session.flush() - logger.info(f"Snapshot files synced: +{added} added, ~{updated} updated, -{deleted} deleted, ={total} total") + logger.info("Snapshot files synced: +%s added, ~%s updated, -%s deleted, =%s total", added, updated, deleted, total) return SyncStats(total=total, added=added, updated=updated, deleted=deleted) @@ -357,7 +357,7 @@ def _sync_critic_scopes_for_specimen( true_positives: List of parsed true positives with occurrences false_positives: List of parsed false positives with occurrences """ - logger.debug(f"Syncing critic scopes for {slug}: {len(true_positives)} TPs, {len(false_positives)} FPs") + logger.debug("Syncing critic scopes for %s: %s TPs, %s FPs", slug, len(true_positives), len(false_positives)) # Collect desired critic scopes from occurrence data desired_triggers: set[tuple[SnapshotSlug, str, str, str]] = set() @@ -369,7 +369,7 @@ def _sync_critic_scopes_for_specimen( assert files_hash is not None # trigger_files is not None, so hash shouldn't be None desired_triggers.add((slug, tp.tp_id, occurrence.occurrence_id, files_hash)) - logger.debug(f"Collected {len(desired_triggers)} desired critic scopes 
for {slug}") + logger.debug("Collected %s desired critic scopes for %s", len(desired_triggers), slug) # Current critic scopes from DB existing_triggers: set[tuple[SnapshotSlug, str, str, str]] = { @@ -464,7 +464,7 @@ def sync_specimen(session: Session, bundle: SpecimenBundle) -> None: seen_issue_keys.add(key) if key not in existing_issues: - logger.debug(f"Adding issue: {tp.snapshot_slug}/{tp.tp_id}") + logger.debug("Adding issue: %s/%s", tp.snapshot_slug, tp.tp_id) orm_issue = TruePositive(snapshot_slug=tp.snapshot_slug, tp_id=tp.tp_id, rationale=tp.rationale) session.add(orm_issue) for occ in tp.occurrences: @@ -479,7 +479,7 @@ def sync_specimen(session: Session, bundle: SpecimenBundle) -> None: seen_fp_keys.add(fp_key) if fp_key not in existing_fps: - logger.debug(f"Adding false positive: {fp.snapshot_slug}/{fp.fp_id}") + logger.debug("Adding false positive: %s/%s", fp.snapshot_slug, fp.fp_id) orm_fp = FalsePositive(snapshot_slug=fp.snapshot_slug, fp_id=fp.fp_id, rationale=fp.rationale) session.add(orm_fp) for fp_occ in fp.occurrences: @@ -502,7 +502,7 @@ def sync_specimen(session: Session, bundle: SpecimenBundle) -> None: _sync_critic_scopes_for_specimen(session, slug, true_positives, false_positives) - logger.info(f"Synced specimen from bundle: {slug}") + logger.info("Synced specimen from bundle: %s", slug) def refresh_examples_matview(session: Session) -> None: diff --git a/props/orchestration/agent_registry.py b/props/orchestration/agent_registry.py index 8e05c64e92..31da56cfa0 100644 --- a/props/orchestration/agent_registry.py +++ b/props/orchestration/agent_registry.py @@ -243,7 +243,7 @@ async def _resolve_image_ref(self, agent_type: AgentType, ref: str) -> str: ImageResolutionError: If tag doesn't exist or proxy returns error """ if is_digest(ref): - logger.debug(f"Reference {ref} is already a digest, returning as-is") + logger.debug("Reference %s is already a digest, returning as-is", ref) return ref repository = str(agent_type) @@ -257,7 +257,7 @@ 
async def _resolve_image_ref(self, agent_type: AgentType, ref: str) -> str: } auth = httpx.BasicAuth(self._db_config.user, self._db_config.password) - logger.info(f"Resolving tag {repository}:{ref} via proxy at {proxy_url}") + logger.info("Resolving tag %s:%s via proxy at %s", repository, ref, proxy_url) try: async with httpx.AsyncClient() as client: @@ -275,7 +275,7 @@ async def _resolve_image_ref(self, agent_type: AgentType, ref: str) -> str: if not digest: raise ImageResolutionError(f"Proxy didn't return Docker-Content-Digest header for {repository}:{ref}") - logger.info(f"Resolved {repository}:{ref} → {digest}") + logger.info("Resolved %s:%s → %s", repository, ref, digest) return str(digest) async def resolve_image(self, agent_type: AgentType, ref: str) -> ResolvedImage: @@ -349,7 +349,7 @@ async def _collect_run( found_run.status = status found_run.container_exit_code = container_exit_code session.commit() - logger.info(f"Updated {agent_run_id} status to {status}") + logger.info("Updated %s status to %s", agent_run_id, status) return status async def _start_agent( diff --git a/props/orchestration/grader_supervisor.py b/props/orchestration/grader_supervisor.py index 7ee2f603fa..c4a462e72c 100644 --- a/props/orchestration/grader_supervisor.py +++ b/props/orchestration/grader_supervisor.py @@ -92,10 +92,10 @@ def _snapshot_created_callback( if self._shutdown: return if not isinstance(payload, str): - logger.error(f"pg_notify payload is not a string: {type(payload)}") + logger.error("pg_notify payload is not a string: %s", type(payload)) return notification = SnapshotCreatedNotification.model_validate_json(payload) - logger.info(f"Snapshot created: {notification.snapshot_slug}") + logger.info("Snapshot created: %s", notification.snapshot_slug) self._launch_background(self.reconcile(), name="reconcile-snapshot-created") def _grader_definition_changed_callback( @@ -104,10 +104,10 @@ def _grader_definition_changed_callback( if self._shutdown: return if not 
isinstance(payload, str): - logger.error(f"pg_notify payload is not a string: {type(payload)}") + logger.error("pg_notify payload is not a string: %s", type(payload)) return notification = GraderDefinitionChangedNotification.model_validate_json(payload) - logger.info(f"Grader definition changed: {notification.tag} -> {notification.digest}") + logger.info("Grader definition changed: %s -> %s", notification.tag, notification.digest) self._launch_background(self.reconcile(restart_existing=True), name="reconcile-definition-changed") # --- Lifecycle --- @@ -153,7 +153,7 @@ async def reconcile(self, *, restart_existing: bool = False) -> None: # Kill graders for snapshots that no longer exist. for slug in list(self._handles.keys()): if slug not in desired: - logger.info(f"Snapshot {slug} removed, killing grader") + logger.info("Snapshot %s removed, killing grader", slug) await self._kill_grader(slug) # Spawn missing graders. @@ -164,7 +164,9 @@ async def reconcile(self, *, restart_existing: bool = False) -> None: spawned += 1 if spawned or restart_existing: - logger.info(f"Reconciled: {len(self._handles)} graders running ({spawned} spawned, {len(desired)} desired)") + logger.info( + "Reconciled: %s graders running (%s spawned, %s desired)", len(self._handles), spawned, len(desired) + ) # --- Internal helpers --- @@ -174,7 +176,7 @@ async def _start_listener(self) -> None: await self._listener_conn.add_listener( GRADER_DEFINITION_CHANGED_CHANNEL, self._grader_definition_changed_callback ) - logger.info(f"Listening on channels '{SNAPSHOT_CREATED_CHANNEL}', '{GRADER_DEFINITION_CHANGED_CHANNEL}'") + logger.info("Listening on channels '%s', '%s'", SNAPSHOT_CREATED_CHANNEL, GRADER_DEFINITION_CHANGED_CHANNEL) async def _stop_listener(self) -> None: if self._listener_conn: @@ -185,19 +187,19 @@ async def _stop_listener(self) -> None: ) await self._listener_conn.close() except Exception as e: - logger.warning(f"Error closing listener connection: {e}") + logger.warning("Error 
closing listener connection: %s", e) self._listener_conn = None async def _spawn_grader(self, snapshot_slug: SnapshotSlug, *, image: ResolvedImage) -> None: try: - logger.info(f"Starting grader for {snapshot_slug}") + logger.info("Starting grader for %s", snapshot_slug) handle = await self._registry.start_snapshot_grader( image=image, snapshot_slug=snapshot_slug, model=self._model ) self._handles[snapshot_slug] = handle - logger.info(f"Grader container {handle.name} running for {snapshot_slug}") + logger.info("Grader container %s running for %s", handle.name, snapshot_slug) except Exception: - logger.exception(f"Failed to start grader for {snapshot_slug}") + logger.exception("Failed to start grader for %s", snapshot_slug) async def _kill_grader(self, snapshot_slug: SnapshotSlug) -> None: handle = self._handles.pop(snapshot_slug, None) diff --git a/props/testing/fixtures/e2e_infra.py b/props/testing/fixtures/e2e_infra.py index 2c2ba41628..aa5808697a 100644 --- a/props/testing/fixtures/e2e_infra.py +++ b/props/testing/fixtures/e2e_infra.py @@ -98,7 +98,7 @@ def _delete_all_manifests(registry_url: str) -> None: # Delete by digest httpx.delete(f"{registry_url}/v2/{repo}/manifests/{digest}", timeout=5.0).raise_for_status() - logger.debug(f"Deleted {repo}:{tag} ({digest})") + logger.debug("Deleted %s:%s (%s)", repo, tag, digest) @pytest.fixture diff --git a/ruff.toml b/ruff.toml index 3eefa5d50a..1450386b49 100644 --- a/ruff.toml +++ b/ruff.toml @@ -73,6 +73,7 @@ select = [ "TRY401", # redundant exception object in logging.exception "TRY203", # useless try-except (just re-raises) "TRY400", # logging.error in except → logging.exception + "G", # logging format (lazy % over f-strings) "RUF" # ruff-specific ] # Keep these off globally; handled case-by-case or via constants diff --git a/skills/hetzner_vnc_screenshot/vnc_screenshot.py b/skills/hetzner_vnc_screenshot/vnc_screenshot.py index 446940ad03..cd5f41d72c 100644 --- a/skills/hetzner_vnc_screenshot/vnc_screenshot.py +++ 
b/skills/hetzner_vnc_screenshot/vnc_screenshot.py @@ -50,12 +50,12 @@ async def readline(self) -> bytes: try: logger.debug("Waiting for websocket recv()...") data = await self.ws.recv() - logger.debug(f"Received {len(data)} bytes: {data[:50]}...") + logger.debug("Received %s bytes: %s...", len(data), data[:50]) if isinstance(data, str): data = data.encode() self._buffer.extend(data) except websockets.exceptions.ConnectionClosed as e: - logger.debug(f"Connection closed: {e}") + logger.debug("Connection closed: %s", e) result = bytes(self._buffer) self._buffer.clear() return result @@ -63,7 +63,7 @@ async def readline(self) -> bytes: idx = self._buffer.index(b"\n") + 1 result = bytes(self._buffer[:idx]) del self._buffer[:idx] - logger.debug(f"readline() returning: {result!r}") + logger.debug("readline() returning: %r", result) return result async def read(self, n: int) -> bytes: @@ -97,7 +97,7 @@ async def _send_pending(self): if self._pending_write: data = self._pending_write self._pending_write = b"" - logger.debug(f"Sending {len(data)} bytes") + logger.debug("Sending %s bytes", len(data)) await self.ws.send(data) async def drain(self): @@ -106,30 +106,30 @@ async def drain(self): def request_console_credentials(server_name: str, token: str | None = None) -> tuple[str, str]: """Request VNC console credentials from Hetzner Cloud API.""" - logger.debug(f"Requesting console for server '{server_name}'") + logger.debug("Requesting console for server '%s'", server_name) if token is None: token = os.environ.get("HCLOUD_TOKEN") if not token: raise ValueError("HCLOUD_TOKEN environment variable not set and no --token provided") - logger.debug(f"Token length: {len(token)}") + logger.debug("Token length: %s", len(token)) logger.debug("Creating hcloud Client...") client = Client(token=token) logger.debug("Fetching servers...") servers = client.servers.get_all(name=server_name) - logger.debug(f"Found {len(servers)} servers") + logger.debug("Found %s servers", len(servers)) if 
not servers: raise ValueError(f"Server '{server_name}' not found") logger.debug("Requesting console from Hetzner API...") response = client.servers.request_console(servers[0]) - logger.debug(f"Got console URL: {response.wss_url[:50]}...") + logger.debug("Got console URL: %s...", response.wss_url[:50]) return response.wss_url, response.password async def vnc_screenshot(wss_url: str, password: str, output_path: str = "screenshot.png"): """Connect to VNC over WebSocket and capture a screenshot.""" - logger.debug(f"Connecting to {wss_url[:60]}...") + logger.debug("Connecting to %s...", wss_url[:60]) logger.debug("Opening websocket connection...") async with websockets.connect(wss_url, subprotocols=[websockets.Subprotocol("binary")]) as ws: @@ -143,13 +143,13 @@ async def vnc_screenshot(wss_url: str, password: str, output_path: str = "screen password=password, ) - logger.info(f"Connected. Screen: {client.video.width}x{client.video.height}") + logger.info("Connected. Screen: %sx%s", client.video.width, client.video.height) logger.debug("Taking screenshot...") pixels = await client.screenshot() logger.debug("Converting to image...") img = Image.fromarray(pixels) img.save(output_path) - logger.info(f"Screenshot saved to {output_path}") + logger.info("Screenshot saved to %s", output_path) app = typer.Typer(help="Hetzner VNC console screenshot tool") @@ -177,14 +177,16 @@ def main( if server: if url or password: - raise typer.BadParameter("Cannot use --url/--password with server name argument") - logger.info(f"Fetching console credentials for server '{server}'...") + msg = "Cannot use --url/--password with server name argument" + raise typer.BadParameter(msg) + logger.info("Fetching console credentials for server '%s'...", server) wss_url, vnc_password = request_console_credentials(server, token) logger.info("Got console credentials") elif url and password: wss_url, vnc_password = url, password else: - raise typer.BadParameter("Provide either server name or both --url and 
--password") + msg = "Provide either server name or both --url and --password" + raise typer.BadParameter(msg) asyncio.run(vnc_screenshot(wss_url, vnc_password, str(output))) diff --git a/tana/query/search/evaluator.py b/tana/query/search/evaluator.py index 1652eec29c..6bdc3390a1 100644 --- a/tana/query/search/evaluator.py +++ b/tana/query/search/evaluator.py @@ -67,7 +67,7 @@ def evaluate(self, expression: SearchExpression, context: BaseNode | None = None yield from results - def _evaluate_tag(self, tag_node_id: NodeId) -> Iterator[BaseNode]: + def evaluate_tag(self, tag_node_id: NodeId) -> Iterator[BaseNode]: """ Find all nodes with a specific tag. @@ -85,7 +85,7 @@ def _evaluate_tag(self, tag_node_id: NodeId) -> Iterator[BaseNode]: # Use the existing filter_by_tag function yield from filter_by_tag(self.store, tag_node.name, skip_trash=self.skip_trash, skip_deleted=self.skip_deleted) - def _evaluate_type(self, type_node_id: NodeId) -> Iterator[BaseNode]: + def evaluate_type(self, type_node_id: NodeId) -> Iterator[BaseNode]: """ Find all nodes of a specific system type. @@ -107,7 +107,7 @@ def matches_type(node: BaseNode) -> bool: yield from filter_nodes(self.store, matches_type, skip_trash=self.skip_trash, skip_deleted=self.skip_deleted) - def _evaluate_text(self, text: str) -> Iterator[BaseNode]: + def evaluate_text(self, text: str) -> Iterator[BaseNode]: """ Find nodes matching text criteria. @@ -130,7 +130,7 @@ def matches_text(node: BaseNode) -> bool: yield from filter_nodes(self.store, matches_text, skip_trash=self.skip_trash, skip_deleted=self.skip_deleted) - def _evaluate_field(self, field_name: str, values: list[str]) -> Iterator[BaseNode]: + def evaluate_field(self, field_name: str, values: list[str]) -> Iterator[BaseNode]: """ Find nodes with specific field values. 
@@ -157,7 +157,7 @@ def _evaluate_field(self, field_name: str, values: list[str]) -> Iterator[BaseNo skip_deleted=self.skip_deleted, ) - def _evaluate_boolean(self, operator: BooleanOperator, operands: list[SearchExpression]) -> Iterator[BaseNode]: + def evaluate_boolean(self, operator: BooleanOperator, operands: list[SearchExpression]) -> Iterator[BaseNode]: """ Evaluate a boolean expression. @@ -234,24 +234,24 @@ def _evaluate_dispatch(expression: SearchExpression, evaluator: SearchEvaluator) @_evaluate_dispatch.register(TagSearch) def _(expression: TagSearch, evaluator: SearchEvaluator) -> Iterator[BaseNode]: - yield from evaluator._evaluate_tag(expression.tag_id) + yield from evaluator.evaluate_tag(expression.tag_id) @_evaluate_dispatch.register(TypeSearch) def _(expression: TypeSearch, evaluator: SearchEvaluator) -> Iterator[BaseNode]: - yield from evaluator._evaluate_type(expression.type_id) + yield from evaluator.evaluate_type(expression.type_id) @_evaluate_dispatch.register(TextSearch) def _(expression: TextSearch, evaluator: SearchEvaluator) -> Iterator[BaseNode]: - yield from evaluator._evaluate_text(expression.text) + yield from evaluator.evaluate_text(expression.text) @_evaluate_dispatch.register(FieldSearch) def _(expression: FieldSearch, evaluator: SearchEvaluator) -> Iterator[BaseNode]: - yield from evaluator._evaluate_field(expression.field_name, expression.values) + yield from evaluator.evaluate_field(expression.field_name, expression.values) @_evaluate_dispatch.register(BooleanSearch) def _(expression: BooleanSearch, evaluator: SearchEvaluator) -> Iterator[BaseNode]: - yield from evaluator._evaluate_boolean(expression.operator, expression.operands) + yield from evaluator.evaluate_boolean(expression.operator, expression.operands) diff --git a/tana/token_broker/broker.py b/tana/token_broker/broker.py index 6137655c89..5d7bd0d1e4 100644 --- a/tana/token_broker/broker.py +++ b/tana/token_broker/broker.py @@ -57,7 +57,7 @@ class 
_TanaNotReadyError(Exception): retry=retry_if_exception_type((_TanaNotReadyError, httpx.ConnectError, httpx.HTTPError)), wait=wait_exponential(multiplier=1, min=2, max=60), stop=stop_never, - before_sleep=lambda rs: logger.info(f"Tana not ready, retry attempt {rs.attempt_number}"), + before_sleep=lambda rs: logger.info("Tana not ready, retry attempt %s", rs.attempt_number), ) async def _wait_for_tana(http: httpx.AsyncClient, tana_url: str) -> None: resp = await http.get(f"{tana_url}/health") @@ -74,7 +74,7 @@ async def _register_client(http: httpx.AsyncClient, tana_url: str, redirect_uri: resp.raise_for_status() data = resp.json() client_id: str = data["client_id"] - logger.info(f"Registered OAuth client {client_id=}") + logger.info("Registered OAuth client %s", client_id) return client_id @@ -227,11 +227,11 @@ async def _write_secret(api: client.CoreV1Api, cfg: BrokerConfig, token: TokenRe try: await api.read_namespaced_secret(cfg.secret_name, cfg.namespace) await api.replace_namespaced_secret(cfg.secret_name, cfg.namespace, secret) - logger.info(f"Updated secret {cfg.namespace}/{cfg.secret_name}") + logger.info("Updated secret %s/%s", cfg.namespace, cfg.secret_name) except ApiException as e: if e.status == 404: await api.create_namespaced_secret(cfg.namespace, secret) - logger.info(f"Created secret {cfg.namespace}/{cfg.secret_name}") + logger.info("Created secret %s/%s", cfg.namespace, cfg.secret_name) else: raise @@ -302,7 +302,7 @@ async def run_broker(cfg: BrokerConfig) -> None: ) token = await _exchange_code(http, cfg.tana_url, client_id, auth_code, code_verifier, redirect_uri) await _write_secret(k8s_api, cfg, token) - logger.info(f"Token written, expires_at={token.expires_at}") + logger.info("Token written, expires_at=%s", token.expires_at) # Sleep until refresh needed await _sleep_until_refresh(token, cfg.refresh_margin_seconds) @@ -322,5 +322,5 @@ async def _sleep_until_refresh(token: TokenResult, margin_seconds: int) -> None: refresh_at = expires_at - 
margin_seconds sleep_seconds = max(refresh_at - time.time(), 60) - logger.info(f"Sleeping {sleep_seconds:.0f}s until next refresh") + logger.info("Sleeping %.0fs until next refresh", sleep_seconds) await asyncio.sleep(sleep_seconds) diff --git a/tana/token_broker/cli.py b/tana/token_broker/cli.py index 2b1f28afbb..2b4328becc 100644 --- a/tana/token_broker/cli.py +++ b/tana/token_broker/cli.py @@ -30,7 +30,12 @@ def main() -> None: namespace=_detect_namespace(), refresh_margin_seconds=int(os.environ.get("REFRESH_MARGIN_SECONDS", "3600")), ) - logger.info(f"Starting tana token broker: {cfg.tana_url=} {cfg.namespace=} {cfg.secret_name=}") + logger.info( + "Starting tana token broker: tana_url=%s namespace=%s secret_name=%s", + cfg.tana_url, + cfg.namespace, + cfg.secret_name, + ) asyncio.run(run_broker(cfg)) diff --git a/wt/server/worktree_service.py b/wt/server/worktree_service.py index 0fd0f878ea..91138f4427 100644 --- a/wt/server/worktree_service.py +++ b/wt/server/worktree_service.py @@ -101,12 +101,12 @@ def create_worktree( # Hydrate with dirty state if source provided if config.hydrate_worktrees: if source_worktree: - logger.info(f"Hydrating new worktree in {worktree_path} from {source_worktree}.") + logger.info("Hydrating new worktree in %s from %s.", worktree_path, source_worktree) if not source_worktree.exists(): raise RuntimeError(f"Source worktree does not exist: {source_worktree}") self._hydrate_worktree(config, source_worktree, worktree_path) else: - logger.info(f"Hydrating new worktree in {worktree_path} by checking out {branch_name}.") + logger.info("Hydrating new worktree in %s by checking out %s.", worktree_path, branch_name) repo = pygit2.Repository(worktree_path) repo.set_head(f"refs/heads/{branch_name}") repo.checkout_head(strategy=pygit2.GIT_CHECKOUT_FORCE) diff --git a/x/agent_server/mcp/approval_policy/engine.py b/x/agent_server/mcp/approval_policy/engine.py index 7166de3650..9dd947685d 100644 --- a/x/agent_server/mcp/approval_policy/engine.py 
+++ b/x/agent_server/mcp/approval_policy/engine.py @@ -269,7 +269,7 @@ def __init__( evaluate_policy: Callable[[PolicyRequest], Awaitable[PolicyResponse]], record_outcome: Callable[[str, str, ApprovalOutcome], Awaitable[None]] | None = None, ) -> None: - self._hub = hub + self.hub = hub self._evaluate = evaluate_policy self._record = record_outcome self._inflight: dict[str, str] = {} @@ -288,14 +288,14 @@ async def on_call_tool(self, context: MiddlewareContext[Any], call_next: CallNex tool_key = name # Evaluate policy - logger.warning(f"[POLICY_MW] Evaluating policy for tool: {name}") + logger.warning("[POLICY_MW] Evaluating policy for tool: %s", name) try: decision_res = await self._evaluate( PolicyRequest(name=name, arguments_json=_serialize_arguments_json(arguments)) ) decision = decision_res.decision rationale = decision_res.rationale - logger.warning(f"[POLICY_MW] Decision for {name}: {decision} ({rationale})") + logger.warning("[POLICY_MW] Decision for %s: %s (%s)", name, decision, rationale) except Exception as e: logger.warning("policy evaluator error", exc_info=e) raise McpError( @@ -353,7 +353,7 @@ async def on_call_tool(self, context: MiddlewareContext[Any], call_next: CallNex tool_key=tool_key, tool_call=ApprovalToolCall(name=name, call_id=call_id, args_json=_serialize_arguments_json(arguments)), ) - decision_obj = await self._hub.await_decision(call_id, req) + decision_obj = await self.hub.await_decision(call_id, req) if isinstance(decision_obj, ContinueDecision): if self._record is not None: @@ -442,7 +442,7 @@ def pending_calls() -> str: tool_key=req.tool_key, args_json=req.tool_call.args_json if req.tool_call else None, ) - for call_id, req in engine._hub.pending.items() + for call_id, req in engine.hub.pending.items() ] ) return response.model_dump_json() @@ -452,7 +452,7 @@ def pending_calls() -> str: # Register tool with typed attribute async def evaluate_policy(input: PolicyRequest) -> PolicyResponse: """Evaluate a policy decision for a single 
tool call via Docker-backed evaluator.""" - return await engine._evaluate_policy(input) + return await engine.evaluate_policy(input) self.evaluate_policy_tool = self.flat_model()(evaluate_policy) @@ -511,13 +511,13 @@ async def decide_call(input: DecideCallArgs) -> SimpleOk: decision = input.decision if decision == CallDecision.APPROVE: - engine._hub.resolve(call_id, ContinueDecision()) + engine.hub.resolve(call_id, ContinueDecision()) elif decision == CallDecision.DENY_ABORT: - engine._hub.resolve(call_id, AbortTurnDecision(reason="user_denied")) + engine.hub.resolve(call_id, AbortTurnDecision(reason="user_denied")) elif decision == CallDecision.DENY_CONTINUE: # Continue without executing - resolve with continue decision # The call is skipped but turn continues - engine._hub.resolve(call_id, ContinueDecision()) + engine.hub.resolve(call_id, ContinueDecision()) return SimpleOk() async def decide_proposal(input: DecideProposalArgs) -> SimpleOk: @@ -572,7 +572,7 @@ def __init__( self._bg_tasks: set[asyncio.Task] = set() # Create hub with on_change callback that broadcasts pending://calls - self._hub = _ApprovalHub(on_change=self._on_hub_change) + self.hub = _ApprovalHub(on_change=self._on_hub_change) # Create owned servers (now using extracted server classes) self.reader = PolicyReaderServer(self) @@ -581,7 +581,7 @@ def __init__( # Protocol-level resource subscriptions on reader self._session_subscriptions: defaultdict[ServerSession, set[AnyUrl]] = defaultdict(set) - mcp_server = self.reader._mcp_server + mcp_server = self.reader.mcp_server def _subscriptions() -> set[AnyUrl]: return self._session_subscriptions[mcp_server.request_context.session] @@ -596,7 +596,7 @@ async def _unsubscribe(uri: AnyUrl): # Create gateway middleware (uses internal evaluate method) self._gateway = _PolicyGatewayMiddleware( - hub=self._hub, evaluate_policy=self._evaluate_policy, record_outcome=self._record_outcome + hub=self.hub, evaluate_policy=self.evaluate_policy, 
record_outcome=self._record_outcome ) # Internal reader client for gateway (created lazily) @@ -615,13 +615,13 @@ def _on_hub_change(self) -> None: self._bg_tasks.add(task) task.add_done_callback(self._bg_tasks.discard) - async def _evaluate_policy(self, request: PolicyRequest) -> PolicyResponse: + async def evaluate_policy(self, request: PolicyRequest) -> PolicyResponse: """Evaluate policy for a tool call via Docker-backed evaluator (uses injected client).""" - logger.warning(f"[EVAL_START] Starting policy evaluation for {request.name}") + logger.warning("[EVAL_START] Starting policy evaluation for %s", request.name) evaluator = ContainerPolicyEvaluator(agent_id=self.agent_id, docker_client=self._docker_client, engine=self) logger.warning("[EVAL_EVALUATOR] Evaluator created, calling decide") result = await evaluator.decide(request) - logger.warning(f"[EVAL_RESULT] Got result: {result.decision}") + logger.warning("[EVAL_RESULT] Got result: %s", result.decision) return result async def _record_outcome(self, call_id: str, tool_key: str, outcome: ApprovalOutcome) -> None: diff --git a/x/agent_server/mcp_bridge/auth.py b/x/agent_server/mcp_bridge/auth.py index 2a599d4a4f..012d6408b1 100644 --- a/x/agent_server/mcp_bridge/auth.py +++ b/x/agent_server/mcp_bridge/auth.py @@ -58,14 +58,14 @@ def from_yaml_file(cls, path: Path | None = None) -> TokensConfig: config_path = path or Path(os.getenv("ADGN_TOKENS_PATH", str(DEFAULT_TOKENS_PATH))) if not config_path.exists(): - logger.warning(f"Tokens config not found at {config_path}, using empty tokens") + logger.warning("Tokens config not found at %s, using empty tokens", config_path) return cls() with config_path.open() as f: data = yaml.safe_load(f) or {} config = cls.model_validate(data) - logger.info(f"Loaded {len(config.users)} user tokens, {len(config.agents)} agent tokens") + logger.info("Loaded %s user tokens, %s agent tokens", len(config.users), len(config.agents)) return config @staticmethod @@ -129,14 +129,14 @@ 
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: # Route based on token if user_id := self.user_tokens.get(token): - logger.debug(f"Routing to user compositor for user: {user_id}") + logger.debug("Routing to user compositor for user: %s", user_id) await self.user_app(scope, receive, send) elif agent_id := self.agent_tokens.get(token): if agent_app := self.agent_apps.get(agent_id): - logger.debug(f"Routing to agent compositor for agent: {agent_id}") + logger.debug("Routing to agent compositor for agent: %s", agent_id) await agent_app(scope, receive, send) else: - logger.warning(f"Agent app not found for agent_id: {agent_id}") + logger.warning("Agent app not found for agent_id: %s", agent_id) await self._send_error(scope, receive, send, f"Agent not found: {agent_id}", 404) else: await self._send_error(scope, receive, send, "Invalid token", 401) diff --git a/x/agent_server/mcp_bridge/registry.py b/x/agent_server/mcp_bridge/registry.py index 3589215359..86279c10f0 100644 --- a/x/agent_server/mcp_bridge/registry.py +++ b/x/agent_server/mcp_bridge/registry.py @@ -108,23 +108,23 @@ async def _register_and_mount_agent(self, container: AgentContainer, *, external await self._mount_agent_control(container) # Mount agent compositor to global - if container._compositor is None: + if container.compositor is None: raise RuntimeError(f"Agent container {container.agent_id} has no compositor after build") mount_prefix = agent_mount_prefix(container.agent_id) - await comp.mount_inproc(mount_prefix, container._compositor) + await comp.mount_inproc(mount_prefix, container.compositor) async def _mount_agent_control(self, container: AgentContainer) -> None: """Mount agent_control server on container's compositor. Only for internal agents - provides send_prompt and abort tools. 
""" - if container._compositor is None: + if container.compositor is None: raise RuntimeError(f"Agent container {container.agent_id} has no compositor for agent_control mount") control_server = container.make_control_server() - await container._compositor.mount_inproc(AGENT_CONTROL_MOUNT_PREFIX, control_server) - logger.debug(f"Mounted agent_control for internal agent: {container.agent_id}") + await container.compositor.mount_inproc(AGENT_CONTROL_MOUNT_PREFIX, control_server) + logger.debug("Mounted agent_control for internal agent: %s", container.agent_id) async def _create_container( self, @@ -253,7 +253,7 @@ async def create_external_agent(self, agent_id: AgentID) -> AgentContainer: container = await self._create_container(agent_id, mcp_config=mcp_config, external=True) await self._register_and_mount_agent(container, external=True) - logger.info(f"Created external agent: {agent_id}") + logger.info("Created external agent: %s", agent_id) return container async def shutdown_agent(self, agent_id: AgentID) -> None: @@ -274,7 +274,7 @@ async def shutdown_agent(self, agent_id: AgentID) -> None: mount_prefix = agent_mount_prefix(agent_id) await comp.unmount_server(mount_prefix) except KeyError: - logger.debug(f"Agent {agent_id} already unmounted") + logger.debug("Agent %s already unmounted", agent_id) # Close container container = self._agents.pop(agent_id) @@ -283,7 +283,7 @@ async def shutdown_agent(self, agent_id: AgentID) -> None: # Clean up external tracking self._external_agents.discard(agent_id) - logger.info(f"Shutdown agent: {agent_id}") + logger.info("Shutdown agent: %s", agent_id) async def shutdown_all(self) -> None: """Shutdown all agents. 
diff --git a/x/agent_server/presets.py b/x/agent_server/presets.py index 03f6e227f1..46deff58f9 100644 --- a/x/agent_server/presets.py +++ b/x/agent_server/presets.py @@ -2,7 +2,7 @@ import logging import os -from datetime import datetime +from datetime import UTC, datetime from pathlib import Path from typing import TYPE_CHECKING @@ -58,10 +58,13 @@ def load_presets_from_dir(root: Path) -> dict[str, AgentPreset]: preset = AgentPreset.model_validate(data) stat = p.stat() # Fail fast on OS errors preset.file_path = str(p) - preset.modified_at = datetime.fromtimestamp(stat.st_mtime).isoformat() + preset.modified_at = datetime.fromtimestamp(stat.st_mtime, tz=UTC).isoformat() if preset.name in out: logger.warning( - f"Preset name collision: '{preset.name}' from {p} overrides preset from {out[preset.name].file_path}" + "Preset name collision: '%s' from %s overrides preset from %s", + preset.name, + p, + out[preset.name].file_path, ) out[preset.name] = preset return out @@ -99,7 +102,10 @@ def discover_presets(*, override_dir: str | Path | None = None) -> dict[str, Age out[name] = preset else: logger.warning( - f"Preset name collision: '{name}' from {preset.file_path} skipped (already loaded from {out[name].file_path})" + "Preset name collision: '%s' from %s skipped (already loaded from %s)", + name, + preset.file_path, + out[name].file_path, ) # Always include a built-in default if none present if "default" not in out: diff --git a/x/agent_server/server/app.py b/x/agent_server/server/app.py index 5e5aa8b91b..3d513560c5 100644 --- a/x/agent_server/server/app.py +++ b/x/agent_server/server/app.py @@ -119,7 +119,7 @@ async def _on_startup() -> None: config = TokensConfig.from_yaml_file() for agent_id in config.agent_tokens().values(): await app.state.mcp_registry.create_external_agent(agent_id) - logger.info(f"Created external agent from token: {agent_id}") + logger.info("Created external agent from token: %s", agent_id) # Multi-agent: agents should be created via API 
after startup app.state.ready.set() @@ -144,8 +144,8 @@ async def _on_shutdown() -> None: # Legacy registry path for backwards compatibility for container in app.state.registry.list(): # Flush legacy UI manager - if container._ui_manager: - await container._ui_manager.flush() + if ui_mgr := getattr(container, "ui_manager", None): + await ui_mgr.flush() await app.state.registry.close_all() # Close async Docker client diff --git a/x/agent_server/server/runtime.py b/x/agent_server/server/runtime.py index af5814737a..68f7d425bc 100644 --- a/x/agent_server/server/runtime.py +++ b/x/agent_server/server/runtime.py @@ -48,7 +48,7 @@ def _next_event_id(self) -> int: async def _send_and_reduce(self, payload: ServerMessage) -> None: assert self._session is not None - await self._session._apply_ui_event(payload) + await self._session.apply_ui_event(payload) async def _emit_ui_bus_messages(self) -> None: assert self._session is not None @@ -125,7 +125,7 @@ def attach_agent(self, agent: Agent, *, model: str | None = None, system: str | def set_persist_handler(self, handler: RunPersistenceHandler) -> None: self._persist_handler = handler - async def _apply_ui_event(self, evt: ServerMessage) -> None: + async def apply_ui_event(self, evt: ServerMessage) -> None: self.ui_state = self._reducer.reduce(self.ui_state, evt) async def run(self, prompt: str) -> None: @@ -165,9 +165,9 @@ async def _run_impl(self, prompt: str) -> None: except asyncio.CancelledError: # Error now logged, not sent via dead send_payload logger.debug("agent_run_cancelled") - except Exception as e: + except Exception: # Error now logged, not sent via dead send_payload - logger.error(f"agent_run_exception: {e}", exc_info=True) + logger.exception("agent_run_exception") finally: await self._manager.flush() if self._persist_handler is not None: diff --git a/x/claude_hooks/base.py b/x/claude_hooks/base.py index 043667b5d3..858abc886e 100644 --- a/x/claude_hooks/base.py +++ b/x/claude_hooks/base.py @@ -77,7 +77,7 @@ def 
run_hook(self) -> None: try: # Read and log input raw_input = sys.stdin.read() - self.logger.info(f"Raw input (first 200 chars): {raw_input[:200]}...") + self.logger.info("Raw input (first 200 chars): %s...", raw_input[:200]) # Parse input input_data = json.loads(raw_input) @@ -93,11 +93,11 @@ def truncate_strings(obj, maxlen=100): return obj truncated_data = truncate_strings(input_data, 100) - self.logger.info(f"Parsed JSON: {json.dumps(truncated_data, indent=2)}") + self.logger.info("Parsed JSON: %s", json.dumps(truncated_data, indent=2)) hook_input = self.INPUT_MODEL.model_validate(input_data) # TODO: should know what type hook_input is, not use defensive programming - self.logger.info(f"Parsed input - event: {hook_input.hook_event_name}") + self.logger.info("Parsed input - event: %s", hook_input.hook_event_name) # Create context from hook input (use hook_event_name from JSON) context = HookContext( @@ -111,12 +111,12 @@ def truncate_strings(obj, maxlen=100): set_hook_context(invocation_id=context.invocation_id, name=self.hook_name, session_id=context.session_id) action = self.execute(hook_input, context) - self.logger.info(f"Hook executed: {action}") + self.logger.info("Hook executed: %s", action) # Convert to protocol JSON and exit protocol_dict = action.to_protocol() output_json = json.dumps(protocol_dict) - self.logger.info(f"Output JSON: {output_json}") + self.logger.info("Output JSON: %s", output_json) print(output_json) sys.exit(0) except Exception: diff --git a/x/claude_hooks/precommit_autofix.py b/x/claude_hooks/precommit_autofix.py new file mode 100644 index 0000000000..b49795c984 --- /dev/null +++ b/x/claude_hooks/precommit_autofix.py @@ -0,0 +1,289 @@ +"""Pre-commit autofix hook implementation.""" + +import ast +import textwrap +import traceback +from dataclasses import dataclass +from pathlib import Path + +import pygit2 +from platformdirs import user_state_dir + +from claude_hooks.actions import PostToolAction, PostToolContinue, 
PostToolFeedbackToClaude +from claude_hooks.base import PostToolUseHook +from claude_hooks.config import AutofixerConfig +from claude_hooks.inputs import HookContext, PostToolInput +from claude_hooks.logging_context import get_current_invocation_id +from claude_hooks.tool_models import EditInput, MultiEditInput, WriteInput +from util.bazel.subprocess import run_python_module + +PRECOMMIT_CONFIG_FILE = ".pre-commit-config.yaml" + + +@dataclass +class ChangesMade: + """Pre-commit ran successfully and made changes to the file.""" + + +@dataclass +class NoChanges: + """Pre-commit ran successfully but made no changes to the file.""" + + +@dataclass +class Crashed: + """Pre-commit failed unexpectedly with detailed error information.""" + + stdout: str + stderr: str + exit_code: int + + +PreCommitResult = ChangesMade | NoChanges | Crashed + + +def extract_file_path(tool_input) -> Path | None: + """Extract file path from tool input if available.""" + if isinstance(tool_input, EditInput | MultiEditInput | WriteInput): + return Path(tool_input.file_path) + return None + + +def truncate_output(output: str, max_lines: int = 20) -> str: + """Truncate output if too long, showing first and last lines. + + Args: + output: Output string to truncate + max_lines: Maximum lines to show per section (first/last) + + Returns: + Truncated output string + """ + if not output.strip(): + return "(no output)" + + lines = output.strip().split("\n") + total_lines = len(lines) + + if total_lines <= max_lines * 2: + return "\n".join(lines) + + first_part = lines[:max_lines] + last_part = lines[-max_lines:] + omitted_count = total_lines - (max_lines * 2) + + return "\n".join([*first_part, f"... {omitted_count} lines omitted ...", *last_part]) + + +def format_crashed_output(stdout: str, stderr: str, exit_code: int) -> str: + """Format output from crashed pre-commit run for user display. 
+ + Args: + stdout: Standard output from pre-commit + stderr: Standard error from pre-commit + exit_code: Exit code from pre-commit + + Returns: + Formatted output string with truncated stdout/stderr + """ + parts = [] + + if stdout.strip(): + parts.append("STDOUT:") + parts.append(truncate_output(stdout)) + parts.append("") # Empty line separator + + if stderr.strip(): + parts.append("STDERR:") + parts.append(truncate_output(stderr)) + parts.append("") # Empty line separator + + parts.append(f"Exit code: {exit_code}") + + return "\n".join(parts) + + +def check_python_syntax(file_path: Path) -> tuple[bool, str | None]: + """Check if a Python file has valid syntax. + + Returns: + (is_valid, error_message): Tuple with validity and error message if invalid + """ + if file_path.suffix != ".py": + return True, None # Not a Python file, no syntax check needed + + try: + source_code = Path(file_path).read_text(encoding="utf-8") + + # Parse the source code to check for syntax errors + ast.parse(source_code, filename=str(file_path)) + return True, None + except SyntaxError as e: + error_msg = f"SyntaxError in {file_path.name}:{e.lineno}:{e.offset}: {e.msg}" + return False, error_msg + except (OSError, UnicodeDecodeError) as e: + error_msg = f"Error reading {file_path.name}: {e}" + return False, error_msg + + +class PreCommitAutoFixerHook(PostToolUseHook): + """Hook that automatically runs pre-commit autofix on Claude-modified files.""" + + def __init__(self): + super().__init__("precommit_autofix") + self.autofixer_config = AutofixerConfig.model_validate(self.config) + + def execute(self, hook_input: PostToolInput, context: HookContext) -> PostToolAction: + self.logger.info( + "Config: enabled=%s, dry_run=%s, tools=%s", + self.autofixer_config.enabled, + self.autofixer_config.dry_run, + self.autofixer_config.tools, + ) + + if not self.autofixer_config.enabled: + self.logger.info("Hook disabled by configuration") + return PostToolContinue() + + if hook_input.tool_name not in 
class PreCommitAutoFixerHook(PostToolUseHook):
    """Hook that automatically runs pre-commit autofix on Claude-modified files."""

    def __init__(self):
        super().__init__("precommit_autofix")
        # Hook-specific settings: enabled flag, dry_run, tool allowlist, timeout.
        self.autofixer_config = AutofixerConfig.model_validate(self.config)

    def execute(self, hook_input: PostToolInput, context: HookContext) -> PostToolAction:
        """Run pre-commit on the file touched by the tool call.

        Returns:
            PostToolContinue when nothing needs to be reported, otherwise
            PostToolFeedbackToClaude with a status or error message.
        """
        self.logger.info(
            "Config: enabled=%s, dry_run=%s, tools=%s",
            self.autofixer_config.enabled,
            self.autofixer_config.dry_run,
            self.autofixer_config.tools,
        )

        if not self.autofixer_config.enabled:
            self.logger.info("Hook disabled by configuration")
            return PostToolContinue()

        if hook_input.tool_name not in self.autofixer_config.tools:
            self.logger.info("Tool %s not in tools list", hook_input.tool_name)
            return PostToolContinue()

        file_path = extract_file_path(hook_input.tool_input)
        if not file_path:
            self.logger.info("No file path found in tool input")
            return PostToolContinue()

        if not file_path.exists():
            self.logger.info("File does not exist: %s", file_path)
            return PostToolContinue()

        self.logger.info("Processing file: %s", file_path)

        # Check Python syntax before running pre-commit
        is_valid, syntax_error = check_python_syntax(file_path)
        if not is_valid:
            self.logger.warning("Skipping pre-commit due to syntax error: %s", syntax_error)
            return PostToolFeedbackToClaude(feedback_to_claude=f"⚠️ Fix {syntax_error}.")

        try:
            result = self._run_precommit_autofix(file_path, context)
            if isinstance(result, ChangesMade):
                message = "🧹 pre-commit autofixes applied"
                self.logger.info(message)
                return PostToolFeedbackToClaude(feedback_to_claude=message)
            if isinstance(result, Crashed):
                # Pre-commit failed unexpectedly, show user feedback with formatted output
                formatted_output = format_crashed_output(result.stdout, result.stderr, result.exit_code)
                return PostToolFeedbackToClaude(
                    feedback_to_claude=f"⚠️ Pre-commit failed on {file_path.name}\nCommand: pre-commit run --files {file_path}\n\n{formatted_output}"
                )
        except Exception as e:
            # Catch all exceptions to provide user-friendly error messages instead of crashing Claude Code.
            # This includes file system errors (OSError, FileNotFoundError), subprocess failures
            # (SubprocessError), and any unexpected errors from dependencies like pre-commit itself.
            # The broad catch ensures users get actionable feedback with logs and tracebacks
            # rather than cryptic tool failures.
            self.logger.exception("Pre-commit autofix failed")
            # Show feedback for unexpected exceptions with debugging guidance
            tb_str = traceback.format_exc()
            invocation_id = get_current_invocation_id() or "unknown"
            log_path = Path(user_state_dir("adgn-claude-hooks")) / f"{self.hook_name}.log"

            debug_message = textwrap.dedent(f"""
                Unhandled {type(e).__name__} from PreCommitAutoFixerHook: {e!s}
                Logs: {log_path}
                Look for invocation ID: {invocation_id}
                Traceback:
                {tb_str}
            """).strip()

            return PostToolFeedbackToClaude(feedback_to_claude=debug_message)
        # NoChanges falls through: nothing to report.
        return PostToolContinue()

    def _run_precommit_autofix(self, file_path: Path, context: HookContext) -> PreCommitResult:
        """Run pre-commit on a specific file.

        Returns:
            ChangesMade: file content was modified by pre-commit
            NoChanges: pre-commit ran successfully but made no changes
            Crashed: pre-commit failed unexpectedly with detailed error info
        """
        if self.autofixer_config.dry_run:
            self.logger.info("DRY RUN: Would run pre-commit on %s", file_path)
            return NoChanges()

        try:
            # BUG FIX: mtime comparison (new_mtime > original_mtime) can miss edits
            # on filesystems with coarse timestamp granularity or when tools preserve
            # mtime. Compare the actual file content before/after instead.
            original_content = file_path.read_bytes()

            config_root = self._get_precommit_root(file_path)

            config_file = config_root / PRECOMMIT_CONFIG_FILE
            self.logger.info("Running pre-commit on %s with config %s", file_path, config_file)

            result = run_python_module(
                "pre_commit",
                "run",
                "--config",
                str(config_file),
                "--files",
                str(file_path),
                cwd=config_root,
                capture_output=True,
                text=True,
                timeout=self.autofixer_config.timeout_seconds,
                check=False,
            )

            # Log subprocess result for debugging
            self.logger.info("Pre-commit exit code: %s", result.returncode)
            if result.stdout.strip():
                self.logger.info("Pre-commit stdout: %s", result.stdout.strip())
            if result.stderr.strip():
                self.logger.warning("Pre-commit stderr: %s", result.stderr.strip())

            # Handle different exit codes
            # See: https://pre-commit.com/#exit-codes
            if result.returncode in (0, 1):
                # Exit code 0: Success (standard Unix convention)
                # Exit code 1: "A detected / expected error" - hooks found issues and possibly fixed them
                changed = file_path.read_bytes() != original_content
                self.logger.info("File content changed: %s", changed)
                return ChangesMade() if changed else NoChanges()
            # Exit code 3: "An unexpected error", 130: "interrupted by ^C", etc.
            self.logger.warning("Pre-commit unexpected exit code %s", result.returncode)
            return Crashed(stdout=result.stdout, stderr=result.stderr, exit_code=result.returncode)
        except Exception:
            self.logger.exception("Error in _run_precommit_autofix")
            # Re-raise exceptions that aren't pre-commit failures
            raise

    def _get_precommit_root(self, file_path: Path) -> Path:
        """Get pre-commit configuration root directory using pygit2.

        Raises:
            RuntimeError: when the file is not inside a git repo, or the repo
                root has no pre-commit config file.
        """
        search_dir = file_path.parent if file_path.is_file() else file_path

        # Use pygit2 to find repository root, starting from file's directory
        self.logger.info("Looking for git repo starting from: %s", search_dir)
        git_path = pygit2.discover_repository(str(search_dir))
        if git_path is None:
            self.logger.info("Not in git repo: %s", search_dir)
            raise RuntimeError(f"Not in a git repository: {search_dir}")

        repo = pygit2.Repository(git_path)
        repo_root = Path(repo.workdir).resolve()
        self.logger.info("Found git repo root: %s", repo_root)
        if (repo_root / PRECOMMIT_CONFIG_FILE).exists():
            self.logger.info("Found pre-commit config at: %s", repo_root / PRECOMMIT_CONFIG_FILE)
            return repo_root

        raise RuntimeError(f"No pre-commit config in git repo root: {repo_root}")


def main():
    PreCommitAutoFixerHook().run_hook()


if __name__ == "__main__":
    main()
class FileChecker:
    """Checks files for violations and optionally fixes them."""

    def __init__(
        self, *, fix: bool = False, categories: list[AutofixCategory] | None = None, verbose: bool = False
    ) -> None:
        """
        Initialize the file checker.

        Args:
            fix: Whether to fix issues
            categories: Autofix categories to apply (empty = all)
            verbose: Enable verbose output
        """
        self.fix = fix
        self.categories = categories or []
        self.verbose = verbose

        # Load config once; shared by every check_file call.
        self.config_loader = ConfigLoader()
        self.config = self.config_loader.config

    def check_file(self, file_path: Path) -> list[Violation]:
        """
        Check a single file for violations.

        Args:
            file_path: Path to the file to check

        Returns:
            List of violations found (re-computed after autofixes when fixing)
        """
        violations: list[Violation] = []

        # Only check Python files for now
        if file_path.suffix != ".py":
            return violations

        try:
            content = file_path.read_text()
        except (OSError, UnicodeDecodeError):
            # BUG FIX: the log message previously omitted which file failed.
            logger.exception("Failed to read %s", file_path)
            return violations

        # Run AST checks
        bare_except_config = self.config.get_rule_config("python.bare_except")
        hasattr_config = self.config.get_rule_config("python.hasattr")
        getattr_config = self.config.get_rule_config("python.getattr")
        setattr_config = self.config.get_rule_config("python.setattr")
        barrel_init_config = self.config.get_rule_config("python.barrel_init")

        analyzer = PythonASTAnalyzer(
            bare_except=bare_except_config.enabled if bare_except_config else False,
            getattr_setattr=(
                (hasattr_config.enabled if hasattr_config else False)
                or (getattr_config.enabled if getattr_config else False)
                or (setattr_config.enabled if setattr_config else False)
            ),
            barrel_init=file_path.name == "__init__.py"
            and (barrel_init_config.enabled if barrel_init_config else False),
        )
        ast_violations = analyzer.analyze_code(content, file_path)
        violations.extend(ast_violations)

        # Run ruff checks
        ruff_linter = PythonRuffLinter(force_select=self.config.get_ruff_codes_to_select())
        ruff_violations = ruff_linter.check_code(content, file_path, critical_only=False)
        violations.extend(ruff_violations)

        # Apply fixes if requested
        if self.fix and self.categories:
            formatter = PythonFormatter(self.config.python_tools)
            formatted_content, changes = formatter.format_code(content, file_path, self.categories)

            if changes and formatted_content != content:
                try:
                    file_path.write_text(formatted_content)
                    if self.verbose:
                        logger.info("Applied fixes to %s: %s", file_path, ", ".join(changes))

                    # Re-check after fixing to get updated violations
                    violations = self.check_file(file_path)
                except OSError:
                    # BUG FIX: the log message previously omitted which file failed.
                    logger.exception("Failed to write fixes to %s", file_path)

        return violations
logger.exception("Failed to write fixes to %s", file_path) + + return violations diff --git a/x/claude_linter_v2/cli.py b/x/claude_linter_v2/cli.py index 185713d824..e7b1cc8d18 100644 --- a/x/claude_linter_v2/cli.py +++ b/x/claude_linter_v2/cli.py @@ -31,7 +31,7 @@ def _try_send_crash_notification(title: str, message: str) -> None: try: subprocess.run(["notify-send", "-u", "critical", title, message], check=False) except Exception as e: - logger.debug(f"Failed to send crash notification: {e}") + logger.debug("Failed to send crash notification: %s", e) def parse_expiry_duration(duration_str: str) -> datetime: @@ -151,7 +151,7 @@ def hook(request_json: str | None) -> None: # Parse request with appropriate type if not (request_class := HOOK_REQUEST_TYPES.get(hook_type)): # Log the error - logger.error(f"FATAL: Unknown hook type: {hook_type}") + logger.error("FATAL: Unknown hook type: %s", hook_type) # Send desktop notification _try_send_crash_notification("Claude Linter Hook Crashed", f"Unknown hook type: {hook_type}") @@ -163,7 +163,7 @@ def hook(request_json: str | None) -> None: request = request_class(**request_data) except Exception as e: # Log the actual error - logger.error(f"FATAL: Request validation error for {hook_type}: {e}", exc_info=True) + logger.exception("FATAL: Request validation error for %s", hook_type) # Send desktop notification _try_send_crash_notification("Claude Linter Hook Crashed", f"Request validation failed for {hook_type}: {e!s}") @@ -179,7 +179,7 @@ def hook(request_json: str | None) -> None: click.echo(response.model_dump_json(by_alias=True, exclude_none=True)) except HookBugError as e: # Hook bug - this is OUR fault - logger.error(f"FATAL: Hook bug: {e}", exc_info=True) + logger.exception("FATAL: Hook bug") # Send desktop notification _try_send_crash_notification("Claude Linter Hook Bug", f"Hook implementation error: {e!s}") diff --git a/x/claude_linter_v2/config/loader.py b/x/claude_linter_v2/config/loader.py index 842759944a..5f18d86d65 100644 ---
a/x/claude_linter_v2/config/loader.py +++ b/x/claude_linter_v2/config/loader.py @@ -41,15 +41,15 @@ def load(self) -> ModularConfig: # If explicit path provided if self.config_path: if self.config_path.exists(): - logger.info(f"Loading config from {self.config_path}") + logger.info("Loading config from %s", self.config_path) self._config = ModularConfig.from_toml(self.config_path) return self._config - logger.warning(f"Config file not found: {self.config_path}") + logger.warning("Config file not found: %s", self.config_path) # Search for config file config_file = self._find_config_file() if config_file: - logger.info(f"Loading config from {config_file}") + logger.info("Loading config from %s", config_file) self._config = ModularConfig.from_toml(config_file) else: logger.info("No config file found, using defaults") diff --git a/x/claude_linter_v2/hooks/handler.py b/x/claude_linter_v2/hooks/handler.py index 3e491b9854..fec65cb721 100644 --- a/x/claude_linter_v2/hooks/handler.py +++ b/x/claude_linter_v2/hooks/handler.py @@ -119,7 +119,7 @@ def _setup_logging(self) -> None: file_handler.setLevel(level_value) except (AttributeError, KeyError): file_handler.setLevel(logging.INFO) - logger.warning(f"Invalid log level '{log_level}', using INFO") + logger.warning("Invalid log level '%s', using INFO", log_level) file_formatter = logging.Formatter( "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s" @@ -130,7 +130,7 @@ def _setup_logging(self) -> None: # Set root logger to the lowest level to let handlers filter root_logger.setLevel(logging.DEBUG) - logger.info(f"Logging configured: level={log_level}, file={log_file}") + logger.info("Logging configured: level=%s, file=%s", log_level, log_file) def _log_hook_call( self, session_id: SessionID, hook_type: str, request: BaseHookRequest, outcome: Any, response: Any @@ -183,7 +183,7 @@ def handle(self, hook_type: str, request: BaseHookRequest) -> BaseResponse: self._track_session(request, session_id) #
Log the incoming request - logger.info(f"Hook call: {hook_type} for session {session_id}") + logger.info("Hook call: %s for session %s", hook_type, session_id) # Dispatch to typed handler outcome = self._dispatch_hook(hook_type, request, session_id) @@ -233,7 +233,7 @@ def _handle_pre_hook(self, request: PreToolUseRequest, session_id: SessionID) -> file_path = tool_call.file_path if isinstance(tool_call, FilePathToolCall) else None content = tool_call.content if isinstance(tool_call, WriteToolCall) else None - logger.info(f"Pre-hook for {tool_name} in session {session_id}") + logger.info("Pre-hook for %s in session %s", tool_name, session_id) # Clear any existing notification for this session self._clear_notification(session_id) @@ -328,7 +328,7 @@ def _handle_post_hook(self, request: PostToolUseRequest, session_id: SessionID) tool_name = tool_call.tool_name file_path = tool_call.file_path if isinstance(tool_call, FilePathToolCall) else None - logger.info(f"Post-hook for {tool_name} in session {session_id}") + logger.info("Post-hook for %s in session %s", tool_name, session_id) # Clear any existing notification for this session self._clear_notification(session_id) @@ -376,18 +376,18 @@ def _handle_post_hook(self, request: PostToolUseRequest, session_id: SessionID) def _handle_stop_hook(self, request: StopRequest, session_id: SessionID) -> HookOutcome: """Handle Stop hook (Claude ending its turn).""" - logger.info(f"Stop hook for session {session_id}") + logger.info("Stop hook for session %s", session_id) return StopAllow() def _handle_subagent_stop(self, request: SubagentStopRequest, session_id: SessionID) -> HookOutcome: """Handle SubagentStop.""" - logger.info(f"SubagentStop hook for session {session_id}") + logger.info("SubagentStop hook for session %s", session_id) # For now, always allow subagent to stop return SubagentStopAllow() def _handle_notification(self, request: NotificationRequest, session_id: SessionID) -> HookOutcome: """Handle Notification.""" -
logger.info(f"Notification hook for session {session_id}") + logger.info("Notification hook for session %s", session_id) # Get notification hook config config = self.config_loader.config @@ -425,11 +425,14 @@ def _send_dbus_notification(self, title: str, message: str, session_id: SessionI self.session_manager.set_notification_id(session_id, notification_id) logger.info( - f"Sent D-Bus notification for session {session_id}: {title} " - f"(ID: {notification_id}, replaced: {replaces_id})" + "Sent D-Bus notification for session %s: %s (ID: %s, replaced: %s)", + session_id, + title, + notification_id, + replaces_id, ) - except (OSError, ImportError, AttributeError) as e: - logger.error(f"Failed to send D-Bus notification: {e}", exc_info=True) + except (OSError, ImportError, AttributeError): + logger.exception("Failed to send D-Bus notification") def _clear_notification(self, session_id: SessionID) -> None: """Clear any existing notification for this session.""" @@ -443,9 +446,9 @@ def _clear_notification(self, session_id: SessionID) -> None: close_desktop_notification(notification_id) self.session_manager.clear_notification_id(session_id) - logger.debug(f"Cleared notification {notification_id} for session {session_id}") + logger.debug("Cleared notification %s for session %s", notification_id, session_id) except (OSError, ImportError, AttributeError) as e: - logger.debug(f"Failed to clear notification for session {session_id}: {e}") + logger.debug("Failed to clear notification for session %s: %s", session_id, e) # Helper methods def _check_access_control(self, request: PreToolUseRequest, session_id: SessionID) -> tuple[RuleAction, str | None]: diff --git a/x/claude_linter_v2/hooks/validation.py b/x/claude_linter_v2/hooks/validation.py index 75620e428b..b6169d2f21 100644 --- a/x/claude_linter_v2/hooks/validation.py +++ b/x/claude_linter_v2/hooks/validation.py @@ -48,7 +48,7 @@ def validate_hook_outcome(hook_type: HookEventName, outcome: HookOutcome) -> Non # Semantic checks
_validate_outcome_semantics(hook_type, outcome) - logger.debug(f"✓ Valid {hook_type} outcome: {type(outcome).__name__}") + logger.debug("✓ Valid %s outcome: %s", hook_type, type(outcome).__name__) def _validate_outcome_semantics(hook_type: HookEventName, outcome: HookOutcome) -> None: @@ -93,4 +93,4 @@ def validate_final_response(hook_type: HookEventName, response_data: dict[str, A if hook_type == HookEventName.PRE_TOOL_USE and response_data.get("decision") not in [None, "approve", "block"]: raise HookBugError(f"Invalid PreToolUse decision: {response_data.get('decision')}") - logger.debug(f"✓ Final response valid for {hook_type.value}") + logger.debug("✓ Final response valid for %s", hook_type.value) diff --git a/x/claude_linter_v2/linters/python_ast.py b/x/claude_linter_v2/linters/python_ast.py new file mode 100644 index 0000000000..4e2d7f42b4 --- /dev/null +++ b/x/claude_linter_v2/linters/python_ast.py @@ -0,0 +1,186 @@ +"""Python AST analyzer for detecting hard-blocked patterns.""" + +import ast +import logging +from pathlib import Path + +from x.claude_linter_v2.config.models import Violation + +logger = logging.getLogger(__name__) + + +class PythonASTAnalyzer: + """Analyzes Python AST for hard-blocked patterns.""" + + def __init__(self, *, bare_except: bool = True, getattr_setattr: bool = True, barrel_init: bool = True) -> None: + """ + Initialize the analyzer. + + Args: + bare_except: Check for bare except clauses + getattr_setattr: Check for hasattr/getattr/setattr usage + barrel_init: Check for barrel __init__.py files + """ + self.bare_except = bare_except + self.getattr_setattr = getattr_setattr + self.barrel_init = barrel_init + + def analyze_file(self, file_path: str | Path) -> list[Violation]: + """ + Analyze a Python file for violations. 
+ + Args: + file_path: Path to the Python file + + Returns: + List of violations found + """ + file_path = Path(file_path) + + try: + with file_path.open(encoding="utf-8") as f: + content = f.read() + except (OSError, UnicodeDecodeError): + logger.exception("Failed to read") + return [] + + return self.analyze_code(content, file_path) + + def analyze_code(self, code: str, filename: Path) -> list[Violation]: + """ + Analyze Python code for violations. + + Args: + code: Python source code + filename: Filename for error messages + + Returns: + List of violations found + """ + violations = [] + + try: + tree = ast.parse(code, filename) + except SyntaxError as e: + # Syntax errors prevent other checks + return [ + Violation( + line=e.lineno or 1, + column=e.offset or 0, + message=f"Syntax error: {e.msg}", + rule="syntax", + fixable=False, + file_path=str(filename), + ) + ] + + # Check for various patterns + if self.bare_except: + violations.extend(self._check_bare_except(tree)) + + if self.getattr_setattr: + violations.extend(self._check_getattr_setattr(tree)) + + if self.barrel_init and filename.name == "__init__.py": + violations.extend(self._check_barrel_init(tree, code)) + + return violations + + def _check_bare_except(self, tree: ast.AST) -> list[Violation]: + """Check for bare except clauses.""" + return [ + Violation( + line=node.lineno, + column=node.col_offset, + message="Bare except clause is not allowed. Use specific exception types.", + rule="bare_except", + fixable=False, + file_path=None, + ) + for node in ast.walk(tree) + if isinstance(node, ast.ExceptHandler) and node.type is None + ] + + def _check_getattr_setattr(self, tree: ast.AST) -> list[Violation]: + """Check for hasattr/getattr/setattr usage.""" + banned_functions = {"hasattr", "getattr", "setattr"} + + return [ + Violation( + line=node.lineno, + column=node.col_offset, + message=f"Use of {node.func.id} is not allowed. 
Use proper type checking instead.", + rule="getattr_setattr", + fixable=False, + file_path=None, + ) + for node in ast.walk(tree) + if isinstance(node, ast.Call) and isinstance(node.func, ast.Name) and node.func.id in banned_functions + ] + + def _check_barrel_init(self, tree: ast.AST, code: str) -> list[Violation]: + """ + Check for barrel __init__.py patterns. + + A barrel __init__.py is one that imports and re-exports everything, + typically with patterns like: + - from .module import * + - from .module import Class; __all__ = ['Class'] + - Multiple imports that are immediately re-exported + """ + violations = [ + Violation( + line=node.lineno, + column=node.col_offset, + message="Barrel __init__.py with star imports is not allowed. Keep __init__.py minimal.", + rule="barrel_init", + fixable=False, + file_path=None, + ) + for node in ast.walk(tree) + if isinstance(node, ast.ImportFrom) and any(alias.name == "*" for alias in node.names) + ] + + # Check for re-export pattern + imports: set[str] = set() + exports: set[str] = set() + + for node in ast.walk(tree): + # Collect imports + if isinstance(node, ast.ImportFrom): + for alias in node.names: + if alias.name != "*": + imports.add(alias.asname or alias.name) + + # Check for __all__ assignment + if isinstance(node, ast.Assign): + for target in node.targets: + if isinstance(target, ast.Name) and target.id == "__all__" and isinstance(node.value, ast.List): + # Extract names from __all__ + for elt in node.value.elts: + if isinstance(elt, ast.Constant) and isinstance(elt.value, str): + exports.add(elt.value) + elif isinstance(elt, ast.Str) and isinstance(elt.s, str): # Python 3.7 compatibility + exports.add(elt.s) + + # If we have imports and they're all in __all__, it's a barrel + if imports and exports and imports.issubset(exports): + # Check if the file is mostly imports/exports + lines = code.strip().split("\n") + non_empty_lines = [line for line in lines if line.strip() and not line.strip().startswith("#")] + + 
if len(non_empty_lines) > 0: + import_lines = sum(1 for line in non_empty_lines if "import" in line or "__all__" in line) + if import_lines / len(non_empty_lines) > 0.7: # More than 70% import/export + violations.append( + Violation( + line=1, + column=0, + message="Barrel __init__.py pattern detected. Keep __init__.py files minimal or empty.", + rule="barrel_init", + fixable=False, + file_path=None, + ) + ) + + return violations diff --git a/x/claude_linter_v2/linters/python_formatter.py b/x/claude_linter_v2/linters/python_formatter.py new file mode 100644 index 0000000000..6c356102a5 --- /dev/null +++ b/x/claude_linter_v2/linters/python_formatter.py @@ -0,0 +1,112 @@ +"""Python code formatter for selective autofix.""" + +import logging +import subprocess +from pathlib import Path + +from x.claude_linter_v2.config.models import AutofixCategory +from x.claude_linter_v2.linters.ruff_binary import find_ruff_binary + +logger = logging.getLogger(__name__) + + +class PythonFormatter: + """Handles Python code formatting and autofixing via ruff.""" + + # Tools that are valid python_tools but not formatters (skip silently) + _NON_FORMATTING_TOOLS: frozenset[str] = frozenset({"mypy"}) + + def __init__(self, tools: list[str]) -> None: + formatting_tools = [t for t in tools if t not in self._NON_FORMATTING_TOOLS] + for tool in formatting_tools: + if tool != "ruff": + raise RuntimeError(f"Unknown formatting tool: {tool!r}. Only 'ruff' is supported.") + self._use_ruff = "ruff" in formatting_tools + if self._use_ruff: + ruff_bin = find_ruff_binary() + if not ruff_bin: + raise RuntimeError( + "ruff is configured as a formatting tool but the binary was not found. " + "Set RUFF_BIN env var or add ruff to PATH, or remove 'ruff' from python_tools config." 
+ ) + self._ruff_bin: str = ruff_bin + + def format_code( + self, code: str, file_path: Path, categories: list[AutofixCategory] | None = None + ) -> tuple[str, list[str]]: + """Format Python code with specified autofix categories.""" + if not self._use_ruff: + return code, [] + + # Default to formatting only if no categories specified + if categories is None: + categories = [AutofixCategory.FORMATTING] + + # Convert ALL to all categories + if AutofixCategory.ALL in categories: + categories = list(AutofixCategory) + + formatted_code = code + changes: list[str] = [] + + if AutofixCategory.FORMATTING in categories: + formatted_code, formatting_changes = self._apply_formatting(formatted_code, file_path) + changes.extend(formatting_changes) + + if AutofixCategory.IMPORTS in categories: + formatted_code, import_changes = self._fix_imports(formatted_code, file_path) + changes.extend(import_changes) + + return formatted_code, changes + + def _apply_formatting(self, code: str, file_path: Path) -> tuple[str, list[str]]: + """Format code with ruff.""" + try: + result = subprocess.run( + [self._ruff_bin, "format", "--stdin-filename", str(file_path), "-"], + input=code, + capture_output=True, + text=True, + timeout=30, + check=False, + ) + + if result.returncode == 0: + if result.stdout != code: + return result.stdout, ["Applied ruff formatting"] + return code, [] + logger.warning("Ruff formatting failed: %s", result.stderr) + return code, [] + + except subprocess.SubprocessError: + logger.exception("Ruff error") + return code, [] + + def _fix_imports(self, code: str, file_path: Path) -> tuple[str, list[str]]: + """Fix import ordering and remove unused imports.""" + try: + result = subprocess.run( + [ + self._ruff_bin, + "check", + "--fix", + "--select", + "I,F401", # I=isort, F401=unused imports + "--stdin-filename", + str(file_path), + "-", + ], + input=code, + capture_output=True, + text=True, + timeout=30, + check=False, + ) + + if result.returncode in (0, 1) and 
result.stdout and result.stdout != code: + return result.stdout, ["Fixed import ordering and removed unused imports"] + + except subprocess.SubprocessError: + logger.exception("Ruff import fix error") + + return code, [] diff --git a/x/claude_linter_v2/linters/python_ruff.py b/x/claude_linter_v2/linters/python_ruff.py new file mode 100644 index 0000000000..5d2a79e205 --- /dev/null +++ b/x/claude_linter_v2/linters/python_ruff.py @@ -0,0 +1,113 @@ +"""Python ruff linter for pre-hook checks.""" + +import json +import logging +import subprocess +from pathlib import Path +from typing import ClassVar + +from x.claude_linter_v2.config.models import Violation +from x.claude_linter_v2.linters.ruff_binary import find_ruff_binary + +logger = logging.getLogger(__name__) + + +class PythonRuffLinter: + """Runs ruff checks for Python code quality.""" + + # Critical rules that should block in pre-hook + CRITICAL_RULES: ClassVar[frozenset[str]] = frozenset( + { + # From v1 config - these are the most important ones + "E722", # bare-except + "BLE001", # blind-except + "B009", # getattr-with-constant + "B010", # setattr-with-constant + "S113", # request-without-timeout + "B008", # function-call-in-default-argument + "E402", # module-import-not-at-top-of-file + "PLC0415", # import-outside-top-level + "S608", # hardcoded-sql-expression + "S611", # django-raw-sql + "B904", # raise-without-from-inside-except + "B006", # mutable-argument-default + "PGH003", # blanket-type-ignore + } + ) + + def __init__(self, force_select: list[str] | None = None) -> None: + self.force_select = force_select or [] + ruff_bin = find_ruff_binary() + if not ruff_bin: + raise RuntimeError("ruff binary not found. 
Set RUFF_BIN env var or add ruff to PATH.") + self._ruff_bin: str = ruff_bin + + def check_code(self, code: str, file_path: Path, *, critical_only: bool = True) -> list[Violation]: + """Check Python code with ruff.""" + violations = [] + + # Build ruff args + args: list[str] = [self._ruff_bin, "check", "--output-format", "json", "--stdin-filename", str(file_path)] + + # Add force-select rules if provided + if self.force_select: + args.extend(["--select", ",".join(self.force_select)]) + elif critical_only: + # Only check critical rules in pre-hook + args.extend(["--select", ",".join(self.CRITICAL_RULES)]) + + # Add stdin marker + args.append("-") + + try: + result = subprocess.run(args, input=code, capture_output=True, text=True, timeout=30, check=False) + + # Ruff returns 1 if violations found, 0 if clean + if result.returncode in (0, 1): + if result.stdout: + # Parse JSON output + try: + issues = json.loads(result.stdout) + for issue in issues: + # Skip if not critical and we're in critical_only mode + if critical_only and issue.get("code") not in self.CRITICAL_RULES: + continue + + violation = Violation( + rule=f"ruff:{issue.get('code', 'unknown')}", + line=issue.get("location", {}).get("row", 0), + column=issue.get("location", {}).get("column", 0), + message=issue.get("message", "Unknown violation"), + fixable=issue.get("fix") is not None, + file_path=str(file_path), + ) + violations.append(violation) + except json.JSONDecodeError: + logger.exception("Failed to parse ruff output: %s", result.stdout) + else: + logger.error("ruff failed with code %s: %s", result.returncode, result.stderr) + + except subprocess.SubprocessError: + logger.exception("ruff error") + + return violations + + def get_rule_explanation(self, rule_code: str) -> str: + """Get explanation for a ruff rule.""" + explanations = { + "E722": "Bare except catches all exceptions including system exits. Use specific exception types.", + "BLE001": "Catching Exception is too broad. 
Catch specific exceptions.", + "B009": "Use obj.attr instead of getattr(obj, 'attr') with constant string.", + "B010": "Use obj.attr = value instead of setattr(obj, 'attr', value) with constant string.", + "S113": "Requests without timeout can hang indefinitely. Add timeout parameter.", + "B008": "Function calls in default arguments are evaluated once at definition time.", + "E402": "Module imports should be at the top of the file.", + "PLC0415": "Import statements should be at module level, not inside functions.", + "S608": "SQL queries should use parameterized queries, not string formatting.", + "S611": "Django raw SQL is vulnerable to injection. Use ORM or parameterized queries.", + "B904": "Use 'raise ... from err' inside except blocks to preserve traceback.", + "B006": "Mutable default arguments are shared between calls. Use None and create inside function.", + "PGH003": "Use specific type: ignore comments like # type: ignore[attr-defined]", + } + + return explanations.get(rule_code, f"Ruff rule {rule_code} violation") diff --git a/x/claude_linter_v2/linters/ruff_binary.py b/x/claude_linter_v2/linters/ruff_binary.py new file mode 100644 index 0000000000..29d95cc1cd --- /dev/null +++ b/x/claude_linter_v2/linters/ruff_binary.py @@ -0,0 +1,66 @@ +"""Resolve the ruff binary path for subprocess invocation. + +Ruff is a standalone Rust binary distributed via PyPI. It cannot be reliably +invoked via ``python -m ruff`` in Bazel sandboxes because the pip package +only provides a thin Python wrapper that shells out to a platform-specific +binary, and PYTHONPATH propagation alone doesn't guarantee the binary is +findable. + +Resolution order: +1. ``RUFF_BIN`` env var — expected to be an rlocation path when running + under Bazel (set via ``env`` in BUILD.bazel, resolved via runfiles). +2. ``shutil.which("ruff")`` — for non-Bazel usage (developer machines, CI). 
+""" + +import logging +import os +import shutil +from functools import cache +from pathlib import Path + +logger = logging.getLogger(__name__) + +_RUFF_BIN_ENV = "RUFF_BIN" + + +def _resolve_rlocation(rlocation_path: str) -> str | None: + """Resolve an rlocation path via Bazel runfiles.""" + try: + # Lazy import: python.runfiles is optional (only available under Bazel) + from python.runfiles import runfiles # type: ignore[import-not-found] # noqa: PLC0415 + + r = runfiles.Create() + if r is None: + return None + resolved: str | None = r.Rlocation(rlocation_path) + if resolved and Path(resolved).exists(): + return resolved + except ImportError: + pass + return None + + +@cache +def find_ruff_binary() -> str | None: + """Find the ruff binary, returning its absolute path or None. + + Caches the result for the lifetime of the process. + """ + # 1. RUFF_BIN env var (Bazel rlocation path or absolute path) + if env_val := os.environ.get(_RUFF_BIN_ENV): + # Try as rlocation first + if resolved := _resolve_rlocation(env_val): + logger.debug("Resolved ruff via RUFF_BIN rlocation: %s", resolved) + return resolved + # Try as direct path + if Path(env_val).exists(): + logger.debug("Resolved ruff via RUFF_BIN path: %s", env_val) + return env_val + logger.warning("RUFF_BIN set to %r but could not resolve", env_val) + + # 2. 
PATH lookup + if which_path := shutil.which("ruff"): + logger.debug("Found ruff on PATH: %s", which_path) + return which_path + + return None diff --git a/x/claude_linter_v2/llm_analyzer.py b/x/claude_linter_v2/llm_analyzer.py index ee5846b61c..e3f4a5c469 100644 --- a/x/claude_linter_v2/llm_analyzer.py +++ b/x/claude_linter_v2/llm_analyzer.py @@ -36,7 +36,7 @@ def analyze_code( # Check cost limit if self._cost_tracker >= self.config.daily_cost_limit: - logger.warning(f"LLM analysis cost limit reached: ${self._cost_tracker}") + logger.warning("LLM analysis cost limit reached: $%s", self._cost_tracker) return True, None, [] try: @@ -110,7 +110,7 @@ def _call_llm(self, prompt: str) -> dict[str, Any]: """ # TODO: Implement actual LLM API call # For now, return a mock response - logger.info(f"Would call LLM with model {self.config.model}") + logger.info("Would call LLM with model %s", self.config.model) # Estimate cost (rough approximation) prompt_tokens = len(prompt) / 4 # Rough token estimate diff --git a/x/claude_linter_v2/notifications.py b/x/claude_linter_v2/notifications.py index 44c542d781..f773fa9162 100644 --- a/x/claude_linter_v2/notifications.py +++ b/x/claude_linter_v2/notifications.py @@ -52,7 +52,7 @@ def send_desktop_notification(title: str, message: str, urgency: str = "critical ) return notification_id except Exception as e: - logger.debug(f"Failed to send notification: {e}") + logger.debug("Failed to send notification: %s", e) return 0 @@ -81,4 +81,4 @@ def close_desktop_notification(notification_id: int) -> None: # Close notification notify_iface.CloseNotification(notification_id) except Exception as e: - logger.debug(f"Failed to close notification {notification_id}: {e}") + logger.debug("Failed to close notification %s: %s", notification_id, e) diff --git a/x/claude_linter_v2/session/manager.py b/x/claude_linter_v2/session/manager.py index 061d24f6bd..bcaab92da5 100644 --- a/x/claude_linter_v2/session/manager.py +++ 
b/x/claude_linter_v2/session/manager.py @@ -70,7 +70,7 @@ def _load_session(self, session_id: SessionID) -> SessionData: try: return SessionData.model_validate_json(session_file.read_text()) except (json.JSONDecodeError, OSError, ValueError): - logger.exception(f"Failed to load session {session_id}") + logger.exception("Failed to load session %s", session_id) # Return default session data return SessionData(id=session_id, created=datetime.now()) @@ -81,7 +81,7 @@ def _save_session(self, session_id: SessionID, session_data: SessionData) -> Non try: session_file.write_text(session_data.model_dump_json(indent=2)) except (OSError, TypeError): - logger.exception(f"Failed to save session {session_id}") + logger.exception("Failed to save session %s", session_id) def track_session(self, session_id: SessionID, working_dir: Path) -> None: """ diff --git a/x/gatelet/server/config.py b/x/gatelet/server/config.py index f71ff289e6..e327d08463 100644 --- a/x/gatelet/server/config.py +++ b/x/gatelet/server/config.py @@ -180,7 +180,7 @@ class Settings(BaseModel): @classmethod def from_file(cls, path: Path) -> "Settings": """Load settings from file at path.""" - logger.info(f"Loading settings from {path.absolute()}") + logger.info("Loading settings from %s", path.absolute()) with path.open("rb") as f: config_dict = tomllib.load(f) return cls.model_validate(config_dict) diff --git a/x/gatelet/server/lifespan.py b/x/gatelet/server/lifespan.py index 2d2c7eaeb5..6fb7fa48d9 100644 --- a/x/gatelet/server/lifespan.py +++ b/x/gatelet/server/lifespan.py @@ -179,7 +179,7 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None]: # Create database engine engine = create_async_engine(str(settings.database.dsn), echo=False, future=True, pool_pre_ping=True) app.state.db_engine = engine - logger.info(f"Database engine created for: {settings.database.dsn}") + logger.info("Database engine created for: %s", settings.database.dsn) # Create session factory session_factory = async_sessionmaker(engine, class_=AsyncSession, 
expire_on_commit=False, autoflush=False)