Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions airlock/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,12 +130,12 @@ async def auth_config() -> dict[str, str]:
@app.get("/api/actions")
async def list_actions(status: str | None = None, limit: int = 100, offset: int = 0) -> list[Action]:
status_enum = ActionStatus(status) if status else None
return await gate._req_storage.list_actions(status_enum, limit=limit, offset=offset)
return await gate.req_storage.list_actions(status_enum, limit=limit, offset=offset)

@app.get("/api/actions/{session_key}/{action_seq}")
async def get_action(session_key: str, action_seq: int) -> Action:
key = ActionKey(session_key=session_key, action_seq=action_seq)
action = await gate._req_storage.get_action(key)
action = await gate.req_storage.get_action(key)
if action is None:
raise HTTPException(status_code=404, detail="Action not found")
return action
Expand Down
6 changes: 3 additions & 3 deletions airlock/oauth/k8s_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,11 +52,11 @@
try:
await self._api.read_namespaced_secret(secret_name, namespace)
await self._api.replace_namespaced_secret(secret_name, namespace, secret)
logger.info(f"Updated secret {namespace}/{secret_name}")
logger.info("Updated secret %s/%s", namespace, secret_name)

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (password)
as clear text.

Copilot Autofix

AI 20 days ago

In general, to fix clear-text logging issues, stop including potentially sensitive values directly in log messages. Either remove them, replace them with non-sensitive identifiers, or heavily redact them.

Best targeted fix here: change the logger.info calls in K8sTokenStore.write_token so they no longer interpolate the namespace (and probably not the exact secret name either). Functionality of the method (reading/replacing/creating the secret) is unchanged; only the log message is made more generic. This keeps observability (“a secret was updated/created”) while avoiding logging where that secret lives.

Concretely in airlock/oauth/k8s_client.py:

  • On line 55, replace logger.info("Updated secret %s/%s", namespace, secret_name) with a message that does not include namespace or secret_name, for example logger.info("Updated Kubernetes secret").
  • On line 59, do the same for the “Created secret …” message.

No new imports, methods, or definitions are required; only the two log statements change.


Suggested changeset 1
airlock/oauth/k8s_client.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/k8s_client.py b/airlock/oauth/k8s_client.py
--- a/airlock/oauth/k8s_client.py
+++ b/airlock/oauth/k8s_client.py
@@ -52,11 +52,11 @@
         try:
             await self._api.read_namespaced_secret(secret_name, namespace)
             await self._api.replace_namespaced_secret(secret_name, namespace, secret)
-            logger.info("Updated secret %s/%s", namespace, secret_name)
+            logger.info("Updated Kubernetes secret")
         except ApiException as e:
             if e.status == 404:
                 await self._api.create_namespaced_secret(namespace, secret)
-                logger.info("Created secret %s/%s", namespace, secret_name)
+                logger.info("Created Kubernetes secret")
             else:
                 raise
 
EOF
@@ -52,11 +52,11 @@
try:
await self._api.read_namespaced_secret(secret_name, namespace)
await self._api.replace_namespaced_secret(secret_name, namespace, secret)
logger.info("Updated secret %s/%s", namespace, secret_name)
logger.info("Updated Kubernetes secret")
except ApiException as e:
if e.status == 404:
await self._api.create_namespaced_secret(namespace, secret)
logger.info("Created secret %s/%s", namespace, secret_name)
logger.info("Created Kubernetes secret")
else:
raise

Copilot is powered by AI and may make mistakes. Always verify output.

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (secret)
as clear text.

Copilot Autofix

AI 20 days ago

To fix the problem, avoid logging the clear-text value of secret_name (and arguably the full namespace/secret_name pair). Instead, log only non-sensitive context (e.g., that a secret was created/updated in some namespace), or log a redacted or hashed form of the secret name that is still useful for debugging but not directly exploitable.

The best minimal change is:

  • Introduce a small helper function in this file to produce a redacted representation of the secret name (e.g., first and last character plus length, or a short hash).
  • Update the two logger.info calls to use this redacted string instead of the raw secret_name.
  • Keep functionality intact: the Kubernetes API calls remain unchanged; only log output is altered.
  • No new external dependencies are needed; we can rely on the standard library (hashlib) if we choose hashing, or just do simple string redaction.

Concretely in airlock/oauth/k8s_client.py:

  • Add a helper function (e.g., _redact_secret_name) near the top of the file or inside the class to transform the secret name into a non-sensitive representation.
  • Change line 55 from logger.info("Updated secret %s/%s", namespace, secret_name) to log namespace plus redacted_secret_name.
  • Change line 59 from logger.info("Created secret %s/%s", namespace, secret_name) similarly.

Suggested changeset 1
airlock/oauth/k8s_client.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/k8s_client.py b/airlock/oauth/k8s_client.py
--- a/airlock/oauth/k8s_client.py
+++ b/airlock/oauth/k8s_client.py
@@ -11,6 +11,15 @@
 
 logger = logging.getLogger(__name__)
 
+
+def _redact_secret_name(name: str) -> str:
+    """Return a redacted representation of a secret name for logging."""
+    if not name:
+        return "<empty>"
+    if len(name) <= 4:
+        return "<redacted>"
+    return f"{name[:2]}***{name[-2:]}"
+
 # TODO: A more civilized cleanup strategy would be to set ownerReferences on each
 # managed secret pointing to a stable anchor object (e.g. the airlock ConfigMap).
 # That way, secrets are garbage-collected automatically by K8s even if the airlock
@@ -52,11 +61,11 @@
         try:
             await self._api.read_namespaced_secret(secret_name, namespace)
             await self._api.replace_namespaced_secret(secret_name, namespace, secret)
-            logger.info("Updated secret %s/%s", namespace, secret_name)
+            logger.info("Updated secret in namespace %s (name=%s)", namespace, _redact_secret_name(secret_name))
         except ApiException as e:
             if e.status == 404:
                 await self._api.create_namespaced_secret(namespace, secret)
-                logger.info("Created secret %s/%s", namespace, secret_name)
+                logger.info("Created secret in namespace %s (name=%s)", namespace, _redact_secret_name(secret_name))
             else:
                 raise
 
EOF
@@ -11,6 +11,15 @@

logger = logging.getLogger(__name__)


def _redact_secret_name(name: str) -> str:
"""Return a redacted representation of a secret name for logging."""
if not name:
return "<empty>"
if len(name) <= 4:
return "<redacted>"
return f"{name[:2]}***{name[-2:]}"

# TODO: A more civilized cleanup strategy would be to set ownerReferences on each
# managed secret pointing to a stable anchor object (e.g. the airlock ConfigMap).
# That way, secrets are garbage-collected automatically by K8s even if the airlock
@@ -52,11 +61,11 @@
try:
await self._api.read_namespaced_secret(secret_name, namespace)
await self._api.replace_namespaced_secret(secret_name, namespace, secret)
logger.info("Updated secret %s/%s", namespace, secret_name)
logger.info("Updated secret in namespace %s (name=%s)", namespace, _redact_secret_name(secret_name))
except ApiException as e:
if e.status == 404:
await self._api.create_namespaced_secret(namespace, secret)
logger.info("Created secret %s/%s", namespace, secret_name)
logger.info("Created secret in namespace %s (name=%s)", namespace, _redact_secret_name(secret_name))
else:
raise

Copilot is powered by AI and may make mistakes. Always verify output.
except ApiException as e:
if e.status == 404:
await self._api.create_namespaced_secret(namespace, secret)
logger.info(f"Created secret {namespace}/{secret_name}")
logger.info("Created secret %s/%s", namespace, secret_name)

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (password)
as clear text.

Copilot Autofix

AI 20 days ago

In general, to fix clear-text logging of sensitive information, you either (a) remove the sensitive value from the log message entirely, (b) replace it with a non-sensitive identifier (e.g., a constant label or hash), or (c) redact or partially mask it before logging. Functionality of the system (managing secrets in Kubernetes) should not be affected; only the human-readable log messages need adjustment.

Here, the only problematic use is logging the namespace parameter alongside the secret_name when a secret is created. We can keep useful operational information while removing the potentially sensitive namespace by logging only the secret name and possibly a generic description instead of the full namespace/secret_name pair. The behavior of write_token (reading/creating/replacing secrets) remains unchanged.

Concretely, in airlock/oauth/k8s_client.py:

  • On line 59, change:
    • logger.info("Created secret %s/%s", namespace, secret_name)
  • To something that does not include namespace, such as:
    • logger.info("Created secret %s", secret_name)

We do not need new imports or helper methods; we only modify this single log call. The other log messages that include namespace (e.g., "Updated secret %s/%s" and "Deleted orphaned secret %s/%s") have not been flagged by CodeQL through this taint path and are not directly tied to the specific source considered sensitive in this alert; to keep the fix minimal and targeted, we will not alter them.

Suggested changeset 1
airlock/oauth/k8s_client.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/k8s_client.py b/airlock/oauth/k8s_client.py
--- a/airlock/oauth/k8s_client.py
+++ b/airlock/oauth/k8s_client.py
@@ -56,7 +56,7 @@
         except ApiException as e:
             if e.status == 404:
                 await self._api.create_namespaced_secret(namespace, secret)
-                logger.info("Created secret %s/%s", namespace, secret_name)
+                logger.info("Created secret %s", secret_name)
             else:
                 raise
 
EOF
@@ -56,7 +56,7 @@
except ApiException as e:
if e.status == 404:
await self._api.create_namespaced_secret(namespace, secret)
logger.info("Created secret %s/%s", namespace, secret_name)
logger.info("Created secret %s", secret_name)
else:
raise

Copilot is powered by AI and may make mistakes. Always verify output.

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (secret)
as clear text.

Copilot Autofix

AI 20 days ago

In general, to fix clear-text logging issues, remove sensitive values from log messages or replace them with non-sensitive summaries (e.g., static text, types, or redacted/hashed forms). The goal is to retain operationally useful information (that an action occurred and whether it succeeded) without leaking identifiers or secrets.

In this file, the only problematic uses are the logger.info calls that interpolate %s/%s with namespace and secret_name when creating/updating/deleting secrets. The best fix that preserves behavior is to log the operation and the actor (managed_by) but not the specific secret name or namespace. That means changing:

  • Line 55: logger.info("Updated secret %s/%s", namespace, secret_name)
  • Line 59: logger.info("Created secret %s/%s", namespace, secret_name)
  • Line 71: logger.info("Deleted orphaned secret %s/%s", namespace, name)

to messages like:

  • logger.info("Updated managed secret for %s", self._managed_by)
  • logger.info("Created managed secret for %s", self._managed_by)
  • logger.info("Deleted orphaned managed secret for %s", self._managed_by)

This keeps useful audit information (that the token store acted and which manager label it used), avoids introducing new imports or helpers, and does not change control flow or API interactions. No new methods or definitions are required; we only adjust the log message format strings and arguments within airlock/oauth/k8s_client.py.

Suggested changeset 1
airlock/oauth/k8s_client.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/k8s_client.py b/airlock/oauth/k8s_client.py
--- a/airlock/oauth/k8s_client.py
+++ b/airlock/oauth/k8s_client.py
@@ -52,11 +52,11 @@
         try:
             await self._api.read_namespaced_secret(secret_name, namespace)
             await self._api.replace_namespaced_secret(secret_name, namespace, secret)
-            logger.info("Updated secret %s/%s", namespace, secret_name)
+            logger.info("Updated managed secret for %s", self._managed_by)
         except ApiException as e:
             if e.status == 404:
                 await self._api.create_namespaced_secret(namespace, secret)
-                logger.info("Created secret %s/%s", namespace, secret_name)
+                logger.info("Created managed secret for %s", self._managed_by)
             else:
                 raise
 
@@ -68,7 +65,7 @@
             name = secret.metadata.name
             if name not in known_names:
                 await self._api.delete_namespaced_secret(name, namespace)
-                logger.info("Deleted orphaned secret %s/%s", namespace, name)
+                logger.info("Deleted orphaned managed secret for %s", self._managed_by)
 
     async def read_token(self, secret_name: str, namespace: str) -> TokenData | None:
         try:
EOF
@@ -52,11 +52,11 @@
try:
await self._api.read_namespaced_secret(secret_name, namespace)
await self._api.replace_namespaced_secret(secret_name, namespace, secret)
logger.info("Updated secret %s/%s", namespace, secret_name)
logger.info("Updated managed secret for %s", self._managed_by)
except ApiException as e:
if e.status == 404:
await self._api.create_namespaced_secret(namespace, secret)
logger.info("Created secret %s/%s", namespace, secret_name)
logger.info("Created managed secret for %s", self._managed_by)
else:
raise

@@ -68,7 +65,7 @@
name = secret.metadata.name
if name not in known_names:
await self._api.delete_namespaced_secret(name, namespace)
logger.info("Deleted orphaned secret %s/%s", namespace, name)
logger.info("Deleted orphaned managed secret for %s", self._managed_by)

async def read_token(self, secret_name: str, namespace: str) -> TokenData | None:
try:
Copilot is powered by AI and may make mistakes. Always verify output.
else:
raise

Expand All @@ -68,7 +68,7 @@
name = secret.metadata.name
if name not in known_names:
await self._api.delete_namespaced_secret(name, namespace)
logger.info(f"Deleted orphaned secret {namespace}/{name}")
logger.info("Deleted orphaned secret %s/%s", namespace, name)

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (password)
as clear text.

Copilot Autofix

AI 20 days ago

General fix: Avoid logging potentially sensitive values such as Kubernetes namespaces (especially when associated with secret operations) in clear text. Either remove them from log messages, replace them with less specific information, or redact them.

Best concrete fix here: Adjust the log statement in K8sTokenStore.delete_orphaned_secrets so it no longer prints the namespace argument. We can still log that an orphaned secret was deleted and its name (or even omit the name if desired), but we should not log the full namespace/name pair that the analyzer flagged. This keeps existing behavior of the method (deleting secrets) unchanged; we only modify the log message format string and arguments.

Specific change:

  • File: airlock/oauth/k8s_client.py
    • Around line 71, change:
      logger.info("Deleted orphaned secret %s/%s", namespace, name)
      to something like:
      logger.info("Deleted orphaned secret %s", name)
    • No new imports or helper methods are required.
Suggested changeset 1
airlock/oauth/k8s_client.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/k8s_client.py b/airlock/oauth/k8s_client.py
--- a/airlock/oauth/k8s_client.py
+++ b/airlock/oauth/k8s_client.py
@@ -68,7 +68,7 @@
             name = secret.metadata.name
             if name not in known_names:
                 await self._api.delete_namespaced_secret(name, namespace)
-                logger.info("Deleted orphaned secret %s/%s", namespace, name)
+                logger.info("Deleted orphaned secret %s", name)
 
     async def read_token(self, secret_name: str, namespace: str) -> TokenData | None:
         try:
EOF
@@ -68,7 +68,7 @@
name = secret.metadata.name
if name not in known_names:
await self._api.delete_namespaced_secret(name, namespace)
logger.info("Deleted orphaned secret %s/%s", namespace, name)
logger.info("Deleted orphaned secret %s", name)

async def read_token(self, secret_name: str, namespace: str) -> TokenData | None:
try:
Copilot is powered by AI and may make mistakes. Always verify output.

async def read_token(self, secret_name: str, namespace: str) -> TokenData | None:
try:
Expand Down
6 changes: 3 additions & 3 deletions airlock/oauth/refresh.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
continue
if not provider.needs_refresh(token):
continue
logger.info(f"Refreshing token for {name} (expires {token.expires_at})")
logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (password)
as clear text.

Copilot Autofix

AI 20 days ago

In general, to fix clear-text logging of sensitive information, either stop logging the sensitive value, replace it with a non-sensitive surrogate (e.g., a static label or hash/truncated form), or ensure it is properly redacted before logging. The goal is to keep logs useful for operations while avoiding exposing identifiers that might help correlate secrets or accounts if logs are compromised.

Here, the sensitive part is name (the provider identifier) being logged alongside token refresh activity. The safest minimal change that preserves operational value is to remove name from the log messages and keep only generic messages plus expiry timestamps, which are not directly sensitive. We don’t need any new imports or helper methods; we just adjust the format strings and remove name from the arguments in the two logger.info calls and the logger.exception call. Concretely, in airlock/oauth/refresh.py:

  • At line 30, change logger.info("Refreshing token for %s (expires %s)", name, token.expires_at) to a message that omits name, e.g. logger.info("Refreshing token (expires %s)", token.expires_at).
  • At line 45, change logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at) similarly to omit name.
  • At line 47, change logger.exception("Failed to refresh token for %s", name) to a message without name, e.g. logger.exception("Failed to refresh token").

No changes are required in airlock/app.py for this specific finding.

Suggested changeset 1
airlock/oauth/refresh.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/refresh.py b/airlock/oauth/refresh.py
--- a/airlock/oauth/refresh.py
+++ b/airlock/oauth/refresh.py
@@ -27,7 +27,7 @@
                     continue
                 if not provider.needs_refresh(token):
                     continue
-                logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
+                logger.info("Refreshing token (expires %s)", token.expires_at)
                 new_token = await provider.refresh_tokens(token.refresh_token)
                 await k8s_store.write_token(
                     provider.config.refresh_secret.name,
@@ -42,9 +42,9 @@
                     annotations=provider.config.access_secret.annotations or None,
                     fields=ACCESS_TOKEN_FIELDS,
                 )
-                logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)
+                logger.info("Refreshed token (new expiry %s)", new_token.expires_at)
             except Exception:
-                logger.exception("Failed to refresh token for %s", name)
+                logger.exception("Failed to refresh token")
         try:
             await k8s_store.delete_orphaned_secrets(target_namespace, known_secret_names)
         except Exception:
EOF
@@ -27,7 +27,7 @@
continue
if not provider.needs_refresh(token):
continue
logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
logger.info("Refreshing token (expires %s)", token.expires_at)
new_token = await provider.refresh_tokens(token.refresh_token)
await k8s_store.write_token(
provider.config.refresh_secret.name,
@@ -42,9 +42,9 @@
annotations=provider.config.access_secret.annotations or None,
fields=ACCESS_TOKEN_FIELDS,
)
logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)
logger.info("Refreshed token (new expiry %s)", new_token.expires_at)
except Exception:
logger.exception("Failed to refresh token for %s", name)
logger.exception("Failed to refresh token")
try:
await k8s_store.delete_orphaned_secrets(target_namespace, known_secret_names)
except Exception:
Copilot is powered by AI and may make mistakes. Always verify output.
new_token = await provider.refresh_tokens(token.refresh_token)
await k8s_store.write_token(
provider.config.refresh_secret.name,
Expand All @@ -42,9 +42,9 @@
annotations=provider.config.access_secret.annotations or None,
fields=ACCESS_TOKEN_FIELDS,
)
logger.info(f"Refreshed token for {name} (new expiry {new_token.expires_at})")
logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (password)
as clear text.

Copilot Autofix

AI 20 days ago

In general, the fix is to avoid logging potentially sensitive or user-derived data directly, especially in security-sensitive flows. Instead, log only non-sensitive metadata (e.g., a generic label, or an internal, sanitized identifier), or remove the data point entirely if it is not necessary for debugging/monitoring.

Here, the problematic logs are:

logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
...
logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)

We can eliminate the exposure by no longer logging name directly. To keep the logs still useful without changing functionality, we can either (a) drop the provider identifier from the message, or (b) use a derived, non-sensitive detail (like the provider’s class name) that does not come from external configuration. Since we must not assume additional structure outside the snippet, the safest, self-contained change is to stop logging name and just describe the event generically.

Concretely, in airlock/oauth/refresh.py:

  • At line 30, change the message to omit %s and name, for example to: "Refreshing token (expires %s)" with a single argument token.expires_at.
  • At line 45, do the same: "Refreshed token (new expiry %s)" with new_token.expires_at.

No new imports or methods are needed; we simply adjust the existing log calls to remove the tainted argument while preserving the rest of the behavior.

Suggested changeset 1
airlock/oauth/refresh.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/refresh.py b/airlock/oauth/refresh.py
--- a/airlock/oauth/refresh.py
+++ b/airlock/oauth/refresh.py
@@ -27,7 +27,7 @@
                     continue
                 if not provider.needs_refresh(token):
                     continue
-                logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
+                logger.info("Refreshing token (expires %s)", token.expires_at)
                 new_token = await provider.refresh_tokens(token.refresh_token)
                 await k8s_store.write_token(
                     provider.config.refresh_secret.name,
@@ -42,7 +42,7 @@
                     annotations=provider.config.access_secret.annotations or None,
                     fields=ACCESS_TOKEN_FIELDS,
                 )
-                logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)
+                logger.info("Refreshed token (new expiry %s)", new_token.expires_at)
             except Exception:
                 logger.exception("Failed to refresh token for %s", name)
         try:
EOF
@@ -27,7 +27,7 @@
continue
if not provider.needs_refresh(token):
continue
logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
logger.info("Refreshing token (expires %s)", token.expires_at)
new_token = await provider.refresh_tokens(token.refresh_token)
await k8s_store.write_token(
provider.config.refresh_secret.name,
@@ -42,7 +42,7 @@
annotations=provider.config.access_secret.annotations or None,
fields=ACCESS_TOKEN_FIELDS,
)
logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)
logger.info("Refreshed token (new expiry %s)", new_token.expires_at)
except Exception:
logger.exception("Failed to refresh token for %s", name)
try:
Copilot is powered by AI and may make mistakes. Always verify output.
except Exception:
logger.exception(f"Failed to refresh token for {name}")
logger.exception("Failed to refresh token for %s", name)

Check failure

Code scanning / CodeQL

Clear-text logging of sensitive information High

This expression logs
sensitive data (password)
as clear text.

Copilot Autofix

AI 20 days ago

In general: avoid logging any value that might contain sensitive data (tokens, passwords, secrets, or identifiers that could embed them). When logging is needed for debugging/operations, log non-sensitive metadata or a redacted version instead.

Best fix here: adjust the log messages in airlock/oauth/refresh.py to avoid logging the potentially sensitive name value, replacing it with non-sensitive metadata derived from the Provider instance (e.g., its class name) or with a generic placeholder. This retains useful context (“which type of provider failed”) while ensuring that any potentially sensitive identifier from configuration is not written to logs.

Concretely, in airlock/oauth/refresh.py:

  • On lines 30 and 45, replace "Refreshing token for %s (expires %s)", name, token.expires_at and "Refreshed token for %s (new expiry %s)", name, new_token.expires_at with messages that don’t interpolate name. For instance, log only the expiry, or log the provider class name via type(provider).__name__.
  • On line 47, change logger.exception("Failed to refresh token for %s", name) to a message that doesn’t include name, e.g., "Failed to refresh token for provider" or use type(provider).__name__ as non-sensitive context.
  • No extra imports or helper methods are required; we can derive the provider’s class name inline (type(provider).__name__).
  • We must not change external behavior beyond log content; the logic and control flow remain untouched.
Suggested changeset 1
airlock/oauth/refresh.py

Autofix patch

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/airlock/oauth/refresh.py b/airlock/oauth/refresh.py
--- a/airlock/oauth/refresh.py
+++ b/airlock/oauth/refresh.py
@@ -27,7 +27,7 @@
                     continue
                 if not provider.needs_refresh(token):
                     continue
-                logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
+                logger.info("Refreshing token for provider %s (expires %s)", type(provider).__name__, token.expires_at)
                 new_token = await provider.refresh_tokens(token.refresh_token)
                 await k8s_store.write_token(
                     provider.config.refresh_secret.name,
@@ -42,9 +42,13 @@
                     annotations=provider.config.access_secret.annotations or None,
                     fields=ACCESS_TOKEN_FIELDS,
                 )
-                logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)
+                logger.info(
+                    "Refreshed token for provider %s (new expiry %s)",
+                    type(provider).__name__,
+                    new_token.expires_at,
+                )
             except Exception:
-                logger.exception("Failed to refresh token for %s", name)
+                logger.exception("Failed to refresh token for provider %s", type(provider).__name__)
         try:
             await k8s_store.delete_orphaned_secrets(target_namespace, known_secret_names)
         except Exception:
EOF
@@ -27,7 +27,7 @@
continue
if not provider.needs_refresh(token):
continue
logger.info("Refreshing token for %s (expires %s)", name, token.expires_at)
logger.info("Refreshing token for provider %s (expires %s)", type(provider).__name__, token.expires_at)
new_token = await provider.refresh_tokens(token.refresh_token)
await k8s_store.write_token(
provider.config.refresh_secret.name,
@@ -42,9 +42,13 @@
annotations=provider.config.access_secret.annotations or None,
fields=ACCESS_TOKEN_FIELDS,
)
logger.info("Refreshed token for %s (new expiry %s)", name, new_token.expires_at)
logger.info(
"Refreshed token for provider %s (new expiry %s)",
type(provider).__name__,
new_token.expires_at,
)
except Exception:
logger.exception("Failed to refresh token for %s", name)
logger.exception("Failed to refresh token for provider %s", type(provider).__name__)
try:
await k8s_store.delete_orphaned_secrets(target_namespace, known_secret_names)
except Exception:
Copilot is powered by AI and may make mistakes. Always verify output.
try:
await k8s_store.delete_orphaned_secrets(target_namespace, known_secret_names)
except Exception:
Expand Down
4 changes: 2 additions & 2 deletions airlock/oauth/routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ async def callback_get(provider_name: str, request: Request) -> RedirectResponse
annotations=provider.config.access_secret.annotations or None,
fields=ACCESS_TOKEN_FIELDS,
)
logger.info(f"Stored tokens for {provider_name} (expires {token.expires_at})")
logger.info("Stored tokens for %s (expires %s)", provider_name, token.expires_at)
return RedirectResponse("/#/oauth")

@router.post("/callback/{provider_name}")
Expand All @@ -117,7 +117,7 @@ async def callback_post(provider_name: str, body: _PlaidCallbackBody) -> Redirec
annotations=provider.config.access_secret.annotations or None,
fields=ACCESS_TOKEN_FIELDS,
)
logger.info(f"Stored Plaid tokens for {provider_name}")
logger.info("Stored Plaid tokens for %s", provider_name)
return RedirectResponse("/#/oauth", status_code=303)

return router
16 changes: 8 additions & 8 deletions devinfra/claude/auth_proxy/proxy.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def __init__(self, listen_port: int, max_workers: int = 100):
self._upstream_url: str | None = None
self._creds_lock = threading.Lock()
self.server_socket: socket.socket | None = None
self._running = False
self.running = False
self._thread: threading.Thread | None = None
self._executor: ThreadPoolExecutor | None = None
self._connections: list[socket.socket] = []
Expand Down Expand Up @@ -118,15 +118,15 @@ def start(self) -> None:
self.server_socket.settimeout(0.5)

self._executor = ThreadPoolExecutor(max_workers=self.max_workers, thread_name_prefix="proxy")
self._running = True
self.running = True
self._thread = threading.Thread(target=self._serve, daemon=True)
self._thread.start()

logger.info("Auth proxy started on 127.0.0.1:%d (max_workers: %d)", self.listen_port, self.max_workers)

def stop(self) -> None:
"""Stop the proxy server."""
self._running = False
self.running = False
if self._thread:
self._thread.join(timeout=2)
if self._executor:
Expand All @@ -140,7 +140,7 @@ def stop(self) -> None:

def _serve(self) -> None:
"""Main server loop."""
while self._running:
while self.running:
try:
client_sock, _ = self.server_socket.accept() # type: ignore[union-attr]
self._connections.append(client_sock)
Expand Down Expand Up @@ -309,7 +309,7 @@ def __init__(self, sock_path: Path, remote_target: str, max_workers: int = 100):
self._upstream_url: str | None = None
self._creds_lock = threading.Lock()
self.server_socket: socket.socket | None = None
self._running = False
self.running = False
self._thread: threading.Thread | None = None
self._executor: ThreadPoolExecutor | None = None
self._connections: list[socket.socket] = []
Expand Down Expand Up @@ -341,15 +341,15 @@ def start(self) -> None:
self.server_socket.settimeout(0.5)

self._executor = ThreadPoolExecutor(max_workers=self.max_workers, thread_name_prefix="uds-proxy")
self._running = True
self.running = True
self._thread = threading.Thread(target=self._serve, daemon=True)
self._thread.start()

logger.info("UDS remote proxy started on %s → %s", self.sock_path, self.remote_target)

def stop(self) -> None:
"""Stop the UDS proxy server."""
self._running = False
self.running = False
if self._thread:
self._thread.join(timeout=2)
if self._executor:
Expand All @@ -365,7 +365,7 @@ def stop(self) -> None:

def _serve(self) -> None:
"""Main server loop."""
while self._running:
while self.running:
try:
client_sock, _ = self.server_socket.accept() # type: ignore[union-attr]
self._connections.append(client_sock)
Expand Down
2 changes: 1 addition & 1 deletion devinfra/claude/auth_proxy/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ async def setup_auth_proxy(
# Create combined CA bundle (for tools like uv that use SSL_CERT_FILE)
_create_combined_ca_bundle(paths)

status = (f"running (port {port})" if proxy._running else "configured") if proxy is not None else "uds-only"
status = (f"running (port {port})" if proxy.running else "configured") if proxy is not None else "uds-only"
ca_status = "custom CA" if combined_ca.exists() else "system"

logger.info("Auth proxy setup complete")
Expand Down
2 changes: 1 addition & 1 deletion editor_agent/host/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@
MODEL_OPT = typer.Option(DEFAULT_MODEL, "--model", help="Model name (OPENAI_MODEL)")
NETWORK_OPT = typer.Option(_ENV_NETWORK, "--network", help="Docker network (ADGN_EDITOR_DOCKER_NETWORK)")
MAX_TURNS_OPT = typer.Option(40, "--max-turns", help="Maximum agent turns before abort")
VERBOSE_OPT = typer.Option(False, "--verbose", "-v", help="Show agent actions in real-time")
VERBOSE_OPT = typer.Option(default=False, help="Show agent actions in real-time")


@app.callback(invoke_without_command=True)
Expand Down
12 changes: 6 additions & 6 deletions git_commit_ai/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,12 +108,12 @@ async def commit(
timeout_secs: int | None = typer.Option(
None, "--timeout-secs", help="Maximum seconds for the AI request; 0 disables timeout"
),
stage_all: bool = typer.Option(False, "-a", "--all", help="Stage all tracked changes"),
no_verify: bool = typer.Option(False, "--no-verify", help="Skip pre-commit hooks"),
amend: bool = typer.Option(False, "--amend", help="Amend previous commit"),
accept_ai: bool = typer.Option(False, "--accept-ai", help="Commit with AI message, skip editor"),
verbose: bool = typer.Option(False, "-v", help="Verbose git commit"),
debug: bool = typer.Option(False, "--debug", help="Show logger output"),
stage_all: bool = typer.Option(default=False, help="Stage all tracked changes"),
no_verify: bool = typer.Option(default=False, help="Skip pre-commit hooks"),
amend: bool = typer.Option(default=False, help="Amend previous commit"),
accept_ai: bool = typer.Option(default=False, help="Commit with AI message, skip editor"),
verbose: bool = typer.Option(default=False, help="Verbose git commit"),
debug: bool = typer.Option(default=False, help="Show logger output"),
):
"""Run the git-commit-ai process."""
repo = pygit2.Repository(get_build_workspace_directory())
Expand Down
2 changes: 1 addition & 1 deletion homeassistant/proxy/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ class Settings(BaseModel):

@classmethod
def from_file(cls, path: Path) -> "Settings":
logger.info(f"Loading settings from {path.absolute()}")
logger.info("Loading settings from %s", path.absolute())
with path.open() as f:
data = yaml.safe_load(f)
if not isinstance(data, dict):
Expand Down
8 changes: 4 additions & 4 deletions homeassistant/proxy/policy.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,11 +74,11 @@ async def _connection_loop(self) -> None:
self._connected.clear()
return
except (CannotConnect, ConnectionFailed, NotConnected, OSError) as exc:
logger.warning(f"HA connection lost: {exc}. Reconnecting in {backoff:.1f}s")
logger.warning("HA connection lost: %s. Reconnecting in %.1fs", exc, backoff)
except asyncio.CancelledError:
raise
except Exception:
logger.exception(f"Unexpected error in HA connection loop. Reconnecting in {backoff:.1f}s")
logger.exception("Unexpected error in HA connection loop. Reconnecting in %.1fs", backoff)
finally:
self._connected.clear()
if self._client is not None:
Expand All @@ -96,7 +96,7 @@ async def _ensure_entities(self) -> dict[str, EntityInfo]:
self._entities_time = now
except (ConnectionError, NotConnected, CannotConnect, ConnectionFailed) as exc:
if self._entities is not None:
logger.warning(f"Registry refresh failed ({exc}), serving stale cache")
logger.warning("Registry refresh failed (%s), serving stale cache", exc)
else:
raise
return self._entities
Expand Down Expand Up @@ -126,7 +126,7 @@ async def _fetch_registry(self) -> dict[str, EntityInfo]:
area_id = device_area.get(device_id)
registry[entity_id] = EntityInfo(entity_id=entity_id, device_id=device_id, area_id=area_id)

logger.info(f"Fetched registry: {len(registry)} entities")
logger.info("Fetched registry: %d entities", len(registry))
return registry

def _get_entity(self, entities: dict[str, EntityInfo], entity_id: str) -> EntityInfo:
Expand Down
4 changes: 2 additions & 2 deletions inop/engine/optimizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -525,10 +525,10 @@ def main() -> None:
seed_tasks = [t for t in all_tasks if t.type == task_type_enum.value]

if not seed_tasks:
logger.error(f"No tasks found with type '{task_type_enum.value}' in {seeds_path}")
logger.error("No tasks found with type '%s' in %s", task_type_enum.value, seeds_path)
sys.exit(1)

logger.info(f"Loaded {len(seed_tasks)} {task_type_enum.value} tasks from {len(all_tasks)} total tasks")
logger.info("Loaded %d %s tasks from %d total tasks", len(seed_tasks), task_type_enum.value, len(all_tasks))

# Load grading criteria from YAML
logger.info("Loading grading criteria")
Expand Down
8 changes: 5 additions & 3 deletions inop/runners/containerized_claude.py
Original file line number Diff line number Diff line change
Expand Up @@ -403,7 +403,8 @@ async def _run_setup_script(self, script_path: str, script_type: str, log_prefix
cmd_args = [str(setup_script), c.id, self.task_id, str(self._output_dir)]
script_stat = await asyncio.to_thread(setup_script.stat)
self._logger.info(
f"Running {script_type.lower()} script",
"%s script running",
script_type.lower(),
script=str(setup_script),
container_id=c.id,
task_id=self.task_id,
Expand Down Expand Up @@ -445,13 +446,14 @@ async def _run_setup_script(self, script_path: str, script_type: str, log_prefix

if exit_code != 0:
self._logger.error(
f"{script_type} script failed - CONTAINER LEFT RUNNING FOR DEBUG",
"%s script failed - CONTAINER LEFT RUNNING FOR DEBUG",
script_type,
container_id=c.id,
exit_code=exit_code,
debug_hint=f"Run: docker logs {c.id}",
)
raise RuntimeError(f"{script_type} script failed with exit code {exit_code}")
self._logger.info(f"{script_type} script completed successfully", container_id=c.id)
self._logger.info("%s script completed successfully", script_type, container_id=c.id)

async def _run_pre_task_always_setup(self):
"""Run always pre-task setup script (runs before every task)."""
Expand Down
5 changes: 3 additions & 2 deletions inventree_utils/beautifier/upload_lcsc_images.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,15 +63,16 @@ def upload_lcsc_images(api: InvenTreeAPI):
# Gather LCSC from single supplier
sp_lcsc = [sp for sp in all_supplier_parts if sp.part == p.pk and sp.supplier == lcsc.pk]
if len(sp_lcsc) != 1:
log.info(f"Skip, {len(sp_lcsc)} LCSC SupplierParts.")
log.info("Skip, %s LCSC SupplierParts.", len(sp_lcsc))
continue
lcsc_from_supplier = sp_lcsc[0].SKU

# Decide if we have an LCSC ID
if lcsc_from_link and lcsc_from_supplier:
# If both are present, assert they match
if lcsc_from_link != lcsc_from_supplier:
raise ValueError(f"Conflicting LCSC IDs: {lcsc_from_link=} != {lcsc_from_supplier=}", log._context)
msg = f"Conflicting LCSC IDs: {lcsc_from_link=} != {lcsc_from_supplier=}"
raise ValueError(msg, log._context)
# Both match => use either one
lcsc_id = lcsc_from_link
elif lcsc_from_link or lcsc_from_supplier:
Expand Down
2 changes: 1 addition & 1 deletion llm/claude_code_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ class BaseResponse(CamelCaseModel):
"""

# continue_ needs explicit alias since to_camel("continue_") -> "continue_" not "continue"
continue_: bool = Field(True, alias="continue")
continue_: bool = Field(default=True, alias="continue")
stop_reason: str | None = Field(None, description="Message shown to USER when continue is false")
suppress_output: bool | None = None

Expand Down
2 changes: 1 addition & 1 deletion llm/claude_hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def entrypoint(cls) -> None:

except Exception:
# Log the exception
logger.error("Hook execution failed", exc_info=True)
logger.exception("Hook execution failed")
raise

_emit_and_exit(response)
Expand Down
14 changes: 7 additions & 7 deletions llm/html/llm_html/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ def load_page_titles():
else:
raise ValueError(f"Missing required 'title' in frontmatter for {page}.md")
except Exception:
logger.exception(f"Error loading title for {page}.md")
logger.exception("Error loading title for %s.md", page)
raise


Expand All @@ -116,9 +116,9 @@ def handle_page_rendering_error(error: Exception, page_name: str = "page") -> No
HTTPException: Always raises with appropriate status code
"""
if isinstance(error, FileNotFoundError):
logger.error(f"{page_name} not found")
logger.error("%s not found", page_name)
raise HTTPException(status_code=404, detail="Document not found")
logger.error(f"Error rendering {page_name}: {error}")
logger.error("Error rendering %s: %s", page_name, error)
raise HTTPException(status_code=500, detail="Internal server error")


Expand Down Expand Up @@ -216,7 +216,7 @@ async def analyze_page_tokens(
tokens = count_tokens_for_models(final_markdown)
return {"page": page_id, "title": title, "url": url, **tokens}
except Exception:
logger.exception(f"Error analyzing {page_id} page")
logger.exception("Error analyzing %s page", page_id)
return None


Expand Down Expand Up @@ -300,11 +300,11 @@ async def verify_token(request: Request, token: str = ""):

ts.verify_token(token)
result = {"status": "success", "message": "Token is valid ✅"}
logger.info(f"Token verification succeeded for: {token[:20]}...")
logger.info("Token verification succeeded for: %s...", token[:20])
except VerificationError as exc:
result = {"status": "failed", "errors": exc.issues}
issues_str = " | ".join(f"✗ {issue}" for issue in exc.issues)
logger.exception(f"Token verification FAILED: {issues_str}")
logger.exception("Token verification FAILED: %s", issues_str)
except FileNotFoundError:
logger.exception("index.md not found for token verification")
result = {"status": "failed", "errors": ["Source document not found"]}
Expand All @@ -323,7 +323,7 @@ def main():
host = os.environ.get("HOST", "0.0.0.0")
port = int(os.environ.get("PORT", "9000"))

logger.info(f"Starting FastAPI server on http://{host}:{port}")
logger.info("Starting FastAPI server on http://%s:%s", host, port)
uvicorn.run(app, host=host, port=port, log_config=None) # None to use our logging config


Expand Down
10 changes: 5 additions & 5 deletions llm/mcp/habitify/api_reference/collect_references.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,13 +157,13 @@ def _make_request_and_save(
Raises:
SystemExit: If expected_status is specified and doesn't match actual status
"""
logger.info(f"Making request: {name} ({method} {endpoint})")
logger.info("Making request: %s (%s %s)", name, method, endpoint)

response = self.client.request(method=method, url=endpoint, params=params, json=json_data)

# If expected status is provided, validate it
if response.status_code != expected_status:
logger.error(f"Expected status {expected_status} but got {response.status_code}")
logger.error("Expected status %s but got %s", expected_status, response.status_code)
sys.exit(1)

# Create reference data structure
Expand All @@ -187,12 +187,12 @@ def _make_request_and_save(
# Save to file in YAML format
path = REFERENCE_DIR / f"{name.lower().replace(' ', '_')}.yaml"
if path.exists():
logger.warning(f"Overwriting existing file: {path}")
logger.warning("Overwriting existing file: %s", path)

with path.open("w") as f:
yaml.dump(reference, f, sort_keys=False, indent=2, default_flow_style=False)

logger.info(f"Saved reference example to {path}")
logger.info("Saved reference example to %s", path)

return response.json()

Expand Down Expand Up @@ -224,7 +224,7 @@ def collect_references(self) -> None:
# Use the first habit for further API calls
habit = habits[0]
habit_id = habit["id"]
logger.info(f"Using habit with ID: {habit_id} and masked name: {self._mask_name(habit['name'])}")
logger.info("Using habit with ID: %s and masked name: %s", habit_id, self._mask_name(habit["name"]))

# Get details for a specific habit by ID
self._make_request_and_save(
Expand Down
Loading
Loading