diff --git a/.github/scripts/update_pr_description.sh b/.github/scripts/update_pr_description.sh
index 4457b7495564..fd8b640c74e6 100755
--- a/.github/scripts/update_pr_description.sh
+++ b/.github/scripts/update_pr_description.sh
@@ -17,9 +17,6 @@ DOCKER_RUN_COMMAND="docker run -it --rm \
--name openhands-app-${SHORT_SHA} \
docker.openhands.dev/openhands/openhands:${SHORT_SHA}"
-# Define the uvx command
-UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/OpenHands/OpenHands@${BRANCH_NAME}#subdirectory=openhands-cli openhands"
-
# Get the current PR body
PR_BODY=$(gh pr view "$PR_NUMBER" --json body --jq .body)
@@ -37,11 +34,6 @@ GUI with Docker:
\`\`\`
${DOCKER_RUN_COMMAND}
\`\`\`
-
-CLI with uvx:
-\`\`\`
-${UVX_RUN_COMMAND}
-\`\`\`
EOF
)
else
@@ -57,11 +49,6 @@ GUI with Docker:
\`\`\`
${DOCKER_RUN_COMMAND}
\`\`\`
-
-CLI with uvx:
-\`\`\`
-${UVX_RUN_COMMAND}
-\`\`\`
EOF
)
fi
diff --git a/.github/workflows/clean-up.yml b/.github/workflows/clean-up.yml
deleted file mode 100644
index b7587746cca3..000000000000
--- a/.github/workflows/clean-up.yml
+++ /dev/null
@@ -1,69 +0,0 @@
-# Workflow that cleans up outdated and old workflows to prevent out of disk issues
-name: Delete old workflow runs
-
-# This workflow is currently only triggered manually
-on:
- workflow_dispatch:
- inputs:
- days:
- description: 'Days-worth of runs to keep for each workflow'
- required: true
- default: '30'
- minimum_runs:
- description: 'Minimum runs to keep for each workflow'
- required: true
- default: '10'
- delete_workflow_pattern:
- description: 'Name or filename of the workflow (if not set, all workflows are targeted)'
- required: false
- delete_workflow_by_state_pattern:
- description: 'Filter workflows by state: active, deleted, disabled_fork, disabled_inactivity, disabled_manually'
- required: true
- default: "ALL"
- type: choice
- options:
- - "ALL"
- - active
- - deleted
- - disabled_inactivity
- - disabled_manually
- delete_run_by_conclusion_pattern:
- description: 'Remove runs based on conclusion: action_required, cancelled, failure, skipped, success'
- required: true
- default: 'ALL'
- type: choice
- options:
- - 'ALL'
- - 'Unsuccessful: action_required,cancelled,failure,skipped'
- - action_required
- - cancelled
- - failure
- - skipped
- - success
- dry_run:
- description: 'Logs simulated changes, no deletions are performed'
- required: false
-
-jobs:
- del_runs:
- runs-on: blacksmith-4vcpu-ubuntu-2204
- permissions:
- actions: write
- contents: read
- steps:
- - name: Delete workflow runs
- uses: Mattraks/delete-workflow-runs@v2
- with:
- token: ${{ github.token }}
- repository: ${{ github.repository }}
- retain_days: ${{ github.event.inputs.days }}
- keep_minimum_runs: ${{ github.event.inputs.minimum_runs }}
- delete_workflow_pattern: ${{ github.event.inputs.delete_workflow_pattern }}
- delete_workflow_by_state_pattern: ${{ github.event.inputs.delete_workflow_by_state_pattern }}
- delete_run_by_conclusion_pattern: >-
- ${{
- startsWith(github.event.inputs.delete_run_by_conclusion_pattern, 'Unsuccessful:')
- && 'action_required,cancelled,failure,skipped'
- || github.event.inputs.delete_run_by_conclusion_pattern
- }}
- dry_run: ${{ github.event.inputs.dry_run }}
diff --git a/.github/workflows/cli-build-binary-and-optionally-release.yml b/.github/workflows/cli-build-binary-and-optionally-release.yml
deleted file mode 100644
index 0aefcd382075..000000000000
--- a/.github/workflows/cli-build-binary-and-optionally-release.yml
+++ /dev/null
@@ -1,122 +0,0 @@
-# Workflow that builds and tests the CLI binary executable
-name: CLI - Build binary and optionally release
-
-# Run on pushes to main branch and CLI tags, and on pull requests when CLI files change
-on:
- push:
- branches:
- - main
- tags:
- - "*-cli"
- pull_request:
- paths:
- - "openhands-cli/**"
-
-permissions:
- contents: write # needed to create releases or upload assets
-
-# Cancel previous runs if a new commit is pushed
-concurrency:
- group: ${{ github.workflow }}-${{ (github.head_ref && github.ref) || github.run_id }}
- cancel-in-progress: true
-
-jobs:
- build-binary:
- name: Build binary executable
- strategy:
- matrix:
- include:
- # Build on Ubuntu 22.04 for maximum GLIBC compatibility (GLIBC 2.31)
- - os: ubuntu-22.04
- platform: linux
- artifact_name: openhands-cli-linux
- # Build on macOS for macOS users
- - os: macos-15
- platform: macos
- artifact_name: openhands-cli-macos
- runs-on: ${{ matrix.os }}
-
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: 3.12
-
- - name: Install uv
- uses: astral-sh/setup-uv@v3
- with:
- version: "latest"
-
- - name: Install dependencies
- working-directory: openhands-cli
- run: |
- uv sync
-
- - name: Build binary executable
- working-directory: openhands-cli
- run: |
- ./build.sh --install-pyinstaller | tee output.log
- echo "Full output:"
- cat output.log
-
- if grep -q "❌" output.log; then
- echo "❌ Found failure marker in output"
- exit 1
- fi
-
- echo "✅ Build & test finished without ❌ markers"
-
- - name: Verify binary files exist
- run: |
- if ! ls openhands-cli/dist/openhands* 1> /dev/null 2>&1; then
- echo "❌ No binaries found to upload!"
- exit 1
- fi
- echo "✅ Found binaries to upload."
-
- - name: Upload binary artifact
- uses: actions/upload-artifact@v4
- with:
- name: ${{ matrix.artifact_name }}
- path: openhands-cli/dist/openhands*
- retention-days: 30
-
- create-github-release:
- name: Create GitHub Release
- runs-on: ubuntu-latest
- needs: build-binary
- if: startsWith(github.ref, 'refs/tags/')
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Download all artifacts
- uses: actions/download-artifact@v4
- with:
- path: artifacts
-
- - name: Prepare release assets
- run: |
- mkdir -p release-assets
- # Copy binaries with appropriate names for release
- if [ -f artifacts/openhands-cli-linux/openhands ]; then
- cp artifacts/openhands-cli-linux/openhands release-assets/openhands-linux
- fi
- if [ -f artifacts/openhands-cli-macos/openhands ]; then
- cp artifacts/openhands-cli-macos/openhands release-assets/openhands-macos
- fi
- ls -la release-assets/
-
- - name: Create GitHub Release
- uses: softprops/action-gh-release@v2
- with:
- files: release-assets/*
- draft: true
- prerelease: false
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/dispatch-to-docs.yml b/.github/workflows/dispatch-to-docs.yml
deleted file mode 100644
index 301cab5fa58c..000000000000
--- a/.github/workflows/dispatch-to-docs.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-name: Dispatch to docs repo
-
-on:
- push:
- branches: [main]
- paths:
- - 'docs/**'
- workflow_dispatch:
-
-jobs:
- dispatch:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- repo: ["OpenHands/docs"]
- steps:
- - name: Push to docs repo
- uses: peter-evans/repository-dispatch@v3
- with:
- token: ${{ secrets.ALLHANDS_BOT_GITHUB_PAT }}
- repository: ${{ matrix.repo }}
- event-type: update
- client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "module": "openhands", "branch": "main"}'
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 89cb645f5fac..4c882bda07d4 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -72,21 +72,3 @@ jobs:
- name: Run pre-commit hooks
working-directory: ./enterprise
run: pre-commit run --all-files --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml
-
- lint-cli-python:
- name: Lint CLI python
- runs-on: blacksmith-4vcpu-ubuntu-2204
- steps:
- - uses: actions/checkout@v4
- with:
- fetch-depth: 0
- - name: Set up python
- uses: useblacksmith/setup-python@v6
- with:
- python-version: 3.12
- cache: "pip"
- - name: Install pre-commit
- run: pip install pre-commit==4.2.0
- - name: Run pre-commit hooks
- working-directory: ./openhands-cli
- run: pre-commit run --all-files --config ./dev_config/python/.pre-commit-config.yaml
diff --git a/.github/workflows/mdx-lint.yml b/.github/workflows/mdx-lint.yml
deleted file mode 100644
index 34cd0b6a182c..000000000000
--- a/.github/workflows/mdx-lint.yml
+++ /dev/null
@@ -1,70 +0,0 @@
-# Workflow that checks MDX format in docs/ folder
-name: MDX Lint
-
-# Run on pushes to main and on pull requests that modify docs/ files
-on:
- push:
- branches:
- - main
- paths:
- - 'docs/**/*.mdx'
- pull_request:
- paths:
- - 'docs/**/*.mdx'
-
-# If triggered by a PR, it will be in the same group. However, each commit on main will be in its own unique group
-concurrency:
- group: ${{ github.workflow }}-${{ (github.head_ref && github.ref) || github.run_id }}
- cancel-in-progress: true
-
-jobs:
- mdx-lint:
- name: Lint MDX files
- runs-on: blacksmith-4vcpu-ubuntu-2204
- steps:
- - uses: actions/checkout@v4
-
- - name: Install Node.js 22
- uses: useblacksmith/setup-node@v5
- with:
- node-version: 22
-
- - name: Install MDX dependencies
- run: |
- npm install @mdx-js/mdx@3 glob@10
-
- - name: Validate MDX files
- run: |
- node -e "
- const {compile} = require('@mdx-js/mdx');
- const fs = require('fs');
- const path = require('path');
- const glob = require('glob');
-
- async function validateMDXFiles() {
- const files = glob.sync('docs/**/*.mdx');
- console.log('Found', files.length, 'MDX files to validate');
-
- let hasErrors = false;
-
- for (const file of files) {
- try {
- const content = fs.readFileSync(file, 'utf8');
- await compile(content);
- console.log('✅ MDX parsing successful for', file);
- } catch (err) {
- console.error('❌ MDX parsing failed for', file, ':', err.message);
- hasErrors = true;
- }
- }
-
- if (hasErrors) {
- console.error('\\n❌ Some MDX files have parsing errors. Please fix them before merging.');
- process.exit(1);
- } else {
- console.log('\\n✅ All MDX files are valid!');
- }
- }
-
- validateMDXFiles();
- "
diff --git a/.github/workflows/py-tests.yml b/.github/workflows/py-tests.yml
index 4506f1ea75e7..5c4c35f6bcb8 100644
--- a/.github/workflows/py-tests.yml
+++ b/.github/workflows/py-tests.yml
@@ -101,56 +101,11 @@ jobs:
path: ".coverage.enterprise.${{ matrix.python_version }}"
include-hidden-files: true
- # Run CLI unit tests
- test-cli-python:
- name: CLI Unit Tests
- runs-on: blacksmith-4vcpu-ubuntu-2404
- strategy:
- matrix:
- python-version: ["3.12"]
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
- with:
- fetch-depth: 0
-
- - name: Set up Python
- uses: useblacksmith/setup-python@v6
- with:
- python-version: ${{ matrix.python-version }}
-
- - name: Install uv
- uses: astral-sh/setup-uv@v3
- with:
- version: "latest"
-
- - name: Install dependencies
- working-directory: ./openhands-cli
- run: |
- uv sync --group dev
-
- - name: Run CLI unit tests
- working-directory: ./openhands-cli
- env:
- # write coverage to repo root so the merge step finds it
- COVERAGE_FILE: "${{ github.workspace }}/.coverage.openhands-cli.${{ matrix.python-version }}"
- run: |
- uv run pytest --forked -n auto -s \
- -p no:ddtrace -p no:ddtrace.pytest_bdd -p no:ddtrace.pytest_benchmark \
- tests --cov=openhands_cli --cov-branch
-
- - name: Store coverage file
- uses: actions/upload-artifact@v4
- with:
- name: coverage-openhands-cli
- path: ".coverage.openhands-cli.${{ matrix.python-version }}"
- include-hidden-files: true
-
coverage-comment:
name: Coverage Comment
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
- needs: [test-on-linux, test-enterprise, test-cli-python]
+ needs: [test-on-linux, test-enterprise]
permissions:
pull-requests: write
@@ -164,9 +119,6 @@ jobs:
pattern: coverage-*
merge-multiple: true
- - name: Create symlink for CLI source files
- run: ln -sf openhands-cli/openhands_cli openhands_cli
-
- name: Coverage comment
id: coverage_comment
uses: py-cov-action/python-coverage-comment-action@v3
diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml
index 89a64aa58a89..f4df10567f49 100644
--- a/.github/workflows/pypi-release.yml
+++ b/.github/workflows/pypi-release.yml
@@ -10,7 +10,6 @@ on:
type: choice
options:
- app server
- - cli
default: app server
push:
tags:
@@ -39,36 +38,3 @@ jobs:
run: ./build.sh
- name: publish
run: poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }}
-
- release-cli:
- name: Publish CLI to PyPI
- runs-on: ubuntu-latest
- # Run when manually dispatched for "cli" OR for tag pushes that contain '-cli'
- if: |
- (github.event_name == 'workflow_dispatch' && github.event.inputs.reason == 'cli')
- || (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && contains(github.ref, '-cli'))
- steps:
- - name: Checkout repository
- uses: actions/checkout@v4
-
- - name: Set up Python
- uses: actions/setup-python@v5
- with:
- python-version: 3.12
-
- - name: Install uv
- uses: astral-sh/setup-uv@v3
- with:
- version: "latest"
-
- - name: Build CLI package
- working-directory: openhands-cli
- run: |
- # Clean dist directory to avoid conflicts with binary builds
- rm -rf dist/
- uv build
-
- - name: Publish CLI to PyPI
- working-directory: openhands-cli
- run: |
- uv publish --token ${{ secrets.PYPI_TOKEN_OPENHANDS }}
diff --git a/.github/workflows/run-eval.yml b/.github/workflows/run-eval.yml
deleted file mode 100644
index d586a0b0a62c..000000000000
--- a/.github/workflows/run-eval.yml
+++ /dev/null
@@ -1,135 +0,0 @@
-# Run evaluation on a PR, after releases, or manually
-name: Run Eval
-
-# Runs when a PR is labeled with one of the "run-eval-" labels, after releases, or manually triggered
-on:
- pull_request:
- types: [labeled]
- release:
- types: [published]
- workflow_dispatch:
- inputs:
- branch:
- description: 'Branch to evaluate'
- required: true
- default: 'main'
- eval_instances:
- description: 'Number of evaluation instances'
- required: true
- default: '50'
- type: choice
- options:
- - '1'
- - '2'
- - '50'
- - '100'
- reason:
- description: 'Reason for manual trigger'
- required: false
- default: ''
-
-env:
- # Environment variable for the master GitHub issue number where all evaluation results will be commented
- # This should be set to the issue number where you want all evaluation results to be posted
- MASTER_EVAL_ISSUE_NUMBER: ${{ vars.MASTER_EVAL_ISSUE_NUMBER || '0' }}
-
-jobs:
- trigger-job:
- name: Trigger remote eval job
- if: ${{ (github.event_name == 'pull_request' && (github.event.label.name == 'run-eval-1' || github.event.label.name == 'run-eval-2' || github.event.label.name == 'run-eval-50' || github.event.label.name == 'run-eval-100')) || github.event_name == 'release' || github.event_name == 'workflow_dispatch' }}
- runs-on: blacksmith-4vcpu-ubuntu-2204
-
- steps:
- - name: Checkout branch
- uses: actions/checkout@v4
- with:
- ref: ${{ github.event_name == 'pull_request' && github.head_ref || (github.event_name == 'workflow_dispatch' && github.event.inputs.branch) || github.ref }}
-
- - name: Set evaluation parameters
- id: eval_params
- run: |
- REPO_URL="https://github.com/${{ github.repository }}"
- echo "Repository URL: $REPO_URL"
-
- # Determine branch based on trigger type
- if [[ "${{ github.event_name }}" == "pull_request" ]]; then
- EVAL_BRANCH="${{ github.head_ref }}"
- echo "PR Branch: $EVAL_BRANCH"
- elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
- EVAL_BRANCH="${{ github.event.inputs.branch }}"
- echo "Manual Branch: $EVAL_BRANCH"
- else
- # For release events, use the tag name or main branch
- EVAL_BRANCH="${{ github.ref_name }}"
- echo "Release Branch/Tag: $EVAL_BRANCH"
- fi
-
- # Determine evaluation instances based on trigger type
- if [[ "${{ github.event_name }}" == "pull_request" ]]; then
- if [[ "${{ github.event.label.name }}" == "run-eval-1" ]]; then
- EVAL_INSTANCES="1"
- elif [[ "${{ github.event.label.name }}" == "run-eval-2" ]]; then
- EVAL_INSTANCES="2"
- elif [[ "${{ github.event.label.name }}" == "run-eval-50" ]]; then
- EVAL_INSTANCES="50"
- elif [[ "${{ github.event.label.name }}" == "run-eval-100" ]]; then
- EVAL_INSTANCES="100"
- fi
- elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
- EVAL_INSTANCES="${{ github.event.inputs.eval_instances }}"
- else
- # For release events, default to 50 instances
- EVAL_INSTANCES="50"
- fi
-
- echo "Evaluation instances: $EVAL_INSTANCES"
- echo "repo_url=$REPO_URL" >> $GITHUB_OUTPUT
- echo "eval_branch=$EVAL_BRANCH" >> $GITHUB_OUTPUT
- echo "eval_instances=$EVAL_INSTANCES" >> $GITHUB_OUTPUT
-
- - name: Trigger remote job
- run: |
- # Determine PR number for the remote evaluation system
- if [[ "${{ github.event_name }}" == "pull_request" ]]; then
- PR_NUMBER="${{ github.event.pull_request.number }}"
- else
- # For non-PR triggers, use the master issue number as PR number
- PR_NUMBER="${{ env.MASTER_EVAL_ISSUE_NUMBER }}"
- fi
-
- curl -X POST \
- -H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
- -H "Accept: application/vnd.github+json" \
- -d "{\"ref\": \"main\", \"inputs\": {\"github-repo\": \"${{ steps.eval_params.outputs.repo_url }}\", \"github-branch\": \"${{ steps.eval_params.outputs.eval_branch }}\", \"pr-number\": \"${PR_NUMBER}\", \"eval-instances\": \"${{ steps.eval_params.outputs.eval_instances }}\"}}" \
- https://api.github.com/repos/OpenHands/evaluation/actions/workflows/create-branch.yml/dispatches
-
- # Send Slack message
- if [[ "${{ github.event_name }}" == "pull_request" ]]; then
- TRIGGER_URL="https://github.com/${{ github.repository }}/pull/${{ github.event.pull_request.number }}"
- slack_text="PR $TRIGGER_URL has triggered evaluation on ${{ steps.eval_params.outputs.eval_instances }} instances..."
- elif [[ "${{ github.event_name }}" == "release" ]]; then
- TRIGGER_URL="https://github.com/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
- slack_text="Release $TRIGGER_URL has triggered evaluation on ${{ steps.eval_params.outputs.eval_instances }} instances..."
- else
- TRIGGER_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
- slack_text="Manual trigger (${{ github.event.inputs.reason || 'No reason provided' }}) has triggered evaluation on ${{ steps.eval_params.outputs.eval_instances }} instances for branch ${{ steps.eval_params.outputs.eval_branch }}..."
- fi
-
- curl -X POST -H 'Content-type: application/json' --data '{"text":"'"$slack_text"'"}' \
- https://hooks.slack.com/services/${{ secrets.SLACK_TOKEN }}
-
- - name: Comment on issue/PR
- uses: KeisukeYamashita/create-comment@v1
- with:
- # For PR triggers, comment on the PR. For other triggers, comment on the master issue
- number: ${{ github.event_name == 'pull_request' && github.event.pull_request.number || env.MASTER_EVAL_ISSUE_NUMBER }}
- unique: false
- comment: |
- **Evaluation Triggered**
-
- **Trigger:** ${{ github.event_name == 'pull_request' && format('Pull Request #{0}', github.event.pull_request.number) || (github.event_name == 'release' && 'Release') || format('Manual Trigger: {0}', github.event.inputs.reason || 'No reason provided') }}
- **Branch:** ${{ steps.eval_params.outputs.eval_branch }}
- **Instances:** ${{ steps.eval_params.outputs.eval_instances }}
- **Commit:** ${{ github.sha }}
-
- Running evaluation on the specified branch. Once eval is done, the results will be posted here.
diff --git a/COMMUNITY.md b/COMMUNITY.md
index 1c49b3932e62..4221346f964a 100644
--- a/COMMUNITY.md
+++ b/COMMUNITY.md
@@ -1,43 +1,45 @@
-# 🙌 The OpenHands Community
+# The OpenHands Community
-The OpenHands community is built around the belief that (1) AI and AI agents are going to fundamentally change the way
-we build software, and (2) if this is true, we should do everything we can to make sure that the benefits provided by
-such powerful technology are accessible to everyone.
+OpenHands is a community of engineers, academics, and enthusiasts reimagining software development for an AI-powered world.
-If this resonates with you, we'd love to have you join us in our quest!
+## Mission
-## 🤝 How to Join
+It’s very clear that AI is changing software development. We want the developer community to drive that change organically, through open source.
-Check out our [How to Join the Community section.](https://github.com/OpenHands/OpenHands?tab=readme-ov-file#-how-to-join-the-community)
+So we’re not just building friendly interfaces for AI-driven development. We’re publishing _building blocks_ that empower developers to create new experiences, tailored to your own habits, needs, and imagination.
-## 💪 Becoming a Contributor
+## Ethos
-We welcome contributions from everyone! Whether you're a developer, a researcher, or simply enthusiastic about advancing
-the field of software engineering with AI, there are many ways to get involved:
+We have two core values: **high openness** and **high agency**. While we don’t expect everyone in the community to embody these values, we want to establish them as norms.
-- **Code Contributions:** Help us develop new core functionality, improve our agents, improve the frontend and other
-interfaces, or anything else that would help make OpenHands better.
-- **Research and Evaluation:** Contribute to our understanding of LLMs in software engineering, participate in
-evaluating the models, or suggest improvements.
-- **Feedback and Testing:** Use the OpenHands toolset, report bugs, suggest features, or provide feedback on usability.
+### High Openness
-For details, please check [CONTRIBUTING.md](./CONTRIBUTING.md).
+We welcome anyone and everyone into our community by default. You don’t have to be a software developer to help us build. You don’t have to be pro-AI to help us learn.
-## Code of Conduct
+Our plans, our work, our successes, and our failures are all public record. We want the world to see not just the fruits of our work, but the whole process of growing it.
-We have a [Code of Conduct](./CODE_OF_CONDUCT.md) that we expect all contributors to adhere to.
-Long story short, we are aiming for an open, welcoming, diverse, inclusive, and healthy community.
-All contributors are expected to contribute to building this sort of community.
+We welcome thoughtful criticism, whether it’s a comment on a PR or feedback on the community as a whole.
-## 🛠️ Becoming a Maintainer
+### High Agency
-For contributors who have made significant and sustained contributions to the project, there is a possibility of joining
-the maintainer team. The process for this is as follows:
+Everyone should feel empowered to contribute to OpenHands. Whether it’s by making a PR, hosting an event, sharing feedback, or just asking a question, don’t hold back!
-1. Any contributor who has made sustained and high-quality contributions to the codebase can be nominated by any
-maintainer. If you feel that you may qualify you can reach out to any of the maintainers that have reviewed your PRs and ask if you can be nominated.
-2. Once a maintainer nominates a new maintainer, there will be a discussion period among the maintainers for at least 3 days.
-3. If no concerns are raised the nomination will be accepted by acclamation, and if concerns are raised there will be a discussion and possible vote.
+OpenHands gives everyone the building blocks to create state-of-the-art developer experiences. We experiment constantly and love building new things.
-Note that just making many PRs does not immediately imply that you will become a maintainer. We will be looking
-at sustained high-quality contributions over a period of time, as well as good teamwork and adherence to our [Code of Conduct](./CODE_OF_CONDUCT.md).
+Coding, development practices, and communities are changing rapidly. We won’t hesitate to change direction and make big bets.
+
+## Relationship to All Hands
+
+OpenHands is supported by the for-profit organization [All Hands AI, Inc](https://www.all-hands.dev/).
+
+All Hands was founded by three of the first major contributors to OpenHands:
+
+- Xingyao Wang, a UIUC PhD candidate who got OpenHands to the top of the SWE-bench leaderboards
+- Graham Neubig, a CMU Professor who rallied the academic community around OpenHands
+- Robert Brennan, a software engineer who architected the user-facing features of OpenHands
+
+All Hands is an important part of the OpenHands ecosystem. We’ve raised over $20M--mainly to hire developers and researchers who can work on OpenHands full-time, and to provide them with expensive infrastructure. ([Join us!](https://allhandsai.applytojob.com/apply/))
+
+But we see OpenHands as much larger, and ultimately more important, than All Hands. When our financial responsibility to investors is at odds with our social responsibility to the community—as it inevitably will be, from time to time—we promise to navigate that conflict thoughtfully and transparently.
+
+At some point, we may transfer custody of OpenHands to an open source foundation. But for now, the [Benevolent Dictator approach](http://www.catb.org/~esr/writings/cathedral-bazaar/homesteading/ar01s16.html) helps us move forward with speed and intention. If we ever forget the “benevolent” part, please: fork us.
diff --git a/Development.md b/Development.md
index 8b524be5116b..bfa057efc1c7 100644
--- a/Development.md
+++ b/Development.md
@@ -91,14 +91,14 @@ make run
#### Option B: Individual Server Startup
- **Start the Backend Server:** If you prefer, you can start the backend server independently to focus on
-backend-related tasks or configurations.
+ backend-related tasks or configurations.
```bash
make start-backend
```
- **Start the Frontend Server:** Similarly, you can start the frontend server on its own to work on frontend-related
-components or interface enhancements.
+ components or interface enhancements.
```bash
make start-frontend
```
@@ -110,6 +110,7 @@ You can use OpenHands to develop and improve OpenHands itself! This is a powerfu
#### Quick Start
1. **Build and run OpenHands:**
+
```bash
export INSTALL_DOCKER=0
export RUNTIME=local
@@ -117,6 +118,7 @@ You can use OpenHands to develop and improve OpenHands itself! This is a powerfu
```
2. **Access the interface:**
+
- Local development: http://localhost:3001
- Remote/cloud environments: Use the appropriate external URL
@@ -199,6 +201,6 @@ Here's a guide to the important documentation files in the repository:
- [/containers/README.md](./containers/README.md): Information about Docker containers and deployment
- [/tests/unit/README.md](./tests/unit/README.md): Guide to writing and running unit tests
- [/evaluation/README.md](./evaluation/README.md): Documentation for the evaluation framework and benchmarks
-- [/microagents/README.md](./microagents/README.md): Information about the microagents architecture and implementation
+- [/skills/README.md](./skills/README.md): Information about the skills architecture and implementation
- [/openhands/server/README.md](./openhands/server/README.md): Server implementation details and API documentation
- [/openhands/runtime/README.md](./openhands/runtime/README.md): Documentation for the runtime environment and execution model
diff --git a/README.md b/README.md
index cd47210ef1de..9fabb37a6ef1 100644
--- a/README.md
+++ b/README.md
@@ -1,22 +1,18 @@
-
-
OpenHands: Code Less, Make More
+
+
OpenHands: AI-Driven Development
-Welcome to OpenHands (formerly OpenDevin), a platform for software development agents powered by AI.
-
-OpenHands agents can do anything a human developer can: modify code, run commands, browse the web,
-call APIs, and yes—even copy code snippets from StackOverflow.
-
-Learn more at [docs.all-hands.dev](https://docs.all-hands.dev), or [sign up for OpenHands Cloud](https://app.all-hands.dev) to get started.
-
-
-> [!IMPORTANT]
-> **Upcoming change**: We are renaming our GitHub Org from `All-Hands-AI` to `OpenHands` on October 20th, 2025.
-> Check the [tracking issue](https://github.com/All-Hands-AI/OpenHands/issues/11376) for more information.
-
-
-> [!IMPORTANT]
-> Using OpenHands for work? We'd love to chat! Fill out
-> [this short form](https://docs.google.com/forms/d/e/1FAIpQLSet3VbGaz8z32gW9Wm-Grl4jpt5WgMXPgJ4EDPVmCETCBpJtQ/viewform)
-> to join our Design Partner program, where you'll get early access to commercial features and the opportunity to provide input on our product roadmap.
-
-## ☁️ OpenHands Cloud
-The easiest way to get started with OpenHands is on [OpenHands Cloud](https://app.all-hands.dev),
-which comes with $10 in free credits for new users.
-
-## 💻 Running OpenHands Locally
-
-### Option 1: CLI Launcher (Recommended)
-
-The easiest way to run OpenHands locally is using the CLI launcher with [uv](https://docs.astral.sh/uv/). This provides better isolation from your current project's virtual environment and is required for OpenHands' default MCP servers.
-
-**Install uv** (if you haven't already):
-
-See the [uv installation guide](https://docs.astral.sh/uv/getting-started/installation/) for the latest installation instructions for your platform.
-
-**Launch OpenHands**:
-```bash
-# Launch the GUI server
-uvx --python 3.12 openhands serve
-
-# Or launch the CLI
-uvx --python 3.12 openhands
-```
-
-You'll find OpenHands running at [http://localhost:3000](http://localhost:3000) (for GUI mode)!
-
-### Option 2: Docker
-
-
-Click to expand Docker command
-
-You can also run OpenHands directly with Docker:
-
-```bash
-docker pull docker.openhands.dev/openhands/runtime:0.62-nikolaik
-
-docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.openhands.dev/openhands/runtime:0.62-nikolaik \
- -e LOG_ALL_EVENTS=true \
- -v /var/run/docker.sock:/var/run/docker.sock \
- -v ~/.openhands:/.openhands \
- -p 3000:3000 \
- --add-host host.docker.internal:host-gateway \
- --name openhands-app \
- docker.openhands.dev/openhands/openhands:0.62
-```
-
-
-
-> **Note**: If you used OpenHands before version 0.44, you may want to run `mv ~/.openhands-state ~/.openhands` to migrate your conversation history to the new location.
-
-> [!WARNING]
-> On a public network? See our [Hardened Docker Installation Guide](https://docs.all-hands.dev/usage/runtimes/docker#hardened-docker-installation)
-> to secure your deployment by restricting network binding and implementing additional security measures.
-
-### Getting Started
-
-When you open the application, you'll be asked to choose an LLM provider and add an API key.
-[Anthropic's Claude Sonnet 4.5](https://www.anthropic.com/api) (`anthropic/claude-sonnet-4-5-20250929`)
-works best, but you have [many options](https://docs.all-hands.dev/usage/llms).
-
-See the [Running OpenHands](https://docs.all-hands.dev/usage/installation) guide for
-system requirements and more information.
-
-## 💡 Other ways to run OpenHands
-
-> [!WARNING]
-> OpenHands is meant to be run by a single user on their local workstation.
-> It is not appropriate for multi-tenant deployments where multiple users share the same instance. There is no built-in authentication, isolation, or scalability.
->
-> If you're interested in running OpenHands in a multi-tenant environment, check out the source-available, commercially-licensed
-> [OpenHands Cloud Helm Chart](https://github.com/openHands/OpenHands-cloud)
+
-You can [connect OpenHands to your local filesystem](https://docs.all-hands.dev/usage/runtimes/docker#connecting-to-your-filesystem),
-interact with it via a [friendly CLI](https://docs.all-hands.dev/usage/how-to/cli-mode),
-run OpenHands in a scriptable [headless mode](https://docs.all-hands.dev/usage/how-to/headless-mode),
-or run it on tagged issues with [a github action](https://docs.all-hands.dev/usage/how-to/github-action).
+🙌 Welcome to OpenHands, a [community](COMMUNITY.md) focused on AI-driven development. We’d love for you to [join us on Slack](https://dub.sh/openhands).
-Visit [Running OpenHands](https://docs.all-hands.dev/usage/installation) for more information and setup instructions.
+There are a few ways to work with OpenHands:
-If you want to modify the OpenHands source code, check out [Development.md](https://github.com/OpenHands/OpenHands/blob/main/Development.md).
+### OpenHands Software Agent SDK
+The SDK is a composable Python library that contains all of our agentic tech. It's the engine that powers everything else below.
-Having issues? The [Troubleshooting Guide](https://docs.all-hands.dev/usage/troubleshooting) can help.
+Define agents in code, then run them locally, or scale to 1000s of agents in the cloud.
-## 📖 Documentation
+[Check out the docs](https://docs.openhands.dev/sdk) or [view the source](https://github.com/OpenHands/software-agent-sdk/)
-To learn more about the project, and for tips on using OpenHands,
-check out our [documentation](https://docs.all-hands.dev/usage/getting-started).
+### OpenHands CLI
+The CLI is the easiest way to start using OpenHands. The experience will be familiar to anyone who has worked
+with e.g. Claude Code or Codex. You can power it with Claude, GPT, or any other LLM.
-There you'll find resources on how to use different LLM providers,
-troubleshooting resources, and advanced configuration options.
+[Check out the docs](https://docs.openhands.dev/openhands/usage/run-openhands/cli-mode) or [view the source](https://github.com/OpenHands/OpenHands-CLI)
-## 🤝 How to Join the Community
+### OpenHands Local GUI
+Use the Local GUI for running agents on your laptop. It comes with a REST API and a single-page React application.
+The experience will be familiar to anyone who has used Devin or Jules.
-OpenHands is a community-driven project, and we welcome contributions from everyone. We do most of our communication
-through Slack, so this is the best place to start, but we also are happy to have you contact us on Github:
+[Check out the docs](https://docs.openhands.dev/openhands/usage/run-openhands/local-setup) or view the source in this repo.
-- [Join our Slack workspace](https://all-hands.dev/joinslack) - Here we talk about research, architecture, and future development.
-- [Read or post Github Issues](https://github.com/OpenHands/OpenHands/issues) - Check out the issues we're working on, or add your own ideas.
+### OpenHands Cloud
+This is a deployment of OpenHands GUI, running on hosted infrastructure.
-See more about the community in [COMMUNITY.md](./COMMUNITY.md) or find details on contributing in [CONTRIBUTING.md](./CONTRIBUTING.md).
+You can try it with a free $10 credit by [signing in with your GitHub account](https://app.all-hands.dev).
-## 📈 Progress
+OpenHands Cloud comes with source-available features and integrations:
+- Integrations with Slack, Jira, and Linear
+- Multi-user support
+- RBAC and permissions
+- Collaboration features (e.g., conversation sharing)
-See the monthly OpenHands roadmap [here](https://github.com/orgs/OpenHands/projects/1) (updated at the maintainer's meeting at the end of each month).
+### OpenHands Enterprise
+Large enterprises can work with us to self-host OpenHands Cloud in their own VPC, via Kubernetes.
+OpenHands Enterprise can also work with the CLI and SDK above.
-
-
-
-
-
+OpenHands Enterprise is source-available--you can see all the source code here in the enterprise/ directory,
+but you'll need to purchase a license if you want to run it for more than one month.
-## 📜 License
+Enterprise contracts also come with extended support and access to our research team.
-Distributed under the MIT License, with the exception of the `enterprise/` folder. See [`LICENSE`](./LICENSE) for more information.
+Learn more at [openhands.dev/enterprise](https://openhands.dev/enterprise)
-## 🙏 Acknowledgements
+### Everything Else
-OpenHands is built by a large number of contributors, and every contribution is greatly appreciated! We also build upon other open source projects, and we are deeply thankful for their work.
+Check out our [Product Roadmap](https://github.com/orgs/openhands/projects/1), and feel free to
+[open up an issue](https://github.com/OpenHands/OpenHands/issues) if there's something you'd like to see!
-For a list of open source projects and licenses used in OpenHands, please see our [CREDITS.md](./CREDITS.md) file.
+You might also be interested in our [evaluation infrastructure](https://github.com/OpenHands/benchmarks), our [chrome extension](https://github.com/OpenHands/openhands-chrome-extension/), or our [Theory-of-Mind module](https://github.com/OpenHands/ToM-SWE).
-## 📚 Cite
+All our work is available under the MIT license, except for the `enterprise/` directory in this repository (see the [enterprise license](enterprise/LICENSE) for details).
+The core `openhands` and `agent-server` Docker images are fully MIT-licensed as well.
-```
-@inproceedings{
- wang2025openhands,
- title={OpenHands: An Open Platform for {AI} Software Developers as Generalist Agents},
- author={Xingyao Wang and Boxuan Li and Yufan Song and Frank F. Xu and Xiangru Tang and Mingchen Zhuge and Jiayi Pan and Yueqi Song and Bowen Li and Jaskirat Singh and Hoang H. Tran and Fuqiang Li and Ren Ma and Mingzhang Zheng and Bill Qian and Yanjun Shao and Niklas Muennighoff and Yizhe Zhang and Binyuan Hui and Junyang Lin and Robert Brennan and Hao Peng and Heng Ji and Graham Neubig},
- booktitle={The Thirteenth International Conference on Learning Representations},
- year={2025},
- url={https://openreview.net/forum?id=OJd3ayDDoF}
-}
-```
+If you need help with anything, or just want to chat, [come find us on Slack](https://dub.sh/openhands).
diff --git a/containers/app/Dockerfile b/containers/app/Dockerfile
index 372e5b1ef691..1fc86c023f59 100644
--- a/containers/app/Dockerfile
+++ b/containers/app/Dockerfile
@@ -73,7 +73,7 @@ ENV VIRTUAL_ENV=/app/.venv \
COPY --chown=openhands:openhands --chmod=770 --from=backend-builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
-COPY --chown=openhands:openhands --chmod=770 ./microagents ./microagents
+COPY --chown=openhands:openhands --chmod=770 ./skills ./skills
COPY --chown=openhands:openhands --chmod=770 ./openhands ./openhands
COPY --chown=openhands:openhands --chmod=777 ./openhands/runtime/plugins ./openhands/runtime/plugins
COPY --chown=openhands:openhands pyproject.toml poetry.lock README.md MANIFEST.in LICENSE ./
diff --git a/dev_config/python/.pre-commit-config.yaml b/dev_config/python/.pre-commit-config.yaml
index fe3f137cea95..2063e60562cb 100644
--- a/dev_config/python/.pre-commit-config.yaml
+++ b/dev_config/python/.pre-commit-config.yaml
@@ -3,9 +3,9 @@ repos:
rev: v5.0.0
hooks:
- id: trailing-whitespace
- exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
+ exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/)
- id: end-of-file-fixer
- exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
+ exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/)
- id: check-yaml
args: ["--allow-multiple-documents"]
- id: debug-statements
@@ -28,12 +28,12 @@ repos:
entry: ruff check --config dev_config/python/ruff.toml
types_or: [python, pyi, jupyter]
args: [--fix, --unsafe-fixes]
- exclude: ^(third_party/|enterprise/|openhands-cli/)
+ exclude: ^(third_party/|enterprise/)
# Run the formatter.
- id: ruff-format
entry: ruff format --config dev_config/python/ruff.toml
types_or: [python, pyi, jupyter]
- exclude: ^(third_party/|enterprise/|openhands-cli/)
+ exclude: ^(third_party/|enterprise/)
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.15.0
diff --git a/enterprise/experiments/experiment_manager.py b/enterprise/experiments/experiment_manager.py
index 7c53f274146b..1c212a03916a 100644
--- a/enterprise/experiments/experiment_manager.py
+++ b/enterprise/experiments/experiment_manager.py
@@ -5,12 +5,8 @@
EXPERIMENT_SYSTEM_PROMPT_EXPERIMENT,
)
from experiments.experiment_versions import (
- handle_condenser_max_step_experiment,
handle_system_prompt_experiment,
)
-from experiments.experiment_versions._004_condenser_max_step_experiment import (
- handle_condenser_max_step_experiment__v1,
-)
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.core.logger import openhands_logger as logger
@@ -31,10 +27,6 @@ def run_agent_variant_tests__v1(
)
return agent
- agent = handle_condenser_max_step_experiment__v1(
- user_id, conversation_id, agent
- )
-
if EXPERIMENT_SYSTEM_PROMPT_EXPERIMENT:
agent = agent.model_copy(
update={'system_prompt_filename': 'system_prompt_long_horizon.j2'}
@@ -60,20 +52,7 @@ def run_conversation_variant_test(
"""
logger.debug(
'experiment_manager:run_conversation_variant_test:started',
- extra={'user_id': user_id},
- )
-
- # Skip all experiment processing if the experiment manager is disabled
- if not ENABLE_EXPERIMENT_MANAGER:
- logger.info(
- 'experiment_manager:run_conversation_variant_test:skipped',
- extra={'reason': 'experiment_manager_disabled'},
- )
- return conversation_settings
-
- # Apply conversation-scoped experiments
- conversation_settings = handle_condenser_max_step_experiment(
- user_id, conversation_id, conversation_settings
+ extra={'user_id': user_id, 'conversation_id': conversation_id},
)
return conversation_settings
diff --git a/enterprise/integrations/github/github_manager.py b/enterprise/integrations/github/github_manager.py
index 1d16dd40d7f3..00ad5124cebd 100644
--- a/enterprise/integrations/github/github_manager.py
+++ b/enterprise/integrations/github/github_manager.py
@@ -22,6 +22,7 @@
HOST_URL,
OPENHANDS_RESOLVER_TEMPLATES_DIR,
)
+from integrations.v1_utils import get_saas_user_auth
from jinja2 import Environment, FileSystemLoader
from pydantic import SecretStr
from server.auth.constants import GITHUB_APP_CLIENT_ID, GITHUB_APP_PRIVATE_KEY
@@ -164,8 +165,13 @@ async def receive_message(self, message: Message):
)
if await self.is_job_requested(message):
+ payload = message.message.get('payload', {})
+ user_id = payload['sender']['id']
+ keycloak_user_id = await self.token_manager.get_user_id_from_idp_user_id(
+ user_id, ProviderType.GITHUB
+ )
github_view = await GithubFactory.create_github_view_from_payload(
- message, self.token_manager
+ message, keycloak_user_id
)
logger.info(
f'[GitHub] Creating job for {github_view.user_info.username} in {github_view.full_repo_name}#{github_view.issue_number}'
@@ -282,8 +288,15 @@ async def start_job(self, github_view: ResolverViewInterface):
f'[Github]: Error summarizing issue solvability: {str(e)}'
)
+ saas_user_auth = await get_saas_user_auth(
+ github_view.user_info.keycloak_user_id, self.token_manager
+ )
+
await github_view.create_new_conversation(
- self.jinja_env, secret_store.provider_tokens, convo_metadata
+ self.jinja_env,
+ secret_store.provider_tokens,
+ convo_metadata,
+ saas_user_auth,
)
conversation_id = github_view.conversation_id
@@ -292,18 +305,19 @@ async def start_job(self, github_view: ResolverViewInterface):
f'[GitHub] Created conversation {conversation_id} for user {user_info.username}'
)
- # Create a GithubCallbackProcessor
- processor = GithubCallbackProcessor(
- github_view=github_view,
- send_summary_instruction=True,
- )
+ if not github_view.v1:
+ # Create a GithubCallbackProcessor
+ processor = GithubCallbackProcessor(
+ github_view=github_view,
+ send_summary_instruction=True,
+ )
- # Register the callback processor
- register_callback_processor(conversation_id, processor)
+ # Register the callback processor
+ register_callback_processor(conversation_id, processor)
- logger.info(
- f'[Github] Registered callback processor for conversation {conversation_id}'
- )
+ logger.info(
+ f'[Github] Registered callback processor for conversation {conversation_id}'
+ )
# Send message with conversation link
conversation_link = CONVERSATION_URL.format(conversation_id)
diff --git a/enterprise/integrations/github/github_view.py b/enterprise/integrations/github/github_view.py
index 435dec8b3f60..4d15349dce63 100644
--- a/enterprise/integrations/github/github_view.py
+++ b/enterprise/integrations/github/github_view.py
@@ -1,4 +1,5 @@
-from uuid import uuid4
+from dataclasses import dataclass
+from uuid import UUID, uuid4
from github import Github, GithubIntegration
from github.Issue import Issue
@@ -8,6 +9,7 @@
WorkflowRunStatus,
)
from integrations.models import Message
+from integrations.resolver_context import ResolverUserContext
from integrations.types import ResolverViewInterface, UserData
from integrations.utils import (
ENABLE_PROACTIVE_CONVERSATION_STARTERS,
@@ -17,7 +19,6 @@
has_exact_mention,
)
from jinja2 import Environment
-from pydantic.dataclasses import dataclass
from server.auth.constants import GITHUB_APP_CLIENT_ID, GITHUB_APP_PRIVATE_KEY
from server.auth.token_manager import TokenManager
from server.config import get_config
@@ -26,14 +27,24 @@
from storage.saas_secrets_store import SaasSecretsStore
from storage.saas_settings_store import SaasSettingsStore
+from openhands.agent_server.models import SendMessageRequest
+from openhands.app_server.app_conversation.app_conversation_models import (
+ AppConversationStartRequest,
+ AppConversationStartTaskStatus,
+)
+from openhands.app_server.config import get_app_conversation_service
+from openhands.app_server.services.injector import InjectorState
+from openhands.app_server.user.specifiy_user_context import USER_CONTEXT_ATTR
from openhands.core.logger import openhands_logger as logger
from openhands.integrations.github.github_service import GithubServiceImpl
from openhands.integrations.provider import PROVIDER_TOKEN_TYPE, ProviderType
from openhands.integrations.service_types import Comment
+from openhands.sdk import TextContent
from openhands.server.services.conversation_service import (
initialize_conversation,
start_conversation,
)
+from openhands.server.user_auth.user_auth import UserAuth
from openhands.storage.data_models.conversation_metadata import (
ConversationMetadata,
ConversationTrigger,
@@ -76,6 +87,30 @@ async def get_user_proactive_conversation_setting(user_id: str | None) -> bool:
return settings.enable_proactive_conversation_starters
+async def get_user_v1_enabled_setting(user_id: str) -> bool:
+ """Get the user's V1 conversation API setting.
+
+ Args:
+ user_id: The keycloak user ID
+
+ Returns:
+ True if V1 conversations are enabled for this user, False otherwise
+ """
+ config = get_config()
+ settings_store = SaasSettingsStore(
+ user_id=user_id, session_maker=session_maker, config=config
+ )
+
+ settings = await call_sync_from_async(
+ settings_store.get_user_settings_by_keycloak_id, user_id
+ )
+
+ if not settings or settings.v1_enabled is None:
+ return False
+
+ return settings.v1_enabled
+
+
# =================================================
# SECTION: Github view types
# =================================================
@@ -96,6 +131,7 @@ class GithubIssue(ResolverViewInterface):
title: str
description: str
previous_comments: list[Comment]
+ v1: bool
async def _load_resolver_context(self):
github_service = GithubServiceImpl(
@@ -158,7 +194,36 @@ async def create_new_conversation(
jinja_env: Environment,
git_provider_tokens: PROVIDER_TOKEN_TYPE,
conversation_metadata: ConversationMetadata,
+ saas_user_auth: UserAuth,
+ ):
+ v1_enabled = await get_user_v1_enabled_setting(self.user_info.keycloak_user_id)
+ logger.info(
+ f'[GitHub V1]: User flag found for {self.user_info.keycloak_user_id} is {v1_enabled}'
+ )
+ if v1_enabled:
+ try:
+ # Use V1 app conversation service
+ await self._create_v1_conversation(
+ jinja_env, saas_user_auth, conversation_metadata
+ )
+ return
+
+ except Exception as e:
+ logger.warning(f'Error checking V1 settings, falling back to V0: {e}')
+
+ # Use existing V0 conversation service
+ await self._create_v0_conversation(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+
+ async def _create_v0_conversation(
+ self,
+ jinja_env: Environment,
+ git_provider_tokens: PROVIDER_TOKEN_TYPE,
+ conversation_metadata: ConversationMetadata,
):
+ """Create conversation using the legacy V0 system."""
+ logger.info('[GitHub V1]: Creating V0 conversation')
custom_secrets = await self._get_user_secrets()
user_instructions, conversation_instructions = await self._get_instructions(
@@ -177,6 +242,78 @@ async def create_new_conversation(
conversation_instructions=conversation_instructions,
)
+ async def _create_v1_conversation(
+ self,
+ jinja_env: Environment,
+ saas_user_auth: UserAuth,
+ conversation_metadata: ConversationMetadata,
+ ):
+ """Create conversation using the new V1 app conversation system."""
+ logger.info('[GitHub V1]: Creating V1 conversation')
+
+ user_instructions, conversation_instructions = await self._get_instructions(
+ jinja_env
+ )
+
+ # Create the initial message request
+ initial_message = SendMessageRequest(
+ role='user', content=[TextContent(text=user_instructions)]
+ )
+
+ # Create the GitHub V1 callback processor
+ github_callback_processor = self._create_github_v1_callback_processor()
+
+ # Get the app conversation service and start the conversation
+ injector_state = InjectorState()
+
+ # Create the V1 conversation start request with the callback processor
+ start_request = AppConversationStartRequest(
+ conversation_id=UUID(conversation_metadata.conversation_id),
+ system_message_suffix=conversation_instructions,
+ initial_message=initial_message,
+ selected_repository=self.full_repo_name,
+ git_provider=ProviderType.GITHUB,
+ title=f'GitHub Issue #{self.issue_number}: {self.title}',
+ trigger=ConversationTrigger.RESOLVER,
+ processors=[
+ github_callback_processor
+ ], # Pass the callback processor directly
+ )
+
+ # Set up the GitHub user context for the V1 system
+ github_user_context = ResolverUserContext(saas_user_auth=saas_user_auth)
+ setattr(injector_state, USER_CONTEXT_ATTR, github_user_context)
+
+ async with get_app_conversation_service(
+ injector_state
+ ) as app_conversation_service:
+ async for task in app_conversation_service.start_app_conversation(
+ start_request
+ ):
+ if task.status == AppConversationStartTaskStatus.ERROR:
+ logger.error(f'Failed to start V1 conversation: {task.detail}')
+ raise RuntimeError(
+ f'Failed to start V1 conversation: {task.detail}'
+ )
+
+ self.v1 = True
+
+ def _create_github_v1_callback_processor(self):
+ """Create a V1 callback processor for GitHub integration."""
+ from openhands.app_server.event_callback.github_v1_callback_processor import (
+ GithubV1CallbackProcessor,
+ )
+
+ # Create and return the GitHub V1 callback processor
+ return GithubV1CallbackProcessor(
+ github_view_data={
+ 'issue_number': self.issue_number,
+ 'full_repo_name': self.full_repo_name,
+ 'installation_id': self.installation_id,
+ },
+ send_summary_instruction=self.send_summary_instruction,
+ )
+
@dataclass
class GithubIssueComment(GithubIssue):
@@ -292,6 +429,24 @@ async def _get_instructions(self, jinja_env: Environment) -> tuple[str, str]:
return user_instructions, conversation_instructions
+ def _create_github_v1_callback_processor(self):
+ """Create a V1 callback processor for GitHub integration."""
+ from openhands.app_server.event_callback.github_v1_callback_processor import (
+ GithubV1CallbackProcessor,
+ )
+
+ # Create and return the GitHub V1 callback processor
+ return GithubV1CallbackProcessor(
+ github_view_data={
+ 'issue_number': self.issue_number,
+ 'full_repo_name': self.full_repo_name,
+ 'installation_id': self.installation_id,
+ 'comment_id': self.comment_id,
+ },
+ inline_pr_comment=True,
+ send_summary_instruction=self.send_summary_instruction,
+ )
+
@dataclass
class GithubFailingAction:
@@ -605,7 +760,7 @@ def get_full_repo_name(repo_obj: dict) -> str:
@staticmethod
async def create_github_view_from_payload(
- message: Message, token_manager: TokenManager
+ message: Message, keycloak_user_id: str
) -> ResolverViewInterface:
"""Create the appropriate class (GithubIssue or GithubPRComment) based on the payload.
Also return metadata about the event (e.g., action type).
@@ -615,17 +770,10 @@ async def create_github_view_from_payload(
user_id = payload['sender']['id']
username = payload['sender']['login']
- keyloak_user_id = await token_manager.get_user_id_from_idp_user_id(
- user_id, ProviderType.GITHUB
- )
-
- if keyloak_user_id is None:
- logger.warning(f'Got invalid keyloak user id for GitHub User {user_id} ')
-
selected_repo = GithubFactory.get_full_repo_name(repo_obj)
is_public_repo = not repo_obj.get('private', True)
user_info = UserData(
- user_id=user_id, username=username, keycloak_user_id=keyloak_user_id
+ user_id=user_id, username=username, keycloak_user_id=keycloak_user_id
)
installation_id = message.message['installation']
@@ -649,6 +797,7 @@ async def create_github_view_from_payload(
title='',
description='',
previous_comments=[],
+ v1=False,
)
elif GithubFactory.is_issue_comment(message):
@@ -674,6 +823,7 @@ async def create_github_view_from_payload(
title='',
description='',
previous_comments=[],
+ v1=False,
)
elif GithubFactory.is_pr_comment(message):
@@ -715,6 +865,7 @@ async def create_github_view_from_payload(
title='',
description='',
previous_comments=[],
+ v1=False,
)
elif GithubFactory.is_inline_pr_comment(message):
@@ -748,6 +899,7 @@ async def create_github_view_from_payload(
title='',
description='',
previous_comments=[],
+ v1=False,
)
else:
diff --git a/enterprise/integrations/resolver_context.py b/enterprise/integrations/resolver_context.py
new file mode 100644
index 000000000000..29840ee9c734
--- /dev/null
+++ b/enterprise/integrations/resolver_context.py
@@ -0,0 +1,55 @@
+from openhands.app_server.user.user_context import UserContext
+from openhands.app_server.user.user_models import UserInfo
+from openhands.integrations.provider import PROVIDER_TOKEN_TYPE
+from openhands.integrations.service_types import ProviderType
+from openhands.server.user_auth.user_auth import UserAuth
+
+
+class ResolverUserContext(UserContext):
+ """User context for resolver operations that inherits from UserContext."""
+
+ def __init__(
+ self,
+ saas_user_auth: UserAuth,
+ ):
+ self.saas_user_auth = saas_user_auth
+
+ async def get_user_id(self) -> str | None:
+ return await self.saas_user_auth.get_user_id()
+
+ async def get_user_info(self) -> UserInfo:
+ user_settings = await self.saas_user_auth.get_user_settings()
+ user_id = await self.saas_user_auth.get_user_id()
+ if user_settings:
+ return UserInfo(
+ id=user_id,
+ **user_settings.model_dump(context={'expose_secrets': True}),
+ )
+
+ return UserInfo(id=user_id)
+
+ async def get_authenticated_git_url(self, repository: str) -> str:
+ # This would need to be implemented based on the git provider tokens
+ # For now, return a basic HTTPS URL
+ return f'https://github.com/{repository}.git'
+
+ async def get_latest_token(self, provider_type: ProviderType) -> str | None:
+ # Return the appropriate token from git_provider_tokens
+
+ provider_tokens = await self.saas_user_auth.get_provider_tokens()
+ if provider_tokens:
+ return provider_tokens.get(provider_type)
+ return None
+
+ async def get_provider_tokens(self) -> PROVIDER_TOKEN_TYPE | None:
+ return await self.saas_user_auth.get_provider_tokens()
+
+ async def get_secrets(self) -> dict[str, str]:
+ """Get secrets for the user, including custom secrets."""
+ secrets = await self.saas_user_auth.get_secrets()
+ if secrets:
+ return dict(secrets.custom_secrets)
+ return {}
+
+ async def get_mcp_api_key(self) -> str | None:
+ return await self.saas_user_auth.get_mcp_api_key()
diff --git a/enterprise/integrations/types.py b/enterprise/integrations/types.py
index dcbcc9b7d3ce..0b8d79228c18 100644
--- a/enterprise/integrations/types.py
+++ b/enterprise/integrations/types.py
@@ -19,7 +19,7 @@ class PRStatus(Enum):
class UserData(BaseModel):
user_id: int
username: str
- keycloak_user_id: str | None
+ keycloak_user_id: str
@dataclass
diff --git a/enterprise/integrations/v1_utils.py b/enterprise/integrations/v1_utils.py
new file mode 100644
index 000000000000..78953e4e93dc
--- /dev/null
+++ b/enterprise/integrations/v1_utils.py
@@ -0,0 +1,20 @@
+from pydantic import SecretStr
+from server.auth.saas_user_auth import SaasUserAuth
+from server.auth.token_manager import TokenManager
+
+from openhands.core.logger import openhands_logger as logger
+from openhands.server.user_auth.user_auth import UserAuth
+
+
+async def get_saas_user_auth(
+ keycloak_user_id: str, token_manager: TokenManager
+) -> UserAuth:
+ offline_token = await token_manager.load_offline_token(keycloak_user_id)
+ if offline_token is None:
+ logger.info('no_offline_token_found')
+
+ user_auth = SaasUserAuth(
+ user_id=keycloak_user_id,
+ refresh_token=SecretStr(offline_token),
+ )
+ return user_auth
diff --git a/enterprise/migrations/versions/081_add_parent_conversation_id.py b/enterprise/migrations/versions/081_add_parent_conversation_id.py
new file mode 100644
index 000000000000..b27c444632e4
--- /dev/null
+++ b/enterprise/migrations/versions/081_add_parent_conversation_id.py
@@ -0,0 +1,41 @@
+"""add parent_conversation_id to conversation_metadata
+
+Revision ID: 081
+Revises: 080
+Create Date: 2025-11-06 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = '081'
+down_revision: Union[str, None] = '080'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """Upgrade schema."""
+ op.add_column(
+ 'conversation_metadata',
+ sa.Column('parent_conversation_id', sa.String(), nullable=True),
+ )
+ op.create_index(
+ op.f('ix_conversation_metadata_parent_conversation_id'),
+ 'conversation_metadata',
+ ['parent_conversation_id'],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ """Downgrade schema."""
+ op.drop_index(
+ op.f('ix_conversation_metadata_parent_conversation_id'),
+ table_name='conversation_metadata',
+ )
+ op.drop_column('conversation_metadata', 'parent_conversation_id')
diff --git a/enterprise/migrations/versions/082_add_setting_up_skills_enum_value.py b/enterprise/migrations/versions/082_add_setting_up_skills_enum_value.py
new file mode 100644
index 000000000000..4f960acd2794
--- /dev/null
+++ b/enterprise/migrations/versions/082_add_setting_up_skills_enum_value.py
@@ -0,0 +1,51 @@
+"""Add SETTING_UP_SKILLS to appconversationstarttaskstatus enum
+
+Revision ID: 082
+Revises: 081
+Create Date: 2025-11-19 12:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+from sqlalchemy import text
+
+# revision identifiers, used by Alembic.
+revision: str = '082'
+down_revision: Union[str, Sequence[str], None] = '081'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """Add SETTING_UP_SKILLS enum value to appconversationstarttaskstatus."""
+ # Check if the enum value already exists before adding it
+ # This handles the case where the enum was created with the value already included
+ connection = op.get_bind()
+ result = connection.execute(
+ text(
+ "SELECT 1 FROM pg_enum WHERE enumlabel = 'SETTING_UP_SKILLS' "
+ "AND enumtypid = (SELECT oid FROM pg_type WHERE typname = 'appconversationstarttaskstatus')"
+ )
+ )
+
+ if not result.fetchone():
+ # Add the new enum value only if it doesn't already exist
+ op.execute(
+ "ALTER TYPE appconversationstarttaskstatus ADD VALUE 'SETTING_UP_SKILLS'"
+ )
+
+
+def downgrade() -> None:
+ """Remove SETTING_UP_SKILLS enum value from appconversationstarttaskstatus.
+
+ Note: PostgreSQL doesn't support removing enum values directly.
+ This would require recreating the enum type and updating all references.
+ For safety, this downgrade is not implemented.
+ """
+ # PostgreSQL doesn't support removing enum values directly
+ # This would require a complex migration to recreate the enum
+ # For now, we'll leave this as a no-op since removing enum values
+ # is rarely needed and can be dangerous
+ pass
diff --git a/enterprise/migrations/versions/083_add_v1_enabled_to_user_settings.py b/enterprise/migrations/versions/083_add_v1_enabled_to_user_settings.py
new file mode 100644
index 000000000000..33fdb470af9e
--- /dev/null
+++ b/enterprise/migrations/versions/083_add_v1_enabled_to_user_settings.py
@@ -0,0 +1,35 @@
+"""Add v1_enabled column to user_settings
+
+Revision ID: 083
+Revises: 082
+Create Date: 2025-11-18 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = '083'
+down_revision: Union[str, None] = '082'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """Add v1_enabled column to user_settings table."""
+ op.add_column(
+ 'user_settings',
+ sa.Column(
+ 'v1_enabled',
+ sa.Boolean(),
+ nullable=True,
+ ),
+ )
+
+
+def downgrade() -> None:
+ """Remove v1_enabled column from user_settings table."""
+ op.drop_column('user_settings', 'v1_enabled')
diff --git a/enterprise/poetry.lock b/enterprise/poetry.lock
index cb3bfc2c3e66..ec6552085d20 100644
--- a/enterprise/poetry.lock
+++ b/enterprise/poetry.lock
@@ -201,14 +201,14 @@ files = [
[[package]]
name = "anthropic"
-version = "0.72.0"
+version = "0.75.0"
description = "The official Python library for the anthropic API"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "anthropic-0.72.0-py3-none-any.whl", hash = "sha256:0e9f5a7582f038cab8efbb4c959e49ef654a56bfc7ba2da51b5a7b8a84de2e4d"},
- {file = "anthropic-0.72.0.tar.gz", hash = "sha256:8971fe76dcffc644f74ac3883069beb1527641115ae0d6eb8fa21c1ce4082f7a"},
+ {file = "anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b"},
+ {file = "anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb"},
]
[package.dependencies]
@@ -682,37 +682,37 @@ crt = ["awscrt (==0.27.6)"]
[[package]]
name = "browser-use"
-version = "0.9.5"
+version = "0.10.1"
description = "Make websites accessible for AI agents"
optional = false
python-versions = "<4.0,>=3.11"
groups = ["main"]
files = [
- {file = "browser_use-0.9.5-py3-none-any.whl", hash = "sha256:4a2e92847204d1ded269026a99cb0cc0e60e38bd2751fa3f58aedd78f00b4e67"},
- {file = "browser_use-0.9.5.tar.gz", hash = "sha256:f8285fe253b149d01769a7084883b4cf4db351e2f38e26302c157bcbf14a703f"},
+ {file = "browser_use-0.10.1-py3-none-any.whl", hash = "sha256:96e603bfc71098175342cdcb0592519e6f244412e740f0254e4389fdd82a977f"},
+ {file = "browser_use-0.10.1.tar.gz", hash = "sha256:5f211ecfdf1f9fd186160f10df70dedd661821231e30f1bce40939787abab223"},
]
[package.dependencies]
aiohttp = "3.12.15"
-anthropic = ">=0.68.1,<1.0.0"
+anthropic = ">=0.72.1,<1.0.0"
anyio = ">=4.9.0"
authlib = ">=1.6.0"
bubus = ">=1.5.6"
-cdp-use = ">=1.4.0"
+cdp-use = ">=1.4.4"
click = ">=8.1.8"
cloudpickle = ">=3.1.1"
google-api-core = ">=2.25.0"
google-api-python-client = ">=2.174.0"
google-auth = ">=2.40.3"
google-auth-oauthlib = ">=1.2.2"
-google-genai = ">=1.29.0,<2.0.0"
+google-genai = ">=1.50.0,<2.0.0"
groq = ">=0.30.0"
httpx = ">=0.28.1"
inquirerpy = ">=0.3.4"
markdownify = ">=1.2.0"
mcp = ">=1.10.1"
ollama = ">=0.5.1"
-openai = ">=1.99.2,<2.0.0"
+openai = ">=2.7.2,<3.0.0"
pillow = ">=11.2.1"
portalocker = ">=2.7.0,<3.0.0"
posthog = ">=3.7.0"
@@ -721,6 +721,7 @@ pydantic = ">=2.11.5"
pyobjc = {version = ">=11.0", markers = "platform_system == \"darwin\""}
pyotp = ">=2.9.0"
pypdf = ">=5.7.0"
+python-docx = ">=1.2.0"
python-dotenv = ">=1.0.1"
reportlab = ">=4.0.0"
requests = ">=2.32.3"
@@ -850,14 +851,14 @@ files = [
[[package]]
name = "cdp-use"
-version = "1.4.3"
+version = "1.4.4"
description = "Type safe generator/client library for CDP"
optional = false
python-versions = ">=3.11"
groups = ["main"]
files = [
- {file = "cdp_use-1.4.3-py3-none-any.whl", hash = "sha256:c48664604470c2579aa1e677c3e3e7e24c4f300c54804c093d935abb50479ecd"},
- {file = "cdp_use-1.4.3.tar.gz", hash = "sha256:9029c04bdc49fbd3939d2bf1988ad8d88e260729c7d5e35c2f6c87591f5a10e9"},
+ {file = "cdp_use-1.4.4-py3-none-any.whl", hash = "sha256:e37e80e067db2653d6fdf953d4ff9e5d80d75daa27b7c6d48c0261cccbef73e1"},
+ {file = "cdp_use-1.4.4.tar.gz", hash = "sha256:330a848b517006eb9ad1dc468aa6434d913cf0c6918610760c36c3fdfdba0fab"},
]
[package.dependencies]
@@ -2978,28 +2979,29 @@ testing = ["pytest"]
[[package]]
name = "google-genai"
-version = "1.32.0"
+version = "1.53.0"
description = "GenAI Python SDK"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "google_genai-1.32.0-py3-none-any.whl", hash = "sha256:c0c4b1d45adf3aa99501050dd73da2f0dea09374002231052d81a6765d15e7f6"},
- {file = "google_genai-1.32.0.tar.gz", hash = "sha256:349da3f5ff0e981066bd508585fcdd308d28fc4646f318c8f6d1aa6041f4c7e3"},
+ {file = "google_genai-1.53.0-py3-none-any.whl", hash = "sha256:65a3f99e5c03c372d872cda7419f5940e723374bb12a2f3ffd5e3e56e8eb2094"},
+ {file = "google_genai-1.53.0.tar.gz", hash = "sha256:938a26d22f3fd32c6eeeb4276ef204ef82884e63af9842ce3eac05ceb39cbd8d"},
]
[package.dependencies]
anyio = ">=4.8.0,<5.0.0"
-google-auth = ">=2.14.1,<3.0.0"
+google-auth = {version = ">=2.14.1,<3.0.0", extras = ["requests"]}
httpx = ">=0.28.1,<1.0.0"
-pydantic = ">=2.0.0,<3.0.0"
+pydantic = ">=2.9.0,<3.0.0"
requests = ">=2.28.1,<3.0.0"
tenacity = ">=8.2.3,<9.2.0"
typing-extensions = ">=4.11.0,<5.0.0"
websockets = ">=13.0.0,<15.1.0"
[package.extras]
-aiohttp = ["aiohttp (<4.0.0)"]
+aiohttp = ["aiohttp (<3.13.3)"]
+local-tokenizer = ["protobuf", "sentencepiece (>=0.2.0)"]
[[package]]
name = "google-resumable-media"
@@ -3055,6 +3057,8 @@ files = [
{file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"},
{file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"},
{file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"},
+ {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f47617f698838ba98f4ff4189aef02e7343952df3a615f847bb575c3feb177a7"},
+ {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af41be48a4f60429d5cad9d22175217805098a9ef7c40bfef44f7669fb9d74d8"},
{file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"},
{file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"},
{file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"},
@@ -3064,6 +3068,8 @@ files = [
{file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"},
{file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"},
{file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"},
+ {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c"},
+ {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5"},
{file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"},
{file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"},
{file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"},
@@ -3073,6 +3079,8 @@ files = [
{file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"},
{file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"},
{file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"},
+ {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0"},
+ {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d"},
{file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"},
{file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"},
{file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"},
@@ -3082,6 +3090,8 @@ files = [
{file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"},
{file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"},
{file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"},
+ {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b"},
+ {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929"},
{file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"},
{file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"},
{file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"},
@@ -3089,6 +3099,8 @@ files = [
{file = "greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"},
{file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"},
{file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"},
+ {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269"},
+ {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681"},
{file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"},
{file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"},
{file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"},
@@ -3098,6 +3110,8 @@ files = [
{file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"},
{file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"},
{file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"},
+ {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28a3c6b7cd72a96f61b0e4b2a36f681025b60ae4779cc73c1535eb5f29560b10"},
+ {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:52206cd642670b0b320a1fd1cbfd95bca0e043179c1d8a045f2c6109dfe973be"},
{file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"},
{file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"},
{file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"},
@@ -3166,83 +3180,87 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[[package]]
name = "grpcio"
-version = "1.74.0"
+version = "1.67.1"
description = "HTTP/2-based RPC framework"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "grpcio-1.74.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:85bd5cdf4ed7b2d6438871adf6afff9af7096486fcf51818a81b77ef4dd30907"},
- {file = "grpcio-1.74.0-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:68c8ebcca945efff9d86d8d6d7bfb0841cf0071024417e2d7f45c5e46b5b08eb"},
- {file = "grpcio-1.74.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:e154d230dc1bbbd78ad2fdc3039fa50ad7ffcf438e4eb2fa30bce223a70c7486"},
- {file = "grpcio-1.74.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8978003816c7b9eabe217f88c78bc26adc8f9304bf6a594b02e5a49b2ef9c11"},
- {file = "grpcio-1.74.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3d7bd6e3929fd2ea7fbc3f562e4987229ead70c9ae5f01501a46701e08f1ad9"},
- {file = "grpcio-1.74.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:136b53c91ac1d02c8c24201bfdeb56f8b3ac3278668cbb8e0ba49c88069e1bdc"},
- {file = "grpcio-1.74.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fe0f540750a13fd8e5da4b3eaba91a785eea8dca5ccd2bc2ffe978caa403090e"},
- {file = "grpcio-1.74.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4e4181bfc24413d1e3a37a0b7889bea68d973d4b45dd2bc68bb766c140718f82"},
- {file = "grpcio-1.74.0-cp310-cp310-win32.whl", hash = "sha256:1733969040989f7acc3d94c22f55b4a9501a30f6aaacdbccfaba0a3ffb255ab7"},
- {file = "grpcio-1.74.0-cp310-cp310-win_amd64.whl", hash = "sha256:9e912d3c993a29df6c627459af58975b2e5c897d93287939b9d5065f000249b5"},
- {file = "grpcio-1.74.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:69e1a8180868a2576f02356565f16635b99088da7df3d45aaa7e24e73a054e31"},
- {file = "grpcio-1.74.0-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:8efe72fde5500f47aca1ef59495cb59c885afe04ac89dd11d810f2de87d935d4"},
- {file = "grpcio-1.74.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a8f0302f9ac4e9923f98d8e243939a6fb627cd048f5cd38595c97e38020dffce"},
- {file = "grpcio-1.74.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f609a39f62a6f6f05c7512746798282546358a37ea93c1fcbadf8b2fed162e3"},
- {file = "grpcio-1.74.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c98e0b7434a7fa4e3e63f250456eaef52499fba5ae661c58cc5b5477d11e7182"},
- {file = "grpcio-1.74.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:662456c4513e298db6d7bd9c3b8df6f75f8752f0ba01fb653e252ed4a59b5a5d"},
- {file = "grpcio-1.74.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3d14e3c4d65e19d8430a4e28ceb71ace4728776fd6c3ce34016947474479683f"},
- {file = "grpcio-1.74.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bf949792cee20d2078323a9b02bacbbae002b9e3b9e2433f2741c15bdeba1c4"},
- {file = "grpcio-1.74.0-cp311-cp311-win32.whl", hash = "sha256:55b453812fa7c7ce2f5c88be3018fb4a490519b6ce80788d5913f3f9d7da8c7b"},
- {file = "grpcio-1.74.0-cp311-cp311-win_amd64.whl", hash = "sha256:86ad489db097141a907c559988c29718719aa3e13370d40e20506f11b4de0d11"},
- {file = "grpcio-1.74.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8533e6e9c5bd630ca98062e3a1326249e6ada07d05acf191a77bc33f8948f3d8"},
- {file = "grpcio-1.74.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:2918948864fec2a11721d91568effffbe0a02b23ecd57f281391d986847982f6"},
- {file = "grpcio-1.74.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:60d2d48b0580e70d2e1954d0d19fa3c2e60dd7cbed826aca104fff518310d1c5"},
- {file = "grpcio-1.74.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3601274bc0523f6dc07666c0e01682c94472402ac2fd1226fd96e079863bfa49"},
- {file = "grpcio-1.74.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:176d60a5168d7948539def20b2a3adcce67d72454d9ae05969a2e73f3a0feee7"},
- {file = "grpcio-1.74.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e759f9e8bc908aaae0412642afe5416c9f983a80499448fcc7fab8692ae044c3"},
- {file = "grpcio-1.74.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e7c4389771855a92934b2846bd807fc25a3dfa820fd912fe6bd8136026b2707"},
- {file = "grpcio-1.74.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cce634b10aeab37010449124814b05a62fb5f18928ca878f1bf4750d1f0c815b"},
- {file = "grpcio-1.74.0-cp312-cp312-win32.whl", hash = "sha256:885912559974df35d92219e2dc98f51a16a48395f37b92865ad45186f294096c"},
- {file = "grpcio-1.74.0-cp312-cp312-win_amd64.whl", hash = "sha256:42f8fee287427b94be63d916c90399ed310ed10aadbf9e2e5538b3e497d269bc"},
- {file = "grpcio-1.74.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:2bc2d7d8d184e2362b53905cb1708c84cb16354771c04b490485fa07ce3a1d89"},
- {file = "grpcio-1.74.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:c14e803037e572c177ba54a3e090d6eb12efd795d49327c5ee2b3bddb836bf01"},
- {file = "grpcio-1.74.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:f6ec94f0e50eb8fa1744a731088b966427575e40c2944a980049798b127a687e"},
- {file = "grpcio-1.74.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:566b9395b90cc3d0d0c6404bc8572c7c18786ede549cdb540ae27b58afe0fb91"},
- {file = "grpcio-1.74.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1ea6176d7dfd5b941ea01c2ec34de9531ba494d541fe2057c904e601879f249"},
- {file = "grpcio-1.74.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:64229c1e9cea079420527fa8ac45d80fc1e8d3f94deaa35643c381fa8d98f362"},
- {file = "grpcio-1.74.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0f87bddd6e27fc776aacf7ebfec367b6d49cad0455123951e4488ea99d9b9b8f"},
- {file = "grpcio-1.74.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:3b03d8f2a07f0fea8c8f74deb59f8352b770e3900d143b3d1475effcb08eec20"},
- {file = "grpcio-1.74.0-cp313-cp313-win32.whl", hash = "sha256:b6a73b2ba83e663b2480a90b82fdae6a7aa6427f62bf43b29912c0cfd1aa2bfa"},
- {file = "grpcio-1.74.0-cp313-cp313-win_amd64.whl", hash = "sha256:fd3c71aeee838299c5887230b8a1822795325ddfea635edd82954c1eaa831e24"},
- {file = "grpcio-1.74.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:4bc5fca10aaf74779081e16c2bcc3d5ec643ffd528d9e7b1c9039000ead73bae"},
- {file = "grpcio-1.74.0-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:6bab67d15ad617aff094c382c882e0177637da73cbc5532d52c07b4ee887a87b"},
- {file = "grpcio-1.74.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:655726919b75ab3c34cdad39da5c530ac6fa32696fb23119e36b64adcfca174a"},
- {file = "grpcio-1.74.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a2b06afe2e50ebfd46247ac3ba60cac523f54ec7792ae9ba6073c12daf26f0a"},
- {file = "grpcio-1.74.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f251c355167b2360537cf17bea2cf0197995e551ab9da6a0a59b3da5e8704f9"},
- {file = "grpcio-1.74.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8f7b5882fb50632ab1e48cb3122d6df55b9afabc265582808036b6e51b9fd6b7"},
- {file = "grpcio-1.74.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:834988b6c34515545b3edd13e902c1acdd9f2465d386ea5143fb558f153a7176"},
- {file = "grpcio-1.74.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:22b834cef33429ca6cc28303c9c327ba9a3fafecbf62fae17e9a7b7163cc43ac"},
- {file = "grpcio-1.74.0-cp39-cp39-win32.whl", hash = "sha256:7d95d71ff35291bab3f1c52f52f474c632db26ea12700c2ff0ea0532cb0b5854"},
- {file = "grpcio-1.74.0-cp39-cp39-win_amd64.whl", hash = "sha256:ecde9ab49f58433abe02f9ed076c7b5be839cf0153883a6d23995937a82392fa"},
- {file = "grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1"},
-]
-
-[package.extras]
-protobuf = ["grpcio-tools (>=1.74.0)"]
+ {file = "grpcio-1.67.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:8b0341d66a57f8a3119b77ab32207072be60c9bf79760fa609c5609f2deb1f3f"},
+ {file = "grpcio-1.67.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:f5a27dddefe0e2357d3e617b9079b4bfdc91341a91565111a21ed6ebbc51b22d"},
+ {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:43112046864317498a33bdc4797ae6a268c36345a910de9b9c17159d8346602f"},
+ {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9b929f13677b10f63124c1a410994a401cdd85214ad83ab67cc077fc7e480f0"},
+ {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7d1797a8a3845437d327145959a2c0c47c05947c9eef5ff1a4c80e499dcc6fa"},
+ {file = "grpcio-1.67.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0489063974d1452436139501bf6b180f63d4977223ee87488fe36858c5725292"},
+ {file = "grpcio-1.67.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9fd042de4a82e3e7aca44008ee2fb5da01b3e5adb316348c21980f7f58adc311"},
+ {file = "grpcio-1.67.1-cp310-cp310-win32.whl", hash = "sha256:638354e698fd0c6c76b04540a850bf1db27b4d2515a19fcd5cf645c48d3eb1ed"},
+ {file = "grpcio-1.67.1-cp310-cp310-win_amd64.whl", hash = "sha256:608d87d1bdabf9e2868b12338cd38a79969eaf920c89d698ead08f48de9c0f9e"},
+ {file = "grpcio-1.67.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:7818c0454027ae3384235a65210bbf5464bd715450e30a3d40385453a85a70cb"},
+ {file = "grpcio-1.67.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ea33986b70f83844cd00814cee4451055cd8cab36f00ac64a31f5bb09b31919e"},
+ {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c7a01337407dd89005527623a4a72c5c8e2894d22bead0895306b23c6695698f"},
+ {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80b866f73224b0634f4312a4674c1be21b2b4afa73cb20953cbbb73a6b36c3cc"},
+ {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9fff78ba10d4250bfc07a01bd6254a6d87dc67f9627adece85c0b2ed754fa96"},
+ {file = "grpcio-1.67.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8a23cbcc5bb11ea7dc6163078be36c065db68d915c24f5faa4f872c573bb400f"},
+ {file = "grpcio-1.67.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1a65b503d008f066e994f34f456e0647e5ceb34cfcec5ad180b1b44020ad4970"},
+ {file = "grpcio-1.67.1-cp311-cp311-win32.whl", hash = "sha256:e29ca27bec8e163dca0c98084040edec3bc49afd10f18b412f483cc68c712744"},
+ {file = "grpcio-1.67.1-cp311-cp311-win_amd64.whl", hash = "sha256:786a5b18544622bfb1e25cc08402bd44ea83edfb04b93798d85dca4d1a0b5be5"},
+ {file = "grpcio-1.67.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:267d1745894200e4c604958da5f856da6293f063327cb049a51fe67348e4f953"},
+ {file = "grpcio-1.67.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:85f69fdc1d28ce7cff8de3f9c67db2b0ca9ba4449644488c1e0303c146135ddb"},
+ {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f26b0b547eb8d00e195274cdfc63ce64c8fc2d3e2d00b12bf468ece41a0423a0"},
+ {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4422581cdc628f77302270ff839a44f4c24fdc57887dc2a45b7e53d8fc2376af"},
+ {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7616d2ded471231c701489190379e0c311ee0a6c756f3c03e6a62b95a7146e"},
+ {file = "grpcio-1.67.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8a00efecde9d6fcc3ab00c13f816313c040a28450e5e25739c24f432fc6d3c75"},
+ {file = "grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38"},
+ {file = "grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78"},
+ {file = "grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc"},
+ {file = "grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b"},
+ {file = "grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1"},
+ {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af"},
+ {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955"},
+ {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8"},
+ {file = "grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62"},
+ {file = "grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb"},
+ {file = "grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121"},
+ {file = "grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba"},
+ {file = "grpcio-1.67.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:178f5db771c4f9a9facb2ab37a434c46cb9be1a75e820f187ee3d1e7805c4f65"},
+ {file = "grpcio-1.67.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f3e49c738396e93b7ba9016e153eb09e0778e776df6090c1b8c91877cc1c426"},
+ {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:24e8a26dbfc5274d7474c27759b54486b8de23c709d76695237515bc8b5baeab"},
+ {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b6c16489326d79ead41689c4b84bc40d522c9a7617219f4ad94bc7f448c5085"},
+ {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e6a4dcf5af7bbc36fd9f81c9f372e8ae580870a9e4b6eafe948cd334b81cf3"},
+ {file = "grpcio-1.67.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:95b5f2b857856ed78d72da93cd7d09b6db8ef30102e5e7fe0961fe4d9f7d48e8"},
+ {file = "grpcio-1.67.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b49359977c6ec9f5d0573ea4e0071ad278ef905aa74e420acc73fd28ce39e9ce"},
+ {file = "grpcio-1.67.1-cp38-cp38-win32.whl", hash = "sha256:f5b76ff64aaac53fede0cc93abf57894ab2a7362986ba22243d06218b93efe46"},
+ {file = "grpcio-1.67.1-cp38-cp38-win_amd64.whl", hash = "sha256:804c6457c3cd3ec04fe6006c739579b8d35c86ae3298ffca8de57b493524b771"},
+ {file = "grpcio-1.67.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:a25bdea92b13ff4d7790962190bf6bf5c4639876e01c0f3dda70fc2769616335"},
+ {file = "grpcio-1.67.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc491ae35a13535fd9196acb5afe1af37c8237df2e54427be3eecda3653127e"},
+ {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:85f862069b86a305497e74d0dc43c02de3d1d184fc2c180993aa8aa86fbd19b8"},
+ {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec74ef02010186185de82cc594058a3ccd8d86821842bbac9873fd4a2cf8be8d"},
+ {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01f616a964e540638af5130469451cf580ba8c7329f45ca998ab66e0c7dcdb04"},
+ {file = "grpcio-1.67.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:299b3d8c4f790c6bcca485f9963b4846dd92cf6f1b65d3697145d005c80f9fe8"},
+ {file = "grpcio-1.67.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:60336bff760fbb47d7e86165408126f1dded184448e9a4c892189eb7c9d3f90f"},
+ {file = "grpcio-1.67.1-cp39-cp39-win32.whl", hash = "sha256:5ed601c4c6008429e3d247ddb367fe8c7259c355757448d7c1ef7bd4a6739e8e"},
+ {file = "grpcio-1.67.1-cp39-cp39-win_amd64.whl", hash = "sha256:5db70d32d6703b89912af16d6d45d78406374a8b8ef0d28140351dd0ec610e98"},
+ {file = "grpcio-1.67.1.tar.gz", hash = "sha256:3dc2ed4cabea4dc14d5e708c2b426205956077cc5de419b4d4079315017e9732"},
+]
+
+[package.extras]
+protobuf = ["grpcio-tools (>=1.67.1)"]
[[package]]
name = "grpcio-status"
-version = "1.71.2"
+version = "1.67.1"
description = "Status proto mapping for gRPC"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "grpcio_status-1.71.2-py3-none-any.whl", hash = "sha256:803c98cb6a8b7dc6dbb785b1111aed739f241ab5e9da0bba96888aa74704cfd3"},
- {file = "grpcio_status-1.71.2.tar.gz", hash = "sha256:c7a97e176df71cdc2c179cd1847d7fc86cca5832ad12e9798d7fed6b7a1aab50"},
+ {file = "grpcio_status-1.67.1-py3-none-any.whl", hash = "sha256:16e6c085950bdacac97c779e6a502ea671232385e6e37f258884d6883392c2bd"},
+ {file = "grpcio_status-1.67.1.tar.gz", hash = "sha256:2bf38395e028ceeecfd8866b081f61628114b384da7d51ae064ddc8d766a5d11"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
-grpcio = ">=1.71.2"
+grpcio = ">=1.67.1"
protobuf = ">=5.26.1,<6.0dev"
[[package]]
@@ -4540,42 +4558,39 @@ valkey = ["valkey (>=6)"]
[[package]]
name = "litellm"
-version = "1.77.7"
+version = "1.80.7"
description = "Library to easily interface with LLM API providers"
optional = false
-python-versions = ">=3.8.1,<4.0, !=3.9.7"
+python-versions = "<4.0,>=3.9"
groups = ["main"]
-files = []
-develop = false
+files = [
+ {file = "litellm-1.80.7-py3-none-any.whl", hash = "sha256:f7d993f78c1e0e4e1202b2a925cc6540b55b6e5fb055dd342d88b145ab3102ed"},
+ {file = "litellm-1.80.7.tar.gz", hash = "sha256:3977a8d195aef842d01c18bf9e22984829363c6a4b54daf9a43c9dd9f190b42c"},
+]
[package.dependencies]
aiohttp = ">=3.10"
click = "*"
fastuuid = ">=0.13.0"
+grpcio = ">=1.62.3,<1.68.0"
httpx = ">=0.23.0"
importlib-metadata = ">=6.8.0"
-jinja2 = "^3.1.2"
-jsonschema = "^4.22.0"
-openai = ">=1.99.5"
-pydantic = "^2.5.0"
+jinja2 = ">=3.1.2,<4.0.0"
+jsonschema = ">=4.22.0,<5.0.0"
+openai = ">=2.8.0"
+pydantic = ">=2.5.0,<3.0.0"
python-dotenv = ">=0.2.0"
tiktoken = ">=0.7.0"
tokenizers = "*"
[package.extras]
caching = ["diskcache (>=5.6.1,<6.0.0)"]
-extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-iam (>=2.19.1,<3.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "redisvl (>=0.4.1,<0.5.0) ; python_version >= \"3.9\" and python_version < \"3.14\"", "resend (>=0.8.0,<0.9.0)"]
+extra-proxy = ["azure-identity (>=1.15.0,<2.0.0) ; python_version >= \"3.9\"", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-iam (>=2.19.1,<3.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "redisvl (>=0.4.1,<0.5.0) ; python_version >= \"3.9\" and python_version < \"3.14\"", "resend (>=0.8.0)"]
mlflow = ["mlflow (>3.1.4) ; python_version >= \"3.10\""]
-proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-storage-blob (>=12.25.1,<13.0.0)", "backoff", "boto3 (==1.36.0)", "cryptography", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.20)", "litellm-proxy-extras (==0.2.25)", "mcp (>=1.10.0,<2.0.0) ; python_version >= \"3.10\"", "orjson (>=3.9.7,<4.0.0)", "polars (>=1.31.0,<2.0.0) ; python_version >= \"3.10\"", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0) ; sys_platform != \"win32\"", "websockets (>=13.1.0,<14.0.0)"]
-semantic-router = ["semantic-router ; python_version >= \"3.9\""]
+proxy = ["PyJWT (>=2.10.1,<3.0.0) ; python_version >= \"3.9\"", "apscheduler (>=3.10.4,<4.0.0)", "azure-identity (>=1.15.0,<2.0.0) ; python_version >= \"3.9\"", "azure-storage-blob (>=12.25.1,<13.0.0)", "backoff", "boto3 (==1.36.0)", "cryptography", "fastapi (>=0.120.1)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.22)", "litellm-proxy-extras (==0.4.9)", "mcp (>=1.21.2,<2.0.0) ; python_version >= \"3.10\"", "orjson (>=3.9.7,<4.0.0)", "polars (>=1.31.0,<2.0.0) ; python_version >= \"3.10\"", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "soundfile (>=0.12.1,<0.13.0)", "uvicorn (>=0.31.1,<0.32.0)", "uvloop (>=0.21.0,<0.22.0) ; sys_platform != \"win32\"", "websockets (>=15.0.1,<16.0.0)"]
+semantic-router = ["semantic-router (>=0.1.12) ; python_version >= \"3.9\" and python_version < \"3.14\""]
utils = ["numpydoc"]
-[package.source]
-type = "git"
-url = "https://github.com/BerriAI/litellm.git"
-reference = "v1.77.7.dev9"
-resolved_reference = "763d2f8ccdd8412dbe6d4ac0e136d9ac34dcd4c0"
-
[[package]]
name = "llvmlite"
version = "0.44.0"
@@ -5644,28 +5659,28 @@ pydantic = ">=2.9"
[[package]]
name = "openai"
-version = "1.99.9"
+version = "2.8.0"
description = "The official Python library for the openai API"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main", "test"]
files = [
- {file = "openai-1.99.9-py3-none-any.whl", hash = "sha256:9dbcdb425553bae1ac5d947147bebbd630d91bbfc7788394d4c4f3a35682ab3a"},
- {file = "openai-1.99.9.tar.gz", hash = "sha256:f2082d155b1ad22e83247c3de3958eb4255b20ccf4a1de2e6681b6957b554e92"},
+ {file = "openai-2.8.0-py3-none-any.whl", hash = "sha256:ba975e347f6add2fe13529ccb94d54a578280e960765e5224c34b08d7e029ddf"},
+ {file = "openai-2.8.0.tar.gz", hash = "sha256:4851908f6d6fcacbd47ba659c5ac084f7725b752b6bfa1e948b6fbfc111a6bad"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
-jiter = ">=0.4.0,<1"
+jiter = ">=0.10.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
tqdm = ">4"
typing-extensions = ">=4.11,<5"
[package.extras]
-aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"]
+aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
@@ -5820,14 +5835,14 @@ llama = ["llama-index (>=0.12.29,<0.13.0)", "llama-index-core (>=0.12.29,<0.13.0
[[package]]
name = "openhands-agent-server"
-version = "1.1.0"
+version = "1.4.1"
description = "OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = [
- {file = "openhands_agent_server-1.1.0-py3-none-any.whl", hash = "sha256:59a856883df23488c0723e47655ef21649a321fcd4709a25a4690866eff6ac88"},
- {file = "openhands_agent_server-1.1.0.tar.gz", hash = "sha256:e39bebd39afd45cfcfd765005e7c4e5409e46678bd7612ae20bae79f7057b935"},
+ {file = "openhands_agent_server-1.4.1-py3-none-any.whl", hash = "sha256:1e621d15215a48e2398e23c58a791347f06c215c2344053aeb26b562c34a44ee"},
+ {file = "openhands_agent_server-1.4.1.tar.gz", hash = "sha256:03010a5c8d63bbd5b088458eb75308ef16559018140d75a3644ae5bbc3531bbf"},
]
[package.dependencies]
@@ -5835,6 +5850,7 @@ aiosqlite = ">=0.19"
alembic = ">=1.13"
docker = ">=7.1,<8"
fastapi = ">=0.104"
+openhands-sdk = "*"
pydantic = ">=2"
sqlalchemy = ">=2"
uvicorn = ">=0.31.1"
@@ -5843,7 +5859,7 @@ wsproto = ">=1.2.0"
[[package]]
name = "openhands-ai"
-version = "0.0.0-post.5525+0b6631523"
+version = "0.0.0-post.5625+0a98f165e"
description = "OpenHands: Code Less, Make More"
optional = false
python-versions = "^3.12,<3.14"
@@ -5860,6 +5876,7 @@ bashlex = "^0.18"
boto3 = "*"
browsergym-core = "0.13.3"
deprecated = "*"
+deprecation = "^2.1.0"
dirhash = "*"
docker = "*"
fastapi = "*"
@@ -5878,15 +5895,15 @@ json-repair = "*"
jupyter_kernel_gateway = "*"
kubernetes = "^33.1.0"
libtmux = ">=0.46.2"
-litellm = ">=1.74.3, <1.78.0, !=1.64.4, !=1.67.*"
+litellm = ">=1.74.3, <=1.80.7, !=1.64.4, !=1.67.*"
lmnr = "^0.7.20"
memory-profiler = "^0.61.0"
numpy = "*"
-openai = "1.99.9"
+openai = "2.8.0"
openhands-aci = "0.3.2"
-openhands-agent-server = "1.1.0"
-openhands-sdk = "1.1.0"
-openhands-tools = "1.1.0"
+openhands-agent-server = "1.4.1"
+openhands-sdk = "1.4.1"
+openhands-tools = "1.4.1"
opentelemetry-api = "^1.33.1"
opentelemetry-exporter-otlp-proto-grpc = "^1.33.1"
pathspec = "^0.12.1"
@@ -5942,20 +5959,21 @@ url = ".."
[[package]]
name = "openhands-sdk"
-version = "1.1.0"
+version = "1.4.1"
description = "OpenHands SDK - Core functionality for building AI agents"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = [
- {file = "openhands_sdk-1.1.0-py3-none-any.whl", hash = "sha256:4a984ce1687a48cf99a67fdf3d37b116f8b2840743d4807810b5024af6a1d57e"},
- {file = "openhands_sdk-1.1.0.tar.gz", hash = "sha256:855e0d8f3657205e4119e50520c17e65b3358b1a923f7a051a82512a54bf426c"},
+ {file = "openhands_sdk-1.4.1-py3-none-any.whl", hash = "sha256:70e453eab7f9ab6b705198c2615fdd844b21e14b29d78afaf62724f4a440bcdc"},
+ {file = "openhands_sdk-1.4.1.tar.gz", hash = "sha256:37365de25ed57cf8cc2a8003ab4d7a1fe2a40b49c8e8da84a3f1ea2b522eddf2"},
]
[package.dependencies]
+deprecation = ">=2.1.0"
fastmcp = ">=2.11.3"
httpx = ">=0.27.0"
-litellm = ">=1.77.7.dev9"
+litellm = ">=1.80.7"
lmnr = ">=0.7.20"
pydantic = ">=2.11.7"
python-frontmatter = ">=1.1.0"
@@ -5968,14 +5986,14 @@ boto3 = ["boto3 (>=1.35.0)"]
[[package]]
name = "openhands-tools"
-version = "1.1.0"
+version = "1.4.1"
description = "OpenHands Tools - Runtime tools for AI agents"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = [
- {file = "openhands_tools-1.1.0-py3-none-any.whl", hash = "sha256:767d6746f05edade49263aa24450a037485a3dc23379f56917ef19aad22033f9"},
- {file = "openhands_tools-1.1.0.tar.gz", hash = "sha256:c2fadaa4f4e16e9a3df5781ea847565dcae7171584f09ef7c0e1d97c8dfc83f6"},
+ {file = "openhands_tools-1.4.1-py3-none-any.whl", hash = "sha256:8f40189a08bf80eb4a33219ee9ccc528f9c6c4f2d5c9ab807b06c3f3fe21a612"},
+ {file = "openhands_tools-1.4.1.tar.gz", hash = "sha256:4c0caf87f520a207d9035191c77b7b5c53eeec996350a24ffaf7f740a6566b22"},
]
[package.dependencies]
@@ -5987,6 +6005,7 @@ func-timeout = ">=4.3.5"
libtmux = ">=0.46.2"
openhands-sdk = "*"
pydantic = ">=2.11.7"
+tom-swe = ">=1.0.3"
[[package]]
name = "openpyxl"
@@ -13303,6 +13322,31 @@ dev = ["tokenizers[testing]"]
docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
testing = ["black (==22.3)", "datasets", "numpy", "pytest", "pytest-asyncio", "requests", "ruff"]
+[[package]]
+name = "tom-swe"
+version = "1.0.3"
+description = "Theory of Mind modeling for Software Engineering assistants"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+ {file = "tom_swe-1.0.3-py3-none-any.whl", hash = "sha256:7b1172b29eb5c8fb7f1975016e7b6a238511b9ac2a7a980bd400dcb4e29773f2"},
+ {file = "tom_swe-1.0.3.tar.gz", hash = "sha256:57c97d0104e563f15bd39edaf2aa6ac4c3e9444afd437fb92458700d22c6c0f5"},
+]
+
+[package.dependencies]
+jinja2 = ">=3.0.0"
+json-repair = ">=0.1.0"
+litellm = ">=1.0.0"
+pydantic = ">=2.0.0"
+python-dotenv = ">=1.0.0"
+tiktoken = ">=0.8.0"
+tqdm = ">=4.65.0"
+
+[package.extras]
+dev = ["aiofiles (>=23.0.0)", "black (>=22.0.0)", "datasets (>=2.0.0)", "fastapi (>=0.104.0)", "httpx (>=0.25.0)", "huggingface-hub (>=0.0.0)", "isort (>=5.0.0)", "mypy (>=1.0.0)", "numpy (>=1.24.0)", "pandas (>=2.0.0)", "pre-commit (>=3.6.0)", "pytest (>=7.0.0)", "pytest-cov (>=6.2.1)", "rich (>=13.0.0)", "ruff (>=0.3.0)", "typing-extensions (>=4.0.0)", "uvicorn (>=0.24.0)"]
+search = ["bm25s (>=0.2.0)", "pystemmer (>=2.2.0)"]
+
[[package]]
name = "toml"
version = "0.10.2"
diff --git a/enterprise/server/auth/constants.py b/enterprise/server/auth/constants.py
index c01525a43d8f..15d3b0f70457 100644
--- a/enterprise/server/auth/constants.py
+++ b/enterprise/server/auth/constants.py
@@ -30,3 +30,11 @@
JIRA_DC_BASE_URL = os.getenv('JIRA_DC_BASE_URL', '').strip()
JIRA_DC_ENABLE_OAUTH = os.getenv('JIRA_DC_ENABLE_OAUTH', '1') in ('1', 'true')
AUTH_URL = os.getenv('AUTH_URL', '').rstrip('/')
+ROLE_CHECK_ENABLED = os.getenv('ROLE_CHECK_ENABLED', 'false').lower() in (
+ '1',
+ 'true',
+ 't',
+ 'yes',
+ 'y',
+ 'on',
+)
diff --git a/enterprise/server/auth/saas_user_auth.py b/enterprise/server/auth/saas_user_auth.py
index eafb7c5b742b..2f399a74cfad 100644
--- a/enterprise/server/auth/saas_user_auth.py
+++ b/enterprise/server/auth/saas_user_auth.py
@@ -203,6 +203,15 @@ async def get_user_settings_store(self) -> SettingsStore:
self.settings_store = settings_store
return settings_store
+ async def get_mcp_api_key(self) -> str:
+ api_key_store = ApiKeyStore.get_instance()
+ mcp_api_key = api_key_store.retrieve_mcp_api_key(self.user_id)
+ if not mcp_api_key:
+ mcp_api_key = api_key_store.create_api_key(
+ self.user_id, 'MCP_API_KEY', None
+ )
+ return mcp_api_key
+
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
logger.debug('saas_user_auth_get_instance')
@@ -243,7 +252,12 @@ def get_api_key_from_header(request: Request):
# This is a temp hack
# Streamable HTTP MCP Client works via redirect requests, but drops the Authorization header for reason
# We include `X-Session-API-Key` header by default due to nested runtimes, so it used as a drop in replacement here
- return request.headers.get('X-Session-API-Key')
+ session_api_key = request.headers.get('X-Session-API-Key')
+ if session_api_key:
+ return session_api_key
+
+ # Fallback to X-Access-Token header as an additional option
+ return request.headers.get('X-Access-Token')
async def saas_user_auth_from_bearer(request: Request) -> SaasUserAuth | None:
diff --git a/enterprise/server/routes/auth.py b/enterprise/server/routes/auth.py
index d5f5cbd1ed44..ba7aadb88316 100644
--- a/enterprise/server/routes/auth.py
+++ b/enterprise/server/routes/auth.py
@@ -12,6 +12,7 @@
KEYCLOAK_CLIENT_ID,
KEYCLOAK_REALM_NAME,
KEYCLOAK_SERVER_URL_EXT,
+ ROLE_CHECK_ENABLED,
)
from server.auth.gitlab_sync import schedule_gitlab_repo_sync
from server.auth.saas_user_auth import SaasUserAuth
@@ -132,6 +133,12 @@ async def keycloak_callback(
user_info = await token_manager.get_user_info(keycloak_access_token)
logger.debug(f'user_info: {user_info}')
+ if ROLE_CHECK_ENABLED and 'roles' not in user_info:
+ return JSONResponse(
+ status_code=status.HTTP_401_UNAUTHORIZED,
+ content={'error': 'Missing required role'},
+ )
+
if 'sub' not in user_info or 'preferred_username' not in user_info:
return JSONResponse(
status_code=status.HTTP_400_BAD_REQUEST,
diff --git a/enterprise/server/saas_nested_conversation_manager.py b/enterprise/server/saas_nested_conversation_manager.py
index e0727996de14..0c5ece2675ce 100644
--- a/enterprise/server/saas_nested_conversation_manager.py
+++ b/enterprise/server/saas_nested_conversation_manager.py
@@ -70,6 +70,11 @@
else '/api/conversations/{conversation_id}'
)
+RUNTIME_USERNAME = os.getenv('RUNTIME_USERNAME')
+SU_TO_USER = os.getenv('SU_TO_USER', 'false')
+truthy = {'1', 'true', 't', 'yes', 'y', 'on'}
+SU_TO_USER = str(SU_TO_USER.lower() in truthy).lower()
+
# Time in seconds before a Redis entry is considered expired if not refreshed
_REDIS_ENTRY_TIMEOUT_SECONDS = 300
@@ -772,7 +777,11 @@ async def _create_runtime(
env_vars['SERVE_FRONTEND'] = '0'
env_vars['RUNTIME'] = 'local'
# TODO: In the long term we may come up with a more secure strategy for user management within the nested runtime.
- env_vars['USER'] = 'openhands' if config.run_as_openhands else 'root'
+ env_vars['USER'] = (
+ RUNTIME_USERNAME
+ if RUNTIME_USERNAME
+ else ('openhands' if config.run_as_openhands else 'root')
+ )
env_vars['PERMITTED_CORS_ORIGINS'] = ','.join(PERMITTED_CORS_ORIGINS)
env_vars['port'] = '60000'
# TODO: These values are static in the runtime-api project, but do not get copied into the runtime ENV
@@ -789,6 +798,7 @@ async def _create_runtime(
env_vars['INITIAL_NUM_WARM_SERVERS'] = '1'
env_vars['INIT_GIT_IN_EMPTY_WORKSPACE'] = '1'
env_vars['ENABLE_V1'] = '0'
+ env_vars['SU_TO_USER'] = SU_TO_USER
# We need this for LLM traces tracking to identify the source of the LLM calls
env_vars['WEB_HOST'] = WEB_HOST
diff --git a/enterprise/storage/saas_conversation_store.py b/enterprise/storage/saas_conversation_store.py
index 80a27ce957c6..160c3a80a2fb 100644
--- a/enterprise/storage/saas_conversation_store.py
+++ b/enterprise/storage/saas_conversation_store.py
@@ -60,6 +60,7 @@ def _to_external_model(self, conversation_metadata: StoredConversationMetadata):
kwargs.pop('reasoning_tokens', None)
kwargs.pop('context_window', None)
kwargs.pop('per_turn_token', None)
+ kwargs.pop('parent_conversation_id', None)
return ConversationMetadata(**kwargs)
diff --git a/enterprise/storage/saas_settings_store.py b/enterprise/storage/saas_settings_store.py
index bf27c4aaa56d..6cbcb5080214 100644
--- a/enterprise/storage/saas_settings_store.py
+++ b/enterprise/storage/saas_settings_store.py
@@ -97,6 +97,10 @@ async def load(self) -> Settings | None:
return settings
async def store(self, item: Settings):
+ # Check if provider is OpenHands and generate API key if needed
+ if item and self._is_openhands_provider(item):
+ await self._ensure_openhands_api_key(item)
+
with self.session_maker() as session:
existing = None
kwargs = {}
@@ -368,6 +372,30 @@ def _fernet(self):
def _should_encrypt(self, key: str) -> bool:
return key in ('llm_api_key', 'llm_api_key_for_byor', 'search_api_key')
+ def _is_openhands_provider(self, item: Settings) -> bool:
+ """Check if the settings use the OpenHands provider."""
+ return bool(item.llm_model and item.llm_model.startswith('openhands/'))
+
+ async def _ensure_openhands_api_key(self, item: Settings) -> None:
+ """Generate and set the OpenHands API key for the given settings.
+
+ First checks if an existing key with the OpenHands alias exists,
+ and reuses it if found. Otherwise, generates a new key.
+ """
+ # Generate new key if none exists
+ generated_key = await self._generate_openhands_key()
+ if generated_key:
+ item.llm_api_key = SecretStr(generated_key)
+ logger.info(
+ 'saas_settings_store:store:generated_openhands_key',
+ extra={'user_id': self.user_id},
+ )
+ else:
+ logger.warning(
+ 'saas_settings_store:store:failed_to_generate_openhands_key',
+ extra={'user_id': self.user_id},
+ )
+
async def _create_user_in_lite_llm(
self, client: httpx.AsyncClient, email: str | None, max_budget: int, spend: int
):
@@ -390,3 +418,55 @@ async def _create_user_in_lite_llm(
},
)
return response
+
+ async def _generate_openhands_key(self) -> str | None:
+ """Generate a new OpenHands provider key for a user."""
+ if not (LITE_LLM_API_KEY and LITE_LLM_API_URL):
+ logger.warning(
+ 'saas_settings_store:_generate_openhands_key:litellm_config_not_found',
+ extra={'user_id': self.user_id},
+ )
+ return None
+
+ try:
+ async with httpx.AsyncClient(
+ verify=httpx_verify_option(),
+ headers={
+ 'x-goog-api-key': LITE_LLM_API_KEY,
+ },
+ ) as client:
+ response = await client.post(
+ f'{LITE_LLM_API_URL}/key/generate',
+ json={
+ 'user_id': self.user_id,
+ 'metadata': {'type': 'openhands'},
+ },
+ )
+ response.raise_for_status()
+ response_json = response.json()
+ key = response_json.get('key')
+
+ if key:
+ logger.info(
+ 'saas_settings_store:_generate_openhands_key:success',
+ extra={
+ 'user_id': self.user_id,
+ 'key_length': len(key) if key else 0,
+ 'key_prefix': (
+ key[:10] + '...' if key and len(key) > 10 else key
+ ),
+ },
+ )
+ return key
+ else:
+ logger.error(
+ 'saas_settings_store:_generate_openhands_key:no_key_in_response',
+ extra={'user_id': self.user_id, 'response_json': response_json},
+ )
+ return None
+ except Exception as e:
+ logger.exception(
+ 'saas_settings_store:_generate_openhands_key:error',
+ extra={'user_id': self.user_id, 'error': str(e)},
+ )
+ return None
diff --git a/enterprise/storage/user_settings.py b/enterprise/storage/user_settings.py
index b84f644b7160..4d60d8b67631 100644
--- a/enterprise/storage/user_settings.py
+++ b/enterprise/storage/user_settings.py
@@ -38,3 +38,4 @@ class UserSettings(Base): # type: ignore
email_verified = Column(Boolean, nullable=True)
git_user_name = Column(String, nullable=True)
git_user_email = Column(String, nullable=True)
+ v1_enabled = Column(Boolean, nullable=True)
diff --git a/enterprise/tests/unit/experiments/test_saas_experiment_manager.py b/enterprise/tests/unit/experiments/test_saas_experiment_manager.py
index ec67c7479f63..4f1eab2a920d 100644
--- a/enterprise/tests/unit/experiments/test_saas_experiment_manager.py
+++ b/enterprise/tests/unit/experiments/test_saas_experiment_manager.py
@@ -92,11 +92,8 @@ def test_unknown_variant_returns_original_agent_without_changes(monkeypatch):
assert getattr(result, 'condenser', None) is None
-@patch('experiments.experiment_manager.handle_condenser_max_step_experiment__v1')
@patch('experiments.experiment_manager.ENABLE_EXPERIMENT_MANAGER', False)
-def test_run_agent_variant_tests_v1_noop_when_manager_disabled(
- mock_handle_condenser,
-):
+def test_run_agent_variant_tests_v1_noop_when_manager_disabled():
"""If ENABLE_EXPERIMENT_MANAGER is False, the method returns the exact same agent and does not call the handler."""
agent = make_agent()
conv_id = uuid4()
@@ -109,8 +106,6 @@ def test_run_agent_variant_tests_v1_noop_when_manager_disabled(
# Same object returned (no copy)
assert result is agent
- # Handler should not have been called
- mock_handle_condenser.assert_not_called()
@patch('experiments.experiment_manager.ENABLE_EXPERIMENT_MANAGER', True)
@@ -131,7 +126,3 @@ def test_run_agent_variant_tests_v1_calls_handler_and_sets_system_prompt(monkeyp
# Should be a different instance than the original (copied after handler runs)
assert result is not agent
assert result.system_prompt_filename == 'system_prompt_long_horizon.j2'
-
- # The condenser returned by the handler must be preserved after the system-prompt override copy
- assert isinstance(result.condenser, LLMSummarizingCondenser)
- assert result.condenser.max_size == 80
diff --git a/enterprise/tests/unit/test_github_view.py b/enterprise/tests/unit/test_github_view.py
index 731b35b55f84..1edc46bc2af3 100644
--- a/enterprise/tests/unit/test_github_view.py
+++ b/enterprise/tests/unit/test_github_view.py
@@ -1,7 +1,10 @@
from unittest import TestCase, mock
+from unittest.mock import MagicMock, patch
-from integrations.github.github_view import GithubFactory, get_oh_labels
+import pytest
+from integrations.github.github_view import GithubFactory, GithubIssue, get_oh_labels
from integrations.models import Message, SourceType
+from integrations.types import UserData
class TestGithubLabels(TestCase):
@@ -75,3 +78,132 @@ def test_issue_comment_case_insensitivity(self):
self.assertTrue(GithubFactory.is_issue_comment(message_lower))
self.assertTrue(GithubFactory.is_issue_comment(message_upper))
self.assertTrue(GithubFactory.is_issue_comment(message_mixed))
+
+
+class TestGithubV1ConversationRouting(TestCase):
+ """Test V1 conversation routing logic in GitHub integration."""
+
+ def setUp(self):
+ """Set up test fixtures."""
+ # Create a proper UserData instance instead of MagicMock
+ user_data = UserData(
+ user_id=123, username='testuser', keycloak_user_id='test-keycloak-id'
+ )
+
+ # Create a mock raw_payload
+ raw_payload = Message(
+ source=SourceType.GITHUB,
+ message={
+ 'payload': {
+ 'action': 'opened',
+ 'issue': {'number': 123},
+ }
+ },
+ )
+
+ self.github_issue = GithubIssue(
+ user_info=user_data,
+ full_repo_name='test/repo',
+ issue_number=123,
+ installation_id=456,
+ conversation_id='test-conversation-id',
+ should_extract=True,
+ send_summary_instruction=False,
+ is_public_repo=True,
+ raw_payload=raw_payload,
+ uuid='test-uuid',
+ title='Test Issue',
+ description='Test issue description',
+ previous_comments=[],
+ v1=False,
+ )
+
+ @pytest.mark.asyncio
+ @patch('integrations.github.github_view.get_user_v1_enabled_setting')
+ @patch.object(GithubIssue, '_create_v0_conversation')
+ @patch.object(GithubIssue, '_create_v1_conversation')
+ async def test_create_new_conversation_routes_to_v0_when_disabled(
+ self, mock_create_v1, mock_create_v0, mock_get_v1_setting
+ ):
+ """Test that conversation creation routes to V0 when v1_enabled is False."""
+ # Mock v1_enabled as False
+ mock_get_v1_setting.return_value = False
+ mock_create_v0.return_value = None
+ mock_create_v1.return_value = None
+
+ # Mock parameters
+ jinja_env = MagicMock()
+ git_provider_tokens = MagicMock()
+ conversation_metadata = MagicMock()
+
+ # Call the method
+ await self.github_issue.create_new_conversation(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+
+ # Verify V0 was called and V1 was not
+ mock_create_v0.assert_called_once_with(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+ mock_create_v1.assert_not_called()
+
+ @pytest.mark.asyncio
+ @patch('integrations.github.github_view.get_user_v1_enabled_setting')
+ @patch.object(GithubIssue, '_create_v0_conversation')
+ @patch.object(GithubIssue, '_create_v1_conversation')
+ async def test_create_new_conversation_routes_to_v1_when_enabled(
+ self, mock_create_v1, mock_create_v0, mock_get_v1_setting
+ ):
+ """Test that conversation creation routes to V1 when v1_enabled is True."""
+ # Mock v1_enabled as True
+ mock_get_v1_setting.return_value = True
+ mock_create_v0.return_value = None
+ mock_create_v1.return_value = None
+
+ # Mock parameters
+ jinja_env = MagicMock()
+ git_provider_tokens = MagicMock()
+ conversation_metadata = MagicMock()
+
+ # Call the method
+ await self.github_issue.create_new_conversation(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+
+ # Verify V1 was called and V0 was not
+ mock_create_v1.assert_called_once_with(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+ mock_create_v0.assert_not_called()
+
+ @pytest.mark.asyncio
+ @patch('integrations.github.github_view.get_user_v1_enabled_setting')
+ @patch.object(GithubIssue, '_create_v0_conversation')
+ @patch.object(GithubIssue, '_create_v1_conversation')
+ async def test_create_new_conversation_fallback_on_v1_setting_error(
+ self, mock_create_v1, mock_create_v0, mock_get_v1_setting
+ ):
+ """Test that conversation creation falls back to V0 when _create_v1_conversation fails."""
+ # Mock v1_enabled as True so V1 is attempted
+ mock_get_v1_setting.return_value = True
+ # Mock _create_v1_conversation to raise an exception
+ mock_create_v1.side_effect = Exception('V1 conversation creation failed')
+ mock_create_v0.return_value = None
+
+ # Mock parameters
+ jinja_env = MagicMock()
+ git_provider_tokens = MagicMock()
+ conversation_metadata = MagicMock()
+
+ # Call the method
+ await self.github_issue.create_new_conversation(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+
+ # Verify V1 was attempted first, then V0 was called as fallback
+ mock_create_v1.assert_called_once_with(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
+ mock_create_v0.assert_called_once_with(
+ jinja_env, git_provider_tokens, conversation_metadata
+ )
diff --git a/enterprise/tests/unit/test_saas_user_auth.py b/enterprise/tests/unit/test_saas_user_auth.py
index 35672af7242d..d4ba902677b0 100644
--- a/enterprise/tests/unit/test_saas_user_auth.py
+++ b/enterprise/tests/unit/test_saas_user_auth.py
@@ -535,3 +535,115 @@ def test_get_api_key_from_header_with_invalid_authorization_format():
# Assert that None was returned
assert api_key is None
+
+
+def test_get_api_key_from_header_with_x_access_token():
+ """Test that get_api_key_from_header extracts API key from X-Access-Token header."""
+ # Create a mock request with X-Access-Token header
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {'X-Access-Token': 'access_token_key'}
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that the API key was correctly extracted
+ assert api_key == 'access_token_key'
+
+
+def test_get_api_key_from_header_priority_authorization_over_x_access_token():
+ """Test that Authorization header takes priority over X-Access-Token header."""
+ # Create a mock request with both headers
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {
+ 'Authorization': 'Bearer auth_api_key',
+ 'X-Access-Token': 'access_token_key',
+ }
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that the API key from Authorization header was used
+ assert api_key == 'auth_api_key'
+
+
+def test_get_api_key_from_header_priority_x_session_over_x_access_token():
+ """Test that X-Session-API-Key header takes priority over X-Access-Token header."""
+ # Create a mock request with both headers
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {
+ 'X-Session-API-Key': 'session_api_key',
+ 'X-Access-Token': 'access_token_key',
+ }
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that the API key from X-Session-API-Key header was used
+ assert api_key == 'session_api_key'
+
+
+def test_get_api_key_from_header_all_three_headers():
+ """Test header priority when all three headers are present."""
+ # Create a mock request with all three headers
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {
+ 'Authorization': 'Bearer auth_api_key',
+ 'X-Session-API-Key': 'session_api_key',
+ 'X-Access-Token': 'access_token_key',
+ }
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that the API key from Authorization header was used (highest priority)
+ assert api_key == 'auth_api_key'
+
+
+def test_get_api_key_from_header_invalid_authorization_fallback_to_x_access_token():
+ """Test that invalid Authorization header falls back to X-Access-Token."""
+ # Create a mock request with invalid Authorization header and X-Access-Token
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {
+ 'Authorization': 'InvalidFormat api_key',
+ 'X-Access-Token': 'access_token_key',
+ }
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that the API key from X-Access-Token header was used
+ assert api_key == 'access_token_key'
+
+
+def test_get_api_key_from_header_empty_headers():
+ """Test that empty header values are handled correctly."""
+ # Create a mock request with empty header values
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {
+ 'Authorization': '',
+ 'X-Session-API-Key': '',
+ 'X-Access-Token': 'access_token_key',
+ }
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that the API key from X-Access-Token header was used
+ assert api_key == 'access_token_key'
+
+
+def test_get_api_key_from_header_bearer_with_empty_token():
+ """Test that Bearer header with empty token falls back to other headers."""
+ # Create a mock request with Bearer header with empty token
+ mock_request = MagicMock(spec=Request)
+ mock_request.headers = {
+ 'Authorization': 'Bearer ',
+ 'X-Access-Token': 'access_token_key',
+ }
+
+ # Call the function
+ api_key = get_api_key_from_header(mock_request)
+
+ # Assert that empty string from Bearer is returned (current behavior)
+ # This tests the current implementation behavior
+ assert api_key == ''
diff --git a/evaluation/benchmarks/swefficiency/README.md b/evaluation/benchmarks/swefficiency/README.md
new file mode 100644
index 000000000000..6418f3a87b7e
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/README.md
@@ -0,0 +1,65 @@
+# SWE-fficiency Evaluation
+
+This folder contains the OpenHands inference generation of the [SWE-fficiency benchmark](https://swefficiency.com/) ([paper](https://arxiv.org/pdf/2507.12415v1)).
+
+The evaluation consists of three steps:
+
+1. Environment setup: [install python environment](../../README.md#development-environment) and [configure LLM config](../../README.md#configure-openhands-and-your-llm).
+2. [Run inference](#running-inference-locally-with-docker): Generate an edit patch for each GitHub issue
+3. [Evaluate patches](#evaluate-generated-patches)
+
+## Setup Environment and LLM Configuration
+
+Please follow instruction [here](../../README.md#setup) to setup your local development environment and LLM.
+
+## Running inference Locally with Docker
+
+Make sure your Docker daemon is running, and you have ample disk space (at least 200-500GB, depending on the SWE-Perf set you are running on) for the instance-level Docker images.
+
+When the `run_infer.sh` script is started, it will automatically pull the relevant SWE-Perf images.
+For example, for instance ID `scikit-learn_scikit-learn-11674`, it will try to pull our pre-built Docker image `betty1202/sweb.eval.x86_64.scikit-learn_s_scikit-learn-11674` from DockerHub.
+This image will be used to create an OpenHands runtime image in which the agent will operate.
+
+```bash
+./evaluation/benchmarks/swefficiency/scripts/run_infer.sh [model_config] [git-version] [agent] [eval_limit] [max_iter] [num_workers] [dataset] [dataset_split] [n_runs] [mode]
+
+# Example
+./evaluation/benchmarks/swefficiency/scripts/run_infer.sh llm.eval_gpt4_1106_preview HEAD CodeActAgent 500 100 1 swefficiency/swefficiency test
+```
+
+where `model_config` is mandatory, and the rest are optional.
+
+- `model_config`, e.g. `eval_gpt4_1106_preview`, is the config group name for your
+LLM settings, as defined in your `config.toml`.
+- `git-version`, e.g. `HEAD`, is the git commit hash of the OpenHands version you would
+like to evaluate. It could also be a release tag like `0.6.2`.
+- `agent`, e.g. `CodeActAgent`, is the name of the agent for benchmarks, defaulting
+to `CodeActAgent`.
+- `eval_limit`, e.g. `10`, limits the evaluation to the first `eval_limit` instances. By
+default, the script evaluates the entire SWE-Perf test set (140 issues). Note:
+in order to use `eval_limit`, you must also set `agent`.
+- `max_iter`, e.g. `20`, is the maximum number of iterations for the agent to run. By
+default, it is set to 100.
+- `num_workers`, e.g. `3`, is the number of parallel workers to run the evaluation. By
+default, it is set to 1.
+- `dataset`, a huggingface dataset name, e.g. `swefficiency/swefficiency`, specifies which dataset to evaluate on.
+- `dataset_split`, split for the huggingface dataset. e.g., `test`, `dev`. Default to `test`.
+
+- `n_runs`, e.g. `3`, is the number of times to run the evaluation. Default is 1.
+- `mode`, e.g. `swt`, `swt-ci`, or `swe`, specifies the evaluation mode. Default is `swe`.
+
+> [!CAUTION]
+> Setting `num_workers` larger than 1 is not officially tested, YMMV.
+
+
+Let's say you'd like to run 10 instances using `llm.eval_gpt4_1106_preview` and CodeActAgent,
+
+then your command would be:
+
+```bash
+./evaluation/benchmarks/swefficiency/scripts/run_infer.sh llm.eval_gpt4_1106_preview HEAD CodeActAgent 10
+```
+
+### 2. Run the SWE-fficiency benchmark official evaluation
+
+Once the inference output has been generated, use the [official SWE-fficiency benchmark evaluation](https://github.com/swefficiency/swefficiency) to evaluate it.
diff --git a/evaluation/benchmarks/swefficiency/__init__.py b/evaluation/benchmarks/swefficiency/__init__.py
new file mode 100644
index 000000000000..e69de29bb2d1
diff --git a/evaluation/benchmarks/swefficiency/binary_patch_utils.py b/evaluation/benchmarks/swefficiency/binary_patch_utils.py
new file mode 100644
index 000000000000..9cf0dbd714d7
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/binary_patch_utils.py
@@ -0,0 +1,52 @@
+"""
+Utilities for handling binary files and patch generation in SWE-bench evaluation.
+"""
+
+
+def remove_binary_diffs(patch_text):
+ """
+ Remove binary file diffs from a git patch.
+
+ Args:
+ patch_text (str): The git patch text
+
+ Returns:
+ str: The cleaned patch text with binary diffs removed
+ """
+ lines = patch_text.splitlines()
+ cleaned_lines = []
+ block = []
+ is_binary_block = False
+
+ for line in lines:
+ if line.startswith('diff --git '):
+ if block and not is_binary_block:
+ cleaned_lines.extend(block)
+ block = [line]
+ is_binary_block = False
+ elif 'Binary files' in line:
+ is_binary_block = True
+ block.append(line)
+ else:
+ block.append(line)
+
+ if block and not is_binary_block:
+ cleaned_lines.extend(block)
+ return '\n'.join(cleaned_lines)
+
+
+def remove_binary_files_from_git():
+ """
+ Generate a bash command to remove binary files from git staging.
+
+ Returns:
+ str: A bash command that removes binary files from git staging
+ """
+ return """
+ for file in $(git status --porcelain | grep -E "^(M| M|\\?\\?|A| A)" | cut -c4-); do
+ if [ -f "$file" ] && (file "$file" | grep -q "executable" || git check-attr binary "$file" | grep -q "binary: set"); then
+ git rm -f "$file" 2>/dev/null || rm -f "$file"
+ echo "Removed: $file"
+ fi
+ done
+ """.strip()
diff --git a/evaluation/benchmarks/swefficiency/run_infer.py b/evaluation/benchmarks/swefficiency/run_infer.py
new file mode 100644
index 000000000000..42da17d2346d
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/run_infer.py
@@ -0,0 +1,960 @@
+import asyncio
+import copy
+import functools
+import json
+import multiprocessing
+import os
+import tempfile
+from typing import Any, Literal
+
+import pandas as pd
+import toml
+from datasets import load_dataset
+
+import openhands.agenthub
+from evaluation.benchmarks.swefficiency.binary_patch_utils import (
+ remove_binary_diffs,
+ remove_binary_files_from_git,
+)
+from evaluation.utils.shared import (
+ EvalException,
+ EvalMetadata,
+ EvalOutput,
+ assert_and_raise,
+ codeact_user_response,
+ get_default_sandbox_config_for_eval,
+ get_metrics,
+ is_fatal_evaluation_error,
+ make_metadata,
+ prepare_dataset,
+ reset_logger_for_multiprocessing,
+ run_evaluation,
+ update_llm_config_for_completions_logging,
+)
+from openhands.controller.state.state import State
+from openhands.core.config import (
+ AgentConfig,
+ OpenHandsConfig,
+ get_evaluation_parser,
+ get_llm_config_arg,
+)
+from openhands.core.config.condenser_config import NoOpCondenserConfig
+from openhands.core.config.utils import get_condenser_config_arg
+from openhands.core.logger import openhands_logger as logger
+from openhands.core.main import create_runtime, run_controller
+from openhands.critic import AgentFinishedCritic
+from openhands.events.action import CmdRunAction, FileReadAction, MessageAction
+from openhands.events.observation import (
+ CmdOutputObservation,
+ ErrorObservation,
+ FileReadObservation,
+)
+from openhands.events.serialization.event import event_from_dict, event_to_dict
+from openhands.runtime.base import Runtime
+from openhands.utils.async_utils import call_async_from_sync
+from openhands.utils.shutdown_listener import sleep_if_should_continue
+
+USE_HINT_TEXT = os.environ.get('USE_HINT_TEXT', 'false').lower() == 'true'
+RUN_WITH_BROWSING = os.environ.get('RUN_WITH_BROWSING', 'false').lower() == 'true'
+BenchMode = Literal['swe', 'swt', 'swt-ci']
+
+
+AGENT_CLS_TO_FAKE_USER_RESPONSE_FN = {
+ 'CodeActAgent': codeact_user_response,
+}
+
+
+def _get_swebench_workspace_dir_name(instance: pd.Series) -> str:
+ return f'{instance.repo}__{instance.version}'.replace('/', '__')
+
+
+def get_instruction(instance: pd.Series, metadata: EvalMetadata) -> MessageAction:
+ workspace_dir_name = _get_swebench_workspace_dir_name(instance)
+
+ # TODO: Change to testbed?
+ instruction = f"""
+
+/workspace/{workspace_dir_name}
+
+
+I’ve uploaded a python code repository in the directory /workspace/{workspace_dir_name}. Consider the following performance workload and `workload()` function showing a specific usage of the repository:
+
+{instance.workload}
+
+
+Can you help me implement the necessary changes to the repository so that the runtime of the `workload()` function is faster? Basic guidelines:
+1. Your task is to make changes to non-test files in the /workspace directory to improve the performance of the code running in `workload()`. Please do not directly change the implementation of the `workload()` function to optimize things: I want you to focus on making the workload AS IS run faster by only editing the repository containing code that the `workload()` function calls.
+2. Make changes while ensuring the repository is functionally equivalent to the original: your changes should not introduce new bugs or cause already-passing tests to begin failing after your changes. However, you do not need to worry about tests that already fail without any changes made. For relevant test files you find in the repository, you can run them via the bash command `{instance.test_cmd} ` to check for correctness. Note that running all the tests may take a long time, so you need to determine which tests are relevant to your changes.
+3. Make sure the `workload()` function improves in performance after you make changes to the repository. The workload can potentially take some time to run, so please allow it to finish and be generous with setting your timeout parameter (a timeout value of 3600 or larger here is encouraged): for faster iteration, you should adjust the workload script to use fewer iterations. Before you complete your task, please make sure to check that the **original performance workload** and `workload()` function runs successfully and the performance is improved.
+4. You may need to reinstall/rebuild the repo for your changes to take effect before testing if you made non-Python changes. Reinstalling may take a long time to run (a timeout value of 3600 or larger here is encouraged), so please be patient with running it and allow it to complete if possible. You can reinstall the repository by running the bash command `{instance.rebuild_cmd}` in the workspace directory.
+5. All the dependencies required to run the `workload()` function are already installed in the environment. You should not install or upgrade any dependencies.
+
+Follow these steps to improve performance:
+1. As a first step, explore the repository structure.
+2. Create a Python script to reproduce the performance workload, execute it with python , and examine the printed output metrics.
+3. Edit the source code of the repository to improve performance. Please do not change the contents of the `workload()` function itself, but focus on optimizing the code in the repository that the original `workload()` function uses.
+4. If non-Python changes were made, rebuild the repo to make sure the changes take effect.
+5. Rerun your script to confirm that performance has improved.
+6. If necessary, identify any relevant test files in the repository related to your changes and verify that test statuses did not change after your modifications.
+7. After each attempted change, please reflect on the changes attempted and the performance impact observed. If the performance did not improve, consider alternative approaches or optimizations.
+8. Once you are satisfied, please use the finish command to complete your task.
+
+Please remember that you should not change the implementation of the `workload()` function. The performance improvement should solely come from editing the source files in the code repository.
+"""
+
+ if RUN_WITH_BROWSING:
+ instruction += (
+ '\nYou SHOULD NEVER attempt to browse the web. \n'
+ )
+
+ return MessageAction(content=instruction)
+
+
+def get_instance_docker_image(
+ instance_id: str,
+) -> str:
+ return f'ghcr.io/swefficiency/swefficiency-images:{instance_id}'
+
+
+def get_config(
+ instance: pd.Series,
+ metadata: EvalMetadata,
+ cpu_group: list[int] | None = None,
+) -> OpenHandsConfig:
+    # We use a different instance image for each instance of the swefficiency eval
+ base_container_image = get_instance_docker_image(
+ instance['instance_id'],
+ )
+ logger.info(
+ f'Using instance container image: {base_container_image}. '
+ f'Please make sure this image exists. '
+ f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ )
+
+ sandbox_config = get_default_sandbox_config_for_eval()
+ sandbox_config.base_container_image = base_container_image
+ sandbox_config.enable_auto_lint = True
+ sandbox_config.use_host_network = False
+ sandbox_config.timeout = 3600
+
+    # Control container cleanup behavior:
+    # remove all containers on shutdown so crashed workers don't leak containers
+    sandbox_config.rm_all_containers = True
+
+ sandbox_config.platform = 'linux/amd64'
+ sandbox_config.remote_runtime_resource_factor = 4.0
+ sandbox_config.runtime_startup_env_vars.update(
+ {
+ 'NO_CHANGE_TIMEOUT_SECONDS': '900', # 15 minutes
+ }
+ )
+
+ if cpu_group is not None:
+ print(f'Configuring Docker runtime with CPU group: {cpu_group}')
+ sandbox_config.docker_runtime_kwargs = {
+ # HACK: Use the cpu_group if provided, otherwise use all available CPUs
+ 'cpuset_cpus': ','.join(map(str, cpu_group)),
+ 'nano_cpus': int(1e9 * len(cpu_group)), # optional: hard cap to vCPU count
+ 'mem_limit': '16g',
+ }
+
+    # Note: rm_all_containers is set to True above so each run's containers are cleaned up
+
+ config = OpenHandsConfig(
+ default_agent=metadata.agent_class,
+ run_as_openhands=False,
+ max_iterations=metadata.max_iterations,
+ runtime=os.environ.get('RUNTIME', 'docker'),
+ sandbox=sandbox_config,
+ # do not mount workspace
+ workspace_base=None,
+ workspace_mount_path=None,
+ )
+ config.set_llm_config(
+ update_llm_config_for_completions_logging(
+ metadata.llm_config, metadata.eval_output_dir, instance['instance_id']
+ )
+ )
+ agent_config = AgentConfig(
+ enable_jupyter=False,
+ enable_browsing=RUN_WITH_BROWSING,
+ enable_llm_editor=False,
+ enable_mcp=False,
+ condenser=metadata.condenser_config,
+ enable_prompt_extensions=False,
+ )
+ config.set_agent_config(agent_config)
+ return config
+
+
+def initialize_runtime(
+ runtime: Runtime,
+ instance: pd.Series, # this argument is not required
+ metadata: EvalMetadata,
+):
+ """Initialize the runtime for the agent.
+
+ This function is called before the runtime is used to run the agent.
+ """
+ logger.info('-' * 30)
+ logger.info('BEGIN Runtime Initialization Fn')
+ logger.info('-' * 30)
+ workspace_dir_name = _get_swebench_workspace_dir_name(instance)
+ obs: CmdOutputObservation
+
+ # Set instance id and git configuration
+ action = CmdRunAction(
+ command=f"""echo 'export SWE_INSTANCE_ID={instance['instance_id']}' >> ~/.bashrc && echo 'export PIP_CACHE_DIR=~/.cache/pip' >> ~/.bashrc && echo "alias git='git --no-pager'" >> ~/.bashrc && git config --global core.pager "" && git config --global diff.binary false"""
+ )
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ obs.exit_code == 0,
+ f'Failed to export SWE_INSTANCE_ID and configure git: {str(obs)}',
+ )
+
+ action = CmdRunAction(command="""export USER=$(whoami); echo USER=${USER} """)
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(obs.exit_code == 0, f'Failed to export USER: {str(obs)}')
+
+ # inject the init script
+ script_dir = os.path.dirname(__file__)
+
+ # inject the instance info
+ action = CmdRunAction(command='mkdir -p /swe_util/eval_data/instances')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ obs.exit_code == 0,
+ f'Failed to create /swe_util/eval_data/instances: {str(obs)}',
+ )
+
+ swe_instance_json_name = 'swe-bench-instance.json'
+ with tempfile.TemporaryDirectory() as temp_dir:
+ # Construct the full path for the desired file name within the temporary directory
+ temp_file_path = os.path.join(temp_dir, swe_instance_json_name)
+ # Write to the file with the desired name within the temporary directory
+ with open(temp_file_path, 'w') as f:
+ if not isinstance(instance, dict):
+ json.dump([instance.to_dict()], f)
+ else:
+ json.dump([instance], f)
+
+ # Copy the file to the desired location
+ runtime.copy_to(temp_file_path, '/swe_util/eval_data/instances/')
+
+ # inject the instance swe entry
+ runtime.copy_to(
+ str(os.path.join(script_dir, 'scripts/setup/instance_swe_entry.sh')),
+ '/swe_util/',
+ )
+
+ action = CmdRunAction(command='cat ~/.bashrc')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(obs.exit_code == 0, f'Failed to cat ~/.bashrc: {str(obs)}')
+
+ action = CmdRunAction(command='source ~/.bashrc')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ if isinstance(obs, ErrorObservation):
+ logger.error(f'Failed to source ~/.bashrc: {str(obs)}')
+ assert_and_raise(obs.exit_code == 0, f'Failed to source ~/.bashrc: {str(obs)}')
+
+ action = CmdRunAction(command='source /swe_util/instance_swe_entry.sh')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ obs.exit_code == 0,
+ f'Failed to source /swe_util/instance_swe_entry.sh: {str(obs)}',
+ )
+
+ action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ obs.exit_code == 0,
+ f'Failed to cd to /workspace/{workspace_dir_name}: {str(obs)}',
+ )
+
+ action = CmdRunAction(command='git reset --hard')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(obs.exit_code == 0, f'Failed to git reset --hard: {str(obs)}')
+
+ action = CmdRunAction(
+ command='for remote_name in $(git remote); do git remote remove "${remote_name}"; done'
+ )
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(obs.exit_code == 0, f'Failed to remove git remotes: {str(obs)}')
+
+ action = CmdRunAction(command='which python')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ obs.exit_code == 0 and 'testbed' in obs.content,
+ f'Expected to find python interpreter from testbed, but got: {str(obs)}',
+ )
+
+ logger.info('-' * 30)
+ logger.info('END Runtime Initialization Fn')
+ logger.info('-' * 30)
+
+
+def complete_runtime(
+ runtime: Runtime,
+ instance: pd.Series, # this argument is not required, but it is used to get the workspace_dir_name
+) -> dict[str, Any]:
+ """Complete the runtime for the agent.
+
+    This function is called after the agent has finished running.
+ If you need to do something in the sandbox to get the correctness metric after
+ the agent has run, modify this function.
+ """
+ logger.info('-' * 30)
+ logger.info('BEGIN Runtime Completion Fn')
+ logger.info('-' * 30)
+ obs: CmdOutputObservation
+ workspace_dir_name = _get_swebench_workspace_dir_name(instance)
+
+ action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+
+ if obs.exit_code == -1:
+ # The previous command is still running
+ # We need to kill previous command
+ logger.info('The previous command is still running, trying to kill it...')
+ action = CmdRunAction(command='C-c')
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+
+ # Then run the command again
+ action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+
+ if obs.exit_code == -1:
+ # The previous command is still running
+ # We need to kill previous command
+ logger.info('The previous command is still running, trying to ctrl+z it...')
+ action = CmdRunAction(command='C-z')
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+
+ # Then run the command again
+ action = CmdRunAction(command=f'cd /workspace/{workspace_dir_name}')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+
+ assert_and_raise(
+ isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
+ f'Failed to cd to /workspace/{workspace_dir_name}: {str(obs)}',
+ )
+
+ action = CmdRunAction(command='git config --global core.pager ""')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
+ f'Failed to git config --global core.pager "": {str(obs)}',
+ )
+
+ # First check for any git repositories in subdirectories
+ action = CmdRunAction(command='find . -type d -name .git -not -path "./.git"')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
+ f'Failed to find git repositories: {str(obs)}',
+ )
+
+ git_dirs = [p for p in obs.content.strip().split('\n') if p]
+ if git_dirs:
+ # Remove all .git directories in subdirectories
+ for git_dir in git_dirs:
+ action = CmdRunAction(command=f'rm -rf "{git_dir}"')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
+ f'Failed to remove git directory {git_dir}: {str(obs)}',
+ )
+
+ # add all files
+ action = CmdRunAction(command='git add -A')
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
+ f'Failed to git add -A: {str(obs)}',
+ )
+
+ # Remove binary files from git staging
+ action = CmdRunAction(command=remove_binary_files_from_git())
+ action.set_hard_timeout(600)
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ assert_and_raise(
+ isinstance(obs, CmdOutputObservation) and obs.exit_code == 0,
+ f'Failed to remove binary files: {str(obs)}',
+ )
+
+ n_retries = 0
+ git_patch = None
+ while n_retries < 5:
+ action = CmdRunAction(
+ command=f'git diff --no-color --cached {instance["base_commit"]} > patch.diff'
+ )
+ action.set_hard_timeout(max(300 + 100 * n_retries, 600))
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ n_retries += 1
+ if isinstance(obs, CmdOutputObservation):
+ if obs.exit_code == 0:
+ # Read the patch file
+ action = FileReadAction(path='patch.diff')
+ action.set_hard_timeout(max(300 + 100 * n_retries, 600))
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ if isinstance(obs, FileReadObservation):
+ git_patch = obs.content
+ break
+ elif isinstance(obs, ErrorObservation):
+ # Fall back to cat "patch.diff" to get the patch
+ assert 'File could not be decoded as utf-8' in obs.content
+ action = CmdRunAction(command='cat patch.diff')
+ action.set_hard_timeout(max(300 + 100 * n_retries, 600))
+ logger.info(action, extra={'msg_type': 'ACTION'})
+ obs = runtime.run_action(action)
+ assert isinstance(obs, CmdOutputObservation) and obs.exit_code == 0
+ logger.info(obs, extra={'msg_type': 'OBSERVATION'})
+ git_patch = obs.content
+ break
+ else:
+ assert_and_raise(False, f'Unexpected observation type: {str(obs)}')
+ else:
+ logger.info('Failed to get git diff, retrying...')
+ sleep_if_should_continue(10)
+ elif isinstance(obs, ErrorObservation):
+ logger.error(f'Error occurred: {obs.content}. Retrying...')
+ sleep_if_should_continue(10)
+ else:
+ assert_and_raise(False, f'Unexpected observation type: {str(obs)}')
+
+ assert_and_raise(git_patch is not None, 'Failed to get git diff (None)')
+
+ # Remove binary diffs from the patch
+ git_patch = remove_binary_diffs(git_patch)
+
+ logger.info('-' * 30)
+ logger.info('END Runtime Completion Fn')
+ logger.info('-' * 30)
+ return {'git_patch': git_patch}
+
+
+class CPUGroupManager:
+ def __init__(self, cpu_groups_queue: multiprocessing.Queue):
+ self.cpu_groups_queue = cpu_groups_queue
+
+ def __enter__(self):
+        # Get the current CPU group for this worker
+ if self.cpu_groups_queue is not None:
+ self.cpu_group = self.cpu_groups_queue.get()
+ logger.info(f'Worker started with CPU group: {self.cpu_group}')
+ return self.cpu_group
+ return None
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ # Put the CPU group back into the queue for other workers to use
+ if self.cpu_groups_queue is not None:
+ self.cpu_groups_queue.put(self.cpu_group)
+ logger.info(f'Worker finished with CPU group: {self.cpu_group}')
+
+
+def cleanup_docker_resources_for_worker():
+ """Clean up Docker resources specific to this worker process.
+
+ This prevents cascade failures when one worker's container crashes.
+ Note: This only cleans up stale locks, not containers, to avoid
+ interfering with other workers. Container cleanup is handled
+ by the DockerRuntime.close() method based on configuration.
+ """
+
+ # Clean up any stale port locks from crashed processes
+ try:
+ from openhands.runtime.utils.port_lock import cleanup_stale_locks
+
+ cleanup_stale_locks(max_age_seconds=300) # Clean up locks older than 5 minutes
+ except Exception as e:
+ logger.debug(f'Error cleaning up stale port locks: {e}')
+
+
+def process_instance(
+ instance: pd.Series,
+ metadata: EvalMetadata,
+ reset_logger: bool = True,
+ runtime_failure_count: int = 0,
+ cpu_groups_queue: multiprocessing.Queue = None,
+) -> EvalOutput:
+ # Clean up any Docker resources from previous failed runs
+ cleanup_docker_resources_for_worker()
+
+ # HACK: Use the global and get the cpu group for this worker.
+ with CPUGroupManager(cpu_groups_queue) as cpu_group:
+ config = get_config(instance, metadata, cpu_group=cpu_group)
+
+ # Setup the logger properly, so you can run multi-processing to parallelize the evaluation
+ if reset_logger:
+ log_dir = os.path.join(metadata.eval_output_dir, 'infer_logs')
+ reset_logger_for_multiprocessing(logger, instance.instance_id, log_dir)
+ else:
+ logger.info(f'Starting evaluation for instance {instance.instance_id}.')
+
+ metadata = copy.deepcopy(metadata)
+ metadata.details['runtime_failure_count'] = runtime_failure_count
+ metadata.details['remote_runtime_resource_factor'] = (
+ config.sandbox.remote_runtime_resource_factor
+ )
+
+ runtime = create_runtime(config, sid=None)
+ call_async_from_sync(runtime.connect)
+
+ try:
+ initialize_runtime(runtime, instance, metadata)
+
+ message_action = get_instruction(instance, metadata)
+
+ # Here's how you can run the agent (similar to the `main` function) and get the final task state
+ state: State | None = asyncio.run(
+ run_controller(
+ config=config,
+ initial_user_action=message_action,
+ runtime=runtime,
+ fake_user_response_fn=AGENT_CLS_TO_FAKE_USER_RESPONSE_FN[
+ metadata.agent_class
+ ],
+ )
+ )
+
+ # if fatal error, throw EvalError to trigger re-run
+ if is_fatal_evaluation_error(state.last_error):
+ raise EvalException('Fatal error detected: ' + state.last_error)
+
+ # ======= THIS IS SWE-Bench specific =======
+ # Get git patch
+ return_val = complete_runtime(runtime, instance)
+ git_patch = return_val['git_patch']
+ logger.info(
+ f'Got git diff for instance {instance.instance_id}:\n--------\n{git_patch}\n--------'
+ )
+ except Exception as e:
+ # Log the error but don't let it crash other workers
+ logger.error(
+ f'Error in worker processing instance {instance.instance_id}: {str(e)}'
+ )
+ raise
+ finally:
+ # Ensure runtime is properly closed to prevent cascade failures
+ try:
+ runtime.close()
+ except Exception as e:
+ logger.warning(
+ f'Error closing runtime for {instance.instance_id}: {str(e)}'
+ )
+ # Don't re-raise - we want to continue cleanup
+
+ # ==========================================
+
+ # ======= Attempt to evaluate the agent's edits =======
+ # we use eval_infer.sh to evaluate the agent's edits, not here
+ # because the agent may alter the environment / testcases
+ test_result = {
+ 'git_patch': git_patch,
+ }
+
+ # If you are working on some simpler benchmark that only evaluates the final model output (e.g., in a MessageAction)
+ # You can simply get the LAST `MessageAction` from the returned `state.history` and parse it for evaluation.
+ if state is None:
+ raise ValueError('State should not be None.')
+
+ # NOTE: this is NO LONGER the event stream, but an agent history that includes delegate agent's events
+ histories = [event_to_dict(event) for event in state.history]
+ metrics = get_metrics(state)
+
+ # Save the output
+ instruction = message_action.content
+ if message_action.image_urls:
+ instruction += (
+ '\n\n'
+ + '\n'.join(message_action.image_urls)
+ + ' '
+ )
+ output = EvalOutput(
+ instance_id=instance.instance_id,
+ instruction=instruction,
+ instance=instance.to_dict(), # SWE Bench specific
+ test_result=test_result,
+ metadata=metadata,
+ history=histories,
+ metrics=metrics,
+ error=state.last_error if state and state.last_error else None,
+ )
+ return output
+
+
+def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame:
+ file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.toml')
+ if os.path.exists(file_path):
+ with open(file_path, 'r') as file:
+ data = toml.load(file)
+ if 'selected_ids' in data:
+ selected_ids = data['selected_ids']
+ logger.info(
+ f'Filtering {len(selected_ids)} tasks from "selected_ids"...'
+ )
+ subset = dataset[dataset[filter_column].isin(selected_ids)]
+ logger.info(f'Retained {subset.shape[0]} tasks after filtering')
+ return subset
+ if 'selected_repos' in data:
+ # repos for the swe-bench instances:
+ # ['astropy/astropy', 'django/django', 'matplotlib/matplotlib', 'mwaskom/seaborn', 'pallets/flask', 'psf/requests', 'pydata/xarray', 'pylint-dev/pylint', 'pytest-dev/pytest', 'scikit-learn/scikit-learn', 'sphinx-doc/sphinx', 'sympy/sympy']
+ selected_repos = data['selected_repos']
+ if isinstance(selected_repos, str):
+ selected_repos = [selected_repos]
+ assert isinstance(selected_repos, list)
+ logger.info(
+ f'Filtering {selected_repos} tasks from "selected_repos"...'
+ )
+ subset = dataset[dataset['repo'].isin(selected_repos)]
+ logger.info(f'Retained {subset.shape[0]} tasks after filtering')
+ return subset
+
+ skip_ids = os.environ.get('SKIP_IDS', '').split(',')
+ if len(skip_ids) > 0:
+ logger.info(f'Filtering {len(skip_ids)} tasks from "SKIP_IDS"...')
+ return dataset[~dataset[filter_column].isin(skip_ids)]
+ return dataset
+
+
+def divide_cpus_among_workers(num_workers, num_cpus_per_worker=4, num_to_skip=0):
+ """Divide CPUs among workers, with better error handling for multiprocessing."""
+ try:
+ current_cpus = list(os.sched_getaffinity(0))
+ except AttributeError:
+ # os.sched_getaffinity not available on all platforms
+ import multiprocessing
+
+ current_cpus = list(range(multiprocessing.cpu_count()))
+
+ num_cpus = len(current_cpus)
+ if num_workers <= 0:
+ raise ValueError('Number of workers must be greater than 0')
+
+    # Check that num_workers and num_cpus_per_worker fit into available CPUs
+ total_cpus_needed = num_workers * num_cpus_per_worker + num_to_skip
+ if total_cpus_needed > num_cpus:
+ raise ValueError(
+ f'Not enough CPUs available. Requested {total_cpus_needed} '
+ f'CPUs (num_workers={num_workers}, num_cpus_per_worker={num_cpus_per_worker}, '
+ f'num_to_skip={num_to_skip}), but only {num_cpus} CPUs are available.'
+ )
+
+ # Divide this into groups, skipping the first `num_to_skip` CPUs.
+ available_cpus = current_cpus[num_to_skip:]
+ cpu_groups = [
+ available_cpus[i * num_cpus_per_worker : (i + 1) * num_cpus_per_worker]
+ for i in range(num_workers)
+ ]
+ print(
+ f'Divided {num_cpus} CPUs into {num_workers} groups, each with {num_cpus_per_worker} CPUs.'
+ )
+ print(f'CPU groups: {cpu_groups}')
+
+ return cpu_groups
+
+
+if __name__ == '__main__':
+ parser = get_evaluation_parser()
+ parser.add_argument(
+ '--dataset',
+ type=str,
+ default=None,
+ help='data set to evaluate on, for now use local.',
+ )
+ parser.add_argument(
+ '--split',
+ type=str,
+ default='test',
+ help='split to evaluate on',
+ )
+ parser.add_argument(
+ '--mode',
+ type=str,
+ default='swe',
+ help='mode to evaluate on',
+ )
+
+ args, _ = parser.parse_known_args()
+
+ # NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing
+ # so we don't need to manage file uploading to OpenHands's repo
+
+ # dataset = load_dataset(args.dataset, split=args.split)
+ # swe_bench_tests = filter_dataset(dataset.to_pandas(), 'instance_id')
+ dataset = load_dataset(args.dataset, split=args.split)
+
+ # Convert dataset to pandas DataFrame if it is not already.
+ if not isinstance(dataset, pd.DataFrame):
+ dataset = dataset.to_pandas()
+
+ dataset['version'] = dataset['version'].astype(str)
+
+ # Convert created_at column to string.
+ dataset['created_at'] = dataset['created_at'].astype(str)
+
+ swe_bench_tests = filter_dataset(dataset, 'instance_id')
+
+ logger.info(
+ f'Loaded dataset {args.dataset} with split {args.split}: {len(swe_bench_tests)} tasks'
+ )
+
+ llm_config = None
+ if args.llm_config:
+ llm_config = get_llm_config_arg(args.llm_config)
+ llm_config.log_completions = True
+        # modify_params must be False for evaluation purpose, for reproducibility and accuracy of results
+ llm_config.modify_params = False
+
+ if llm_config is None:
+ raise ValueError(f'Could not find LLM config: --llm_config {args.llm_config}')
+
+ # Get condenser config from environment variable
+ condenser_name = os.environ.get('EVAL_CONDENSER')
+ if condenser_name:
+ condenser_config = get_condenser_config_arg(condenser_name)
+ if condenser_config is None:
+ raise ValueError(
+ f'Could not find Condenser config: EVAL_CONDENSER={condenser_name}'
+ )
+ else:
+ # If no specific condenser config is provided via env var, default to NoOpCondenser
+ condenser_config = NoOpCondenserConfig()
+ logger.debug(
+ 'No Condenser config provided via EVAL_CONDENSER, using NoOpCondenser.'
+ )
+
+ details = {'mode': args.mode}
+ _agent_cls = openhands.agenthub.Agent.get_cls(args.agent_cls)
+
+    dataset_description = (
+        args.dataset.replace('/', '__') + '-' + args.split.replace('/', '__')
+    )
+    metadata = make_metadata(
+        llm_config,
+        dataset_description,
+ args.agent_cls,
+ args.max_iterations,
+ args.eval_note,
+ args.eval_output_dir,
+ details=details,
+ condenser_config=condenser_config,
+ )
+
+ output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
+ print(f'### OUTPUT FILE: {output_file} ###')
+
+ # Run evaluation in iterative mode:
+ # If a rollout fails to output AgentFinishAction, we will try again until it succeeds OR total 3 attempts have been made.
+ ITERATIVE_EVAL_MODE = (
+ os.environ.get('ITERATIVE_EVAL_MODE', 'false').lower() == 'true'
+ )
+ ITERATIVE_EVAL_MODE_MAX_ATTEMPTS = int(
+ os.environ.get('ITERATIVE_EVAL_MODE_MAX_ATTEMPTS', '3')
+ )
+
+ # Get all CPUs and divide into groups of num_workers and put them into a multiprocessing.Queue.
+ cpu_groups_queue = None
+ cpu_groups_list = divide_cpus_among_workers(args.eval_num_workers, num_to_skip=8)
+ cpu_groups_queue = multiprocessing.Manager().Queue()
+ for cpu_group in cpu_groups_list:
+ cpu_groups_queue.put(cpu_group)
+
+ if not ITERATIVE_EVAL_MODE:
+ # load the dataset
+ instances = prepare_dataset(swe_bench_tests, output_file, args.eval_n_limit)
+
+ process_instance_with_cpu_groups = functools.partial(
+ process_instance,
+ cpu_groups_queue=cpu_groups_queue,
+ )
+
+ config = get_config(
+ instances.iloc[0], # Use the first instance to get the config
+ metadata,
+ cpu_group=None, # We will use the cpu_groups_queue to get the cpu group later
+ )
+
+ run_evaluation(
+ instances,
+ metadata,
+ output_file,
+ args.eval_num_workers,
+ process_instance_with_cpu_groups,
+ timeout_seconds=8
+ * 60
+ * 60, # 8 hour PER instance should be more than enough
+ max_retries=3,
+ )
+ else:
+ critic = AgentFinishedCritic()
+
+ def get_cur_output_file_path(attempt: int) -> str:
+ return (
+ f'{output_file.removesuffix(".jsonl")}.critic_attempt_{attempt}.jsonl'
+ )
+
+ eval_ids = None
+ for attempt in range(1, ITERATIVE_EVAL_MODE_MAX_ATTEMPTS + 1):
+ cur_output_file = get_cur_output_file_path(attempt)
+ logger.info(
+ f'Running evaluation with critic {critic.__class__.__name__} for attempt {attempt} of {ITERATIVE_EVAL_MODE_MAX_ATTEMPTS}.'
+ )
+
+            # If the first attempt ran deterministically (temperature 0), bump the
+            # temperature to 0.1 on retry attempts so reruns can produce different results
+ if attempt > 1 and metadata.llm_config.temperature == 0:
+ logger.info(
+ f'Detected temperature is 0 for (>1) attempt {attempt}. Setting temperature to 0.1...'
+ )
+ metadata.llm_config.temperature = 0.1
+
+ # Load instances - at first attempt, we evaluate all instances
+ # On subsequent attempts, we only evaluate the instances that failed the previous attempt determined by critic
+ instances = prepare_dataset(
+ swe_bench_tests, cur_output_file, args.eval_n_limit, eval_ids=eval_ids
+ )
+ if len(instances) > 0 and not isinstance(
+ instances['PASS_TO_PASS'][instances['PASS_TO_PASS'].index[0]], str
+ ):
+ for col in ['PASS_TO_PASS', 'FAIL_TO_PASS']:
+ instances[col] = instances[col].apply(lambda x: str(x))
+
+ # Run evaluation - but save them to cur_output_file
+ logger.info(
+ f'Evaluating {len(instances)} instances for attempt {attempt}...'
+ )
+ run_evaluation(
+ instances,
+ metadata,
+ cur_output_file,
+ args.eval_num_workers,
+ process_instance,
+ timeout_seconds=8
+ * 60
+ * 60, # 8 hour PER instance should be more than enough
+ max_retries=1,
+ )
+
+ # When eval is done, we update eval_ids to the instances that failed the current attempt
+ instances_failed = []
+ logger.info(
+ f'Use critic {critic.__class__.__name__} to check {len(instances)} instances for attempt {attempt}...'
+ )
+ with open(cur_output_file, 'r') as f:
+ for line in f:
+ instance = json.loads(line)
+ try:
+ history = [
+ event_from_dict(event) for event in instance['history']
+ ]
+ critic_result = critic.evaluate(
+ history, instance['test_result'].get('git_patch', '')
+ )
+ if not critic_result.success:
+ instances_failed.append(instance['instance_id'])
+ except Exception as e:
+ logger.error(
+ f'Error loading history for instance {instance["instance_id"]}: {e}'
+ )
+ instances_failed.append(instance['instance_id'])
+ logger.info(
+ f'{len(instances_failed)} instances failed the current attempt {attempt}: {instances_failed}'
+ )
+ eval_ids = instances_failed
+
+ # If no instances failed, we break
+ if len(instances_failed) == 0:
+ break
+
+ # Then we should aggregate the results from all attempts into the original output file
+ # and remove the intermediate files
+ logger.info(
+ 'Aggregating results from all attempts into the original output file...'
+ )
+ fout = open(output_file, 'w')
+ added_instance_ids = set()
+ for attempt in reversed(range(1, ITERATIVE_EVAL_MODE_MAX_ATTEMPTS + 1)):
+ cur_output_file = get_cur_output_file_path(attempt)
+ if not os.path.exists(cur_output_file):
+ logger.warning(
+ f'Intermediate output file {cur_output_file} does not exist. Skipping...'
+ )
+ continue
+
+ with open(cur_output_file, 'r') as f:
+ for line in f:
+ instance = json.loads(line)
+ # Also make sure git_patch is not empty - otherwise we fall back to previous attempt (empty patch is worse than anything else)
+ if (
+ instance['instance_id'] not in added_instance_ids
+ and instance['test_result'].get('git_patch', '').strip()
+ ):
+ fout.write(line)
+ added_instance_ids.add(instance['instance_id'])
+ logger.info(
+ f'Aggregated instances from {cur_output_file}. Total instances added so far: {len(added_instance_ids)}'
+ )
+ fout.close()
+ logger.info(
+ f'Done! Total {len(added_instance_ids)} instances added to {output_file}'
+ )
diff --git a/evaluation/benchmarks/swefficiency/scripts/run_infer.sh b/evaluation/benchmarks/swefficiency/scripts/run_infer.sh
new file mode 100755
index 000000000000..1cd122676e58
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/scripts/run_infer.sh
@@ -0,0 +1,148 @@
+#!/usr/bin/env bash
+set -eo pipefail
+
+source "evaluation/utils/version_control.sh"
+
+MODEL_CONFIG=$1
+COMMIT_HASH=$2
+AGENT=$3
+EVAL_LIMIT=$4
+MAX_ITER=$5
+NUM_WORKERS=$6
+DATASET=$7
+SPLIT=$8
+N_RUNS=$9
+MODE=${10}
+
+
+if [ -z "$NUM_WORKERS" ]; then
+ NUM_WORKERS=1
+ echo "Number of workers not specified, use default $NUM_WORKERS"
+fi
+checkout_eval_branch
+
+if [ -z "$AGENT" ]; then
+ echo "Agent not specified, use default CodeActAgent"
+ AGENT="CodeActAgent"
+fi
+
+if [ -z "$MAX_ITER" ]; then
+ echo "MAX_ITER not specified, use default 100"
+ MAX_ITER=100
+fi
+
+if [ -z "$RUN_WITH_BROWSING" ]; then
+ echo "RUN_WITH_BROWSING not specified, use default false"
+ RUN_WITH_BROWSING=false
+fi
+
+
+if [ -z "$DATASET" ]; then
+ echo "DATASET not specified, use default princeton-nlp/SWE-bench_Lite"
+ DATASET="swefficiency/swefficiency"
+fi
+
+if [ -z "$SPLIT" ]; then
+ echo "SPLIT not specified, use default test"
+ SPLIT="test"
+fi
+
+if [ -z "$MODE" ]; then
+ MODE="swe"
+ echo "MODE not specified, use default $MODE"
+fi
+
+if [ -n "$EVAL_CONDENSER" ]; then
+ echo "Using Condenser Config: $EVAL_CONDENSER"
+else
+ echo "No Condenser Config provided via EVAL_CONDENSER, use default (NoOpCondenser)."
+fi
+
+export RUN_WITH_BROWSING=$RUN_WITH_BROWSING
+echo "RUN_WITH_BROWSING: $RUN_WITH_BROWSING"
+
+get_openhands_version
+
+echo "AGENT: $AGENT"
+echo "OPENHANDS_VERSION: $OPENHANDS_VERSION"
+echo "MODEL_CONFIG: $MODEL_CONFIG"
+echo "DATASET: $DATASET"
+echo "SPLIT: $SPLIT"
+echo "MAX_ITER: $MAX_ITER"
+echo "NUM_WORKERS: $NUM_WORKERS"
+echo "COMMIT_HASH: $COMMIT_HASH"
+echo "MODE: $MODE"
+echo "EVAL_CONDENSER: $EVAL_CONDENSER"
+
+# Default to NOT use Hint
+if [ -z "$USE_HINT_TEXT" ]; then
+ export USE_HINT_TEXT=false
+fi
+echo "USE_HINT_TEXT: $USE_HINT_TEXT"
+EVAL_NOTE="$OPENHANDS_VERSION"
+# if not using Hint, add -no-hint to the eval note
+if [ "$USE_HINT_TEXT" = false ]; then
+ EVAL_NOTE="$EVAL_NOTE-no-hint"
+fi
+
+if [ "$RUN_WITH_BROWSING" = true ]; then
+ EVAL_NOTE="$EVAL_NOTE-with-browsing"
+fi
+
+if [ -n "$EXP_NAME" ]; then
+ EVAL_NOTE="$EVAL_NOTE-$EXP_NAME"
+fi
+# if mode != swe, add mode to the eval note
+if [ "$MODE" != "swe" ]; then
+ EVAL_NOTE="${EVAL_NOTE}-${MODE}"
+fi
+# Add condenser config to eval note if provided
+if [ -n "$EVAL_CONDENSER" ]; then
+ EVAL_NOTE="${EVAL_NOTE}-${EVAL_CONDENSER}"
+fi
+
+# export RUNTIME="remote"
+# export SANDBOX_REMOTE_RUNTIME_API_URL="https://runtime.eval.all-hands.dev"
+export NO_CHANGE_TIMEOUT_SECONDS=900 # 15 minutes
+
+function run_eval() {
+ local eval_note="${1}"
+ COMMAND="poetry run python evaluation/benchmarks/swefficiency/run_infer.py \
+ --agent-cls $AGENT \
+ --llm-config $MODEL_CONFIG \
+ --max-iterations $MAX_ITER \
+ --eval-num-workers $NUM_WORKERS \
+ --eval-note $eval_note \
+ --dataset $DATASET \
+ --split $SPLIT \
+ --mode $MODE"
+
+ if [ -n "$EVAL_LIMIT" ]; then
+ echo "EVAL_LIMIT: $EVAL_LIMIT"
+ COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT"
+ fi
+
+ # Run the command
+ eval $COMMAND
+}
+
+unset SANDBOX_ENV_GITHUB_TOKEN # prevent the agent from using the github token to push
+if [ -z "$N_RUNS" ]; then
+ N_RUNS=1
+ echo "N_RUNS not specified, use default $N_RUNS"
+fi
+
+# Skip runs if the run number is in the SKIP_RUNS list
+# read from env variable SKIP_RUNS as a comma separated list of run numbers
+SKIP_RUNS=(${SKIP_RUNS//,/ })
+for i in $(seq 1 $N_RUNS); do
+ if [[ " ${SKIP_RUNS[@]} " =~ " $i " ]]; then
+ echo "Skipping run $i"
+ continue
+ fi
+ current_eval_note="$EVAL_NOTE-run_$i"
+ echo "EVAL_NOTE: $current_eval_note"
+ run_eval $current_eval_note
+done
+
+checkout_original_branch
diff --git a/evaluation/benchmarks/swefficiency/scripts/setup/instance_swe_entry.sh b/evaluation/benchmarks/swefficiency/scripts/setup/instance_swe_entry.sh
new file mode 100755
index 000000000000..61ca1e151097
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/scripts/setup/instance_swe_entry.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+source ~/.bashrc
+SWEUTIL_DIR=/swe_util
+
+# FIXME: Cannot read SWE_INSTANCE_ID from the environment variable
+# SWE_INSTANCE_ID=django__django-11099
+if [ -z "$SWE_INSTANCE_ID" ]; then
+ echo "Error: SWE_INSTANCE_ID is not set." >&2
+ exit 1
+fi
+
+# Read the swe-bench-instance.json file and extract the required item based on instance_id
+item=$(jq --arg INSTANCE_ID "$SWE_INSTANCE_ID" '.[] | select(.instance_id == $INSTANCE_ID)' $SWEUTIL_DIR/eval_data/instances/swe-bench-instance.json)
+
+if [[ -z "$item" ]]; then
+ echo "No item found for the provided instance ID."
+ exit 1
+fi
+
+
+WORKSPACE_NAME=$(echo "$item" | jq -r '(.repo | tostring) + "__" + (.version | tostring) | gsub("/"; "__")')
+
+echo "WORKSPACE_NAME: $WORKSPACE_NAME"
+
+# Clear the workspace
+if [ -d /workspace ]; then
+ rm -rf /workspace/*
+else
+ mkdir /workspace
+fi
+# Copy repo to workspace
+if [ -d /workspace/$WORKSPACE_NAME ]; then
+ rm -rf /workspace/$WORKSPACE_NAME
+fi
+mkdir -p /workspace
+cp -r /testbed /workspace/$WORKSPACE_NAME
+
+# Activate instance-specific environment
+if [ -d /opt/miniconda3 ]; then
+ . /opt/miniconda3/etc/profile.d/conda.sh
+ conda activate testbed
+fi
diff --git a/evaluation/benchmarks/swefficiency/scripts/setup/prepare_swe_utils.sh b/evaluation/benchmarks/swefficiency/scripts/setup/prepare_swe_utils.sh
new file mode 100755
index 000000000000..c5726a402f06
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/scripts/setup/prepare_swe_utils.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+
+set -e
+EVAL_WORKSPACE="evaluation/benchmarks/swe_bench/eval_workspace"
+mkdir -p $EVAL_WORKSPACE
+
+# 1. Prepare REPO
+echo "==== Prepare SWE-bench repo ===="
+OH_SWE_BENCH_REPO_PATH="https://github.com/All-Hands-AI/SWE-bench.git"
+OH_SWE_BENCH_REPO_BRANCH="eval"
+git clone -b $OH_SWE_BENCH_REPO_BRANCH $OH_SWE_BENCH_REPO_PATH $EVAL_WORKSPACE/OH-SWE-bench
+
+# 2. Prepare DATA
+echo "==== Prepare SWE-bench data ===="
+EVAL_IMAGE=ghcr.io/all-hands-ai/eval-swe-bench:builder_with_conda
+EVAL_WORKSPACE=$(realpath $EVAL_WORKSPACE)
+chmod +x $EVAL_WORKSPACE/OH-SWE-bench/swebench/harness/prepare_data.sh
+if [ -d $EVAL_WORKSPACE/eval_data ]; then
+ rm -r $EVAL_WORKSPACE/eval_data
+fi
+docker run \
+ -v $EVAL_WORKSPACE:/workspace \
+ -w /workspace \
+ -u $(id -u):$(id -g) \
+ -e HF_DATASETS_CACHE="/tmp" \
+ --rm -it $EVAL_IMAGE \
+ bash -c "cd OH-SWE-bench/swebench/harness && /swe_util/miniforge3/bin/conda run -n swe-bench-eval ./prepare_data.sh && mv eval_data /workspace/"
diff --git a/evaluation/benchmarks/swefficiency/scripts/setup/swe_entry.sh b/evaluation/benchmarks/swefficiency/scripts/setup/swe_entry.sh
new file mode 100755
index 000000000000..03e0de7a23b2
--- /dev/null
+++ b/evaluation/benchmarks/swefficiency/scripts/setup/swe_entry.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+set -e
+
+# assert user name is `root`
+if [ "$USER" != "root" ]; then
+ echo "Error: This script is intended to be run by the 'root' user only." >&2
+ exit 1
+fi
+
+source ~/.bashrc
+
+SWEUTIL_DIR=/swe_util
+
+# Create logs directory
+LOG_DIR=/openhands/logs
+mkdir -p $LOG_DIR && chmod 777 $LOG_DIR
+
+# FIXME: Cannot read SWE_INSTANCE_ID from the environment variable
+# SWE_INSTANCE_ID=django__django-11099
+if [ -z "$SWE_INSTANCE_ID" ]; then
+ echo "Error: SWE_INSTANCE_ID is not set." >&2
+ exit 1
+fi
+
+# Read the swe-bench-test-lite.json file and extract the required item based on instance_id
+item=$(jq --arg INSTANCE_ID "$SWE_INSTANCE_ID" '.[] | select(.instance_id == $INSTANCE_ID)' $SWEUTIL_DIR/eval_data/instances/swe-bench-test-lite.json)
+
+if [[ -z "$item" ]]; then
+ echo "No item found for the provided instance ID."
+ exit 1
+fi
+
+CONDA_ENV_NAME=$(echo "$item" | jq -r '.repo + "__" + .version | gsub("/"; "__")')
+
+echo "CONDA_ENV_NAME: $CONDA_ENV_NAME"
+
+SWE_TASK_DIR=/openhands/swe_tasks
+mkdir -p $SWE_TASK_DIR
+# Dump test_patch to /workspace/test.patch
+echo "$item" | jq -r '.test_patch' > $SWE_TASK_DIR/test.patch
+# Dump patch to /workspace/gold.patch
+echo "$item" | jq -r '.patch' > $SWE_TASK_DIR/gold.patch
+# Dump the item to /workspace/instance.json except for the "test_patch" and "patch" fields
+echo "$item" | jq 'del(.test_patch, .patch)' > $SWE_TASK_DIR/instance.json
+
+# Clear the workspace
+rm -rf /workspace/*
+# Copy repo to workspace
+if [ -d /workspace/$CONDA_ENV_NAME ]; then
+ rm -rf /workspace/$CONDA_ENV_NAME
+fi
+cp -r $SWEUTIL_DIR/eval_data/testbeds/$CONDA_ENV_NAME /workspace
+
+# Reset swe-bench testbed and install the repo
+. $SWEUTIL_DIR/miniforge3/etc/profile.d/conda.sh
+conda config --set changeps1 False
+conda config --append channels conda-forge
+conda activate swe-bench-eval
+
+mkdir -p $SWE_TASK_DIR/reset_testbed_temp
+mkdir -p $SWE_TASK_DIR/reset_testbed_log_dir
+SWE_BENCH_DIR=/swe_util/OH-SWE-bench
+output=$(
+ export PYTHONPATH=$SWE_BENCH_DIR && \
+ cd $SWE_BENCH_DIR && \
+ python swebench/harness/reset_swe_env.py \
+ --swe_bench_tasks $SWEUTIL_DIR/eval_data/instances/swe-bench-test.json \
+ --temp_dir $SWE_TASK_DIR/reset_testbed_temp \
+ --testbed /workspace \
+ --conda_path $SWEUTIL_DIR/miniforge3 \
+ --instance_id $SWE_INSTANCE_ID \
+ --log_dir $SWE_TASK_DIR/reset_testbed_log_dir \
+ --timeout 900 \
+ --verbose
+)
+
+REPO_PATH=$(echo "$output" | awk -F': ' '/repo_path:/ {print $2}')
+TEST_CMD=$(echo "$output" | awk -F': ' '/test_cmd:/ {print $2}')
+echo "Repo Path: $REPO_PATH"
+echo "Test Command: $TEST_CMD"
+
+echo "export SWE_BENCH_DIR=\"$SWE_BENCH_DIR\"" >> ~/.bashrc
+echo "export REPO_PATH=\"$REPO_PATH\"" >> ~/.bashrc
+echo "export TEST_CMD=\"$TEST_CMD\"" >> ~/.bashrc
+
+if [[ "$REPO_PATH" == "None" ]]; then
+ echo "Error: Failed to retrieve repository path. Tests may not have passed or output was not as expected." >&2
+ exit 1
+fi
+
+# Activate instance-specific environment
+. $SWEUTIL_DIR/miniforge3/etc/profile.d/conda.sh
+conda activate $CONDA_ENV_NAME
+
+set +e
diff --git a/frontend/__tests__/components/features/chat/task-tracking-observation-content.test.tsx b/frontend/__tests__/components/features/chat/task-tracking-observation-content.test.tsx
index e44c33ca7d93..5db3942aa28b 100644
--- a/frontend/__tests__/components/features/chat/task-tracking-observation-content.test.tsx
+++ b/frontend/__tests__/components/features/chat/task-tracking-observation-content.test.tsx
@@ -8,10 +8,11 @@ vi.mock("react-i18next", () => ({
useTranslation: () => ({
t: (key: string) => {
const translations: Record = {
- "TASK_TRACKING_OBSERVATION$TASK_LIST": "Task List",
- "TASK_TRACKING_OBSERVATION$TASK_ID": "ID",
- "TASK_TRACKING_OBSERVATION$TASK_NOTES": "Notes",
- "TASK_TRACKING_OBSERVATION$RESULT": "Result",
+ TASK_TRACKING_OBSERVATION$TASK_LIST: "Task List",
+ TASK_TRACKING_OBSERVATION$TASK_ID: "ID",
+ TASK_TRACKING_OBSERVATION$TASK_NOTES: "Notes",
+ TASK_TRACKING_OBSERVATION$RESULT: "Result",
+ COMMON$TASKS: "Tasks",
};
return translations[key] || key;
},
@@ -61,19 +62,26 @@ describe("TaskTrackingObservationContent", () => {
it("renders task list when command is 'plan' and tasks exist", () => {
render( );
- expect(screen.getByText("Task List (3 items)")).toBeInTheDocument();
+ expect(screen.getByText("Tasks")).toBeInTheDocument();
expect(screen.getByText("Implement feature A")).toBeInTheDocument();
expect(screen.getByText("Fix bug B")).toBeInTheDocument();
expect(screen.getByText("Deploy to production")).toBeInTheDocument();
});
it("displays correct status icons and badges", () => {
- render( );
+ const { container } = render(
+ ,
+ );
+
+ // Status is represented by icons, not text. Verify task items are rendered with their titles
+ // which indicates the status icons are present (status affects icon rendering)
+ expect(screen.getByText("Implement feature A")).toBeInTheDocument();
+ expect(screen.getByText("Fix bug B")).toBeInTheDocument();
+ expect(screen.getByText("Deploy to production")).toBeInTheDocument();
- // Check for status text (the icons are emojis)
- expect(screen.getByText("todo")).toBeInTheDocument();
- expect(screen.getByText("in progress")).toBeInTheDocument();
- expect(screen.getByText("done")).toBeInTheDocument();
+ // Verify task items are present (they contain the status icons)
+ const taskItems = container.querySelectorAll('[data-name="item"]');
+ expect(taskItems).toHaveLength(3);
});
it("displays task IDs and notes", () => {
@@ -84,14 +92,9 @@ describe("TaskTrackingObservationContent", () => {
expect(screen.getByText("ID: task-3")).toBeInTheDocument();
expect(screen.getByText("Notes: This is a test task")).toBeInTheDocument();
- expect(screen.getByText("Notes: Completed successfully")).toBeInTheDocument();
- });
-
- it("renders result section when content exists", () => {
- render( );
-
- expect(screen.getByText("Result")).toBeInTheDocument();
- expect(screen.getByText("Task tracking operation completed successfully")).toBeInTheDocument();
+ expect(
+ screen.getByText("Notes: Completed successfully"),
+ ).toBeInTheDocument();
});
it("does not render task list when command is not 'plan'", () => {
@@ -105,7 +108,7 @@ describe("TaskTrackingObservationContent", () => {
render( );
- expect(screen.queryByText("Task List")).not.toBeInTheDocument();
+ expect(screen.queryByText("Tasks")).not.toBeInTheDocument();
});
it("does not render task list when task list is empty", () => {
@@ -119,17 +122,6 @@ describe("TaskTrackingObservationContent", () => {
render( );
- expect(screen.queryByText("Task List")).not.toBeInTheDocument();
- });
-
- it("does not render result section when content is empty", () => {
- const eventWithoutContent = {
- ...mockEvent,
- content: "",
- };
-
- render( );
-
- expect(screen.queryByText("Result")).not.toBeInTheDocument();
+ expect(screen.queryByText("Tasks")).not.toBeInTheDocument();
});
});
diff --git a/frontend/__tests__/components/features/home/repo-connector.test.tsx b/frontend/__tests__/components/features/home/repo-connector.test.tsx
index 8e186257a092..0500d441a2da 100644
--- a/frontend/__tests__/components/features/home/repo-connector.test.tsx
+++ b/frontend/__tests__/components/features/home/repo-connector.test.tsx
@@ -71,6 +71,7 @@ beforeEach(() => {
provider_tokens_set: {
github: "some-token",
gitlab: null,
+ azure_devops: null,
},
});
});
diff --git a/frontend/__tests__/components/features/home/task-card.test.tsx b/frontend/__tests__/components/features/home/task-card.test.tsx
index 6d8fb0ee6386..48746270df8b 100644
--- a/frontend/__tests__/components/features/home/task-card.test.tsx
+++ b/frontend/__tests__/components/features/home/task-card.test.tsx
@@ -23,6 +23,7 @@ const MOCK_RESPOSITORIES: GitRepository[] = [
{ id: "2", full_name: "repo2", git_provider: "github", is_public: true },
{ id: "3", full_name: "repo3", git_provider: "gitlab", is_public: true },
{ id: "4", full_name: "repo4", git_provider: "gitlab", is_public: true },
+ { id: "5", full_name: "repo5", git_provider: "azure_devops", is_public: true },
];
const renderTaskCard = (task = MOCK_TASK_1) => {
diff --git a/frontend/__tests__/components/modals/microagents/microagent-modal.test.tsx b/frontend/__tests__/components/modals/microagents/microagent-modal.test.tsx
index f3b68c513d11..858c07207de9 100644
--- a/frontend/__tests__/components/modals/microagents/microagent-modal.test.tsx
+++ b/frontend/__tests__/components/modals/microagents/microagent-modal.test.tsx
@@ -57,7 +57,7 @@ describe("MicroagentsModal - Refresh Button", () => {
});
afterEach(() => {
- vi.clearAllMocks();
+ vi.restoreAllMocks();
});
describe("Refresh Button Rendering", () => {
@@ -74,13 +74,15 @@ describe("MicroagentsModal - Refresh Button", () => {
describe("Refresh Button Functionality", () => {
it("should call refetch when refresh button is clicked", async () => {
const user = userEvent.setup();
+ const refreshSpy = vi.spyOn(ConversationService, "getMicroagents");
renderWithProviders( );
- const refreshSpy = vi.spyOn(ConversationService, "getMicroagents");
-
// Wait for the component to load and render the refresh button
const refreshButton = await screen.findByTestId("refresh-microagents");
+
+ refreshSpy.mockClear();
+
await user.click(refreshButton);
expect(refreshSpy).toHaveBeenCalledTimes(1);
diff --git a/frontend/__tests__/conversation-websocket-handler.test.tsx b/frontend/__tests__/conversation-websocket-handler.test.tsx
index f7d67d82b5ca..f922a8876c8f 100644
--- a/frontend/__tests__/conversation-websocket-handler.test.tsx
+++ b/frontend/__tests__/conversation-websocket-handler.test.tsx
@@ -1,12 +1,26 @@
-import { describe, it, expect, beforeAll, afterAll, afterEach } from "vitest";
+import {
+ describe,
+ it,
+ expect,
+ beforeAll,
+ beforeEach,
+ afterAll,
+ afterEach,
+} from "vitest";
import { screen, waitFor, render, cleanup } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { http, HttpResponse } from "msw";
import { useOptimisticUserMessageStore } from "#/stores/optimistic-user-message-store";
+import { useBrowserStore } from "#/stores/browser-store";
+import { useCommandStore } from "#/state/command-store";
import {
createMockMessageEvent,
createMockUserMessageEvent,
createMockAgentErrorEvent,
+ createMockBrowserObservationEvent,
+ createMockBrowserNavigateActionEvent,
+ createMockExecuteBashActionEvent,
+ createMockExecuteBashObservationEvent,
} from "#/mocks/mock-ws-helpers";
import {
ConnectionStatusComponent,
@@ -461,7 +475,7 @@ describe("Conversation WebSocket Handler", () => {
);
// Create a test component that displays loading state
- const HistoryLoadingComponent = () => {
+ function HistoryLoadingComponent() {
const context = useConversationWebSocket();
const { events } = useEventStore();
@@ -474,7 +488,7 @@ describe("Conversation WebSocket Handler", () => {
{expectedEventCount}
);
- };
+ }
// Render with WebSocket context
renderWithWebSocketContext(
@@ -484,7 +498,9 @@ describe("Conversation WebSocket Handler", () => {
);
// Initially should be loading history
- expect(screen.getByTestId("is-loading-history")).toHaveTextContent("true");
+ expect(screen.getByTestId("is-loading-history")).toHaveTextContent(
+ "true",
+ );
// Wait for all events to be received
await waitFor(() => {
@@ -523,7 +539,7 @@ describe("Conversation WebSocket Handler", () => {
);
// Create a test component that displays loading state
- const HistoryLoadingComponent = () => {
+ function HistoryLoadingComponent() {
const context = useConversationWebSocket();
return (
@@ -533,7 +549,7 @@ describe("Conversation WebSocket Handler", () => {
);
- };
+ }
// Render with WebSocket context
renderWithWebSocketContext(
@@ -583,7 +599,7 @@ describe("Conversation WebSocket Handler", () => {
);
// Create a test component that displays loading state
- const HistoryLoadingComponent = () => {
+ function HistoryLoadingComponent() {
const context = useConversationWebSocket();
const { events } = useEventStore();
@@ -595,7 +611,7 @@ describe("Conversation WebSocket Handler", () => {
{events.length}
);
- };
+ }
// Render with WebSocket context
renderWithWebSocketContext(
@@ -605,7 +621,9 @@ describe("Conversation WebSocket Handler", () => {
);
// Initially should be loading history
- expect(screen.getByTestId("is-loading-history")).toHaveTextContent("true");
+ expect(screen.getByTestId("is-loading-history")).toHaveTextContent(
+ "true",
+ );
// Wait for all events to be received
await waitFor(() => {
@@ -621,17 +639,133 @@ describe("Conversation WebSocket Handler", () => {
});
});
- // 9. Terminal I/O Tests (ExecuteBashAction and ExecuteBashObservation)
- describe("Terminal I/O Integration", () => {
- it("should append command to store when ExecuteBashAction event is received", async () => {
- const { createMockExecuteBashActionEvent } = await import(
- "#/mocks/mock-ws-helpers"
+ // 9. Browser State Tests (BrowserObservation)
+ describe("Browser State Integration", () => {
+ beforeEach(() => {
+ useBrowserStore.getState().reset();
+ });
+
+ it("should update browser store with screenshot when BrowserObservation event is received", async () => {
+ // Create a mock BrowserObservation event with screenshot data
+ const mockBrowserObsEvent = createMockBrowserObservationEvent(
+ "base64-screenshot-data",
+ "Page loaded successfully",
+ );
+
+ // Set up MSW to send the event when connection is established
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ // Send the mock event after connection
+ client.send(JSON.stringify(mockBrowserObsEvent));
+ }),
+ );
+
+ // Render with WebSocket context
+ renderWithWebSocketContext( );
+
+ // Wait for connection
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ // Wait for the browser store to be updated with screenshot
+ await waitFor(() => {
+ const { screenshotSrc } = useBrowserStore.getState();
+ expect(screenshotSrc).toBe(
+ "data:image/png;base64,base64-screenshot-data",
+ );
+ });
+ });
+
+ it("should update browser store with URL when BrowserNavigateAction followed by BrowserObservation", async () => {
+ // Create mock events - action first, then observation
+ const mockBrowserActionEvent = createMockBrowserNavigateActionEvent(
+ "https://example.com/test-page",
+ );
+ const mockBrowserObsEvent = createMockBrowserObservationEvent(
+ "base64-screenshot-data",
+ "Page loaded successfully",
);
- const { useCommandStore } = await import("#/state/command-store");
- // Clear the command store before test
+ // Set up MSW to send both events when connection is established
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ // Send action first, then observation
+ client.send(JSON.stringify(mockBrowserActionEvent));
+ client.send(JSON.stringify(mockBrowserObsEvent));
+ }),
+ );
+
+ // Render with WebSocket context
+ renderWithWebSocketContext( );
+
+ // Wait for connection
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ // Wait for the browser store to be updated with both screenshot and URL
+ await waitFor(() => {
+ const { screenshotSrc, url } = useBrowserStore.getState();
+ expect(screenshotSrc).toBe(
+ "data:image/png;base64,base64-screenshot-data",
+ );
+ expect(url).toBe("https://example.com/test-page");
+ });
+ });
+
+ it("should not update browser store when BrowserObservation has no screenshot data", async () => {
+ const initialScreenshot = useBrowserStore.getState().screenshotSrc;
+
+ // Create a mock BrowserObservation event WITHOUT screenshot data
+ const mockBrowserObsEvent = createMockBrowserObservationEvent(
+ null, // no screenshot
+ "Browser action completed",
+ );
+
+ // Set up MSW to send the event when connection is established
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ // Send the mock event after connection
+ client.send(JSON.stringify(mockBrowserObsEvent));
+ }),
+ );
+
+ // Render with WebSocket context
+ renderWithWebSocketContext( );
+
+ // Wait for connection
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ // Give some time for any potential updates
+ await new Promise((resolve) => {
+ setTimeout(resolve, 100);
+ });
+
+ // Screenshot should remain unchanged (empty/initial value)
+ const { screenshotSrc } = useBrowserStore.getState();
+ expect(screenshotSrc).toBe(initialScreenshot);
+ });
+ });
+
+ // 10. Terminal I/O Tests (ExecuteBashAction and ExecuteBashObservation)
+ describe("Terminal I/O Integration", () => {
+ beforeEach(() => {
useCommandStore.getState().clearTerminal();
+ });
+ it("should append command to store when ExecuteBashAction event is received", async () => {
// Create a mock ExecuteBashAction event
const mockBashActionEvent = createMockExecuteBashActionEvent("npm test");
@@ -667,14 +801,6 @@ describe("Conversation WebSocket Handler", () => {
});
it("should append output to store when ExecuteBashObservation event is received", async () => {
- const { createMockExecuteBashObservationEvent } = await import(
- "#/mocks/mock-ws-helpers"
- );
- const { useCommandStore } = await import("#/state/command-store");
-
- // Clear the command store before test
- useCommandStore.getState().clearTerminal();
-
// Create a mock ExecuteBashObservation event
const mockBashObservationEvent = createMockExecuteBashObservationEvent(
"PASS tests/example.test.js\n ✓ should work (2 ms)",
diff --git a/frontend/__tests__/hooks/use-terminal.test.tsx b/frontend/__tests__/hooks/use-terminal.test.tsx
index 4f110df1716a..08144503b5e6 100644
--- a/frontend/__tests__/hooks/use-terminal.test.tsx
+++ b/frontend/__tests__/hooks/use-terminal.test.tsx
@@ -1,3 +1,4 @@
+/* eslint-disable max-classes-per-file */
import { beforeAll, describe, expect, it, vi, afterEach } from "vitest";
import { useTerminal } from "#/hooks/use-terminal";
import { Command, useCommandStore } from "#/state/command-store";
@@ -45,17 +46,29 @@ describe("useTerminal", () => {
}));
beforeAll(() => {
- // mock ResizeObserver
- window.ResizeObserver = vi.fn().mockImplementation(() => ({
- observe: vi.fn(),
- unobserve: vi.fn(),
- disconnect: vi.fn(),
- }));
+ // mock ResizeObserver - use class for Vitest 4 constructor support
+ window.ResizeObserver = class {
+ observe = vi.fn();
+
+ unobserve = vi.fn();
- // mock Terminal
+ disconnect = vi.fn();
+ } as unknown as typeof ResizeObserver;
+
+ // mock Terminal - use class for Vitest 4 constructor support
vi.mock("@xterm/xterm", async (importOriginal) => ({
...(await importOriginal()),
- Terminal: vi.fn().mockImplementation(() => mockTerminal),
+ Terminal: class {
+ loadAddon = mockTerminal.loadAddon;
+
+ open = mockTerminal.open;
+
+ write = mockTerminal.write;
+
+ writeln = mockTerminal.writeln;
+
+ dispose = mockTerminal.dispose;
+ },
}));
});
diff --git a/frontend/__tests__/posthog-tracking.test.tsx b/frontend/__tests__/posthog-tracking.test.tsx
new file mode 100644
index 000000000000..5d76649013bc
--- /dev/null
+++ b/frontend/__tests__/posthog-tracking.test.tsx
@@ -0,0 +1,233 @@
+import {
+ describe,
+ it,
+ expect,
+ beforeAll,
+ afterAll,
+ afterEach,
+ vi,
+} from "vitest";
+import { screen, waitFor, render, cleanup } from "@testing-library/react";
+import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
+import { createMockAgentErrorEvent } from "#/mocks/mock-ws-helpers";
+import { ConversationWebSocketProvider } from "#/contexts/conversation-websocket-context";
+import { conversationWebSocketTestSetup } from "./helpers/msw-websocket-setup";
+import { ConnectionStatusComponent } from "./helpers/websocket-test-components";
+
+// Mock the tracking function
+const mockTrackCreditLimitReached = vi.fn();
+
+// Mock useTracking hook
+vi.mock("#/hooks/use-tracking", () => ({
+ useTracking: () => ({
+ trackCreditLimitReached: mockTrackCreditLimitReached,
+ trackLoginButtonClick: vi.fn(),
+ trackConversationCreated: vi.fn(),
+ trackPushButtonClick: vi.fn(),
+ trackPullButtonClick: vi.fn(),
+ trackCreatePrButtonClick: vi.fn(),
+ trackGitProviderConnected: vi.fn(),
+ trackUserSignupCompleted: vi.fn(),
+ trackCreditsPurchased: vi.fn(),
+ }),
+}));
+
+// Mock useActiveConversation hook
+vi.mock("#/hooks/query/use-active-conversation", () => ({
+ useActiveConversation: () => ({
+ data: null,
+ isLoading: false,
+ error: null,
+ }),
+}));
+
+// MSW WebSocket mock setup
+const { wsLink, server: mswServer } = conversationWebSocketTestSetup();
+
+beforeAll(() => {
+ // The global MSW server from vitest.setup.ts is already running
+ // We just need to start our WebSocket-specific server
+ mswServer.listen({ onUnhandledRequest: "bypass" });
+});
+
+afterEach(() => {
+ // Clear all mocks before each test
+ mockTrackCreditLimitReached.mockClear();
+ mswServer.resetHandlers();
+ // Clean up any React components
+ cleanup();
+});
+
+afterAll(async () => {
+ // Close the WebSocket MSW server
+ mswServer.close();
+
+ // Give time for any pending WebSocket connections to close. This is very important to prevent serious memory leaks
+ await new Promise((resolve) => {
+ setTimeout(resolve, 500);
+ });
+});
+
+// Helper function to render components with all necessary providers
+function renderWithProviders(
+ children: React.ReactNode,
+ conversationId = "test-conversation-123",
+ conversationUrl = "http://localhost:3000/api/conversations/test-conversation-123",
+) {
+ const queryClient = new QueryClient({
+ defaultOptions: {
+ queries: { retry: false },
+ mutations: { retry: false },
+ },
+ });
+
+ return render(
+
+
+ {children}
+
+ ,
+ );
+}
+
+describe("PostHog Analytics Tracking", () => {
+ describe("Credit Limit Tracking", () => {
+ it("should track credit_limit_reached when AgentErrorEvent contains budget error", async () => {
+ // Create a mock AgentErrorEvent with budget-related error message
+ const mockBudgetErrorEvent = createMockAgentErrorEvent({
+ error: "ExceededBudget: Task exceeded maximum budget of $10.00",
+ });
+
+ // Set up MSW to send the budget error event when connection is established
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ // Send the mock budget error event after connection
+ client.send(JSON.stringify(mockBudgetErrorEvent));
+ }),
+ );
+
+ // Render with all providers
+ renderWithProviders( );
+
+ // Wait for connection to be established
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ // Wait for the tracking event to be captured
+ await waitFor(() => {
+ expect(mockTrackCreditLimitReached).toHaveBeenCalledWith(
+ expect.objectContaining({
+ conversationId: "test-conversation-123",
+ }),
+ );
+ });
+ });
+
+ it("should track credit_limit_reached when AgentErrorEvent contains 'credit' keyword", async () => {
+ // Create error with "credit" keyword (case-insensitive)
+ const mockCreditErrorEvent = createMockAgentErrorEvent({
+ error: "Insufficient CREDIT to complete this operation",
+ });
+
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ client.send(JSON.stringify(mockCreditErrorEvent));
+ }),
+ );
+
+ renderWithProviders( );
+
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ await waitFor(() => {
+ expect(mockTrackCreditLimitReached).toHaveBeenCalledWith(
+ expect.objectContaining({
+ conversationId: "test-conversation-123",
+ }),
+ );
+ });
+ });
+
+ it("should NOT track credit_limit_reached for non-budget errors", async () => {
+ // Create a regular error without budget/credit keywords
+ const mockRegularErrorEvent = createMockAgentErrorEvent({
+ error: "Failed to execute command: Permission denied",
+ });
+
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ client.send(JSON.stringify(mockRegularErrorEvent));
+ }),
+ );
+
+ renderWithProviders( );
+
+ // Wait for connection and error to be processed
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ // Verify that credit_limit_reached was NOT tracked
+ expect(mockTrackCreditLimitReached).not.toHaveBeenCalled();
+ });
+
+ it("should only track credit_limit_reached once per error event", async () => {
+ const mockBudgetErrorEvent = createMockAgentErrorEvent({
+ error: "Budget exceeded: $10.00 limit reached",
+ });
+
+ mswServer.use(
+ wsLink.addEventListener("connection", ({ client, server }) => {
+ server.connect();
+ // Send the same error event twice
+ client.send(JSON.stringify(mockBudgetErrorEvent));
+ client.send(
+ JSON.stringify({ ...mockBudgetErrorEvent, id: "different-id" }),
+ );
+ }),
+ );
+
+ renderWithProviders( );
+
+ await waitFor(() => {
+ expect(screen.getByTestId("connection-state")).toHaveTextContent(
+ "OPEN",
+ );
+ });
+
+ await waitFor(() => {
+ expect(mockTrackCreditLimitReached).toHaveBeenCalledTimes(2);
+ });
+
+ // Both calls should be for credit_limit_reached (once per event)
+ expect(mockTrackCreditLimitReached).toHaveBeenNthCalledWith(
+ 1,
+ expect.objectContaining({
+ conversationId: "test-conversation-123",
+ }),
+ );
+ expect(mockTrackCreditLimitReached).toHaveBeenNthCalledWith(
+ 2,
+ expect.objectContaining({
+ conversationId: "test-conversation-123",
+ }),
+ );
+ });
+ });
+});
diff --git a/frontend/__tests__/routes/git-settings.test.tsx b/frontend/__tests__/routes/git-settings.test.tsx
index 8b35abad3f43..0c3f77bed088 100644
--- a/frontend/__tests__/routes/git-settings.test.tsx
+++ b/frontend/__tests__/routes/git-settings.test.tsx
@@ -124,6 +124,9 @@ describe("Content", () => {
await screen.findByTestId("bitbucket-token-input");
await screen.findByTestId("bitbucket-token-help-anchor");
+ await screen.findByTestId("azure-devops-token-input");
+ await screen.findByTestId("azure-devops-token-help-anchor");
+
getConfigSpy.mockResolvedValue(VALID_SAAS_CONFIG);
queryClient.invalidateQueries();
rerender();
@@ -149,6 +152,13 @@ describe("Content", () => {
expect(
screen.queryByTestId("bitbucket-token-help-anchor"),
).not.toBeInTheDocument();
+
+ expect(
+ screen.queryByTestId("azure-devops-token-input"),
+ ).not.toBeInTheDocument();
+ expect(
+ screen.queryByTestId("azure-devops-token-help-anchor"),
+ ).not.toBeInTheDocument();
});
});
@@ -287,6 +297,7 @@ describe("Form submission", () => {
github: { token: "test-token", host: "" },
gitlab: { token: "", host: "" },
bitbucket: { token: "", host: "" },
+ azure_devops: { token: "", host: "" },
});
});
@@ -308,6 +319,7 @@ describe("Form submission", () => {
github: { token: "", host: "" },
gitlab: { token: "test-token", host: "" },
bitbucket: { token: "", host: "" },
+ azure_devops: { token: "", host: "" },
});
});
@@ -329,6 +341,29 @@ describe("Form submission", () => {
github: { token: "", host: "" },
gitlab: { token: "", host: "" },
bitbucket: { token: "test-token", host: "" },
+ azure_devops: { token: "", host: "" },
+ });
+ });
+
+ it("should save the Azure DevOps token", async () => {
+ const saveProvidersSpy = vi.spyOn(SecretsService, "addGitProvider");
+ saveProvidersSpy.mockImplementation(() => Promise.resolve(true));
+ const getConfigSpy = vi.spyOn(OptionService, "getConfig");
+ getConfigSpy.mockResolvedValue(VALID_OSS_CONFIG);
+
+ renderGitSettingsScreen();
+
+ const azureDevOpsInput = await screen.findByTestId("azure-devops-token-input");
+ const submit = await screen.findByTestId("submit-button");
+
+ await userEvent.type(azureDevOpsInput, "test-token");
+ await userEvent.click(submit);
+
+ expect(saveProvidersSpy).toHaveBeenCalledWith({
+ github: { token: "", host: "" },
+ gitlab: { token: "", host: "" },
+ bitbucket: { token: "", host: "" },
+ azure_devops: { token: "test-token", host: "" },
});
});
diff --git a/frontend/__tests__/routes/secrets-settings.test.tsx b/frontend/__tests__/routes/secrets-settings.test.tsx
index 5517e965aa97..1ffccc29ffc3 100644
--- a/frontend/__tests__/routes/secrets-settings.test.tsx
+++ b/frontend/__tests__/routes/secrets-settings.test.tsx
@@ -1,5 +1,5 @@
import { render, screen, waitFor, within } from "@testing-library/react";
-import { beforeEach, describe, expect, it, vi } from "vitest";
+import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import userEvent from "@testing-library/user-event";
import { createRoutesStub, Outlet } from "react-router";
@@ -21,25 +21,25 @@ const MOCK_GET_SECRETS_RESPONSE: GetSecretsResponse["custom_secrets"] = [
},
];
-const RouterStub = createRoutesStub([
- {
- Component: () => ,
- path: "/settings",
- children: [
- {
- Component: SecretsSettingsScreen,
- path: "/settings/secrets",
- },
- {
- Component: () =>
,
- path: "/settings/integrations",
- },
- ],
- },
-]);
-
-const renderSecretsSettings = () =>
- render( , {
+const renderSecretsSettings = () => {
+ const RouterStub = createRoutesStub([
+ {
+ Component: () => ,
+ path: "/settings",
+ children: [
+ {
+ Component: SecretsSettingsScreen,
+ path: "/settings/secrets",
+ },
+ {
+ Component: () =>
,
+ path: "/settings/integrations",
+ },
+ ],
+ },
+ ]);
+
+ return render( , {
wrapper: ({ children }) => (
),
});
+};
beforeEach(() => {
const getConfigSpy = vi.spyOn(OptionService, "getConfig");
@@ -61,6 +62,10 @@ beforeEach(() => {
});
});
+afterEach(() => {
+ vi.restoreAllMocks();
+});
+
describe("Content", () => {
it("should render the secrets settings screen", () => {
renderSecretsSettings();
@@ -501,6 +506,8 @@ describe("Secret actions", () => {
it("should not submit whitespace secret names or values", async () => {
const createSecretSpy = vi.spyOn(SecretsService, "createSecret");
+ const getSecretsSpy = vi.spyOn(SecretsService, "getSecrets");
+ getSecretsSpy.mockResolvedValue([]);
renderSecretsSettings();
// render form & hide items
@@ -532,9 +539,11 @@ describe("Secret actions", () => {
await userEvent.click(submitButton);
expect(createSecretSpy).not.toHaveBeenCalled();
- expect(
- screen.queryByText("SECRETS$SECRET_VALUE_REQUIRED"),
- ).toBeInTheDocument();
+ await waitFor(() => {
+ expect(
+ screen.queryByText("SECRETS$SECRET_VALUE_REQUIRED"),
+ ).toBeInTheDocument();
+ });
});
it("should not reset ipout values on an invalid submit", async () => {
diff --git a/frontend/__tests__/stores/use-event-store.test.ts b/frontend/__tests__/stores/use-event-store.test.ts
index 79ea9e1509e4..82377d8179e6 100644
--- a/frontend/__tests__/stores/use-event-store.test.ts
+++ b/frontend/__tests__/stores/use-event-store.test.ts
@@ -55,7 +55,7 @@ const mockObservationEvent: ObservationEvent = {
tool_call_id: "call_123",
observation: {
kind: "ExecuteBashObservation",
- output: "hello\n",
+ content: [{ type: "text", text: "hello\n" }],
command: "echo hello",
exit_code: 0,
error: false,
diff --git a/frontend/__tests__/utils/convert-raw-providers-to-list.test.ts b/frontend/__tests__/utils/convert-raw-providers-to-list.test.ts
index d2a756cbe749..d83280bf1c7a 100644
--- a/frontend/__tests__/utils/convert-raw-providers-to-list.test.ts
+++ b/frontend/__tests__/utils/convert-raw-providers-to-list.test.ts
@@ -7,6 +7,7 @@ describe("convertRawProvidersToList", () => {
const example1: Partial> | undefined = {
github: "test-token",
gitlab: "test-token",
+ azure_devops: "test-token",
};
const example2: Partial> | undefined = {
github: "",
@@ -14,9 +15,13 @@ describe("convertRawProvidersToList", () => {
const example3: Partial> | undefined = {
gitlab: null,
};
+ const example4: Partial> | undefined = {
+ azure_devops: "test-token",
+ };
- expect(convertRawProvidersToList(example1)).toEqual(["github", "gitlab"]);
+ expect(convertRawProvidersToList(example1)).toEqual(["github", "gitlab", "azure_devops"]);
expect(convertRawProvidersToList(example2)).toEqual(["github"]);
expect(convertRawProvidersToList(example3)).toEqual(["gitlab"]);
+ expect(convertRawProvidersToList(example4)).toEqual(["azure_devops"]);
});
});
diff --git a/frontend/__tests__/utils/handle-event-for-ui.test.ts b/frontend/__tests__/utils/handle-event-for-ui.test.ts
index 01f8009c2723..dde84629c2a9 100644
--- a/frontend/__tests__/utils/handle-event-for-ui.test.ts
+++ b/frontend/__tests__/utils/handle-event-for-ui.test.ts
@@ -17,7 +17,7 @@ describe("handleEventForUI", () => {
tool_call_id: "call_123",
observation: {
kind: "ExecuteBashObservation",
- output: "hello\n",
+ content: [{ type: "text", text: "hello\n" }],
command: "echo hello",
exit_code: 0,
error: false,
diff --git a/frontend/package-lock.json b/frontend/package-lock.json
index 862f0c5a00ec..4117c2b58e89 100644
--- a/frontend/package-lock.json
+++ b/frontend/package-lock.json
@@ -8,82 +8,82 @@
"name": "openhands-frontend",
"version": "0.62.0",
"dependencies": {
- "@heroui/react": "^2.8.4",
- "@heroui/use-infinite-scroll": "^2.2.11",
+ "@heroui/react": "2.8.5",
+ "@heroui/use-infinite-scroll": "^2.2.12",
"@microlink/react-json-view": "^1.26.2",
"@monaco-editor/react": "^4.7.0-rc.0",
- "@posthog/react": "^1.4.0",
- "@react-router/node": "^7.9.3",
- "@react-router/serve": "^7.9.3",
+ "@posthog/react": "^1.5.2",
+ "@react-router/node": "^7.10.1",
+ "@react-router/serve": "^7.10.1",
"@react-types/shared": "^3.32.0",
- "@stripe/react-stripe-js": "^4.0.2",
- "@stripe/stripe-js": "^7.9.0",
- "@tailwindcss/postcss": "^4.1.13",
- "@tailwindcss/vite": "^4.1.13",
- "@tanstack/react-query": "^5.90.2",
+ "@stripe/react-stripe-js": "^5.4.1",
+ "@stripe/stripe-js": "^8.5.3",
+ "@tailwindcss/postcss": "^4.1.17",
+ "@tailwindcss/vite": "^4.1.17",
+ "@tanstack/react-query": "^5.90.12",
"@uidotdev/usehooks": "^2.4.1",
- "@vitejs/plugin-react": "^5.0.4",
+ "@vitejs/plugin-react": "^5.1.1",
"@xterm/addon-fit": "^0.10.0",
"@xterm/xterm": "^5.4.0",
- "axios": "^1.12.2",
+ "axios": "^1.13.2",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"date-fns": "^4.1.0",
- "downshift": "^9.0.10",
+ "downshift": "^9.0.12",
"eslint-config-airbnb-typescript": "^18.0.0",
- "framer-motion": "^12.23.22",
- "i18next": "^25.5.2",
+ "framer-motion": "^12.23.25",
+ "i18next": "^25.7.1",
"i18next-browser-languagedetector": "^8.2.0",
"i18next-http-backend": "^3.0.2",
- "isbot": "^5.1.31",
- "jose": "^6.1.0",
- "lucide-react": "^0.544.0",
- "monaco-editor": "^0.53.0",
- "posthog-js": "^1.290.0",
- "react": "^19.1.1",
- "react-dom": "^19.1.1",
+ "isbot": "^5.1.32",
+ "jose": "^6.1.3",
+ "lucide-react": "^0.556.0",
+ "monaco-editor": "^0.55.1",
+ "posthog-js": "^1.302.0",
+ "react": "^19.2.0",
+ "react-dom": "^19.2.0",
"react-highlight": "^0.15.0",
"react-hot-toast": "^2.6.0",
- "react-i18next": "^16.0.0",
+ "react-i18next": "^16.3.5",
"react-icons": "^5.5.0",
"react-markdown": "^10.1.0",
- "react-router": "^7.9.3",
- "react-syntax-highlighter": "^15.6.6",
+ "react-router": "^7.10.1",
+ "react-syntax-highlighter": "^16.1.0",
"remark-breaks": "^4.0.0",
"remark-gfm": "^4.0.1",
"sirv-cli": "^3.0.1",
"socket.io-client": "^4.8.1",
- "tailwind-merge": "^3.3.1",
+ "tailwind-merge": "^3.4.0",
"tailwind-scrollbar": "^4.0.2",
- "vite": "^7.1.7",
+ "vite": "^7.2.6",
"web-vitals": "^5.1.0",
"ws": "^8.18.2",
- "zustand": "^5.0.8"
+ "zustand": "^5.0.9"
},
"devDependencies": {
"@babel/parser": "^7.28.3",
"@babel/traverse": "^7.28.3",
"@babel/types": "^7.28.2",
"@mswjs/socket.io-binding": "^0.2.0",
- "@playwright/test": "^1.55.1",
- "@react-router/dev": "^7.9.3",
+ "@playwright/test": "^1.57.0",
+ "@react-router/dev": "^7.10.1",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/eslint-plugin-query": "^5.91.0",
"@testing-library/dom": "^10.4.1",
- "@testing-library/jest-dom": "^6.8.0",
+ "@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.0",
"@testing-library/user-event": "^14.6.1",
- "@types/node": "^24.5.2",
- "@types/react": "^19.1.15",
- "@types/react-dom": "^19.1.9",
+ "@types/node": "^24.10.1",
+ "@types/react": "^19.2.7",
+ "@types/react-dom": "^19.2.3",
"@types/react-highlight": "^0.12.8",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/ws": "^8.18.1",
"@typescript-eslint/eslint-plugin": "^7.18.0",
"@typescript-eslint/parser": "^7.18.0",
- "@vitest/coverage-v8": "^3.2.3",
- "autoprefixer": "^10.4.21",
- "cross-env": "^10.0.0",
+ "@vitest/coverage-v8": "^4.0.14",
+ "autoprefixer": "^10.4.22",
+ "cross-env": "^10.1.0",
"eslint": "^8.57.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^18.0.0",
@@ -96,21 +96,28 @@
"eslint-plugin-react-hooks": "^4.6.2",
"eslint-plugin-unused-imports": "^4.2.0",
"husky": "^9.1.7",
- "jsdom": "^27.0.0",
- "lint-staged": "^16.2.3",
+ "jsdom": "^27.2.0",
+ "lint-staged": "^16.2.7",
"msw": "^2.6.6",
- "prettier": "^3.6.2",
- "stripe": "^18.5.0",
+ "prettier": "^3.7.3",
+ "stripe": "^20.0.0",
"tailwindcss": "^4.1.8",
- "typescript": "^5.9.2",
+ "typescript": "^5.9.3",
"vite-plugin-svgr": "^4.5.0",
"vite-tsconfig-paths": "^5.1.4",
- "vitest": "^3.0.2"
+ "vitest": "^4.0.14"
},
"engines": {
"node": ">=22.0.0"
}
},
+ "node_modules/@acemir/cssom": {
+ "version": "0.9.24",
+ "resolved": "https://registry.npmjs.org/@acemir/cssom/-/cssom-0.9.24.tgz",
+ "integrity": "sha512-5YjgMmAiT2rjJZU7XK1SNI7iqTy92DpaYVgG6x63FxkJ11UpYfLndHJATtinWJClAXiOlW9XWaUyAQf8pMrQPg==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@adobe/css-tools": {
"version": "4.4.4",
"resolved": "https://registry.npmjs.org/@adobe/css-tools/-/css-tools-4.4.4.tgz",
@@ -130,60 +137,50 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/@ampproject/remapping": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
- "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==",
- "dev": true,
- "license": "Apache-2.0",
- "dependencies": {
- "@jridgewell/gen-mapping": "^0.3.5",
- "@jridgewell/trace-mapping": "^0.3.24"
- },
- "engines": {
- "node": ">=6.0.0"
- }
- },
"node_modules/@asamuzakjp/css-color": {
- "version": "4.0.5",
- "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.0.5.tgz",
- "integrity": "sha512-lMrXidNhPGsDjytDy11Vwlb6OIGrT3CmLg3VWNFyWkLWtijKl7xjvForlh8vuj0SHGjgl4qZEQzUmYTeQA2JFQ==",
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-4.1.0.tgz",
+ "integrity": "sha512-9xiBAtLn4aNsa4mDnpovJvBn72tNEIACyvlqaNJ+ADemR+yeMJWnBudOi2qGDviJa7SwcDOU/TRh5dnET7qk0w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@csstools/css-calc": "^2.1.4",
"@csstools/css-color-parser": "^3.1.0",
"@csstools/css-parser-algorithms": "^3.0.5",
"@csstools/css-tokenizer": "^3.0.4",
- "lru-cache": "^11.2.1"
+ "lru-cache": "^11.2.2"
}
},
"node_modules/@asamuzakjp/css-color/node_modules/lru-cache": {
- "version": "11.2.1",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.1.tgz",
- "integrity": "sha512-r8LA6i4LP4EeWOhqBaZZjDWwehd1xUJPCJd9Sv300H0ZmcUER4+JPh7bqqZeqs1o5pgtgvXm+d9UGrB5zZGDiQ==",
+ "version": "11.2.4",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz",
+ "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==",
"dev": true,
+ "license": "BlueOak-1.0.0",
"engines": {
"node": "20 || >=22"
}
},
"node_modules/@asamuzakjp/dom-selector": {
- "version": "6.5.6",
- "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.5.6.tgz",
- "integrity": "sha512-Mj3Hu9ymlsERd7WOsUKNUZnJYL4IZ/I9wVVYgtvOsWYiEFbkQ4G7VRIh2USxTVW4BBDIsLG+gBUgqOqf2Kvqow==",
+ "version": "6.7.5",
+ "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-6.7.5.tgz",
+ "integrity": "sha512-Eks6dY8zau4m4wNRQjRVaKQRTalNcPcBvU1ZQ35w5kKRk1gUeNCkVLsRiATurjASTp3TKM4H10wsI50nx3NZdw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@asamuzakjp/nwsapi": "^2.3.9",
"bidi-js": "^1.0.3",
"css-tree": "^3.1.0",
"is-potential-custom-element-name": "^1.0.1",
- "lru-cache": "^11.2.1"
+ "lru-cache": "^11.2.2"
}
},
"node_modules/@asamuzakjp/dom-selector/node_modules/lru-cache": {
- "version": "11.2.1",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.1.tgz",
- "integrity": "sha512-r8LA6i4LP4EeWOhqBaZZjDWwehd1xUJPCJd9Sv300H0ZmcUER4+JPh7bqqZeqs1o5pgtgvXm+d9UGrB5zZGDiQ==",
+ "version": "11.2.4",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.4.tgz",
+ "integrity": "sha512-B5Y16Jr9LB9dHVkh6ZevG+vAbOsNOYCX+sXvFWFu7B3Iz5mijW3zdbMyhsh8ANd2mSWBYdJgnqi+mL7/LrOPYg==",
"dev": true,
+ "license": "BlueOak-1.0.0",
"engines": {
"node": "20 || >=22"
}
@@ -192,7 +189,8 @@
"version": "2.3.9",
"resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz",
"integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/@babel/code-frame": {
"version": "7.27.1",
@@ -209,29 +207,29 @@
}
},
"node_modules/@babel/compat-data": {
- "version": "7.28.4",
- "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.4.tgz",
- "integrity": "sha512-YsmSKC29MJwf0gF8Rjjrg5LQCmyh+j/nD8/eP7f+BeoQTKYqs9RoWbjGOdy0+1Ekr68RJZMUOPVQaQisnIo4Rw==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.28.5.tgz",
+ "integrity": "sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==",
"license": "MIT",
"engines": {
"node": ">=6.9.0"
}
},
"node_modules/@babel/core": {
- "version": "7.28.4",
- "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz",
- "integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.5.tgz",
+ "integrity": "sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==",
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.27.1",
- "@babel/generator": "^7.28.3",
+ "@babel/generator": "^7.28.5",
"@babel/helper-compilation-targets": "^7.27.2",
"@babel/helper-module-transforms": "^7.28.3",
"@babel/helpers": "^7.28.4",
- "@babel/parser": "^7.28.4",
+ "@babel/parser": "^7.28.5",
"@babel/template": "^7.27.2",
- "@babel/traverse": "^7.28.4",
- "@babel/types": "^7.28.4",
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5",
"@jridgewell/remapping": "^2.3.5",
"convert-source-map": "^2.0.0",
"debug": "^4.1.0",
@@ -257,13 +255,13 @@
}
},
"node_modules/@babel/generator": {
- "version": "7.28.3",
- "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.3.tgz",
- "integrity": "sha512-3lSpxGgvnmZznmBkCRnVREPUFJv2wrv9iAoFDvADJc0ypmdOxdUtcLeBgBJ6zE0PMeTKnxeQzyk0xTBq4Ep7zw==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz",
+ "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==",
"license": "MIT",
"dependencies": {
- "@babel/parser": "^7.28.3",
- "@babel/types": "^7.28.2",
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
"@jridgewell/gen-mapping": "^0.3.12",
"@jridgewell/trace-mapping": "^0.3.28",
"jsesc": "^3.0.2"
@@ -311,18 +309,18 @@
}
},
"node_modules/@babel/helper-create-class-features-plugin": {
- "version": "7.28.3",
- "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.28.3.tgz",
- "integrity": "sha512-V9f6ZFIYSLNEbuGA/92uOvYsGCJNsuA8ESZ4ldc09bWk/j8H8TKiPw8Mk1eG6olpnO0ALHJmYfZvF4MEE4gajg==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.28.5.tgz",
+ "integrity": "sha512-q3WC4JfdODypvxArsJQROfupPBq9+lMwjKq7C33GhbFYJsufD0yd/ziwD+hJucLeWsnFPWZjsU2DNFqBPE7jwQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.27.3",
- "@babel/helper-member-expression-to-functions": "^7.27.1",
+ "@babel/helper-member-expression-to-functions": "^7.28.5",
"@babel/helper-optimise-call-expression": "^7.27.1",
"@babel/helper-replace-supers": "^7.27.1",
"@babel/helper-skip-transparent-expression-wrappers": "^7.27.1",
- "@babel/traverse": "^7.28.3",
+ "@babel/traverse": "^7.28.5",
"semver": "^6.3.1"
},
"engines": {
@@ -352,14 +350,14 @@
}
},
"node_modules/@babel/helper-member-expression-to-functions": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.27.1.tgz",
- "integrity": "sha512-E5chM8eWjTp/aNoVpcbfM7mLxu9XGLWYise2eBKGQomAk/Mb4XoxyqXTZbuTohbsl8EKqdlMhnDI2CCLfcs9wA==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.28.5.tgz",
+ "integrity": "sha512-cwM7SBRZcPCLgl8a7cY0soT1SptSzAlMH39vwiRpOQkJlh53r5hdHwLSCZpQdVLT39sZt+CRpNwYG4Y2v77atg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@babel/traverse": "^7.27.1",
- "@babel/types": "^7.27.1"
+ "@babel/traverse": "^7.28.5",
+ "@babel/types": "^7.28.5"
},
"engines": {
"node": ">=6.9.0"
@@ -459,9 +457,9 @@
}
},
"node_modules/@babel/helper-validator-identifier": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.27.1.tgz",
- "integrity": "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz",
+ "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==",
"license": "MIT",
"engines": {
"node": ">=6.9.0"
@@ -490,12 +488,12 @@
}
},
"node_modules/@babel/parser": {
- "version": "7.28.4",
- "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.4.tgz",
- "integrity": "sha512-yZbBqeM6TkpP9du/I2pUZnJsRMGGvOuIrhjzC1AwHwW+6he4mni6Bp/m8ijn0iOuZuPI2BfkCoSRunpyjnrQKg==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz",
+ "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==",
"license": "MIT",
"dependencies": {
- "@babel/types": "^7.28.4"
+ "@babel/types": "^7.28.5"
},
"bin": {
"parser": "bin/babel-parser.js"
@@ -584,14 +582,14 @@
}
},
"node_modules/@babel/plugin-transform-typescript": {
- "version": "7.28.0",
- "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.28.0.tgz",
- "integrity": "sha512-4AEiDEBPIZvLQaWlc9liCavE0xRM0dNca41WtBeM3jgFptfUOSG9z0uteLhq6+3rq+WB6jIvUwKDTpXEHPJ2Vg==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.28.5.tgz",
+ "integrity": "sha512-x2Qa+v/CuEoX7Dr31iAfr0IhInrVOWZU/2vJMJ00FOR/2nM0BcBEclpaf9sWCDc+v5e9dMrhSH8/atq/kX7+bA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@babel/helper-annotate-as-pure": "^7.27.3",
- "@babel/helper-create-class-features-plugin": "^7.27.1",
+ "@babel/helper-create-class-features-plugin": "^7.28.5",
"@babel/helper-plugin-utils": "^7.27.1",
"@babel/helper-skip-transparent-expression-wrappers": "^7.27.1",
"@babel/plugin-syntax-typescript": "^7.27.1"
@@ -604,9 +602,9 @@
}
},
"node_modules/@babel/preset-typescript": {
- "version": "7.27.1",
- "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.27.1.tgz",
- "integrity": "sha512-l7WfQfX0WK4M0v2RudjuQK4u99BS6yLHYEmdtVPP7lKV013zr9DygFuWNlnbvQ9LR+LS0Egz/XAvGx5U9MX0fQ==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.28.5.tgz",
+ "integrity": "sha512-+bQy5WOI2V6LJZpPVxY+yp66XdZ2yifu0Mc1aP5CQKgjn4QM5IN2i5fAZ4xKop47pr8rpVhiAeu+nDQa12C8+g==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -614,7 +612,7 @@
"@babel/helper-validator-option": "^7.27.1",
"@babel/plugin-syntax-jsx": "^7.27.1",
"@babel/plugin-transform-modules-commonjs": "^7.27.1",
- "@babel/plugin-transform-typescript": "^7.27.1"
+ "@babel/plugin-transform-typescript": "^7.28.5"
},
"engines": {
"node": ">=6.9.0"
@@ -647,17 +645,17 @@
}
},
"node_modules/@babel/traverse": {
- "version": "7.28.4",
- "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.4.tgz",
- "integrity": "sha512-YEzuboP2qvQavAcjgQNVgsvHIDv6ZpwXvcvjmyySP2DIMuByS/6ioU5G9pYrWHM6T2YDfc7xga9iNzYOs12CFQ==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz",
+ "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==",
"license": "MIT",
"dependencies": {
"@babel/code-frame": "^7.27.1",
- "@babel/generator": "^7.28.3",
+ "@babel/generator": "^7.28.5",
"@babel/helper-globals": "^7.28.0",
- "@babel/parser": "^7.28.4",
+ "@babel/parser": "^7.28.5",
"@babel/template": "^7.27.2",
- "@babel/types": "^7.28.4",
+ "@babel/types": "^7.28.5",
"debug": "^4.3.1"
},
"engines": {
@@ -665,13 +663,13 @@
}
},
"node_modules/@babel/types": {
- "version": "7.28.4",
- "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.4.tgz",
- "integrity": "sha512-bkFqkLhh3pMBUQQkpVgWDWq/lqzc2678eUyDlTBhRqhCHFguYYGM0Efga7tYk4TogG/3x0EEl66/OQ+WGbWB/Q==",
+ "version": "7.28.5",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz",
+ "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==",
"license": "MIT",
"dependencies": {
"@babel/helper-string-parser": "^7.27.1",
- "@babel/helper-validator-identifier": "^7.27.1"
+ "@babel/helper-validator-identifier": "^7.28.5"
},
"engines": {
"node": ">=6.9.0"
@@ -687,36 +685,6 @@
"node": ">=18"
}
},
- "node_modules/@bundled-es-modules/cookie": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/@bundled-es-modules/cookie/-/cookie-2.0.1.tgz",
- "integrity": "sha512-8o+5fRPLNbjbdGRRmJj3h6Hh1AQJf2dk3qQ/5ZFb+PXkRNiSoMGGUKlsgLfrxneb72axVJyIYji64E2+nNfYyw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "cookie": "^0.7.2"
- }
- },
- "node_modules/@bundled-es-modules/cookie/node_modules/cookie": {
- "version": "0.7.2",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
- "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 0.6"
- }
- },
- "node_modules/@bundled-es-modules/statuses": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz",
- "integrity": "sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "statuses": "^2.0.1"
- }
- },
"node_modules/@csstools/color-helpers": {
"version": "5.1.0",
"resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz",
@@ -732,6 +700,7 @@
"url": "https://opencollective.com/csstools"
}
],
+ "license": "MIT-0",
"engines": {
"node": ">=18"
}
@@ -751,6 +720,7 @@
"url": "https://opencollective.com/csstools"
}
],
+ "license": "MIT",
"engines": {
"node": ">=18"
},
@@ -774,6 +744,7 @@
"url": "https://opencollective.com/csstools"
}
],
+ "license": "MIT",
"dependencies": {
"@csstools/color-helpers": "^5.1.0",
"@csstools/css-calc": "^2.1.4"
@@ -801,6 +772,7 @@
"url": "https://opencollective.com/csstools"
}
],
+ "license": "MIT",
"engines": {
"node": ">=18"
},
@@ -809,9 +781,9 @@
}
},
"node_modules/@csstools/css-syntax-patches-for-csstree": {
- "version": "1.0.14",
- "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.14.tgz",
- "integrity": "sha512-zSlIxa20WvMojjpCSy8WrNpcZ61RqfTfX3XTaOeVlGJrt/8HF3YbzgFZa01yTbT4GWQLwfTcC3EB8i3XnB647Q==",
+ "version": "1.0.20",
+ "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.0.20.tgz",
+ "integrity": "sha512-8BHsjXfSciZxjmHQOuVdW2b8WLUPts9a+mfL13/PzEviufUEW2xnvQuOlKs9dRBHgRqJ53SF/DUoK9+MZk72oQ==",
"dev": true,
"funding": [
{
@@ -823,11 +795,9 @@
"url": "https://opencollective.com/csstools"
}
],
+ "license": "MIT-0",
"engines": {
"node": ">=18"
- },
- "peerDependencies": {
- "postcss": "^8.4"
}
},
"node_modules/@csstools/css-tokenizer": {
@@ -845,6 +815,7 @@
"url": "https://opencollective.com/csstools"
}
],
+ "license": "MIT",
"engines": {
"node": ">=18"
}
@@ -857,9 +828,9 @@
"license": "MIT"
},
"node_modules/@esbuild/aix-ppc64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.9.tgz",
- "integrity": "sha512-OaGtL73Jck6pBKjNIe24BnFE6agGl+6KxDtTfHhy1HmhthfKouEcOhqpSL64K4/0WCtbKFLOdzD/44cJ4k9opA==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz",
+ "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==",
"cpu": [
"ppc64"
],
@@ -873,9 +844,9 @@
}
},
"node_modules/@esbuild/android-arm": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.9.tgz",
- "integrity": "sha512-5WNI1DaMtxQ7t7B6xa572XMXpHAaI/9Hnhk8lcxF4zVN4xstUgTlvuGDorBguKEnZO70qwEcLpfifMLoxiPqHQ==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz",
+ "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==",
"cpu": [
"arm"
],
@@ -889,9 +860,9 @@
}
},
"node_modules/@esbuild/android-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.9.tgz",
- "integrity": "sha512-IDrddSmpSv51ftWslJMvl3Q2ZT98fUSL2/rlUXuVqRXHCs5EUF1/f+jbjF5+NG9UffUDMCiTyh8iec7u8RlTLg==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz",
+ "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==",
"cpu": [
"arm64"
],
@@ -905,9 +876,9 @@
}
},
"node_modules/@esbuild/android-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.9.tgz",
- "integrity": "sha512-I853iMZ1hWZdNllhVZKm34f4wErd4lMyeV7BLzEExGEIZYsOzqDWDf+y082izYUE8gtJnYHdeDpN/6tUdwvfiw==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz",
+ "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==",
"cpu": [
"x64"
],
@@ -921,9 +892,9 @@
}
},
"node_modules/@esbuild/darwin-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.9.tgz",
- "integrity": "sha512-XIpIDMAjOELi/9PB30vEbVMs3GV1v2zkkPnuyRRURbhqjyzIINwj+nbQATh4H9GxUgH1kFsEyQMxwiLFKUS6Rg==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz",
+ "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==",
"cpu": [
"arm64"
],
@@ -937,9 +908,9 @@
}
},
"node_modules/@esbuild/darwin-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.9.tgz",
- "integrity": "sha512-jhHfBzjYTA1IQu8VyrjCX4ApJDnH+ez+IYVEoJHeqJm9VhG9Dh2BYaJritkYK3vMaXrf7Ogr/0MQ8/MeIefsPQ==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz",
+ "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==",
"cpu": [
"x64"
],
@@ -953,9 +924,9 @@
}
},
"node_modules/@esbuild/freebsd-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.9.tgz",
- "integrity": "sha512-z93DmbnY6fX9+KdD4Ue/H6sYs+bhFQJNCPZsi4XWJoYblUqT06MQUdBCpcSfuiN72AbqeBFu5LVQTjfXDE2A6Q==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz",
+ "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==",
"cpu": [
"arm64"
],
@@ -969,9 +940,9 @@
}
},
"node_modules/@esbuild/freebsd-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.9.tgz",
- "integrity": "sha512-mrKX6H/vOyo5v71YfXWJxLVxgy1kyt1MQaD8wZJgJfG4gq4DpQGpgTB74e5yBeQdyMTbgxp0YtNj7NuHN0PoZg==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz",
+ "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==",
"cpu": [
"x64"
],
@@ -985,9 +956,9 @@
}
},
"node_modules/@esbuild/linux-arm": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.9.tgz",
- "integrity": "sha512-HBU2Xv78SMgaydBmdor38lg8YDnFKSARg1Q6AT0/y2ezUAKiZvc211RDFHlEZRFNRVhcMamiToo7bDx3VEOYQw==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz",
+ "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==",
"cpu": [
"arm"
],
@@ -1001,9 +972,9 @@
}
},
"node_modules/@esbuild/linux-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.9.tgz",
- "integrity": "sha512-BlB7bIcLT3G26urh5Dmse7fiLmLXnRlopw4s8DalgZ8ef79Jj4aUcYbk90g8iCa2467HX8SAIidbL7gsqXHdRw==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz",
+ "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==",
"cpu": [
"arm64"
],
@@ -1017,9 +988,9 @@
}
},
"node_modules/@esbuild/linux-ia32": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.9.tgz",
- "integrity": "sha512-e7S3MOJPZGp2QW6AK6+Ly81rC7oOSerQ+P8L0ta4FhVi+/j/v2yZzx5CqqDaWjtPFfYz21Vi1S0auHrap3Ma3A==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz",
+ "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==",
"cpu": [
"ia32"
],
@@ -1033,9 +1004,9 @@
}
},
"node_modules/@esbuild/linux-loong64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.9.tgz",
- "integrity": "sha512-Sbe10Bnn0oUAB2AalYztvGcK+o6YFFA/9829PhOCUS9vkJElXGdphz0A3DbMdP8gmKkqPmPcMJmJOrI3VYB1JQ==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz",
+ "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==",
"cpu": [
"loong64"
],
@@ -1049,9 +1020,9 @@
}
},
"node_modules/@esbuild/linux-mips64el": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.9.tgz",
- "integrity": "sha512-YcM5br0mVyZw2jcQeLIkhWtKPeVfAerES5PvOzaDxVtIyZ2NUBZKNLjC5z3/fUlDgT6w89VsxP2qzNipOaaDyA==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz",
+ "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==",
"cpu": [
"mips64el"
],
@@ -1065,9 +1036,9 @@
}
},
"node_modules/@esbuild/linux-ppc64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.9.tgz",
- "integrity": "sha512-++0HQvasdo20JytyDpFvQtNrEsAgNG2CY1CLMwGXfFTKGBGQT3bOeLSYE2l1fYdvML5KUuwn9Z8L1EWe2tzs1w==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz",
+ "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==",
"cpu": [
"ppc64"
],
@@ -1081,9 +1052,9 @@
}
},
"node_modules/@esbuild/linux-riscv64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.9.tgz",
- "integrity": "sha512-uNIBa279Y3fkjV+2cUjx36xkx7eSjb8IvnL01eXUKXez/CBHNRw5ekCGMPM0BcmqBxBcdgUWuUXmVWwm4CH9kg==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz",
+ "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==",
"cpu": [
"riscv64"
],
@@ -1097,9 +1068,9 @@
}
},
"node_modules/@esbuild/linux-s390x": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.9.tgz",
- "integrity": "sha512-Mfiphvp3MjC/lctb+7D287Xw1DGzqJPb/J2aHHcHxflUo+8tmN/6d4k6I2yFR7BVo5/g7x2Monq4+Yew0EHRIA==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz",
+ "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==",
"cpu": [
"s390x"
],
@@ -1113,9 +1084,9 @@
}
},
"node_modules/@esbuild/linux-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.9.tgz",
- "integrity": "sha512-iSwByxzRe48YVkmpbgoxVzn76BXjlYFXC7NvLYq+b+kDjyyk30J0JY47DIn8z1MO3K0oSl9fZoRmZPQI4Hklzg==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz",
+ "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==",
"cpu": [
"x64"
],
@@ -1129,9 +1100,9 @@
}
},
"node_modules/@esbuild/netbsd-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.9.tgz",
- "integrity": "sha512-9jNJl6FqaUG+COdQMjSCGW4QiMHH88xWbvZ+kRVblZsWrkXlABuGdFJ1E9L7HK+T0Yqd4akKNa/lO0+jDxQD4Q==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz",
+ "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==",
"cpu": [
"arm64"
],
@@ -1145,9 +1116,9 @@
}
},
"node_modules/@esbuild/netbsd-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.9.tgz",
- "integrity": "sha512-RLLdkflmqRG8KanPGOU7Rpg829ZHu8nFy5Pqdi9U01VYtG9Y0zOG6Vr2z4/S+/3zIyOxiK6cCeYNWOFR9QP87g==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz",
+ "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==",
"cpu": [
"x64"
],
@@ -1161,9 +1132,9 @@
}
},
"node_modules/@esbuild/openbsd-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.9.tgz",
- "integrity": "sha512-YaFBlPGeDasft5IIM+CQAhJAqS3St3nJzDEgsgFixcfZeyGPCd6eJBWzke5piZuZ7CtL656eOSYKk4Ls2C0FRQ==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz",
+ "integrity": "sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==",
"cpu": [
"arm64"
],
@@ -1177,9 +1148,9 @@
}
},
"node_modules/@esbuild/openbsd-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.9.tgz",
- "integrity": "sha512-1MkgTCuvMGWuqVtAvkpkXFmtL8XhWy+j4jaSO2wxfJtilVCi0ZE37b8uOdMItIHz4I6z1bWWtEX4CJwcKYLcuA==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz",
+ "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==",
"cpu": [
"x64"
],
@@ -1193,9 +1164,9 @@
}
},
"node_modules/@esbuild/openharmony-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.9.tgz",
- "integrity": "sha512-4Xd0xNiMVXKh6Fa7HEJQbrpP3m3DDn43jKxMjxLLRjWnRsfxjORYJlXPO4JNcXtOyfajXorRKY9NkOpTHptErg==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz",
+ "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==",
"cpu": [
"arm64"
],
@@ -1209,9 +1180,9 @@
}
},
"node_modules/@esbuild/sunos-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.9.tgz",
- "integrity": "sha512-WjH4s6hzo00nNezhp3wFIAfmGZ8U7KtrJNlFMRKxiI9mxEK1scOMAaa9i4crUtu+tBr+0IN6JCuAcSBJZfnphw==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz",
+ "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==",
"cpu": [
"x64"
],
@@ -1225,9 +1196,9 @@
}
},
"node_modules/@esbuild/win32-arm64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.9.tgz",
- "integrity": "sha512-mGFrVJHmZiRqmP8xFOc6b84/7xa5y5YvR1x8djzXpJBSv/UsNK6aqec+6JDjConTgvvQefdGhFDAs2DLAds6gQ==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz",
+ "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==",
"cpu": [
"arm64"
],
@@ -1241,9 +1212,9 @@
}
},
"node_modules/@esbuild/win32-ia32": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.9.tgz",
- "integrity": "sha512-b33gLVU2k11nVx1OhX3C8QQP6UHQK4ZtN56oFWvVXvz2VkDoe6fbG8TOgHFxEvqeqohmRnIHe5A1+HADk4OQww==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz",
+ "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==",
"cpu": [
"ia32"
],
@@ -1257,9 +1228,9 @@
}
},
"node_modules/@esbuild/win32-x64": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.9.tgz",
- "integrity": "sha512-PPOl1mi6lpLNQxnGoyAfschAodRFYXJ+9fs6WHXz7CSWKbOqiMZsubC+BQsVKuul+3vKLuwTHsS2c2y9EoKwxQ==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz",
+ "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==",
"cpu": [
"x64"
],
@@ -1273,9 +1244,9 @@
}
},
"node_modules/@eslint-community/eslint-utils": {
- "version": "4.8.0",
- "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.8.0.tgz",
- "integrity": "sha512-MJQFqrZgcW0UNYLGOuQpey/oTN59vyWwplvCGZztn1cKz9agZPPYpJB7h2OMmuu7VLqkvEjN8feFZJmxNF9D+Q==",
+ "version": "4.9.0",
+ "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.0.tgz",
+ "integrity": "sha512-ayVFHdtZ+hsq1t2Dy24wCmGXGe4q9Gu3smhLYALJrr473ZH27MsnSL+LKUlimp4BWJqMDMLmPpx/Q9R3OAlL4g==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1292,9 +1263,9 @@
}
},
"node_modules/@eslint-community/regexpp": {
- "version": "4.12.1",
- "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz",
- "integrity": "sha512-CCZCDJuduB9OUkFkY2IgppNZMi2lBQgD2qzwXkEia16cge2pijY/aXi96CJMquDMn3nJdlPV1A5KrJEXwfLNzQ==",
+ "version": "4.12.2",
+ "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz",
+ "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==",
"dev": true,
"license": "MIT",
"engines": {
@@ -1360,13 +1331,13 @@
}
},
"node_modules/@formatjs/ecma402-abstract": {
- "version": "2.3.4",
- "resolved": "https://registry.npmjs.org/@formatjs/ecma402-abstract/-/ecma402-abstract-2.3.4.tgz",
- "integrity": "sha512-qrycXDeaORzIqNhBOx0btnhpD1c+/qFIHAN9znofuMJX6QBwtbrmlpWfD4oiUUD2vJUOIYFA/gYtg2KAMGG7sA==",
+ "version": "2.3.6",
+ "resolved": "https://registry.npmjs.org/@formatjs/ecma402-abstract/-/ecma402-abstract-2.3.6.tgz",
+ "integrity": "sha512-HJnTFeRM2kVFVr5gr5kH1XP6K0JcJtE7Lzvtr3FS/so5f1kpsqqqxy5JF+FRaO6H2qmcMfAUIox7AJteieRtVw==",
"license": "MIT",
"dependencies": {
"@formatjs/fast-memoize": "2.2.7",
- "@formatjs/intl-localematcher": "0.6.1",
+ "@formatjs/intl-localematcher": "0.6.2",
"decimal.js": "^10.4.3",
"tslib": "^2.8.0"
}
@@ -1381,53 +1352,54 @@
}
},
"node_modules/@formatjs/icu-messageformat-parser": {
- "version": "2.11.2",
- "resolved": "https://registry.npmjs.org/@formatjs/icu-messageformat-parser/-/icu-messageformat-parser-2.11.2.tgz",
- "integrity": "sha512-AfiMi5NOSo2TQImsYAg8UYddsNJ/vUEv/HaNqiFjnI3ZFfWihUtD5QtuX6kHl8+H+d3qvnE/3HZrfzgdWpsLNA==",
+ "version": "2.11.4",
+ "resolved": "https://registry.npmjs.org/@formatjs/icu-messageformat-parser/-/icu-messageformat-parser-2.11.4.tgz",
+ "integrity": "sha512-7kR78cRrPNB4fjGFZg3Rmj5aah8rQj9KPzuLsmcSn4ipLXQvC04keycTI1F7kJYDwIXtT2+7IDEto842CfZBtw==",
"license": "MIT",
"dependencies": {
- "@formatjs/ecma402-abstract": "2.3.4",
- "@formatjs/icu-skeleton-parser": "1.8.14",
+ "@formatjs/ecma402-abstract": "2.3.6",
+ "@formatjs/icu-skeleton-parser": "1.8.16",
"tslib": "^2.8.0"
}
},
"node_modules/@formatjs/icu-skeleton-parser": {
- "version": "1.8.14",
- "resolved": "https://registry.npmjs.org/@formatjs/icu-skeleton-parser/-/icu-skeleton-parser-1.8.14.tgz",
- "integrity": "sha512-i4q4V4qslThK4Ig8SxyD76cp3+QJ3sAqr7f6q9VVfeGtxG9OhiAk3y9XF6Q41OymsKzsGQ6OQQoJNY4/lI8TcQ==",
+ "version": "1.8.16",
+ "resolved": "https://registry.npmjs.org/@formatjs/icu-skeleton-parser/-/icu-skeleton-parser-1.8.16.tgz",
+ "integrity": "sha512-H13E9Xl+PxBd8D5/6TVUluSpxGNvFSlN/b3coUp0e0JpuWXXnQDiavIpY3NnvSp4xhEMoXyyBvVfdFX8jglOHQ==",
"license": "MIT",
"dependencies": {
- "@formatjs/ecma402-abstract": "2.3.4",
+ "@formatjs/ecma402-abstract": "2.3.6",
"tslib": "^2.8.0"
}
},
"node_modules/@formatjs/intl-localematcher": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.6.1.tgz",
- "integrity": "sha512-ePEgLgVCqi2BBFnTMWPfIghu6FkbZnnBVhO2sSxvLfrdFw7wCHAHiDoM2h4NRgjbaY7+B7HgOLZGkK187pZTZg==",
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.6.2.tgz",
+ "integrity": "sha512-XOMO2Hupl0wdd172Y06h6kLpBz6Dv+J4okPLl4LPtzbr8f66WbIoy4ev98EBuZ6ZK4h5ydTN6XneT4QVpD7cdA==",
"license": "MIT",
"dependencies": {
"tslib": "^2.8.0"
}
},
"node_modules/@heroui/accordion": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/accordion/-/accordion-2.2.23.tgz",
- "integrity": "sha512-eXokso461YdSkJ6t3fFxBq2xkxCcZPbXECwanNHaLZPBh1QMaVdtCEZZxVB4HeoMRmZchRHWbUrbiz/l+A9hZQ==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/accordion/-/accordion-2.2.24.tgz",
+ "integrity": "sha512-iVJVKKsGN4t3hn4Exwic6n5SOQOmmmsodSsCt0VUcs5VTHu9876sAC44xlEMpc9CP8pC1wQS3DzWl3mN6Z120g==",
+ "license": "MIT",
"dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/divider": "2.2.19",
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/divider": "2.2.20",
"@heroui/dom-animation": "2.1.10",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-accordion": "2.2.17",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-stately/tree": "3.9.2",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-accordion": "2.2.18",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-stately/tree": "3.9.3",
"@react-types/accordion": "3.0.0-alpha.26",
- "@react-types/shared": "3.32.0"
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1438,14 +1410,15 @@
}
},
"node_modules/@heroui/alert": {
- "version": "2.2.26",
- "resolved": "https://registry.npmjs.org/@heroui/alert/-/alert-2.2.26.tgz",
- "integrity": "sha512-ngyPzbRrW3ZNgwb6DlsvdCboDeHrncN4Q1bvdwFKIn2uHYRF2pEJgBhWuqpCVDaIwGhypGMXrBFFwIvdCNF+Zw==",
+ "version": "2.2.27",
+ "resolved": "https://registry.npmjs.org/@heroui/alert/-/alert-2.2.27.tgz",
+ "integrity": "sha512-Y6oX9SV//tdhxhpgkSZvnjwdx7d8S7RAhgVlxCs2Hla//nCFC3yiMHIv8UotTryAGdOwZIsffmcna9vqbNL5vw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/button": "2.2.26",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/button": "2.2.27",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/shared-utils": "2.1.12",
"@react-stately/utils": "3.10.8"
},
"peerDependencies": {
@@ -1456,15 +1429,16 @@
}
},
"node_modules/@heroui/aria-utils": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/aria-utils/-/aria-utils-2.2.23.tgz",
- "integrity": "sha512-RF5vWZdBdQIGfQ5GgPt3XTsNDodLJ87criWUVt7qOox+lmJrSkYPmHgA1bEZxJdd3aCwLCJbcBGqP7vW3+OVCQ==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/aria-utils/-/aria-utils-2.2.24.tgz",
+ "integrity": "sha512-Y7FfQl2jvJr8JjpH+iuJElDwbn3eSWohuxHg6e5+xk5GcPYrEecgr0F/9qD6VU8IvVrRzJ00JzmT87lgA5iE3Q==",
+ "license": "MIT",
"dependencies": {
- "@heroui/system": "2.4.22",
- "@react-aria/utils": "3.30.1",
- "@react-stately/collections": "3.12.7",
- "@react-types/overlays": "3.9.1",
- "@react-types/shared": "3.32.0"
+ "@heroui/system": "2.4.23",
+ "@react-aria/utils": "3.31.0",
+ "@react-stately/collections": "3.12.8",
+ "@react-types/overlays": "3.9.2",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0",
@@ -1472,26 +1446,27 @@
}
},
"node_modules/@heroui/autocomplete": {
- "version": "2.3.28",
- "resolved": "https://registry.npmjs.org/@heroui/autocomplete/-/autocomplete-2.3.28.tgz",
- "integrity": "sha512-7z55VHlCG6Gh7IKypJdc7YIO45rR05nMAU0fu5D2ZbcsjBN1ie+ld2M57ypamK/DVD7TyauWvFZt55LcWN5ejQ==",
- "dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/button": "2.2.26",
- "@heroui/form": "2.1.26",
- "@heroui/input": "2.4.27",
- "@heroui/listbox": "2.3.25",
- "@heroui/popover": "2.3.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/scroll-shadow": "2.3.17",
+ "version": "2.3.29",
+ "resolved": "https://registry.npmjs.org/@heroui/autocomplete/-/autocomplete-2.3.29.tgz",
+ "integrity": "sha512-BQkiWrrhPbNMFF1Hd60QDyG4iwD+sdsjWh0h7sw2XhcT6Bjw/6Hqpf4eHsTvPElW/554vPZVtChjugRY1N2zsw==",
+ "license": "MIT",
+ "dependencies": {
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/button": "2.2.27",
+ "@heroui/form": "2.1.27",
+ "@heroui/input": "2.4.28",
+ "@heroui/listbox": "2.3.26",
+ "@heroui/popover": "2.3.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/scroll-shadow": "2.3.18",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/combobox": "3.13.1",
- "@react-aria/i18n": "3.12.12",
- "@react-stately/combobox": "3.11.1",
- "@react-types/combobox": "3.13.8",
- "@react-types/shared": "3.32.0"
+ "@react-aria/combobox": "3.14.0",
+ "@react-aria/i18n": "3.12.13",
+ "@react-stately/combobox": "3.12.0",
+ "@react-types/combobox": "3.13.9",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1502,16 +1477,16 @@
}
},
"node_modules/@heroui/avatar": {
- "version": "2.2.21",
- "resolved": "https://registry.npmjs.org/@heroui/avatar/-/avatar-2.2.21.tgz",
- "integrity": "sha512-oer+CuEAQpvhLzyBmO3eWhsdbWzcyIDn8fkPl4D2AMfpNP8ve82ysXEC+DLcoOEESS3ykkHsp4C0MPREgC3QgA==",
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/@heroui/avatar/-/avatar-2.2.22.tgz",
+ "integrity": "sha512-znmKdsrVj91Fg8+wm/HA/b8zi3iAg5g3MezliBfS2PmwgZcpBR6VtwgeeP6uN49+TR+faGIrck0Zxceuw4U0FQ==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-image": "2.1.12",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-image": "2.1.13",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1521,13 +1496,13 @@
}
},
"node_modules/@heroui/badge": {
- "version": "2.2.16",
- "resolved": "https://registry.npmjs.org/@heroui/badge/-/badge-2.2.16.tgz",
- "integrity": "sha512-gW0aVdic+5jwDhifIB8TWJ6170JOOzLn7Jkomj2IsN2G+oVrJ7XdJJGr2mYkoeNXAwYlYVyXTANV+zPSGKbx7A==",
+ "version": "2.2.17",
+ "resolved": "https://registry.npmjs.org/@heroui/badge/-/badge-2.2.17.tgz",
+ "integrity": "sha512-UNILRsAIJn+B6aWml+Rv2QCyYB7sadNqRPDPzNeVKJd8j3MNgZyyEHDwvqM2FWrgGccQIuWFaUgGdnPxRJpwwg==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1537,17 +1512,17 @@
}
},
"node_modules/@heroui/breadcrumbs": {
- "version": "2.2.21",
- "resolved": "https://registry.npmjs.org/@heroui/breadcrumbs/-/breadcrumbs-2.2.21.tgz",
- "integrity": "sha512-CB/RNyng37thY8eCbCsIHVV/hMdND4l+MapJOcCi6ffbKT0bebC+4ukcktcdZ/WucAn2qZdl4NfdyIuE0ZqjyQ==",
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/@heroui/breadcrumbs/-/breadcrumbs-2.2.22.tgz",
+ "integrity": "sha512-2fWfpbwhRPeC99Kuzu+DnzOYL4TOkDm9sznvSj0kIAbw/Rvl+D2/6fmBOaTRIUXfswWpHVRUCcNYczIAp0PkoA==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@react-aria/breadcrumbs": "3.5.28",
- "@react-aria/focus": "3.21.1",
- "@react-types/breadcrumbs": "3.7.16"
+ "@heroui/shared-utils": "2.1.12",
+ "@react-aria/breadcrumbs": "3.5.29",
+ "@react-aria/focus": "3.21.2",
+ "@react-types/breadcrumbs": "3.7.17"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1557,18 +1532,19 @@
}
},
"node_modules/@heroui/button": {
- "version": "2.2.26",
- "resolved": "https://registry.npmjs.org/@heroui/button/-/button-2.2.26.tgz",
- "integrity": "sha512-Z4Kp7M444pgzKCUDTZX8Q5GnxOxqIJnAB58+8g5ETlA++Na+qqXwAXADmAPIrBB7uqoRUrsP7U/bpp5SiZYJ2A==",
+ "version": "2.2.27",
+ "resolved": "https://registry.npmjs.org/@heroui/button/-/button-2.2.27.tgz",
+ "integrity": "sha512-Fxb8rtjPQm9T4GAtB1oW2QMUiQCtn7EtvO5AN41ANxAgmsNMM5wnLTkxQ05vNueCrp47kTDtSuyMhKU2llATHQ==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/ripple": "2.2.19",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/spinner": "2.2.23",
- "@heroui/use-aria-button": "2.2.19",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-types/shared": "3.32.0"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/ripple": "2.2.20",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/spinner": "2.2.24",
+ "@heroui/use-aria-button": "2.2.20",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1579,28 +1555,29 @@
}
},
"node_modules/@heroui/calendar": {
- "version": "2.2.26",
- "resolved": "https://registry.npmjs.org/@heroui/calendar/-/calendar-2.2.26.tgz",
- "integrity": "sha512-jCFc+JSl/yQqAVi5TladdYpiX0vf72Sy2vuCTN+HdcpH3SFkJgPLlbt6ib+pbAi14hGbUdJ+POmBC19URZ/g7g==",
+ "version": "2.2.27",
+ "resolved": "https://registry.npmjs.org/@heroui/calendar/-/calendar-2.2.27.tgz",
+ "integrity": "sha512-VtyXQSoT9u9tC4HjBkJIaSSmhau1LwPUwvof0LjYDpBfTsJKqn+308wI3nAp75BTbAkK+vFM8LI0VfbALCwR4Q==",
+ "license": "MIT",
"dependencies": {
- "@heroui/button": "2.2.26",
+ "@heroui/button": "2.2.27",
"@heroui/dom-animation": "2.1.10",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-button": "2.2.19",
- "@internationalized/date": "3.9.0",
- "@react-aria/calendar": "3.9.1",
- "@react-aria/focus": "3.21.1",
- "@react-aria/i18n": "3.12.12",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/visually-hidden": "3.8.27",
- "@react-stately/calendar": "3.8.4",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-button": "2.2.20",
+ "@internationalized/date": "3.10.0",
+ "@react-aria/calendar": "3.9.2",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/i18n": "3.12.13",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/visually-hidden": "3.8.28",
+ "@react-stately/calendar": "3.9.0",
"@react-stately/utils": "3.10.8",
- "@react-types/button": "3.14.0",
- "@react-types/calendar": "3.7.4",
- "@react-types/shared": "3.32.0",
+ "@react-types/button": "3.14.1",
+ "@react-types/calendar": "3.8.0",
+ "@react-types/shared": "3.32.1",
"scroll-into-view-if-needed": "3.0.10"
},
"peerDependencies": {
@@ -1612,18 +1589,18 @@
}
},
"node_modules/@heroui/card": {
- "version": "2.2.24",
- "resolved": "https://registry.npmjs.org/@heroui/card/-/card-2.2.24.tgz",
- "integrity": "sha512-kv4xLJTNYSar3YjiziA71VSZbco0AQUiZAuyP9rZ8XSht8HxLQsVpM6ywFa+/SGTGAh5sIv0qCYCpm0m4BrSxw==",
+ "version": "2.2.25",
+ "resolved": "https://registry.npmjs.org/@heroui/card/-/card-2.2.25.tgz",
+ "integrity": "sha512-dtd/G24zePIHPutRIxWC69IO3IGJs8X+zh9rBYM9cY5Q972D8Eet5WdWTfDBhw//fFIoagDAs5YcI9emGczGaQ==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/ripple": "2.2.19",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-button": "2.2.19",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-types/shared": "3.32.0"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/ripple": "2.2.20",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-button": "2.2.20",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1634,22 +1611,23 @@
}
},
"node_modules/@heroui/checkbox": {
- "version": "2.3.26",
- "resolved": "https://registry.npmjs.org/@heroui/checkbox/-/checkbox-2.3.26.tgz",
- "integrity": "sha512-i3f6pYNclFN/+CHhgF1xWjBaHNEbb2HoZaM3Q2zLVTzDpBx0893Vu3iDkH6wwx71ze8N/Y0cqZWFxR5v+IQUKg==",
+ "version": "2.3.27",
+ "resolved": "https://registry.npmjs.org/@heroui/checkbox/-/checkbox-2.3.27.tgz",
+ "integrity": "sha512-YC0deiB7EOzcpJtk9SdySugD1Z2TNtfyYee2voDBHrng7ZBRB+cmAvizXINHnaQGFi0yuVPrZ5ixR/wsvTNW+Q==",
+ "license": "MIT",
"dependencies": {
- "@heroui/form": "2.1.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/form": "2.1.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-callback-ref": "2.1.8",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/checkbox": "3.16.1",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-stately/checkbox": "3.7.1",
- "@react-stately/toggle": "3.9.1",
- "@react-types/checkbox": "3.10.1",
- "@react-types/shared": "3.32.0"
+ "@react-aria/checkbox": "3.16.2",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-stately/checkbox": "3.7.2",
+ "@react-stately/toggle": "3.9.2",
+ "@react-types/checkbox": "3.10.2",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1659,16 +1637,16 @@
}
},
"node_modules/@heroui/chip": {
- "version": "2.2.21",
- "resolved": "https://registry.npmjs.org/@heroui/chip/-/chip-2.2.21.tgz",
- "integrity": "sha512-vE1XbVL4U92RjuXZWnQgcPIFQ9amLEDCVTK5IbCF2MJ7Xr6ofDj6KTduauCCH1H40p9y1zk6+fioqvxDEoCgDw==",
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/@heroui/chip/-/chip-2.2.22.tgz",
+ "integrity": "sha512-6O4Sv1chP+xxftp7E5gHUJIzo04ML9BW9N9jjxWCqT0Qtl+a/ZxnDalCyup6oraMiVLLHp+zEVX93C+3LONgkg==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5"
+ "@heroui/shared-utils": "2.1.12",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1678,13 +1656,14 @@
}
},
"node_modules/@heroui/code": {
- "version": "2.2.20",
- "resolved": "https://registry.npmjs.org/@heroui/code/-/code-2.2.20.tgz",
- "integrity": "sha512-Bd0fwvBv3K1NGjjlKxbHxCIXjQ0Ost6m3z5P295JZ5yf9RIub4ztLqYx2wS0cRJ7z/AjqF6YBQlhCMt76cuEsQ==",
+ "version": "2.2.21",
+ "resolved": "https://registry.npmjs.org/@heroui/code/-/code-2.2.21.tgz",
+ "integrity": "sha512-ExHcfTGr9tCbAaBOfMzTla8iHHfwIV5/xRk4WApeVmL4MiIlLMykc9bSi1c88ltaJInQGFAmE6MOFHXuGHxBXw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/system-rsc": "2.3.19"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/system-rsc": "2.3.20"
},
"peerDependencies": {
"@heroui/theme": ">=2.4.17",
@@ -1693,19 +1672,20 @@
}
},
"node_modules/@heroui/date-input": {
- "version": "2.3.26",
- "resolved": "https://registry.npmjs.org/@heroui/date-input/-/date-input-2.3.26.tgz",
- "integrity": "sha512-iF3YRZYSk37oEzVSop9hHd8VoNTJ3lIO06Oq/Lj64HGinuK06/PZrFhEWqKKZ472RctzLTmPbAjeXuhHh2mgMg==",
+ "version": "2.3.27",
+ "resolved": "https://registry.npmjs.org/@heroui/date-input/-/date-input-2.3.27.tgz",
+ "integrity": "sha512-IxvZYezbR9jRxTWdsuHH47nsnB6RV1HPY7VwiJd9ZCy6P6oUV0Rx3cdwIRtUnyXbvz1G7+I22NL4C2Ku194l8A==",
+ "license": "MIT",
"dependencies": {
- "@heroui/form": "2.1.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@internationalized/date": "3.9.0",
- "@react-aria/datepicker": "3.15.1",
- "@react-aria/i18n": "3.12.12",
- "@react-stately/datepicker": "3.15.1",
- "@react-types/datepicker": "3.13.1",
- "@react-types/shared": "3.32.0"
+ "@heroui/form": "2.1.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@internationalized/date": "3.10.0",
+ "@react-aria/datepicker": "3.15.2",
+ "@react-aria/i18n": "3.12.13",
+ "@react-stately/datepicker": "3.15.2",
+ "@react-types/datepicker": "3.13.2",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1715,26 +1695,27 @@
}
},
"node_modules/@heroui/date-picker": {
- "version": "2.3.27",
- "resolved": "https://registry.npmjs.org/@heroui/date-picker/-/date-picker-2.3.27.tgz",
- "integrity": "sha512-FoiORJ6e8cXyoqBn5mvXaBUocW3NNXTV07ceJhqyu0GVS+jV0J0bPZBg4G8cz7BjaU+8cquHsFQanz73bViH3g==",
- "dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/button": "2.2.26",
- "@heroui/calendar": "2.2.26",
- "@heroui/date-input": "2.3.26",
- "@heroui/form": "2.1.26",
- "@heroui/popover": "2.3.26",
- "@heroui/react-utils": "2.1.13",
+ "version": "2.3.28",
+ "resolved": "https://registry.npmjs.org/@heroui/date-picker/-/date-picker-2.3.28.tgz",
+ "integrity": "sha512-duKvXijabpafxU04sItrozf982tXkUDymcT3SoEvW4LDg6bECgPI8bYNN49hlzkI8+zuwJdKzJ4hDmANGVaL8Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/button": "2.2.27",
+ "@heroui/calendar": "2.2.27",
+ "@heroui/date-input": "2.3.27",
+ "@heroui/form": "2.1.27",
+ "@heroui/popover": "2.3.27",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@internationalized/date": "3.9.0",
- "@react-aria/datepicker": "3.15.1",
- "@react-aria/i18n": "3.12.12",
- "@react-stately/datepicker": "3.15.1",
+ "@heroui/shared-utils": "2.1.12",
+ "@internationalized/date": "3.10.0",
+ "@react-aria/datepicker": "3.15.2",
+ "@react-aria/i18n": "3.12.13",
+ "@react-stately/datepicker": "3.15.2",
"@react-stately/utils": "3.10.8",
- "@react-types/datepicker": "3.13.1",
- "@react-types/shared": "3.32.0"
+ "@react-types/datepicker": "3.13.2",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1745,13 +1726,14 @@
}
},
"node_modules/@heroui/divider": {
- "version": "2.2.19",
- "resolved": "https://registry.npmjs.org/@heroui/divider/-/divider-2.2.19.tgz",
- "integrity": "sha512-FHoXojco23o/A9GJU6K2iJ3uAvcV7AJ4ppAKIGaKS4weJnYOsh5f9NE2RL3NasmIjk3DLMERDjVVuPyDdJ+rpw==",
+ "version": "2.2.20",
+ "resolved": "https://registry.npmjs.org/@heroui/divider/-/divider-2.2.20.tgz",
+ "integrity": "sha512-t+NNJ2e5okZraLKQoj+rS2l49IMy5AeXTixjsR+QRZ/WPrETNpMj4lw5cBSxG0i7WhRhlBa+KgqweUUezvCdAg==",
+ "license": "MIT",
"dependencies": {
"@heroui/react-rsc-utils": "2.1.9",
- "@heroui/system-rsc": "2.3.19",
- "@react-types/shared": "3.32.0"
+ "@heroui/system-rsc": "2.3.20",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/theme": ">=2.4.17",
@@ -1769,14 +1751,15 @@
}
},
"node_modules/@heroui/drawer": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/drawer/-/drawer-2.2.23.tgz",
- "integrity": "sha512-43/Aoi7Qi4YXmVXXy43v2pyLmi4ZW32nXSnbU5xdKhMb0zFNThAH0/eJmHdtW8AUjei2W1wTmMpGn/WHCYVXOA==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/drawer/-/drawer-2.2.24.tgz",
+ "integrity": "sha512-gb51Lj9A8jlL1UvUrQ+MLS9tz+Qw+cdXwIJd39RXDkJwDmxqhzkz+WoOPZZwcOAHtATmwlTuxxlv6Cro59iswg==",
+ "license": "MIT",
"dependencies": {
- "@heroui/framer-utils": "2.1.22",
- "@heroui/modal": "2.2.23",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/modal": "2.2.24",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1786,19 +1769,20 @@
}
},
"node_modules/@heroui/dropdown": {
- "version": "2.3.26",
- "resolved": "https://registry.npmjs.org/@heroui/dropdown/-/dropdown-2.3.26.tgz",
- "integrity": "sha512-ZuOawL7OnsC5qykYixADfaeSqZleFg4IwZnDN6cd17bXErxPnBYBVnQSnHRsyCUJm7gYiVcDXljNKwp/2reahg==",
+ "version": "2.3.27",
+ "resolved": "https://registry.npmjs.org/@heroui/dropdown/-/dropdown-2.3.27.tgz",
+ "integrity": "sha512-6aedMmxC+St5Ixz9o3s0ERkLOR6ZQE2uRccmRchPCEt7ZJU6TAeJo7fSpxIvdEUjFDe+pNhR2ojIocZEXtBZZg==",
+ "license": "MIT",
"dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/menu": "2.2.25",
- "@heroui/popover": "2.3.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@react-aria/focus": "3.21.1",
- "@react-aria/menu": "3.19.1",
- "@react-stately/menu": "3.9.7",
- "@react-types/menu": "3.10.4"
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/menu": "2.2.26",
+ "@heroui/popover": "2.3.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/menu": "3.19.3",
+ "@react-stately/menu": "3.9.8",
+ "@react-types/menu": "3.10.5"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1809,16 +1793,17 @@
}
},
"node_modules/@heroui/form": {
- "version": "2.1.26",
- "resolved": "https://registry.npmjs.org/@heroui/form/-/form-2.1.26.tgz",
- "integrity": "sha512-vBlae4k59GjD36Ho8P8rL78W9djWPPejav0ocv0PjfqlEnmXLa1Wrel/3zTAOcFVI7uKBio3QdU78IIEPM82sw==",
+ "version": "2.1.27",
+ "resolved": "https://registry.npmjs.org/@heroui/form/-/form-2.1.27.tgz",
+ "integrity": "sha512-vtaBqWhxppkJeWgbAZA/A1bRj6XIudBqJWSkoqYlejtLuvaxNwxQ2Z9u7ewxN96R6QqPrQwChlknIn0NgCWlXQ==",
+ "license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11",
- "@heroui/system": "2.4.22",
- "@heroui/theme": "2.4.22",
- "@react-stately/form": "3.2.1",
- "@react-types/form": "3.7.15",
- "@react-types/shared": "3.32.0"
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/system": "2.4.23",
+ "@heroui/theme": "2.4.23",
+ "@react-stately/form": "3.2.2",
+ "@react-types/form": "3.7.16",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1828,11 +1813,12 @@
}
},
"node_modules/@heroui/framer-utils": {
- "version": "2.1.22",
- "resolved": "https://registry.npmjs.org/@heroui/framer-utils/-/framer-utils-2.1.22.tgz",
- "integrity": "sha512-f5qlpdWToEp1re9e4Wje2/FCaGWRdkqs9U80qfjFHmZFaWHBGLBX1k8G5p7aw3lOaf+pqDcC2sIldNav57Xfpw==",
+ "version": "2.1.23",
+ "resolved": "https://registry.npmjs.org/@heroui/framer-utils/-/framer-utils-2.1.23.tgz",
+ "integrity": "sha512-crLLMjRmxs8/fysFv5gwghSGcDmYYkhNfAWh1rFzDy+FRPZN4f/bPH2rt85hdApmuHbWt0QCocqsrjHxLEzrAw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/system": "2.4.22",
+ "@heroui/system": "2.4.23",
"@heroui/use-measure": "2.1.8"
},
"peerDependencies": {
@@ -1842,14 +1828,14 @@
}
},
"node_modules/@heroui/image": {
- "version": "2.2.16",
- "resolved": "https://registry.npmjs.org/@heroui/image/-/image-2.2.16.tgz",
- "integrity": "sha512-dy3c4qoCqNbJmOoDP2dyth+ennSNXoFOH0Wmd4i1TF5f20LCJSRZbEjqp9IiVetZuh+/yw+edzFMngmcqZdTNw==",
+ "version": "2.2.17",
+ "resolved": "https://registry.npmjs.org/@heroui/image/-/image-2.2.17.tgz",
+ "integrity": "sha512-B/MrWafTsiCBFnRc0hPTLDBh7APjb/lRuQf18umuh20/1n6KiQXJ7XGSjnrHaA6HQcrtMGh6mDFZDaXq9rHuoA==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-image": "2.1.12"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-image": "2.1.13"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1859,21 +1845,22 @@
}
},
"node_modules/@heroui/input": {
- "version": "2.4.27",
- "resolved": "https://registry.npmjs.org/@heroui/input/-/input-2.4.27.tgz",
- "integrity": "sha512-sLGw7r+BXyB1MllKNKmn0xLvSW0a1l+3gXefnUCXGSvI3bwrLvk3hUgbkVSJRnxSChU41yXaYDRcHL39t7yzuQ==",
+ "version": "2.4.28",
+ "resolved": "https://registry.npmjs.org/@heroui/input/-/input-2.4.28.tgz",
+ "integrity": "sha512-uaBubg814YOlVvX13yCAMqsR9HC4jg+asQdukbOvOnFtHY/d53her1BDdXhR9tMcrRTdYWQ3FoHqWbpvd5X4OQ==",
+ "license": "MIT",
"dependencies": {
- "@heroui/form": "2.1.26",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/form": "2.1.27",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/textfield": "3.18.1",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/textfield": "3.18.2",
"@react-stately/utils": "3.10.8",
- "@react-types/shared": "3.32.0",
- "@react-types/textfield": "3.12.5",
+ "@react-types/shared": "3.32.1",
+ "@react-types/textfield": "3.12.6",
"react-textarea-autosize": "^8.5.3"
},
"peerDependencies": {
@@ -1884,19 +1871,20 @@
}
},
"node_modules/@heroui/input-otp": {
- "version": "2.1.26",
- "resolved": "https://registry.npmjs.org/@heroui/input-otp/-/input-otp-2.1.26.tgz",
- "integrity": "sha512-eVVSOvwTiuVmq/hXWDYuq9ICR59R7TuWi55dDG/hd5WN6jIBJsNkmt7MmYVaSNNISyzi27hPEK43/bvK4eO9FA==",
+ "version": "2.1.27",
+ "resolved": "https://registry.npmjs.org/@heroui/input-otp/-/input-otp-2.1.27.tgz",
+ "integrity": "sha512-VUzQ1u6/0okE0eqDx/2I/8zpGItSsn7Zml01IVwGM4wY2iJeQA+uRjfP+B1ff9jO/y8n582YU4uv/ZSOmmEQ7A==",
+ "license": "MIT",
"dependencies": {
- "@heroui/form": "2.1.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/form": "2.1.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-form-reset": "2.0.1",
- "@react-aria/focus": "3.21.1",
- "@react-aria/form": "3.1.1",
- "@react-stately/form": "3.2.1",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/form": "3.1.2",
+ "@react-stately/form": "3.2.2",
"@react-stately/utils": "3.10.8",
- "@react-types/textfield": "3.12.5",
+ "@react-types/textfield": "3.12.6",
"input-otp": "1.4.1"
},
"peerDependencies": {
@@ -1907,13 +1895,14 @@
}
},
"node_modules/@heroui/kbd": {
- "version": "2.2.21",
- "resolved": "https://registry.npmjs.org/@heroui/kbd/-/kbd-2.2.21.tgz",
- "integrity": "sha512-4AY0Q+jwDbY9ehhu0Vv68QIiSCnFEMPYpaPHVLNR/9rEJDN/BS+j4FyUfxjnyjD7EKa8CNs6Y7O0VnakUXGg+g==",
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/@heroui/kbd/-/kbd-2.2.22.tgz",
+ "integrity": "sha512-PKhgwGB7i53kBuqB1YdFZsg7H9fJ8YESMRRPwRRyPSz5feMdwGidyXs+/ix7lrlYp4mlC3wtPp7L79SEyPCpBA==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/system-rsc": "2.3.19"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/system-rsc": "2.3.20"
},
"peerDependencies": {
"@heroui/theme": ">=2.4.17",
@@ -1922,17 +1911,17 @@
}
},
"node_modules/@heroui/link": {
- "version": "2.2.22",
- "resolved": "https://registry.npmjs.org/@heroui/link/-/link-2.2.22.tgz",
- "integrity": "sha512-INWjrLwlxSU5hN0qr1lCZ1GN9Tf3X8WMTUQnPmvbqbJkPgQjqfIcO2dJyUkV3X0PiSB9QbPMlfU4Sx+loFKq4g==",
+ "version": "2.2.23",
+ "resolved": "https://registry.npmjs.org/@heroui/link/-/link-2.2.23.tgz",
+ "integrity": "sha512-lObtPRLy8ModlTvJiKhczuAV/CIt31hde6xPGFYRpPsaQN1b7RgQMmai5/Iv/M8WrzFmFZRpgW75RKYIB6hHVQ==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-link": "2.2.20",
- "@react-aria/focus": "3.21.1",
- "@react-types/link": "3.6.4"
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-link": "2.2.21",
+ "@react-aria/focus": "3.21.2",
+ "@react-types/link": "3.6.5"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1942,20 +1931,21 @@
}
},
"node_modules/@heroui/listbox": {
- "version": "2.3.25",
- "resolved": "https://registry.npmjs.org/@heroui/listbox/-/listbox-2.3.25.tgz",
- "integrity": "sha512-KaLLCpf7EPhDMamjJ7dBQK2SKo8Qrlh6lTLCbZrCAuUGiBooCc80zWJa55XiDiaZhfQC/TYeoe5MMnw4yr5xmw==",
- "dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/divider": "2.2.19",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "version": "2.3.26",
+ "resolved": "https://registry.npmjs.org/@heroui/listbox/-/listbox-2.3.26.tgz",
+ "integrity": "sha512-/k3k+xyl2d+aFfT02h+/0njhsDX8vJDEkPK+dl9ETYI9Oz3L+xbHN9yIzuWjBXYkNGlQCjQ46N+0jWjhP5B4pA==",
+ "license": "MIT",
+ "dependencies": {
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/divider": "2.2.20",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-is-mobile": "2.2.12",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/listbox": "3.14.8",
- "@react-stately/list": "3.13.0",
- "@react-types/shared": "3.32.0",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/listbox": "3.15.0",
+ "@react-stately/list": "3.13.1",
+ "@react-types/shared": "3.32.1",
"@tanstack/react-virtual": "3.11.3"
},
"peerDependencies": {
@@ -1966,21 +1956,22 @@
}
},
"node_modules/@heroui/menu": {
- "version": "2.2.25",
- "resolved": "https://registry.npmjs.org/@heroui/menu/-/menu-2.2.25.tgz",
- "integrity": "sha512-BxHD/5IvmvhzM78KVrEkkcQFie0WF2yXq7FXsGa17UHBji32D38JKgGCnJMMoko1H3cG4p5ihZjT7O7NH5rdvQ==",
+ "version": "2.2.26",
+ "resolved": "https://registry.npmjs.org/@heroui/menu/-/menu-2.2.26.tgz",
+ "integrity": "sha512-raR5pXgEqizKD9GsWS1yKqTm4RPWMrSQlqXLE2zNMQk0TkDqmPVw1z5griMqu2Zt9Vf2Ectf55vh4c0DNOUGlg==",
+ "license": "MIT",
"dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/divider": "2.2.19",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/divider": "2.2.20",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-is-mobile": "2.2.12",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/menu": "3.19.1",
- "@react-stately/tree": "3.9.2",
- "@react-types/menu": "3.10.4",
- "@react-types/shared": "3.32.0"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/menu": "3.19.3",
+ "@react-stately/tree": "3.9.3",
+ "@react-types/menu": "3.10.5",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -1990,24 +1981,25 @@
}
},
"node_modules/@heroui/modal": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/modal/-/modal-2.2.23.tgz",
- "integrity": "sha512-IOvcyX9ugEmsHhtizxP/rVHGWCO+I0zWxwzcuA+BjX8jcWYrseiyoPMPsxsjSfX2tfBY4b2empT08BsWH1n+Wg==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/modal/-/modal-2.2.24.tgz",
+ "integrity": "sha512-ISbgorNqgps9iUvQdgANxprdN+6H3Sx9TrGKpuW798qjc2f0T4rTbjrEfFPT8tFx6XYF4P5j7T7m3zoKcortHQ==",
+ "license": "MIT",
"dependencies": {
"@heroui/dom-animation": "2.1.10",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-button": "2.2.19",
- "@heroui/use-aria-modal-overlay": "2.2.18",
- "@heroui/use-disclosure": "2.2.16",
- "@heroui/use-draggable": "2.1.17",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-button": "2.2.20",
+ "@heroui/use-aria-modal-overlay": "2.2.19",
+ "@heroui/use-disclosure": "2.2.17",
+ "@heroui/use-draggable": "2.1.18",
"@heroui/use-viewport-size": "2.0.1",
- "@react-aria/dialog": "3.5.29",
- "@react-aria/focus": "3.21.1",
- "@react-aria/overlays": "3.29.0",
- "@react-stately/overlays": "3.6.19"
+ "@react-aria/dialog": "3.5.31",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/overlays": "3.30.0",
+ "@react-stately/overlays": "3.6.20"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2018,21 +2010,22 @@
}
},
"node_modules/@heroui/navbar": {
- "version": "2.2.24",
- "resolved": "https://registry.npmjs.org/@heroui/navbar/-/navbar-2.2.24.tgz",
- "integrity": "sha512-fRnHJR4QbANeTCVVg+VmvItSv51rYvkcvx4YrHYmUa8X3kWy5X+0dARqtLxuXv76Uc12+w23gb5T4eXQIBL+oQ==",
+ "version": "2.2.25",
+ "resolved": "https://registry.npmjs.org/@heroui/navbar/-/navbar-2.2.25.tgz",
+ "integrity": "sha512-5fNIMDpX2htDTMb/Xgv81qw/FuNWb+0Wpfc6rkFtNYd968I7G6Kjm782QB8WQjZ8DsMugcLEYUN4lpbJHRSdwg==",
+ "license": "MIT",
"dependencies": {
"@heroui/dom-animation": "2.1.10",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-resize": "2.1.8",
"@heroui/use-scroll-position": "2.1.8",
- "@react-aria/button": "3.14.1",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/overlays": "3.29.0",
- "@react-stately/toggle": "3.9.1",
+ "@react-aria/button": "3.14.2",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/overlays": "3.30.0",
+ "@react-stately/toggle": "3.9.2",
"@react-stately/utils": "3.10.8"
},
"peerDependencies": {
@@ -2044,24 +2037,25 @@
}
},
"node_modules/@heroui/number-input": {
- "version": "2.0.17",
- "resolved": "https://registry.npmjs.org/@heroui/number-input/-/number-input-2.0.17.tgz",
- "integrity": "sha512-6beiwciRA1qR/3nKYRSPSiKx77C8Hw9ejknBKByw6rXYE4J1jVNJTlTeuqqeIWG6yeNd3SiZGoSRc3uTMPZLlg==",
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/@heroui/number-input/-/number-input-2.0.18.tgz",
+ "integrity": "sha512-28v0/0FABs+yy3CcJimcr5uNlhaJSyKt1ENMSXfzPxdN2WgIs14+6NLMT+KV7ibcJl7kmqG0uc8vuIDLVrM5bQ==",
+ "license": "MIT",
"dependencies": {
- "@heroui/button": "2.2.26",
- "@heroui/form": "2.1.26",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/button": "2.2.27",
+ "@heroui/form": "2.1.27",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/focus": "3.21.1",
- "@react-aria/i18n": "3.12.12",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/numberfield": "3.12.1",
- "@react-stately/numberfield": "3.10.1",
- "@react-types/button": "3.14.0",
- "@react-types/numberfield": "3.8.14",
- "@react-types/shared": "3.32.0"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/i18n": "3.12.13",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/numberfield": "3.12.2",
+ "@react-stately/numberfield": "3.10.2",
+ "@react-types/button": "3.14.1",
+ "@react-types/numberfield": "3.8.15",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2071,20 +2065,20 @@
}
},
"node_modules/@heroui/pagination": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/pagination/-/pagination-2.2.23.tgz",
- "integrity": "sha512-cXVijoCmTT+u5yfx8PUHKwwA9sJqVcifW9GdHYhQm6KG5um+iqal3tKtmFt+Z0KUTlSccfrM6MtlVm0HbJqR+g==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/pagination/-/pagination-2.2.24.tgz",
+ "integrity": "sha512-5ObSJ1PzB9D1CjHV0MfDNzLR69vSYpx/rNQLBo/D4g5puaAR7kkGgw5ncf5eirhdKuy9y8VGAhjwhBxO4NUdpQ==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-intersection-observer": "2.2.14",
- "@heroui/use-pagination": "2.2.17",
- "@react-aria/focus": "3.21.1",
- "@react-aria/i18n": "3.12.12",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/utils": "3.30.1",
+ "@heroui/use-pagination": "2.2.18",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/i18n": "3.12.13",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/utils": "3.31.0",
"scroll-into-view-if-needed": "3.0.10"
},
"peerDependencies": {
@@ -2095,24 +2089,25 @@
}
},
"node_modules/@heroui/popover": {
- "version": "2.3.26",
- "resolved": "https://registry.npmjs.org/@heroui/popover/-/popover-2.3.26.tgz",
- "integrity": "sha512-m+FQmP648XRbwcRyzTPaYgbQIBJX05PtwbAp7DLbjd1SHQRJjx6wAj6uhVOTeJNXTTEy8JxwMXwh4IAJO/g3Jw==",
+ "version": "2.3.27",
+ "resolved": "https://registry.npmjs.org/@heroui/popover/-/popover-2.3.27.tgz",
+ "integrity": "sha512-PmSCKQcAvKIegK59Flr9cglbsEu7OAegQMtwNIjqWHsPT18NNphimmUSJrtuD78rcfKekrZ+Uo9qJEUf0zGZDw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/button": "2.2.26",
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/button": "2.2.27",
"@heroui/dom-animation": "2.1.10",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-button": "2.2.19",
- "@heroui/use-aria-overlay": "2.0.3",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-button": "2.2.20",
+ "@heroui/use-aria-overlay": "2.0.4",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/dialog": "3.5.29",
- "@react-aria/focus": "3.21.1",
- "@react-aria/overlays": "3.29.0",
- "@react-stately/overlays": "3.6.19",
- "@react-types/overlays": "3.9.1"
+ "@react-aria/dialog": "3.5.31",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/overlays": "3.30.0",
+ "@react-stately/overlays": "3.6.20",
+ "@react-types/overlays": "3.9.2"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2123,16 +2118,16 @@
}
},
"node_modules/@heroui/progress": {
- "version": "2.2.21",
- "resolved": "https://registry.npmjs.org/@heroui/progress/-/progress-2.2.21.tgz",
- "integrity": "sha512-f/PMOai00oV7+sArWabMfkoA80EskXgXHae4lsKhyRbeki8sKXQRpVwFY5/fINJOJu5mvVXQBwv2yKupx8rogg==",
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/@heroui/progress/-/progress-2.2.22.tgz",
+ "integrity": "sha512-ch+iWEDo8d+Owz81vu4+Kj6CLfxi0nUlivQBhXeOzgU3VZbRmxJyW8S6l7wk6GyKJZxsCbYbjV1wPSjZhKJXCg==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-is-mounted": "2.1.8",
- "@react-aria/progress": "3.4.26",
- "@react-types/progress": "3.5.15"
+ "@react-aria/progress": "3.4.27",
+ "@react-types/progress": "3.5.16"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2142,20 +2137,21 @@
}
},
"node_modules/@heroui/radio": {
- "version": "2.3.26",
- "resolved": "https://registry.npmjs.org/@heroui/radio/-/radio-2.3.26.tgz",
- "integrity": "sha512-9dyKKMP79otqWg34DslO7lhrmoQncU0Po0PH2UhFhUTQMohMSXMPQhj+T+ffiYG2fmjdlYk0E2d7mZI8Hf7IeA==",
+ "version": "2.3.27",
+ "resolved": "https://registry.npmjs.org/@heroui/radio/-/radio-2.3.27.tgz",
+ "integrity": "sha512-kfDxzPR0u4++lZX2Gf6wbEe/hGbFnoXI4XLbe4e+ZDjGdBSakNuJlcDvWHVoDFZH1xXyOO9w/dHfZuE6O2VGLA==",
+ "license": "MIT",
"dependencies": {
- "@heroui/form": "2.1.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/radio": "3.12.1",
- "@react-aria/visually-hidden": "3.8.27",
- "@react-stately/radio": "3.11.1",
- "@react-types/radio": "3.9.1",
- "@react-types/shared": "3.32.0"
+ "@heroui/form": "2.1.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/radio": "3.12.2",
+ "@react-aria/visually-hidden": "3.8.28",
+ "@react-stately/radio": "3.11.2",
+ "@react-types/radio": "3.9.2",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2165,60 +2161,61 @@
}
},
"node_modules/@heroui/react": {
- "version": "2.8.4",
- "resolved": "https://registry.npmjs.org/@heroui/react/-/react-2.8.4.tgz",
- "integrity": "sha512-qIrLbVY9vtwk1w4udnbuaE4X5JxbA2rEUgZGxshAao5TNHPsnVrd2NqGLJvSEqP9c7XA4N5c0PCtYJ7PeiM4Lg==",
- "dependencies": {
- "@heroui/accordion": "2.2.23",
- "@heroui/alert": "2.2.26",
- "@heroui/autocomplete": "2.3.28",
- "@heroui/avatar": "2.2.21",
- "@heroui/badge": "2.2.16",
- "@heroui/breadcrumbs": "2.2.21",
- "@heroui/button": "2.2.26",
- "@heroui/calendar": "2.2.26",
- "@heroui/card": "2.2.24",
- "@heroui/checkbox": "2.3.26",
- "@heroui/chip": "2.2.21",
- "@heroui/code": "2.2.20",
- "@heroui/date-input": "2.3.26",
- "@heroui/date-picker": "2.3.27",
- "@heroui/divider": "2.2.19",
- "@heroui/drawer": "2.2.23",
- "@heroui/dropdown": "2.3.26",
- "@heroui/form": "2.1.26",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/image": "2.2.16",
- "@heroui/input": "2.4.27",
- "@heroui/input-otp": "2.1.26",
- "@heroui/kbd": "2.2.21",
- "@heroui/link": "2.2.22",
- "@heroui/listbox": "2.3.25",
- "@heroui/menu": "2.2.25",
- "@heroui/modal": "2.2.23",
- "@heroui/navbar": "2.2.24",
- "@heroui/number-input": "2.0.17",
- "@heroui/pagination": "2.2.23",
- "@heroui/popover": "2.3.26",
- "@heroui/progress": "2.2.21",
- "@heroui/radio": "2.3.26",
- "@heroui/ripple": "2.2.19",
- "@heroui/scroll-shadow": "2.3.17",
- "@heroui/select": "2.4.27",
- "@heroui/skeleton": "2.2.16",
- "@heroui/slider": "2.4.23",
- "@heroui/snippet": "2.2.27",
- "@heroui/spacer": "2.2.20",
- "@heroui/spinner": "2.2.23",
- "@heroui/switch": "2.2.23",
- "@heroui/system": "2.4.22",
- "@heroui/table": "2.2.26",
- "@heroui/tabs": "2.2.23",
- "@heroui/theme": "2.4.22",
- "@heroui/toast": "2.0.16",
- "@heroui/tooltip": "2.2.23",
- "@heroui/user": "2.2.21",
- "@react-aria/visually-hidden": "3.8.27"
+ "version": "2.8.5",
+ "resolved": "https://registry.npmjs.org/@heroui/react/-/react-2.8.5.tgz",
+ "integrity": "sha512-cGiG0/DCPsYopa+zACFDmtx9LQDfY5KU58Tt82ELANhmKRyYAesAq9tSa01dG+MjOXUTUR6cxp5i5RmRn8rPYg==",
+ "license": "MIT",
+ "dependencies": {
+ "@heroui/accordion": "2.2.24",
+ "@heroui/alert": "2.2.27",
+ "@heroui/autocomplete": "2.3.29",
+ "@heroui/avatar": "2.2.22",
+ "@heroui/badge": "2.2.17",
+ "@heroui/breadcrumbs": "2.2.22",
+ "@heroui/button": "2.2.27",
+ "@heroui/calendar": "2.2.27",
+ "@heroui/card": "2.2.25",
+ "@heroui/checkbox": "2.3.27",
+ "@heroui/chip": "2.2.22",
+ "@heroui/code": "2.2.21",
+ "@heroui/date-input": "2.3.27",
+ "@heroui/date-picker": "2.3.28",
+ "@heroui/divider": "2.2.20",
+ "@heroui/drawer": "2.2.24",
+ "@heroui/dropdown": "2.3.27",
+ "@heroui/form": "2.1.27",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/image": "2.2.17",
+ "@heroui/input": "2.4.28",
+ "@heroui/input-otp": "2.1.27",
+ "@heroui/kbd": "2.2.22",
+ "@heroui/link": "2.2.23",
+ "@heroui/listbox": "2.3.26",
+ "@heroui/menu": "2.2.26",
+ "@heroui/modal": "2.2.24",
+ "@heroui/navbar": "2.2.25",
+ "@heroui/number-input": "2.0.18",
+ "@heroui/pagination": "2.2.24",
+ "@heroui/popover": "2.3.27",
+ "@heroui/progress": "2.2.22",
+ "@heroui/radio": "2.3.27",
+ "@heroui/ripple": "2.2.20",
+ "@heroui/scroll-shadow": "2.3.18",
+ "@heroui/select": "2.4.28",
+ "@heroui/skeleton": "2.2.17",
+ "@heroui/slider": "2.4.24",
+ "@heroui/snippet": "2.2.28",
+ "@heroui/spacer": "2.2.21",
+ "@heroui/spinner": "2.2.24",
+ "@heroui/switch": "2.2.24",
+ "@heroui/system": "2.4.23",
+ "@heroui/table": "2.2.27",
+ "@heroui/tabs": "2.2.24",
+ "@heroui/theme": "2.4.23",
+ "@heroui/toast": "2.0.17",
+ "@heroui/tooltip": "2.2.24",
+ "@heroui/user": "2.2.22",
+ "@react-aria/visually-hidden": "3.8.28"
},
"peerDependencies": {
"framer-motion": ">=11.5.6 || >=12.0.0-alpha.1",
@@ -2236,26 +2233,26 @@
}
},
"node_modules/@heroui/react-utils": {
- "version": "2.1.13",
- "resolved": "https://registry.npmjs.org/@heroui/react-utils/-/react-utils-2.1.13.tgz",
- "integrity": "sha512-gJ89YL5UCilKLldJ4In0ZLzngg+tYiDuo1tQ7lf2aJB7SQMrZmEutsKrGCdvn/c2CSz5cRryo0H6JZCDsji3qg==",
+ "version": "2.1.14",
+ "resolved": "https://registry.npmjs.org/@heroui/react-utils/-/react-utils-2.1.14.tgz",
+ "integrity": "sha512-hhKklYKy9sRH52C9A8P0jWQ79W4MkIvOnKBIuxEMHhigjfracy0o0lMnAUdEsJni4oZKVJYqNGdQl+UVgcmeDA==",
"license": "MIT",
"dependencies": {
"@heroui/react-rsc-utils": "2.1.9",
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/ripple": {
- "version": "2.2.19",
- "resolved": "https://registry.npmjs.org/@heroui/ripple/-/ripple-2.2.19.tgz",
- "integrity": "sha512-nmeu1vDehmv+tn0kfo3fpeCZ9fyTp/DD9dF8qJeYhBD3CR7J/LPaGXvU6M1t8WwV7RFEA5pjmsmA3jHWjwdAJQ==",
+ "version": "2.2.20",
+ "resolved": "https://registry.npmjs.org/@heroui/ripple/-/ripple-2.2.20.tgz",
+ "integrity": "sha512-3+fBx5jO7l8SE84ZG0vB5BOxKKr23Ay180AeIWcf8m8lhXXd4iShVz2S+keW9PewqVHv52YBaxLoSVQ93Ddcxw==",
"license": "MIT",
"dependencies": {
"@heroui/dom-animation": "2.1.10",
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2266,13 +2263,14 @@
}
},
"node_modules/@heroui/scroll-shadow": {
- "version": "2.3.17",
- "resolved": "https://registry.npmjs.org/@heroui/scroll-shadow/-/scroll-shadow-2.3.17.tgz",
- "integrity": "sha512-3h8SJNLjHt3CQmDWNnZ2MJTt0rXuJztV0KddZrwNlZgI54W6PeNe6JmVGX8xSHhrk72jsVz7FmSQNiPvqs8/qQ==",
+ "version": "2.3.18",
+ "resolved": "https://registry.npmjs.org/@heroui/scroll-shadow/-/scroll-shadow-2.3.18.tgz",
+ "integrity": "sha512-P/nLQbFPOlbTLRjO2tKoZCljJtU7iq81wsp7C8wZ1rZI1RmkTx3UgLLeoFWgmAp3ZlUIYgaewTnejt6eRx+28w==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-data-scroll-overflow": "2.2.12"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-data-scroll-overflow": "2.2.13"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2282,29 +2280,30 @@
}
},
"node_modules/@heroui/select": {
- "version": "2.4.27",
- "resolved": "https://registry.npmjs.org/@heroui/select/-/select-2.4.27.tgz",
- "integrity": "sha512-CgMqVWYWcdHNOnSeMMraXFBXFsToyxZ9sSwszG3YlhGwaaj0yZonquMYgl5vHCnFLkGXwggNczl+vdDErLEsbw==",
- "dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/form": "2.1.26",
- "@heroui/listbox": "2.3.25",
- "@heroui/popover": "2.3.26",
- "@heroui/react-utils": "2.1.13",
- "@heroui/scroll-shadow": "2.3.17",
+ "version": "2.4.28",
+ "resolved": "https://registry.npmjs.org/@heroui/select/-/select-2.4.28.tgz",
+ "integrity": "sha512-Dg3jv248Tu+g2WJMWseDjWA0FAG356elZIcE0OufVAIzQoWjLhgbkTqY9ths0HkcHy0nDwQWvyrrwkbif1kNqA==",
+ "license": "MIT",
+ "dependencies": {
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/form": "2.1.27",
+ "@heroui/listbox": "2.3.26",
+ "@heroui/popover": "2.3.27",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/scroll-shadow": "2.3.18",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/spinner": "2.2.23",
- "@heroui/use-aria-button": "2.2.19",
- "@heroui/use-aria-multiselect": "2.4.18",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/spinner": "2.2.24",
+ "@heroui/use-aria-button": "2.2.20",
+ "@heroui/use-aria-multiselect": "2.4.19",
"@heroui/use-form-reset": "2.0.1",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/focus": "3.21.1",
- "@react-aria/form": "3.1.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/overlays": "3.29.0",
- "@react-aria/visually-hidden": "3.8.27",
- "@react-types/shared": "3.32.0"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/form": "3.1.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/overlays": "3.30.0",
+ "@react-aria/visually-hidden": "3.8.28",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2324,19 +2323,19 @@
}
},
"node_modules/@heroui/shared-utils": {
- "version": "2.1.11",
- "resolved": "https://registry.npmjs.org/@heroui/shared-utils/-/shared-utils-2.1.11.tgz",
- "integrity": "sha512-2zKVjCc9EMMk05peVpI1Q+vFf+dzqyVdf1DBCJ2SNQEUF7E+sRe1FvhHvPoye3TIFD/Fr6b3kZ6vzjxL9GxB6A==",
+ "version": "2.1.12",
+ "resolved": "https://registry.npmjs.org/@heroui/shared-utils/-/shared-utils-2.1.12.tgz",
+ "integrity": "sha512-0iCnxVAkIPtrHQo26Qa5g0UTqMTpugTbClNOrEPsrQuyRAq7Syux998cPwGlneTfB5E5xcU3LiEdA9GUyeK2cQ==",
"hasInstallScript": true,
"license": "MIT"
},
"node_modules/@heroui/skeleton": {
- "version": "2.2.16",
- "resolved": "https://registry.npmjs.org/@heroui/skeleton/-/skeleton-2.2.16.tgz",
- "integrity": "sha512-rIerwmS5uiOpvJUT37iyuiXUJzesUE/HgSv4gH1tTxsrjgpkRRrgr/zANdbCd0wpSIi4PPNHWq51n0CMrQGUTg==",
+ "version": "2.2.17",
+ "resolved": "https://registry.npmjs.org/@heroui/skeleton/-/skeleton-2.2.17.tgz",
+ "integrity": "sha512-WDzwODs+jW+GgMr3oOdLtXXfv8ScXuuWgxN2iPWWyDBcQYXX2XCKGVjCpM5lSKf1UG4Yp3iXuqKzH1m+E+m7kg==",
"license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2346,19 +2345,20 @@
}
},
"node_modules/@heroui/slider": {
- "version": "2.4.23",
- "resolved": "https://registry.npmjs.org/@heroui/slider/-/slider-2.4.23.tgz",
- "integrity": "sha512-cohy9+wojimHQ/5AShj4Jt7aK1d8fGFP52l2gLELP02eo6CIpW8Ib213t3P1H86bMiBwRec5yi28zr8lHASftA==",
+ "version": "2.4.24",
+ "resolved": "https://registry.npmjs.org/@heroui/slider/-/slider-2.4.24.tgz",
+ "integrity": "sha512-GKdqFTCe9O8tT3HEZ/W4TEWkz7ADtUBzuOBXw779Oqqf02HNg9vSnISlNvI6G0ymYjY42EanwA+dChHbPBIVJw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/tooltip": "2.2.23",
- "@react-aria/focus": "3.21.1",
- "@react-aria/i18n": "3.12.12",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/slider": "3.8.1",
- "@react-aria/visually-hidden": "3.8.27",
- "@react-stately/slider": "3.7.1"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/tooltip": "2.2.24",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/i18n": "3.12.13",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/slider": "3.8.2",
+ "@react-aria/visually-hidden": "3.8.28",
+ "@react-stately/slider": "3.7.2"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2368,17 +2368,18 @@
}
},
"node_modules/@heroui/snippet": {
- "version": "2.2.27",
- "resolved": "https://registry.npmjs.org/@heroui/snippet/-/snippet-2.2.27.tgz",
- "integrity": "sha512-YCiZjurbK/++I8iDjmqJ/ROt+mdy5825Krc8gagdwUR7Z7jXBveFWjgvgkfg8EA/sJlDpMw9xIzubm5KUCEzfA==",
+ "version": "2.2.28",
+ "resolved": "https://registry.npmjs.org/@heroui/snippet/-/snippet-2.2.28.tgz",
+ "integrity": "sha512-UfC/ZcYpmOutAcazxkizJWlhvqzr077szDyQ85thyUC5yhuRRLrsOHDIhyLWQrEKIcWw5+CaEGS2VLwAFlgfzw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/button": "2.2.26",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/button": "2.2.27",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/tooltip": "2.2.23",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/tooltip": "2.2.24",
"@heroui/use-clipboard": "2.1.9",
- "@react-aria/focus": "3.21.1"
+ "@react-aria/focus": "3.21.2"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2389,13 +2390,14 @@
}
},
"node_modules/@heroui/spacer": {
- "version": "2.2.20",
- "resolved": "https://registry.npmjs.org/@heroui/spacer/-/spacer-2.2.20.tgz",
- "integrity": "sha512-rXqXcUvTxVQoob+VsG7AgalFwEC38S9zzyZ0sxy7cGUJEdfLjWG19g36lNdtV+LOk+Gj9FiyKvUGBFJiqrId6w==",
+ "version": "2.2.21",
+ "resolved": "https://registry.npmjs.org/@heroui/spacer/-/spacer-2.2.21.tgz",
+ "integrity": "sha512-WKD+BlgHfqJ8lrkkg/6cvzSWNsbRjzr24HpZnv6cDeWX95wVLTOco9HVR8ohwStMqwu5zYeUd1bw6yCDVTo53w==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/system-rsc": "2.3.19"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/system-rsc": "2.3.20"
},
"peerDependencies": {
"@heroui/theme": ">=2.4.17",
@@ -2404,13 +2406,14 @@
}
},
"node_modules/@heroui/spinner": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/spinner/-/spinner-2.2.23.tgz",
- "integrity": "sha512-qmQ/OanEvvtyG0gtuDP3UmjvBAESr++F1S05LRlY3w+TSzFUh6vfxviN9M/cBnJYg6QuwfmzlltqmDXnV8/fxw==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/spinner/-/spinner-2.2.24.tgz",
+ "integrity": "sha512-HfKkFffrIN9UdJY2UaenlB8xEwIzolCCFCwU0j3wVnLMX+Dw+ixwaELdAxX14Z6gPQYec6AROKetkWWit14rlw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11",
- "@heroui/system": "2.4.22",
- "@heroui/system-rsc": "2.3.19"
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/system": "2.4.23",
+ "@heroui/system-rsc": "2.3.20"
},
"peerDependencies": {
"@heroui/theme": ">=2.4.17",
@@ -2419,19 +2422,19 @@
}
},
"node_modules/@heroui/switch": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/switch/-/switch-2.2.23.tgz",
- "integrity": "sha512-7ZhLKmdFPZN/MMoSOVxX8VQVnx3EngZ1C3fARbQGiOoFXElP68VKagtQHCFSaWyjOeDQc6OdBe+FKDs3g47xrQ==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/switch/-/switch-2.2.24.tgz",
+ "integrity": "sha512-RbV+MECncBKsthX3D8r+CGoQRu8Q3AAYUEdm/7ody6+bMZFmBilm695yLiqziMI33Ct/WQ0WkpvrTClIcmxU/A==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/switch": "3.7.7",
- "@react-aria/visually-hidden": "3.8.27",
- "@react-stately/toggle": "3.9.1"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/switch": "3.7.8",
+ "@react-aria/visually-hidden": "3.8.28",
+ "@react-stately/toggle": "3.9.2"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2441,15 +2444,16 @@
}
},
"node_modules/@heroui/system": {
- "version": "2.4.22",
- "resolved": "https://registry.npmjs.org/@heroui/system/-/system-2.4.22.tgz",
- "integrity": "sha512-+RVuAxjS2QWyLdYTPxv0IfMjhsxa1GKRSwvpii13bOGEQclwwfaNL2MvBbTt1Mzu/LHaX7kyj0THbZnlOplZOA==",
+ "version": "2.4.23",
+ "resolved": "https://registry.npmjs.org/@heroui/system/-/system-2.4.23.tgz",
+ "integrity": "sha512-kgYvfkIOQKM6CCBIlNSE2tXMtNrS1mvEUbvwnaU3pEYbMlceBtwA5v7SlpaJy/5dqKcTbfmVMUCmXnY/Kw4vaQ==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
- "@heroui/system-rsc": "2.3.19",
- "@react-aria/i18n": "3.12.12",
- "@react-aria/overlays": "3.29.0",
- "@react-aria/utils": "3.30.1"
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/system-rsc": "2.3.20",
+ "@react-aria/i18n": "3.12.13",
+ "@react-aria/overlays": "3.30.0",
+ "@react-aria/utils": "3.31.0"
},
"peerDependencies": {
"framer-motion": ">=11.5.6 || >=12.0.0-alpha.1",
@@ -2458,11 +2462,12 @@
}
},
"node_modules/@heroui/system-rsc": {
- "version": "2.3.19",
- "resolved": "https://registry.npmjs.org/@heroui/system-rsc/-/system-rsc-2.3.19.tgz",
- "integrity": "sha512-ocjro5dYmDhRsxNAB/316zO6eqfKVjFDbnYnc+wlcjZXpw49A+LhE13xlo7LI+W2AHWh5NHcpo3+2O3G6WQxHA==",
+ "version": "2.3.20",
+ "resolved": "https://registry.npmjs.org/@heroui/system-rsc/-/system-rsc-2.3.20.tgz",
+ "integrity": "sha512-uZwQErEud/lAX7KRXEdsDcGLyygBffHcgnbCDrLvmTf3cyBE84YziG7AjM7Ts8ZcrF+wBXX4+a1IqnKGlsGEdQ==",
+ "license": "MIT",
"dependencies": {
- "@react-types/shared": "3.32.0",
+ "@react-types/shared": "3.32.1",
"clsx": "^1.2.1"
},
"peerDependencies": {
@@ -2474,28 +2479,30 @@
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/clsx/-/clsx-1.2.1.tgz",
"integrity": "sha512-EcR6r5a8bj6pu3ycsa/E/cKVGuTgZJZdsyUYHOksG/UHIiKfjxzRxYJpyVBwYaQeOvghal9fcc4PidlgzugAQg==",
+ "license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/@heroui/table": {
- "version": "2.2.26",
- "resolved": "https://registry.npmjs.org/@heroui/table/-/table-2.2.26.tgz",
- "integrity": "sha512-Y0NaXdoKH7MlgkQN892d23o2KCRKuPLZ4bsdPJFBDOJ9yZWEKKsmQ4+k5YEOjKF34oPSX75XJAjvzqldBuRqcQ==",
+ "version": "2.2.27",
+ "resolved": "https://registry.npmjs.org/@heroui/table/-/table-2.2.27.tgz",
+ "integrity": "sha512-XFmbEgBzf89WH1VzmnwENxVzK4JrHV5jdlzyM3snNhk8uDSjfecnUY33qR62cpdZsKiCFFcYf7kQPkCnJGnD0Q==",
+ "license": "MIT",
"dependencies": {
- "@heroui/checkbox": "2.3.26",
- "@heroui/react-utils": "2.1.13",
+ "@heroui/checkbox": "2.3.27",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/spacer": "2.2.20",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/table": "3.17.7",
- "@react-aria/visually-hidden": "3.8.27",
- "@react-stately/table": "3.15.0",
- "@react-stately/virtualizer": "4.4.3",
- "@react-types/grid": "3.3.5",
- "@react-types/table": "3.13.3",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/spacer": "2.2.21",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/table": "3.17.8",
+ "@react-aria/visually-hidden": "3.8.28",
+ "@react-stately/table": "3.15.1",
+ "@react-stately/virtualizer": "4.4.4",
+ "@react-types/grid": "3.3.6",
+ "@react-types/table": "3.13.4",
"@tanstack/react-virtual": "3.11.3"
},
"peerDependencies": {
@@ -2506,35 +2513,37 @@
}
},
"node_modules/@heroui/tabs": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/tabs/-/tabs-2.2.23.tgz",
- "integrity": "sha512-OIvWR0vOlaGS2Z0F38O3xx4E5VsNJtz/FCUTPuNjU6eTbvKvRtwj9kHq+uDSHWziHH3OrpnTHi9xuEGHyUh4kg==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/tabs/-/tabs-2.2.24.tgz",
+ "integrity": "sha512-2SfxzAXe1t2Zz0v16kqkb7DR2wW86XoDwRUpLex6zhEN4/uT5ILeynxIVSUyAvVN3z95cnaQt0XPQBfUjAIQhQ==",
+ "license": "MIT",
"dependencies": {
- "@heroui/aria-utils": "2.2.23",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/aria-utils": "2.2.24",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
"@heroui/use-is-mounted": "2.1.8",
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/tabs": "3.10.7",
- "@react-stately/tabs": "3.8.5",
- "@react-types/shared": "3.32.0",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/tabs": "3.10.8",
+ "@react-stately/tabs": "3.8.6",
+ "@react-types/shared": "3.32.1",
"scroll-into-view-if-needed": "3.0.10"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
- "@heroui/theme": ">=2.4.17",
+ "@heroui/theme": ">=2.4.22",
"framer-motion": ">=11.5.6 || >=12.0.0-alpha.1",
"react": ">=18 || >=19.0.0-rc.0",
"react-dom": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/theme": {
- "version": "2.4.22",
- "resolved": "https://registry.npmjs.org/@heroui/theme/-/theme-2.4.22.tgz",
- "integrity": "sha512-naKFQBfp7YwhKGmh7rKCC5EBjV7kdozX21fyGHucDYa6GeFfIKVqXILgZ94HZlfp+LGJfV6U+BuKIflevf0Y+w==",
+ "version": "2.4.23",
+ "resolved": "https://registry.npmjs.org/@heroui/theme/-/theme-2.4.23.tgz",
+ "integrity": "sha512-5hoaRWG+/d/t06p7Pfhz70DUP0Uggjids7/z2Ytgup4A8KAOvDIXxvHUDlk6rRHKiN1wDMNA5H+EWsSXB/m03Q==",
+ "license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11",
+ "@heroui/shared-utils": "2.1.12",
"clsx": "^1.2.1",
"color": "^4.2.3",
"color2k": "^2.0.3",
@@ -2556,18 +2565,29 @@
"node": ">=6"
}
},
+ "node_modules/@heroui/theme/node_modules/tailwind-merge": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.3.1.tgz",
+ "integrity": "sha512-gBXpgUm/3rp1lMZZrM/w7D8GKqshif0zAymAhbCyIt8KMe+0v9DQ7cdYLR4FHH/cKpdTXb+A/tKKU3eolfsI+g==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/dcastil"
+ }
+ },
"node_modules/@heroui/toast": {
- "version": "2.0.16",
- "resolved": "https://registry.npmjs.org/@heroui/toast/-/toast-2.0.16.tgz",
- "integrity": "sha512-sG6sU7oN+8pd6pQZJREC+1y9iji+Zb/KtiOQrnAksRfW0KAZSxhgNnt6VP8KvbZ+TKkmphVjDcAwiWgH5m8Uqg==",
+ "version": "2.0.17",
+ "resolved": "https://registry.npmjs.org/@heroui/toast/-/toast-2.0.17.tgz",
+ "integrity": "sha512-w3TaA1DYLcwdDjpwf9xw5YSr+odo9GGHsObsrMmLEQDS0JQhmKyK5sQqXUzb9d27EC6KVwGjeVg0hUHYQBK2JA==",
+ "license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
+ "@heroui/react-utils": "2.1.14",
"@heroui/shared-icons": "2.1.10",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/spinner": "2.2.23",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/spinner": "2.2.24",
"@heroui/use-is-mobile": "2.2.12",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/toast": "3.0.7",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/toast": "3.0.8",
"@react-stately/toast": "3.1.2"
},
"peerDependencies": {
@@ -2579,22 +2599,23 @@
}
},
"node_modules/@heroui/tooltip": {
- "version": "2.2.23",
- "resolved": "https://registry.npmjs.org/@heroui/tooltip/-/tooltip-2.2.23.tgz",
- "integrity": "sha512-tV9qXMJQEzWOhS4Fq/efbRK138e/72BftFz8HaszuMILDBZjgQrzW3W7Gmu+nHI+fcQMqmToUuMq8bCdjp/h9A==",
+ "version": "2.2.24",
+ "resolved": "https://registry.npmjs.org/@heroui/tooltip/-/tooltip-2.2.24.tgz",
+ "integrity": "sha512-H+0STFea2/Z4obDdk+ZPoDzJxJQHIWGSjnW/jieThJbJ5zow/qBfcg5DqzIdiC+FCJ4dDD5jEDZ4W4H/fQUKQA==",
+ "license": "MIT",
"dependencies": {
- "@heroui/aria-utils": "2.2.23",
+ "@heroui/aria-utils": "2.2.24",
"@heroui/dom-animation": "2.1.10",
- "@heroui/framer-utils": "2.1.22",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@heroui/use-aria-overlay": "2.0.3",
+ "@heroui/framer-utils": "2.1.23",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@heroui/use-aria-overlay": "2.0.4",
"@heroui/use-safe-layout-effect": "2.1.8",
- "@react-aria/overlays": "3.29.0",
- "@react-aria/tooltip": "3.8.7",
- "@react-stately/tooltip": "3.5.7",
- "@react-types/overlays": "3.9.1",
- "@react-types/tooltip": "3.4.20"
+ "@react-aria/overlays": "3.30.0",
+ "@react-aria/tooltip": "3.8.8",
+ "@react-stately/tooltip": "3.5.8",
+ "@react-types/overlays": "3.9.2",
+ "@react-types/tooltip": "3.4.21"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2605,62 +2626,64 @@
}
},
"node_modules/@heroui/use-aria-accordion": {
- "version": "2.2.17",
- "resolved": "https://registry.npmjs.org/@heroui/use-aria-accordion/-/use-aria-accordion-2.2.17.tgz",
- "integrity": "sha512-h3jGabUdqDXXThjN5C9UK2DPQAm5g9zm20jBDiyK6emmavGV7pO8k+2Guga48qx4cGDSq4+aA++0i2mqam1AKw==",
+ "version": "2.2.18",
+ "resolved": "https://registry.npmjs.org/@heroui/use-aria-accordion/-/use-aria-accordion-2.2.18.tgz",
+ "integrity": "sha512-qjRkae2p4MFDrNqO6v6YCor0BtVi3idMd1dsI82XM16bxLQ2stqG4Ajrg60xV0AN+WKZUq10oetqkJuY6MYg0w==",
+ "license": "MIT",
"dependencies": {
- "@react-aria/button": "3.14.1",
- "@react-aria/focus": "3.21.1",
- "@react-aria/selection": "3.25.1",
- "@react-stately/tree": "3.9.2",
+ "@react-aria/button": "3.14.2",
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/selection": "3.26.0",
+ "@react-stately/tree": "3.9.3",
"@react-types/accordion": "3.0.0-alpha.26",
- "@react-types/shared": "3.32.0"
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-aria-button": {
- "version": "2.2.19",
- "resolved": "https://registry.npmjs.org/@heroui/use-aria-button/-/use-aria-button-2.2.19.tgz",
- "integrity": "sha512-+3f8zpswFHWs50pNmsHTCXGsIGWyZw/1/hINVPjB9RakjqLwYx9Sz0QCshsAJgGklVbOUkHGtrMwfsKnTeQ82Q==",
+ "version": "2.2.20",
+ "resolved": "https://registry.npmjs.org/@heroui/use-aria-button/-/use-aria-button-2.2.20.tgz",
+ "integrity": "sha512-Y0Bmze/pxEACKsHMbA1sYA3ghMJ+9fSnWvZBwlUxqiVXDEy2YrrK2JmXEgsuHGQdKD9RqU2Od3V4VqIIiaHiMA==",
"license": "MIT",
"dependencies": {
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/utils": "3.30.1",
- "@react-types/button": "3.14.0",
- "@react-types/shared": "3.32.0"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/utils": "3.31.0",
+ "@react-types/button": "3.14.1",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-aria-link": {
- "version": "2.2.20",
- "resolved": "https://registry.npmjs.org/@heroui/use-aria-link/-/use-aria-link-2.2.20.tgz",
- "integrity": "sha512-lbMhpi5mP7wn3m8TDU2YW2oQ2psqgJodSznXha1k2H8XVsZkPhOPAogUhhR0cleah4Y+KCqXJWupqzmdfTsgyw==",
+ "version": "2.2.21",
+ "resolved": "https://registry.npmjs.org/@heroui/use-aria-link/-/use-aria-link-2.2.21.tgz",
+ "integrity": "sha512-sG2rUutT/E/FYguzZmg715cXcM6+ue9wRfs2Gi6epWJwIVpS51uEagJKY0wIutJDfuCPfQ9AuxXfJek4CnxjKw==",
"license": "MIT",
"dependencies": {
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/utils": "3.30.1",
- "@react-types/link": "3.6.4",
- "@react-types/shared": "3.32.0"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/utils": "3.31.0",
+ "@react-types/link": "3.6.5",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-aria-modal-overlay": {
- "version": "2.2.18",
- "resolved": "https://registry.npmjs.org/@heroui/use-aria-modal-overlay/-/use-aria-modal-overlay-2.2.18.tgz",
- "integrity": "sha512-26Vf7uxMYGcs5eZxwZr+w/HaVlTHXTlGKkR5tudmsDGbVULfQW5zX428fYatjYoVfH2zMZWK91USYP/jUWVyxg==",
+ "version": "2.2.19",
+ "resolved": "https://registry.npmjs.org/@heroui/use-aria-modal-overlay/-/use-aria-modal-overlay-2.2.19.tgz",
+ "integrity": "sha512-MPvszNrt+1DauiSyOAwb0pKbYahpEVi9hrmidnO8cd1SA7B2ES0fNRBeNMAwcaeR/Nzsv+Cw1hRXt3egwqi0lg==",
+ "license": "MIT",
"dependencies": {
- "@heroui/use-aria-overlay": "2.0.3",
- "@react-aria/overlays": "3.29.0",
- "@react-aria/utils": "3.30.1",
- "@react-stately/overlays": "3.6.19"
+ "@heroui/use-aria-overlay": "2.0.4",
+ "@react-aria/overlays": "3.30.0",
+ "@react-aria/utils": "3.31.0",
+ "@react-stately/overlays": "3.6.20"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0",
@@ -2668,23 +2691,24 @@
}
},
"node_modules/@heroui/use-aria-multiselect": {
- "version": "2.4.18",
- "resolved": "https://registry.npmjs.org/@heroui/use-aria-multiselect/-/use-aria-multiselect-2.4.18.tgz",
- "integrity": "sha512-b//0jJElrrxrqMuU1+W5H/P4xKzRsl5/uTFGclpdg8+mBlVtbfak32YhD9EEfFRDR7hHs116ezVmxjkEwry/GQ==",
- "dependencies": {
- "@react-aria/i18n": "3.12.12",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/label": "3.7.21",
- "@react-aria/listbox": "3.14.8",
- "@react-aria/menu": "3.19.1",
- "@react-aria/selection": "3.25.1",
- "@react-aria/utils": "3.30.1",
- "@react-stately/form": "3.2.1",
- "@react-stately/list": "3.13.0",
- "@react-stately/menu": "3.9.7",
- "@react-types/button": "3.14.0",
- "@react-types/overlays": "3.9.1",
- "@react-types/shared": "3.32.0"
+ "version": "2.4.19",
+ "resolved": "https://registry.npmjs.org/@heroui/use-aria-multiselect/-/use-aria-multiselect-2.4.19.tgz",
+ "integrity": "sha512-RLDSpOLJqNESn6OK/zKuyTriK6sqMby76si/4kTMCs+4lmMPOyFKP3fREywu+zyJjRUCuZPa6xYuN2OHKQRDow==",
+ "license": "MIT",
+ "dependencies": {
+ "@react-aria/i18n": "3.12.13",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/label": "3.7.22",
+ "@react-aria/listbox": "3.15.0",
+ "@react-aria/menu": "3.19.3",
+ "@react-aria/selection": "3.26.0",
+ "@react-aria/utils": "3.31.0",
+ "@react-stately/form": "3.2.2",
+ "@react-stately/list": "3.13.1",
+ "@react-stately/menu": "3.9.8",
+ "@react-types/button": "3.14.1",
+ "@react-types/overlays": "3.9.2",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0",
@@ -2692,14 +2716,15 @@
}
},
"node_modules/@heroui/use-aria-overlay": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/@heroui/use-aria-overlay/-/use-aria-overlay-2.0.3.tgz",
- "integrity": "sha512-R5cZh+Rg/X7iQpxNhWJkzsbthMVbxqyYkXx5ry0F2zy05viwnXKCSFQqbdKCU2f5QlEnv2oDd6KsK1AXCePG4g==",
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@heroui/use-aria-overlay/-/use-aria-overlay-2.0.4.tgz",
+ "integrity": "sha512-iv+y0+OvQd1eWiZftPI07JE3c5AdK85W5k3rDlhk5MFEI3dllkIpu8z8zLh3ge/BQGFiGkySVC5iXl8w84gMUQ==",
+ "license": "MIT",
"dependencies": {
- "@react-aria/focus": "3.21.1",
- "@react-aria/interactions": "3.25.5",
- "@react-aria/overlays": "3.29.0",
- "@react-types/shared": "3.32.0"
+ "@react-aria/focus": "3.21.2",
+ "@react-aria/interactions": "3.25.6",
+ "@react-aria/overlays": "3.30.0",
+ "@react-types/shared": "3.32.1"
},
"peerDependencies": {
"react": ">=18",
@@ -2710,6 +2735,7 @@
"version": "2.1.8",
"resolved": "https://registry.npmjs.org/@heroui/use-callback-ref/-/use-callback-ref-2.1.8.tgz",
"integrity": "sha512-D1JDo9YyFAprYpLID97xxQvf86NvyWLay30BeVVZT9kWmar6O9MbCRc7ACi7Ngko60beonj6+amTWkTm7QuY/Q==",
+ "license": "MIT",
"dependencies": {
"@heroui/use-safe-layout-effect": "2.1.8"
},
@@ -2721,28 +2747,31 @@
"version": "2.1.9",
"resolved": "https://registry.npmjs.org/@heroui/use-clipboard/-/use-clipboard-2.1.9.tgz",
"integrity": "sha512-lkBq5RpXHiPvk1BXKJG8gMM0f7jRMIGnxAXDjAUzZyXKBuWLoM+XlaUWmZHtmkkjVFMX1L4vzA+vxi9rZbenEQ==",
+ "license": "MIT",
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-data-scroll-overflow": {
- "version": "2.2.12",
- "resolved": "https://registry.npmjs.org/@heroui/use-data-scroll-overflow/-/use-data-scroll-overflow-2.2.12.tgz",
- "integrity": "sha512-An+P5Tg8BtLpw5Ozi/og7s8cThduVMkCOvxMcl3izyYSFa826SIhAI99FyaS7Xb2zkwM/2ZMbK3W7DKt6w8fkg==",
+ "version": "2.2.13",
+ "resolved": "https://registry.npmjs.org/@heroui/use-data-scroll-overflow/-/use-data-scroll-overflow-2.2.13.tgz",
+ "integrity": "sha512-zboLXO1pgYdzMUahDcVt5jf+l1jAQ/D9dFqr7AxWLfn6tn7/EgY0f6xIrgWDgJnM0U3hKxVeY13pAeB4AFTqTw==",
+ "license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-disclosure": {
- "version": "2.2.16",
- "resolved": "https://registry.npmjs.org/@heroui/use-disclosure/-/use-disclosure-2.2.16.tgz",
- "integrity": "sha512-rcDQoPygbIevGqcl7Lge8hK6FQFyeMwdu4VHH6BBzRCOE39uW/DXuZbdD1B40bw3UBhSKjdvyBp6NjLrm6Ma0g==",
+ "version": "2.2.17",
+ "resolved": "https://registry.npmjs.org/@heroui/use-disclosure/-/use-disclosure-2.2.17.tgz",
+ "integrity": "sha512-S3pN0WmpcTTZuQHcXw4RcTVsxLaCZ95H5qi/JPN83ahhWTCC+pN8lwE37vSahbMTM1YriiHyTM6AWpv/E3Jq7w==",
+ "license": "MIT",
"dependencies": {
"@heroui/use-callback-ref": "2.1.8",
- "@react-aria/utils": "3.30.1",
+ "@react-aria/utils": "3.31.0",
"@react-stately/utils": "3.10.8"
},
"peerDependencies": {
@@ -2750,11 +2779,12 @@
}
},
"node_modules/@heroui/use-draggable": {
- "version": "2.1.17",
- "resolved": "https://registry.npmjs.org/@heroui/use-draggable/-/use-draggable-2.1.17.tgz",
- "integrity": "sha512-1vsMYdny24HRSDWVVBulfzRuGdhbRGIeEzLQpqQYXhUVKzdTWZG8S84NotKoqsLdjAHHtuDQAGmKM2IODASVIA==",
+ "version": "2.1.18",
+ "resolved": "https://registry.npmjs.org/@heroui/use-draggable/-/use-draggable-2.1.18.tgz",
+ "integrity": "sha512-ihQdmLGYJ6aTEaJ0/yCXYn6VRdrRV2eO03XD2A3KANZPb1Bj/n4r298xNMql5VnGq5ZNDJB9nTv8NNCu9pmPdg==",
+ "license": "MIT",
"dependencies": {
- "@react-aria/interactions": "3.25.5"
+ "@react-aria/interactions": "3.25.6"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
@@ -2764,17 +2794,18 @@
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@heroui/use-form-reset/-/use-form-reset-2.0.1.tgz",
"integrity": "sha512-6slKWiLtVfgZnVeHVkM9eXgjwI07u0CUaLt2kQpfKPqTSTGfbHgCYJFduijtThhTdKBhdH6HCmzTcnbVlAxBXw==",
+ "license": "MIT",
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-image": {
- "version": "2.1.12",
- "resolved": "https://registry.npmjs.org/@heroui/use-image/-/use-image-2.1.12.tgz",
- "integrity": "sha512-/W6Cu5VN6LcZzYgkxJSvCEjM5gy0OE6NtRRImUDYCbUFNS1gK/apmOnIWcNbKryAg5Scpdoeu+g1lKKP15nSOw==",
+ "version": "2.1.13",
+ "resolved": "https://registry.npmjs.org/@heroui/use-image/-/use-image-2.1.13.tgz",
+ "integrity": "sha512-NLApz+xin2bKHEXr+eSrtB0lN8geKP5VOea5QGbOCiHq4DBXu4QctpRkSfCHGIQzWdBVaLPoV+5wd0lR2S2Egg==",
"license": "MIT",
"dependencies": {
- "@heroui/react-utils": "2.1.13",
+ "@heroui/react-utils": "2.1.14",
"@heroui/use-safe-layout-effect": "2.1.8"
},
"peerDependencies": {
@@ -2782,12 +2813,12 @@
}
},
"node_modules/@heroui/use-infinite-scroll": {
- "version": "2.2.11",
- "resolved": "https://registry.npmjs.org/@heroui/use-infinite-scroll/-/use-infinite-scroll-2.2.11.tgz",
- "integrity": "sha512-Myhfq8CaeIDo5zCyYan/lM6gOvmvzaJzIiKIwRSrwVxXFBtrsYiaihC/THFw1VEWlOVOu5iPicESu08X7mOaqg==",
+ "version": "2.2.12",
+ "resolved": "https://registry.npmjs.org/@heroui/use-infinite-scroll/-/use-infinite-scroll-2.2.12.tgz",
+ "integrity": "sha512-5yIrw6aP9eH6iU+bQmfixb6QM4qvcwrW3g8jaZJ5ce94nebglLs131B7rSqF/UK8Bp7OXsBM3j1pdBZM7lo/MA==",
"license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11"
+ "@heroui/shared-utils": "2.1.12"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
@@ -2806,6 +2837,7 @@
"version": "2.2.12",
"resolved": "https://registry.npmjs.org/@heroui/use-is-mobile/-/use-is-mobile-2.2.12.tgz",
"integrity": "sha512-2UKa4v1xbvFwerWKoMTrg4q9ZfP9MVIVfCl1a7JuKQlXq3jcyV6z1as5bZ41pCsTOT+wUVOFnlr6rzzQwT9ZOA==",
+ "license": "MIT",
"dependencies": {
"@react-aria/ssr": "3.9.10"
},
@@ -2826,18 +2858,19 @@
"version": "2.1.8",
"resolved": "https://registry.npmjs.org/@heroui/use-measure/-/use-measure-2.1.8.tgz",
"integrity": "sha512-GjT9tIgluqYMZWfAX6+FFdRQBqyHeuqUMGzAXMTH9kBXHU0U5C5XU2c8WFORkNDoZIg1h13h1QdV+Vy4LE1dEA==",
+ "license": "MIT",
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/use-pagination": {
- "version": "2.2.17",
- "resolved": "https://registry.npmjs.org/@heroui/use-pagination/-/use-pagination-2.2.17.tgz",
- "integrity": "sha512-fZ5t2GwLMqDiidAuH+/FsCBw/rtwNc9eIqF2Tz3Qwa4FlfMyzE+4pg99zdlrWM/GP0T/b8VvCNEbsmjKIgrliA==",
+ "version": "2.2.18",
+ "resolved": "https://registry.npmjs.org/@heroui/use-pagination/-/use-pagination-2.2.18.tgz",
+ "integrity": "sha512-qm1mUe5UgV0kPZItcs/jiX/BxzdDagmcxaJkYR6DkhfMRoCuOdoJhcoh8ncbCAgHpzPESPn1VxsOcG4/Y+Jkdw==",
"license": "MIT",
"dependencies": {
- "@heroui/shared-utils": "2.1.11",
- "@react-aria/i18n": "3.12.12"
+ "@heroui/shared-utils": "2.1.12",
+ "@react-aria/i18n": "3.12.13"
},
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
@@ -2847,6 +2880,7 @@
"version": "2.1.8",
"resolved": "https://registry.npmjs.org/@heroui/use-resize/-/use-resize-2.1.8.tgz",
"integrity": "sha512-htF3DND5GmrSiMGnzRbISeKcH+BqhQ/NcsP9sBTIl7ewvFaWiDhEDiUHdJxflmJGd/c5qZq2nYQM/uluaqIkKA==",
+ "license": "MIT",
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
@@ -2864,6 +2898,7 @@
"version": "2.1.8",
"resolved": "https://registry.npmjs.org/@heroui/use-scroll-position/-/use-scroll-position-2.1.8.tgz",
"integrity": "sha512-NxanHKObxVfWaPpNRyBR8v7RfokxrzcHyTyQfbgQgAGYGHTMaOGkJGqF8kBzInc3zJi+F0zbX7Nb0QjUgsLNUQ==",
+ "license": "MIT",
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
@@ -2872,20 +2907,21 @@
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@heroui/use-viewport-size/-/use-viewport-size-2.0.1.tgz",
"integrity": "sha512-blv8BEB/QdLePLWODPRzRS2eELJ2eyHbdOIADbL0KcfLzOUEg9EiuVk90hcSUDAFqYiJ3YZ5Z0up8sdPcR8Y7g==",
+ "license": "MIT",
"peerDependencies": {
"react": ">=18 || >=19.0.0-rc.0"
}
},
"node_modules/@heroui/user": {
- "version": "2.2.21",
- "resolved": "https://registry.npmjs.org/@heroui/user/-/user-2.2.21.tgz",
- "integrity": "sha512-q0bT4BRJaXFtG/KipsHdLN9h8GW56ZhwaR+ug9QFa85Sw65ePeOfThfwGf/yoGFyFt20BY+5P101Ok0iIV756A==",
+ "version": "2.2.22",
+ "resolved": "https://registry.npmjs.org/@heroui/user/-/user-2.2.22.tgz",
+ "integrity": "sha512-kOLxh9Bjgl/ya/f+W7/eKVO/n1GPsU5TPzwocC9+FU/+MbCOrmkevhAGGUrb259KCnp9WCv7WGRIcf8rrsreDw==",
"license": "MIT",
"dependencies": {
- "@heroui/avatar": "2.2.21",
- "@heroui/react-utils": "2.1.13",
- "@heroui/shared-utils": "2.1.11",
- "@react-aria/focus": "3.21.1"
+ "@heroui/avatar": "2.2.22",
+ "@heroui/react-utils": "2.1.14",
+ "@heroui/shared-utils": "2.1.12",
+ "@react-aria/focus": "3.21.2"
},
"peerDependencies": {
"@heroui/system": ">=2.4.18",
@@ -2956,15 +2992,25 @@
"dev": true,
"license": "BSD-3-Clause"
},
+ "node_modules/@inquirer/ansi": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/@inquirer/ansi/-/ansi-1.0.2.tgz",
+ "integrity": "sha512-S8qNSZiYzFd0wAcyG5AXCvUHC5Sr7xpZ9wZ2py9XR88jUz8wooStVx5M6dRzczbBWjic9NP7+rY0Xi7qqK/aMQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
+ },
"node_modules/@inquirer/confirm": {
- "version": "5.1.16",
- "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.16.tgz",
- "integrity": "sha512-j1a5VstaK5KQy8Mu8cHmuQvN1Zc62TbLhjJxwHvKPPKEoowSF6h/0UdOpA9DNdWZ+9Inq73+puRq1df6OJ8Sag==",
+ "version": "5.1.21",
+ "resolved": "https://registry.npmjs.org/@inquirer/confirm/-/confirm-5.1.21.tgz",
+ "integrity": "sha512-KR8edRkIsUayMXV+o3Gv+q4jlhENF9nMYUZs9PA2HzrXeHI8M5uDag70U7RJn9yyiMZSbtF5/UexBtAVtZGSbQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@inquirer/core": "^10.2.0",
- "@inquirer/type": "^3.0.8"
+ "@inquirer/core": "^10.3.2",
+ "@inquirer/type": "^3.0.10"
},
"engines": {
"node": ">=18"
@@ -2979,20 +3025,20 @@
}
},
"node_modules/@inquirer/core": {
- "version": "10.2.0",
- "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.2.0.tgz",
- "integrity": "sha512-NyDSjPqhSvpZEMZrLCYUquWNl+XC/moEcVFqS55IEYIYsY0a1cUCevSqk7ctOlnm/RaSBU5psFryNlxcmGrjaA==",
+ "version": "10.3.2",
+ "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-10.3.2.tgz",
+ "integrity": "sha512-43RTuEbfP8MbKzedNqBrlhhNKVwoK//vUFNW3Q3vZ88BLcrs4kYpGg+B2mm5p2K/HfygoCxuKwJJiv8PbGmE0A==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@inquirer/figures": "^1.0.13",
- "@inquirer/type": "^3.0.8",
- "ansi-escapes": "^4.3.2",
+ "@inquirer/ansi": "^1.0.2",
+ "@inquirer/figures": "^1.0.15",
+ "@inquirer/type": "^3.0.10",
"cli-width": "^4.1.0",
"mute-stream": "^2.0.0",
"signal-exit": "^4.1.0",
"wrap-ansi": "^6.2.0",
- "yoctocolors-cjs": "^2.1.2"
+ "yoctocolors-cjs": "^2.1.3"
},
"engines": {
"node": ">=18"
@@ -3006,22 +3052,6 @@
}
}
},
- "node_modules/@inquirer/core/node_modules/ansi-escapes": {
- "version": "4.3.2",
- "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz",
- "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "type-fest": "^0.21.3"
- },
- "engines": {
- "node": ">=8"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
"node_modules/@inquirer/core/node_modules/emoji-regex": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
@@ -3054,23 +3084,10 @@
"node": ">=8"
}
},
- "node_modules/@inquirer/core/node_modules/type-fest": {
- "version": "0.21.3",
- "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz",
- "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==",
- "dev": true,
- "license": "(MIT OR CC0-1.0)",
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/@inquirer/core/node_modules/wrap-ansi": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
- "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
+ "node_modules/@inquirer/core/node_modules/wrap-ansi": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz",
+ "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -3083,9 +3100,9 @@
}
},
"node_modules/@inquirer/figures": {
- "version": "1.0.13",
- "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.13.tgz",
- "integrity": "sha512-lGPVU3yO9ZNqA7vTYz26jny41lE7yoQansmqdMLBEfqaGsmdg7V3W9mK9Pvb5IL4EVZ9GnSDGMO/cJXud5dMaw==",
+ "version": "1.0.15",
+ "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.15.tgz",
+ "integrity": "sha512-t2IEY+unGHOzAaVM5Xx6DEWKeXlDDcNPeDyUpsRc6CUhBfU3VQOEl+Vssh7VNp1dR8MdUJBWhuObjXCsVpjN5g==",
"dev": true,
"license": "MIT",
"engines": {
@@ -3093,9 +3110,9 @@
}
},
"node_modules/@inquirer/type": {
- "version": "3.0.8",
- "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.8.tgz",
- "integrity": "sha512-lg9Whz8onIHRthWaN1Q9EGLa/0LFJjyM8mEUbL1eTi6yMGvBf8gvyDLtxSXztQsxMvhxxNpJYrwa1YHdq+w4Jw==",
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-3.0.10.tgz",
+ "integrity": "sha512-BvziSRxfz5Ov8ch0z/n3oijRSEcEsHnhggm4xFZe93DHcUCTlutlq9Ox4SVENAfcRD22UQq7T/atg9Wr3k09eA==",
"dev": true,
"license": "MIT",
"engines": {
@@ -3111,9 +3128,9 @@
}
},
"node_modules/@internationalized/date": {
- "version": "3.9.0",
- "resolved": "https://registry.npmjs.org/@internationalized/date/-/date-3.9.0.tgz",
- "integrity": "sha512-yaN3brAnHRD+4KyyOsJyk49XUvj2wtbNACSqg0bz3u8t2VuzhC8Q5dfRnrSxjnnbDb+ienBnkn1TzQfE154vyg==",
+ "version": "3.10.0",
+ "resolved": "https://registry.npmjs.org/@internationalized/date/-/date-3.10.0.tgz",
+ "integrity": "sha512-oxDR/NTEJ1k+UFVQElaNIk65E/Z83HK1z1WI3lQyhTtnNg4R5oVXaPzK3jcpKG8UHKDVuDQHzn+wsxSz8RP3aw==",
"license": "Apache-2.0",
"dependencies": {
"@swc/helpers": "^0.5.0"
@@ -3147,75 +3164,6 @@
"@swc/helpers": "^0.5.0"
}
},
- "node_modules/@isaacs/cliui": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz",
- "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "string-width": "^5.1.2",
- "string-width-cjs": "npm:string-width@^4.2.0",
- "strip-ansi": "^7.0.1",
- "strip-ansi-cjs": "npm:strip-ansi@^6.0.1",
- "wrap-ansi": "^8.1.0",
- "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0"
- },
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/ansi-regex": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.0.tgz",
- "integrity": "sha512-TKY5pyBkHyADOPYlRT9Lx6F544mPl0vS5Ew7BJ45hA08Q+t3GjbueLliBWN3sMICk6+y7HdyxSzC4bWS8baBdg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
- }
- },
- "node_modules/@isaacs/cliui/node_modules/strip-ansi": {
- "version": "7.1.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
- "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^6.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/strip-ansi?sponsor=1"
- }
- },
- "node_modules/@isaacs/fs-minipass": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz",
- "integrity": "sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==",
- "license": "ISC",
- "dependencies": {
- "minipass": "^7.0.4"
- },
- "engines": {
- "node": ">=18.0.0"
- }
- },
- "node_modules/@istanbuljs/schema": {
- "version": "0.1.3",
- "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz",
- "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/@jridgewell/gen-mapping": {
"version": "0.3.13",
"resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz",
@@ -3252,9 +3200,9 @@
"license": "MIT"
},
"node_modules/@jridgewell/trace-mapping": {
- "version": "0.3.30",
- "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.30.tgz",
- "integrity": "sha512-GQ7Nw5G2lTu/BtHTKfXhKHok2WGetd4XYcVKGx00SjAk8GMwgJM3zr6zORiPGuOE+/vkc90KtTosSSvaCjKb2Q==",
+ "version": "0.3.31",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
+ "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
"license": "MIT",
"dependencies": {
"@jridgewell/resolve-uri": "^3.1.0",
@@ -3286,9 +3234,9 @@
"license": "MIT"
},
"node_modules/@monaco-editor/loader": {
- "version": "1.5.0",
- "resolved": "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.5.0.tgz",
- "integrity": "sha512-hKoGSM+7aAc7eRTRjpqAZucPmoNOC4UUbknb/VNoTkEIkCPhqV8LfbsgM1webRM7S/z21eHEx9Fkwx8Z/C/+Xw==",
+ "version": "1.7.0",
+ "resolved": "https://registry.npmjs.org/@monaco-editor/loader/-/loader-1.7.0.tgz",
+ "integrity": "sha512-gIwR1HrJrrx+vfyOhYmCZ0/JcWqG5kbfG7+d3f/C1LXk2EvzAbHSg3MQ5lO2sMlo9izoAZ04shohfKLVT6crVA==",
"license": "MIT",
"dependencies": {
"state-local": "^1.0.6"
@@ -3309,9 +3257,9 @@
}
},
"node_modules/@mswjs/interceptors": {
- "version": "0.39.6",
- "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.39.6.tgz",
- "integrity": "sha512-bndDP83naYYkfayr/qhBHMhk0YGwS1iv6vaEGcr0SQbO0IZtbOPqjKjds/WcG+bJA+1T5vCx6kprKOzn5Bg+Vw==",
+ "version": "0.39.8",
+ "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.39.8.tgz",
+ "integrity": "sha512-2+BzZbjRO7Ct61k8fMNHEtoKjeWI9pIlHFTqBwZ5icHpqszIgEZbjb1MW5Z0+bITTCTl3gk4PDBxs9tA/csXvA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -3379,68 +3327,6 @@
"node": ">= 8"
}
},
- "node_modules/@npmcli/git": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/@npmcli/git/-/git-4.1.0.tgz",
- "integrity": "sha512-9hwoB3gStVfa0N31ymBmrX+GuDGdVA/QWShZVqE0HK2Af+7QGGrCTbZia/SW0ImUTjTne7SP91qxDmtXvDHRPQ==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "@npmcli/promise-spawn": "^6.0.0",
- "lru-cache": "^7.4.4",
- "npm-pick-manifest": "^8.0.0",
- "proc-log": "^3.0.0",
- "promise-inflight": "^1.0.1",
- "promise-retry": "^2.0.1",
- "semver": "^7.3.5",
- "which": "^3.0.0"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/git/node_modules/lru-cache": {
- "version": "7.18.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
- "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=12"
- }
- },
- "node_modules/@npmcli/package-json": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/@npmcli/package-json/-/package-json-4.0.1.tgz",
- "integrity": "sha512-lRCEGdHZomFsURroh522YvA/2cVb9oPIJrjHanCJZkiasz1BzcnLr3tBJhlV7S86MBJBuAQ33is2D60YitZL2Q==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "@npmcli/git": "^4.1.0",
- "glob": "^10.2.2",
- "hosted-git-info": "^6.1.1",
- "json-parse-even-better-errors": "^3.0.0",
- "normalize-package-data": "^5.0.0",
- "proc-log": "^3.0.0",
- "semver": "^7.5.3"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/@npmcli/promise-spawn": {
- "version": "6.0.2",
- "resolved": "https://registry.npmjs.org/@npmcli/promise-spawn/-/promise-spawn-6.0.2.tgz",
- "integrity": "sha512-gGq0NJkIGSwdbUt4yhdF8ZrmkGKVz9vAdVzpOfnom+V8PLSmSOVhZwbNvZZS1EYcJN5hzzKBxmmVVAInM6HQLg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "which": "^3.0.0"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
"node_modules/@open-draft/deferred-promise": {
"version": "2.2.0",
"resolved": "https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz",
@@ -3466,17 +3352,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/@pkgjs/parseargs": {
- "version": "0.11.0",
- "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz",
- "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==",
- "dev": true,
- "license": "MIT",
- "optional": true,
- "engines": {
- "node": ">=14"
- }
- },
"node_modules/@pkgr/core": {
"version": "0.2.9",
"resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.2.9.tgz",
@@ -3491,12 +3366,13 @@
}
},
"node_modules/@playwright/test": {
- "version": "1.55.1",
- "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.55.1.tgz",
- "integrity": "sha512-IVAh/nOJaw6W9g+RJVlIQJ6gSiER+ae6mKQ5CX1bERzQgbC1VSeBlwdvczT7pxb0GWiyrxH4TGKbMfDb4Sq/ig==",
+ "version": "1.57.0",
+ "resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.57.0.tgz",
+ "integrity": "sha512-6TyEnHgd6SArQO8UO2OMTxshln3QMWBtPGrOCgs3wVEmQmwyuNtB10IZMfmYDE0riwNR1cu4q+pPcxMVtaG3TA==",
"dev": true,
+ "license": "Apache-2.0",
"dependencies": {
- "playwright": "1.55.1"
+ "playwright": "1.57.0"
},
"bin": {
"playwright": "cli.js"
@@ -3512,19 +3388,17 @@
"license": "MIT"
},
"node_modules/@posthog/core": {
- "version": "1.5.2",
- "resolved": "https://registry.npmjs.org/@posthog/core/-/core-1.5.2.tgz",
- "integrity": "sha512-iedUP3EnOPPxTA2VaIrsrd29lSZnUV+ZrMnvY56timRVeZAXoYCkmjfIs3KBAsF8OUT5h1GXLSkoQdrV0r31OQ==",
- "license": "MIT",
+ "version": "1.7.1",
+ "resolved": "https://registry.npmjs.org/@posthog/core/-/core-1.7.1.tgz",
+ "integrity": "sha512-kjK0eFMIpKo9GXIbts8VtAknsoZ18oZorANdtuTj1CbgS28t4ZVq//HAWhnxEuXRTrtkd+SUJ6Ux3j2Af8NCuA==",
"dependencies": {
"cross-spawn": "^7.0.6"
}
},
"node_modules/@posthog/react": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/@posthog/react/-/react-1.4.0.tgz",
- "integrity": "sha512-xzPeZ753fQ0deZzdgY/0YavZvNpmdaxUzLYJYu5XjONNcZ8PwJnNLEK+7D/Cj8UM4Q8nWI7QC5mjum0uLWa4FA==",
- "license": "MIT",
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/@posthog/react/-/react-1.5.2.tgz",
+ "integrity": "sha512-KHdXbV1yba7Y2l8BVmwXlySWxqKVLNQ5ZiVvWOf7r3Eo7GIFxCM4CaNK/z83kKWn8KTskmKy7AGF6Hl6INWK3g==",
"peerDependencies": {
"@types/react": ">=16.8.0",
"posthog-js": ">=1.257.2",
@@ -3537,16 +3411,16 @@
}
},
"node_modules/@react-aria/breadcrumbs": {
- "version": "3.5.28",
- "resolved": "https://registry.npmjs.org/@react-aria/breadcrumbs/-/breadcrumbs-3.5.28.tgz",
- "integrity": "sha512-6S3QelpajodEzN7bm49XXW5gGoZksK++cl191W0sexq/E5hZHAEA9+CFC8pL3px13ji7qHGqKAxOP4IUVBdVpQ==",
+ "version": "3.5.29",
+ "resolved": "https://registry.npmjs.org/@react-aria/breadcrumbs/-/breadcrumbs-3.5.29.tgz",
+ "integrity": "sha512-rKS0dryllaZJqrr3f/EAf2liz8CBEfmL5XACj+Z1TAig6GIYe1QuA3BtkX0cV9OkMugXdX8e3cbA7nD10ORRqg==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/link": "^3.8.5",
- "@react-aria/utils": "^3.30.1",
- "@react-types/breadcrumbs": "^3.7.16",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/link": "^3.8.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/breadcrumbs": "^3.7.17",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3555,16 +3429,17 @@
}
},
"node_modules/@react-aria/button": {
- "version": "3.14.1",
- "resolved": "https://registry.npmjs.org/@react-aria/button/-/button-3.14.1.tgz",
- "integrity": "sha512-Ug06unKEYVG3OF6zKmpVR7VfLzpj7eJVuFo3TCUxwFJG7DI28pZi2TaGWnhm7qjkxfl1oz0avQiHVfDC99gSuw==",
- "dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/toolbar": "3.0.0-beta.20",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/toggle": "^3.9.1",
- "@react-types/button": "^3.14.0",
- "@react-types/shared": "^3.32.0",
+ "version": "3.14.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/button/-/button-3.14.2.tgz",
+ "integrity": "sha512-VbLIA+Kd6f/MDjd+TJBUg2+vNDw66pnvsj2E4RLomjI9dfBuN7d+Yo2UnsqKVyhePjCUZ6xxa2yDuD63IOSIYA==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/toolbar": "3.0.0-beta.21",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/toggle": "^3.9.2",
+ "@react-types/button": "^3.14.1",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3573,19 +3448,20 @@
}
},
"node_modules/@react-aria/calendar": {
- "version": "3.9.1",
- "resolved": "https://registry.npmjs.org/@react-aria/calendar/-/calendar-3.9.1.tgz",
- "integrity": "sha512-dCJliRIi3x3VmAZkJDNTZddq0+QoUX9NS7GgdqPPYcJIMbVPbyLWL61//0SrcCr3MuSRCoI1eQZ8PkQe/2PJZQ==",
+ "version": "3.9.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/calendar/-/calendar-3.9.2.tgz",
+ "integrity": "sha512-uSLxLgOPRnEU4Jg59lAhUVA+uDx/55NBg4lpfsP2ynazyiJ5LCXmYceJi+VuOqMml7d9W0dB87OldOeLdIxYVA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
+ "@internationalized/date": "^3.10.0",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
"@react-aria/live-announcer": "^3.4.4",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/calendar": "^3.8.4",
- "@react-types/button": "^3.14.0",
- "@react-types/calendar": "^3.7.4",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/calendar": "^3.9.0",
+ "@react-types/button": "^3.14.1",
+ "@react-types/calendar": "^3.8.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3594,20 +3470,21 @@
}
},
"node_modules/@react-aria/checkbox": {
- "version": "3.16.1",
- "resolved": "https://registry.npmjs.org/@react-aria/checkbox/-/checkbox-3.16.1.tgz",
- "integrity": "sha512-YcG3QhuGIwqPHo4GVGVmwxPM5Ayq9CqYfZjla/KTfJILPquAJ12J7LSMpqS/Z5TlMNgIIqZ3ZdrYmjQlUY7eUg==",
- "dependencies": {
- "@react-aria/form": "^3.1.1",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/label": "^3.7.21",
- "@react-aria/toggle": "^3.12.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/checkbox": "^3.7.1",
- "@react-stately/form": "^3.2.1",
- "@react-stately/toggle": "^3.9.1",
- "@react-types/checkbox": "^3.10.1",
- "@react-types/shared": "^3.32.0",
+ "version": "3.16.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/checkbox/-/checkbox-3.16.2.tgz",
+ "integrity": "sha512-29Mj9ZqXioJ0bcMnNGooHztnTau5pikZqX3qCRj5bYR3by/ZFFavYoMroh9F7s/MbFm/tsKX+Sf02lYFEdXRjA==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/form": "^3.1.2",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/toggle": "^3.12.2",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/checkbox": "^3.7.2",
+ "@react-stately/form": "^3.2.2",
+ "@react-stately/toggle": "^3.9.2",
+ "@react-types/checkbox": "^3.10.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3616,25 +3493,26 @@
}
},
"node_modules/@react-aria/combobox": {
- "version": "3.13.1",
- "resolved": "https://registry.npmjs.org/@react-aria/combobox/-/combobox-3.13.1.tgz",
- "integrity": "sha512-3lt3TGfjadJsN+illC23hgfeQ/VqF04mxczoU+3znOZ+vTx9zov/YfUysAsaxc8hyjr65iydz+CEbyg4+i0y3A==",
+ "version": "3.14.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/combobox/-/combobox-3.14.0.tgz",
+ "integrity": "sha512-z4ro0Hma//p4nL2IJx5iUa7NwxeXbzSoZ0se5uTYjG1rUUMszg+wqQh/AQoL+eiULn7rs18JY9wwNbVIkRNKWA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/listbox": "^3.14.8",
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/listbox": "^3.15.0",
"@react-aria/live-announcer": "^3.4.4",
- "@react-aria/menu": "^3.19.1",
- "@react-aria/overlays": "^3.29.0",
- "@react-aria/selection": "^3.25.1",
- "@react-aria/textfield": "^3.18.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/collections": "^3.12.7",
- "@react-stately/combobox": "^3.11.1",
- "@react-stately/form": "^3.2.1",
- "@react-types/button": "^3.14.0",
- "@react-types/combobox": "^3.13.8",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/menu": "^3.19.3",
+ "@react-aria/overlays": "^3.30.0",
+ "@react-aria/selection": "^3.26.0",
+ "@react-aria/textfield": "^3.18.2",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/combobox": "^3.12.0",
+ "@react-stately/form": "^3.2.2",
+ "@react-types/button": "^3.14.1",
+ "@react-types/combobox": "^3.13.9",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3643,27 +3521,28 @@
}
},
"node_modules/@react-aria/datepicker": {
- "version": "3.15.1",
- "resolved": "https://registry.npmjs.org/@react-aria/datepicker/-/datepicker-3.15.1.tgz",
- "integrity": "sha512-RfUOvsupON6E5ZELpBgb9qxsilkbqwzsZ78iqCDTVio+5kc5G9jVeHEIQOyHnavi/TmJoAnbmmVpEbE6M9lYJQ==",
+ "version": "3.15.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/datepicker/-/datepicker-3.15.2.tgz",
+ "integrity": "sha512-th078hyNqPf4P2K10su/y32zPDjs3lOYVdHvsL9/+5K1dnTvLHCK5vgUyLuyn8FchhF7cmHV49D+LZVv65PEpQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
+ "@internationalized/date": "^3.10.0",
"@internationalized/number": "^3.6.5",
"@internationalized/string": "^3.2.7",
- "@react-aria/focus": "^3.21.1",
- "@react-aria/form": "^3.1.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/label": "^3.7.21",
- "@react-aria/spinbutton": "^3.6.18",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/datepicker": "^3.15.1",
- "@react-stately/form": "^3.2.1",
- "@react-types/button": "^3.14.0",
- "@react-types/calendar": "^3.7.4",
- "@react-types/datepicker": "^3.13.1",
- "@react-types/dialog": "^3.5.21",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/form": "^3.1.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/spinbutton": "^3.6.19",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/datepicker": "^3.15.2",
+ "@react-stately/form": "^3.2.2",
+ "@react-types/button": "^3.14.1",
+ "@react-types/calendar": "^3.8.0",
+ "@react-types/datepicker": "^3.13.2",
+ "@react-types/dialog": "^3.5.22",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3672,15 +3551,16 @@
}
},
"node_modules/@react-aria/dialog": {
- "version": "3.5.29",
- "resolved": "https://registry.npmjs.org/@react-aria/dialog/-/dialog-3.5.29.tgz",
- "integrity": "sha512-GtxB0oTwkSz/GiKMPN0lU4h/r+Cr04FFUonZU5s03YmDTtgVjTSjFPmsd7pkbt3qq0aEiQASx/vWdAkKLWjRHA==",
+ "version": "3.5.31",
+ "resolved": "https://registry.npmjs.org/@react-aria/dialog/-/dialog-3.5.31.tgz",
+ "integrity": "sha512-inxQMyrzX0UBW9Mhraq0nZ4HjHdygQvllzloT1E/RlDd61lr3RbmJR6pLsrbKOTtSvDIBJpCso1xEdHCFNmA0Q==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/overlays": "^3.29.0",
- "@react-aria/utils": "^3.30.1",
- "@react-types/dialog": "^3.5.21",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/overlays": "^3.30.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/dialog": "^3.5.22",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3689,14 +3569,14 @@
}
},
"node_modules/@react-aria/focus": {
- "version": "3.21.1",
- "resolved": "https://registry.npmjs.org/@react-aria/focus/-/focus-3.21.1.tgz",
- "integrity": "sha512-hmH1IhHlcQ2lSIxmki1biWzMbGgnhdxJUM0MFfzc71Rv6YAzhlx4kX3GYn4VNcjCeb6cdPv4RZ5vunV4kgMZYQ==",
+ "version": "3.21.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/focus/-/focus-3.21.2.tgz",
+ "integrity": "sha512-JWaCR7wJVggj+ldmM/cb/DXFg47CXR55lznJhZBh4XVqJjMKwaOOqpT5vNN7kpC1wUpXicGNuDnJDN1S/+6dhQ==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0",
"clsx": "^2.0.0"
},
@@ -3706,14 +3586,15 @@
}
},
"node_modules/@react-aria/form": {
- "version": "3.1.1",
- "resolved": "https://registry.npmjs.org/@react-aria/form/-/form-3.1.1.tgz",
- "integrity": "sha512-PjZC25UgH5orit9p56Ymbbo288F3eaDd3JUvD8SG+xgx302HhlFAOYsQLLAb4k4H03bp0gWtlUEkfX6KYcE1Tw==",
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/form/-/form-3.1.2.tgz",
+ "integrity": "sha512-R3i7L7Ci61PqZQvOrnL9xJeWEbh28UkTVgkj72EvBBn39y4h7ReH++0stv7rRs8p5ozETSKezBbGfu4UsBewWw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/form": "^3.2.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/form": "^3.2.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3722,22 +3603,23 @@
}
},
"node_modules/@react-aria/grid": {
- "version": "3.14.4",
- "resolved": "https://registry.npmjs.org/@react-aria/grid/-/grid-3.14.4.tgz",
- "integrity": "sha512-l1FLQNKnoHpY4UClUTPUV0AqJ5bfAULEE0ErY86KznWLd+Hqzo7mHLqqDV02CDa/8mIUcdoax/MrYYIbPDlOZA==",
+ "version": "3.14.5",
+ "resolved": "https://registry.npmjs.org/@react-aria/grid/-/grid-3.14.5.tgz",
+ "integrity": "sha512-XHw6rgjlTqc85e3zjsWo3U0EVwjN5MOYtrolCKc/lc2ItNdcY3OlMhpsU9+6jHwg/U3VCSWkGvwAz9hg7krd8Q==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
"@react-aria/live-announcer": "^3.4.4",
- "@react-aria/selection": "^3.25.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/collections": "^3.12.7",
- "@react-stately/grid": "^3.11.5",
- "@react-stately/selection": "^3.20.5",
- "@react-types/checkbox": "^3.10.1",
- "@react-types/grid": "^3.3.5",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/selection": "^3.26.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/grid": "^3.11.6",
+ "@react-stately/selection": "^3.20.6",
+ "@react-types/checkbox": "^3.10.2",
+ "@react-types/grid": "^3.3.6",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3746,18 +3628,18 @@
}
},
"node_modules/@react-aria/i18n": {
- "version": "3.12.12",
- "resolved": "https://registry.npmjs.org/@react-aria/i18n/-/i18n-3.12.12.tgz",
- "integrity": "sha512-JN6p+Xc6Pu/qddGRoeYY6ARsrk2Oz7UiQc9nLEPOt3Ch+blJZKWwDjcpo/p6/wVZdD/2BgXS7El6q6+eMg7ibw==",
+ "version": "3.12.13",
+ "resolved": "https://registry.npmjs.org/@react-aria/i18n/-/i18n-3.12.13.tgz",
+ "integrity": "sha512-YTM2BPg0v1RvmP8keHenJBmlx8FXUKsdYIEX7x6QWRd1hKlcDwphfjzvt0InX9wiLiPHsT5EoBTpuUk8SXc0Mg==",
"license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
+ "@internationalized/date": "^3.10.0",
"@internationalized/message": "^3.1.8",
"@internationalized/number": "^3.6.5",
"@internationalized/string": "^3.2.7",
"@react-aria/ssr": "^3.9.10",
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3766,15 +3648,15 @@
}
},
"node_modules/@react-aria/interactions": {
- "version": "3.25.5",
- "resolved": "https://registry.npmjs.org/@react-aria/interactions/-/interactions-3.25.5.tgz",
- "integrity": "sha512-EweYHOEvMwef/wsiEqV73KurX/OqnmbzKQa2fLxdULbec5+yDj6wVGaRHIzM4NiijIDe+bldEl5DG05CAKOAHA==",
+ "version": "3.25.6",
+ "resolved": "https://registry.npmjs.org/@react-aria/interactions/-/interactions-3.25.6.tgz",
+ "integrity": "sha512-5UgwZmohpixwNMVkMvn9K1ceJe6TzlRlAfuYoQDUuOkk62/JVJNDLAPKIf5YMRc7d2B0rmfgaZLMtbREb0Zvkw==",
"license": "Apache-2.0",
"dependencies": {
"@react-aria/ssr": "^3.9.10",
- "@react-aria/utils": "^3.30.1",
+ "@react-aria/utils": "^3.31.0",
"@react-stately/flags": "^3.1.2",
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3783,13 +3665,13 @@
}
},
"node_modules/@react-aria/label": {
- "version": "3.7.21",
- "resolved": "https://registry.npmjs.org/@react-aria/label/-/label-3.7.21.tgz",
- "integrity": "sha512-8G+059/GZahgQbrhMcCcVcrjm7W+pfzrypH/Qkjo7C1yqPGt6geeFwWeOIbiUZoI0HD9t9QvQPryd6m46UC7Tg==",
+ "version": "3.7.22",
+ "resolved": "https://registry.npmjs.org/@react-aria/label/-/label-3.7.22.tgz",
+ "integrity": "sha512-jLquJeA5ZNqDT64UpTc9XJ7kQYltUlNcgxZ37/v4mHe0UZ7QohCKdKQhXHONb0h2jjNUpp2HOZI8J9++jOpzxA==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3798,12 +3680,13 @@
}
},
"node_modules/@react-aria/landmark": {
- "version": "3.0.6",
- "resolved": "https://registry.npmjs.org/@react-aria/landmark/-/landmark-3.0.6.tgz",
- "integrity": "sha512-dMPBqJWTDAr3Lj5hA+XYDH2PWqtFghYy+y7iq7K5sK/96cub8hZEUjhwn+HGgHsLerPp0dWt293nKupAJnf4Vw==",
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/@react-aria/landmark/-/landmark-3.0.7.tgz",
+ "integrity": "sha512-t8c610b8hPLS6Vwv+rbuSyljZosI1s5+Tosfa0Fk4q7d+Ex6Yj7hLfUFy59GxZAufhUYfGX396fT0gPqAbU1tg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0",
"use-sync-external-store": "^1.4.0"
},
@@ -3813,15 +3696,15 @@
}
},
"node_modules/@react-aria/link": {
- "version": "3.8.5",
- "resolved": "https://registry.npmjs.org/@react-aria/link/-/link-3.8.5.tgz",
- "integrity": "sha512-klhV4roPp5MLRXJv1N+7SXOj82vx4gzVpuwQa3vouA+YI1my46oNzwgtkLGSTvE9OvDqYzPDj2YxFYhMywrkuw==",
+ "version": "3.8.6",
+ "resolved": "https://registry.npmjs.org/@react-aria/link/-/link-3.8.6.tgz",
+ "integrity": "sha512-7F7UDJnwbU9IjfoAdl6f3Hho5/WB7rwcydUOjUux0p7YVWh/fTjIFjfAGyIir7MJhPapun1D0t97QQ3+8jXVcg==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-types/link": "^3.6.4",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/link": "^3.6.5",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3830,18 +3713,19 @@
}
},
"node_modules/@react-aria/listbox": {
- "version": "3.14.8",
- "resolved": "https://registry.npmjs.org/@react-aria/listbox/-/listbox-3.14.8.tgz",
- "integrity": "sha512-uRgbuD9afFv0PDhQ/VXCmAwlYctIyKRzxztkqp1p/1yz/tn/hs+bG9kew9AI02PtlRO1mSc+32O+mMDXDer8hA==",
- "dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/label": "^3.7.21",
- "@react-aria/selection": "^3.25.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/collections": "^3.12.7",
- "@react-stately/list": "^3.13.0",
- "@react-types/listbox": "^3.7.3",
- "@react-types/shared": "^3.32.0",
+ "version": "3.15.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/listbox/-/listbox-3.15.0.tgz",
+ "integrity": "sha512-Ub1Wu79R9sgxM7h4HeEdjOgOKDHwduvYcnDqsSddGXgpkL8ADjsy2YUQ0hHY5VnzA4BxK36bLp4mzSna8Qvj1w==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/selection": "^3.26.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/list": "^3.13.1",
+ "@react-types/listbox": "^3.7.4",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3853,28 +3737,30 @@
"version": "3.4.4",
"resolved": "https://registry.npmjs.org/@react-aria/live-announcer/-/live-announcer-3.4.4.tgz",
"integrity": "sha512-PTTBIjNRnrdJOIRTDGNifY2d//kA7GUAwRFJNOEwSNG4FW+Bq9awqLiflw0JkpyB0VNIwou6lqKPHZVLsGWOXA==",
+ "license": "Apache-2.0",
"dependencies": {
"@swc/helpers": "^0.5.0"
}
},
"node_modules/@react-aria/menu": {
- "version": "3.19.1",
- "resolved": "https://registry.npmjs.org/@react-aria/menu/-/menu-3.19.1.tgz",
- "integrity": "sha512-hRYFdOOj3fYyoh/tJGxY1CWY80geNb3BT3DMNHgGBVMvnZ0E6k3WoQH+QZkVnwSnNIQAIPQFcYWPyZeE+ElEhA==",
- "dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/overlays": "^3.29.0",
- "@react-aria/selection": "^3.25.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/collections": "^3.12.7",
- "@react-stately/menu": "^3.9.7",
- "@react-stately/selection": "^3.20.5",
- "@react-stately/tree": "^3.9.2",
- "@react-types/button": "^3.14.0",
- "@react-types/menu": "^3.10.4",
- "@react-types/shared": "^3.32.0",
+ "version": "3.19.3",
+ "resolved": "https://registry.npmjs.org/@react-aria/menu/-/menu-3.19.3.tgz",
+ "integrity": "sha512-52fh8y8b2776R2VrfZPpUBJYC9oTP7XDy+zZuZTxPEd7Ywk0JNUl5F92y6ru22yPkS13sdhrNM/Op+V/KulmAg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/overlays": "^3.30.0",
+ "@react-aria/selection": "^3.26.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/menu": "^3.9.8",
+ "@react-stately/selection": "^3.20.6",
+ "@react-stately/tree": "^3.9.3",
+ "@react-types/button": "^3.14.1",
+ "@react-types/menu": "^3.10.5",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3883,20 +3769,21 @@
}
},
"node_modules/@react-aria/numberfield": {
- "version": "3.12.1",
- "resolved": "https://registry.npmjs.org/@react-aria/numberfield/-/numberfield-3.12.1.tgz",
- "integrity": "sha512-3KjxGgWiF4GRvIyqrE3nCndkkEJ68v86y0nx89TpAjdzg7gCgdXgU2Lr4BhC/xImrmlqCusw0IBUMhsEq9EQWA==",
- "dependencies": {
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/spinbutton": "^3.6.18",
- "@react-aria/textfield": "^3.18.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/form": "^3.2.1",
- "@react-stately/numberfield": "^3.10.1",
- "@react-types/button": "^3.14.0",
- "@react-types/numberfield": "^3.8.14",
- "@react-types/shared": "^3.32.0",
+ "version": "3.12.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/numberfield/-/numberfield-3.12.2.tgz",
+ "integrity": "sha512-M2b+z0HIXiXpGAWOQkO2kpIjaLNUXJ5Q3/GMa3Fkr+B1piFX0VuOynYrtddKVrmXCe+r5t+XcGb0KS29uqv7nQ==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/spinbutton": "^3.6.19",
+ "@react-aria/textfield": "^3.18.2",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/form": "^3.2.2",
+ "@react-stately/numberfield": "^3.10.2",
+ "@react-types/button": "^3.14.1",
+ "@react-types/numberfield": "^3.8.15",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3905,21 +3792,21 @@
}
},
"node_modules/@react-aria/overlays": {
- "version": "3.29.0",
- "resolved": "https://registry.npmjs.org/@react-aria/overlays/-/overlays-3.29.0.tgz",
- "integrity": "sha512-OmMcwrbBMcv4KWNAPxvMZw02Wcw+z3e5dOS+MOb4AfY4bOJUvw+9hB13cfECs5lNXjV/UHT+5w2WBs32jmTwTg==",
+ "version": "3.30.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/overlays/-/overlays-3.30.0.tgz",
+ "integrity": "sha512-UpjqSjYZx5FAhceWCRVsW6fX1sEwya1fQ/TKkL53FAlLFR8QKuoKqFlmiL43YUFTcGK3UdEOy3cWTleLQwdSmQ==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
"@react-aria/ssr": "^3.9.10",
- "@react-aria/utils": "^3.30.1",
- "@react-aria/visually-hidden": "^3.8.27",
- "@react-stately/overlays": "^3.6.19",
- "@react-types/button": "^3.14.0",
- "@react-types/overlays": "^3.9.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-aria/visually-hidden": "^3.8.28",
+ "@react-stately/overlays": "^3.6.20",
+ "@react-types/button": "^3.14.1",
+ "@react-types/overlays": "^3.9.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3928,16 +3815,16 @@
}
},
"node_modules/@react-aria/progress": {
- "version": "3.4.26",
- "resolved": "https://registry.npmjs.org/@react-aria/progress/-/progress-3.4.26.tgz",
- "integrity": "sha512-EJBzbE0IjXrJ19ofSyNKDnqC70flUM0Z+9heMRPLi6Uz01o6Uuz9tjyzmoPnd9Q1jnTT7dCl7ydhdYTGsWFcUg==",
+ "version": "3.4.27",
+ "resolved": "https://registry.npmjs.org/@react-aria/progress/-/progress-3.4.27.tgz",
+ "integrity": "sha512-0OA1shs1575g1zmO8+rWozdbTnxThFFhOfuoL1m7UV5Dley6FHpueoKB1ECv7B+Qm4dQt6DoEqLg7wsbbQDhmg==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/label": "^3.7.21",
- "@react-aria/utils": "^3.30.1",
- "@react-types/progress": "^3.5.15",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/progress": "^3.5.16",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3946,19 +3833,20 @@
}
},
"node_modules/@react-aria/radio": {
- "version": "3.12.1",
- "resolved": "https://registry.npmjs.org/@react-aria/radio/-/radio-3.12.1.tgz",
- "integrity": "sha512-feZdMJyNp+UX03seIX0W6gdUk8xayTY+U0Ct61eci6YXzyyZoL2PVh49ojkbyZ2UZA/eXeygpdF5sgQrKILHCA==",
- "dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/form": "^3.1.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/label": "^3.7.21",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/radio": "^3.11.1",
- "@react-types/radio": "^3.9.1",
- "@react-types/shared": "^3.32.0",
+ "version": "3.12.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/radio/-/radio-3.12.2.tgz",
+ "integrity": "sha512-I11f6I90neCh56rT/6ieAs3XyDKvEfbj/QmbU5cX3p+SJpRRPN0vxQi5D1hkh0uxDpeClxygSr31NmZsd4sqfg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/form": "^3.1.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/radio": "^3.11.2",
+ "@react-types/radio": "^3.9.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3967,16 +3855,17 @@
}
},
"node_modules/@react-aria/selection": {
- "version": "3.25.1",
- "resolved": "https://registry.npmjs.org/@react-aria/selection/-/selection-3.25.1.tgz",
- "integrity": "sha512-HG+k3rDjuhnXPdVyv9CKiebee2XNkFYeYZBxEGlK3/pFVBzndnc8BXNVrXSgtCHLs2d090JBVKl1k912BPbj0Q==",
- "dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/selection": "^3.20.5",
- "@react-types/shared": "^3.32.0",
+ "version": "3.26.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/selection/-/selection-3.26.0.tgz",
+ "integrity": "sha512-ZBH3EfWZ+RfhTj01dH8L17uT7iNbXWS8u77/fUpHgtrm0pwNVhx0TYVnLU1YpazQ/3WVpvWhmBB8sWwD1FlD/g==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/selection": "^3.20.6",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -3985,17 +3874,18 @@
}
},
"node_modules/@react-aria/slider": {
- "version": "3.8.1",
- "resolved": "https://registry.npmjs.org/@react-aria/slider/-/slider-3.8.1.tgz",
- "integrity": "sha512-uPgwZQrcuqHaLU2prJtPEPIyN9ugZ7qGgi0SB2U8tvoODNVwuPvOaSsvR98Mn6jiAzMFNoWMydeIi+J1OjvWsQ==",
- "dependencies": {
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/label": "^3.7.21",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/slider": "^3.7.1",
- "@react-types/shared": "^3.32.0",
- "@react-types/slider": "^3.8.1",
+ "version": "3.8.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/slider/-/slider-3.8.2.tgz",
+ "integrity": "sha512-6KyUGaVzRE4xAz1LKHbNh1q5wzxe58pdTHFSnxNe6nk1SCoHw7NfI4h2s2m6LgJ0megFxsT0Ir8aHaFyyxmbgg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/slider": "^3.7.2",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/slider": "^3.8.2",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4004,15 +3894,16 @@
}
},
"node_modules/@react-aria/spinbutton": {
- "version": "3.6.18",
- "resolved": "https://registry.npmjs.org/@react-aria/spinbutton/-/spinbutton-3.6.18.tgz",
- "integrity": "sha512-dnmh7sNsprhYTpqCJhcuc9QJ9C/IG/o9TkgW5a9qcd2vS+dzEgqAiJKIMbJFG9kiJymv2NwIPysF12IWix+J3A==",
+ "version": "3.6.19",
+ "resolved": "https://registry.npmjs.org/@react-aria/spinbutton/-/spinbutton-3.6.19.tgz",
+ "integrity": "sha512-xOIXegDpts9t3RSHdIN0iYQpdts0FZ3LbpYJIYVvdEHo9OpDS+ElnDzCGtwZLguvZlwc5s1LAKuKopDUsAEMkw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/i18n": "^3.12.12",
+ "@react-aria/i18n": "^3.12.13",
"@react-aria/live-announcer": "^3.4.4",
- "@react-aria/utils": "^3.30.1",
- "@react-types/button": "^3.14.0",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/button": "^3.14.1",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4036,15 +3927,15 @@
}
},
"node_modules/@react-aria/switch": {
- "version": "3.7.7",
- "resolved": "https://registry.npmjs.org/@react-aria/switch/-/switch-3.7.7.tgz",
- "integrity": "sha512-auV3g1qh+d/AZk7Idw2BOcYeXfCD9iDaiGmlcLJb9Eaz4nkq8vOkQxIXQFrn9Xhb+PfQzmQYKkt5N6P2ZNsw/g==",
+ "version": "3.7.8",
+ "resolved": "https://registry.npmjs.org/@react-aria/switch/-/switch-3.7.8.tgz",
+ "integrity": "sha512-AfsUq1/YiuoprhcBUD9vDPyWaigAwctQNW1fMb8dROL+i/12B+Zekj8Ml+jbU69/kIVtfL0Jl7/0Bo9KK3X0xQ==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/toggle": "^3.12.1",
- "@react-stately/toggle": "^3.9.1",
- "@react-types/shared": "^3.32.0",
- "@react-types/switch": "^3.5.14",
+ "@react-aria/toggle": "^3.12.2",
+ "@react-stately/toggle": "^3.9.2",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/switch": "^3.5.15",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4053,24 +3944,25 @@
}
},
"node_modules/@react-aria/table": {
- "version": "3.17.7",
- "resolved": "https://registry.npmjs.org/@react-aria/table/-/table-3.17.7.tgz",
- "integrity": "sha512-FxXryGTxePgh8plIxlOMwXdleGWjK52vsmbRoqz66lTIHMUMLTmmm+Y0V3lBOIoaW1rxvKcolYgS79ROnbDYBw==",
- "dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/grid": "^3.14.4",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
+ "version": "3.17.8",
+ "resolved": "https://registry.npmjs.org/@react-aria/table/-/table-3.17.8.tgz",
+ "integrity": "sha512-bXiZoxTMbsqUJsYDhHPzKc3jw0HFJ/xMsJ49a0f7mp5r9zACxNLeIU0wJ4Uvx37dnYOHKzGliG+rj5l4sph7MA==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/grid": "^3.14.5",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
"@react-aria/live-announcer": "^3.4.4",
- "@react-aria/utils": "^3.30.1",
- "@react-aria/visually-hidden": "^3.8.27",
- "@react-stately/collections": "^3.12.7",
+ "@react-aria/utils": "^3.31.0",
+ "@react-aria/visually-hidden": "^3.8.28",
+ "@react-stately/collections": "^3.12.8",
"@react-stately/flags": "^3.1.2",
- "@react-stately/table": "^3.15.0",
- "@react-types/checkbox": "^3.10.1",
- "@react-types/grid": "^3.3.5",
- "@react-types/shared": "^3.32.0",
- "@react-types/table": "^3.13.3",
+ "@react-stately/table": "^3.15.1",
+ "@react-types/checkbox": "^3.10.2",
+ "@react-types/grid": "^3.3.6",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/table": "^3.13.4",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4079,17 +3971,18 @@
}
},
"node_modules/@react-aria/tabs": {
- "version": "3.10.7",
- "resolved": "https://registry.npmjs.org/@react-aria/tabs/-/tabs-3.10.7.tgz",
- "integrity": "sha512-iA1M6H+N+9GggsEy/6MmxpMpeOocwYgFy2EoEl3it24RVccY6iZT4AweJq96s5IYga5PILpn7VVcpssvhkPgeA==",
- "dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/selection": "^3.25.1",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/tabs": "^3.8.5",
- "@react-types/shared": "^3.32.0",
- "@react-types/tabs": "^3.3.18",
+ "version": "3.10.8",
+ "resolved": "https://registry.npmjs.org/@react-aria/tabs/-/tabs-3.10.8.tgz",
+ "integrity": "sha512-sPPJyTyoAqsBh76JinBAxStOcbjZvyWFYKpJ9Uqw+XT0ObshAPPFSGeh8DiQemPs02RwJdrfARPMhyqiX8t59A==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/selection": "^3.26.0",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/tabs": "^3.8.6",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/tabs": "^3.3.19",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4098,18 +3991,19 @@
}
},
"node_modules/@react-aria/textfield": {
- "version": "3.18.1",
- "resolved": "https://registry.npmjs.org/@react-aria/textfield/-/textfield-3.18.1.tgz",
- "integrity": "sha512-8yCoirnQzbbQgdk5J5bqimEu3GhHZ9FXeMHez1OF+H+lpTwyTYQ9XgioEN3HKnVUBNEufG4lYkQMxTKJdq1v9g==",
- "dependencies": {
- "@react-aria/form": "^3.1.1",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/label": "^3.7.21",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/form": "^3.2.1",
+ "version": "3.18.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/textfield/-/textfield-3.18.2.tgz",
+ "integrity": "sha512-G+lM8VYSor6g9Yptc6hLZ6BF+0cq0pYol1z6wdQUQgJN8tg4HPtzq75lsZtlCSIznL3amgRAxJtd0dUrsAnvaQ==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-aria/form": "^3.1.2",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/label": "^3.7.22",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/form": "^3.2.2",
"@react-stately/utils": "^3.10.8",
- "@react-types/shared": "^3.32.0",
- "@react-types/textfield": "^3.12.5",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/textfield": "^3.12.6",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4118,17 +4012,18 @@
}
},
"node_modules/@react-aria/toast": {
- "version": "3.0.7",
- "resolved": "https://registry.npmjs.org/@react-aria/toast/-/toast-3.0.7.tgz",
- "integrity": "sha512-nuxPQ7wcSTg9UNMhXl9Uwyc5you/D1RfwymI3VDa5OGTZdJOmV2j94nyjBfMO2168EYMZjw+wEovvOZphs2Pbw==",
+ "version": "3.0.8",
+ "resolved": "https://registry.npmjs.org/@react-aria/toast/-/toast-3.0.8.tgz",
+ "integrity": "sha512-rfJIms6AkMyQ7ZgKrMZgGfPwGcB/t1JoEwbc1PAmXcAvFI/hzF6YF7ZFDXiq38ucFsP9PnHmbXIzM9w4ccl18A==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/landmark": "^3.0.6",
- "@react-aria/utils": "^3.30.1",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/landmark": "^3.0.7",
+ "@react-aria/utils": "^3.31.0",
"@react-stately/toast": "^3.1.2",
- "@react-types/button": "^3.14.0",
- "@react-types/shared": "^3.32.0",
+ "@react-types/button": "^3.14.1",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4137,16 +4032,16 @@
}
},
"node_modules/@react-aria/toggle": {
- "version": "3.12.1",
- "resolved": "https://registry.npmjs.org/@react-aria/toggle/-/toggle-3.12.1.tgz",
- "integrity": "sha512-XaFiRs1KEcIT6bTtVY/KTQxw4kinemj/UwXw2iJTu9XS43hhJ/9cvj8KzNGrKGqaxTpOYj62TnSHZbSiFViHDA==",
+ "version": "3.12.2",
+ "resolved": "https://registry.npmjs.org/@react-aria/toggle/-/toggle-3.12.2.tgz",
+ "integrity": "sha512-g25XLYqJuJpt0/YoYz2Rab8ax+hBfbssllcEFh0v0jiwfk2gwTWfRU9KAZUvxIqbV8Nm8EBmrYychDpDcvW1kw==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/toggle": "^3.9.1",
- "@react-types/checkbox": "^3.10.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/toggle": "^3.9.2",
+ "@react-types/checkbox": "^3.10.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4155,14 +4050,15 @@
}
},
"node_modules/@react-aria/toolbar": {
- "version": "3.0.0-beta.20",
- "resolved": "https://registry.npmjs.org/@react-aria/toolbar/-/toolbar-3.0.0-beta.20.tgz",
- "integrity": "sha512-Kxvqw+TpVOE/eSi8RAQ9xjBQ2uXe8KkRvlRNQWQsrzkZDkXhzqGfQuJnBmozFxqpzSLwaVqQajHFUSvPAScT8Q==",
+ "version": "3.0.0-beta.21",
+ "resolved": "https://registry.npmjs.org/@react-aria/toolbar/-/toolbar-3.0.0-beta.21.tgz",
+ "integrity": "sha512-yRCk/GD8g+BhdDgxd3I0a0c8Ni4Wyo6ERzfSoBkPkwQ4X2E2nkopmraM9D0fXw4UcIr4bnmvADzkHXtBN0XrBg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/focus": "^3.21.1",
- "@react-aria/i18n": "^3.12.12",
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/focus": "^3.21.2",
+ "@react-aria/i18n": "^3.12.13",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4171,15 +4067,16 @@
}
},
"node_modules/@react-aria/tooltip": {
- "version": "3.8.7",
- "resolved": "https://registry.npmjs.org/@react-aria/tooltip/-/tooltip-3.8.7.tgz",
- "integrity": "sha512-Aj7DPJYGZ9/+2ZfhkvbN7YMeA5qu4oy4LVQiMCpqNwcFzvhTAVhN7J7cS6KjA64fhd1shKm3BZ693Ez6lSpqwg==",
+ "version": "3.8.8",
+ "resolved": "https://registry.npmjs.org/@react-aria/tooltip/-/tooltip-3.8.8.tgz",
+ "integrity": "sha512-CmHUqtXtFWmG4AHMEr9hIVex+oscK6xcM2V47gq9ijNInxe3M6UBu/dBdkgGP/jYv9N7tzCAjTR8nNIHQXwvWw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-stately/tooltip": "^3.5.7",
- "@react-types/shared": "^3.32.0",
- "@react-types/tooltip": "^3.4.20",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-stately/tooltip": "^3.5.8",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/tooltip": "^3.4.21",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4188,15 +4085,15 @@
}
},
"node_modules/@react-aria/utils": {
- "version": "3.30.1",
- "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.30.1.tgz",
- "integrity": "sha512-zETcbDd6Vf9GbLndO6RiWJadIZsBU2MMm23rBACXLmpRztkrIqPEb2RVdlLaq1+GklDx0Ii6PfveVjx+8S5U6A==",
+ "version": "3.31.0",
+ "resolved": "https://registry.npmjs.org/@react-aria/utils/-/utils-3.31.0.tgz",
+ "integrity": "sha512-ABOzCsZrWzf78ysswmguJbx3McQUja7yeGj6/vZo4JVsZNlxAN+E9rs381ExBRI0KzVo6iBTeX5De8eMZPJXig==",
"license": "Apache-2.0",
"dependencies": {
"@react-aria/ssr": "^3.9.10",
"@react-stately/flags": "^3.1.2",
"@react-stately/utils": "^3.10.8",
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0",
"clsx": "^2.0.0"
},
@@ -4206,14 +4103,14 @@
}
},
"node_modules/@react-aria/visually-hidden": {
- "version": "3.8.27",
- "resolved": "https://registry.npmjs.org/@react-aria/visually-hidden/-/visually-hidden-3.8.27.tgz",
- "integrity": "sha512-hD1DbL3WnjPnCdlQjwe19bQVRAGJyN0Aaup+s7NNtvZUn7AjoEH78jo8TE+L8yM7z/OZUQF26laCfYqeIwWn4g==",
+ "version": "3.8.28",
+ "resolved": "https://registry.npmjs.org/@react-aria/visually-hidden/-/visually-hidden-3.8.28.tgz",
+ "integrity": "sha512-KRRjbVVob2CeBidF24dzufMxBveEUtUu7IM+hpdZKB+gxVROoh4XRLPv9SFmaH89Z7D9To3QoykVZoWD0lan6Q==",
"license": "Apache-2.0",
"dependencies": {
- "@react-aria/interactions": "^3.25.5",
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-aria/interactions": "^3.25.6",
+ "@react-aria/utils": "^3.31.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4222,9 +4119,9 @@
}
},
"node_modules/@react-router/dev": {
- "version": "7.9.3",
- "resolved": "https://registry.npmjs.org/@react-router/dev/-/dev-7.9.3.tgz",
- "integrity": "sha512-oPaO+OpvCo/rNTJrRipHSp31/K4It19PE5A24x21FlYlemPTe3fbGX/kyC2+8au/abXbvzNHfRbuIBD/rfojmA==",
+ "version": "7.10.1",
+ "resolved": "https://registry.npmjs.org/@react-router/dev/-/dev-7.10.1.tgz",
+ "integrity": "sha512-kap9O8rTN6b3vxjd+0SGjhm5vqiAZHMmOX1Hc7Y4KXRVVdusn+0+hxs44cDSfbW6Z6fCLw6GXXe0Kr+DJIRezw==",
"dev": true,
"dependencies": {
"@babel/core": "^7.27.7",
@@ -4234,8 +4131,7 @@
"@babel/preset-typescript": "^7.27.1",
"@babel/traverse": "^7.27.7",
"@babel/types": "^7.27.7",
- "@npmcli/package-json": "^4.0.1",
- "@react-router/node": "7.9.3",
+ "@react-router/node": "7.10.1",
"@remix-run/node-fetch-server": "^0.9.0",
"arg": "^5.0.1",
"babel-dead-code-elimination": "^1.0.6",
@@ -4246,13 +4142,15 @@
"isbot": "^5.1.11",
"jsesc": "3.0.2",
"lodash": "^4.17.21",
+ "p-map": "^7.0.3",
"pathe": "^1.1.2",
"picocolors": "^1.1.1",
+ "pkg-types": "^2.3.0",
"prettier": "^3.6.2",
"react-refresh": "^0.14.0",
"semver": "^7.3.7",
"tinyglobby": "^0.2.14",
- "valibot": "^0.41.0",
+ "valibot": "^1.2.0",
"vite-node": "^3.2.2"
},
"bin": {
@@ -4262,9 +4160,9 @@
"node": ">=20.0.0"
},
"peerDependencies": {
- "@react-router/serve": "^7.9.3",
+ "@react-router/serve": "^7.10.1",
"@vitejs/plugin-rsc": "*",
- "react-router": "^7.9.3",
+ "react-router": "^7.10.1",
"typescript": "^5.1.0",
"vite": "^5.1.0 || ^6.0.0 || ^7.0.0",
"wrangler": "^3.28.2 || ^4.0.0"
@@ -4298,9 +4196,9 @@
}
},
"node_modules/@react-router/node": {
- "version": "7.9.3",
- "resolved": "https://registry.npmjs.org/@react-router/node/-/node-7.9.3.tgz",
- "integrity": "sha512-+OvWxPPUgouOshw85QlG0J6yFJM0GMCCpXqPj38IcveeFLlP7ppOAEkOi7RBFrDvg7vSUtCEBDnsbuDCvxUPJg==",
+ "version": "7.10.1",
+ "resolved": "https://registry.npmjs.org/@react-router/node/-/node-7.10.1.tgz",
+ "integrity": "sha512-RLmjlR1zQu+ve8ibI0lu91pJrXGcmfkvsrQl7z/eTc5V5FZgl0OvQVWL5JDWBlBZyzdLMQQekUOX5WcPhCP1FQ==",
"dependencies": {
"@mjackson/node-fetch-server": "^0.2.0"
},
@@ -4308,7 +4206,7 @@
"node": ">=20.0.0"
},
"peerDependencies": {
- "react-router": "7.9.3",
+ "react-router": "7.10.1",
"typescript": "^5.1.0"
},
"peerDependenciesMeta": {
@@ -4318,13 +4216,13 @@
}
},
"node_modules/@react-router/serve": {
- "version": "7.9.3",
- "resolved": "https://registry.npmjs.org/@react-router/serve/-/serve-7.9.3.tgz",
- "integrity": "sha512-wtiDLo4sY3ouADXPm1xa4eg79zRXP517E0QcuBKPfoKh/40IcANTqN11VeEKNA9QgNxLeCm4CSY3dPbqePuwkA==",
+ "version": "7.10.1",
+ "resolved": "https://registry.npmjs.org/@react-router/serve/-/serve-7.10.1.tgz",
+ "integrity": "sha512-qYco7sFpbRgoKJKsCgJmFBQwaLVsLv255K8vbPodnXe13YBEzV/ugIqRCYVz2hghvlPiEKgaHh2On0s/5npn6w==",
"dependencies": {
"@mjackson/node-fetch-server": "^0.2.0",
- "@react-router/express": "7.9.3",
- "@react-router/node": "7.9.3",
+ "@react-router/express": "7.10.1",
+ "@react-router/node": "7.10.1",
"compression": "^1.7.4",
"express": "^4.19.2",
"get-port": "5.1.1",
@@ -4338,22 +4236,22 @@
"node": ">=20.0.0"
},
"peerDependencies": {
- "react-router": "7.9.3"
+ "react-router": "7.10.1"
}
},
"node_modules/@react-router/serve/node_modules/@react-router/express": {
- "version": "7.9.3",
- "resolved": "https://registry.npmjs.org/@react-router/express/-/express-7.9.3.tgz",
- "integrity": "sha512-XNVj/8AfecE1n61bXD41LqpXAixyWBpmBWkrzVA2iG+SrQOb+J6TjqZYEmZmoqJHuHmkOjt6/Iz1f81p93peGQ==",
+ "version": "7.10.1",
+ "resolved": "https://registry.npmjs.org/@react-router/express/-/express-7.10.1.tgz",
+ "integrity": "sha512-O7xjg6wWHfrsnPyVWgQG+tCamIE09SqLqtHwa1tAFzKPjcDpCw4S4+/OkJvNXLtBL60H3VhZ1r2OQgXBgGOMpw==",
"dependencies": {
- "@react-router/node": "7.9.3"
+ "@react-router/node": "7.10.1"
},
"engines": {
"node": ">=20.0.0"
},
"peerDependencies": {
"express": "^4.17.1 || ^5",
- "react-router": "7.9.3",
+ "react-router": "7.10.1",
"typescript": "^5.1.0"
},
"peerDependenciesMeta": {
@@ -4363,14 +4261,15 @@
}
},
"node_modules/@react-stately/calendar": {
- "version": "3.8.4",
- "resolved": "https://registry.npmjs.org/@react-stately/calendar/-/calendar-3.8.4.tgz",
- "integrity": "sha512-q9mq0ydOLS5vJoHLnYfSCS/vppfjbg0XHJlAoPR+w+WpYZF4wPP453SrlX9T1DbxCEYFTpcxcMk/O8SDW3miAw==",
+ "version": "3.9.0",
+ "resolved": "https://registry.npmjs.org/@react-stately/calendar/-/calendar-3.9.0.tgz",
+ "integrity": "sha512-U5Nf2kx9gDhJRxdDUm5gjfyUlt/uUfOvM1vDW2UA62cA6+2k2cavMLc2wNlXOb/twFtl6p0joYKHG7T4xnEFkg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
+ "@internationalized/date": "^3.10.0",
"@react-stately/utils": "^3.10.8",
- "@react-types/calendar": "^3.7.4",
- "@react-types/shared": "^3.32.0",
+ "@react-types/calendar": "^3.8.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4378,14 +4277,15 @@
}
},
"node_modules/@react-stately/checkbox": {
- "version": "3.7.1",
- "resolved": "https://registry.npmjs.org/@react-stately/checkbox/-/checkbox-3.7.1.tgz",
- "integrity": "sha512-ezfKRJsDuRCLtNoNOi9JXCp6PjffZWLZ/vENW/gbRDL8i46RKC/HpfJrJhvTPmsLYazxPC99Me9iq3v0VoNCsw==",
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/checkbox/-/checkbox-3.7.2.tgz",
+ "integrity": "sha512-j1ycUVz5JmqhaL6mDZgDNZqBilOB8PBW096sDPFaTtuYreDx2HOd1igxiIvwlvPESZwsJP7FVM3mYnaoXtpKPA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/form": "^3.2.1",
+ "@react-stately/form": "^3.2.2",
"@react-stately/utils": "^3.10.8",
- "@react-types/checkbox": "^3.10.1",
- "@react-types/shared": "^3.32.0",
+ "@react-types/checkbox": "^3.10.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4393,11 +4293,12 @@
}
},
"node_modules/@react-stately/collections": {
- "version": "3.12.7",
- "resolved": "https://registry.npmjs.org/@react-stately/collections/-/collections-3.12.7.tgz",
- "integrity": "sha512-0kQc0mI986GOCQHvRy4L0JQiotIK/KmEhR9Mu/6V0GoSdqg5QeUe4kyoNWj3bl03uQXme80v0L2jLHt+fOHHjA==",
+ "version": "3.12.8",
+ "resolved": "https://registry.npmjs.org/@react-stately/collections/-/collections-3.12.8.tgz",
+ "integrity": "sha512-AceJYLLXt1Y2XIcOPi6LEJSs4G/ubeYW3LqOCQbhfIgMaNqKfQMIfagDnPeJX9FVmPFSlgoCBxb1pTJW2vjCAQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4405,18 +4306,18 @@
}
},
"node_modules/@react-stately/combobox": {
- "version": "3.11.1",
- "resolved": "https://registry.npmjs.org/@react-stately/combobox/-/combobox-3.11.1.tgz",
- "integrity": "sha512-ZZh+SaAmddoY+MeJr470oDYA0nGaJm4xoHCBapaBA0JNakGC/wTzF/IRz3tKQT2VYK4rumr1BJLZQydGp7zzeg==",
- "dependencies": {
- "@react-stately/collections": "^3.12.7",
- "@react-stately/form": "^3.2.1",
- "@react-stately/list": "^3.13.0",
- "@react-stately/overlays": "^3.6.19",
- "@react-stately/select": "^3.7.1",
+ "version": "3.12.0",
+ "resolved": "https://registry.npmjs.org/@react-stately/combobox/-/combobox-3.12.0.tgz",
+ "integrity": "sha512-A6q9R/7cEa/qoQsBkdslXWvD7ztNLLQ9AhBhVN9QvzrmrH5B4ymUwcTU8lWl22ykH7RRwfonLeLXJL4C+/L2oQ==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/form": "^3.2.2",
+ "@react-stately/list": "^3.13.1",
+ "@react-stately/overlays": "^3.6.20",
"@react-stately/utils": "^3.10.8",
- "@react-types/combobox": "^3.13.8",
- "@react-types/shared": "^3.32.0",
+ "@react-types/combobox": "^3.13.9",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4424,17 +4325,18 @@
}
},
"node_modules/@react-stately/datepicker": {
- "version": "3.15.1",
- "resolved": "https://registry.npmjs.org/@react-stately/datepicker/-/datepicker-3.15.1.tgz",
- "integrity": "sha512-t64iYPms9y+MEQgOAu0XUHccbEXWVUWBHJWnYvAmILCHY8ZAOeSPAT1g4v9nzyiApcflSNXgpsvbs9BBEsrWww==",
+ "version": "3.15.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/datepicker/-/datepicker-3.15.2.tgz",
+ "integrity": "sha512-S5GL+W37chvV8knv9v0JRv0L6hKo732qqabCCHXzOpYxkLIkV4f/y3cHdEzFWzpZ0O0Gkg7WgeYo160xOdBKYg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
+ "@internationalized/date": "^3.10.0",
"@internationalized/string": "^3.2.7",
- "@react-stately/form": "^3.2.1",
- "@react-stately/overlays": "^3.6.19",
+ "@react-stately/form": "^3.2.2",
+ "@react-stately/overlays": "^3.6.20",
"@react-stately/utils": "^3.10.8",
- "@react-types/datepicker": "^3.13.1",
- "@react-types/shared": "^3.32.0",
+ "@react-types/datepicker": "^3.13.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4451,11 +4353,12 @@
}
},
"node_modules/@react-stately/form": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/@react-stately/form/-/form-3.2.1.tgz",
- "integrity": "sha512-btgOPXkwvd6fdWKoepy5Ue43o2932OSkQxozsR7US1ffFLcQc3SNlADHaRChIXSG8ffPo9t0/Sl4eRzaKu3RgQ==",
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/form/-/form-3.2.2.tgz",
+ "integrity": "sha512-soAheOd7oaTO6eNs6LXnfn0tTqvOoe3zN9FvtIhhrErKz9XPc5sUmh3QWwR45+zKbitOi1HOjfA/gifKhZcfWw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4463,14 +4366,15 @@
}
},
"node_modules/@react-stately/grid": {
- "version": "3.11.5",
- "resolved": "https://registry.npmjs.org/@react-stately/grid/-/grid-3.11.5.tgz",
- "integrity": "sha512-4cNjGYaNkcVS2wZoNHUrMRICBpkHStYw57EVemP7MjiWEVu53kzPgR1Iwmti2WFCpi1Lwu0qWNeCfzKpXW4BTg==",
+ "version": "3.11.6",
+ "resolved": "https://registry.npmjs.org/@react-stately/grid/-/grid-3.11.6.tgz",
+ "integrity": "sha512-vWPAkzpeTIsrurHfMubzMuqEw7vKzFhIJeEK5sEcLunyr1rlADwTzeWrHNbPMl66NAIAi70Dr1yNq+kahQyvMA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/collections": "^3.12.7",
- "@react-stately/selection": "^3.20.5",
- "@react-types/grid": "^3.3.5",
- "@react-types/shared": "^3.32.0",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/selection": "^3.20.6",
+ "@react-types/grid": "^3.3.6",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4478,14 +4382,15 @@
}
},
"node_modules/@react-stately/list": {
- "version": "3.13.0",
- "resolved": "https://registry.npmjs.org/@react-stately/list/-/list-3.13.0.tgz",
- "integrity": "sha512-Panv8TmaY8lAl3R7CRhyUadhf2yid6VKsRDBCBB1FHQOOeL7lqIraz/oskvpabZincuaIUWqQhqYslC4a6dvuA==",
+ "version": "3.13.1",
+ "resolved": "https://registry.npmjs.org/@react-stately/list/-/list-3.13.1.tgz",
+ "integrity": "sha512-eHaoauh21twbcl0kkwULhVJ+CzYcy1jUjMikNVMHOQdhr4WIBdExf7PmSgKHKqsSPhpGg6IpTCY2dUX3RycjDg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/collections": "^3.12.7",
- "@react-stately/selection": "^3.20.5",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/selection": "^3.20.6",
"@react-stately/utils": "^3.10.8",
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4493,13 +4398,14 @@
}
},
"node_modules/@react-stately/menu": {
- "version": "3.9.7",
- "resolved": "https://registry.npmjs.org/@react-stately/menu/-/menu-3.9.7.tgz",
- "integrity": "sha512-mfz1YoCgtje61AGxVdQaAFLlOXt9vV5dd1lQljYUPRafA/qu5Ursz4fNVlcavWW9GscebzFQErx+y0oSP7EUtQ==",
+ "version": "3.9.8",
+ "resolved": "https://registry.npmjs.org/@react-stately/menu/-/menu-3.9.8.tgz",
+ "integrity": "sha512-bo0NOhofnTHLESiYfsSSw6gyXiPVJJ0UlN2igUXtJk5PmyhWjFzUzTzcnd7B028OB0si9w3LIWM3stqz5271Eg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/overlays": "^3.6.19",
- "@react-types/menu": "^3.10.4",
- "@react-types/shared": "^3.32.0",
+ "@react-stately/overlays": "^3.6.20",
+ "@react-types/menu": "^3.10.5",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4507,14 +4413,15 @@
}
},
"node_modules/@react-stately/numberfield": {
- "version": "3.10.1",
- "resolved": "https://registry.npmjs.org/@react-stately/numberfield/-/numberfield-3.10.1.tgz",
- "integrity": "sha512-lXABmcTneVvXYMGTgZvTCr4E+upOi7VRLL50ZzTMJqHwB/qlEQPAam3dmddQRwIsuCM3MEnL7bSZFFlSYAtkEw==",
+ "version": "3.10.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/numberfield/-/numberfield-3.10.2.tgz",
+ "integrity": "sha512-jlKVFYaH3RX5KvQ7a+SAMQuPccZCzxLkeYkBE64u1Zvi7YhJ8hkTMHG/fmZMbk1rHlseE2wfBdk0Rlya3MvoNQ==",
+ "license": "Apache-2.0",
"dependencies": {
"@internationalized/number": "^3.6.5",
- "@react-stately/form": "^3.2.1",
+ "@react-stately/form": "^3.2.2",
"@react-stately/utils": "^3.10.8",
- "@react-types/numberfield": "^3.8.14",
+ "@react-types/numberfield": "^3.8.15",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4522,13 +4429,13 @@
}
},
"node_modules/@react-stately/overlays": {
- "version": "3.6.19",
- "resolved": "https://registry.npmjs.org/@react-stately/overlays/-/overlays-3.6.19.tgz",
- "integrity": "sha512-swZXfDvxTYd7tKEpijEHBFFaEmbbnCvEhGlmrAz4K72cuRR9O5u+lcla8y1veGBbBSzrIdKNdBoIIJ+qQH+1TQ==",
+ "version": "3.6.20",
+ "resolved": "https://registry.npmjs.org/@react-stately/overlays/-/overlays-3.6.20.tgz",
+ "integrity": "sha512-YAIe+uI8GUXX8F/0Pzr53YeC5c/bjqbzDFlV8NKfdlCPa6+Jp4B/IlYVjIooBj9+94QvbQdjylegvYWK/iPwlg==",
"license": "Apache-2.0",
"dependencies": {
"@react-stately/utils": "^3.10.8",
- "@react-types/overlays": "^3.9.1",
+ "@react-types/overlays": "^3.9.2",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4536,30 +4443,15 @@
}
},
"node_modules/@react-stately/radio": {
- "version": "3.11.1",
- "resolved": "https://registry.npmjs.org/@react-stately/radio/-/radio-3.11.1.tgz",
- "integrity": "sha512-ld9KWztI64gssg7zSZi9li21sG85Exb+wFPXtCim1TtpnEpmRtB05pXDDS3xkkIU/qOL4eMEnnLO7xlNm0CRIA==",
+ "version": "3.11.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/radio/-/radio-3.11.2.tgz",
+ "integrity": "sha512-UM7L6AW+k8edhSBUEPZAqiWNRNadfOKK7BrCXyBiG79zTz0zPcXRR+N+gzkDn7EMSawDeyK1SHYUuoSltTactg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/form": "^3.2.1",
+ "@react-stately/form": "^3.2.2",
"@react-stately/utils": "^3.10.8",
- "@react-types/radio": "^3.9.1",
- "@react-types/shared": "^3.32.0",
- "@swc/helpers": "^0.5.0"
- },
- "peerDependencies": {
- "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
- }
- },
- "node_modules/@react-stately/select": {
- "version": "3.7.1",
- "resolved": "https://registry.npmjs.org/@react-stately/select/-/select-3.7.1.tgz",
- "integrity": "sha512-vZt4j9yVyOTWWJoP9plXmYaPZH2uMxbjcGMDbiShwsFiK8C2m9b3Cvy44TZehfzCWzpMVR/DYxEYuonEIGA82Q==",
- "dependencies": {
- "@react-stately/form": "^3.2.1",
- "@react-stately/list": "^3.13.0",
- "@react-stately/overlays": "^3.6.19",
- "@react-types/select": "^3.10.1",
- "@react-types/shared": "^3.32.0",
+ "@react-types/radio": "^3.9.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4567,13 +4459,14 @@
}
},
"node_modules/@react-stately/selection": {
- "version": "3.20.5",
- "resolved": "https://registry.npmjs.org/@react-stately/selection/-/selection-3.20.5.tgz",
- "integrity": "sha512-YezWUNEn2pz5mQlbhmngiX9HqQsruLSXlkrAzB1DD6aliGrUvPKufTTGCixOaB8KVeCamdiFAgx1WomNplzdQA==",
+ "version": "3.20.6",
+ "resolved": "https://registry.npmjs.org/@react-stately/selection/-/selection-3.20.6.tgz",
+ "integrity": "sha512-a0bjuP2pJYPKEiedz2Us1W1aSz0iHRuyeQEdBOyL6Z6VUa6hIMq9H60kvseir2T85cOa4QggizuRV7mcO6bU5w==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/collections": "^3.12.7",
+ "@react-stately/collections": "^3.12.8",
"@react-stately/utils": "^3.10.8",
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4581,13 +4474,14 @@
}
},
"node_modules/@react-stately/slider": {
- "version": "3.7.1",
- "resolved": "https://registry.npmjs.org/@react-stately/slider/-/slider-3.7.1.tgz",
- "integrity": "sha512-J+G18m1bZBCNQSXhxGd4GNGDUVonv4Sg7fZL+uLhXUy1x71xeJfFdKaviVvZcggtl0/q5InW41PXho7EouMDEg==",
+ "version": "3.7.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/slider/-/slider-3.7.2.tgz",
+ "integrity": "sha512-EVBHUdUYwj++XqAEiQg2fGi8Reccznba0uyQ3gPejF0pAc390Q/J5aqiTEDfiCM7uJ6WHxTM6lcCqHQBISk2dQ==",
+ "license": "Apache-2.0",
"dependencies": {
"@react-stately/utils": "^3.10.8",
- "@react-types/shared": "^3.32.0",
- "@react-types/slider": "^3.8.1",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/slider": "^3.8.2",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4595,18 +4489,19 @@
}
},
"node_modules/@react-stately/table": {
- "version": "3.15.0",
- "resolved": "https://registry.npmjs.org/@react-stately/table/-/table-3.15.0.tgz",
- "integrity": "sha512-KbvkrVF3sb25IPwyte9JcG5/4J7TgjHSsw7D61d/T/oUFMYPYVeolW9/2y+6u48WPkDJE8HJsurme+HbTN0FQA==",
+ "version": "3.15.1",
+ "resolved": "https://registry.npmjs.org/@react-stately/table/-/table-3.15.1.tgz",
+ "integrity": "sha512-MhMAgE/LgAzHcAn1P3p/nQErzJ6DiixSJ1AOt2JlnAKEb5YJg4ATKWCb2IjBLwywt9ZCzfm3KMUzkctZqAoxwA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/collections": "^3.12.7",
+ "@react-stately/collections": "^3.12.8",
"@react-stately/flags": "^3.1.2",
- "@react-stately/grid": "^3.11.5",
- "@react-stately/selection": "^3.20.5",
+ "@react-stately/grid": "^3.11.6",
+ "@react-stately/selection": "^3.20.6",
"@react-stately/utils": "^3.10.8",
- "@react-types/grid": "^3.3.5",
- "@react-types/shared": "^3.32.0",
- "@react-types/table": "^3.13.3",
+ "@react-types/grid": "^3.3.6",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/table": "^3.13.4",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4614,13 +4509,14 @@
}
},
"node_modules/@react-stately/tabs": {
- "version": "3.8.5",
- "resolved": "https://registry.npmjs.org/@react-stately/tabs/-/tabs-3.8.5.tgz",
- "integrity": "sha512-gdeI+NUH3hfqrxkJQSZkt+Zw4G2DrYJRloq/SGxu/9Bu5QD/U0psU2uqxQNtavW5qTChFK+D30rCPXpKlslWAA==",
+ "version": "3.8.6",
+ "resolved": "https://registry.npmjs.org/@react-stately/tabs/-/tabs-3.8.6.tgz",
+ "integrity": "sha512-9RYxmgjVIxUpIsGKPIF7uRoHWOEz8muwaYiStCVeyiYBPmarvZoIYtTXcwSMN/vEs7heVN5uGCL6/bfdY4+WiA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/list": "^3.13.0",
- "@react-types/shared": "^3.32.0",
- "@react-types/tabs": "^3.3.18",
+ "@react-stately/list": "^3.13.1",
+ "@react-types/shared": "^3.32.1",
+ "@react-types/tabs": "^3.3.19",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4631,6 +4527,7 @@
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/@react-stately/toast/-/toast-3.1.2.tgz",
"integrity": "sha512-HiInm7bck32khFBHZThTQaAF6e6/qm57F4mYRWdTq8IVeGDzpkbUYibnLxRhk0UZ5ybc6me+nqqPkG/lVmM42Q==",
+ "license": "Apache-2.0",
"dependencies": {
"@swc/helpers": "^0.5.0",
"use-sync-external-store": "^1.4.0"
@@ -4640,14 +4537,14 @@
}
},
"node_modules/@react-stately/toggle": {
- "version": "3.9.1",
- "resolved": "https://registry.npmjs.org/@react-stately/toggle/-/toggle-3.9.1.tgz",
- "integrity": "sha512-L6yUdE8xZfQhw4aEFZduF8u4v0VrpYrwWEA4Tu/4qwGIPukH0wd2W21Zpw+vAiLOaDKnxel1nXX68MWnm4QXpw==",
+ "version": "3.9.2",
+ "resolved": "https://registry.npmjs.org/@react-stately/toggle/-/toggle-3.9.2.tgz",
+ "integrity": "sha512-dOxs9wrVXHUmA7lc8l+N9NbTJMAaXcYsnNGsMwfXIXQ3rdq+IjWGNYJ52UmNQyRYFcg0jrzRrU16TyGbNjOdNQ==",
"license": "Apache-2.0",
"dependencies": {
"@react-stately/utils": "^3.10.8",
- "@react-types/checkbox": "^3.10.1",
- "@react-types/shared": "^3.32.0",
+ "@react-types/checkbox": "^3.10.2",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4655,12 +4552,13 @@
}
},
"node_modules/@react-stately/tooltip": {
- "version": "3.5.7",
- "resolved": "https://registry.npmjs.org/@react-stately/tooltip/-/tooltip-3.5.7.tgz",
- "integrity": "sha512-GYh764BcYZz+Lclyutyir5I3elNo+vVNYzeNOKmPGZCE3p5B+/8lgZAHKxnRc9qmBlxvofnhMcuQxAPlBhoEkw==",
+ "version": "3.5.8",
+ "resolved": "https://registry.npmjs.org/@react-stately/tooltip/-/tooltip-3.5.8.tgz",
+ "integrity": "sha512-gkcUx2ROhCiGNAYd2BaTejakXUUNLPnnoJ5+V/mN480pN+OrO8/2V9pqb/IQmpqxLsso93zkM3A4wFHHLBBmPQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/overlays": "^3.6.19",
- "@react-types/tooltip": "^3.4.20",
+ "@react-stately/overlays": "^3.6.20",
+ "@react-types/tooltip": "^3.4.21",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4668,14 +4566,15 @@
}
},
"node_modules/@react-stately/tree": {
- "version": "3.9.2",
- "resolved": "https://registry.npmjs.org/@react-stately/tree/-/tree-3.9.2.tgz",
- "integrity": "sha512-jsT1WZZhb7GRmg1iqoib9bULsilIK5KhbE8WrcfIml8NYr4usP4DJMcIYfRuiRtPLhKtUvHSoZ5CMbinPp8PUQ==",
+ "version": "3.9.3",
+ "resolved": "https://registry.npmjs.org/@react-stately/tree/-/tree-3.9.3.tgz",
+ "integrity": "sha512-ZngG79nLFxE/GYmpwX6E/Rma2MMkzdoJPRI3iWk3dgqnGMMzpPnUp/cvjDsU3UHF7xDVusC5BT6pjWN0uxCIFQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-stately/collections": "^3.12.7",
- "@react-stately/selection": "^3.20.5",
+ "@react-stately/collections": "^3.12.8",
+ "@react-stately/selection": "^3.20.6",
"@react-stately/utils": "^3.10.8",
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4695,12 +4594,12 @@
}
},
"node_modules/@react-stately/virtualizer": {
- "version": "4.4.3",
- "resolved": "https://registry.npmjs.org/@react-stately/virtualizer/-/virtualizer-4.4.3.tgz",
- "integrity": "sha512-kk6ZyMtOT51kZYGUjUhbgEdRBp/OR3WD+Vj9kFoCa1vbY+fGzbpcnjsvR2LDZuEq8W45ruOvdr1c7HRJG4gWxA==",
+ "version": "4.4.4",
+ "resolved": "https://registry.npmjs.org/@react-stately/virtualizer/-/virtualizer-4.4.4.tgz",
+ "integrity": "sha512-ri8giqXSZOrznZDCCOE4U36wSkOhy+hrFK7yo/YVcpxTqqp3d3eisfKMqbDsgqBW+XTHycTU/xeAf0u9NqrfpQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-aria/utils": "^3.30.1",
- "@react-types/shared": "^3.32.0",
+ "@react-types/shared": "^3.32.1",
"@swc/helpers": "^0.5.0"
},
"peerDependencies": {
@@ -4712,6 +4611,7 @@
"version": "3.0.0-alpha.26",
"resolved": "https://registry.npmjs.org/@react-types/accordion/-/accordion-3.0.0-alpha.26.tgz",
"integrity": "sha512-OXf/kXcD2vFlEnkcZy/GG+a/1xO9BN7Uh3/5/Ceuj9z2E/WwD55YwU3GFM5zzkZ4+DMkdowHnZX37XnmbyD3Mg==",
+ "license": "Apache-2.0",
"dependencies": {
"@react-types/shared": "^3.27.0"
},
@@ -4720,278 +4620,282 @@
}
},
"node_modules/@react-types/breadcrumbs": {
- "version": "3.7.16",
- "resolved": "https://registry.npmjs.org/@react-types/breadcrumbs/-/breadcrumbs-3.7.16.tgz",
- "integrity": "sha512-4J+7b9y6z8QGZqvsBSWQfebx6aIbc+1unQqnZCAlJl9EGzlI6SGdXRsURGkOUGJCV2GqY8bSocc8AZbRXpQ0XQ==",
+ "version": "3.7.17",
+ "resolved": "https://registry.npmjs.org/@react-types/breadcrumbs/-/breadcrumbs-3.7.17.tgz",
+ "integrity": "sha512-IhvVTcfli5o/UDlGACXxjlor2afGlMQA8pNR3faH0bBUay1Fmm3IWktVw9Xwmk+KraV2RTAg9e+E6p8DOQZfiw==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/link": "^3.6.4",
- "@react-types/shared": "^3.32.0"
+ "@react-types/link": "^3.6.5",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/button": {
- "version": "3.14.0",
- "resolved": "https://registry.npmjs.org/@react-types/button/-/button-3.14.0.tgz",
- "integrity": "sha512-pXt1a+ElxiZyWpX0uznyjy5Z6EHhYxPcaXpccZXyn6coUo9jmCbgg14xR7Odo+JcbfaaISzZTDO7oGLVTcHnpA==",
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/@react-types/button/-/button-3.14.1.tgz",
+ "integrity": "sha512-D8C4IEwKB7zEtiWYVJ3WE/5HDcWlze9mLWQ5hfsBfpePyWCgO3bT/+wjb/7pJvcAocrkXo90QrMm85LcpBtrpg==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/calendar": {
- "version": "3.7.4",
- "resolved": "https://registry.npmjs.org/@react-types/calendar/-/calendar-3.7.4.tgz",
- "integrity": "sha512-MZDyXtvdHl8CKQGYBkjYwc4ABBq6Mb4Fu7k/4boQAmMQ5Rtz29ouBCJrAs0BpR14B8ZMGzoNIolxS5RLKBmFSA==",
+ "version": "3.8.0",
+ "resolved": "https://registry.npmjs.org/@react-types/calendar/-/calendar-3.8.0.tgz",
+ "integrity": "sha512-ZDZgfZgbz1ydWOFs1mH7QFfX3ioJrmb3Y/lkoubQE0HWXLZzyYNvhhKyFJRS1QJ40IofLSBHriwbQb/tsUnGlw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
- "@react-types/shared": "^3.32.0"
+ "@internationalized/date": "^3.10.0",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/checkbox": {
- "version": "3.10.1",
- "resolved": "https://registry.npmjs.org/@react-types/checkbox/-/checkbox-3.10.1.tgz",
- "integrity": "sha512-8ZqBoGBxtn6U/znpmyutGtBBaafUzcZnbuvYjwyRSONTrqQ0IhUq6jI/jbnE9r9SslIkbMB8IS1xRh2e63qmEQ==",
+ "version": "3.10.2",
+ "resolved": "https://registry.npmjs.org/@react-types/checkbox/-/checkbox-3.10.2.tgz",
+ "integrity": "sha512-ktPkl6ZfIdGS1tIaGSU/2S5Agf2NvXI9qAgtdMDNva0oLyAZ4RLQb6WecPvofw1J7YKXu0VA5Mu7nlX+FM2weQ==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/combobox": {
- "version": "3.13.8",
- "resolved": "https://registry.npmjs.org/@react-types/combobox/-/combobox-3.13.8.tgz",
- "integrity": "sha512-HGC3X9hmDRsjSZcFiflvJ7vbIgQ2gX/ZDxo1HVtvQqUDbgQCVakCcCdrB44aYgHFnyDiO6hyp7Y7jXtDBaEIIA==",
+ "version": "3.13.9",
+ "resolved": "https://registry.npmjs.org/@react-types/combobox/-/combobox-3.13.9.tgz",
+ "integrity": "sha512-G6GmLbzVkLW6VScxPAr/RtliEyPhBClfYaIllK1IZv+Z42SVnOpKzhnoe79BpmiFqy1AaC3+LjZX783mrsHCwA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/datepicker": {
- "version": "3.13.1",
- "resolved": "https://registry.npmjs.org/@react-types/datepicker/-/datepicker-3.13.1.tgz",
- "integrity": "sha512-ub+g5pS3WOo5P/3FRNsQSwvlb9CuLl2m6v6KBkRXc5xqKhFd7UjvVpL6Oi/1zwwfow4itvD1t7l1XxgCo7wZ6Q==",
+ "version": "3.13.2",
+ "resolved": "https://registry.npmjs.org/@react-types/datepicker/-/datepicker-3.13.2.tgz",
+ "integrity": "sha512-+M6UZxJnejYY8kz0spbY/hP08QJ5rsZ3aNarRQQHc48xV2oelFLX5MhAqizfLEsvyfb0JYrhWoh4z1xZtAmYCg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@internationalized/date": "^3.9.0",
- "@react-types/calendar": "^3.7.4",
- "@react-types/overlays": "^3.9.1",
- "@react-types/shared": "^3.32.0"
+ "@internationalized/date": "^3.10.0",
+ "@react-types/calendar": "^3.8.0",
+ "@react-types/overlays": "^3.9.2",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/dialog": {
- "version": "3.5.21",
- "resolved": "https://registry.npmjs.org/@react-types/dialog/-/dialog-3.5.21.tgz",
- "integrity": "sha512-jF1gN4bvwYamsLjefaFDnaSKxTa3Wtvn5f7WLjNVZ8ICVoiMBMdUJXTlPQHAL4YWqtCj4hK/3uimR1E+Pwd7Xw==",
+ "version": "3.5.22",
+ "resolved": "https://registry.npmjs.org/@react-types/dialog/-/dialog-3.5.22.tgz",
+ "integrity": "sha512-smSvzOcqKE196rWk0oqJDnz+ox5JM5+OT0PmmJXiUD4q7P5g32O6W5Bg7hMIFUI9clBtngo8kLaX2iMg+GqAzg==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/overlays": "^3.9.1",
- "@react-types/shared": "^3.32.0"
+ "@react-types/overlays": "^3.9.2",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/form": {
- "version": "3.7.15",
- "resolved": "https://registry.npmjs.org/@react-types/form/-/form-3.7.15.tgz",
- "integrity": "sha512-a7C1RXgMpHX9b1x/+h5YCOJL/2/Ojw9ErOJhLwUWzKUu5JWpQYf8JsXNsuMSndo4YBaiH/7bXFmg09cllHUmow==",
- "dependencies": {
- "@react-types/shared": "^3.32.0"
+ "version": "3.7.16",
+ "resolved": "https://registry.npmjs.org/@react-types/form/-/form-3.7.16.tgz",
+ "integrity": "sha512-Sb7KJoWEaQ/e4XIY+xRbjKvbP1luome98ZXevpD+zVSyGjEcfIroebizP6K1yMHCWP/043xH6GUkgEqWPoVGjg==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/grid": {
- "version": "3.3.5",
- "resolved": "https://registry.npmjs.org/@react-types/grid/-/grid-3.3.5.tgz",
- "integrity": "sha512-hG6J2KDfmOHitkWoCa/9DvY1nTO2wgMIApcFoqLv7AWJr9CzvVqo5tIhZZCXiT1AvU2kafJxu9e7sr5GxAT2YA==",
+ "version": "3.3.6",
+ "resolved": "https://registry.npmjs.org/@react-types/grid/-/grid-3.3.6.tgz",
+ "integrity": "sha512-vIZJlYTii2n1We9nAugXwM2wpcpsC6JigJFBd6vGhStRdRWRoU4yv1Gc98Usbx0FQ/J7GLVIgeG8+1VMTKBdxw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/link": {
- "version": "3.6.4",
- "resolved": "https://registry.npmjs.org/@react-types/link/-/link-3.6.4.tgz",
- "integrity": "sha512-eLpIgOPf7GW4DpdMq8UqiRJkriend1kWglz5O9qU+/FM6COtvRnQkEeRhHICUaU2NZUvMRQ30KaGUo3eeZ6b+g==",
+ "version": "3.6.5",
+ "resolved": "https://registry.npmjs.org/@react-types/link/-/link-3.6.5.tgz",
+ "integrity": "sha512-+I2s3XWBEvLrzts0GnNeA84mUkwo+a7kLUWoaJkW0TOBDG7my95HFYxF9WnqKye7NgpOkCqz4s3oW96xPdIniQ==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/listbox": {
- "version": "3.7.3",
- "resolved": "https://registry.npmjs.org/@react-types/listbox/-/listbox-3.7.3.tgz",
- "integrity": "sha512-ONgror9uyGmIer5XxpRRNcc8QFVWiOzINrMKyaS8G4l3aP52ZwYpRfwMAVtra8lkVNvXDmO7hthPZkB6RYdNOA==",
+ "version": "3.7.4",
+ "resolved": "https://registry.npmjs.org/@react-types/listbox/-/listbox-3.7.4.tgz",
+ "integrity": "sha512-p4YEpTl/VQGrqVE8GIfqTS5LkT5jtjDTbVeZgrkPnX/fiPhsfbTPiZ6g0FNap4+aOGJFGEEZUv2q4vx+rCORww==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/menu": {
- "version": "3.10.4",
- "resolved": "https://registry.npmjs.org/@react-types/menu/-/menu-3.10.4.tgz",
- "integrity": "sha512-jCFVShLq3eASiuznenjoKBv3j0Jy2KQilAjBxdEp56WkZ5D338y/oY5zR6d25u9M0QslpI0DgwC8BwU7MCsPnw==",
+ "version": "3.10.5",
+ "resolved": "https://registry.npmjs.org/@react-types/menu/-/menu-3.10.5.tgz",
+ "integrity": "sha512-HBTrKll2hm0VKJNM4ubIv1L9MNo8JuOnm2G3M+wXvb6EYIyDNxxJkhjsqsGpUXJdAOSkacHBDcNh2HsZABNX4A==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/overlays": "^3.9.1",
- "@react-types/shared": "^3.32.0"
+ "@react-types/overlays": "^3.9.2",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/numberfield": {
- "version": "3.8.14",
- "resolved": "https://registry.npmjs.org/@react-types/numberfield/-/numberfield-3.8.14.tgz",
- "integrity": "sha512-tlGEHJyeQSMlUoO4g9ekoELGJcqsjc/+/FAxo6YQMhQSkuIdkUKZg3UEBKzif4hLw787u80e1D0SxPUi3KO2oA==",
+ "version": "3.8.15",
+ "resolved": "https://registry.npmjs.org/@react-types/numberfield/-/numberfield-3.8.15.tgz",
+ "integrity": "sha512-97r92D23GKCOjGIGMeW9nt+/KlfM3GeWH39Czcmd2/D5y3k6z4j0avbsfx2OttCtJszrnENjw3GraYGYI2KosQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/overlays": {
- "version": "3.9.1",
- "resolved": "https://registry.npmjs.org/@react-types/overlays/-/overlays-3.9.1.tgz",
- "integrity": "sha512-UCG3TOu8FLk4j0Pr1nlhv0opcwMoqbGEOUvsSr6ITN6Qs2y0j+KYSYQ7a4+04m3dN//8+9Wjkkid8k+V1dV2CA==",
+ "version": "3.9.2",
+ "resolved": "https://registry.npmjs.org/@react-types/overlays/-/overlays-3.9.2.tgz",
+ "integrity": "sha512-Q0cRPcBGzNGmC8dBuHyoPR7N3057KTS5g+vZfQ53k8WwmilXBtemFJPLsogJbspuewQ/QJ3o2HYsp2pne7/iNw==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/progress": {
- "version": "3.5.15",
- "resolved": "https://registry.npmjs.org/@react-types/progress/-/progress-3.5.15.tgz",
- "integrity": "sha512-3SYvEyRt7vq7w0sc6wBYmkPqLMZbhH8FI3Lrnn9r3y8+69/efRjVmmJvwjm1z+c6rukszc2gCjUGTsMPQxVk2w==",
+ "version": "3.5.16",
+ "resolved": "https://registry.npmjs.org/@react-types/progress/-/progress-3.5.16.tgz",
+ "integrity": "sha512-I9tSdCFfvQ7gHJtm90VAKgwdTWXQgVNvLRStEc0z9h+bXBxdvZb+QuiRPERChwFQ9VkK4p4rDqaFo69nDqWkpw==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/radio": {
- "version": "3.9.1",
- "resolved": "https://registry.npmjs.org/@react-types/radio/-/radio-3.9.1.tgz",
- "integrity": "sha512-DUCN3msm8QZ0MJrP55FmqMONaadYq6JTxihYFGMLP+NoKRnkxvXqNZ2PlkAOLGy3y4RHOnOF8O1LuJqFCCuxDw==",
- "dependencies": {
- "@react-types/shared": "^3.32.0"
- },
- "peerDependencies": {
- "react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
- }
- },
- "node_modules/@react-types/select": {
- "version": "3.10.1",
- "resolved": "https://registry.npmjs.org/@react-types/select/-/select-3.10.1.tgz",
- "integrity": "sha512-teANUr1byOzGsS/r2j7PatV470JrOhKP8En9lscfnqW5CeUghr+0NxkALnPkiEhCObi/Vu8GIcPareD0HNhtFA==",
+ "version": "3.9.2",
+ "resolved": "https://registry.npmjs.org/@react-types/radio/-/radio-3.9.2.tgz",
+ "integrity": "sha512-3UcJXu37JrTkRyP4GJPDBU7NmDTInrEdOe+bVzA1j4EegzdkJmLBkLg5cLDAbpiEHB+xIsvbJdx6dxeMuc+H3g==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/shared": {
- "version": "3.32.0",
- "resolved": "https://registry.npmjs.org/@react-types/shared/-/shared-3.32.0.tgz",
- "integrity": "sha512-t+cligIJsZYFMSPFMvsJMjzlzde06tZMOIOFa1OV5Z0BcMowrb2g4mB57j/9nP28iJIRYn10xCniQts+qadrqQ==",
+ "version": "3.32.1",
+ "resolved": "https://registry.npmjs.org/@react-types/shared/-/shared-3.32.1.tgz",
+ "integrity": "sha512-famxyD5emrGGpFuUlgOP6fVW2h/ZaF405G5KDi3zPHzyjAWys/8W6NAVJtNbkCkhedmvL0xOhvt8feGXyXaw5w==",
"license": "Apache-2.0",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/slider": {
- "version": "3.8.1",
- "resolved": "https://registry.npmjs.org/@react-types/slider/-/slider-3.8.1.tgz",
- "integrity": "sha512-WxiQWj6iQr5Uft0/KcB9XSr361XnyTmL6eREZZacngA9CjPhRWYP3BRDPcCTuP7fj9Yi4QKMrryyjHqMHP8OKQ==",
+ "version": "3.8.2",
+ "resolved": "https://registry.npmjs.org/@react-types/slider/-/slider-3.8.2.tgz",
+ "integrity": "sha512-MQYZP76OEOYe7/yA2To+Dl0LNb0cKKnvh5JtvNvDnAvEprn1RuLiay8Oi/rTtXmc2KmBa4VdTcsXsmkbbkeN2Q==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/switch": {
- "version": "3.5.14",
- "resolved": "https://registry.npmjs.org/@react-types/switch/-/switch-3.5.14.tgz",
- "integrity": "sha512-M8kIv97i+ejCel4Ho+Y7tDbpOehymGwPA4ChxibeyD32+deyxu5B6BXxgKiL3l+oTLQ8ihLo3sRESdPFw8vpQg==",
+ "version": "3.5.15",
+ "resolved": "https://registry.npmjs.org/@react-types/switch/-/switch-3.5.15.tgz",
+ "integrity": "sha512-r/ouGWQmIeHyYSP1e5luET+oiR7N7cLrAlWsrAfYRWHxqXOSNQloQnZJ3PLHrKFT02fsrQhx2rHaK2LfKeyN3A==",
"license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/table": {
- "version": "3.13.3",
- "resolved": "https://registry.npmjs.org/@react-types/table/-/table-3.13.3.tgz",
- "integrity": "sha512-/kY/VlXN+8l9saySd6igcsDQ3x8pOVFJAWyMh6gOaOVN7HOJkTMIchmqS+ATa4nege8jZqcdzyGeAmv7mN655A==",
+ "version": "3.13.4",
+ "resolved": "https://registry.npmjs.org/@react-types/table/-/table-3.13.4.tgz",
+ "integrity": "sha512-I/DYiZQl6aNbMmjk90J9SOhkzVDZvyA3Vn3wMWCiajkMNjvubFhTfda5DDf2SgFP5l0Yh6TGGH5XumRv9LqL5Q==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/grid": "^3.3.5",
- "@react-types/shared": "^3.32.0"
+ "@react-types/grid": "^3.3.6",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/tabs": {
- "version": "3.3.18",
- "resolved": "https://registry.npmjs.org/@react-types/tabs/-/tabs-3.3.18.tgz",
- "integrity": "sha512-yX/AVlGS7VXCuy2LSm8y8nxUrKVBgnLv+FrtkLqf6jUMtD4KP3k1c4+GPHeScR0HcYzCQF7gCF3Skba1RdYoug==",
+ "version": "3.3.19",
+ "resolved": "https://registry.npmjs.org/@react-types/tabs/-/tabs-3.3.19.tgz",
+ "integrity": "sha512-fE+qI43yR5pAMpeqPxGqQq9jDHXEPqXskuxNHERMW0PYMdPyem2Cw6goc5F4qeZO3Hf6uPZgHkvJz2OAq7TbBw==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/textfield": {
- "version": "3.12.5",
- "resolved": "https://registry.npmjs.org/@react-types/textfield/-/textfield-3.12.5.tgz",
- "integrity": "sha512-VXez8KIcop87EgIy00r+tb30xokA309TfJ32Qv5qOYB5SMqoHnb6SYvWL8Ih2PDqCo5eBiiGesSaWYrHnRIL8Q==",
+ "version": "3.12.6",
+ "resolved": "https://registry.npmjs.org/@react-types/textfield/-/textfield-3.12.6.tgz",
+ "integrity": "sha512-hpEVKE+M3uUkTjw2WrX1NrH/B3rqDJFUa+ViNK2eVranLY4ZwFqbqaYXSzHupOF3ecSjJJv2C103JrwFvx6TPQ==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/shared": "^3.32.0"
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
}
},
"node_modules/@react-types/tooltip": {
- "version": "3.4.20",
- "resolved": "https://registry.npmjs.org/@react-types/tooltip/-/tooltip-3.4.20.tgz",
- "integrity": "sha512-tF1yThwvgSgW8Gu/CLL0p92AUldHR6szlwhwW+ewT318sQlfabMGO4xlCNFdxJYtqTpEXk2rlaVrBuaC//du0w==",
+ "version": "3.4.21",
+ "resolved": "https://registry.npmjs.org/@react-types/tooltip/-/tooltip-3.4.21.tgz",
+ "integrity": "sha512-ugGHOZU6WbOdeTdbjnaEc+Ms7/WhsUCg+T3PCOIeOT9FG02Ce189yJ/+hd7oqL/tVwIhEMYJIqSCgSELFox+QA==",
+ "license": "Apache-2.0",
"dependencies": {
- "@react-types/overlays": "^3.9.1",
- "@react-types/shared": "^3.32.0"
+ "@react-types/overlays": "^3.9.2",
+ "@react-types/shared": "^3.32.1"
},
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0-rc.1 || ^18.0.0 || ^19.0.0-rc.1"
@@ -5001,12 +4905,14 @@
"version": "0.9.0",
"resolved": "https://registry.npmjs.org/@remix-run/node-fetch-server/-/node-fetch-server-0.9.0.tgz",
"integrity": "sha512-SoLMv7dbH+njWzXnOY6fI08dFMI5+/dQ+vY3n8RnnbdG7MdJEgiP28Xj/xWlnRnED/aB6SFw56Zop+LbmaaKqA==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/@rolldown/pluginutils": {
- "version": "1.0.0-beta.38",
- "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.38.tgz",
- "integrity": "sha512-N/ICGKleNhA5nc9XXQG/kkKHJ7S55u0x0XUJbbkmdCnFuoRkM1Il12q9q0eX19+M7KKUEPw/daUPIRnxhcxAIw=="
+ "version": "1.0.0-beta.47",
+ "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.47.tgz",
+ "integrity": "sha512-8QagwMH3kNCuzD8EWL8R2YPW5e4OrHNSAHRFDdmFqEwEaD/KcNKjVoumo+gP2vW5eKB2UPbM6vTYiGZX0ixLnw==",
+ "license": "MIT"
},
"node_modules/@rollup/pluginutils": {
"version": "5.3.0",
@@ -5052,9 +4958,9 @@
}
},
"node_modules/@rollup/rollup-android-arm-eabi": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.50.0.tgz",
- "integrity": "sha512-lVgpeQyy4fWN5QYebtW4buT/4kn4p4IJ+kDNB4uYNT5b8c8DLJDg6titg20NIg7E8RWwdWZORW6vUFfrLyG3KQ==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.53.3.tgz",
+ "integrity": "sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==",
"cpu": [
"arm"
],
@@ -5065,9 +4971,9 @@
]
},
"node_modules/@rollup/rollup-android-arm64": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.50.0.tgz",
- "integrity": "sha512-2O73dR4Dc9bp+wSYhviP6sDziurB5/HCym7xILKifWdE9UsOe2FtNcM+I4xZjKrfLJnq5UR8k9riB87gauiQtw==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.53.3.tgz",
+ "integrity": "sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==",
"cpu": [
"arm64"
],
@@ -5078,9 +4984,9 @@
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.50.0.tgz",
- "integrity": "sha512-vwSXQN8T4sKf1RHr1F0s98Pf8UPz7pS6P3LG9NSmuw0TVh7EmaE+5Ny7hJOZ0M2yuTctEsHHRTMi2wuHkdS6Hg==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.53.3.tgz",
+ "integrity": "sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==",
"cpu": [
"arm64"
],
@@ -5091,9 +4997,9 @@
]
},
"node_modules/@rollup/rollup-darwin-x64": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.50.0.tgz",
- "integrity": "sha512-cQp/WG8HE7BCGyFVuzUg0FNmupxC+EPZEwWu2FCGGw5WDT1o2/YlENbm5e9SMvfDFR6FRhVCBePLqj0o8MN7Vw==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.53.3.tgz",
+ "integrity": "sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==",
"cpu": [
"x64"
],
@@ -5104,9 +5010,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-arm64": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.50.0.tgz",
- "integrity": "sha512-UR1uTJFU/p801DvvBbtDD7z9mQL8J80xB0bR7DqW7UGQHRm/OaKzp4is7sQSdbt2pjjSS72eAtRh43hNduTnnQ==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.53.3.tgz",
+ "integrity": "sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==",
"cpu": [
"arm64"
],
@@ -5117,9 +5023,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-x64": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.50.0.tgz",
- "integrity": "sha512-G/DKyS6PK0dD0+VEzH/6n/hWDNPDZSMBmqsElWnCRGrYOb2jC0VSupp7UAHHQ4+QILwkxSMaYIbQ72dktp8pKA==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.53.3.tgz",
+ "integrity": "sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==",
"cpu": [
"x64"
],
@@ -5130,9 +5036,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.50.0.tgz",
- "integrity": "sha512-u72Mzc6jyJwKjJbZZcIYmd9bumJu7KNmHYdue43vT1rXPm2rITwmPWF0mmPzLm9/vJWxIRbao/jrQmxTO0Sm9w==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.53.3.tgz",
+ "integrity": "sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==",
"cpu": [
"arm"
],
@@ -5143,9 +5049,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.50.0.tgz",
- "integrity": "sha512-S4UefYdV0tnynDJV1mdkNawp0E5Qm2MtSs330IyHgaccOFrwqsvgigUD29uT+B/70PDY1eQ3t40+xf6wIvXJyg==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.53.3.tgz",
+ "integrity": "sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==",
"cpu": [
"arm"
],
@@ -5156,9 +5062,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.50.0.tgz",
- "integrity": "sha512-1EhkSvUQXJsIhk4msxP5nNAUWoB4MFDHhtc4gAYvnqoHlaL9V3F37pNHabndawsfy/Tp7BPiy/aSa6XBYbaD1g==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.53.3.tgz",
+ "integrity": "sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==",
"cpu": [
"arm64"
],
@@ -5169,9 +5075,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.50.0.tgz",
- "integrity": "sha512-EtBDIZuDtVg75xIPIK1l5vCXNNCIRM0OBPUG+tbApDuJAy9mKago6QxX+tfMzbCI6tXEhMuZuN1+CU8iDW+0UQ==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.53.3.tgz",
+ "integrity": "sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==",
"cpu": [
"arm64"
],
@@ -5181,10 +5087,10 @@
"linux"
]
},
- "node_modules/@rollup/rollup-linux-loongarch64-gnu": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.50.0.tgz",
- "integrity": "sha512-BGYSwJdMP0hT5CCmljuSNx7+k+0upweM2M4YGfFBjnFSZMHOLYR0gEEj/dxyYJ6Zc6AiSeaBY8dWOa11GF/ppQ==",
+ "node_modules/@rollup/rollup-linux-loong64-gnu": {
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.53.3.tgz",
+ "integrity": "sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==",
"cpu": [
"loong64"
],
@@ -5195,9 +5101,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.50.0.tgz",
- "integrity": "sha512-I1gSMzkVe1KzAxKAroCJL30hA4DqSi+wGc5gviD0y3IL/VkvcnAqwBf4RHXHyvH66YVHxpKO8ojrgc4SrWAnLg==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.53.3.tgz",
+ "integrity": "sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==",
"cpu": [
"ppc64"
],
@@ -5208,9 +5114,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.50.0.tgz",
- "integrity": "sha512-bSbWlY3jZo7molh4tc5dKfeSxkqnf48UsLqYbUhnkdnfgZjgufLS/NTA8PcP/dnvct5CCdNkABJ56CbclMRYCA==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.53.3.tgz",
+ "integrity": "sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==",
"cpu": [
"riscv64"
],
@@ -5221,9 +5127,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-musl": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.50.0.tgz",
- "integrity": "sha512-LSXSGumSURzEQLT2e4sFqFOv3LWZsEF8FK7AAv9zHZNDdMnUPYH3t8ZlaeYYZyTXnsob3htwTKeWtBIkPV27iQ==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.53.3.tgz",
+ "integrity": "sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==",
"cpu": [
"riscv64"
],
@@ -5234,9 +5140,9 @@
]
},
"node_modules/@rollup/rollup-linux-s390x-gnu": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.50.0.tgz",
- "integrity": "sha512-CxRKyakfDrsLXiCyucVfVWVoaPA4oFSpPpDwlMcDFQvrv3XY6KEzMtMZrA+e/goC8xxp2WSOxHQubP8fPmmjOQ==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.53.3.tgz",
+ "integrity": "sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==",
"cpu": [
"s390x"
],
@@ -5247,9 +5153,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.50.0.tgz",
- "integrity": "sha512-8PrJJA7/VU8ToHVEPu14FzuSAqVKyo5gg/J8xUerMbyNkWkO9j2ExBho/68RnJsMGNJq4zH114iAttgm7BZVkA==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.53.3.tgz",
+ "integrity": "sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==",
"cpu": [
"x64"
],
@@ -5260,9 +5166,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.50.0.tgz",
- "integrity": "sha512-SkE6YQp+CzpyOrbw7Oc4MgXFvTw2UIBElvAvLCo230pyxOLmYwRPwZ/L5lBe/VW/qT1ZgND9wJfOsdy0XptRvw==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.53.3.tgz",
+ "integrity": "sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==",
"cpu": [
"x64"
],
@@ -5273,9 +5179,9 @@
]
},
"node_modules/@rollup/rollup-openharmony-arm64": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.50.0.tgz",
- "integrity": "sha512-PZkNLPfvXeIOgJWA804zjSFH7fARBBCpCXxgkGDRjjAhRLOR8o0IGS01ykh5GYfod4c2yiiREuDM8iZ+pVsT+Q==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.53.3.tgz",
+ "integrity": "sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==",
"cpu": [
"arm64"
],
@@ -5286,9 +5192,9 @@
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.50.0.tgz",
- "integrity": "sha512-q7cIIdFvWQoaCbLDUyUc8YfR3Jh2xx3unO8Dn6/TTogKjfwrax9SyfmGGK6cQhKtjePI7jRfd7iRYcxYs93esg==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.53.3.tgz",
+ "integrity": "sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==",
"cpu": [
"arm64"
],
@@ -5299,9 +5205,9 @@
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.50.0.tgz",
- "integrity": "sha512-XzNOVg/YnDOmFdDKcxxK410PrcbcqZkBmz+0FicpW5jtjKQxcW1BZJEQOF0NJa6JO7CZhett8GEtRN/wYLYJuw==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.53.3.tgz",
+ "integrity": "sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==",
"cpu": [
"ia32"
],
@@ -5311,10 +5217,23 @@
"win32"
]
},
+ "node_modules/@rollup/rollup-win32-x64-gnu": {
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.53.3.tgz",
+ "integrity": "sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==",
+ "cpu": [
+ "x64"
+ ],
+ "license": "MIT",
+ "optional": true,
+ "os": [
+ "win32"
+ ]
+ },
"node_modules/@rollup/rollup-win32-x64-msvc": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.50.0.tgz",
- "integrity": "sha512-xMmiWRR8sp72Zqwjgtf3QbZfF1wdh8X2ABu3EaozvZcyHJeU0r+XAnXdKgs4cCAp6ORoYoCygipYP1mjmbjrsg==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.53.3.tgz",
+ "integrity": "sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==",
"cpu": [
"x64"
],
@@ -5337,23 +5256,31 @@
"integrity": "sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA==",
"license": "MIT"
},
+ "node_modules/@standard-schema/spec": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.0.0.tgz",
+ "integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/@stripe/react-stripe-js": {
- "version": "4.0.2",
- "resolved": "https://registry.npmjs.org/@stripe/react-stripe-js/-/react-stripe-js-4.0.2.tgz",
- "integrity": "sha512-l2wau+8/LOlHl+Sz8wQ1oDuLJvyw51nQCsu6/ljT6smqzTszcMHifjAJoXlnMfcou3+jK/kQyVe04u/ufyTXgg==",
+ "version": "5.4.1",
+ "resolved": "https://registry.npmjs.org/@stripe/react-stripe-js/-/react-stripe-js-5.4.1.tgz",
+ "integrity": "sha512-ipeYcAHa4EPmjwfv0lFE+YDVkOQ0TMKkFWamW+BqmnSkEln/hO8rmxGPPWcd9WjqABx6Ro8Xg4pAS7evCcR9cw==",
+ "license": "MIT",
"dependencies": {
"prop-types": "^15.7.2"
},
"peerDependencies": {
- "@stripe/stripe-js": ">=1.44.1 <8.0.0",
+ "@stripe/stripe-js": ">=8.0.0 <9.0.0",
"react": ">=16.8.0 <20.0.0",
"react-dom": ">=16.8.0 <20.0.0"
}
},
"node_modules/@stripe/stripe-js": {
- "version": "7.9.0",
- "resolved": "https://registry.npmjs.org/@stripe/stripe-js/-/stripe-js-7.9.0.tgz",
- "integrity": "sha512-ggs5k+/0FUJcIgNY08aZTqpBTtbExkJMYMLSMwyucrhtWexVOEY1KJmhBsxf+E/Q15f5rbwBpj+t0t2AW2oCsQ==",
+ "version": "8.5.3",
+ "resolved": "https://registry.npmjs.org/@stripe/stripe-js/-/stripe-js-8.5.3.tgz",
+ "integrity": "sha512-UM0GHAxlTN7v0lCK2P6t0VOlvBIdApIQxhnM3yZ2kupQ4PpSrLsK/n/NyYKtw2NJGMaNRRD1IicWS7fSL2sFtA==",
"license": "MIT",
"engines": {
"node": ">=12.16"
@@ -5543,33 +5470,6 @@
"url": "https://github.com/sponsors/gregberge"
}
},
- "node_modules/@svgr/core/node_modules/cosmiconfig": {
- "version": "8.3.6",
- "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz",
- "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "import-fresh": "^3.3.0",
- "js-yaml": "^4.1.0",
- "parse-json": "^5.2.0",
- "path-type": "^4.0.0"
- },
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/d-fischer"
- },
- "peerDependencies": {
- "typescript": ">=4.9.5"
- },
- "peerDependenciesMeta": {
- "typescript": {
- "optional": true
- }
- }
- },
"node_modules/@svgr/hast-util-to-babel-ast": {
"version": "8.0.0",
"resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-8.0.0.tgz",
@@ -5634,52 +5534,47 @@
}
},
"node_modules/@tailwindcss/node": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.13.tgz",
- "integrity": "sha512-eq3ouolC1oEFOAvOMOBAmfCIqZBJuvWvvYWh5h5iOYfe1HFC6+GZ6EIL0JdM3/niGRJmnrOc+8gl9/HGUaaptw==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.17.tgz",
+ "integrity": "sha512-csIkHIgLb3JisEFQ0vxr2Y57GUNYh447C8xzwj89U/8fdW8LhProdxvnVH6U8M2Y73QKiTIH+LWbK3V2BBZsAg==",
"license": "MIT",
"dependencies": {
"@jridgewell/remapping": "^2.3.4",
"enhanced-resolve": "^5.18.3",
- "jiti": "^2.5.1",
- "lightningcss": "1.30.1",
- "magic-string": "^0.30.18",
+ "jiti": "^2.6.1",
+ "lightningcss": "1.30.2",
+ "magic-string": "^0.30.21",
"source-map-js": "^1.2.1",
- "tailwindcss": "4.1.13"
+ "tailwindcss": "4.1.17"
}
},
"node_modules/@tailwindcss/oxide": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.13.tgz",
- "integrity": "sha512-CPgsM1IpGRa880sMbYmG1s4xhAy3xEt1QULgTJGQmZUeNgXFR7s1YxYygmJyBGtou4SyEosGAGEeYqY7R53bIA==",
- "hasInstallScript": true,
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.17.tgz",
+ "integrity": "sha512-F0F7d01fmkQhsTjXezGBLdrl1KresJTcI3DB8EkScCldyKp3Msz4hub4uyYaVnk88BAS1g5DQjjF6F5qczheLA==",
"license": "MIT",
- "dependencies": {
- "detect-libc": "^2.0.4",
- "tar": "^7.4.3"
- },
"engines": {
"node": ">= 10"
},
"optionalDependencies": {
- "@tailwindcss/oxide-android-arm64": "4.1.13",
- "@tailwindcss/oxide-darwin-arm64": "4.1.13",
- "@tailwindcss/oxide-darwin-x64": "4.1.13",
- "@tailwindcss/oxide-freebsd-x64": "4.1.13",
- "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.13",
- "@tailwindcss/oxide-linux-arm64-gnu": "4.1.13",
- "@tailwindcss/oxide-linux-arm64-musl": "4.1.13",
- "@tailwindcss/oxide-linux-x64-gnu": "4.1.13",
- "@tailwindcss/oxide-linux-x64-musl": "4.1.13",
- "@tailwindcss/oxide-wasm32-wasi": "4.1.13",
- "@tailwindcss/oxide-win32-arm64-msvc": "4.1.13",
- "@tailwindcss/oxide-win32-x64-msvc": "4.1.13"
+ "@tailwindcss/oxide-android-arm64": "4.1.17",
+ "@tailwindcss/oxide-darwin-arm64": "4.1.17",
+ "@tailwindcss/oxide-darwin-x64": "4.1.17",
+ "@tailwindcss/oxide-freebsd-x64": "4.1.17",
+ "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.17",
+ "@tailwindcss/oxide-linux-arm64-gnu": "4.1.17",
+ "@tailwindcss/oxide-linux-arm64-musl": "4.1.17",
+ "@tailwindcss/oxide-linux-x64-gnu": "4.1.17",
+ "@tailwindcss/oxide-linux-x64-musl": "4.1.17",
+ "@tailwindcss/oxide-wasm32-wasi": "4.1.17",
+ "@tailwindcss/oxide-win32-arm64-msvc": "4.1.17",
+ "@tailwindcss/oxide-win32-x64-msvc": "4.1.17"
}
},
"node_modules/@tailwindcss/oxide-android-arm64": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.13.tgz",
- "integrity": "sha512-BrpTrVYyejbgGo57yc8ieE+D6VT9GOgnNdmh5Sac6+t0m+v+sKQevpFVpwX3pBrM2qKrQwJ0c5eDbtjouY/+ew==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.17.tgz",
+ "integrity": "sha512-BMqpkJHgOZ5z78qqiGE6ZIRExyaHyuxjgrJ6eBO5+hfrfGkuya0lYfw8fRHG77gdTjWkNWEEm+qeG2cDMxArLQ==",
"cpu": [
"arm64"
],
@@ -5693,9 +5588,9 @@
}
},
"node_modules/@tailwindcss/oxide-darwin-arm64": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.13.tgz",
- "integrity": "sha512-YP+Jksc4U0KHcu76UhRDHq9bx4qtBftp9ShK/7UGfq0wpaP96YVnnjFnj3ZFrUAjc5iECzODl/Ts0AN7ZPOANQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.17.tgz",
+ "integrity": "sha512-EquyumkQweUBNk1zGEU/wfZo2qkp/nQKRZM8bUYO0J+Lums5+wl2CcG1f9BgAjn/u9pJzdYddHWBiFXJTcxmOg==",
"cpu": [
"arm64"
],
@@ -5709,9 +5604,9 @@
}
},
"node_modules/@tailwindcss/oxide-darwin-x64": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.13.tgz",
- "integrity": "sha512-aAJ3bbwrn/PQHDxCto9sxwQfT30PzyYJFG0u/BWZGeVXi5Hx6uuUOQEI2Fa43qvmUjTRQNZnGqe9t0Zntexeuw==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.17.tgz",
+ "integrity": "sha512-gdhEPLzke2Pog8s12oADwYu0IAw04Y2tlmgVzIN0+046ytcgx8uZmCzEg4VcQh+AHKiS7xaL8kGo/QTiNEGRog==",
"cpu": [
"x64"
],
@@ -5725,9 +5620,9 @@
}
},
"node_modules/@tailwindcss/oxide-freebsd-x64": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.13.tgz",
- "integrity": "sha512-Wt8KvASHwSXhKE/dJLCCWcTSVmBj3xhVhp/aF3RpAhGeZ3sVo7+NTfgiN8Vey/Fi8prRClDs6/f0KXPDTZE6nQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.17.tgz",
+ "integrity": "sha512-hxGS81KskMxML9DXsaXT1H0DyA+ZBIbyG/sSAjWNe2EDl7TkPOBI42GBV3u38itzGUOmFfCzk1iAjDXds8Oh0g==",
"cpu": [
"x64"
],
@@ -5741,9 +5636,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.13.tgz",
- "integrity": "sha512-mbVbcAsW3Gkm2MGwA93eLtWrwajz91aXZCNSkGTx/R5eb6KpKD5q8Ueckkh9YNboU8RH7jiv+ol/I7ZyQ9H7Bw==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.17.tgz",
+ "integrity": "sha512-k7jWk5E3ldAdw0cNglhjSgv501u7yrMf8oeZ0cElhxU6Y2o7f8yqelOp3fhf7evjIS6ujTI3U8pKUXV2I4iXHQ==",
"cpu": [
"arm"
],
@@ -5757,9 +5652,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.13.tgz",
- "integrity": "sha512-wdtfkmpXiwej/yoAkrCP2DNzRXCALq9NVLgLELgLim1QpSfhQM5+ZxQQF8fkOiEpuNoKLp4nKZ6RC4kmeFH0HQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.17.tgz",
+ "integrity": "sha512-HVDOm/mxK6+TbARwdW17WrgDYEGzmoYayrCgmLEw7FxTPLcp/glBisuyWkFz/jb7ZfiAXAXUACfyItn+nTgsdQ==",
"cpu": [
"arm64"
],
@@ -5773,9 +5668,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-arm64-musl": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.13.tgz",
- "integrity": "sha512-hZQrmtLdhyqzXHB7mkXfq0IYbxegaqTmfa1p9MBj72WPoDD3oNOh1Lnxf6xZLY9C3OV6qiCYkO1i/LrzEdW2mg==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.17.tgz",
+ "integrity": "sha512-HvZLfGr42i5anKtIeQzxdkw/wPqIbpeZqe7vd3V9vI3RQxe3xU1fLjss0TjyhxWcBaipk7NYwSrwTwK1hJARMg==",
"cpu": [
"arm64"
],
@@ -5789,9 +5684,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-x64-gnu": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.13.tgz",
- "integrity": "sha512-uaZTYWxSXyMWDJZNY1Ul7XkJTCBRFZ5Fo6wtjrgBKzZLoJNrG+WderJwAjPzuNZOnmdrVg260DKwXCFtJ/hWRQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.17.tgz",
+ "integrity": "sha512-M3XZuORCGB7VPOEDH+nzpJ21XPvK5PyjlkSFkFziNHGLc5d6g3di2McAAblmaSUNl8IOmzYwLx9NsE7bplNkwQ==",
"cpu": [
"x64"
],
@@ -5805,9 +5700,9 @@
}
},
"node_modules/@tailwindcss/oxide-linux-x64-musl": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.13.tgz",
- "integrity": "sha512-oXiPj5mi4Hdn50v5RdnuuIms0PVPI/EG4fxAfFiIKQh5TgQgX7oSuDWntHW7WNIi/yVLAiS+CRGW4RkoGSSgVQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.17.tgz",
+ "integrity": "sha512-k7f+pf9eXLEey4pBlw+8dgfJHY4PZ5qOUFDyNf7SI6lHjQ9Zt7+NcscjpwdCEbYi6FI5c2KDTDWyf2iHcCSyyQ==",
"cpu": [
"x64"
],
@@ -5821,9 +5716,9 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.13.tgz",
- "integrity": "sha512-+LC2nNtPovtrDwBc/nqnIKYh/W2+R69FA0hgoeOn64BdCX522u19ryLh3Vf3F8W49XBcMIxSe665kwy21FkhvA==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.17.tgz",
+ "integrity": "sha512-cEytGqSSoy7zK4JRWiTCx43FsKP/zGr0CsuMawhH67ONlH+T79VteQeJQRO/X7L0juEUA8ZyuYikcRBf0vsxhg==",
"bundleDependencies": [
"@napi-rs/wasm-runtime",
"@emnapi/core",
@@ -5838,29 +5733,29 @@
"license": "MIT",
"optional": true,
"dependencies": {
- "@emnapi/core": "^1.4.5",
- "@emnapi/runtime": "^1.4.5",
- "@emnapi/wasi-threads": "^1.0.4",
- "@napi-rs/wasm-runtime": "^0.2.12",
- "@tybys/wasm-util": "^0.10.0",
- "tslib": "^2.8.0"
+ "@emnapi/core": "^1.6.0",
+ "@emnapi/runtime": "^1.6.0",
+ "@emnapi/wasi-threads": "^1.1.0",
+ "@napi-rs/wasm-runtime": "^1.0.7",
+ "@tybys/wasm-util": "^0.10.1",
+ "tslib": "^2.4.0"
},
"engines": {
"node": ">=14.0.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": {
- "version": "1.4.5",
+ "version": "1.6.0",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
- "@emnapi/wasi-threads": "1.0.4",
+ "@emnapi/wasi-threads": "1.1.0",
"tslib": "^2.4.0"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": {
- "version": "1.4.5",
+ "version": "1.6.0",
"inBundle": true,
"license": "MIT",
"optional": true,
@@ -5869,7 +5764,7 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": {
- "version": "1.0.4",
+ "version": "1.1.0",
"inBundle": true,
"license": "MIT",
"optional": true,
@@ -5878,18 +5773,18 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": {
- "version": "0.2.12",
+ "version": "1.0.7",
"inBundle": true,
"license": "MIT",
"optional": true,
"dependencies": {
- "@emnapi/core": "^1.4.3",
- "@emnapi/runtime": "^1.4.3",
- "@tybys/wasm-util": "^0.10.0"
+ "@emnapi/core": "^1.5.0",
+ "@emnapi/runtime": "^1.5.0",
+ "@tybys/wasm-util": "^0.10.1"
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": {
- "version": "0.10.0",
+ "version": "0.10.1",
"inBundle": true,
"license": "MIT",
"optional": true,
@@ -5898,15 +5793,15 @@
}
},
"node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": {
- "version": "2.8.0",
+ "version": "2.8.1",
"inBundle": true,
"license": "0BSD",
"optional": true
},
"node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.13.tgz",
- "integrity": "sha512-dziTNeQXtoQ2KBXmrjCxsuPk3F3CQ/yb7ZNZNA+UkNTeiTGgfeh+gH5Pi7mRncVgcPD2xgHvkFCh/MhZWSgyQg==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.17.tgz",
+ "integrity": "sha512-JU5AHr7gKbZlOGvMdb4722/0aYbU+tN6lv1kONx0JK2cGsh7g148zVWLM0IKR3NeKLv+L90chBVYcJ8uJWbC9A==",
"cpu": [
"arm64"
],
@@ -5920,9 +5815,9 @@
}
},
"node_modules/@tailwindcss/oxide-win32-x64-msvc": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.13.tgz",
- "integrity": "sha512-3+LKesjXydTkHk5zXX01b5KMzLV1xl2mcktBJkje7rhFUpUlYJy7IMOLqjIRQncLTa1WZZiFY/foAeB5nmaiTw==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.17.tgz",
+ "integrity": "sha512-SKWM4waLuqx0IH+FMDUw6R66Hu4OuTALFgnleKbqhgGU30DY20NORZMZUKgLRjQXNN2TLzKvh48QXTig4h4bGw==",
"cpu": [
"x64"
],
@@ -5936,16 +5831,16 @@
}
},
"node_modules/@tailwindcss/postcss": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.13.tgz",
- "integrity": "sha512-HLgx6YSFKJT7rJqh9oJs/TkBFhxuMOfUKSBEPYwV+t78POOBsdQ7crhZLzwcH3T0UyUuOzU/GK5pk5eKr3wCiQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/postcss/-/postcss-4.1.17.tgz",
+ "integrity": "sha512-+nKl9N9mN5uJ+M7dBOOCzINw94MPstNR/GtIhz1fpZysxL/4a+No64jCBD6CPN+bIHWFx3KWuu8XJRrj/572Dw==",
"license": "MIT",
"dependencies": {
"@alloc/quick-lru": "^5.2.0",
- "@tailwindcss/node": "4.1.13",
- "@tailwindcss/oxide": "4.1.13",
+ "@tailwindcss/node": "4.1.17",
+ "@tailwindcss/oxide": "4.1.17",
"postcss": "^8.4.41",
- "tailwindcss": "4.1.13"
+ "tailwindcss": "4.1.17"
}
},
"node_modules/@tailwindcss/typography": {
@@ -5953,6 +5848,7 @@
"resolved": "https://registry.npmjs.org/@tailwindcss/typography/-/typography-0.5.19.tgz",
"integrity": "sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"postcss-selector-parser": "6.0.10"
},
@@ -5961,24 +5857,25 @@
}
},
"node_modules/@tailwindcss/vite": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.13.tgz",
- "integrity": "sha512-0PmqLQ010N58SbMTJ7BVJ4I2xopiQn/5i6nlb4JmxzQf8zcS5+m2Cv6tqh+sfDwtIdjoEnOvwsGQ1hkUi8QEHQ==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.17.tgz",
+ "integrity": "sha512-4+9w8ZHOiGnpcGI6z1TVVfWaX/koK7fKeSYF3qlYg2xpBtbteP2ddBxiarL+HVgfSJGeK5RIxRQmKm4rTJJAwA==",
"license": "MIT",
"dependencies": {
- "@tailwindcss/node": "4.1.13",
- "@tailwindcss/oxide": "4.1.13",
- "tailwindcss": "4.1.13"
+ "@tailwindcss/node": "4.1.17",
+ "@tailwindcss/oxide": "4.1.17",
+ "tailwindcss": "4.1.17"
},
"peerDependencies": {
"vite": "^5.2.0 || ^6 || ^7"
}
},
"node_modules/@tanstack/eslint-plugin-query": {
- "version": "5.91.0",
- "resolved": "https://registry.npmjs.org/@tanstack/eslint-plugin-query/-/eslint-plugin-query-5.91.0.tgz",
- "integrity": "sha512-Kn6yWyRe3dIPf7NqyDMhcsTBz2Oh8jPSOpBdlnLQhGBJ6iTMBFYA4B1UreGJ/WdfzQskSMh5imcyWF+wqa/Q5g==",
+ "version": "5.91.2",
+ "resolved": "https://registry.npmjs.org/@tanstack/eslint-plugin-query/-/eslint-plugin-query-5.91.2.tgz",
+ "integrity": "sha512-UPeWKl/Acu1IuuHJlsN+eITUHqAaa9/04geHHPedY8siVarSaWprY0SVMKrkpKfk5ehRT7+/MZ5QwWuEtkWrFw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@typescript-eslint/utils": "^8.44.1"
},
@@ -5991,20 +5888,20 @@
}
},
"node_modules/@tanstack/query-core": {
- "version": "5.90.2",
- "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.2.tgz",
- "integrity": "sha512-k/TcR3YalnzibscALLwxeiLUub6jN5EDLwKDiO7q5f4ICEoptJ+n9+7vcEFy5/x/i6Q+Lb/tXrsKCggf5uQJXQ==",
+ "version": "5.90.12",
+ "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-5.90.12.tgz",
+ "integrity": "sha512-T1/8t5DhV/SisWjDnaiU2drl6ySvsHj1bHBCWNXd+/T+Hh1cf6JodyEYMd5sgwm+b/mETT4EV3H+zCVczCU5hg==",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/tannerlinsley"
}
},
"node_modules/@tanstack/react-query": {
- "version": "5.90.2",
- "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.2.tgz",
- "integrity": "sha512-CLABiR+h5PYfOWr/z+vWFt5VsOA2ekQeRQBFSKlcoW6Ndx/f8rfyVmq4LbgOM4GG2qtxAxjLYLOpCNTYm4uKzw==",
+ "version": "5.90.12",
+ "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.12.tgz",
+ "integrity": "sha512-graRZspg7EoEaw0a8faiUASCyJrqjKPdqJ9EwuDRUF9mEYJ1YPczI9H+/agJ0mOJkPCJDk0lsz5QTrLZ/jQ2rg==",
"dependencies": {
- "@tanstack/query-core": "5.90.2"
+ "@tanstack/query-core": "5.90.12"
},
"funding": {
"type": "github",
@@ -6018,6 +5915,7 @@
"version": "3.11.3",
"resolved": "https://registry.npmjs.org/@tanstack/react-virtual/-/react-virtual-3.11.3.tgz",
"integrity": "sha512-vCU+OTylXN3hdC8RKg68tPlBPjjxtzon7Ys46MgrSLE+JhSjSTPvoQifV6DQJeJmA8Q3KT6CphJbejupx85vFw==",
+ "license": "MIT",
"dependencies": {
"@tanstack/virtual-core": "3.11.3"
},
@@ -6034,6 +5932,7 @@
"version": "3.11.3",
"resolved": "https://registry.npmjs.org/@tanstack/virtual-core/-/virtual-core-3.11.3.tgz",
"integrity": "sha512-v2mrNSnMwnPJtcVqNvV0c5roGCBqeogN8jDtgtuHCphdwBasOZ17x8UV8qpHUh+u0MLfX43c0uUHKje0s+Zb0w==",
+ "license": "MIT",
"funding": {
"type": "github",
"url": "https://github.com/sponsors/tannerlinsley"
@@ -6060,9 +5959,9 @@
}
},
"node_modules/@testing-library/jest-dom": {
- "version": "6.8.0",
- "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.8.0.tgz",
- "integrity": "sha512-WgXcWzVM6idy5JaftTVC8Vs83NKRmGJz4Hqs4oyOuO2J4r/y79vvKZsb+CaGyCSEbUPI6OsewfPd0G1A0/TUZQ==",
+ "version": "6.9.1",
+ "resolved": "https://registry.npmjs.org/@testing-library/jest-dom/-/jest-dom-6.9.1.tgz",
+ "integrity": "sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -6183,22 +6082,16 @@
"license": "MIT"
},
"node_modules/@types/chai": {
- "version": "5.2.2",
- "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz",
- "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==",
+ "version": "5.2.3",
+ "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz",
+ "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@types/deep-eql": "*"
+ "@types/deep-eql": "*",
+ "assertion-error": "^2.0.1"
}
},
- "node_modules/@types/cookie": {
- "version": "0.6.0",
- "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz",
- "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/@types/debug": {
"version": "4.1.12",
"resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.12.tgz",
@@ -6247,9 +6140,9 @@
"license": "MIT"
},
"node_modules/@types/lodash": {
- "version": "4.17.20",
- "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz",
- "integrity": "sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==",
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-FOvQ0YPD5NOfPgMzJihoT+Za5pdkDJWcbpuj1DjaKZIr/gxodQjY/uWEFlTNqW2ugXHUiL8lRQgw63dzKHZdeQ==",
"license": "MIT"
},
"node_modules/@types/mdast": {
@@ -6268,12 +6161,13 @@
"license": "MIT"
},
"node_modules/@types/node": {
- "version": "24.5.2",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-24.5.2.tgz",
- "integrity": "sha512-FYxk1I7wPv3K2XBaoyH2cTnocQEu8AOZ60hPbsyukMPLv5/5qr7V1i8PLHdl6Zf87I+xZXFvPCXYjiTFq+YSDQ==",
+ "version": "24.10.1",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-24.10.1.tgz",
+ "integrity": "sha512-GNWcUTRBgIRJD5zj+Tq0fKOJ5XZajIiBroOF0yvj2bSU1WvNdYS/dn9UxwsujGW4JX06dnHyjV2y9rRaybH0iQ==",
"devOptional": true,
+ "license": "MIT",
"dependencies": {
- "undici-types": "~7.12.0"
+ "undici-types": "~7.16.0"
}
},
"node_modules/@types/prismjs": {
@@ -6283,21 +6177,22 @@
"license": "MIT"
},
"node_modules/@types/react": {
- "version": "19.1.15",
- "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.15.tgz",
- "integrity": "sha512-+kLxJpaJzXybyDyFXYADyP1cznTO8HSuBpenGlnKOAkH4hyNINiywvXS/tGJhsrGGP/gM185RA3xpjY0Yg4erA==",
+ "version": "19.2.7",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.7.tgz",
+ "integrity": "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg==",
+ "license": "MIT",
"dependencies": {
- "csstype": "^3.0.2"
+ "csstype": "^3.2.2"
}
},
"node_modules/@types/react-dom": {
- "version": "19.1.9",
- "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.9.tgz",
- "integrity": "sha512-qXRuZaOsAdXKFyOhRBg6Lqqc0yay13vN7KrIg4L7N4aaHN68ma9OK3NE1BoDFgFOTfM7zg+3/8+2n8rLUH3OKQ==",
+ "version": "19.2.3",
+ "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.2.3.tgz",
+ "integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"dev": true,
"license": "MIT",
"peerDependencies": {
- "@types/react": "^19.0.0"
+ "@types/react": "^19.2.0"
}
},
"node_modules/@types/react-highlight": {
@@ -6328,9 +6223,10 @@
"license": "MIT"
},
"node_modules/@types/trusted-types": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-1.0.6.tgz",
- "integrity": "sha512-230RC8sFeHoT6sSUlRO6a8cAnclO06eeiq1QDfiv2FGCLWFvvERWgwIQD4FWqD9A69BN7Lzee4OXwoMVnnsWDw=="
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
+ "integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
+ "optional": true
},
"node_modules/@types/unist": {
"version": "3.0.3",
@@ -6435,13 +6331,14 @@
}
},
"node_modules/@typescript-eslint/project-service": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.44.1.tgz",
- "integrity": "sha512-ycSa60eGg8GWAkVsKV4E6Nz33h+HjTXbsDT4FILyL8Obk5/mx4tbvCNsLf9zret3ipSumAOG89UcCs/KRaKYrA==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.48.1.tgz",
+ "integrity": "sha512-HQWSicah4s9z2/HifRPQ6b6R7G+SBx64JlFQpgSSHWPKdvCZX57XCbszg/bapbRsOEv42q5tayTYcEFpACcX1w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/tsconfig-utils": "^8.44.1",
- "@typescript-eslint/types": "^8.44.1",
+ "@typescript-eslint/tsconfig-utils": "^8.48.1",
+ "@typescript-eslint/types": "^8.48.1",
"debug": "^4.3.4"
},
"engines": {
@@ -6456,10 +6353,11 @@
}
},
"node_modules/@typescript-eslint/project-service/node_modules/@typescript-eslint/types": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.44.1.tgz",
- "integrity": "sha512-Lk7uj7y9uQUOEguiDIDLYLJOrYHQa7oBiURYVFqIpGxclAFQ78f6VUOM8lI2XEuNOKNB7XuvM2+2cMXAoq4ALQ==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.48.1.tgz",
+ "integrity": "sha512-+fZ3LZNeiELGmimrujsDCT4CRIbq5oXdHe7chLiW8qzqyPMnn1puNstCrMNVAqwcl2FdIxkuJ4tOs/RFDBVc/Q==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -6487,10 +6385,11 @@
}
},
"node_modules/@typescript-eslint/tsconfig-utils": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.44.1.tgz",
- "integrity": "sha512-B5OyACouEjuIvof3o86lRMvyDsFwZm+4fBOqFHccIctYgBjqR3qT39FBYGN87khcgf0ExpdCBeGKpKRhSFTjKQ==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.48.1.tgz",
+ "integrity": "sha512-k0Jhs4CpEffIBm6wPaCXBAD7jxBtrHjrSgtfCjUvPp9AZ78lXKdTR8fxyZO5y4vWNlOvYXRtngSZNSn+H53Jkw==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -6597,15 +6496,16 @@
}
},
"node_modules/@typescript-eslint/utils": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.44.1.tgz",
- "integrity": "sha512-DpX5Fp6edTlocMCwA+mHY8Mra+pPjRZ0TfHkXI8QFelIKcbADQz1LUPNtzOFUriBB2UYqw4Pi9+xV4w9ZczHFg==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.48.1.tgz",
+ "integrity": "sha512-fAnhLrDjiVfey5wwFRwrweyRlCmdz5ZxXz2G/4cLn0YDLjTapmN4gcCsTBR1N2rWnZSDeWpYtgLDsJt+FpmcwA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@eslint-community/eslint-utils": "^4.7.0",
- "@typescript-eslint/scope-manager": "8.44.1",
- "@typescript-eslint/types": "8.44.1",
- "@typescript-eslint/typescript-estree": "8.44.1"
+ "@typescript-eslint/scope-manager": "8.48.1",
+ "@typescript-eslint/types": "8.48.1",
+ "@typescript-eslint/typescript-estree": "8.48.1"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -6620,13 +6520,14 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.44.1.tgz",
- "integrity": "sha512-NdhWHgmynpSvyhchGLXh+w12OMT308Gm25JoRIyTZqEbApiBiQHD/8xgb6LqCWCFcxFtWwaVdFsLPQI3jvhywg==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.48.1.tgz",
+ "integrity": "sha512-rj4vWQsytQbLxC5Bf4XwZ0/CKd362DkWMUkviT7DCS057SK64D5lH74sSGzhI6PDD2HCEq02xAP9cX68dYyg1w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.44.1",
- "@typescript-eslint/visitor-keys": "8.44.1"
+ "@typescript-eslint/types": "8.48.1",
+ "@typescript-eslint/visitor-keys": "8.48.1"
},
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
@@ -6637,10 +6538,11 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.44.1.tgz",
- "integrity": "sha512-Lk7uj7y9uQUOEguiDIDLYLJOrYHQa7oBiURYVFqIpGxclAFQ78f6VUOM8lI2XEuNOKNB7XuvM2+2cMXAoq4ALQ==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.48.1.tgz",
+ "integrity": "sha512-+fZ3LZNeiELGmimrujsDCT4CRIbq5oXdHe7chLiW8qzqyPMnn1puNstCrMNVAqwcl2FdIxkuJ4tOs/RFDBVc/Q==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -6650,20 +6552,20 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.44.1.tgz",
- "integrity": "sha512-qnQJ+mVa7szevdEyvfItbO5Vo+GfZ4/GZWWDRRLjrxYPkhM+6zYB2vRYwCsoJLzqFCdZT4mEqyJoyzkunsZ96A==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.48.1.tgz",
+ "integrity": "sha512-/9wQ4PqaefTK6POVTjJaYS0bynCgzh6ClJHGSBj06XEHjkfylzB+A3qvyaXnErEZSaxhIo4YdyBgq6j4RysxDg==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/project-service": "8.44.1",
- "@typescript-eslint/tsconfig-utils": "8.44.1",
- "@typescript-eslint/types": "8.44.1",
- "@typescript-eslint/visitor-keys": "8.44.1",
+ "@typescript-eslint/project-service": "8.48.1",
+ "@typescript-eslint/tsconfig-utils": "8.48.1",
+ "@typescript-eslint/types": "8.48.1",
+ "@typescript-eslint/visitor-keys": "8.48.1",
"debug": "^4.3.4",
- "fast-glob": "^3.3.2",
- "is-glob": "^4.0.3",
"minimatch": "^9.0.4",
"semver": "^7.6.0",
+ "tinyglobby": "^0.2.15",
"ts-api-utils": "^2.1.0"
},
"engines": {
@@ -6678,12 +6580,13 @@
}
},
"node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": {
- "version": "8.44.1",
- "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.44.1.tgz",
- "integrity": "sha512-576+u0QD+Jp3tZzvfRfxon0EA2lzcDt3lhUbsC6Lgzy9x2VR4E+JUiNyGHi5T8vk0TV+fpJ5GLG1JsJuWCaKhw==",
+ "version": "8.48.1",
+ "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.48.1.tgz",
+ "integrity": "sha512-BmxxndzEWhE4TIEEMBs8lP3MBWN3jFPs/p6gPm/wkv02o41hI6cq9AuSmGAaTTHPtA1FTi2jBre4A9rm5ZmX+Q==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@typescript-eslint/types": "8.44.1",
+ "@typescript-eslint/types": "8.48.1",
"eslint-visitor-keys": "^4.2.1"
},
"engines": {
@@ -6699,6 +6602,7 @@
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz",
"integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==",
"dev": true,
+ "license": "Apache-2.0",
"engines": {
"node": "^18.18.0 || ^20.9.0 || >=21.1.0"
},
@@ -6711,6 +6615,7 @@
"resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.1.0.tgz",
"integrity": "sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18.12"
},
@@ -6756,16 +6661,17 @@
"license": "ISC"
},
"node_modules/@vitejs/plugin-react": {
- "version": "5.0.4",
- "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.0.4.tgz",
- "integrity": "sha512-La0KD0vGkVkSk6K+piWDKRUyg8Rl5iAIKRMH0vMJI0Eg47bq1eOxmoObAaQG37WMW9MSyk7Cs8EIWwJC1PtzKA==",
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-5.1.1.tgz",
+ "integrity": "sha512-WQfkSw0QbQ5aJ2CHYw23ZGkqnRwqKHD/KYsMeTkZzPT4Jcf0DcBxBtwMJxnu6E7oxw5+JC6ZAiePgh28uJ1HBA==",
+ "license": "MIT",
"dependencies": {
- "@babel/core": "^7.28.4",
+ "@babel/core": "^7.28.5",
"@babel/plugin-transform-react-jsx-self": "^7.27.1",
"@babel/plugin-transform-react-jsx-source": "^7.27.1",
- "@rolldown/pluginutils": "1.0.0-beta.38",
+ "@rolldown/pluginutils": "1.0.0-beta.47",
"@types/babel__core": "^7.20.5",
- "react-refresh": "^0.17.0"
+ "react-refresh": "^0.18.0"
},
"engines": {
"node": "^20.19.0 || >=22.12.0"
@@ -6775,41 +6681,39 @@
}
},
"node_modules/@vitejs/plugin-react/node_modules/react-refresh": {
- "version": "0.17.0",
- "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz",
- "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==",
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.18.0.tgz",
+ "integrity": "sha512-QgT5//D3jfjJb6Gsjxv0Slpj23ip+HtOpnNgnb2S5zU3CB26G/IDPGoy4RJB42wzFE46DRsstbW6tKHoKbhAxw==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/@vitest/coverage-v8": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-3.2.4.tgz",
- "integrity": "sha512-EyF9SXU6kS5Ku/U82E259WSnvg6c8KTjppUncuNdm5QHpe17mwREHnjDzozC8x9MZ0xfBUFSaLkRv4TMA75ALQ==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-4.0.15.tgz",
+ "integrity": "sha512-FUJ+1RkpTFW7rQITdgTi93qOCWJobWhBirEPCeXh2SW2wsTlFxy51apDz5gzG+ZEYt/THvWeNmhdAoS9DTwpCw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@ampproject/remapping": "^2.3.0",
"@bcoe/v8-coverage": "^1.0.2",
- "ast-v8-to-istanbul": "^0.3.3",
- "debug": "^4.4.1",
+ "@vitest/utils": "4.0.15",
+ "ast-v8-to-istanbul": "^0.3.8",
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-lib-source-maps": "^5.0.6",
- "istanbul-reports": "^3.1.7",
- "magic-string": "^0.30.17",
- "magicast": "^0.3.5",
- "std-env": "^3.9.0",
- "test-exclude": "^7.0.1",
- "tinyrainbow": "^2.0.0"
+ "istanbul-reports": "^3.2.0",
+ "magicast": "^0.5.1",
+ "obug": "^2.1.1",
+ "std-env": "^3.10.0",
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
},
"peerDependencies": {
- "@vitest/browser": "3.2.4",
- "vitest": "3.2.4"
+ "@vitest/browser": "4.0.15",
+ "vitest": "4.0.15"
},
"peerDependenciesMeta": {
"@vitest/browser": {
@@ -6818,39 +6722,40 @@
}
},
"node_modules/@vitest/expect": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.4.tgz",
- "integrity": "sha512-Io0yyORnB6sikFlt8QW5K7slY4OjqNX9jmJQ02QDda8lyM6B5oNgVWoSoKPac8/kgnCUzuHQKrSLtu/uOqqrig==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.0.15.tgz",
+ "integrity": "sha512-Gfyva9/GxPAWXIWjyGDli9O+waHDC0Q0jaLdFP1qPAUUfo1FEXPXUfUkp3eZA0sSq340vPycSyOlYUeM15Ft1w==",
"dev": true,
"license": "MIT",
"dependencies": {
+ "@standard-schema/spec": "^1.0.0",
"@types/chai": "^5.2.2",
- "@vitest/spy": "3.2.4",
- "@vitest/utils": "3.2.4",
- "chai": "^5.2.0",
- "tinyrainbow": "^2.0.0"
+ "@vitest/spy": "4.0.15",
+ "@vitest/utils": "4.0.15",
+ "chai": "^6.2.1",
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/mocker": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.4.tgz",
- "integrity": "sha512-46ryTE9RZO/rfDd7pEqFl7etuyzekzEhUbTW3BvmeO/BcCMEgq59BKhek3dXDWgAj4oMK6OZi+vRr1wPW6qjEQ==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.0.15.tgz",
+ "integrity": "sha512-CZ28GLfOEIFkvCFngN8Sfx5h+Se0zN+h4B7yOsPVCcgtiO7t5jt9xQh2E1UkFep+eb9fjyMfuC5gBypwb07fvQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/spy": "3.2.4",
+ "@vitest/spy": "4.0.15",
"estree-walker": "^3.0.3",
- "magic-string": "^0.30.17"
+ "magic-string": "^0.30.21"
},
"funding": {
"url": "https://opencollective.com/vitest"
},
"peerDependencies": {
"msw": "^2.4.9",
- "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0"
+ "vite": "^6.0.0 || ^7.0.0-0"
},
"peerDependenciesMeta": {
"msw": {
@@ -6862,28 +6767,27 @@
}
},
"node_modules/@vitest/pretty-format": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.4.tgz",
- "integrity": "sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.0.15.tgz",
+ "integrity": "sha512-SWdqR8vEv83WtZcrfLNqlqeQXlQLh2iilO1Wk1gv4eiHKjEzvgHb2OVc3mIPyhZE6F+CtfYjNlDJwP5MN6Km7A==",
"dev": true,
"license": "MIT",
"dependencies": {
- "tinyrainbow": "^2.0.0"
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/runner": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-3.2.4.tgz",
- "integrity": "sha512-oukfKT9Mk41LreEW09vt45f8wx7DordoWUZMYdY/cyAk7w5TWkTRCNZYF7sX7n2wB7jyGAl74OxgwhPgKaqDMQ==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.0.15.tgz",
+ "integrity": "sha512-+A+yMY8dGixUhHmNdPUxOh0la6uVzun86vAbuMT3hIDxMrAOmn5ILBHm8ajrqHE0t8R9T1dGnde1A5DTnmi3qw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/utils": "3.2.4",
- "pathe": "^2.0.3",
- "strip-literal": "^3.0.0"
+ "@vitest/utils": "4.0.15",
+ "pathe": "^2.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
@@ -6897,14 +6801,14 @@
"license": "MIT"
},
"node_modules/@vitest/snapshot": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.4.tgz",
- "integrity": "sha512-dEYtS7qQP2CjU27QBC5oUOxLE/v5eLkGqPE0ZKEIDGMs4vKWe7IjgLOeauHsR0D5YuuycGRO5oSRXnwnmA78fQ==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.0.15.tgz",
+ "integrity": "sha512-A7Ob8EdFZJIBjLjeO0DZF4lqR6U7Ydi5/5LIZ0xcI+23lYlsYJAfGn8PrIWTYdZQRNnSRlzhg0zyGu37mVdy5g==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/pretty-format": "3.2.4",
- "magic-string": "^0.30.17",
+ "@vitest/pretty-format": "4.0.15",
+ "magic-string": "^0.30.21",
"pathe": "^2.0.3"
},
"funding": {
@@ -6919,28 +6823,24 @@
"license": "MIT"
},
"node_modules/@vitest/spy": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.4.tgz",
- "integrity": "sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.0.15.tgz",
+ "integrity": "sha512-+EIjOJmnY6mIfdXtE/bnozKEvTC4Uczg19yeZ2vtCz5Yyb0QQ31QWVQ8hswJ3Ysx/K2EqaNsVanjr//2+P3FHw==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "tinyspy": "^4.0.3"
- },
"funding": {
"url": "https://opencollective.com/vitest"
}
},
"node_modules/@vitest/utils": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.4.tgz",
- "integrity": "sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.0.15.tgz",
+ "integrity": "sha512-HXjPW2w5dxhTD0dLwtYHDnelK3j8sR8cWIaLxr22evTyY6q8pRCjZSmhRWVjBaOVXChQd6AwMzi9pucorXCPZA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@vitest/pretty-format": "3.2.4",
- "loupe": "^3.1.4",
- "tinyrainbow": "^2.0.0"
+ "@vitest/pretty-format": "4.0.15",
+ "tinyrainbow": "^3.0.3"
},
"funding": {
"url": "https://opencollective.com/vitest"
@@ -7032,10 +6932,11 @@
}
},
"node_modules/ansi-escapes": {
- "version": "7.1.1",
- "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.1.1.tgz",
- "integrity": "sha512-Zhl0ErHcSRUaVfGUeUdDuLgpkEo8KIFjB4Y9uAc46ScOpdDiU1Dbyplh7qWJeJ/ZHpbyMSM26+X3BySgnIz40Q==",
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-7.2.0.tgz",
+ "integrity": "sha512-g6LhBsl+GBPRWGWsBtutpzBYuIIdBkLEvad5C/va/74Db018+5TZiyA26cZJAr3Rft5lprVqOIPxf5Vid6tqAw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"environment": "^1.0.0"
},
@@ -7289,13 +7190,13 @@
"license": "MIT"
},
"node_modules/ast-v8-to-istanbul": {
- "version": "0.3.5",
- "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.5.tgz",
- "integrity": "sha512-9SdXjNheSiE8bALAQCQQuT6fgQaoxJh7IRYrRGZ8/9nv8WhJeC1aXAwN8TbaOssGOukUvyvnkgD9+Yuykvl1aA==",
+ "version": "0.3.8",
+ "resolved": "https://registry.npmjs.org/ast-v8-to-istanbul/-/ast-v8-to-istanbul-0.3.8.tgz",
+ "integrity": "sha512-szgSZqUxI5T8mLKvS7WTjF9is+MVbOeLADU73IseOcrqhxr/VAvy6wfoVE39KnKzA7JRhjF5eUagNlHwvZPlKQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@jridgewell/trace-mapping": "^0.3.30",
+ "@jridgewell/trace-mapping": "^0.3.31",
"estree-walker": "^3.0.3",
"js-tokens": "^9.0.1"
}
@@ -7324,9 +7225,9 @@
"license": "MIT"
},
"node_modules/autoprefixer": {
- "version": "10.4.21",
- "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.21.tgz",
- "integrity": "sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==",
+ "version": "10.4.22",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.22.tgz",
+ "integrity": "sha512-ARe0v/t9gO28Bznv6GgqARmVqcWOV3mfgUPn9becPHMiD3o9BwlRgaeccZnwTpZ7Zwqrm+c1sUSsMxIzQzc8Xg==",
"dev": true,
"funding": [
{
@@ -7344,9 +7245,9 @@
],
"license": "MIT",
"dependencies": {
- "browserslist": "^4.24.4",
- "caniuse-lite": "^1.0.30001702",
- "fraction.js": "^4.3.7",
+ "browserslist": "^4.27.0",
+ "caniuse-lite": "^1.0.30001754",
+ "fraction.js": "^5.3.4",
"normalize-range": "^0.1.2",
"picocolors": "^1.1.1",
"postcss-value-parser": "^4.2.0"
@@ -7378,9 +7279,9 @@
}
},
"node_modules/axe-core": {
- "version": "4.10.3",
- "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.10.3.tgz",
- "integrity": "sha512-Xm7bpRXnDSX2YE2YFfBk2FnF0ep6tmG7xPh8iHee8MIcrgq762Nkce856dYtJYLkuIoYZvGfTs/PbZhideTcEg==",
+ "version": "4.11.0",
+ "resolved": "https://registry.npmjs.org/axe-core/-/axe-core-4.11.0.tgz",
+ "integrity": "sha512-ilYanEU8vxxBexpJd8cWM4ElSQq4QctCLKih0TSfjIfCQTeyH/6zVrmIJfLPrKTKJRbiG+cfnZbQIjAlJmF1jQ==",
"dev": true,
"license": "MPL-2.0",
"engines": {
@@ -7388,9 +7289,10 @@
}
},
"node_modules/axios": {
- "version": "1.12.2",
- "resolved": "https://registry.npmjs.org/axios/-/axios-1.12.2.tgz",
- "integrity": "sha512-vMJzPewAlRyOgxV2dU0Cuz2O8zzzx9VYtbJOaBgXFeLc4IV/Eg50n4LowmehOOR61S8ZMpc2K5Sa7g6A4jfkUw==",
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz",
+ "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==",
+ "license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.4",
@@ -7443,6 +7345,15 @@
"integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==",
"license": "MIT"
},
+ "node_modules/baseline-browser-mapping": {
+ "version": "2.9.0",
+ "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.0.tgz",
+ "integrity": "sha512-Mh++g+2LPfzZToywfE1BUzvZbfOY52Nil0rn9H1CPC5DJ7fX+Vir7nToBeoiSbB1zTNeGYbELEvJESujgGrzXw==",
+ "license": "Apache-2.0",
+ "bin": {
+ "baseline-browser-mapping": "dist/cli.js"
+ }
+ },
"node_modules/basic-auth": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
@@ -7466,27 +7377,28 @@
"resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz",
"integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"require-from-string": "^2.0.2"
}
},
"node_modules/body-parser": {
- "version": "1.20.3",
- "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
- "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
+ "version": "1.20.4",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz",
+ "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==",
"dependencies": {
- "bytes": "3.1.2",
+ "bytes": "~3.1.2",
"content-type": "~1.0.5",
"debug": "2.6.9",
"depd": "2.0.0",
- "destroy": "1.2.0",
- "http-errors": "2.0.0",
- "iconv-lite": "0.4.24",
- "on-finished": "2.4.1",
- "qs": "6.13.0",
- "raw-body": "2.5.2",
+ "destroy": "~1.2.0",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.4.24",
+ "on-finished": "~2.4.1",
+ "qs": "~6.14.0",
+ "raw-body": "~2.5.3",
"type-is": "~1.6.18",
- "unpipe": "1.0.0"
+ "unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8",
@@ -7530,9 +7442,9 @@
}
},
"node_modules/browserslist": {
- "version": "4.25.4",
- "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.25.4.tgz",
- "integrity": "sha512-4jYpcjabC606xJ3kw2QwGEZKX0Aw7sgQdZCvIK9dhVSPh76BKo+C+btT1RRofH7B+8iNpEbgGNVWiLki5q93yg==",
+ "version": "4.28.1",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz",
+ "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==",
"funding": [
{
"type": "opencollective",
@@ -7549,10 +7461,11 @@
],
"license": "MIT",
"dependencies": {
- "caniuse-lite": "^1.0.30001737",
- "electron-to-chromium": "^1.5.211",
- "node-releases": "^2.0.19",
- "update-browserslist-db": "^1.1.3"
+ "baseline-browser-mapping": "^2.9.0",
+ "caniuse-lite": "^1.0.30001759",
+ "electron-to-chromium": "^1.5.263",
+ "node-releases": "^2.0.27",
+ "update-browserslist-db": "^1.2.0"
},
"bin": {
"browserslist": "cli.js"
@@ -7658,9 +7571,9 @@
}
},
"node_modules/caniuse-lite": {
- "version": "1.0.30001741",
- "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001741.tgz",
- "integrity": "sha512-QGUGitqsc8ARjLdgAfxETDhRbJ0REsP6O3I96TAth/mVjh2cYzN2u+3AzPP3aVSm2FehEItaJw1xd+IGBXWeSw==",
+ "version": "1.0.30001759",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001759.tgz",
+ "integrity": "sha512-Pzfx9fOKoKvevQf8oCXoyNRQ5QyxJj+3O0Rqx2V5oxT61KGx8+n6hV/IUyJeifUci2clnmmKVpvtiqRzgiWjSw==",
"funding": [
{
"type": "opencollective",
@@ -7688,18 +7601,11 @@
}
},
"node_modules/chai": {
- "version": "5.3.3",
- "resolved": "https://registry.npmjs.org/chai/-/chai-5.3.3.tgz",
- "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==",
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.1.tgz",
+ "integrity": "sha512-p4Z49OGG5W/WBCPSS/dH3jQ73kD6tiMmUM+bckNK6Jr5JHMG3k9bg/BvKR8lKmtVBKmOiuVaV2ws8s9oSbwysg==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "assertion-error": "^2.0.1",
- "check-error": "^2.1.1",
- "deep-eql": "^5.0.1",
- "loupe": "^3.1.0",
- "pathval": "^2.0.0"
- },
"engines": {
"node": ">=18"
}
@@ -7761,16 +7667,6 @@
"url": "https://github.com/sponsors/wooorm"
}
},
- "node_modules/check-error": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz",
- "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 16"
- }
- },
"node_modules/chokidar": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz",
@@ -7787,15 +7683,6 @@
"url": "https://paulmillr.com/funding/"
}
},
- "node_modules/chownr": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/chownr/-/chownr-3.0.0.tgz",
- "integrity": "sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==",
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": ">=18"
- }
- },
"node_modules/class-variance-authority": {
"version": "0.7.1",
"resolved": "https://registry.npmjs.org/class-variance-authority/-/class-variance-authority-0.7.1.tgz",
@@ -7813,6 +7700,7 @@
"resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-5.0.0.tgz",
"integrity": "sha512-aCj4O5wKyszjMmDT4tZj93kxyydN/K5zPWSCe6/0AV/AA1pqe5ZBIw0a2ZfPQV7lL5/yb5HsUreJ6UFAF1tEQw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"restore-cursor": "^5.0.0"
},
@@ -7824,10 +7712,11 @@
}
},
"node_modules/cli-truncate": {
- "version": "5.1.0",
- "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-5.1.0.tgz",
- "integrity": "sha512-7JDGG+4Zp0CsknDCedl0DYdaeOhc46QNpXi3NLQblkZpXXgA6LncLDUUyvrjSvZeF3VRQa+KiMGomazQrC1V8g==",
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-5.1.1.tgz",
+ "integrity": "sha512-SroPvNHxUnk+vIW/dOSfNqdy1sPEFkrTk6TUtqLCnBlo3N7TNYYkzzN7uSD6+jVjrdO4+p8nH7JzH6cIvUem6A==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"slice-ansi": "^7.1.0",
"string-width": "^8.0.0"
@@ -7839,49 +7728,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/cli-truncate/node_modules/ansi-regex": {
- "version": "6.2.2",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
- "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
- "dev": true,
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
- }
- },
- "node_modules/cli-truncate/node_modules/string-width": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.1.0.tgz",
- "integrity": "sha512-Kxl3KJGb/gxkaUMOjRsQ8IrXiGW75O4E3RPjFIINOVH8AMl2SQ/yWdTzWwF3FevIX9LcMAjJW+GRwAlAbTSXdg==",
- "dev": true,
- "dependencies": {
- "get-east-asian-width": "^1.3.0",
- "strip-ansi": "^7.1.0"
- },
- "engines": {
- "node": ">=20"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/cli-truncate/node_modules/strip-ansi": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
- "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
- "dev": true,
- "dependencies": {
- "ansi-regex": "^6.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/strip-ansi?sponsor=1"
- }
- },
"node_modules/cli-width": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz",
@@ -8017,7 +7863,8 @@
"version": "2.0.20",
"resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
"integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/combined-stream": {
"version": "1.0.8",
@@ -8042,10 +7889,11 @@
}
},
"node_modules/commander": {
- "version": "14.0.1",
- "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.1.tgz",
- "integrity": "sha512-2JkV3gUZUVrbNA+1sjBOYLsMZ5cEEl8GTFP2a4AVz5hvasAMCQ1D2l2le/cX+pV4N6ZU17zjUahLpIXRrnWL8A==",
+ "version": "14.0.2",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-14.0.2.tgz",
+ "integrity": "sha512-TywoWNNRbhoD0BXs1P3ZEScW8W5iKrnbithIl0YH+uCmBd0QpPOA8yc82DS3BIE5Ma6FnBVUsJ7wVUDz4dvOWQ==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=20"
}
@@ -8108,6 +7956,13 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/confbox": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/confbox/-/confbox-0.2.2.tgz",
+ "integrity": "sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/confusing-browser-globals": {
"version": "1.0.11",
"resolved": "https://registry.npmjs.org/confusing-browser-globals/-/confusing-browser-globals-1.0.11.tgz",
@@ -8150,22 +8005,22 @@
"license": "MIT"
},
"node_modules/cookie": {
- "version": "0.7.1",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz",
- "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==",
+ "version": "0.7.2",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz",
+ "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==",
"engines": {
"node": ">= 0.6"
}
},
"node_modules/cookie-signature": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
- "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ=="
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz",
+ "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA=="
},
"node_modules/core-js": {
- "version": "3.45.1",
- "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.45.1.tgz",
- "integrity": "sha512-L4NPsJlCfZsPeXukyzHFlg/i7IIVwHSItR0wg0FLNqYClJ4MQYTYLbC7EkjKYRLZF2iof2MUgN0EGy7MdQFChg==",
+ "version": "3.47.0",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.47.0.tgz",
+ "integrity": "sha512-c3Q2VVkGAUyupsjRnaNX6u8Dq2vAdzm9iuPj5FW0fRxzlxgq9Q39MDq10IvmQSpLgHQNyQzQmOo6bgGHmH3NNg==",
"hasInstallScript": true,
"license": "MIT",
"funding": {
@@ -8173,10 +8028,37 @@
"url": "https://opencollective.com/core-js"
}
},
+ "node_modules/cosmiconfig": {
+ "version": "8.3.6",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.3.6.tgz",
+ "integrity": "sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "import-fresh": "^3.3.0",
+ "js-yaml": "^4.1.0",
+ "parse-json": "^5.2.0",
+ "path-type": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/d-fischer"
+ },
+ "peerDependencies": {
+ "typescript": ">=4.9.5"
+ },
+ "peerDependenciesMeta": {
+ "typescript": {
+ "optional": true
+ }
+ }
+ },
"node_modules/cross-env": {
- "version": "10.0.0",
- "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-10.0.0.tgz",
- "integrity": "sha512-aU8qlEK/nHYtVuN4p7UQgAwVljzMg8hB4YK5ThRqD2l/ziSnryncPNn7bMLt5cFYsKVKBh8HqLqyCoTupEUu7Q==",
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/cross-env/-/cross-env-10.1.0.tgz",
+ "integrity": "sha512-GsYosgnACZTADcmEyJctkJIoqAhHjttw7RsFrVoJNXbsWWqaq6Ym+7kZjq6mS45O0jij6vtiReppKQEtqWy6Dw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -8214,26 +8096,12 @@
"node": ">= 8"
}
},
- "node_modules/cross-spawn/node_modules/which": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
- "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
- "license": "ISC",
- "dependencies": {
- "isexe": "^2.0.0"
- },
- "bin": {
- "node-which": "bin/node-which"
- },
- "engines": {
- "node": ">= 8"
- }
- },
"node_modules/css-tree": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.1.0.tgz",
"integrity": "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"mdn-data": "2.12.2",
"source-map-js": "^1.0.1"
@@ -8263,10 +8131,11 @@
}
},
"node_modules/cssstyle": {
- "version": "5.3.1",
- "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.1.tgz",
- "integrity": "sha512-g5PC9Aiph9eiczFpcgUhd9S4UUO3F+LHGRIi5NUMZ+4xtoIYbHNZwZnWA2JsFGe8OU8nl4WyaEFiZuGuxlutJQ==",
+ "version": "5.3.3",
+ "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-5.3.3.tgz",
+ "integrity": "sha512-OytmFH+13/QXONJcC75QNdMtKpceNk3u8ThBjyyYjkEcy/ekBwR1mMAuNvi3gdBPW3N5TlCzQ0WZw8H0lN/bDw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"@asamuzakjp/css-color": "^4.0.3",
"@csstools/css-syntax-patches-for-csstree": "^1.0.14",
@@ -8277,9 +8146,9 @@
}
},
"node_modules/csstype": {
- "version": "3.1.3",
- "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz",
- "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==",
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz",
+ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==",
"license": "MIT"
},
"node_modules/damerau-levenshtein": {
@@ -8294,6 +8163,7 @@
"resolved": "https://registry.npmjs.org/data-urls/-/data-urls-6.0.0.tgz",
"integrity": "sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"whatwg-mimetype": "^4.0.0",
"whatwg-url": "^15.0.0"
@@ -8367,9 +8237,9 @@
}
},
"node_modules/debug": {
- "version": "4.4.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
- "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
+ "version": "4.4.3",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz",
+ "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==",
"license": "MIT",
"dependencies": {
"ms": "^2.1.3"
@@ -8417,16 +8287,6 @@
}
}
},
- "node_modules/deep-eql": {
- "version": "5.0.2",
- "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz",
- "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=6"
- }
- },
"node_modules/deep-is": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
@@ -8516,9 +8376,9 @@
}
},
"node_modules/detect-libc": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.4.tgz",
- "integrity": "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==",
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz",
+ "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==",
"license": "Apache-2.0",
"engines": {
"node": ">=8"
@@ -8570,6 +8430,14 @@
"dev": true,
"license": "MIT"
},
+ "node_modules/dompurify": {
+ "version": "3.2.7",
+ "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.2.7.tgz",
+ "integrity": "sha512-WhL/YuveyGXJaerVlMYGWhvQswa7myDG17P7Vu65EWC05o8vfeNbvNf4d/BOvH99+ZW+LlQsc1GDKMa1vNK6dw==",
+ "optionalDependencies": {
+ "@types/trusted-types": "^2.0.7"
+ }
+ },
"node_modules/dot-case": {
"version": "3.0.4",
"resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz",
@@ -8582,9 +8450,9 @@
}
},
"node_modules/downshift": {
- "version": "9.0.10",
- "resolved": "https://registry.npmjs.org/downshift/-/downshift-9.0.10.tgz",
- "integrity": "sha512-TP/iqV6bBok6eGD5tZ8boM8Xt7/+DZvnVNr8cNIhbAm2oUBd79Tudiccs2hbcV9p7xAgS/ozE7Hxy3a9QqS6Mw==",
+ "version": "9.0.12",
+ "resolved": "https://registry.npmjs.org/downshift/-/downshift-9.0.12.tgz",
+ "integrity": "sha512-kFq2pNHm3kmhFfW55RW7+lXliEHg98sKImodICvJfbtvRB6OUiLr138Z8MW5/8t5JaeGZ4Wtomi3Ds72EKVH2Q==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.24.5",
@@ -8611,13 +8479,6 @@
"node": ">= 0.4"
}
},
- "node_modules/eastasianwidth": {
- "version": "0.2.0",
- "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
- "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/ee-first": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
@@ -8625,9 +8486,9 @@
"license": "MIT"
},
"node_modules/electron-to-chromium": {
- "version": "1.5.214",
- "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.214.tgz",
- "integrity": "sha512-TpvUNdha+X3ybfU78NoQatKvQEm1oq3lf2QbnmCEdw+Bd9RuIAY+hJTvq1avzHM0f7EJfnH3vbCnbzKzisc/9Q==",
+ "version": "1.5.263",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.263.tgz",
+ "integrity": "sha512-DrqJ11Knd+lo+dv+lltvfMDLU27g14LMdH2b0O3Pio4uk0x+z7OR+JrmyacTPN2M8w3BrZ7/RTwG3R9B7irPlg==",
"license": "ISC"
},
"node_modules/emoji-regex": {
@@ -8736,6 +8597,7 @@
"resolved": "https://registry.npmjs.org/environment/-/environment-1.1.0.tgz",
"integrity": "sha512-xUtoPkMggbz0MPyPiIWr1Kp4aeWJjDZ6SMvURhimjdZgsRuDplF5/s9hcgGhyXMhs+6vpnuoiZ2kFiu3FMnS8Q==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18"
},
@@ -8743,23 +8605,23 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/err-code": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/err-code/-/err-code-2.0.3.tgz",
- "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/error-ex": {
- "version": "1.3.2",
- "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
- "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "version": "1.3.4",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz",
+ "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"is-arrayish": "^0.2.1"
}
},
+ "node_modules/error-ex/node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/es-abstract": {
"version": "1.24.0",
"resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.24.0.tgz",
@@ -8941,9 +8803,9 @@
}
},
"node_modules/esbuild": {
- "version": "0.25.9",
- "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.9.tgz",
- "integrity": "sha512-CRbODhYyQx3qp7ZEwzxOk4JBqmD/seJrzPa/cGjY1VtIn5E09Oi9/dB4JwctnfZ8Q8iT7rioVv5k/FNT/uf54g==",
+ "version": "0.25.12",
+ "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz",
+ "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==",
"hasInstallScript": true,
"license": "MIT",
"bin": {
@@ -8953,32 +8815,32 @@
"node": ">=18"
},
"optionalDependencies": {
- "@esbuild/aix-ppc64": "0.25.9",
- "@esbuild/android-arm": "0.25.9",
- "@esbuild/android-arm64": "0.25.9",
- "@esbuild/android-x64": "0.25.9",
- "@esbuild/darwin-arm64": "0.25.9",
- "@esbuild/darwin-x64": "0.25.9",
- "@esbuild/freebsd-arm64": "0.25.9",
- "@esbuild/freebsd-x64": "0.25.9",
- "@esbuild/linux-arm": "0.25.9",
- "@esbuild/linux-arm64": "0.25.9",
- "@esbuild/linux-ia32": "0.25.9",
- "@esbuild/linux-loong64": "0.25.9",
- "@esbuild/linux-mips64el": "0.25.9",
- "@esbuild/linux-ppc64": "0.25.9",
- "@esbuild/linux-riscv64": "0.25.9",
- "@esbuild/linux-s390x": "0.25.9",
- "@esbuild/linux-x64": "0.25.9",
- "@esbuild/netbsd-arm64": "0.25.9",
- "@esbuild/netbsd-x64": "0.25.9",
- "@esbuild/openbsd-arm64": "0.25.9",
- "@esbuild/openbsd-x64": "0.25.9",
- "@esbuild/openharmony-arm64": "0.25.9",
- "@esbuild/sunos-x64": "0.25.9",
- "@esbuild/win32-arm64": "0.25.9",
- "@esbuild/win32-ia32": "0.25.9",
- "@esbuild/win32-x64": "0.25.9"
+ "@esbuild/aix-ppc64": "0.25.12",
+ "@esbuild/android-arm": "0.25.12",
+ "@esbuild/android-arm64": "0.25.12",
+ "@esbuild/android-x64": "0.25.12",
+ "@esbuild/darwin-arm64": "0.25.12",
+ "@esbuild/darwin-x64": "0.25.12",
+ "@esbuild/freebsd-arm64": "0.25.12",
+ "@esbuild/freebsd-x64": "0.25.12",
+ "@esbuild/linux-arm": "0.25.12",
+ "@esbuild/linux-arm64": "0.25.12",
+ "@esbuild/linux-ia32": "0.25.12",
+ "@esbuild/linux-loong64": "0.25.12",
+ "@esbuild/linux-mips64el": "0.25.12",
+ "@esbuild/linux-ppc64": "0.25.12",
+ "@esbuild/linux-riscv64": "0.25.12",
+ "@esbuild/linux-s390x": "0.25.12",
+ "@esbuild/linux-x64": "0.25.12",
+ "@esbuild/netbsd-arm64": "0.25.12",
+ "@esbuild/netbsd-x64": "0.25.12",
+ "@esbuild/openbsd-arm64": "0.25.12",
+ "@esbuild/openbsd-x64": "0.25.12",
+ "@esbuild/openharmony-arm64": "0.25.12",
+ "@esbuild/sunos-x64": "0.25.12",
+ "@esbuild/win32-arm64": "0.25.12",
+ "@esbuild/win32-ia32": "0.25.12",
+ "@esbuild/win32-x64": "0.25.12"
}
},
"node_modules/escalade": {
@@ -9510,9 +9372,9 @@
}
},
"node_modules/eslint-plugin-unused-imports": {
- "version": "4.2.0",
- "resolved": "https://registry.npmjs.org/eslint-plugin-unused-imports/-/eslint-plugin-unused-imports-4.2.0.tgz",
- "integrity": "sha512-hLbJ2/wnjKq4kGA9AUaExVFIbNzyxYdVo49QZmKCnhk5pc9wcYRbfgLHvWJ8tnsdcseGhoUAddm9gn/lt+d74w==",
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/eslint-plugin-unused-imports/-/eslint-plugin-unused-imports-4.3.0.tgz",
+ "integrity": "sha512-ZFBmXMGBYfHttdRtOG9nFFpmUvMtbHSjsKrS20vdWdbfiVYsO3yA2SGYy9i9XmZJDfMGBflZGBCm70SEnFQtOA==",
"dev": true,
"license": "MIT",
"peerDependencies": {
@@ -9675,7 +9537,8 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz",
"integrity": "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/exit-hook": {
"version": "2.2.1",
@@ -9701,38 +9564,38 @@
}
},
"node_modules/express": {
- "version": "4.21.2",
- "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz",
- "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==",
+ "version": "4.22.1",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz",
+ "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==",
"dependencies": {
"accepts": "~1.3.8",
"array-flatten": "1.1.1",
- "body-parser": "1.20.3",
- "content-disposition": "0.5.4",
+ "body-parser": "~1.20.3",
+ "content-disposition": "~0.5.4",
"content-type": "~1.0.4",
- "cookie": "0.7.1",
- "cookie-signature": "1.0.6",
+ "cookie": "~0.7.1",
+ "cookie-signature": "~1.0.6",
"debug": "2.6.9",
"depd": "2.0.0",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
- "finalhandler": "1.3.1",
- "fresh": "0.5.2",
- "http-errors": "2.0.0",
+ "finalhandler": "~1.3.1",
+ "fresh": "~0.5.2",
+ "http-errors": "~2.0.0",
"merge-descriptors": "1.0.3",
"methods": "~1.1.2",
- "on-finished": "2.4.1",
+ "on-finished": "~2.4.1",
"parseurl": "~1.3.3",
- "path-to-regexp": "0.1.12",
+ "path-to-regexp": "~0.1.12",
"proxy-addr": "~2.0.7",
- "qs": "6.13.0",
+ "qs": "~6.14.0",
"range-parser": "~1.2.1",
"safe-buffer": "5.2.1",
- "send": "0.19.0",
- "serve-static": "1.16.2",
+ "send": "~0.19.0",
+ "serve-static": "~1.16.2",
"setprototypeof": "1.2.0",
- "statuses": "2.0.1",
+ "statuses": "~2.0.1",
"type-is": "~1.6.18",
"utils-merge": "1.0.1",
"vary": "~1.1.2"
@@ -9758,6 +9621,13 @@
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
+ "node_modules/exsolve": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/exsolve/-/exsolve-1.0.8.tgz",
+ "integrity": "sha512-LmDxfWXwcTArk8fUEnOfSZpHOJ6zOMUJKOtFLFqJLoKJetuQG874Uc7/Kki7zFLzYybmZhp1M7+98pfMqeX8yA==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/extend": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
@@ -9878,16 +9748,16 @@
}
},
"node_modules/finalhandler": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz",
- "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==",
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz",
+ "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==",
"dependencies": {
"debug": "2.6.9",
"encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
- "on-finished": "2.4.1",
+ "on-finished": "~2.4.1",
"parseurl": "~1.3.3",
- "statuses": "2.0.1",
+ "statuses": "~2.0.2",
"unpipe": "~1.0.0"
},
"engines": {
@@ -9991,27 +9861,10 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/foreground-child": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz",
- "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "cross-spawn": "^7.0.6",
- "signal-exit": "^4.0.1"
- },
- "engines": {
- "node": ">=14"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/form-data": {
- "version": "4.0.4",
- "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
- "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
+ "version": "4.0.5",
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz",
+ "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==",
"license": "MIT",
"dependencies": {
"asynckit": "^0.4.0",
@@ -10041,25 +9894,26 @@
}
},
"node_modules/fraction.js": {
- "version": "4.3.7",
- "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
- "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==",
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz",
+ "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": "*"
},
"funding": {
- "type": "patreon",
+ "type": "github",
"url": "https://github.com/sponsors/rawify"
}
},
"node_modules/framer-motion": {
- "version": "12.23.22",
- "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.23.22.tgz",
- "integrity": "sha512-ZgGvdxXCw55ZYvhoZChTlG6pUuehecgvEAJz0BHoC5pQKW1EC5xf1Mul1ej5+ai+pVY0pylyFfdl45qnM1/GsA==",
+ "version": "12.23.25",
+ "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-12.23.25.tgz",
+ "integrity": "sha512-gUHGl2e4VG66jOcH0JHhuJQr6ZNwrET9g31ZG0xdXzT0CznP7fHX4P8Bcvuc4MiUB90ysNnWX2ukHRIggkl6hQ==",
+ "license": "MIT",
"dependencies": {
- "motion-dom": "^12.23.21",
+ "motion-dom": "^12.23.23",
"motion-utils": "^12.23.6",
"tslib": "^2.4.0"
},
@@ -10149,6 +10003,16 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/generator-function": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/generator-function/-/generator-function-2.0.1.tgz",
+ "integrity": "sha512-SFdFmIJi+ybC0vjlHN0ZGVGHc3lgE0DxPAT0djjVg+kjOnSqclqmj0KQ7ykTOLP6YxoqOvuAODGdcHJn+43q3g==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
"node_modules/gensync": {
"version": "1.0.0-beta.2",
"resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
@@ -10173,6 +10037,7 @@
"resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.4.0.tgz",
"integrity": "sha512-QZjmEOC+IT1uk6Rx0sX22V6uHWVwbdbxf1faPqJ1QhLdGgsRGCZoyaQBm/piRdJy/D2um6hM1UP7ZEeQ4EkP+Q==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18"
},
@@ -10248,21 +10113,22 @@
}
},
"node_modules/glob": {
- "version": "10.4.5",
- "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz",
- "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==",
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "deprecated": "Glob versions prior to v9 are no longer supported",
"dev": true,
"license": "ISC",
"dependencies": {
- "foreground-child": "^3.1.0",
- "jackspeak": "^3.1.2",
- "minimatch": "^9.0.4",
- "minipass": "^7.1.2",
- "package-json-from-dist": "^1.0.0",
- "path-scurry": "^1.11.1"
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
},
- "bin": {
- "glob": "dist/esm/bin.mjs"
+ "engines": {
+ "node": "*"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
@@ -10281,6 +10147,30 @@
"node": ">=10.13.0"
}
},
+ "node_modules/glob/node_modules/brace-expansion": {
+ "version": "1.1.12",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
+ "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/glob/node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dev": true,
+ "license": "ISC",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
"node_modules/globals": {
"version": "13.24.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz",
@@ -10343,9 +10233,9 @@
"license": "MIT"
},
"node_modules/goober": {
- "version": "2.1.16",
- "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.16.tgz",
- "integrity": "sha512-erjk19y1U33+XAMe1VTvIONHYoSqE4iS7BYUZfHaqeohLmnC0FdxEh7rQU+6MZ4OajItzjZFSRtVANrQwNq6/g==",
+ "version": "2.1.18",
+ "resolved": "https://registry.npmjs.org/goober/-/goober-2.1.18.tgz",
+ "integrity": "sha512-2vFqsaDVIT9Gz7N6kAL++pLpp41l3PfDuusHcjnGLfR6+huZkl6ziX+zgVC3ZxpqWhzH6pyDdGrCeDhMIvwaxw==",
"license": "MIT",
"peerDependencies": {
"csstype": "^3.0.10"
@@ -10377,9 +10267,9 @@
"license": "MIT"
},
"node_modules/graphql": {
- "version": "16.11.0",
- "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.11.0.tgz",
- "integrity": "sha512-mS1lbMsxgQj6hge1XZ6p7GPhbrtFwUFYi3wRzXAC/FmYnyXMTvvI3td3rjmQ2u8ewXueaSvRPWaEcgVVOT9Jnw==",
+ "version": "16.12.0",
+ "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.12.0.tgz",
+ "integrity": "sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==",
"dev": true,
"license": "MIT",
"engines": {
@@ -10478,10 +10368,13 @@
}
},
"node_modules/hast-util-parse-selector": {
- "version": "2.2.5",
- "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz",
- "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==",
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz",
+ "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==",
"license": "MIT",
+ "dependencies": {
+ "@types/hast": "^3.0.0"
+ },
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
@@ -10528,70 +10421,22 @@
}
},
"node_modules/hastscript": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz",
- "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==",
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz",
+ "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==",
"license": "MIT",
"dependencies": {
- "@types/hast": "^2.0.0",
- "comma-separated-tokens": "^1.0.0",
- "hast-util-parse-selector": "^2.0.0",
- "property-information": "^5.0.0",
- "space-separated-tokens": "^1.0.0"
+ "@types/hast": "^3.0.0",
+ "comma-separated-tokens": "^2.0.0",
+ "hast-util-parse-selector": "^4.0.0",
+ "property-information": "^7.0.0",
+ "space-separated-tokens": "^2.0.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/unified"
}
},
- "node_modules/hastscript/node_modules/@types/hast": {
- "version": "2.3.10",
- "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz",
- "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==",
- "license": "MIT",
- "dependencies": {
- "@types/unist": "^2"
- }
- },
- "node_modules/hastscript/node_modules/@types/unist": {
- "version": "2.0.11",
- "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
- "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
- "license": "MIT"
- },
- "node_modules/hastscript/node_modules/comma-separated-tokens": {
- "version": "1.0.8",
- "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz",
- "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/hastscript/node_modules/property-information": {
- "version": "5.6.0",
- "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz",
- "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==",
- "license": "MIT",
- "dependencies": {
- "xtend": "^4.0.0"
- },
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/hastscript/node_modules/space-separated-tokens": {
- "version": "1.1.5",
- "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz",
- "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
"node_modules/headers-polyfill": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz",
@@ -10614,29 +10459,6 @@
"integrity": "sha512-PDEfEF102G23vHmPhLyPboFCD+BkMGu+GuJe2d9/eH4FsCwvgBpnc9n0pGE+ffKdph38s6foEZiEjdgHdzp+IA==",
"license": "CC0-1.0"
},
- "node_modules/hosted-git-info": {
- "version": "6.1.3",
- "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-6.1.3.tgz",
- "integrity": "sha512-HVJyzUrLIL1c0QmviVh5E8VGyUS7xCFPS6yydaVd1UegW+ibV/CohqTH9MkOLDp5o+rb82DMo77PTuc9F/8GKw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "lru-cache": "^7.5.1"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/hosted-git-info/node_modules/lru-cache": {
- "version": "7.18.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
- "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": ">=12"
- }
- },
"node_modules/html-encoding-sniffer": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz",
@@ -10677,18 +10499,22 @@
}
},
"node_modules/http-errors": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
- "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz",
+ "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==",
"dependencies": {
- "depd": "2.0.0",
- "inherits": "2.0.4",
- "setprototypeof": "1.2.0",
- "statuses": "2.0.1",
- "toidentifier": "1.0.1"
+ "depd": "~2.0.0",
+ "inherits": "~2.0.4",
+ "setprototypeof": "~1.2.0",
+ "statuses": "~2.0.2",
+ "toidentifier": "~1.0.1"
},
"engines": {
"node": ">= 0.8"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
}
},
"node_modules/http-proxy-agent": {
@@ -10736,9 +10562,9 @@
}
},
"node_modules/i18next": {
- "version": "25.5.2",
- "resolved": "https://registry.npmjs.org/i18next/-/i18next-25.5.2.tgz",
- "integrity": "sha512-lW8Zeh37i/o0zVr+NoCHfNnfvVw+M6FQbRp36ZZ/NyHDJ3NJVpp2HhAUyU9WafL5AssymNoOjMRB48mmx2P6Hw==",
+ "version": "25.7.1",
+ "resolved": "https://registry.npmjs.org/i18next/-/i18next-25.7.1.tgz",
+ "integrity": "sha512-XbTnkh1yCZWSAZGnA9xcQfHcYNgZs2cNxm+c6v1Ma9UAUGCeJPplRe1ILia6xnDvXBjk0uXU+Z8FYWhA19SKFw==",
"funding": [
{
"type": "individual",
@@ -10755,7 +10581,7 @@
],
"license": "MIT",
"dependencies": {
- "@babel/runtime": "^7.27.6"
+ "@babel/runtime": "^7.28.4"
},
"peerDependencies": {
"typescript": "^5"
@@ -10861,15 +10687,16 @@
"license": "ISC"
},
"node_modules/inline-style-parser": {
- "version": "0.2.4",
- "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.4.tgz",
- "integrity": "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==",
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz",
+ "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==",
"license": "MIT"
},
"node_modules/input-otp": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/input-otp/-/input-otp-1.4.1.tgz",
"integrity": "sha512-+yvpmKYKHi9jIGngxagY9oWiiblPB7+nEO75F2l2o4vs+6vpPZZmUl4tBNYuTCvQjhvEIbdNeJu70bhfYP2nbw==",
+ "license": "MIT",
"peerDependencies": {
"react": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc",
"react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0.0 || ^19.0.0-rc"
@@ -10891,14 +10718,14 @@
}
},
"node_modules/intl-messageformat": {
- "version": "10.7.16",
- "resolved": "https://registry.npmjs.org/intl-messageformat/-/intl-messageformat-10.7.16.tgz",
- "integrity": "sha512-UmdmHUmp5CIKKjSoE10la5yfU+AYJAaiYLsodbjL4lji83JNvgOQUjGaGhGrpFCb0Uh7sl7qfP1IyILa8Z40ug==",
+ "version": "10.7.18",
+ "resolved": "https://registry.npmjs.org/intl-messageformat/-/intl-messageformat-10.7.18.tgz",
+ "integrity": "sha512-m3Ofv/X/tV8Y3tHXLohcuVuhWKo7BBq62cqY15etqmLxg2DZ34AGGgQDeR+SCta2+zICb1NX83af0GJmbQ1++g==",
"license": "BSD-3-Clause",
"dependencies": {
- "@formatjs/ecma402-abstract": "2.3.4",
+ "@formatjs/ecma402-abstract": "2.3.6",
"@formatjs/fast-memoize": "2.2.7",
- "@formatjs/icu-messageformat-parser": "2.11.2",
+ "@formatjs/icu-messageformat-parser": "2.11.4",
"tslib": "^2.8.0"
}
},
@@ -10953,10 +10780,9 @@
}
},
"node_modules/is-arrayish": {
- "version": "0.2.1",
- "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
- "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==",
- "dev": true,
+ "version": "0.3.4",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.4.tgz",
+ "integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==",
"license": "MIT"
},
"node_modules/is-async-function": {
@@ -11117,6 +10943,7 @@
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz",
"integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"get-east-asian-width": "^1.3.1"
},
@@ -11128,14 +10955,15 @@
}
},
"node_modules/is-generator-function": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.0.tgz",
- "integrity": "sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==",
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.1.2.tgz",
+ "integrity": "sha512-upqt1SkGkODW9tsGNG5mtXTXtECizwtS2kA161M+gJPc1xdb/Ax629af6YrTwcOeQHbewrPNlE5Dx7kzvXTizA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "call-bound": "^1.0.3",
- "get-proto": "^1.0.0",
+ "call-bound": "^1.0.4",
+ "generator-function": "^2.0.0",
+ "get-proto": "^1.0.1",
"has-tostringtag": "^1.0.2",
"safe-regex-test": "^1.1.0"
},
@@ -11411,9 +11239,10 @@
"license": "MIT"
},
"node_modules/isbot": {
- "version": "5.1.31",
- "resolved": "https://registry.npmjs.org/isbot/-/isbot-5.1.31.tgz",
- "integrity": "sha512-DPgQshehErHAqSCKDb3rNW03pa2wS/v5evvUqtxt6TTnHRqAG8FdzcSSJs9656pK6Y+NT7K9R4acEYXLHYfpUQ==",
+ "version": "5.1.32",
+ "resolved": "https://registry.npmjs.org/isbot/-/isbot-5.1.32.tgz",
+ "integrity": "sha512-VNfjM73zz2IBZmdShMfAUg10prm6t7HFUQmNAEOAVS4YH92ZrZcvkMcGX6cIgBJAzWDzPent/EeAtYEHNPNPBQ==",
+ "license": "Unlicense",
"engines": {
"node": ">=18"
}
@@ -11496,35 +11325,19 @@
"node": ">= 0.4"
}
},
- "node_modules/jackspeak": {
- "version": "3.4.3",
- "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz",
- "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "dependencies": {
- "@isaacs/cliui": "^8.0.2"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- },
- "optionalDependencies": {
- "@pkgjs/parseargs": "^0.11.0"
- }
- },
"node_modules/jiti": {
- "version": "2.5.1",
- "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.5.1.tgz",
- "integrity": "sha512-twQoecYPiVA5K/h6SxtORw/Bs3ar+mLUtoPSc7iMXzQzK8d7eJ/R09wmTwAjiamETn1cXYPGfNnu7DMoHgu12w==",
+ "version": "2.6.1",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz",
+ "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==",
"license": "MIT",
"bin": {
"jiti": "lib/jiti-cli.mjs"
}
},
"node_modules/jose": {
- "version": "6.1.0",
- "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.0.tgz",
- "integrity": "sha512-TTQJyoEoKcC1lscpVDCSsVgYzUDg/0Bt3WE//WiTPK6uOCQC2KZS4MpugbMWt/zyjkopgZoXhZuCi00gLudfUA==",
+ "version": "6.1.3",
+ "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.3.tgz",
+ "integrity": "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/panva"
@@ -11537,9 +11350,9 @@
"license": "MIT"
},
"node_modules/js-yaml": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
- "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz",
+ "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -11550,21 +11363,22 @@
}
},
"node_modules/jsdom": {
- "version": "27.0.0",
- "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.0.0.tgz",
- "integrity": "sha512-lIHeR1qlIRrIN5VMccd8tI2Sgw6ieYXSVktcSHaNe3Z5nE/tcPQYQWOq00wxMvYOsz+73eAkNenVvmPC6bba9A==",
+ "version": "27.2.0",
+ "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-27.2.0.tgz",
+ "integrity": "sha512-454TI39PeRDW1LgpyLPyURtB4Zx1tklSr6+OFOipsxGUH1WMTvk6C65JQdrj455+DP2uJ1+veBEHTGFKWVLFoA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "@asamuzakjp/dom-selector": "^6.5.4",
- "cssstyle": "^5.3.0",
+ "@acemir/cssom": "^0.9.23",
+ "@asamuzakjp/dom-selector": "^6.7.4",
+ "cssstyle": "^5.3.3",
"data-urls": "^6.0.0",
- "decimal.js": "^10.5.0",
+ "decimal.js": "^10.6.0",
"html-encoding-sniffer": "^4.0.0",
"http-proxy-agent": "^7.0.2",
"https-proxy-agent": "^7.0.6",
"is-potential-custom-element-name": "^1.0.1",
- "parse5": "^7.3.0",
- "rrweb-cssom": "^0.8.0",
+ "parse5": "^8.0.0",
"saxes": "^6.0.0",
"symbol-tree": "^3.2.4",
"tough-cookie": "^6.0.0",
@@ -11572,12 +11386,12 @@
"webidl-conversions": "^8.0.0",
"whatwg-encoding": "^3.1.1",
"whatwg-mimetype": "^4.0.0",
- "whatwg-url": "^15.0.0",
- "ws": "^8.18.2",
+ "whatwg-url": "^15.1.0",
+ "ws": "^8.18.3",
"xml-name-validator": "^5.0.0"
},
"engines": {
- "node": ">=20"
+ "node": "^20.19.0 || ^22.12.0 || >=24.0.0"
},
"peerDependencies": {
"canvas": "^3.0.0"
@@ -11608,14 +11422,11 @@
"license": "MIT"
},
"node_modules/json-parse-even-better-errors": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz",
- "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==",
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
"dev": true,
- "license": "MIT",
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
+ "license": "MIT"
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
@@ -11713,9 +11524,9 @@
}
},
"node_modules/lightningcss": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.1.tgz",
- "integrity": "sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz",
+ "integrity": "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==",
"license": "MPL-2.0",
"dependencies": {
"detect-libc": "^2.0.3"
@@ -11728,22 +11539,43 @@
"url": "https://opencollective.com/parcel"
},
"optionalDependencies": {
- "lightningcss-darwin-arm64": "1.30.1",
- "lightningcss-darwin-x64": "1.30.1",
- "lightningcss-freebsd-x64": "1.30.1",
- "lightningcss-linux-arm-gnueabihf": "1.30.1",
- "lightningcss-linux-arm64-gnu": "1.30.1",
- "lightningcss-linux-arm64-musl": "1.30.1",
- "lightningcss-linux-x64-gnu": "1.30.1",
- "lightningcss-linux-x64-musl": "1.30.1",
- "lightningcss-win32-arm64-msvc": "1.30.1",
- "lightningcss-win32-x64-msvc": "1.30.1"
+ "lightningcss-android-arm64": "1.30.2",
+ "lightningcss-darwin-arm64": "1.30.2",
+ "lightningcss-darwin-x64": "1.30.2",
+ "lightningcss-freebsd-x64": "1.30.2",
+ "lightningcss-linux-arm-gnueabihf": "1.30.2",
+ "lightningcss-linux-arm64-gnu": "1.30.2",
+ "lightningcss-linux-arm64-musl": "1.30.2",
+ "lightningcss-linux-x64-gnu": "1.30.2",
+ "lightningcss-linux-x64-musl": "1.30.2",
+ "lightningcss-win32-arm64-msvc": "1.30.2",
+ "lightningcss-win32-x64-msvc": "1.30.2"
+ }
+ },
+ "node_modules/lightningcss-android-arm64": {
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.30.2.tgz",
+ "integrity": "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==",
+ "cpu": [
+ "arm64"
+ ],
+ "license": "MPL-2.0",
+ "optional": true,
+ "os": [
+ "android"
+ ],
+ "engines": {
+ "node": ">= 12.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/parcel"
}
},
"node_modules/lightningcss-darwin-arm64": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.1.tgz",
- "integrity": "sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.2.tgz",
+ "integrity": "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==",
"cpu": [
"arm64"
],
@@ -11761,9 +11593,9 @@
}
},
"node_modules/lightningcss-darwin-x64": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.1.tgz",
- "integrity": "sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.2.tgz",
+ "integrity": "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==",
"cpu": [
"x64"
],
@@ -11781,9 +11613,9 @@
}
},
"node_modules/lightningcss-freebsd-x64": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.1.tgz",
- "integrity": "sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.2.tgz",
+ "integrity": "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==",
"cpu": [
"x64"
],
@@ -11801,9 +11633,9 @@
}
},
"node_modules/lightningcss-linux-arm-gnueabihf": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.1.tgz",
- "integrity": "sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.2.tgz",
+ "integrity": "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==",
"cpu": [
"arm"
],
@@ -11821,9 +11653,9 @@
}
},
"node_modules/lightningcss-linux-arm64-gnu": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.1.tgz",
- "integrity": "sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.2.tgz",
+ "integrity": "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==",
"cpu": [
"arm64"
],
@@ -11841,9 +11673,9 @@
}
},
"node_modules/lightningcss-linux-arm64-musl": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.1.tgz",
- "integrity": "sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.2.tgz",
+ "integrity": "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==",
"cpu": [
"arm64"
],
@@ -11861,9 +11693,9 @@
}
},
"node_modules/lightningcss-linux-x64-gnu": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.1.tgz",
- "integrity": "sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.2.tgz",
+ "integrity": "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==",
"cpu": [
"x64"
],
@@ -11881,9 +11713,9 @@
}
},
"node_modules/lightningcss-linux-x64-musl": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.1.tgz",
- "integrity": "sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.2.tgz",
+ "integrity": "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==",
"cpu": [
"x64"
],
@@ -11901,9 +11733,9 @@
}
},
"node_modules/lightningcss-win32-arm64-msvc": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.1.tgz",
- "integrity": "sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.2.tgz",
+ "integrity": "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==",
"cpu": [
"arm64"
],
@@ -11921,9 +11753,9 @@
}
},
"node_modules/lightningcss-win32-x64-msvc": {
- "version": "1.30.1",
- "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.1.tgz",
- "integrity": "sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==",
+ "version": "1.30.2",
+ "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.2.tgz",
+ "integrity": "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==",
"cpu": [
"x64"
],
@@ -11948,15 +11780,16 @@
"license": "MIT"
},
"node_modules/lint-staged": {
- "version": "16.2.3",
- "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-16.2.3.tgz",
- "integrity": "sha512-1OnJEESB9zZqsp61XHH2fvpS1es3hRCxMplF/AJUDa8Ho8VrscYDIuxGrj3m8KPXbcWZ8fT9XTMUhEQmOVKpKw==",
+ "version": "16.2.7",
+ "resolved": "https://registry.npmjs.org/lint-staged/-/lint-staged-16.2.7.tgz",
+ "integrity": "sha512-lDIj4RnYmK7/kXMya+qJsmkRFkGolciXjrsZ6PC25GdTfWOAWetR0ZbsNXRAj1EHHImRSalc+whZFg56F5DVow==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "commander": "^14.0.1",
- "listr2": "^9.0.4",
+ "commander": "^14.0.2",
+ "listr2": "^9.0.5",
"micromatch": "^4.0.8",
- "nano-spawn": "^1.0.3",
+ "nano-spawn": "^2.0.0",
"pidtree": "^0.6.0",
"string-argv": "^0.3.2",
"yaml": "^2.8.1"
@@ -11972,10 +11805,11 @@
}
},
"node_modules/listr2": {
- "version": "9.0.4",
- "resolved": "https://registry.npmjs.org/listr2/-/listr2-9.0.4.tgz",
- "integrity": "sha512-1wd/kpAdKRLwv7/3OKC8zZ5U8e/fajCfWMxacUvB79S5nLrYGPtUI/8chMQhn3LQjsRVErTb9i1ECAwW0ZIHnQ==",
+ "version": "9.0.5",
+ "resolved": "https://registry.npmjs.org/listr2/-/listr2-9.0.5.tgz",
+ "integrity": "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"cli-truncate": "^5.0.0",
"colorette": "^2.0.20",
@@ -11988,98 +11822,19 @@
"node": ">=20.0.0"
}
},
- "node_modules/listr2/node_modules/ansi-regex": {
- "version": "6.2.2",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
- "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
- "dev": true,
+ "node_modules/local-access": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/local-access/-/local-access-1.1.0.tgz",
+ "integrity": "sha512-XfegD5pyTAfb+GY6chk283Ox5z8WexG56OvM06RWLpAc/UHozO8X6xAxEkIitZOtsSMM1Yr3DkHgW5W+onLhCw==",
+ "license": "MIT",
"engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ "node": ">=6"
}
},
- "node_modules/listr2/node_modules/ansi-styles": {
- "version": "6.2.3",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
- "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
- "dev": true,
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
- }
- },
- "node_modules/listr2/node_modules/emoji-regex": {
- "version": "10.5.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.5.0.tgz",
- "integrity": "sha512-lb49vf1Xzfx080OKA0o6l8DQQpV+6Vg95zyCJX9VB/BqKYlhG7N4wgROUUHRA+ZPUefLnteQOad7z1kT2bV7bg==",
- "dev": true
- },
- "node_modules/listr2/node_modules/string-width": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
- "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
- "dev": true,
- "dependencies": {
- "emoji-regex": "^10.3.0",
- "get-east-asian-width": "^1.0.0",
- "strip-ansi": "^7.1.0"
- },
- "engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/listr2/node_modules/strip-ansi": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
- "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
- "dev": true,
- "dependencies": {
- "ansi-regex": "^6.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/strip-ansi?sponsor=1"
- }
- },
- "node_modules/listr2/node_modules/wrap-ansi": {
- "version": "9.0.2",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
- "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^6.2.1",
- "string-width": "^7.0.0",
- "strip-ansi": "^7.1.0"
- },
- "engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
- "node_modules/local-access": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/local-access/-/local-access-1.1.0.tgz",
- "integrity": "sha512-XfegD5pyTAfb+GY6chk283Ox5z8WexG56OvM06RWLpAc/UHozO8X6xAxEkIitZOtsSMM1Yr3DkHgW5W+onLhCw==",
- "license": "MIT",
- "engines": {
- "node": ">=6"
- }
- },
- "node_modules/locate-path": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
- "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "node_modules/locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -12117,6 +11872,7 @@
"resolved": "https://registry.npmjs.org/log-update/-/log-update-6.1.0.tgz",
"integrity": "sha512-9ie8ItPR6tjY5uYJh8K/Zrv/RMZ5VOlOWvtZdEHYSTFKZfIBPQa9tOAEeAWhd+AnIneLJ22w5fjOYtoutpWq5w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"ansi-escapes": "^7.0.0",
"cli-cursor": "^5.0.0",
@@ -12136,6 +11892,7 @@
"resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
"integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=12"
},
@@ -12143,46 +11900,12 @@
"url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
- "node_modules/log-update/node_modules/ansi-styles": {
- "version": "6.2.3",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
- "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
- "dev": true,
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
- }
- },
- "node_modules/log-update/node_modules/emoji-regex": {
- "version": "10.5.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.5.0.tgz",
- "integrity": "sha512-lb49vf1Xzfx080OKA0o6l8DQQpV+6Vg95zyCJX9VB/BqKYlhG7N4wgROUUHRA+ZPUefLnteQOad7z1kT2bV7bg==",
- "dev": true
- },
- "node_modules/log-update/node_modules/string-width": {
- "version": "7.2.0",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
- "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
- "dev": true,
- "dependencies": {
- "emoji-regex": "^10.3.0",
- "get-east-asian-width": "^1.0.0",
- "strip-ansi": "^7.1.0"
- },
- "engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
"node_modules/log-update/node_modules/strip-ansi": {
"version": "7.1.2",
"resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
"integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"ansi-regex": "^6.0.1"
},
@@ -12193,23 +11916,6 @@
"url": "https://github.com/chalk/strip-ansi?sponsor=1"
}
},
- "node_modules/log-update/node_modules/wrap-ansi": {
- "version": "9.0.2",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
- "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
- "dev": true,
- "dependencies": {
- "ansi-styles": "^6.2.1",
- "string-width": "^7.0.0",
- "strip-ansi": "^7.1.0"
- },
- "engines": {
- "node": ">=18"
- },
- "funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
"node_modules/longest-streak": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
@@ -12232,13 +11938,6 @@
"loose-envify": "cli.js"
}
},
- "node_modules/loupe": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz",
- "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/lower-case": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz",
@@ -12273,9 +11972,9 @@
}
},
"node_modules/lucide-react": {
- "version": "0.544.0",
- "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.544.0.tgz",
- "integrity": "sha512-t5tS44bqd825zAW45UQxpG2CvcC4urOwn2TrwSH8u+MjeE+1NnWl6QqeQ/6NdjMqdOygyiT9p3Ev0p1NJykxjw==",
+ "version": "0.556.0",
+ "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.556.0.tgz",
+ "integrity": "sha512-iOb8dRk7kLaYBZhR2VlV1CeJGxChBgUthpSP8wom9jfj79qovgG6qcSdiy6vkoREKPnbUYzJsCn4o4PtG3Iy+A==",
"peerDependencies": {
"react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
@@ -12291,24 +11990,24 @@
}
},
"node_modules/magic-string": {
- "version": "0.30.18",
- "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.18.tgz",
- "integrity": "sha512-yi8swmWbO17qHhwIBNeeZxTceJMeBvWJaId6dyvTSOwTipqeHhMhOrz6513r1sOKnpvQ7zkhlG8tPrpilwTxHQ==",
+ "version": "0.30.21",
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz",
+ "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==",
"license": "MIT",
"dependencies": {
"@jridgewell/sourcemap-codec": "^1.5.5"
}
},
"node_modules/magicast": {
- "version": "0.3.5",
- "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz",
- "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==",
+ "version": "0.5.1",
+ "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.5.1.tgz",
+ "integrity": "sha512-xrHS24IxaLrvuo613F719wvOIv9xPHFWQHuvGUBmPnCA/3MQxKI3b+r7n1jAoDHmsbC5bRhTZYR77invLAxVnw==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@babel/parser": "^7.25.4",
- "@babel/types": "^7.25.4",
- "source-map-js": "^1.2.0"
+ "@babel/parser": "^7.28.5",
+ "@babel/types": "^7.28.5",
+ "source-map-js": "^1.2.1"
}
},
"node_modules/make-dir": {
@@ -12337,6 +12036,17 @@
"url": "https://github.com/sponsors/wooorm"
}
},
+ "node_modules/marked": {
+ "version": "14.0.0",
+ "resolved": "https://registry.npmjs.org/marked/-/marked-14.0.0.tgz",
+ "integrity": "sha512-uIj4+faQ+MgHgwUW1l2PsPglZLOLOT1uErt06dAPtx2kjteLAkbsd/0FiYg/MGS+i7ZKLb7w2WClxHkzOOuryQ==",
+ "bin": {
+ "marked": "bin/marked.js"
+ },
+ "engines": {
+ "node": ">= 18"
+ }
+ },
"node_modules/math-intrinsics": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
@@ -12588,9 +12298,9 @@
}
},
"node_modules/mdast-util-to-hast": {
- "version": "13.2.0",
- "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.0.tgz",
- "integrity": "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==",
+ "version": "13.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz",
+ "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==",
"license": "MIT",
"dependencies": {
"@types/hast": "^3.0.0",
@@ -12646,7 +12356,8 @@
"version": "2.12.2",
"resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.12.2.tgz",
"integrity": "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==",
- "dev": true
+ "dev": true,
+ "license": "CC0-1.0"
},
"node_modules/media-typer": {
"version": "0.3.0",
@@ -13305,6 +13016,7 @@
"resolved": "https://registry.npmjs.org/mimic-function/-/mimic-function-5.0.1.tgz",
"integrity": "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18"
},
@@ -13348,48 +13060,13 @@
"url": "https://github.com/sponsors/ljharb"
}
},
- "node_modules/minipass": {
- "version": "7.1.2",
- "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz",
- "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==",
- "license": "ISC",
- "engines": {
- "node": ">=16 || 14 >=14.17"
- }
- },
- "node_modules/minizlib": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-3.0.2.tgz",
- "integrity": "sha512-oG62iEk+CYt5Xj2YqI5Xi9xWUeZhDI8jjQmC5oThVH5JGCTgIjr7ciJDzC7MBzYd//WvR1OTmP5Q38Q8ShQtVA==",
- "license": "MIT",
- "dependencies": {
- "minipass": "^7.1.2"
- },
- "engines": {
- "node": ">= 18"
- }
- },
- "node_modules/mkdirp": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-3.0.1.tgz",
- "integrity": "sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==",
- "license": "MIT",
- "bin": {
- "mkdirp": "dist/cjs/src/bin.js"
- },
- "engines": {
- "node": ">=10"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
"node_modules/monaco-editor": {
- "version": "0.53.0",
- "resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.53.0.tgz",
- "integrity": "sha512-0WNThgC6CMWNXXBxTbaYYcunj08iB5rnx4/G56UOPeL9UVIUGGHA1GR0EWIh9Ebabj7NpCRawQ5b0hfN1jQmYQ==",
+ "version": "0.55.1",
+ "resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.55.1.tgz",
+ "integrity": "sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A==",
"dependencies": {
- "@types/trusted-types": "^1.0.6"
+ "dompurify": "3.2.7",
+ "marked": "14.0.0"
}
},
"node_modules/morgan": {
@@ -13436,9 +13113,10 @@
}
},
"node_modules/motion-dom": {
- "version": "12.23.21",
- "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.23.21.tgz",
- "integrity": "sha512-5xDXx/AbhrfgsQmSE7YESMn4Dpo6x5/DTZ4Iyy4xqDvVHWvFVoV+V2Ri2S/ksx+D40wrZ7gPYiMWshkdoqNgNQ==",
+ "version": "12.23.23",
+ "resolved": "https://registry.npmjs.org/motion-dom/-/motion-dom-12.23.23.tgz",
+ "integrity": "sha512-n5yolOs0TQQBRUFImrRfs/+6X4p3Q4n1dUEqt/H58Vx7OW6RF+foWEgmTVDhIWJIMXOuNNL0apKH2S16en9eiA==",
+ "license": "MIT",
"dependencies": {
"motion-utils": "^12.23.6"
}
@@ -13446,7 +13124,8 @@
"node_modules/motion-utils": {
"version": "12.23.6",
"resolved": "https://registry.npmjs.org/motion-utils/-/motion-utils-12.23.6.tgz",
- "integrity": "sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ=="
+ "integrity": "sha512-eAWoPgr4eFEOFfg2WjIsMoqJTW6Z8MTUCgn/GZ3VRpClWBdnbjryiA3ZSNLyxCTmCQx4RmYX6jX1iWHbenUPNQ==",
+ "license": "MIT"
},
"node_modules/mri": {
"version": "1.2.0",
@@ -13473,30 +13152,30 @@
"license": "MIT"
},
"node_modules/msw": {
- "version": "2.11.1",
- "resolved": "https://registry.npmjs.org/msw/-/msw-2.11.1.tgz",
- "integrity": "sha512-dGSRx0AJmQVQfpGXTsAAq4JFdwdhOBdJ6sJS/jnN0ac3s0NZB6daacHF1z5Pefx+IejmvuiLWw260RlyQOf3sQ==",
+ "version": "2.12.3",
+ "resolved": "https://registry.npmjs.org/msw/-/msw-2.12.3.tgz",
+ "integrity": "sha512-/5rpGC0eK8LlFqsHaBmL19/PVKxu/CCt8pO1vzp9X6SDLsRDh/Ccudkf3Ur5lyaKxJz9ndAx+LaThdv0ySqB6A==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
- "@bundled-es-modules/cookie": "^2.0.1",
- "@bundled-es-modules/statuses": "^1.0.1",
"@inquirer/confirm": "^5.0.0",
- "@mswjs/interceptors": "^0.39.1",
+ "@mswjs/interceptors": "^0.40.0",
"@open-draft/deferred-promise": "^2.2.0",
- "@open-draft/until": "^2.1.0",
- "@types/cookie": "^0.6.0",
- "@types/statuses": "^2.0.4",
- "graphql": "^16.8.1",
+ "@types/statuses": "^2.0.6",
+ "cookie": "^1.0.2",
+ "graphql": "^16.12.0",
"headers-polyfill": "^4.0.2",
"is-node-process": "^1.2.0",
"outvariant": "^1.4.3",
"path-to-regexp": "^6.3.0",
"picocolors": "^1.1.1",
+ "rettime": "^0.7.0",
+ "statuses": "^2.0.2",
"strict-event-emitter": "^0.5.1",
"tough-cookie": "^6.0.0",
- "type-fest": "^4.26.1",
+ "type-fest": "^5.2.0",
+ "until-async": "^3.0.2",
"yargs": "^17.7.2"
},
"bin": {
@@ -13517,6 +13196,38 @@
}
}
},
+ "node_modules/msw/node_modules/@mswjs/interceptors": {
+ "version": "0.40.0",
+ "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.40.0.tgz",
+ "integrity": "sha512-EFd6cVbHsgLa6wa4RljGj6Wk75qoHxUSyc5asLyyPSyuhIcdS2Q3Phw6ImS1q+CkALthJRShiYfKANcQMuMqsQ==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "@open-draft/deferred-promise": "^2.2.0",
+ "@open-draft/logger": "^0.3.0",
+ "@open-draft/until": "^2.0.0",
+ "is-node-process": "^1.2.0",
+ "outvariant": "^1.4.3",
+ "strict-event-emitter": "^0.5.1"
+ },
+ "engines": {
+ "node": ">=18"
+ }
+ },
+ "node_modules/msw/node_modules/cookie": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
+ }
+ },
"node_modules/msw/node_modules/path-to-regexp": {
"version": "6.3.0",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.3.0.tgz",
@@ -13525,13 +13236,16 @@
"license": "MIT"
},
"node_modules/msw/node_modules/type-fest": {
- "version": "4.41.0",
- "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz",
- "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==",
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-5.3.0.tgz",
+ "integrity": "sha512-d9CwU93nN0IA1QL+GSNDdwLAu1Ew5ZjTwupvedwg3WdfoH6pIDvYQ2hV0Uc2nKBLPq7NB5apCx57MLS5qlmO5g==",
"dev": true,
"license": "(MIT OR CC0-1.0)",
+ "dependencies": {
+ "tagged-tag": "^1.0.0"
+ },
"engines": {
- "node": ">=16"
+ "node": ">=20"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
@@ -13548,9 +13262,9 @@
}
},
"node_modules/nano-spawn": {
- "version": "1.0.3",
- "resolved": "https://registry.npmjs.org/nano-spawn/-/nano-spawn-1.0.3.tgz",
- "integrity": "sha512-jtpsQDetTnvS2Ts1fiRdci5rx0VYws5jGyC+4IYOTnIQ/wwdf6JdomlHBwqC3bJYOvaKu0C2GSZ1A60anrYpaA==",
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/nano-spawn/-/nano-spawn-2.0.0.tgz",
+ "integrity": "sha512-tacvGzUY5o2D8CBh2rrwxyNojUsZNU2zjNTzKQrkgGJQTbGAfArVWXSKMBokBeeg6C7OLRGUEyoFlYbfeWQIqw==",
"dev": true,
"license": "MIT",
"engines": {
@@ -13648,27 +13362,11 @@
}
},
"node_modules/node-releases": {
- "version": "2.0.20",
- "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.20.tgz",
- "integrity": "sha512-7gK6zSXEH6neM212JgfYFXe+GmZQM+fia5SsusuBIUgnPheLFBmIPhtFoAQRj8/7wASYQnbDlHPVwY0BefoFgA==",
+ "version": "2.0.27",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz",
+ "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==",
"license": "MIT"
},
- "node_modules/normalize-package-data": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-5.0.0.tgz",
- "integrity": "sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q==",
- "dev": true,
- "license": "BSD-2-Clause",
- "dependencies": {
- "hosted-git-info": "^6.0.0",
- "is-core-module": "^2.8.1",
- "semver": "^7.3.5",
- "validate-npm-package-license": "^3.0.4"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
"node_modules/normalize-range": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
@@ -13679,61 +13377,6 @@
"node": ">=0.10.0"
}
},
- "node_modules/npm-install-checks": {
- "version": "6.3.0",
- "resolved": "https://registry.npmjs.org/npm-install-checks/-/npm-install-checks-6.3.0.tgz",
- "integrity": "sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw==",
- "dev": true,
- "license": "BSD-2-Clause",
- "dependencies": {
- "semver": "^7.1.1"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/npm-normalize-package-bin": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz",
- "integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/npm-package-arg": {
- "version": "10.1.0",
- "resolved": "https://registry.npmjs.org/npm-package-arg/-/npm-package-arg-10.1.0.tgz",
- "integrity": "sha512-uFyyCEmgBfZTtrKk/5xDfHp6+MdrqGotX/VoOyEEl3mBwiEE5FlBaePanazJSVMPT7vKepcjYBY2ztg9A3yPIA==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "hosted-git-info": "^6.0.0",
- "proc-log": "^3.0.0",
- "semver": "^7.3.5",
- "validate-npm-package-name": "^5.0.0"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/npm-pick-manifest": {
- "version": "8.0.2",
- "resolved": "https://registry.npmjs.org/npm-pick-manifest/-/npm-pick-manifest-8.0.2.tgz",
- "integrity": "sha512-1dKY+86/AIiq1tkKVD3l0WI+Gd3vkknVGAggsFeBkTvbhMQ1OND/LKkYv4JtXPKUJ8bOTCyLiqEg2P6QNdK+Gg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "npm-install-checks": "^6.0.0",
- "npm-normalize-package-bin": "^3.0.0",
- "npm-package-arg": "^10.0.0",
- "semver": "^7.3.5"
- },
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
"node_modules/object-assign": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
@@ -13855,6 +13498,17 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/obug": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz",
+ "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==",
+ "dev": true,
+ "funding": [
+ "https://github.com/sponsors/sxzz",
+ "https://opencollective.com/debug"
+ ],
+ "license": "MIT"
+ },
"node_modules/on-finished": {
"version": "2.4.1",
"resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
@@ -13890,6 +13544,7 @@
"resolved": "https://registry.npmjs.org/onetime/-/onetime-7.0.0.tgz",
"integrity": "sha512-VXJjc87FScF88uafS3JllDgvAm+c/Slfz06lorj2uAY34rlUu0Nt+v8wreiImcrgAjjIHp1rXpTDlLOGw29WwQ==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"mimic-function": "^5.0.0"
},
@@ -13975,12 +13630,18 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/package-json-from-dist": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz",
- "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==",
+ "node_modules/p-map": {
+ "version": "7.0.4",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-7.0.4.tgz",
+ "integrity": "sha512-tkAQEw8ysMzmkhgw8k+1U/iPhWNhykKnSk4Rd5zLoPJCuJaGRPo6YposrZgaxHKzDHdDWWZvE/Sk7hsL2X/CpQ==",
"dev": true,
- "license": "BlueOak-1.0.0"
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
},
"node_modules/parent-module": {
"version": "1.0.1",
@@ -14039,17 +13700,10 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/parse-json/node_modules/json-parse-even-better-errors": {
- "version": "2.3.1",
- "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
- "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/parse5": {
- "version": "7.3.0",
- "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz",
- "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==",
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz",
+ "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -14103,30 +13757,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/path-scurry": {
- "version": "1.11.1",
- "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz",
- "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==",
- "dev": true,
- "license": "BlueOak-1.0.0",
- "dependencies": {
- "lru-cache": "^10.2.0",
- "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0"
- },
- "engines": {
- "node": ">=16 || 14 >=14.18"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/path-scurry/node_modules/lru-cache": {
- "version": "10.4.3",
- "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz",
- "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==",
- "dev": true,
- "license": "ISC"
- },
"node_modules/path-to-regexp": {
"version": "0.1.12",
"resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz",
@@ -14149,16 +13779,6 @@
"dev": true,
"license": "MIT"
},
- "node_modules/pathval": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz",
- "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 14.16"
- }
- },
"node_modules/picocolors": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz",
@@ -14191,13 +13811,33 @@
"node": ">=0.10"
}
},
+ "node_modules/pkg-types": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/pkg-types/-/pkg-types-2.3.0.tgz",
+ "integrity": "sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==",
+ "dev": true,
+ "license": "MIT",
+ "dependencies": {
+ "confbox": "^0.2.2",
+ "exsolve": "^1.0.7",
+ "pathe": "^2.0.3"
+ }
+ },
+ "node_modules/pkg-types/node_modules/pathe": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz",
+ "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==",
+ "dev": true,
+ "license": "MIT"
+ },
"node_modules/playwright": {
- "version": "1.55.1",
- "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.55.1.tgz",
- "integrity": "sha512-cJW4Xd/G3v5ovXtJJ52MAOclqeac9S/aGGgRzLabuF8TnIb6xHvMzKIa6JmrRzUkeXJgfL1MhukP0NK6l39h3A==",
+ "version": "1.57.0",
+ "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.57.0.tgz",
+ "integrity": "sha512-ilYQj1s8sr2ppEJ2YVadYBN0Mb3mdo9J0wQ+UuDhzYqURwSoW4n1Xs5vs7ORwgDGmyEh33tRMeS8KhdkMoLXQw==",
"dev": true,
+ "license": "Apache-2.0",
"dependencies": {
- "playwright-core": "1.55.1"
+ "playwright-core": "1.57.0"
},
"bin": {
"playwright": "cli.js"
@@ -14210,10 +13850,11 @@
}
},
"node_modules/playwright-core": {
- "version": "1.55.1",
- "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.55.1.tgz",
- "integrity": "sha512-Z6Mh9mkwX+zxSlHqdr5AOcJnfp+xUWLCt9uKV18fhzA8eyxUd8NUWzAjxUh55RZKSYwDGX0cfaySdhZJGMoJ+w==",
+ "version": "1.57.0",
+ "resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.57.0.tgz",
+ "integrity": "sha512-agTcKlMw/mjBWOnD6kFZttAAGHgi/Nw0CZ2o6JqWSbMlI219lAFLZZCyqByTsvVAJq5XA5H8cA6PrvBRpBWEuQ==",
"dev": true,
+ "license": "Apache-2.0",
"bin": {
"playwright-core": "cli.js"
},
@@ -14281,12 +13922,11 @@
"license": "MIT"
},
"node_modules/posthog-js": {
- "version": "1.290.0",
- "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.290.0.tgz",
- "integrity": "sha512-zavBwZkf+3JeiSDVE7ZDXBfzva/iOljicdhdJH+cZoqp0LsxjKxjnNhGOd3KpAhw0wqdwjhd7Lp1aJuI7DXyaw==",
- "license": "SEE LICENSE IN LICENSE",
+ "version": "1.302.0",
+ "resolved": "https://registry.npmjs.org/posthog-js/-/posthog-js-1.302.0.tgz",
+ "integrity": "sha512-cBvxnAyKgcoXnikFJROCSqN8i1x9bm+szj7HOQ92mw3anlW7LK7vZ8hrQ4VNUGNIMu0ogP8djvXW5W8AlWbFcQ==",
"dependencies": {
- "@posthog/core": "1.5.2",
+ "@posthog/core": "1.7.1",
"core-js": "^3.38.1",
"fflate": "^0.4.8",
"preact": "^10.19.3",
@@ -14300,9 +13940,9 @@
"license": "Apache-2.0"
},
"node_modules/preact": {
- "version": "10.27.1",
- "resolved": "https://registry.npmjs.org/preact/-/preact-10.27.1.tgz",
- "integrity": "sha512-V79raXEWch/rbqoNc7nT9E4ep7lu+mI3+sBmfRD4i1M73R3WLYcCtdI0ibxGVf4eQL8ZIz2nFacqEC+rmnOORQ==",
+ "version": "10.28.0",
+ "resolved": "https://registry.npmjs.org/preact/-/preact-10.28.0.tgz",
+ "integrity": "sha512-rytDAoiXr3+t6OIP3WGlDd0ouCUG1iCWzkcY3++Nreuoi17y6T5i/zRhe6uYfoVcxq6YU+sBtJouuRDsq8vvqA==",
"license": "MIT",
"funding": {
"type": "opencollective",
@@ -14320,9 +13960,9 @@
}
},
"node_modules/prettier": {
- "version": "3.6.2",
- "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz",
- "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==",
+ "version": "3.7.4",
+ "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.7.4.tgz",
+ "integrity": "sha512-v6UNi1+3hSlVvv8fSaoUbggEM5VErKmmpGA7Pl3HF8V6uKY7rvClBOJlH6yNwQtfTueNkGVpOv/mtWL9L4bgRA==",
"dev": true,
"license": "MIT",
"bin": {
@@ -14405,37 +14045,6 @@
"node": ">=6"
}
},
- "node_modules/proc-log": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/proc-log/-/proc-log-3.0.0.tgz",
- "integrity": "sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
- "node_modules/promise-inflight": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/promise-inflight/-/promise-inflight-1.0.1.tgz",
- "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==",
- "dev": true,
- "license": "ISC"
- },
- "node_modules/promise-retry": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/promise-retry/-/promise-retry-2.0.1.tgz",
- "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "err-code": "^2.0.2",
- "retry": "^0.12.0"
- },
- "engines": {
- "node": ">=10"
- }
- },
"node_modules/prop-types": {
"version": "15.8.1",
"resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
@@ -14492,12 +14101,12 @@
}
},
"node_modules/qs": {
- "version": "6.13.0",
- "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
- "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
+ "version": "6.14.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz",
+ "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==",
"license": "BSD-3-Clause",
"dependencies": {
- "side-channel": "^1.0.6"
+ "side-channel": "^1.1.0"
},
"engines": {
"node": ">=0.6"
@@ -14536,23 +14145,23 @@
}
},
"node_modules/raw-body": {
- "version": "2.5.2",
- "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
- "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "version": "2.5.3",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz",
+ "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==",
"dependencies": {
- "bytes": "3.1.2",
- "http-errors": "2.0.0",
- "iconv-lite": "0.4.24",
- "unpipe": "1.0.0"
+ "bytes": "~3.1.2",
+ "http-errors": "~2.0.1",
+ "iconv-lite": "~0.4.24",
+ "unpipe": "~1.0.0"
},
"engines": {
"node": ">= 0.8"
}
},
"node_modules/react": {
- "version": "19.1.1",
- "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz",
- "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==",
+ "version": "19.2.1",
+ "resolved": "https://registry.npmjs.org/react/-/react-19.2.1.tgz",
+ "integrity": "sha512-DGrYcCWK7tvYMnWh79yrPHt+vdx9tY+1gPZa7nJQtO/p8bLTDaHp4dzwEhQB7pZ4Xe3ok4XKuEPrVuc+wlpkmw==",
"license": "MIT",
"engines": {
"node": ">=0.10.0"
@@ -14599,15 +14208,15 @@
"license": "MIT"
},
"node_modules/react-dom": {
- "version": "19.1.1",
- "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz",
- "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==",
+ "version": "19.2.1",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.1.tgz",
+ "integrity": "sha512-ibrK8llX2a4eOskq1mXKu/TGZj9qzomO+sNfO98M6d9zIPOEhlBkMkBUBLd1vgS0gQsLDBzA+8jJBVXDnfHmJg==",
"license": "MIT",
"dependencies": {
- "scheduler": "^0.26.0"
+ "scheduler": "^0.27.0"
},
"peerDependencies": {
- "react": "^19.1.1"
+ "react": "^19.2.1"
}
},
"node_modules/react-highlight": {
@@ -14637,15 +14246,17 @@
}
},
"node_modules/react-i18next": {
- "version": "16.0.0",
- "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.0.0.tgz",
- "integrity": "sha512-JQ+dFfLnFSKJQt7W01lJHWRC0SX7eDPobI+MSTJ3/gP39xH2g33AuTE7iddAfXYHamJdAeMGM0VFboPaD3G68Q==",
+ "version": "16.3.5",
+ "resolved": "https://registry.npmjs.org/react-i18next/-/react-i18next-16.3.5.tgz",
+ "integrity": "sha512-F7Kglc+T0aE6W2rO5eCAFBEuWRpNb5IFmXOYEgztjZEuiuSLTe/xBIEG6Q3S0fbl8GXMNo+Q7gF8bpokFNWJww==",
+ "license": "MIT",
"dependencies": {
"@babel/runtime": "^7.27.6",
- "html-parse-stringify": "^3.0.1"
+ "html-parse-stringify": "^3.0.1",
+ "use-sync-external-store": "^1.6.0"
},
"peerDependencies": {
- "i18next": ">= 25.5.2",
+ "i18next": ">= 25.6.2",
"react": ">= 16.8.0",
"typescript": "^5"
},
@@ -14720,9 +14331,9 @@
}
},
"node_modules/react-router": {
- "version": "7.9.3",
- "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.9.3.tgz",
- "integrity": "sha512-4o2iWCFIwhI/eYAIL43+cjORXYn/aRQPgtFRRZb3VzoyQ5Uej0Bmqj7437L97N9NJW4wnicSwLOLS+yCXfAPgg==",
+ "version": "7.10.1",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-7.10.1.tgz",
+ "integrity": "sha512-gHL89dRa3kwlUYtRQ+m8NmxGI6CgqN+k4XyGjwcFoQwwCWF6xXpOCUlDovkXClS0d0XJN/5q7kc5W3kiFEd0Yw==",
"dependencies": {
"cookie": "^1.0.1",
"set-cookie-parser": "^2.6.0"
@@ -14741,26 +14352,33 @@
}
},
"node_modules/react-router/node_modules/cookie": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.0.2.tgz",
- "integrity": "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA==",
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz",
+ "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==",
"license": "MIT",
"engines": {
"node": ">=18"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/express"
}
},
"node_modules/react-syntax-highlighter": {
- "version": "15.6.6",
- "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-15.6.6.tgz",
- "integrity": "sha512-DgXrc+AZF47+HvAPEmn7Ua/1p10jNoVZVI/LoPiYdtY+OM+/nG5yefLHKJwdKqY1adMuHFbeyBaG9j64ML7vTw==",
+ "version": "16.1.0",
+ "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-16.1.0.tgz",
+ "integrity": "sha512-E40/hBiP5rCNwkeBN1vRP+xow1X0pndinO+z3h7HLsHyjztbyjfzNWNKuAsJj+7DLam9iT4AaaOZnueCU+Nplg==",
"license": "MIT",
"dependencies": {
- "@babel/runtime": "^7.3.1",
+ "@babel/runtime": "^7.28.4",
"highlight.js": "^10.4.1",
"highlightjs-vue": "^1.0.0",
"lowlight": "^1.17.0",
"prismjs": "^1.30.0",
- "refractor": "^3.6.0"
+ "refractor": "^5.0.0"
+ },
+ "engines": {
+ "node": ">= 16.20.2"
},
"peerDependencies": {
"react": ">= 0.14.0"
@@ -14835,121 +14453,21 @@
}
},
"node_modules/refractor": {
- "version": "3.6.0",
- "resolved": "https://registry.npmjs.org/refractor/-/refractor-3.6.0.tgz",
- "integrity": "sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==",
- "license": "MIT",
- "dependencies": {
- "hastscript": "^6.0.0",
- "parse-entities": "^2.0.0",
- "prismjs": "~1.27.0"
- },
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/character-entities": {
- "version": "1.2.4",
- "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz",
- "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/character-entities-legacy": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz",
- "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/character-reference-invalid": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz",
- "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/is-alphabetical": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz",
- "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/is-alphanumerical": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz",
- "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==",
- "license": "MIT",
- "dependencies": {
- "is-alphabetical": "^1.0.0",
- "is-decimal": "^1.0.0"
- },
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/is-decimal": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz",
- "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/is-hexadecimal": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz",
- "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==",
- "license": "MIT",
- "funding": {
- "type": "github",
- "url": "https://github.com/sponsors/wooorm"
- }
- },
- "node_modules/refractor/node_modules/parse-entities": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz",
- "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==",
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/refractor/-/refractor-5.0.0.tgz",
+ "integrity": "sha512-QXOrHQF5jOpjjLfiNk5GFnWhRXvxjUVnlFxkeDmewR5sXkr3iM46Zo+CnRR8B+MDVqkULW4EcLVcRBNOPXHosw==",
"license": "MIT",
"dependencies": {
- "character-entities": "^1.0.0",
- "character-entities-legacy": "^1.0.0",
- "character-reference-invalid": "^1.0.0",
- "is-alphanumerical": "^1.0.0",
- "is-decimal": "^1.0.0",
- "is-hexadecimal": "^1.0.0"
+ "@types/hast": "^3.0.0",
+ "@types/prismjs": "^1.0.0",
+ "hastscript": "^9.0.0",
+ "parse-entities": "^4.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wooorm"
}
},
- "node_modules/refractor/node_modules/prismjs": {
- "version": "1.27.0",
- "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz",
- "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==",
- "license": "MIT",
- "engines": {
- "node": ">=6"
- }
- },
"node_modules/regexp.prototype.flags": {
"version": "1.5.4",
"resolved": "https://registry.npmjs.org/regexp.prototype.flags/-/regexp.prototype.flags-1.5.4.tgz",
@@ -15067,6 +14585,7 @@
"resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
"integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=0.10.0"
}
@@ -15082,13 +14601,13 @@
}
},
"node_modules/resolve": {
- "version": "1.22.10",
- "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz",
- "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==",
+ "version": "1.22.11",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz",
+ "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==",
"dev": true,
"license": "MIT",
"dependencies": {
- "is-core-module": "^2.16.0",
+ "is-core-module": "^2.16.1",
"path-parse": "^1.0.7",
"supports-preserve-symlinks-flag": "^1.0.0"
},
@@ -15117,6 +14636,7 @@
"resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-5.1.0.tgz",
"integrity": "sha512-oMA2dcrw6u0YfxJQXm342bFKX/E4sG9rbTzO9ptUcR/e8A33cHuvStiYOwH7fszkZlZ1z/ta9AAoPk2F4qIOHA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"onetime": "^7.0.0",
"signal-exit": "^4.1.0"
@@ -15128,15 +14648,12 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/retry": {
- "version": "0.12.0",
- "resolved": "https://registry.npmjs.org/retry/-/retry-0.12.0.tgz",
- "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==",
+ "node_modules/rettime": {
+ "version": "0.7.0",
+ "resolved": "https://registry.npmjs.org/rettime/-/rettime-0.7.0.tgz",
+ "integrity": "sha512-LPRKoHnLKd/r3dVxcwO7vhCW+orkOGj9ViueosEBK6ie89CijnfRlhaDhHq/3Hxu4CkWQtxwlBG0mzTQY6uQjw==",
"dev": true,
- "license": "MIT",
- "engines": {
- "node": ">= 4"
- }
+ "license": "MIT"
},
"node_modules/reusify": {
"version": "1.1.0",
@@ -15153,7 +14670,8 @@
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/rfdc/-/rfdc-1.4.1.tgz",
"integrity": "sha512-q1b3N5QkRUWUl7iyylaaj3kOpIT0N2i9MqIEQXP73GVsN9cw3fdx8X63cEmWhJGi2PPCF23Ijp7ktmd39rawIA==",
- "dev": true
+ "dev": true,
+ "license": "MIT"
},
"node_modules/rimraf": {
"version": "3.0.2",
@@ -15172,56 +14690,10 @@
"url": "https://github.com/sponsors/isaacs"
}
},
- "node_modules/rimraf/node_modules/brace-expansion": {
- "version": "1.1.12",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
- "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "balanced-match": "^1.0.0",
- "concat-map": "0.0.1"
- }
- },
- "node_modules/rimraf/node_modules/glob": {
- "version": "7.2.3",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
- "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
- "deprecated": "Glob versions prior to v9 are no longer supported",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.1.1",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- },
- "engines": {
- "node": "*"
- },
- "funding": {
- "url": "https://github.com/sponsors/isaacs"
- }
- },
- "node_modules/rimraf/node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "brace-expansion": "^1.1.7"
- },
- "engines": {
- "node": "*"
- }
- },
"node_modules/rollup": {
- "version": "4.50.0",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.50.0.tgz",
- "integrity": "sha512-/Zl4D8zPifNmyGzJS+3kVoyXeDeT/GrsJM94sACNg9RtUE0hrHa1bNPtRSrfHTMH5HjRzce6K7rlTh3Khiw+pw==",
+ "version": "4.53.3",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.53.3.tgz",
+ "integrity": "sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==",
"license": "MIT",
"dependencies": {
"@types/estree": "1.0.8"
@@ -15234,36 +14706,31 @@
"npm": ">=8.0.0"
},
"optionalDependencies": {
- "@rollup/rollup-android-arm-eabi": "4.50.0",
- "@rollup/rollup-android-arm64": "4.50.0",
- "@rollup/rollup-darwin-arm64": "4.50.0",
- "@rollup/rollup-darwin-x64": "4.50.0",
- "@rollup/rollup-freebsd-arm64": "4.50.0",
- "@rollup/rollup-freebsd-x64": "4.50.0",
- "@rollup/rollup-linux-arm-gnueabihf": "4.50.0",
- "@rollup/rollup-linux-arm-musleabihf": "4.50.0",
- "@rollup/rollup-linux-arm64-gnu": "4.50.0",
- "@rollup/rollup-linux-arm64-musl": "4.50.0",
- "@rollup/rollup-linux-loongarch64-gnu": "4.50.0",
- "@rollup/rollup-linux-ppc64-gnu": "4.50.0",
- "@rollup/rollup-linux-riscv64-gnu": "4.50.0",
- "@rollup/rollup-linux-riscv64-musl": "4.50.0",
- "@rollup/rollup-linux-s390x-gnu": "4.50.0",
- "@rollup/rollup-linux-x64-gnu": "4.50.0",
- "@rollup/rollup-linux-x64-musl": "4.50.0",
- "@rollup/rollup-openharmony-arm64": "4.50.0",
- "@rollup/rollup-win32-arm64-msvc": "4.50.0",
- "@rollup/rollup-win32-ia32-msvc": "4.50.0",
- "@rollup/rollup-win32-x64-msvc": "4.50.0",
+ "@rollup/rollup-android-arm-eabi": "4.53.3",
+ "@rollup/rollup-android-arm64": "4.53.3",
+ "@rollup/rollup-darwin-arm64": "4.53.3",
+ "@rollup/rollup-darwin-x64": "4.53.3",
+ "@rollup/rollup-freebsd-arm64": "4.53.3",
+ "@rollup/rollup-freebsd-x64": "4.53.3",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.53.3",
+ "@rollup/rollup-linux-arm-musleabihf": "4.53.3",
+ "@rollup/rollup-linux-arm64-gnu": "4.53.3",
+ "@rollup/rollup-linux-arm64-musl": "4.53.3",
+ "@rollup/rollup-linux-loong64-gnu": "4.53.3",
+ "@rollup/rollup-linux-ppc64-gnu": "4.53.3",
+ "@rollup/rollup-linux-riscv64-gnu": "4.53.3",
+ "@rollup/rollup-linux-riscv64-musl": "4.53.3",
+ "@rollup/rollup-linux-s390x-gnu": "4.53.3",
+ "@rollup/rollup-linux-x64-gnu": "4.53.3",
+ "@rollup/rollup-linux-x64-musl": "4.53.3",
+ "@rollup/rollup-openharmony-arm64": "4.53.3",
+ "@rollup/rollup-win32-arm64-msvc": "4.53.3",
+ "@rollup/rollup-win32-ia32-msvc": "4.53.3",
+ "@rollup/rollup-win32-x64-gnu": "4.53.3",
+ "@rollup/rollup-win32-x64-msvc": "4.53.3",
"fsevents": "~2.3.2"
}
},
- "node_modules/rrweb-cssom": {
- "version": "0.8.0",
- "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz",
- "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==",
- "dev": true
- },
"node_modules/run-parallel": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
@@ -15395,9 +14862,9 @@
}
},
"node_modules/scheduler": {
- "version": "0.26.0",
- "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.26.0.tgz",
- "integrity": "sha512-NlHwttCI/l5gCPR3D1nNXtWABUmBwvZpEQiD4IXSbIDq8BzLIK/7Ir5gTFSGZDUu37K5cMNp0hFtzO38sC7gWA==",
+ "version": "0.27.0",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.27.0.tgz",
+ "integrity": "sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==",
"license": "MIT"
},
"node_modules/scroll-into-view-if-needed": {
@@ -15419,9 +14886,9 @@
}
},
"node_modules/semver": {
- "version": "7.7.2",
- "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
- "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
+ "version": "7.7.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
+ "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
"dev": true,
"license": "ISC",
"bin": {
@@ -15432,14 +14899,14 @@
}
},
"node_modules/send": {
- "version": "0.19.0",
- "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
- "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
+ "version": "0.19.1",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.1.tgz",
+ "integrity": "sha512-p4rRk4f23ynFEfcD9LA0xRYngj+IyGiEYyqqOak8kaN0TvNmuxC2dcVeBn62GpCeR2CpWqyHCNScTP91QbAVFg==",
"dependencies": {
"debug": "2.6.9",
"depd": "2.0.0",
"destroy": "1.2.0",
- "encodeurl": "~1.0.2",
+ "encodeurl": "~2.0.0",
"escape-html": "~1.0.3",
"etag": "~1.8.1",
"fresh": "0.5.2",
@@ -15467,10 +14934,25 @@
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
- "node_modules/send/node_modules/encodeurl": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
- "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "node_modules/send/node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/send/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
"engines": {
"node": ">= 0.8"
}
@@ -15489,10 +14971,77 @@
"node": ">= 0.8.0"
}
},
+ "node_modules/serve-static/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/serve-static/node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/serve-static/node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/serve-static/node_modules/send": {
+ "version": "0.19.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
+ "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/serve-static/node_modules/send/node_modules/encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/serve-static/node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
"node_modules/set-cookie-parser": {
- "version": "2.7.1",
- "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz",
- "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==",
+ "version": "2.7.2",
+ "resolved": "https://registry.npmjs.org/set-cookie-parser/-/set-cookie-parser-2.7.2.tgz",
+ "integrity": "sha512-oeM1lpU/UvhTxw+g3cIfxXHyJRc/uidd3yK1P242gzHds0udQBYzs3y8j4gCCW+ZJ7ad0yctld8RYO+bdurlvw==",
"license": "MIT"
},
"node_modules/set-function-length": {
@@ -15663,20 +15212,14 @@
}
},
"node_modules/simple-swizzle": {
- "version": "0.2.2",
- "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.2.tgz",
- "integrity": "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg==",
+ "version": "0.2.4",
+ "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.4.tgz",
+ "integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==",
"license": "MIT",
"dependencies": {
"is-arrayish": "^0.3.1"
}
},
- "node_modules/simple-swizzle/node_modules/is-arrayish": {
- "version": "0.3.2",
- "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.2.tgz",
- "integrity": "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ==",
- "license": "MIT"
- },
"node_modules/sirv": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/sirv/-/sirv-3.0.2.tgz",
@@ -15728,6 +15271,7 @@
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz",
"integrity": "sha512-iOBWFgUX7caIZiuutICxVgX1SdxwAVFFKwt1EvMYYec/NWO5meOJ6K5uQxhrYBdQJne4KxiqZc+KptFOWFSI9w==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"ansi-styles": "^6.2.1",
"is-fullwidth-code-point": "^5.0.0"
@@ -15744,6 +15288,7 @@
"resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
"integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=12"
},
@@ -15824,6 +15369,15 @@
}
}
},
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
"node_modules/source-map-js": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
@@ -15843,15 +15397,6 @@
"source-map": "^0.6.0"
}
},
- "node_modules/source-map-support/node_modules/source-map": {
- "version": "0.6.1",
- "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
- "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
- "license": "BSD-3-Clause",
- "engines": {
- "node": ">=0.10.0"
- }
- },
"node_modules/space-separated-tokens": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz",
@@ -15862,42 +15407,6 @@
"url": "https://github.com/sponsors/wooorm"
}
},
- "node_modules/spdx-correct": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.2.0.tgz",
- "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==",
- "dev": true,
- "license": "Apache-2.0",
- "dependencies": {
- "spdx-expression-parse": "^3.0.0",
- "spdx-license-ids": "^3.0.0"
- }
- },
- "node_modules/spdx-exceptions": {
- "version": "2.5.0",
- "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz",
- "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==",
- "dev": true,
- "license": "CC-BY-3.0"
- },
- "node_modules/spdx-expression-parse": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz",
- "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "spdx-exceptions": "^2.1.0",
- "spdx-license-ids": "^3.0.0"
- }
- },
- "node_modules/spdx-license-ids": {
- "version": "3.0.22",
- "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.22.tgz",
- "integrity": "sha512-4PRT4nh1EImPbt2jASOKHX7PB7I+e4IWNLvkKFDxNhJlfjbYlleYQh285Z/3mPTHSAK/AvdMmw5BNNuYH8ShgQ==",
- "dev": true,
- "license": "CC0-1.0"
- },
"node_modules/stackback": {
"version": "0.0.2",
"resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz",
@@ -15912,18 +15421,18 @@
"license": "MIT"
},
"node_modules/statuses": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
- "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz",
+ "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==",
"license": "MIT",
"engines": {
"node": ">= 0.8"
}
},
"node_modules/std-env": {
- "version": "3.9.0",
- "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz",
- "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==",
+ "version": "3.10.0",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz",
+ "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==",
"dev": true,
"license": "MIT"
},
@@ -15959,60 +15468,26 @@
}
},
"node_modules/string-width": {
- "version": "5.1.2",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
- "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "eastasianwidth": "^0.2.0",
- "emoji-regex": "^9.2.2",
- "strip-ansi": "^7.0.1"
- },
- "engines": {
- "node": ">=12"
- },
- "funding": {
- "url": "https://github.com/sponsors/sindresorhus"
- }
- },
- "node_modules/string-width-cjs": {
- "name": "string-width",
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.1.0.tgz",
+ "integrity": "sha512-Kxl3KJGb/gxkaUMOjRsQ8IrXiGW75O4E3RPjFIINOVH8AMl2SQ/yWdTzWwF3FevIX9LcMAjJW+GRwAlAbTSXdg==",
"dev": true,
"license": "MIT",
"dependencies": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/string-width-cjs/node_modules/emoji-regex": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
- "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/string-width-cjs/node_modules/is-fullwidth-code-point": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
- "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
- "dev": true,
- "license": "MIT",
+ "get-east-asian-width": "^1.3.0",
+ "strip-ansi": "^7.1.0"
+ },
"engines": {
- "node": ">=8"
+ "node": ">=20"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/string-width/node_modules/ansi-regex": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.0.tgz",
- "integrity": "sha512-TKY5pyBkHyADOPYlRT9Lx6F544mPl0vS5Ew7BJ45hA08Q+t3GjbueLliBWN3sMICk6+y7HdyxSzC4bWS8baBdg==",
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
+ "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
"license": "MIT",
"engines": {
@@ -16023,9 +15498,9 @@
}
},
"node_modules/string-width/node_modules/strip-ansi": {
- "version": "7.1.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
- "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
+ "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -16178,20 +15653,6 @@
"node": ">=8"
}
},
- "node_modules/strip-ansi-cjs": {
- "name": "strip-ansi",
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
- "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "ansi-regex": "^5.0.1"
- },
- "engines": {
- "node": ">=8"
- }
- },
"node_modules/strip-bom": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
@@ -16228,40 +15689,20 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
- "node_modules/strip-literal": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz",
- "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "js-tokens": "^9.0.1"
- },
- "funding": {
- "url": "https://github.com/sponsors/antfu"
- }
- },
- "node_modules/strip-literal/node_modules/js-tokens": {
- "version": "9.0.1",
- "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz",
- "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==",
- "dev": true,
- "license": "MIT"
- },
"node_modules/stripe": {
- "version": "18.5.0",
- "resolved": "https://registry.npmjs.org/stripe/-/stripe-18.5.0.tgz",
- "integrity": "sha512-Hp+wFiEQtCB0LlNgcFh5uVyKznpDjzyUZ+CNVEf+I3fhlYvh7rZruIg+jOwzJRCpy0ZTPMjlzm7J2/M2N6d+DA==",
+ "version": "20.0.0",
+ "resolved": "https://registry.npmjs.org/stripe/-/stripe-20.0.0.tgz",
+ "integrity": "sha512-EaZeWpbJOCcDytdjKSwdrL5BxzbDGNueiCfHjHXlPdBQvLqoxl6AAivC35SPzTmVXJb5duXQlXFGS45H0+e6Gg==",
"dev": true,
"license": "MIT",
"dependencies": {
"qs": "^6.11.0"
},
"engines": {
- "node": ">=12.*"
+ "node": ">=16"
},
"peerDependencies": {
- "@types/node": ">=12.x.x"
+ "@types/node": ">=16"
},
"peerDependenciesMeta": {
"@types/node": {
@@ -16270,21 +15711,21 @@
}
},
"node_modules/style-to-js": {
- "version": "1.1.17",
- "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.17.tgz",
- "integrity": "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA==",
+ "version": "1.1.21",
+ "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz",
+ "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==",
"license": "MIT",
"dependencies": {
- "style-to-object": "1.0.9"
+ "style-to-object": "1.0.14"
}
},
"node_modules/style-to-object": {
- "version": "1.0.9",
- "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.9.tgz",
- "integrity": "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw==",
+ "version": "1.0.14",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz",
+ "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==",
"license": "MIT",
"dependencies": {
- "inline-style-parser": "0.2.4"
+ "inline-style-parser": "0.2.7"
}
},
"node_modules/supports-color": {
@@ -16343,10 +15784,23 @@
"url": "https://opencollective.com/synckit"
}
},
+ "node_modules/tagged-tag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/tagged-tag/-/tagged-tag-1.0.0.tgz",
+ "integrity": "sha512-yEFYrVhod+hdNyx7g5Bnkkb0G6si8HJurOoOEgC8B/O0uXLHlaey/65KRv6cuWBNhBgHKAROVpc7QyYqE5gFng==",
+ "dev": true,
+ "license": "MIT",
+ "engines": {
+ "node": ">=20"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/tailwind-merge": {
- "version": "3.3.1",
- "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.3.1.tgz",
- "integrity": "sha512-gBXpgUm/3rp1lMZZrM/w7D8GKqshif0zAymAhbCyIt8KMe+0v9DQ7cdYLR4FHH/cKpdTXb+A/tKKU3eolfsI+g==",
+ "version": "3.4.0",
+ "resolved": "https://registry.npmjs.org/tailwind-merge/-/tailwind-merge-3.4.0.tgz",
+ "integrity": "sha512-uSaO4gnW+b3Y2aWoWfFpX62vn2sR3skfhbjsEnaBI81WD1wBLlHZe5sWf0AqjksNdYTbGBEd0UasQMT3SNV15g==",
"license": "MIT",
"funding": {
"type": "github",
@@ -16372,6 +15826,7 @@
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/tailwind-variants/-/tailwind-variants-3.1.1.tgz",
"integrity": "sha512-ftLXe3krnqkMHsuBTEmaVUXYovXtPyTK7ckEfDRXS8PBZx0bAUas+A0jYxuKA5b8qg++wvQ3d2MQ7l/xeZxbZQ==",
+ "license": "MIT",
"engines": {
"node": ">=16.x",
"pnpm": ">=7.x"
@@ -16387,15 +15842,15 @@
}
},
"node_modules/tailwindcss": {
- "version": "4.1.13",
- "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.13.tgz",
- "integrity": "sha512-i+zidfmTqtwquj4hMEwdjshYYgMbOrPzb9a0M3ZgNa0JMoZeFC6bxZvO8yr8ozS6ix2SDz0+mvryPeBs2TFE+w==",
+ "version": "4.1.17",
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.17.tgz",
+ "integrity": "sha512-j9Ee2YjuQqYT9bbRTfTZht9W/ytp5H+jJpZKiYdP/bpnXARAuELt9ofP0lPnmHjbga7SNQIxdTAXCmtKVYjN+Q==",
"license": "MIT"
},
"node_modules/tapable": {
- "version": "2.2.3",
- "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.3.tgz",
- "integrity": "sha512-ZL6DDuAlRlLGghwcfmSn9sK3Hr6ArtyudlSAiCqQ6IfE+b+HHbydbYDIG15IfS5do+7XQQBdBiubF/cV2dnDzg==",
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.3.0.tgz",
+ "integrity": "sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==",
"license": "MIT",
"engines": {
"node": ">=6"
@@ -16405,47 +15860,6 @@
"url": "https://opencollective.com/webpack"
}
},
- "node_modules/tar": {
- "version": "7.4.3",
- "resolved": "https://registry.npmjs.org/tar/-/tar-7.4.3.tgz",
- "integrity": "sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==",
- "license": "ISC",
- "dependencies": {
- "@isaacs/fs-minipass": "^4.0.0",
- "chownr": "^3.0.0",
- "minipass": "^7.1.2",
- "minizlib": "^3.0.1",
- "mkdirp": "^3.0.1",
- "yallist": "^5.0.0"
- },
- "engines": {
- "node": ">=18"
- }
- },
- "node_modules/tar/node_modules/yallist": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/yallist/-/yallist-5.0.0.tgz",
- "integrity": "sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==",
- "license": "BlueOak-1.0.0",
- "engines": {
- "node": ">=18"
- }
- },
- "node_modules/test-exclude": {
- "version": "7.0.1",
- "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz",
- "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==",
- "dev": true,
- "license": "ISC",
- "dependencies": {
- "@istanbuljs/schema": "^0.1.2",
- "glob": "^10.4.1",
- "minimatch": "^9.0.4"
- },
- "engines": {
- "node": ">=18"
- }
- },
"node_modules/text-table": {
"version": "0.2.0",
"resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
@@ -16470,16 +15884,20 @@
}
},
"node_modules/tinyexec": {
- "version": "0.3.2",
- "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz",
- "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==",
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.2.tgz",
+ "integrity": "sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==",
"dev": true,
- "license": "MIT"
+ "license": "MIT",
+ "engines": {
+ "node": ">=18"
+ }
},
"node_modules/tinyglobby": {
"version": "0.2.15",
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz",
"integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==",
+ "license": "MIT",
"dependencies": {
"fdir": "^6.5.0",
"picomatch": "^4.0.3"
@@ -16520,30 +15938,10 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
- "node_modules/tinypool": {
- "version": "1.1.1",
- "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz",
- "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": "^18.0.0 || >=20.0.0"
- }
- },
"node_modules/tinyrainbow": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz",
- "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=14.0.0"
- }
- },
- "node_modules/tinyspy": {
- "version": "4.0.3",
- "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.3.tgz",
- "integrity": "sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==",
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.0.3.tgz",
+ "integrity": "sha512-PSkbLUoxOFRzJYjjxHJt9xro7D+iilgMX/C9lawzVuYiIdcihh9DXmVibBe8lmcFrRi/VzlPjBxbN7rH24q8/Q==",
"dev": true,
"license": "MIT",
"engines": {
@@ -16551,22 +15949,24 @@
}
},
"node_modules/tldts": {
- "version": "7.0.16",
- "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.16.tgz",
- "integrity": "sha512-5bdPHSwbKTeHmXrgecID4Ljff8rQjv7g8zKQPkCozRo2HWWni+p310FSn5ImI+9kWw9kK4lzOB5q/a6iv0IJsw==",
+ "version": "7.0.19",
+ "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.19.tgz",
+ "integrity": "sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==",
"dev": true,
+ "license": "MIT",
"dependencies": {
- "tldts-core": "^7.0.16"
+ "tldts-core": "^7.0.19"
},
"bin": {
"tldts": "bin/cli.js"
}
},
"node_modules/tldts-core": {
- "version": "7.0.16",
- "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.16.tgz",
- "integrity": "sha512-XHhPmHxphLi+LGbH0G/O7dmUH9V65OY20R7vH8gETHsp5AZCjBk9l8sqmRKLaGOxnETU7XNSDUPtewAy/K6jbA==",
- "dev": true
+ "version": "7.0.19",
+ "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.19.tgz",
+ "integrity": "sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==",
+ "dev": true,
+ "license": "MIT"
},
"node_modules/to-regex-range": {
"version": "5.0.1",
@@ -16603,6 +16003,7 @@
"resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.0.tgz",
"integrity": "sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==",
"dev": true,
+ "license": "BSD-3-Clause",
"dependencies": {
"tldts": "^7.0.5"
},
@@ -16615,6 +16016,7 @@
"resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz",
"integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"punycode": "^2.3.1"
},
@@ -16825,9 +16227,9 @@
}
},
"node_modules/typescript": {
- "version": "5.9.2",
- "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.2.tgz",
- "integrity": "sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==",
+ "version": "5.9.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
+ "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"devOptional": true,
"license": "Apache-2.0",
"bin": {
@@ -16858,10 +16260,11 @@
}
},
"node_modules/undici-types": {
- "version": "7.12.0",
- "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.12.0.tgz",
- "integrity": "sha512-goOacqME2GYyOZZfb5Lgtu+1IDmAlAEu5xnD3+xTzS10hT0vzpf0SPjkXwAw9Jm+4n/mQGDP3LO8CPbYROeBfQ==",
- "devOptional": true
+ "version": "7.16.0",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz",
+ "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==",
+ "devOptional": true,
+ "license": "MIT"
},
"node_modules/unified": {
"version": "11.0.5",
@@ -16883,9 +16286,9 @@
}
},
"node_modules/unist-util-is": {
- "version": "6.0.0",
- "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.0.tgz",
- "integrity": "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw==",
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz",
+ "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==",
"license": "MIT",
"dependencies": {
"@types/unist": "^3.0.0"
@@ -16937,9 +16340,9 @@
}
},
"node_modules/unist-util-visit-parents": {
- "version": "6.0.1",
- "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.1.tgz",
- "integrity": "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw==",
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz",
+ "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==",
"license": "MIT",
"dependencies": {
"@types/unist": "^3.0.0",
@@ -16958,10 +16361,20 @@
"node": ">= 0.8"
}
},
+ "node_modules/until-async": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/until-async/-/until-async-3.0.2.tgz",
+ "integrity": "sha512-IiSk4HlzAMqTUseHHe3VhIGyuFmN90zMTpD3Z3y8jeQbzLIq500MVM7Jq2vUAnTKAFPJrqwkzr6PoTcPhGcOiw==",
+ "dev": true,
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/kettanaito"
+ }
+ },
"node_modules/update-browserslist-db": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz",
- "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==",
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.0.tgz",
+ "integrity": "sha512-Dn+NlSF/7+0lVSEZ57SYQg6/E44arLzsVOGgrElBn/BlG1B8WKdbLppOocFrXwRNTkNlgdGNaBgH1o0lggDPiw==",
"funding": [
{
"type": "opencollective",
@@ -17044,9 +16457,9 @@
}
},
"node_modules/use-sync-external-store": {
- "version": "1.5.0",
- "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.5.0.tgz",
- "integrity": "sha512-Rb46I4cGGVBmjamjphe8L/UnvJD+uPPtTkNvX5mZgqdbavhI4EbgIWJiIHXJ8bc/i9EQGPRh4DwEURJ552Do0A==",
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.6.0.tgz",
+ "integrity": "sha512-Pp6GSwGP/NrPIrxVFAIkOQeyw8lFenOHijQWkUTrDvrF4ALqylP2C/KCkeS9dpUM3KvYRQhna5vt7IL95+ZQ9w==",
"license": "MIT",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
@@ -17068,9 +16481,9 @@
}
},
"node_modules/valibot": {
- "version": "0.41.0",
- "resolved": "https://registry.npmjs.org/valibot/-/valibot-0.41.0.tgz",
- "integrity": "sha512-igDBb8CTYr8YTQlOKgaN9nSS0Be7z+WRuaeYqGf3Cjz3aKmSnqEmYnkfVjzIuumGqfHpa3fLIvMEAfhrpqN8ng==",
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/valibot/-/valibot-1.2.0.tgz",
+ "integrity": "sha512-mm1rxUsmOxzrwnX5arGS+U4T25RdvpPjPN4yR0u9pUBov9+zGVtO84tif1eY4r6zWxVxu3KzIyknJy3rxfRZZg==",
"dev": true,
"license": "MIT",
"peerDependencies": {
@@ -17082,27 +16495,6 @@
}
}
},
- "node_modules/validate-npm-package-license": {
- "version": "3.0.4",
- "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz",
- "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==",
- "dev": true,
- "license": "Apache-2.0",
- "dependencies": {
- "spdx-correct": "^3.0.0",
- "spdx-expression-parse": "^3.0.0"
- }
- },
- "node_modules/validate-npm-package-name": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/validate-npm-package-name/-/validate-npm-package-name-5.0.1.tgz",
- "integrity": "sha512-OljLrQ9SQdOUqTaQxqL5dEfZWrXExyyWsozYlAWFawPVNuD83igl7uJD2RTkNMbniIYgt8l81eCJGIdQF7avLQ==",
- "dev": true,
- "license": "ISC",
- "engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
- }
- },
"node_modules/vary": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
@@ -17141,9 +16533,10 @@
}
},
"node_modules/vite": {
- "version": "7.1.7",
- "resolved": "https://registry.npmjs.org/vite/-/vite-7.1.7.tgz",
- "integrity": "sha512-VbA8ScMvAISJNJVbRDTJdCwqQoAareR/wutevKanhR2/1EkoXVZVkkORaYm/tNVCjP/UDTKtcw3bAkwOUdedmA==",
+ "version": "7.2.6",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-7.2.6.tgz",
+ "integrity": "sha512-tI2l/nFHC5rLh7+5+o7QjKjSR04ivXDF4jcgV0f/bTQ+OJiITy5S6gaynVsEM+7RqzufMnVbIon6Sr5x1SDYaQ==",
+ "license": "MIT",
"dependencies": {
"esbuild": "^0.25.0",
"fdir": "^6.5.0",
@@ -17322,51 +16715,50 @@
}
},
"node_modules/vitest": {
- "version": "3.2.4",
- "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.4.tgz",
- "integrity": "sha512-LUCP5ev3GURDysTWiP47wRRUpLKMOfPh+yKTx3kVIEiu5KOMeqzpnYNsKyOoVrULivR8tLcks4+lga33Whn90A==",
+ "version": "4.0.15",
+ "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.0.15.tgz",
+ "integrity": "sha512-n1RxDp8UJm6N0IbJLQo+yzLZ2sQCDyl1o0LeugbPWf8+8Fttp29GghsQBjYJVmWq3gBFfe9Hs1spR44vovn2wA==",
"dev": true,
"license": "MIT",
"dependencies": {
- "@types/chai": "^5.2.2",
- "@vitest/expect": "3.2.4",
- "@vitest/mocker": "3.2.4",
- "@vitest/pretty-format": "^3.2.4",
- "@vitest/runner": "3.2.4",
- "@vitest/snapshot": "3.2.4",
- "@vitest/spy": "3.2.4",
- "@vitest/utils": "3.2.4",
- "chai": "^5.2.0",
- "debug": "^4.4.1",
- "expect-type": "^1.2.1",
- "magic-string": "^0.30.17",
+ "@vitest/expect": "4.0.15",
+ "@vitest/mocker": "4.0.15",
+ "@vitest/pretty-format": "4.0.15",
+ "@vitest/runner": "4.0.15",
+ "@vitest/snapshot": "4.0.15",
+ "@vitest/spy": "4.0.15",
+ "@vitest/utils": "4.0.15",
+ "es-module-lexer": "^1.7.0",
+ "expect-type": "^1.2.2",
+ "magic-string": "^0.30.21",
+ "obug": "^2.1.1",
"pathe": "^2.0.3",
- "picomatch": "^4.0.2",
- "std-env": "^3.9.0",
+ "picomatch": "^4.0.3",
+ "std-env": "^3.10.0",
"tinybench": "^2.9.0",
- "tinyexec": "^0.3.2",
- "tinyglobby": "^0.2.14",
- "tinypool": "^1.1.1",
- "tinyrainbow": "^2.0.0",
- "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0",
- "vite-node": "3.2.4",
+ "tinyexec": "^1.0.2",
+ "tinyglobby": "^0.2.15",
+ "tinyrainbow": "^3.0.3",
+ "vite": "^6.0.0 || ^7.0.0",
"why-is-node-running": "^2.3.0"
},
"bin": {
"vitest": "vitest.mjs"
},
"engines": {
- "node": "^18.0.0 || ^20.0.0 || >=22.0.0"
+ "node": "^20.0.0 || ^22.0.0 || >=24.0.0"
},
"funding": {
"url": "https://opencollective.com/vitest"
},
"peerDependencies": {
"@edge-runtime/vm": "*",
- "@types/debug": "^4.1.12",
- "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0",
- "@vitest/browser": "3.2.4",
- "@vitest/ui": "3.2.4",
+ "@opentelemetry/api": "^1.9.0",
+ "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0",
+ "@vitest/browser-playwright": "4.0.15",
+ "@vitest/browser-preview": "4.0.15",
+ "@vitest/browser-webdriverio": "4.0.15",
+ "@vitest/ui": "4.0.15",
"happy-dom": "*",
"jsdom": "*"
},
@@ -17374,13 +16766,19 @@
"@edge-runtime/vm": {
"optional": true
},
- "@types/debug": {
+ "@opentelemetry/api": {
"optional": true
},
"@types/node": {
"optional": true
},
- "@vitest/browser": {
+ "@vitest/browser-playwright": {
+ "optional": true
+ },
+ "@vitest/browser-preview": {
+ "optional": true
+ },
+ "@vitest/browser-webdriverio": {
"optional": true
},
"@vitest/ui": {
@@ -17447,6 +16845,7 @@
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.0.tgz",
"integrity": "sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==",
"dev": true,
+ "license": "BSD-2-Clause",
"engines": {
"node": ">=20"
}
@@ -17482,6 +16881,7 @@
"resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz",
"integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==",
"dev": true,
+ "license": "MIT",
"engines": {
"node": ">=18"
}
@@ -17491,6 +16891,7 @@
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-15.1.0.tgz",
"integrity": "sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==",
"dev": true,
+ "license": "MIT",
"dependencies": {
"tr46": "^6.0.0",
"webidl-conversions": "^8.0.0"
@@ -17500,19 +16901,18 @@
}
},
"node_modules/which": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/which/-/which-3.0.1.tgz",
- "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==",
- "dev": true,
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
"license": "ISC",
"dependencies": {
"isexe": "^2.0.0"
},
"bin": {
- "node-which": "bin/which.js"
+ "node-which": "bin/node-which"
},
"engines": {
- "node": "^14.17.0 || ^16.13.0 || >=18.0.0"
+ "node": ">= 8"
}
},
"node_modules/which-boxed-primitive": {
@@ -17632,104 +17032,78 @@
}
},
"node_modules/wrap-ansi": {
- "version": "8.1.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
- "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
+ "version": "9.0.2",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz",
+ "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==",
"dev": true,
"license": "MIT",
"dependencies": {
- "ansi-styles": "^6.1.0",
- "string-width": "^5.0.1",
- "strip-ansi": "^7.0.1"
+ "ansi-styles": "^6.2.1",
+ "string-width": "^7.0.0",
+ "strip-ansi": "^7.1.0"
},
"engines": {
- "node": ">=12"
+ "node": ">=18"
},
"funding": {
"url": "https://github.com/chalk/wrap-ansi?sponsor=1"
}
},
- "node_modules/wrap-ansi-cjs": {
- "name": "wrap-ansi",
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
- "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "node_modules/wrap-ansi/node_modules/ansi-regex": {
+ "version": "6.2.2",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz",
+ "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==",
"dev": true,
"license": "MIT",
- "dependencies": {
- "ansi-styles": "^4.0.0",
- "string-width": "^4.1.0",
- "strip-ansi": "^6.0.0"
- },
"engines": {
- "node": ">=10"
+ "node": ">=12"
},
"funding": {
- "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
- }
- },
- "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": {
- "version": "8.0.0",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
- "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
- "dev": true,
- "license": "MIT"
- },
- "node_modules/wrap-ansi-cjs/node_modules/is-fullwidth-code-point": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
- "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
- "dev": true,
- "license": "MIT",
- "engines": {
- "node": ">=8"
- }
- },
- "node_modules/wrap-ansi-cjs/node_modules/string-width": {
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
- "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
- "dev": true,
- "license": "MIT",
- "dependencies": {
- "emoji-regex": "^8.0.0",
- "is-fullwidth-code-point": "^3.0.0",
- "strip-ansi": "^6.0.1"
- },
- "engines": {
- "node": ">=8"
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
}
},
- "node_modules/wrap-ansi/node_modules/ansi-regex": {
- "version": "6.2.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.0.tgz",
- "integrity": "sha512-TKY5pyBkHyADOPYlRT9Lx6F544mPl0vS5Ew7BJ45hA08Q+t3GjbueLliBWN3sMICk6+y7HdyxSzC4bWS8baBdg==",
+ "node_modules/wrap-ansi/node_modules/ansi-styles": {
+ "version": "6.2.3",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz",
+ "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=12"
},
"funding": {
- "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
}
},
- "node_modules/wrap-ansi/node_modules/ansi-styles": {
- "version": "6.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
- "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "node_modules/wrap-ansi/node_modules/emoji-regex": {
+ "version": "10.6.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz",
+ "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==",
+ "dev": true,
+ "license": "MIT"
+ },
+ "node_modules/wrap-ansi/node_modules/string-width": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz",
+ "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==",
"dev": true,
"license": "MIT",
+ "dependencies": {
+ "emoji-regex": "^10.3.0",
+ "get-east-asian-width": "^1.0.0",
+ "strip-ansi": "^7.1.0"
+ },
"engines": {
- "node": ">=12"
+ "node": ">=18"
},
"funding": {
- "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ "url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/wrap-ansi/node_modules/strip-ansi": {
- "version": "7.1.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
- "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "version": "7.1.2",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz",
+ "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -17795,15 +17169,6 @@
"node": ">=0.4.0"
}
},
- "node_modules/xtend": {
- "version": "4.0.2",
- "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
- "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
- "license": "MIT",
- "engines": {
- "node": ">=0.4"
- }
- },
"node_modules/y18n": {
"version": "5.0.8",
"resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
@@ -17821,9 +17186,9 @@
"license": "ISC"
},
"node_modules/yaml": {
- "version": "2.8.1",
- "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.1.tgz",
- "integrity": "sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==",
+ "version": "2.8.2",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz",
+ "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==",
"devOptional": true,
"license": "ISC",
"bin": {
@@ -17831,6 +17196,9 @@
},
"engines": {
"node": ">= 14.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/eemeli"
}
},
"node_modules/yargs": {
@@ -17921,9 +17289,9 @@
}
},
"node_modules/zustand": {
- "version": "5.0.8",
- "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.8.tgz",
- "integrity": "sha512-gyPKpIaxY9XcO2vSMrLbiER7QMAMGOQZVRdJ6Zi782jkbzZygq5GI9nG8g+sMgitRtndwaBSl7uiqC49o1SSiw==",
+ "version": "5.0.9",
+ "resolved": "https://registry.npmjs.org/zustand/-/zustand-5.0.9.tgz",
+ "integrity": "sha512-ALBtUj0AfjJt3uNRQoL1tL2tMvj6Gp/6e39dnfT6uzpelGru8v1tPOGBzayOWbPJvujM8JojDk3E1LxeFisBNg==",
"license": "MIT",
"engines": {
"node": ">=12.20.0"
diff --git a/frontend/package.json b/frontend/package.json
index 46958662a05c..170c6c6bf7bc 100644
--- a/frontend/package.json
+++ b/frontend/package.json
@@ -7,57 +7,57 @@
"node": ">=22.0.0"
},
"dependencies": {
- "@heroui/react": "^2.8.4",
- "@heroui/use-infinite-scroll": "^2.2.11",
+ "@heroui/react": "2.8.5",
+ "@heroui/use-infinite-scroll": "^2.2.12",
"@microlink/react-json-view": "^1.26.2",
"@monaco-editor/react": "^4.7.0-rc.0",
- "@posthog/react": "^1.4.0",
- "@react-router/node": "^7.9.3",
- "@react-router/serve": "^7.9.3",
+ "@posthog/react": "^1.5.2",
+ "@react-router/node": "^7.10.1",
+ "@react-router/serve": "^7.10.1",
"@react-types/shared": "^3.32.0",
- "@stripe/react-stripe-js": "^4.0.2",
- "@stripe/stripe-js": "^7.9.0",
- "@tailwindcss/postcss": "^4.1.13",
- "@tailwindcss/vite": "^4.1.13",
- "@tanstack/react-query": "^5.90.2",
+ "@stripe/react-stripe-js": "^5.4.1",
+ "@stripe/stripe-js": "^8.5.3",
+ "@tailwindcss/postcss": "^4.1.17",
+ "@tailwindcss/vite": "^4.1.17",
+ "@tanstack/react-query": "^5.90.12",
"@uidotdev/usehooks": "^2.4.1",
- "@vitejs/plugin-react": "^5.0.4",
+ "@vitejs/plugin-react": "^5.1.1",
"@xterm/addon-fit": "^0.10.0",
"@xterm/xterm": "^5.4.0",
- "axios": "^1.12.2",
+ "axios": "^1.13.2",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"date-fns": "^4.1.0",
- "downshift": "^9.0.10",
+ "downshift": "^9.0.12",
"eslint-config-airbnb-typescript": "^18.0.0",
- "framer-motion": "^12.23.22",
- "i18next": "^25.5.2",
+ "framer-motion": "^12.23.25",
+ "i18next": "^25.7.1",
"i18next-browser-languagedetector": "^8.2.0",
"i18next-http-backend": "^3.0.2",
- "isbot": "^5.1.31",
- "jose": "^6.1.0",
- "lucide-react": "^0.544.0",
- "monaco-editor": "^0.53.0",
- "posthog-js": "^1.290.0",
- "react": "^19.1.1",
- "react-dom": "^19.1.1",
+ "isbot": "^5.1.32",
+ "jose": "^6.1.3",
+ "lucide-react": "^0.556.0",
+ "monaco-editor": "^0.55.1",
+ "posthog-js": "^1.302.0",
+ "react": "^19.2.0",
+ "react-dom": "^19.2.0",
"react-highlight": "^0.15.0",
"react-hot-toast": "^2.6.0",
- "react-i18next": "^16.0.0",
+ "react-i18next": "^16.3.5",
"react-icons": "^5.5.0",
"react-markdown": "^10.1.0",
- "react-router": "^7.9.3",
- "react-syntax-highlighter": "^15.6.6",
+ "react-router": "^7.10.1",
+ "react-syntax-highlighter": "^16.1.0",
"remark-breaks": "^4.0.0",
"remark-gfm": "^4.0.1",
"sirv-cli": "^3.0.1",
"socket.io-client": "^4.8.1",
- "tailwind-merge": "^3.3.1",
+ "tailwind-merge": "^3.4.0",
"tailwind-scrollbar": "^4.0.2",
- "vite": "^7.1.7",
+ "vite": "^7.2.6",
"web-vitals": "^5.1.0",
"ws": "^8.18.2",
- "zustand": "^5.0.8"
+ "zustand": "^5.0.9"
},
"scripts": {
"dev": "npm run make-i18n && cross-env VITE_MOCK_API=false react-router dev",
@@ -96,25 +96,25 @@
"@babel/traverse": "^7.28.3",
"@babel/types": "^7.28.2",
"@mswjs/socket.io-binding": "^0.2.0",
- "@playwright/test": "^1.55.1",
- "@react-router/dev": "^7.9.3",
+ "@playwright/test": "^1.57.0",
+ "@react-router/dev": "^7.10.1",
"@tailwindcss/typography": "^0.5.19",
"@tanstack/eslint-plugin-query": "^5.91.0",
"@testing-library/dom": "^10.4.1",
- "@testing-library/jest-dom": "^6.8.0",
+ "@testing-library/jest-dom": "^6.9.1",
"@testing-library/react": "^16.3.0",
"@testing-library/user-event": "^14.6.1",
- "@types/node": "^24.5.2",
- "@types/react": "^19.1.15",
- "@types/react-dom": "^19.1.9",
+ "@types/node": "^24.10.1",
+ "@types/react": "^19.2.7",
+ "@types/react-dom": "^19.2.3",
"@types/react-highlight": "^0.12.8",
"@types/react-syntax-highlighter": "^15.5.13",
"@types/ws": "^8.18.1",
"@typescript-eslint/eslint-plugin": "^7.18.0",
"@typescript-eslint/parser": "^7.18.0",
- "@vitest/coverage-v8": "^3.2.3",
- "autoprefixer": "^10.4.21",
- "cross-env": "^10.0.0",
+ "@vitest/coverage-v8": "^4.0.14",
+ "autoprefixer": "^10.4.22",
+ "cross-env": "^10.1.0",
"eslint": "^8.57.0",
"eslint-config-airbnb": "^19.0.4",
"eslint-config-airbnb-typescript": "^18.0.0",
@@ -127,16 +127,16 @@
"eslint-plugin-react-hooks": "^4.6.2",
"eslint-plugin-unused-imports": "^4.2.0",
"husky": "^9.1.7",
- "jsdom": "^27.0.0",
- "lint-staged": "^16.2.3",
+ "jsdom": "^27.2.0",
+ "lint-staged": "^16.2.7",
"msw": "^2.6.6",
- "prettier": "^3.6.2",
- "stripe": "^18.5.0",
+ "prettier": "^3.7.3",
+ "stripe": "^20.0.0",
"tailwindcss": "^4.1.8",
- "typescript": "^5.9.2",
+ "typescript": "^5.9.3",
"vite-plugin-svgr": "^4.5.0",
"vite-tsconfig-paths": "^5.1.4",
- "vitest": "^3.0.2"
+ "vitest": "^4.0.14"
},
"packageManager": "npm@10.5.0",
"volta": {
diff --git a/frontend/public/android-chrome-192x192.png b/frontend/public/android-chrome-192x192.png
index 23f4f4fd2448..31d5801adb22 100644
Binary files a/frontend/public/android-chrome-192x192.png and b/frontend/public/android-chrome-192x192.png differ
diff --git a/frontend/public/android-chrome-512x512.png b/frontend/public/android-chrome-512x512.png
index 1fe76e41968d..57e1544c5fb7 100644
Binary files a/frontend/public/android-chrome-512x512.png and b/frontend/public/android-chrome-512x512.png differ
diff --git a/frontend/public/apple-touch-icon.png b/frontend/public/apple-touch-icon.png
index d6146fed3240..31d5801adb22 100644
Binary files a/frontend/public/apple-touch-icon.png and b/frontend/public/apple-touch-icon.png differ
diff --git a/frontend/public/favicon-16x16.png b/frontend/public/favicon-16x16.png
index 5db772fa150c..4f230c5981fa 100644
Binary files a/frontend/public/favicon-16x16.png and b/frontend/public/favicon-16x16.png differ
diff --git a/frontend/public/favicon-32x32.png b/frontend/public/favicon-32x32.png
index bb75b8b65f8b..1f874a817d14 100644
Binary files a/frontend/public/favicon-32x32.png and b/frontend/public/favicon-32x32.png differ
diff --git a/frontend/public/favicon.ico b/frontend/public/favicon.ico
index 680e72b56f78..409515edfca7 100644
Binary files a/frontend/public/favicon.ico and b/frontend/public/favicon.ico differ
diff --git a/frontend/public/mockServiceWorker.js b/frontend/public/mockServiceWorker.js
index 7e23102e0b28..6951ed1ce2fe 100644
--- a/frontend/public/mockServiceWorker.js
+++ b/frontend/public/mockServiceWorker.js
@@ -7,8 +7,8 @@
* - Please do NOT modify this file.
*/
-const PACKAGE_VERSION = '2.11.1'
-const INTEGRITY_CHECKSUM = 'f5825c521429caf22a4dd13b66e243af'
+const PACKAGE_VERSION = '2.12.3'
+const INTEGRITY_CHECKSUM = '4db4a41e972cec1b64cc569c66952d82'
const IS_MOCKED_RESPONSE = Symbol('isMockedResponse')
const activeClientIds = new Set()
@@ -71,11 +71,6 @@ addEventListener('message', async function (event) {
break
}
- case 'MOCK_DEACTIVATE': {
- activeClientIds.delete(clientId)
- break
- }
-
case 'CLIENT_CLOSED': {
activeClientIds.delete(clientId)
@@ -94,6 +89,8 @@ addEventListener('message', async function (event) {
})
addEventListener('fetch', function (event) {
+ const requestInterceptedAt = Date.now()
+
// Bypass navigation requests.
if (event.request.mode === 'navigate') {
return
@@ -110,23 +107,29 @@ addEventListener('fetch', function (event) {
// Bypass all requests when there are no active clients.
// Prevents the self-unregistered worked from handling requests
- // after it's been deleted (still remains active until the next reload).
+ // after it's been terminated (still remains active until the next reload).
if (activeClientIds.size === 0) {
return
}
const requestId = crypto.randomUUID()
- event.respondWith(handleRequest(event, requestId))
+ event.respondWith(handleRequest(event, requestId, requestInterceptedAt))
})
/**
* @param {FetchEvent} event
* @param {string} requestId
+ * @param {number} requestInterceptedAt
*/
-async function handleRequest(event, requestId) {
+async function handleRequest(event, requestId, requestInterceptedAt) {
const client = await resolveMainClient(event)
const requestCloneForEvents = event.request.clone()
- const response = await getResponse(event, client, requestId)
+ const response = await getResponse(
+ event,
+ client,
+ requestId,
+ requestInterceptedAt,
+ )
// Send back the response clone for the "response:*" life-cycle events.
// Ensure MSW is active and ready to handle the message, otherwise
@@ -202,9 +205,10 @@ async function resolveMainClient(event) {
* @param {FetchEvent} event
* @param {Client | undefined} client
* @param {string} requestId
+ * @param {number} requestInterceptedAt
* @returns {Promise}
*/
-async function getResponse(event, client, requestId) {
+async function getResponse(event, client, requestId, requestInterceptedAt) {
// Clone the request because it might've been already used
// (i.e. its body has been read and sent to the client).
const requestClone = event.request.clone()
@@ -255,6 +259,7 @@ async function getResponse(event, client, requestId) {
type: 'REQUEST',
payload: {
id: requestId,
+ interceptedAt: requestInterceptedAt,
...serializedRequest,
},
},
diff --git a/frontend/public/safari-pinned-tab.svg b/frontend/public/safari-pinned-tab.svg
index fb271c3449cb..daa0090f0fd5 100644
--- a/frontend/public/safari-pinned-tab.svg
+++ b/frontend/public/safari-pinned-tab.svg
@@ -1,32 +1,7 @@
-
- safari-pinned-tab-svg
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
diff --git a/frontend/src/api/conversation-service/v1-conversation-service.api.ts b/frontend/src/api/conversation-service/v1-conversation-service.api.ts
index 717228c79ff7..bd37fa818043 100644
--- a/frontend/src/api/conversation-service/v1-conversation-service.api.ts
+++ b/frontend/src/api/conversation-service/v1-conversation-service.api.ts
@@ -60,6 +60,8 @@ class V1ConversationService {
selected_branch?: string,
conversationInstructions?: string,
trigger?: ConversationTrigger,
+ parent_conversation_id?: string,
+ agent_type?: "default" | "plan",
): Promise {
const body: V1AppConversationStartRequest = {
selected_repository: selectedRepository,
@@ -67,6 +69,8 @@ class V1ConversationService {
selected_branch,
title: conversationInstructions,
trigger,
+ parent_conversation_id: parent_conversation_id || null,
+ agent_type,
};
// Add initial message if provided
@@ -111,11 +115,11 @@ class V1ConversationService {
* Search for start tasks (ongoing tasks that haven't completed yet)
* Use this to find tasks that were started but the user navigated away
*
- * Note: Backend only supports filtering by limit. To filter by repository/trigger,
+ * Note: Backend supports filtering by limit and created_at__gte. To filter by repository/trigger,
* filter the results client-side after fetching.
*
* @param limit Maximum number of tasks to return (max 100)
- * @returns Array of start tasks
+ * @returns Array of start tasks from the last 20 minutes
*/
static async searchStartTasks(
limit: number = 100,
@@ -123,6 +127,10 @@ class V1ConversationService {
const params = new URLSearchParams();
params.append("limit", limit.toString());
+ // Only get tasks from the last 20 minutes
+ const twentyMinutesAgo = new Date(Date.now() - 20 * 60 * 1000);
+ params.append("created_at__gte", twentyMinutesAgo.toISOString());
+
const { data } = await openHands.get(
`/api/v1/app-conversations/start-tasks/search?${params.toString()}`,
);
@@ -288,6 +296,25 @@ class V1ConversationService {
const { data } = await openHands.get<{ runtime_id: string }>(url);
return data;
}
+
+ /**
+ * Read a file from a specific conversation's sandbox workspace
+ * @param conversationId The conversation ID
+ * @param filePath Path to the file to read within the sandbox workspace (defaults to /workspace/project/PLAN.md)
+ * @returns The content of the file or an empty string if the file doesn't exist
+ */
+ static async readConversationFile(
+ conversationId: string,
+ filePath: string = "/workspace/project/PLAN.md",
+ ): Promise {
+ const params = new URLSearchParams();
+ params.append("file_path", filePath);
+
+ const { data } = await openHands.get(
+ `/api/v1/app-conversations/${conversationId}/file?${params.toString()}`,
+ );
+ return data;
+ }
}
export default V1ConversationService;
diff --git a/frontend/src/api/conversation-service/v1-conversation-service.types.ts b/frontend/src/api/conversation-service/v1-conversation-service.types.ts
index b48ce5bd6b9b..621283c27452 100644
--- a/frontend/src/api/conversation-service/v1-conversation-service.types.ts
+++ b/frontend/src/api/conversation-service/v1-conversation-service.types.ts
@@ -3,15 +3,19 @@ import { Provider } from "#/types/settings";
import { V1SandboxStatus } from "../sandbox-service/sandbox-service.types";
// V1 API Types for requests
-// Note: This represents the serialized API format, not the internal TextContent/ImageContent types
-export interface V1MessageContent {
- type: "text" | "image_url";
- text?: string;
- image_url?: {
- url: string;
- };
+// These types match the SDK's TextContent and ImageContent formats
+export interface V1TextContent {
+ type: "text";
+ text: string;
}
+export interface V1ImageContent {
+ type: "image";
+ image_urls: string[];
+}
+
+export type V1MessageContent = V1TextContent | V1ImageContent;
+
type V1Role = "user" | "system" | "assistant" | "tool";
export interface V1SendMessageRequest {
@@ -30,6 +34,8 @@ export interface V1AppConversationStartRequest {
title?: string | null;
trigger?: ConversationTrigger | null;
pr_number?: number[];
+ parent_conversation_id?: string | null;
+ agent_type?: "default" | "plan";
}
export type V1AppConversationStartTaskStatus =
@@ -38,6 +44,7 @@ export type V1AppConversationStartTaskStatus =
| "PREPARING_REPOSITORY"
| "RUNNING_SETUP_SCRIPT"
| "SETTING_UP_GIT_HOOKS"
+ | "SETTING_UP_SKILLS"
| "STARTING_CONVERSATION"
| "READY"
| "ERROR";
diff --git a/frontend/src/api/open-hands.types.ts b/frontend/src/api/open-hands.types.ts
index 9a30e46027b0..47d34fe567db 100644
--- a/frontend/src/api/open-hands.types.ts
+++ b/frontend/src/api/open-hands.types.ts
@@ -77,6 +77,7 @@ export interface Conversation {
session_api_key: string | null;
pr_number?: number[] | null;
conversation_version?: "V0" | "V1";
+ sub_conversation_ids?: string[];
}
export interface ResultSet {
diff --git a/frontend/src/assets/branding/azure-devops-logo.svg b/frontend/src/assets/branding/azure-devops-logo.svg
new file mode 100644
index 000000000000..01ff9f8a90ec
--- /dev/null
+++ b/frontend/src/assets/branding/azure-devops-logo.svg
@@ -0,0 +1 @@
+
diff --git a/frontend/src/assets/branding/openhands-logo.svg b/frontend/src/assets/branding/openhands-logo.svg
index a079e0aa515e..3aa40d44049f 100644
--- a/frontend/src/assets/branding/openhands-logo.svg
+++ b/frontend/src/assets/branding/openhands-logo.svg
@@ -1,16 +1,9 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
diff --git a/frontend/src/components/features/chat/change-agent-button.tsx b/frontend/src/components/features/chat/change-agent-button.tsx
index 706f582b5934..68a0bd26997a 100644
--- a/frontend/src/components/features/chat/change-agent-button.tsx
+++ b/frontend/src/components/features/chat/change-agent-button.tsx
@@ -1,4 +1,4 @@
-import React, { useMemo, useEffect } from "react";
+import React, { useMemo, useEffect, useState } from "react";
import { useTranslation } from "react-i18next";
import { Typography } from "#/ui/typography";
import { I18nKey } from "#/i18n/declaration";
@@ -11,31 +11,87 @@ import { cn } from "#/utils/utils";
import { USE_PLANNING_AGENT } from "#/utils/feature-flags";
import { useAgentState } from "#/hooks/use-agent-state";
import { AgentState } from "#/types/agent-state";
+import { useActiveConversation } from "#/hooks/query/use-active-conversation";
+import { useUnifiedWebSocketStatus } from "#/hooks/use-unified-websocket-status";
+import { useSubConversationTaskPolling } from "#/hooks/query/use-sub-conversation-task-polling";
+import { useHandlePlanClick } from "#/hooks/use-handle-plan-click";
export function ChangeAgentButton() {
- const { t } = useTranslation();
- const [contextMenuOpen, setContextMenuOpen] = React.useState(false);
+ const [contextMenuOpen, setContextMenuOpen] = useState(false);
- const conversationMode = useConversationStore(
- (state) => state.conversationMode,
- );
+ const { conversationMode, setConversationMode, subConversationTaskId } =
+ useConversationStore();
- const setConversationMode = useConversationStore(
- (state) => state.setConversationMode,
- );
+ const webSocketStatus = useUnifiedWebSocketStatus();
+
+ const isWebSocketConnected = webSocketStatus === "CONNECTED";
const shouldUsePlanningAgent = USE_PLANNING_AGENT();
const { curAgentState } = useAgentState();
+ const { t } = useTranslation();
+
const isAgentRunning = curAgentState === AgentState.RUNNING;
+ const { data: conversation } = useActiveConversation();
+
+ // Poll sub-conversation task and invalidate parent conversation when ready
+ useSubConversationTaskPolling(
+ subConversationTaskId,
+ conversation?.conversation_id || null,
+ );
+
+ // Get handlePlanClick and isCreatingConversation from custom hook
+ const { handlePlanClick, isCreatingConversation } = useHandlePlanClick();
+
// Close context menu when agent starts running
useEffect(() => {
- if (isAgentRunning && contextMenuOpen) {
+ if ((isAgentRunning || !isWebSocketConnected) && contextMenuOpen) {
setContextMenuOpen(false);
}
- }, [isAgentRunning, contextMenuOpen]);
+ }, [isAgentRunning, contextMenuOpen, isWebSocketConnected]);
+
+ const isButtonDisabled =
+ isAgentRunning ||
+ isCreatingConversation ||
+ !isWebSocketConnected ||
+ !shouldUsePlanningAgent;
+
+ // Handle Shift + Tab keyboard shortcut to cycle through modes
+ useEffect(() => {
+ if (isButtonDisabled) {
+ return undefined;
+ }
+
+ const handleKeyDown = (event: KeyboardEvent) => {
+ // Check for Shift + Tab combination
+ if (event.shiftKey && event.key === "Tab") {
+ // Prevent default tab navigation behavior
+ event.preventDefault();
+ event.stopPropagation();
+
+ // Cycle between modes: code -> plan -> code
+ const nextMode = conversationMode === "code" ? "plan" : "code";
+ if (nextMode === "plan") {
+ handlePlanClick(event);
+ } else {
+ setConversationMode(nextMode);
+ }
+ }
+ };
+
+ document.addEventListener("keydown", handleKeyDown);
+
+ return () => {
+ document.removeEventListener("keydown", handleKeyDown);
+ };
+ }, [
+ isButtonDisabled,
+ conversationMode,
+ setConversationMode,
+ handlePlanClick,
+ ]);
const handleButtonClick = (event: React.MouseEvent) => {
event.preventDefault();
@@ -49,12 +105,6 @@ export function ChangeAgentButton() {
setConversationMode("code");
};
- const handlePlanClick = (event: React.MouseEvent) => {
- event.preventDefault();
- event.stopPropagation();
- setConversationMode("plan");
- };
-
const isExecutionAgent = conversationMode === "code";
const buttonLabel = useMemo(() => {
@@ -80,11 +130,11 @@ export function ChangeAgentButton() {
void;
onCodeClick?: (event: React.MouseEvent) => void;
@@ -52,17 +47,17 @@ export function ChangeAgentContextMenu({
testId="change-agent-context-menu"
position="top"
alignment="left"
- className="min-h-fit min-w-[195px] mb-2"
+ className="min-h-fit mb-2 min-w-[195px] max-w-[195px] gap-0"
>
-
-
diff --git a/frontend/src/components/features/chat/chat-message.tsx b/frontend/src/components/features/chat/chat-message.tsx
index b822220c4dac..6f2f388682c1 100644
--- a/frontend/src/components/features/chat/chat-message.tsx
+++ b/frontend/src/components/features/chat/chat-message.tsx
@@ -1,15 +1,9 @@
import React from "react";
-import Markdown from "react-markdown";
-import remarkGfm from "remark-gfm";
-import remarkBreaks from "remark-breaks";
-import { code } from "../markdown/code";
import { cn } from "#/utils/utils";
-import { ul, ol } from "../markdown/list";
import { CopyToClipboardButton } from "#/components/shared/buttons/copy-to-clipboard-button";
-import { anchor } from "../markdown/anchor";
import { OpenHandsSourceType } from "#/types/core/base";
-import { paragraph } from "../markdown/paragraph";
import { TooltipButton } from "#/components/shared/buttons/tooltip-button";
+import { MarkdownRenderer } from "../markdown/markdown-renderer";
interface ChatMessageProps {
type: OpenHandsSourceType;
@@ -19,6 +13,7 @@ interface ChatMessageProps {
onClick: () => void;
tooltip?: string;
}>;
+ isFromPlanningAgent?: boolean;
}
export function ChatMessage({
@@ -26,6 +21,7 @@ export function ChatMessage({
message,
children,
actions,
+ isFromPlanningAgent = false,
}: React.PropsWithChildren) {
const [isHovering, setIsHovering] = React.useState(false);
const [isCopy, setIsCopy] = React.useState(false);
@@ -59,6 +55,7 @@ export function ChatMessage({
"flex flex-col gap-2",
type === "user" && " p-4 bg-tertiary self-end",
type === "agent" && "mt-6 w-full max-w-full bg-transparent",
+ isFromPlanningAgent && "border border-[#597ff4] bg-tertiary p-4",
)}
>
-
- {message}
-
+ {message}
{children}
diff --git a/frontend/src/components/features/chat/error-message.tsx b/frontend/src/components/features/chat/error-message.tsx
index 8de367a9a2bf..da40b3786e5a 100644
--- a/frontend/src/components/features/chat/error-message.tsx
+++ b/frontend/src/components/features/chat/error-message.tsx
@@ -1,13 +1,9 @@
import React from "react";
-import Markdown from "react-markdown";
-import remarkGfm from "remark-gfm";
-import remarkBreaks from "remark-breaks";
import { useTranslation } from "react-i18next";
-import { code } from "../markdown/code";
-import { ol, ul } from "../markdown/list";
import ArrowDown from "#/icons/angle-down-solid.svg?react";
import ArrowUp from "#/icons/angle-up-solid.svg?react";
import i18n from "#/i18n";
+import { MarkdownRenderer } from "../markdown/markdown-renderer";
interface ErrorMessageProps {
errorId?: string;
@@ -40,18 +36,7 @@ export function ErrorMessage({ errorId, defaultMessage }: ErrorMessageProps) {
- {showDetails && (
-
- {defaultMessage}
-
- )}
+ {showDetails && {defaultMessage} }
);
}
diff --git a/frontend/src/components/features/chat/event-content-helpers/get-observation-content.ts b/frontend/src/components/features/chat/event-content-helpers/get-observation-content.ts
index 56bf58226368..435a6869183c 100644
--- a/frontend/src/components/features/chat/event-content-helpers/get-observation-content.ts
+++ b/frontend/src/components/features/chat/event-content-helpers/get-observation-content.ts
@@ -22,6 +22,13 @@ const getCommandObservationContent = (
if (content.length > MAX_CONTENT_LENGTH) {
content = `${content.slice(0, MAX_CONTENT_LENGTH)}...`;
}
+
+ const command = event.observation === "run" ? event.extras.command : null;
+
+ if (command) {
+ return `Command:\n\`\`\`sh\n${command}\n\`\`\`\n\nOutput:\n\`\`\`sh\n${content.trim() || i18n.t("OBSERVATION$COMMAND_NO_OUTPUT")}\n\`\`\``;
+ }
+
return `Output:\n\`\`\`sh\n${content.trim() || i18n.t("OBSERVATION$COMMAND_NO_OUTPUT")}\n\`\`\``;
};
diff --git a/frontend/src/components/features/chat/event-message-components/task-tracking-event-message.tsx b/frontend/src/components/features/chat/event-message-components/task-tracking-event-message.tsx
index 785305333c63..cd6ff59a0549 100644
--- a/frontend/src/components/features/chat/event-message-components/task-tracking-event-message.tsx
+++ b/frontend/src/components/features/chat/event-message-components/task-tracking-event-message.tsx
@@ -1,11 +1,7 @@
-import React from "react";
-import { useTranslation } from "react-i18next";
import { OpenHandsObservation } from "#/types/core/observations";
import { isTaskTrackingObservation } from "#/types/core/guards";
-import { GenericEventMessage } from "../generic-event-message";
import { TaskTrackingObservationContent } from "../task-tracking-observation-content";
import { ConfirmationButtons } from "#/components/shared/buttons/confirmation-buttons";
-import { getObservationResult } from "../event-content-helpers/get-observation-result";
interface TaskTrackingEventMessageProps {
event: OpenHandsObservation;
@@ -16,34 +12,13 @@ export function TaskTrackingEventMessage({
event,
shouldShowConfirmationButtons,
}: TaskTrackingEventMessageProps) {
- const { t } = useTranslation();
-
if (!isTaskTrackingObservation(event)) {
return null;
}
- const { command } = event.extras;
- let title: React.ReactNode;
- let initiallyExpanded = false;
-
- // Determine title and expansion state based on command
- if (command === "plan") {
- title = t("OBSERVATION_MESSAGE$TASK_TRACKING_PLAN");
- initiallyExpanded = true;
- } else {
- // command === "view"
- title = t("OBSERVATION_MESSAGE$TASK_TRACKING_VIEW");
- initiallyExpanded = false;
- }
-
return (
- }
- success={getObservationResult(event)}
- initiallyExpanded={initiallyExpanded}
- />
+
{shouldShowConfirmationButtons && }
);
diff --git a/frontend/src/components/features/chat/expandable-message.tsx b/frontend/src/components/features/chat/expandable-message.tsx
index 918eafd6b859..12942498a282 100644
--- a/frontend/src/components/features/chat/expandable-message.tsx
+++ b/frontend/src/components/features/chat/expandable-message.tsx
@@ -1,9 +1,6 @@
import { useEffect, useState } from "react";
import { Trans, useTranslation } from "react-i18next";
-import Markdown from "react-markdown";
import { Link } from "react-router";
-import remarkGfm from "remark-gfm";
-import remarkBreaks from "remark-breaks";
import { useConfig } from "#/hooks/query/use-config";
import { I18nKey } from "#/i18n/declaration";
import ArrowDown from "#/icons/angle-down-solid.svg?react";
@@ -13,9 +10,7 @@ import XCircle from "#/icons/x-circle-solid.svg?react";
import { OpenHandsAction } from "#/types/core/actions";
import { OpenHandsObservation } from "#/types/core/observations";
import { cn } from "#/utils/utils";
-import { code } from "../markdown/code";
-import { ol, ul } from "../markdown/list";
-import { paragraph } from "../markdown/paragraph";
+import { MarkdownRenderer } from "../markdown/markdown-renderer";
import { MonoComponent } from "./mono-component";
import { PathComponent } from "./path-component";
@@ -100,7 +95,7 @@ export function ExpandableMessage({
const statusIconClasses = "h-4 w-4 ml-2 inline";
if (
- config?.FEATURE_FLAGS.ENABLE_BILLING &&
+ config?.FEATURE_FLAGS?.ENABLE_BILLING &&
config?.APP_MODE === "saas" &&
id === I18nKey.STATUS$ERROR_LLM_OUT_OF_CREDITS
) {
@@ -192,17 +187,7 @@ export function ExpandableMessage({
{showDetails && (
-
- {details}
-
+ {details}
)}
diff --git a/frontend/src/components/features/chat/generic-event-message.tsx b/frontend/src/components/features/chat/generic-event-message.tsx
index e5124b69fef9..ff2ab633b189 100644
--- a/frontend/src/components/features/chat/generic-event-message.tsx
+++ b/frontend/src/components/features/chat/generic-event-message.tsx
@@ -1,13 +1,9 @@
import React from "react";
-import Markdown from "react-markdown";
-import remarkGfm from "remark-gfm";
-import remarkBreaks from "remark-breaks";
-import { code } from "../markdown/code";
-import { ol, ul } from "../markdown/list";
import ArrowDown from "#/icons/angle-down-solid.svg?react";
import ArrowUp from "#/icons/angle-up-solid.svg?react";
import { SuccessIndicator } from "./success-indicator";
import { ObservationResultStatus } from "./event-content-helpers/get-observation-result";
+import { MarkdownRenderer } from "../markdown/markdown-renderer";
interface GenericEventMessageProps {
title: React.ReactNode;
@@ -49,16 +45,7 @@ export function GenericEventMessage({
{showDetails &&
(typeof details === "string" ? (
-
- {details}
-
+ {details}
) : (
details
))}
diff --git a/frontend/src/components/features/chat/interactive-chat-box.tsx b/frontend/src/components/features/chat/interactive-chat-box.tsx
index 4c94df4b425a..56a4def14dd7 100644
--- a/frontend/src/components/features/chat/interactive-chat-box.tsx
+++ b/frontend/src/components/features/chat/interactive-chat-box.tsx
@@ -8,6 +8,8 @@ import { GitControlBar } from "./git-control-bar";
import { useConversationStore } from "#/state/conversation-store";
import { useAgentState } from "#/hooks/use-agent-state";
import { processFiles, processImages } from "#/utils/file-processing";
+import { useSubConversationTaskPolling } from "#/hooks/query/use-sub-conversation-task-polling";
+import { isTaskPolling } from "#/utils/utils";
interface InteractiveChatBoxProps {
onSubmit: (message: string, images: File[], files: File[]) => void;
@@ -24,10 +26,18 @@ export function InteractiveChatBox({ onSubmit }: InteractiveChatBoxProps) {
removeFileLoading,
addImageLoading,
removeImageLoading,
+ subConversationTaskId,
} = useConversationStore();
const { curAgentState } = useAgentState();
const { data: conversation } = useActiveConversation();
+ // Poll sub-conversation task to check if it's loading
+ const { taskStatus: subConversationTaskStatus } =
+ useSubConversationTaskPolling(
+ subConversationTaskId,
+ conversation?.conversation_id || null,
+ );
+
// Helper function to validate and filter files
const validateAndFilterFiles = (selectedFiles: File[]) => {
const validation = validateFiles(selectedFiles, [...images, ...files]);
@@ -134,7 +144,8 @@ export function InteractiveChatBox({ onSubmit }: InteractiveChatBoxProps) {
const isDisabled =
curAgentState === AgentState.LOADING ||
- curAgentState === AgentState.AWAITING_USER_CONFIRMATION;
+ curAgentState === AgentState.AWAITING_USER_CONFIRMATION ||
+ isTaskPolling(subConversationTaskStatus);
return (
diff --git a/frontend/src/components/features/chat/task-tracking-observation-content.tsx b/frontend/src/components/features/chat/task-tracking-observation-content.tsx
index e4dd95c2bfa2..7d9e7ff1467a 100644
--- a/frontend/src/components/features/chat/task-tracking-observation-content.tsx
+++ b/frontend/src/components/features/chat/task-tracking-observation-content.tsx
@@ -1,6 +1,5 @@
import { TaskTrackingObservation } from "#/types/core/observations";
import { TaskListSection } from "./task-tracking/task-list-section";
-import { ResultSection } from "./task-tracking/result-section";
interface TaskTrackingObservationContentProps {
event: TaskTrackingObservation;
@@ -16,11 +15,6 @@ export function TaskTrackingObservationContent({
{/* Task List section - only show for 'plan' command */}
{shouldShowTaskList && }
-
- {/* Result message - only show if there's meaningful content */}
- {event.content && event.content.trim() && (
-
- )}
);
}
diff --git a/frontend/src/components/features/chat/task-tracking/result-section.tsx b/frontend/src/components/features/chat/task-tracking/result-section.tsx
deleted file mode 100644
index 0cd06e3a4a96..000000000000
--- a/frontend/src/components/features/chat/task-tracking/result-section.tsx
+++ /dev/null
@@ -1,21 +0,0 @@
-import { useTranslation } from "react-i18next";
-import { Typography } from "#/ui/typography";
-
-interface ResultSectionProps {
- content: string;
-}
-
-export function ResultSection({ content }: ResultSectionProps) {
- const { t } = useTranslation();
-
- return (
-
-
- {t("TASK_TRACKING_OBSERVATION$RESULT")}
-
-
-
- );
-}
diff --git a/frontend/src/components/features/chat/task-tracking/task-item.tsx b/frontend/src/components/features/chat/task-tracking/task-item.tsx
index 923ed8ea3f51..72e9e74aacea 100644
--- a/frontend/src/components/features/chat/task-tracking/task-item.tsx
+++ b/frontend/src/components/features/chat/task-tracking/task-item.tsx
@@ -1,7 +1,11 @@
+import { useMemo } from "react";
import { useTranslation } from "react-i18next";
+import CircleIcon from "#/icons/u-circle.svg?react";
+import CheckCircleIcon from "#/icons/u-check-circle.svg?react";
+import LoadingIcon from "#/icons/loading.svg?react";
+import { I18nKey } from "#/i18n/declaration";
+import { cn } from "#/utils/utils";
import { Typography } from "#/ui/typography";
-import { StatusIcon } from "./status-icon";
-import { StatusBadge } from "./status-badge";
interface TaskItemProps {
task: {
@@ -10,33 +14,47 @@ interface TaskItemProps {
status: "todo" | "in_progress" | "done";
notes?: string;
};
- index: number;
}
-export function TaskItem({ task, index }: TaskItemProps) {
+export function TaskItem({ task }: TaskItemProps) {
const { t } = useTranslation();
+ const icon = useMemo(() => {
+ switch (task.status) {
+ case "todo":
+ return
;
+ case "in_progress":
+ return
;
+ case "done":
+ return
;
+ default:
+ return
;
+ }
+ }, [task.status]);
+
+ const isDoneStatus = task.status === "done";
+
return (
-
-
-
-
-
-
- {index + 1}.
-
-
-
-
{task.title}
-
- {t("TASK_TRACKING_OBSERVATION$TASK_ID")}: {task.id}
-
- {task.notes && (
-
- {t("TASK_TRACKING_OBSERVATION$TASK_NOTES")}: {task.notes}
-
+
+
{icon}
+
+
+ >
+ {task.title}
+
+
+ {t(I18nKey.TASK_TRACKING_OBSERVATION$TASK_ID)}: {task.id}
+
+
+ {t(I18nKey.TASK_TRACKING_OBSERVATION$TASK_NOTES)}: {task.notes}
+
);
diff --git a/frontend/src/components/features/chat/task-tracking/task-list-section.tsx b/frontend/src/components/features/chat/task-tracking/task-list-section.tsx
index 912920252242..075517aacd1d 100644
--- a/frontend/src/components/features/chat/task-tracking/task-list-section.tsx
+++ b/frontend/src/components/features/chat/task-tracking/task-list-section.tsx
@@ -1,5 +1,7 @@
import { useTranslation } from "react-i18next";
import { TaskItem } from "./task-item";
+import LessonPlanIcon from "#/icons/lesson-plan.svg?react";
+import { I18nKey } from "#/i18n/declaration";
import { Typography } from "#/ui/typography";
interface TaskListSectionProps {
@@ -15,19 +17,20 @@ export function TaskListSection({ taskList }: TaskListSectionProps) {
const { t } = useTranslation();
return (
-
-
-
- {t("TASK_TRACKING_OBSERVATION$TASK_LIST")} ({taskList.length}{" "}
- {taskList.length === 1 ? "item" : "items"})
-
+
+ {/* Header Tabs */}
+
+
+
+ {t(I18nKey.COMMON$TASKS)}
+
-
-
- {taskList.map((task, index) => (
-
- ))}
-
+
+ {/* Task Items */}
+
+ {taskList.map((task) => (
+
+ ))}
);
diff --git a/frontend/src/components/features/context-menu/account-settings-context-menu.tsx b/frontend/src/components/features/context-menu/account-settings-context-menu.tsx
index c09920e61472..a30fe5f81651 100644
--- a/frontend/src/components/features/context-menu/account-settings-context-menu.tsx
+++ b/frontend/src/components/features/context-menu/account-settings-context-menu.tsx
@@ -25,7 +25,14 @@ export function AccountSettingsContextMenu({
const { data: config } = useConfig();
const isSaas = config?.APP_MODE === "saas";
- const navItems = (isSaas ? SAAS_NAV_ITEMS : OSS_NAV_ITEMS).map((item) => ({
+
+ // Get navigation items and filter out LLM settings if the feature flag is enabled
+ let items = isSaas ? SAAS_NAV_ITEMS : OSS_NAV_ITEMS;
+ if (config?.FEATURE_FLAGS?.HIDE_LLM_SETTINGS) {
+ items = items.filter((item) => item.to !== "/settings");
+ }
+
+ const navItems = items.map((item) => ({
...item,
icon: React.cloneElement(item.icon, {
width: 16,
diff --git a/frontend/src/components/features/context-menu/context-menu-icon-text-with-description.tsx b/frontend/src/components/features/context-menu/context-menu-icon-text-with-description.tsx
new file mode 100644
index 000000000000..fd505fef5800
--- /dev/null
+++ b/frontend/src/components/features/context-menu/context-menu-icon-text-with-description.tsx
@@ -0,0 +1,39 @@
+import React from "react";
+import { ContextMenuIconText } from "./context-menu-icon-text";
+import { Typography } from "#/ui/typography";
+import { cn } from "#/utils/utils";
+
+interface ContextMenuIconTextWithDescriptionProps {
+ icon: React.ComponentType<{ className?: string }>;
+ title: string;
+ description: string;
+ className?: string;
+ iconClassName?: string;
+}
+
+export function ContextMenuIconTextWithDescription({
+ icon,
+ title,
+ description,
+ className,
+ iconClassName,
+}: ContextMenuIconTextWithDescriptionProps) {
+ return (
+
+
+
+ {description}
+
+
+ );
+}
diff --git a/frontend/src/components/features/controls/agent-status.tsx b/frontend/src/components/features/controls/agent-status.tsx
index 2fbff7192f50..078eb5f40f25 100644
--- a/frontend/src/components/features/controls/agent-status.tsx
+++ b/frontend/src/components/features/controls/agent-status.tsx
@@ -7,13 +7,14 @@ import { ChatStopButton } from "../chat/chat-stop-button";
import { AgentState } from "#/types/agent-state";
import ClockIcon from "#/icons/u-clock-three.svg?react";
import { ChatResumeAgentButton } from "../chat/chat-play-button";
-import { cn } from "#/utils/utils";
+import { cn, isTaskPolling } from "#/utils/utils";
import { AgentLoading } from "./agent-loading";
import { useConversationStore } from "#/state/conversation-store";
import CircleErrorIcon from "#/icons/circle-error.svg?react";
import { useAgentState } from "#/hooks/use-agent-state";
import { useUnifiedWebSocketStatus } from "#/hooks/use-unified-websocket-status";
import { useTaskPolling } from "#/hooks/query/use-task-polling";
+import { useSubConversationTaskPolling } from "#/hooks/query/use-sub-conversation-task-polling";
export interface AgentStatusProps {
className?: string;
@@ -38,6 +39,15 @@ export function AgentStatus({
const { data: conversation } = useActiveConversation();
const { taskStatus } = useTaskPolling();
+ const { subConversationTaskId } = useConversationStore();
+
+ // Poll sub-conversation task to track its loading state
+ const { taskStatus: subConversationTaskStatus } =
+ useSubConversationTaskPolling(
+ subConversationTaskId,
+ conversation?.conversation_id || null,
+ );
+
const statusCode = getStatusCode(
curStatusMessage,
webSocketStatus,
@@ -45,17 +55,16 @@ export function AgentStatus({
conversation?.runtime_status || null,
curAgentState,
taskStatus,
+ subConversationTaskStatus,
);
- const isTaskLoading =
- taskStatus && taskStatus !== "ERROR" && taskStatus !== "READY";
-
const shouldShownAgentLoading =
isPausing ||
curAgentState === AgentState.INIT ||
curAgentState === AgentState.LOADING ||
(webSocketStatus === "CONNECTING" && taskStatus !== "ERROR") ||
- isTaskLoading;
+ isTaskPolling(taskStatus) ||
+ isTaskPolling(subConversationTaskStatus);
const shouldShownAgentError =
curAgentState === AgentState.ERROR ||
diff --git a/frontend/src/components/features/controls/server-status.tsx b/frontend/src/components/features/controls/server-status.tsx
index a3bbbe9732ba..e79d4215ea4b 100644
--- a/frontend/src/components/features/controls/server-status.tsx
+++ b/frontend/src/components/features/controls/server-status.tsx
@@ -6,6 +6,7 @@ import { AgentState } from "#/types/agent-state";
import { useAgentState } from "#/hooks/use-agent-state";
import { useTaskPolling } from "#/hooks/query/use-task-polling";
import { getStatusColor } from "#/utils/utils";
+import { useErrorMessageStore } from "#/stores/error-message-store";
export interface ServerStatusProps {
className?: string;
@@ -21,6 +22,7 @@ export function ServerStatus({
const { curAgentState } = useAgentState();
const { t } = useTranslation();
const { isTask, taskStatus, taskDetail } = useTaskPolling();
+ const { errorMessage } = useErrorMessageStore();
const isStartingStatus =
curAgentState === AgentState.LOADING || curAgentState === AgentState.INIT;
@@ -69,7 +71,7 @@ export function ServerStatus({
return t(I18nKey.COMMON$SERVER_STOPPED);
}
if (curAgentState === AgentState.ERROR) {
- return t(I18nKey.COMMON$ERROR);
+ return errorMessage || t(I18nKey.COMMON$ERROR);
}
return t(I18nKey.COMMON$RUNNING);
};
@@ -79,7 +81,7 @@ export function ServerStatus({
return (
diff --git a/frontend/src/components/features/conversation-panel/conversation-card/conversation-card-footer.tsx b/frontend/src/components/features/conversation-panel/conversation-card/conversation-card-footer.tsx
index fb77b582428d..f44a2f814125 100644
--- a/frontend/src/components/features/conversation-panel/conversation-card/conversation-card-footer.tsx
+++ b/frontend/src/components/features/conversation-panel/conversation-card/conversation-card-footer.tsx
@@ -39,7 +39,7 @@ export function ConversationCardFooter({
{(createdAt ?? lastUpdatedAt) && (
- {`${formatTimeDelta(new Date(lastUpdatedAt ?? createdAt))} ${t(I18nKey.CONVERSATION$AGO)}`}
+ {`${formatTimeDelta(lastUpdatedAt ?? createdAt)} ${t(I18nKey.CONVERSATION$AGO)}`}
)}
diff --git a/frontend/src/components/features/conversation-panel/conversation-card/conversation-repo-link.tsx b/frontend/src/components/features/conversation-panel/conversation-card/conversation-repo-link.tsx
index 2cc937011d4c..27c50bbcb476 100644
--- a/frontend/src/components/features/conversation-panel/conversation-card/conversation-repo-link.tsx
+++ b/frontend/src/components/features/conversation-panel/conversation-card/conversation-repo-link.tsx
@@ -3,12 +3,13 @@ import { FaCodeBranch } from "react-icons/fa";
import { IconType } from "react-icons/lib";
import { RepositorySelection } from "#/api/open-hands.types";
import { Provider } from "#/types/settings";
+import AzureDevOpsLogo from "#/assets/branding/azure-devops-logo.svg?react";
interface ConversationRepoLinkProps {
selectedRepository: RepositorySelection;
}
-const providerIcon: Record
= {
+const providerIcon: Partial> = {
bitbucket: FaBitbucket,
github: FaGithub,
gitlab: FaGitlab,
@@ -26,6 +27,9 @@ export function ConversationRepoLink({
{Icon &&
}
+ {selectedRepository.git_provider === "azure_devops" && (
+
+ )}
diff --git a/frontend/src/components/features/conversation-panel/start-task-card/start-task-card-footer.tsx b/frontend/src/components/features/conversation-panel/start-task-card/start-task-card-footer.tsx
index a2f9bb4036ee..c5339183d50d 100644
--- a/frontend/src/components/features/conversation-panel/start-task-card/start-task-card-footer.tsx
+++ b/frontend/src/components/features/conversation-panel/start-task-card/start-task-card-footer.tsx
@@ -31,7 +31,7 @@ export function StartTaskCardFooter({
{createdAt && (
- {`${formatTimeDelta(new Date(createdAt))} ${t(I18nKey.CONVERSATION$AGO)}`}
+ {`${formatTimeDelta(createdAt)} ${t(I18nKey.CONVERSATION$AGO)}`}
)}
diff --git a/frontend/src/components/features/conversation-panel/start-task-card/start-task-status-indicator.tsx b/frontend/src/components/features/conversation-panel/start-task-card/start-task-status-indicator.tsx
index 958b173d4c20..d48a604b5baa 100644
--- a/frontend/src/components/features/conversation-panel/start-task-card/start-task-status-indicator.tsx
+++ b/frontend/src/components/features/conversation-panel/start-task-card/start-task-status-indicator.tsx
@@ -19,6 +19,7 @@ export function StartTaskStatusIndicator({
case "PREPARING_REPOSITORY":
case "RUNNING_SETUP_SCRIPT":
case "SETTING_UP_GIT_HOOKS":
+ case "SETTING_UP_SKILLS":
case "STARTING_CONVERSATION":
return "bg-yellow-500 animate-pulse";
default:
diff --git a/frontend/src/components/features/conversation/conversation-tabs/conversation-tabs.tsx b/frontend/src/components/features/conversation/conversation-tabs/conversation-tabs.tsx
index e84466bd2241..eedb9010e8a5 100644
--- a/frontend/src/components/features/conversation/conversation-tabs/conversation-tabs.tsx
+++ b/frontend/src/components/features/conversation/conversation-tabs/conversation-tabs.tsx
@@ -19,8 +19,10 @@ import {
} from "#/state/conversation-store";
import { ConversationTabsContextMenu } from "./conversation-tabs-context-menu";
import { USE_PLANNING_AGENT } from "#/utils/feature-flags";
+import { useConversationId } from "#/hooks/use-conversation-id";
export function ConversationTabs() {
+ const { conversationId } = useConversationId();
const {
selectedTab,
isRightPanelShown,
@@ -30,18 +32,21 @@ export function ConversationTabs() {
const [isMenuOpen, setIsMenuOpen] = useState(false);
- // Persist selectedTab and isRightPanelShown in localStorage
+ // Persist selectedTab and isRightPanelShown in localStorage per conversation
const [persistedSelectedTab, setPersistedSelectedTab] =
useLocalStorage(
- "conversation-selected-tab",
+ `conversation-selected-tab-${conversationId}`,
"editor",
);
const [persistedIsRightPanelShown, setPersistedIsRightPanelShown] =
- useLocalStorage("conversation-right-panel-shown", true);
+ useLocalStorage(
+ `conversation-right-panel-shown-${conversationId}`,
+ true,
+ );
const [persistedUnpinnedTabs] = useLocalStorage(
- "conversation-unpinned-tabs",
+ `conversation-unpinned-tabs-${conversationId}`,
[],
);
diff --git a/frontend/src/components/features/conversation/metrics-modal/context-window-section.tsx b/frontend/src/components/features/conversation/metrics-modal/context-window-section.tsx
index 5908003caede..2fbfd1bc2d55 100644
--- a/frontend/src/components/features/conversation/metrics-modal/context-window-section.tsx
+++ b/frontend/src/components/features/conversation/metrics-modal/context-window-section.tsx
@@ -12,7 +12,8 @@ export function ContextWindowSection({
}: ContextWindowSectionProps) {
const { t } = useTranslation();
- const usagePercentage = (perTurnToken / contextWindow) * 100;
+ const usagePercentage =
+ contextWindow > 0 ? (perTurnToken / contextWindow) * 100 : 0;
const progressWidth = Math.min(100, usagePercentage);
return (
diff --git a/frontend/src/components/features/home/git-provider-dropdown/git-provider-dropdown.tsx b/frontend/src/components/features/home/git-provider-dropdown/git-provider-dropdown.tsx
index a9f979f9cb00..fdc9b21b001b 100644
--- a/frontend/src/components/features/home/git-provider-dropdown/git-provider-dropdown.tsx
+++ b/frontend/src/components/features/home/git-provider-dropdown/git-provider-dropdown.tsx
@@ -51,6 +51,8 @@ export function GitProviderDropdown({
return "GitLab";
case "bitbucket":
return "Bitbucket";
+ case "azure_devops":
+ return "Azure DevOps";
case "enterprise_sso":
return "Enterprise SSO";
default:
diff --git a/frontend/src/components/features/home/recent-conversations/recent-conversation.tsx b/frontend/src/components/features/home/recent-conversations/recent-conversation.tsx
index 7fcabe2f1fb9..d86bac55bff4 100644
--- a/frontend/src/components/features/home/recent-conversations/recent-conversation.tsx
+++ b/frontend/src/components/features/home/recent-conversations/recent-conversation.tsx
@@ -67,12 +67,14 @@ export function RecentConversation({ conversation }: RecentConversationProps) {
) : null}
-
- {formatTimeDelta(
- new Date(conversation.created_at || conversation.last_updated_at),
- )}{" "}
- {t(I18nKey.CONVERSATION$AGO)}
-
+ {(conversation.created_at || conversation.last_updated_at) && (
+
+ {formatTimeDelta(
+ conversation.created_at || conversation.last_updated_at,
+ )}{" "}
+ {t(I18nKey.CONVERSATION$AGO)}
+
+ )}
diff --git a/frontend/src/components/features/home/tasks/task-card.tsx b/frontend/src/components/features/home/tasks/task-card.tsx
index 39652e880c8d..50eede97ae14 100644
--- a/frontend/src/components/features/home/tasks/task-card.tsx
+++ b/frontend/src/components/features/home/tasks/task-card.tsx
@@ -56,6 +56,15 @@ export function TaskCard({ task }: TaskCardProps) {
const issueType =
task.task_type === "OPEN_ISSUE" ? "issues" : "pull-requests";
href = `https://bitbucket.org/${task.repo}/${issueType}/${task.issue_number}`;
+ } else if (task.git_provider === "azure_devops") {
+ // Azure DevOps URL format: https://dev.azure.com/{organization}/{project}/_workitems/edit/{id}
+ // or https://dev.azure.com/{organization}/{project}/_git/{repo}/pullrequest/{id}
+ const azureDevOpsBaseUrl = "https://dev.azure.com";
+ if (task.task_type === "OPEN_ISSUE") {
+ href = `${azureDevOpsBaseUrl}/${task.repo}/_workitems/edit/${task.issue_number}`;
+ } else {
+ href = `${azureDevOpsBaseUrl}/${task.repo}/_git/${task.repo.split("/")[1]}/pullrequest/${task.issue_number}`;
+ }
} else {
const hrefType = task.task_type === "OPEN_ISSUE" ? "issues" : "pull";
href = `https://github.com/${task.repo}/${hrefType}/${task.issue_number}`;
diff --git a/frontend/src/components/features/markdown/headings.tsx b/frontend/src/components/features/markdown/headings.tsx
index 2e12fc7db4fe..3098a4514a64 100644
--- a/frontend/src/components/features/markdown/headings.tsx
+++ b/frontend/src/components/features/markdown/headings.tsx
@@ -8,7 +8,7 @@ export function h1({
React.HTMLAttributes
&
ExtraProps) {
return (
-
+
{children}
);
diff --git a/frontend/src/components/features/markdown/markdown-renderer.tsx b/frontend/src/components/features/markdown/markdown-renderer.tsx
new file mode 100644
index 000000000000..0cb55498d63f
--- /dev/null
+++ b/frontend/src/components/features/markdown/markdown-renderer.tsx
@@ -0,0 +1,80 @@
+import Markdown, { Components } from "react-markdown";
+import remarkGfm from "remark-gfm";
+import remarkBreaks from "remark-breaks";
+import { code } from "./code";
+import { ul, ol } from "./list";
+import { paragraph } from "./paragraph";
+import { anchor } from "./anchor";
+import { h1, h2, h3, h4, h5, h6 } from "./headings";
+
+interface MarkdownRendererProps {
+ /**
+ * The markdown content to render. Can be passed as children (string) or content prop.
+ */
+ children?: string;
+ content?: string;
+ /**
+ * Additional or override components for markdown elements.
+ * Default components (code, ul, ol) are always included unless overridden.
+ */
+ components?: Partial;
+ /**
+ * Whether to include standard components (anchor, paragraph).
+ * Defaults to false.
+ */
+ includeStandard?: boolean;
+ /**
+ * Whether to include heading components (h1-h6).
+ * Defaults to false.
+ */
+ includeHeadings?: boolean;
+}
+
+/**
+ * A reusable Markdown renderer component that provides consistent
+ * markdown rendering across the application.
+ *
+ * By default, includes:
+ * - code, ul, ol components
+ * - remarkGfm and remarkBreaks plugins
+ *
+ * Can be extended with:
+ * - includeStandard: adds anchor and paragraph components
+ * - includeHeadings: adds h1-h6 heading components
+ * - components prop: allows custom overrides or additional components
+ */
+export function MarkdownRenderer({
+ children,
+ content,
+ components: customComponents,
+ includeStandard = false,
+ includeHeadings = false,
+}: MarkdownRendererProps) {
+ // Build the components object with defaults and optional additions
+ const components: Components = {
+ code,
+ ul,
+ ol,
+ ...(includeStandard && {
+ a: anchor,
+ p: paragraph,
+ }),
+ ...(includeHeadings && {
+ h1,
+ h2,
+ h3,
+ h4,
+ h5,
+ h6,
+ }),
+ ...customComponents, // Custom components override defaults
+ };
+
+ const markdownContent = content ?? children ?? "";
+
+ return (
+
+ {markdownContent}
+
+ );
+}
diff --git a/frontend/src/components/features/microagent-management/microagent-management-view-microagent-content.tsx b/frontend/src/components/features/microagent-management/microagent-management-view-microagent-content.tsx
index dc5b5fecaa59..2994946731e3 100644
--- a/frontend/src/components/features/microagent-management/microagent-management-view-microagent-content.tsx
+++ b/frontend/src/components/features/microagent-management/microagent-management-view-microagent-content.tsx
@@ -1,16 +1,10 @@
import { useTranslation } from "react-i18next";
import { Spinner } from "@heroui/react";
-import Markdown from "react-markdown";
-import remarkGfm from "remark-gfm";
-import remarkBreaks from "remark-breaks";
-import { code } from "../markdown/code";
-import { ul, ol } from "../markdown/list";
-import { paragraph } from "../markdown/paragraph";
-import { anchor } from "../markdown/anchor";
import { useMicroagentManagementStore } from "#/state/microagent-management-store";
import { useRepositoryMicroagentContent } from "#/hooks/query/use-repository-microagent-content";
import { I18nKey } from "#/i18n/declaration";
import { extractRepositoryInfo } from "#/utils/utils";
+import { MarkdownRenderer } from "../markdown/markdown-renderer";
export function MicroagentManagementViewMicroagentContent() {
const { t } = useTranslation();
@@ -49,18 +43,9 @@ export function MicroagentManagementViewMicroagentContent() {
)}
{microagentData && !isLoading && !error && (
-
+
{microagentData.content}
-
+
)}
);
diff --git a/frontend/src/components/features/settings/git-settings/azure-devops-token-help-anchor.tsx b/frontend/src/components/features/settings/git-settings/azure-devops-token-help-anchor.tsx
new file mode 100644
index 000000000000..b48ccf88b083
--- /dev/null
+++ b/frontend/src/components/features/settings/git-settings/azure-devops-token-help-anchor.tsx
@@ -0,0 +1,20 @@
+import { useTranslation } from "react-i18next";
+import { I18nKey } from "#/i18n/declaration";
+
+export function AzureDevOpsTokenHelpAnchor() {
+ const { t } = useTranslation();
+
+ return (
+
+
+ {t(I18nKey.GIT$AZURE_DEVOPS_TOKEN_HELP)}
+
+
+ );
+}
diff --git a/frontend/src/components/features/settings/git-settings/azure-devops-token-input.tsx b/frontend/src/components/features/settings/git-settings/azure-devops-token-input.tsx
new file mode 100644
index 000000000000..5f8d34094a2f
--- /dev/null
+++ b/frontend/src/components/features/settings/git-settings/azure-devops-token-input.tsx
@@ -0,0 +1,64 @@
+import { useTranslation } from "react-i18next";
+import { I18nKey } from "#/i18n/declaration";
+import { SettingsInput } from "../settings-input";
+import { AzureDevOpsTokenHelpAnchor } from "./azure-devops-token-help-anchor";
+import { KeyStatusIcon } from "../key-status-icon";
+
+interface AzureDevOpsTokenInputProps {
+ onChange: (value: string) => void;
+ onAzureDevOpsHostChange: (value: string) => void;
+ isAzureDevOpsTokenSet: boolean;
+ name: string;
+ azureDevOpsHostSet: string | null | undefined;
+}
+
+export function AzureDevOpsTokenInput({
+ onChange,
+ onAzureDevOpsHostChange,
+ isAzureDevOpsTokenSet,
+ name,
+ azureDevOpsHostSet,
+}: AzureDevOpsTokenInputProps) {
+ const { t } = useTranslation();
+
+ return (
+
+
" : ""}
+ startContent={
+ isAzureDevOpsTokenSet && (
+
+ )
+ }
+ />
+
+ {})}
+ name="azure-devops-host-input"
+ testId="azure-devops-host-input"
+ label={t(I18nKey.GIT$AZURE_DEVOPS_HOST)}
+ type="text"
+ className="w-full max-w-[680px]"
+ placeholder={t(I18nKey.GIT$AZURE_DEVOPS_HOST_PLACEHOLDER)}
+ defaultValue={azureDevOpsHostSet || undefined}
+ startContent={
+ azureDevOpsHostSet &&
+ azureDevOpsHostSet.trim() !== "" && (
+
+ )
+ }
+ />
+
+
+
+ );
+}
diff --git a/frontend/src/components/features/settings/git-settings/configure-azure-devops-anchor.tsx b/frontend/src/components/features/settings/git-settings/configure-azure-devops-anchor.tsx
new file mode 100644
index 000000000000..c2afd7751c53
--- /dev/null
+++ b/frontend/src/components/features/settings/git-settings/configure-azure-devops-anchor.tsx
@@ -0,0 +1,37 @@
+import { useTranslation } from "react-i18next";
+import { I18nKey } from "#/i18n/declaration";
+import { useConfig } from "#/hooks/query/use-config";
+import { useAuthUrl } from "#/hooks/use-auth-url";
+import { BrandButton } from "../brand-button";
+
+export function ConfigureAzureDevOpsAnchor() {
+ const { t } = useTranslation();
+ const { data: config } = useConfig();
+
+ const authUrl = useAuthUrl({
+ appMode: config?.APP_MODE ?? null,
+ identityProvider: "azure_devops",
+ authUrl: config?.AUTH_URL,
+ });
+
+ const handleOAuthFlow = () => {
+ if (!authUrl) {
+ return;
+ }
+
+ window.location.href = authUrl;
+ };
+
+ return (
+
+
+ {t(I18nKey.AZURE_DEVOPS$CONNECT_ACCOUNT)}
+
+
+ );
+}
diff --git a/frontend/src/components/features/sidebar/sidebar.tsx b/frontend/src/components/features/sidebar/sidebar.tsx
index 0580d9db3bc4..467e51f2dc98 100644
--- a/frontend/src/components/features/sidebar/sidebar.tsx
+++ b/frontend/src/components/features/sidebar/sidebar.tsx
@@ -34,13 +34,7 @@ export function Sidebar() {
const { pathname } = useLocation();
- // TODO: Remove HIDE_LLM_SETTINGS check once released
- const shouldHideLlmSettings =
- config?.FEATURE_FLAGS.HIDE_LLM_SETTINGS && config?.APP_MODE === "saas";
-
React.useEffect(() => {
- if (shouldHideLlmSettings) return;
-
if (location.pathname === "/settings") {
setSettingsModalIsOpen(false);
} else if (
diff --git a/frontend/src/components/features/waitlist/auth-modal.tsx b/frontend/src/components/features/waitlist/auth-modal.tsx
index d20ef04a2839..2c431fbd955e 100644
--- a/frontend/src/components/features/waitlist/auth-modal.tsx
+++ b/frontend/src/components/features/waitlist/auth-modal.tsx
@@ -8,6 +8,7 @@ import { BrandButton } from "../settings/brand-button";
import GitHubLogo from "#/assets/branding/github-logo.svg?react";
import GitLabLogo from "#/assets/branding/gitlab-logo.svg?react";
import BitbucketLogo from "#/assets/branding/bitbucket-logo.svg?react";
+import AzureDevOpsLogo from "#/assets/branding/azure-devops-logo.svg?react";
import { useAuthUrl } from "#/hooks/use-auth-url";
import { GetConfigResponse } from "#/api/option-service/option.types";
import { Provider } from "#/types/settings";
@@ -41,6 +42,12 @@ export function AuthModal({
authUrl,
});
+ const azureDevOpsAuthUrl = useAuthUrl({
+ appMode: appMode || null,
+ identityProvider: "azure_devops",
+ authUrl,
+ });
+
const enterpriseSsoUrl = useAuthUrl({
appMode: appMode || null,
identityProvider: "enterprise_sso",
@@ -71,6 +78,13 @@ export function AuthModal({
}
};
+ const handleAzureDevOpsAuth = () => {
+ if (azureDevOpsAuthUrl) {
+ // Always start the OIDC flow, let the backend handle TOS check
+ window.location.href = azureDevOpsAuthUrl;
+ }
+ };
+
const handleEnterpriseSsoAuth = () => {
if (enterpriseSsoUrl) {
trackLoginButtonClick({ provider: "enterprise_sso" });
@@ -92,6 +106,10 @@ export function AuthModal({
providersConfigured &&
providersConfigured.length > 0 &&
providersConfigured.includes("bitbucket");
+ const showAzureDevOps =
+ providersConfigured &&
+ providersConfigured.length > 0 &&
+ providersConfigured.includes("azure_devops");
const showEnterpriseSso =
providersConfigured &&
providersConfigured.length > 0 &&
@@ -154,6 +172,18 @@ export function AuthModal({
)}
+ {showAzureDevOps && (
+
}
+ >
+ {t(I18nKey.AZURE_DEVOPS$CONNECT_ACCOUNT)}
+
+ )}
+
{showEnterpriseSso && (
{gitProvider === "github" && }
- {gitProvider === "gitlab" && }
- {gitProvider === "bitbucket" && }
+ {gitProvider === "gitlab" && }
+ {gitProvider === "bitbucket" && (
+
+ )}
+ {gitProvider === "azure_devops" && (
+
+ )}
>
);
}
diff --git a/frontend/src/components/shared/modals/settings/model-selector.tsx b/frontend/src/components/shared/modals/settings/model-selector.tsx
index 1fd03d7c8f8a..b542c4e695a0 100644
--- a/frontend/src/components/shared/modals/settings/model-selector.tsx
+++ b/frontend/src/components/shared/modals/settings/model-selector.tsx
@@ -21,7 +21,11 @@ interface ModelSelectorProps {
isDisabled?: boolean;
models: Record;
currentModel?: string;
- onChange?: (model: string | null) => void;
+ onChange?: (provider: string | null, model: string | null) => void;
+ onDefaultValuesChanged?: (
+ provider: string | null,
+ model: string | null,
+ ) => void;
wrapperClassName?: string;
labelClassName?: string;
}
@@ -31,6 +35,7 @@ export function ModelSelector({
models,
currentModel,
onChange,
+ onDefaultValuesChanged,
wrapperClassName,
labelClassName,
}: ModelSelectorProps) {
@@ -56,6 +61,7 @@ export function ModelSelector({
setLitellmId(currentModel);
setSelectedProvider(provider);
setSelectedModel(model);
+ onDefaultValuesChanged?.(provider, model);
}
}, [currentModel]);
@@ -65,6 +71,7 @@ export function ModelSelector({
const separator = models[provider]?.separator || "";
setLitellmId(provider + separator);
+ onChange?.(provider, null);
};
const handleChangeModel = (model: string) => {
@@ -76,7 +83,7 @@ export function ModelSelector({
}
setLitellmId(fullModel);
setSelectedModel(model);
- onChange?.(fullModel);
+ onChange?.(selectedProvider, model);
};
const clear = () => {
diff --git a/frontend/src/components/v1/chat/event-content-helpers/__tests__/get-observation-content.test.ts b/frontend/src/components/v1/chat/event-content-helpers/__tests__/get-observation-content.test.ts
new file mode 100644
index 000000000000..d35dc97925b0
--- /dev/null
+++ b/frontend/src/components/v1/chat/event-content-helpers/__tests__/get-observation-content.test.ts
@@ -0,0 +1,92 @@
+import { describe, it, expect } from "vitest";
+import { getObservationContent } from "../get-observation-content";
+import { ObservationEvent } from "#/types/v1/core";
+import { BrowserObservation } from "#/types/v1/core/base/observation";
+
+describe("getObservationContent - BrowserObservation", () => {
+ it("should return output content when available", () => {
+ const mockEvent: ObservationEvent = {
+ id: "test-id",
+ timestamp: "2024-01-01T00:00:00Z",
+ source: "environment",
+ tool_name: "browser_navigate",
+ tool_call_id: "call-id",
+ action_id: "action-id",
+ observation: {
+ kind: "BrowserObservation",
+ output: "Browser action completed",
+ error: null,
+ screenshot_data: "base64data",
+ },
+ };
+
+ const result = getObservationContent(mockEvent);
+
+ expect(result).toContain("**Output:**");
+ expect(result).toContain("Browser action completed");
+ });
+
+ it("should handle error cases properly", () => {
+ const mockEvent: ObservationEvent = {
+ id: "test-id",
+ timestamp: "2024-01-01T00:00:00Z",
+ source: "environment",
+ tool_name: "browser_navigate",
+ tool_call_id: "call-id",
+ action_id: "action-id",
+ observation: {
+ kind: "BrowserObservation",
+ output: "",
+ error: "Browser action failed",
+ screenshot_data: null,
+ },
+ };
+
+ const result = getObservationContent(mockEvent);
+
+ expect(result).toContain("**Error:**");
+ expect(result).toContain("Browser action failed");
+ });
+
+ it("should provide default message when no output or error", () => {
+ const mockEvent: ObservationEvent = {
+ id: "test-id",
+ timestamp: "2024-01-01T00:00:00Z",
+ source: "environment",
+ tool_name: "browser_navigate",
+ tool_call_id: "call-id",
+ action_id: "action-id",
+ observation: {
+ kind: "BrowserObservation",
+ output: "",
+ error: null,
+ screenshot_data: "base64data",
+ },
+ };
+
+ const result = getObservationContent(mockEvent);
+
+ expect(result).toBe("Browser action completed successfully.");
+ });
+
+ it("should return output when screenshot_data is null", () => {
+ const mockEvent: ObservationEvent = {
+ id: "test-id",
+ timestamp: "2024-01-01T00:00:00Z",
+ source: "environment",
+ tool_name: "browser_navigate",
+ tool_call_id: "call-id",
+ action_id: "action-id",
+ observation: {
+ kind: "BrowserObservation",
+ output: "Page loaded successfully",
+ error: null,
+ screenshot_data: null,
+ },
+ };
+
+ const result = getObservationContent(mockEvent);
+
+ expect(result).toBe("**Output:**\nPage loaded successfully");
+ });
+});
diff --git a/frontend/src/components/v1/chat/event-content-helpers/create-skill-ready-event.ts b/frontend/src/components/v1/chat/event-content-helpers/create-skill-ready-event.ts
new file mode 100644
index 000000000000..4682b8a90f59
--- /dev/null
+++ b/frontend/src/components/v1/chat/event-content-helpers/create-skill-ready-event.ts
@@ -0,0 +1,56 @@
+import { MessageEvent } from "#/types/v1/core";
+import { BaseEvent } from "#/types/v1/core/base/event";
+import { getSkillReadyContent } from "./get-skill-ready-content";
+
+/**
+ * Synthetic event type for Skill Ready events.
+ * This extends BaseEvent and includes a marker to identify it as a skill ready event.
+ */
+export interface SkillReadyEvent extends BaseEvent {
+ _isSkillReadyEvent: true;
+ _skillReadyContent: string;
+}
+
+/**
+ * Type guard for Skill Ready events.
+ */
+export const isSkillReadyEvent = (event: unknown): event is SkillReadyEvent =>
+ typeof event === "object" &&
+ event !== null &&
+ "_isSkillReadyEvent" in event &&
+ event._isSkillReadyEvent === true;
+
+/**
+ * Creates a synthetic "Skill Ready" event from a user MessageEvent.
+ * This event appears as originating from the agent and contains formatted
+ * information about activated skills and extended content.
+ */
+export const createSkillReadyEvent = (
+ userEvent: MessageEvent,
+): SkillReadyEvent => {
+ // Support both activated_skills and activated_microagents field names
+ const activatedSkills =
+ (userEvent as unknown as { activated_skills?: string[] })
+ .activated_skills ||
+ userEvent.activated_microagents ||
+ [];
+
+ const extendedContent = userEvent.extended_content || [];
+
+ // Only create event if we have skills or extended content
+ if (activatedSkills.length === 0 && extendedContent.length === 0) {
+ throw new Error(
+ "Cannot create skill ready event without activated skills or extended content",
+ );
+ }
+
+ const content = getSkillReadyContent(activatedSkills, extendedContent);
+
+ return {
+ id: `${userEvent.id}-skill-ready`,
+ timestamp: userEvent.timestamp,
+ source: "agent",
+ _isSkillReadyEvent: true,
+ _skillReadyContent: content,
+ };
+};
diff --git a/frontend/src/components/v1/chat/event-content-helpers/get-action-content.ts b/frontend/src/components/v1/chat/event-content-helpers/get-action-content.ts
index fe6bf842c3e1..148949365215 100644
--- a/frontend/src/components/v1/chat/event-content-helpers/get-action-content.ts
+++ b/frontend/src/components/v1/chat/event-content-helpers/get-action-content.ts
@@ -4,6 +4,7 @@ import i18n from "#/i18n";
import { SecurityRisk } from "#/types/v1/core/base/common";
import {
ExecuteBashAction,
+ TerminalAction,
FileEditorAction,
StrReplaceEditorAction,
MCPToolAction,
@@ -58,7 +59,7 @@ const getFileEditorActionContent = (
// Command Actions
const getExecuteBashActionContent = (
-  event: ActionEvent<ExecuteBashAction>,
+  event: ActionEvent<ExecuteBashAction | TerminalAction>,
): string => {
let content = `Command:\n\`${event.action.command}\``;
@@ -131,27 +132,61 @@ type BrowserAction =
const getBrowserActionContent = (action: BrowserAction): string => {
switch (action.kind) {
- case "BrowserNavigateAction":
- if ("url" in action) {
- return `Browsing ${action.url}`;
+ case "BrowserNavigateAction": {
+ let content = `Browsing ${action.url}`;
+ if (action.new_tab) {
+ content += `\n**New Tab:** Yes`;
}
- break;
- case "BrowserClickAction":
- case "BrowserTypeAction":
- case "BrowserGetStateAction":
- case "BrowserGetContentAction":
- case "BrowserScrollAction":
- case "BrowserGoBackAction":
- case "BrowserListTabsAction":
- case "BrowserSwitchTabAction":
- case "BrowserCloseTabAction":
- // These browser actions typically don't need detailed content display
+ return content;
+ }
+ case "BrowserClickAction": {
+ let content = `**Element Index:** ${action.index}`;
+ if (action.new_tab) {
+ content += `\n**New Tab:** Yes`;
+ }
+ return content;
+ }
+ case "BrowserTypeAction": {
+ const textPreview =
+ action.text.length > 50
+ ? `${action.text.slice(0, 50)}...`
+ : action.text;
+ return `**Element Index:** ${action.index}\n**Text:** ${textPreview}`;
+ }
+ case "BrowserGetStateAction": {
+ if (action.include_screenshot) {
+ return `**Include Screenshot:** Yes`;
+ }
+ return getNoContentActionContent();
+ }
+ case "BrowserGetContentAction": {
+ const parts: string[] = [];
+ if (action.extract_links) {
+ parts.push(`**Extract Links:** Yes`);
+ }
+ if (action.start_from_char > 0) {
+ parts.push(`**Start From Character:** ${action.start_from_char}`);
+ }
+ return parts.length > 0 ? parts.join("\n") : getNoContentActionContent();
+ }
+ case "BrowserScrollAction": {
+ return `**Direction:** ${action.direction}`;
+ }
+ case "BrowserGoBackAction": {
return getNoContentActionContent();
+ }
+ case "BrowserListTabsAction": {
+ return getNoContentActionContent();
+ }
+ case "BrowserSwitchTabAction": {
+ return `**Tab ID:** ${action.tab_id}`;
+ }
+ case "BrowserCloseTabAction": {
+ return `**Tab ID:** ${action.tab_id}`;
+ }
default:
return getNoContentActionContent();
}
-
- return getNoContentActionContent();
};
export const getActionContent = (event: ActionEvent): string => {
@@ -164,8 +199,9 @@ export const getActionContent = (event: ActionEvent): string => {
return getFileEditorActionContent(action);
case "ExecuteBashAction":
+ case "TerminalAction":
return getExecuteBashActionContent(
-      event as ActionEvent<ExecuteBashAction>,
+      event as ActionEvent<ExecuteBashAction | TerminalAction>,
);
case "MCPToolAction":
diff --git a/frontend/src/components/v1/chat/event-content-helpers/get-event-content.tsx b/frontend/src/components/v1/chat/event-content-helpers/get-event-content.tsx
index b2e7d69868d8..d9b9bf2d2db6 100644
--- a/frontend/src/components/v1/chat/event-content-helpers/get-event-content.tsx
+++ b/frontend/src/components/v1/chat/event-content-helpers/get-event-content.tsx
@@ -1,10 +1,14 @@
import { Trans } from "react-i18next";
-import { OpenHandsEvent } from "#/types/v1/core";
+import React from "react";
+import { OpenHandsEvent, ObservationEvent } from "#/types/v1/core";
import { isActionEvent, isObservationEvent } from "#/types/v1/type-guards";
import { MonoComponent } from "../../../features/chat/mono-component";
import { PathComponent } from "../../../features/chat/path-component";
import { getActionContent } from "./get-action-content";
import { getObservationContent } from "./get-observation-content";
+import { TaskTrackingObservationContent } from "../task-tracking/task-tracking-observation-content";
+import { TaskTrackerObservation } from "#/types/v1/core/base/observation";
+import { SkillReadyEvent, isSkillReadyEvent } from "./create-skill-ready-event";
import i18n from "#/i18n";
const trimText = (text: string, maxLength: number): string => {
@@ -46,6 +50,7 @@ const getActionEventTitle = (event: OpenHandsEvent): React.ReactNode => {
switch (actionType) {
case "ExecuteBashAction":
+ case "TerminalAction":
actionKey = "ACTION_MESSAGE$RUN";
actionValues = {
command: trimText(event.action.command, 80),
@@ -80,11 +85,20 @@ const getActionEventTitle = (event: OpenHandsEvent): React.ReactNode => {
actionKey = "ACTION_MESSAGE$TASK_TRACKING";
break;
case "BrowserNavigateAction":
+ case "BrowserClickAction":
+ case "BrowserTypeAction":
+ case "BrowserGetStateAction":
+ case "BrowserGetContentAction":
+ case "BrowserScrollAction":
+ case "BrowserGoBackAction":
+ case "BrowserListTabsAction":
+ case "BrowserSwitchTabAction":
+ case "BrowserCloseTabAction":
actionKey = "ACTION_MESSAGE$BROWSE";
break;
default:
// For unknown actions, use the type name
- return actionType.replace("Action", "").toUpperCase();
+ return String(actionType).replace("Action", "").toUpperCase();
}
if (actionKey) {
@@ -107,6 +121,7 @@ const getObservationEventTitle = (event: OpenHandsEvent): React.ReactNode => {
switch (observationType) {
case "ExecuteBashObservation":
+ case "TerminalObservation":
observationKey = "OBSERVATION_MESSAGE$RUN";
observationValues = {
command: event.observation.command
@@ -156,16 +171,36 @@ const getObservationEventTitle = (event: OpenHandsEvent): React.ReactNode => {
return observationType;
};
-export const getEventContent = (event: OpenHandsEvent) => {
+export const getEventContent = (event: OpenHandsEvent | SkillReadyEvent) => {
let title: React.ReactNode = "";
- let details: string = "";
-
- if (isActionEvent(event)) {
+ let details: string | React.ReactNode = "";
+
+ // Handle Skill Ready events first
+ if (isSkillReadyEvent(event)) {
+ // Use translation key if available, otherwise use "SKILL READY"
+ const skillReadyKey = "OBSERVATION_MESSAGE$SKILL_READY";
+ if (i18n.exists(skillReadyKey)) {
+ title = createTitleFromKey(skillReadyKey, {});
+ } else {
+ title = "Skill Ready";
+ }
+ details = event._skillReadyContent;
+ } else if (isActionEvent(event)) {
title = getActionEventTitle(event);
details = getActionContent(event);
} else if (isObservationEvent(event)) {
title = getObservationEventTitle(event);
- details = getObservationContent(event);
+
+ // For TaskTrackerObservation, use React component instead of markdown
+ if (event.observation.kind === "TaskTrackerObservation") {
+      details = (
+        <TaskTrackingObservationContent
+          event={
+            event as ObservationEvent & { observation: TaskTrackerObservation }
+          }
+        />
+      );
+ } else {
+ details = getObservationContent(event);
+ }
}
return {
diff --git a/frontend/src/components/v1/chat/event-content-helpers/get-observation-content.ts b/frontend/src/components/v1/chat/event-content-helpers/get-observation-content.ts
index ef4ffa253fbb..bf443ea71c79 100644
--- a/frontend/src/components/v1/chat/event-content-helpers/get-observation-content.ts
+++ b/frontend/src/components/v1/chat/event-content-helpers/get-observation-content.ts
@@ -8,6 +8,7 @@ import {
ThinkObservation,
BrowserObservation,
ExecuteBashObservation,
+ TerminalObservation,
FileEditorObservation,
StrReplaceEditorObservation,
TaskTrackerObservation,
@@ -23,6 +24,15 @@ const getFileEditorObservationContent = (
return `**Error:**\n${observation.error}`;
}
+ // Extract text content from the observation if it exists
+ const textContent =
+ "content" in observation && Array.isArray(observation.content)
+ ? observation.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("\n")
+ : null;
+
const successMessage = getObservationResult(event) === "success";
// For view commands or successful edits with content changes, format as code block
@@ -34,30 +44,45 @@ const getFileEditorObservationContent = (
observation.new_content) ||
observation.command === "view"
) {
- return `\`\`\`\n${observation.output}\n\`\`\``;
+ // Prefer content over output for view commands, fallback to output if content is not available
+ const displayContent = textContent || observation.output;
+ return `\`\`\`\n${displayContent}\n\`\`\``;
}
- // For other commands, return the output as-is
- return observation.output;
+ // For other commands, prefer content if available, otherwise use output
+ return textContent || observation.output;
};
// Command Observations
-const getExecuteBashObservationContent = (
-  event: ObservationEvent<ExecuteBashObservation>,
+const getTerminalObservationContent = (
+  event: ObservationEvent<ExecuteBashObservation | TerminalObservation>,
): string => {
const { observation } = event;
- let { output } = observation;
+ // Extract text content from the observation
+ const textContent = observation.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("\n");
+
+ let content = textContent || "";
- if (!output) {
- output = "";
+ if (content.length > MAX_CONTENT_LENGTH) {
+ content = `${content.slice(0, MAX_CONTENT_LENGTH)}...`;
}
- if (output.length > MAX_CONTENT_LENGTH) {
- output = `${output.slice(0, MAX_CONTENT_LENGTH)}...`;
+ // Build the output string
+ let output = "";
+
+ // Display the command if available
+ if (observation.command) {
+ output += `Command: \`${observation.command}\`\n\n`;
}
- return `Output:\n\`\`\`sh\n${output.trim() || i18n.t("OBSERVATION$COMMAND_NO_OUTPUT")}\n\`\`\``;
+ // Display the output
+ output += `Output:\n\`\`\`sh\n${content.trim() || i18n.t("OBSERVATION$COMMAND_NO_OUTPUT")}\n\`\`\``;
+
+ return output;
};
// Tool Observations
@@ -66,14 +91,25 @@ const getBrowserObservationContent = (
): string => {
const { observation } = event;
+ // Extract text content from the observation
+ const textContent =
+ "content" in observation && Array.isArray(observation.content)
+ ? observation.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("\n")
+ : observation.output || "";
+
let contentDetails = "";
- if ("error" in observation && observation.error) {
- contentDetails += `**Error:**\n${observation.error}\n\n`;
+ if (observation.error) {
+ contentDetails += `**Error:**\n${observation.error}`;
+ } else if (textContent) {
+ contentDetails += `**Output:**\n${textContent}`;
+ } else {
+ contentDetails += "Browser action completed successfully.";
}
- contentDetails += `**Output:**\n${observation.output}`;
-
if (contentDetails.length > MAX_CONTENT_LENGTH) {
contentDetails = `${contentDetails.slice(0, MAX_CONTENT_LENGTH)}...(truncated)`;
}
@@ -161,7 +197,22 @@ const getFinishObservationContent = (
event: ObservationEvent,
): string => {
const { observation } = event;
- return observation.message || "";
+
+ // Extract text content from the observation
+ const textContent = observation.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("\n");
+
+ let content = "";
+
+ if (observation.is_error) {
+ content += `**Error:**\n${textContent}`;
+ } else {
+ content += textContent;
+ }
+
+ return content;
};
export const getObservationContent = (event: ObservationEvent): string => {
@@ -177,8 +228,9 @@ export const getObservationContent = (event: ObservationEvent): string => {
);
case "ExecuteBashObservation":
-      return getExecuteBashObservationContent(
-        event as ObservationEvent<ExecuteBashObservation>,
+    case "TerminalObservation":
+      return getTerminalObservationContent(
+        event as ObservationEvent<ExecuteBashObservation | TerminalObservation>,
);
case "BrowserObservation":
diff --git a/frontend/src/components/v1/chat/event-content-helpers/get-observation-result.ts b/frontend/src/components/v1/chat/event-content-helpers/get-observation-result.ts
index e5a52bfe9540..790ecb00cf86 100644
--- a/frontend/src/components/v1/chat/event-content-helpers/get-observation-result.ts
+++ b/frontend/src/components/v1/chat/event-content-helpers/get-observation-result.ts
@@ -17,6 +17,15 @@ export const getObservationResult = (
if (exitCode === 0 || metadata.exit_code === 0) return "success"; // Command executed successfully
return "error"; // Command failed
}
+ case "TerminalObservation": {
+ const exitCode =
+ observation.exit_code ?? observation.metadata.exit_code ?? null;
+
+ if (observation.timeout || exitCode === -1) return "timeout";
+ if (exitCode === 0) return "success";
+ if (observation.is_error) return "error";
+ return "success";
+ }
case "FileEditorObservation":
case "StrReplaceEditorObservation":
// Check if there's an error
diff --git a/frontend/src/components/v1/chat/event-content-helpers/get-skill-ready-content.ts b/frontend/src/components/v1/chat/event-content-helpers/get-skill-ready-content.ts
new file mode 100644
index 000000000000..5f4b14f84698
--- /dev/null
+++ b/frontend/src/components/v1/chat/event-content-helpers/get-skill-ready-content.ts
@@ -0,0 +1,108 @@
+import { TextContent } from "#/types/v1/core/base/common";
+
+/**
+ * Extracts all text content from an array of TextContent items.
+ */
+const extractAllText = (extendedContent: TextContent[]): string =>
+ extendedContent
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("");
+
+/**
+ * Extracts all blocks from the given text.
+ * Returns an array of content strings (without the wrapper tags).
+ */
+const extractExtraInfoBlocks = (text: string): string[] => {
+ const blocks: string[] = [];
+  const blockRegex = /<EXTRA_INFO>([\s\S]*?)<\/EXTRA_INFO>/gi;
+ let match = blockRegex.exec(text);
+
+ while (match !== null) {
+ const blockContent = match[1].trim();
+ if (blockContent.length > 0) {
+ blocks.push(blockContent);
+ }
+ match = blockRegex.exec(text);
+ }
+
+ return blocks;
+};
+
+/**
+ * Formats a single skill with its corresponding content block.
+ */
+const formatSkillWithContent = (
+ skill: string,
+ contentBlock: string | undefined,
+): string => {
+ let formatted = `\n\n- **${skill}**`;
+
+ if (contentBlock && contentBlock.trim().length > 0) {
+ formatted += `\n\n${contentBlock}`;
+ }
+
+ return formatted;
+};
+
+/**
+ * Formats skills paired with their corresponding extended content blocks.
+ */
+const formatSkillKnowledge = (
+ activatedSkills: string[],
+ extraInfoBlocks: string[],
+): string => {
+ let content = `\n\n**Triggered Skill Knowledge:**`;
+
+ activatedSkills.forEach((skill, index) => {
+ const contentBlock =
+ index < extraInfoBlocks.length ? extraInfoBlocks[index] : undefined;
+ content += formatSkillWithContent(skill, contentBlock);
+ });
+
+ return content;
+};
+
+/**
+ * Formats extended content blocks when no skills are present.
+ */
+const formatExtendedContentOnly = (extraInfoBlocks: string[]): string => {
+ let content = `\n\n**Extended Content:**`;
+
+ extraInfoBlocks.forEach((block) => {
+ if (block.trim().length > 0) {
+ content += `\n\n${block}`;
+ }
+ });
+
+ return content;
+};
+
+/**
+ * Formats activated skills and extended content into markdown for display.
+ * Similar to how v0 formats microagent knowledge in recall observations.
+ *
+ * Each skill is paired with its corresponding block by index.
+ */
+export const getSkillReadyContent = (
+ activatedSkills: string[],
+ extendedContent: TextContent[],
+): string => {
+ // Extract all blocks from extended_content
+ const extraInfoBlocks: string[] = [];
+ if (extendedContent && extendedContent.length > 0) {
+ const allText = extractAllText(extendedContent);
+ extraInfoBlocks.push(...extractExtraInfoBlocks(allText));
+ }
+
+ // Format output based on what we have
+ if (activatedSkills && activatedSkills.length > 0) {
+ return formatSkillKnowledge(activatedSkills, extraInfoBlocks);
+ }
+
+ if (extraInfoBlocks.length > 0) {
+ return formatExtendedContentOnly(extraInfoBlocks);
+ }
+
+ return "";
+};
diff --git a/frontend/src/components/v1/chat/event-message-components/finish-event-message.tsx b/frontend/src/components/v1/chat/event-message-components/finish-event-message.tsx
index 6ad385e8f095..3a30b0c4345a 100644
--- a/frontend/src/components/v1/chat/event-message-components/finish-event-message.tsx
+++ b/frontend/src/components/v1/chat/event-message-components/finish-event-message.tsx
@@ -27,13 +27,16 @@ export function FinishEventMessage({
microagentPRUrl,
actions,
}: FinishEventMessageProps) {
+ const eventContent = getEventContent(event);
+ // For FinishAction, details is always a string (getActionContent returns string)
+ const message =
+ typeof eventContent.details === "string"
+ ? eventContent.details
+ : String(eventContent.details);
+
return (
<>
-
+
{details} ;
+ }
+ if (event.observation.kind === "FinishObservation") {
+ return (
+
+ {details as string}
+
+ );
+ }
+ }
+ }
+
+ // Determine success status
+ let success: ObservationResultStatus | undefined;
+ if (isSkillReadyEvent(event)) {
+ // Skill Ready events should show success indicator, same as v0 recall observations
+ success = "success";
+ } else if (isObservationEvent(event)) {
+ success = getObservationResult(event);
+ }
+
return (
{isLastMessage &&
}
diff --git a/frontend/src/components/v1/chat/event-message-components/user-assistant-event-message.tsx b/frontend/src/components/v1/chat/event-message-components/user-assistant-event-message.tsx
index 6455dadbe3ad..a51b912860a1 100644
--- a/frontend/src/components/v1/chat/event-message-components/user-assistant-event-message.tsx
+++ b/frontend/src/components/v1/chat/event-message-components/user-assistant-event-message.tsx
@@ -22,6 +22,7 @@ interface UserAssistantEventMessageProps {
tooltip?: string;
}>;
isLastMessage: boolean;
+ isFromPlanningAgent: boolean;
}
export function UserAssistantEventMessage({
@@ -31,6 +32,7 @@ export function UserAssistantEventMessage({
microagentPRUrl,
actions,
isLastMessage,
+ isFromPlanningAgent,
}: UserAssistantEventMessageProps) {
const message = parseMessageFromEvent(event);
@@ -46,7 +48,12 @@ export function UserAssistantEventMessage({
return (
<>
-
+
{imageUrls.length > 0 && (
)}
diff --git a/frontend/src/components/v1/chat/event-message.tsx b/frontend/src/components/v1/chat/event-message.tsx
index dbe327b31b38..95690d89844b 100644
--- a/frontend/src/components/v1/chat/event-message.tsx
+++ b/frontend/src/components/v1/chat/event-message.tsx
@@ -5,6 +5,7 @@ import {
isActionEvent,
isObservationEvent,
isAgentErrorEvent,
+ isUserMessageEvent,
} from "#/types/v1/type-guards";
import { MicroagentStatus } from "#/types/microagent-status";
import { useConfig } from "#/hooks/query/use-config";
@@ -17,9 +18,10 @@ import {
GenericEventMessageWrapper,
ThoughtEventMessage,
} from "./event-message-components";
+import { createSkillReadyEvent } from "./event-content-helpers/create-skill-ready-event";
interface EventMessageProps {
- event: OpenHandsEvent;
+ event: OpenHandsEvent & { isFromPlanningAgent?: boolean };
messages: OpenHandsEvent[];
isLastMessage: boolean;
microagentStatus?: MicroagentStatus | null;
@@ -33,6 +35,104 @@ interface EventMessageProps {
isInLast10Actions: boolean;
}
+/**
+ * Extracts activated skills from a MessageEvent, supporting both
+ * activated_skills and activated_microagents field names.
+ */
+const getActivatedSkills = (event: MessageEvent): string[] =>
+ (event as unknown as { activated_skills?: string[] }).activated_skills ||
+ event.activated_microagents ||
+ [];
+
+/**
+ * Checks if extended content contains valid text content.
+ */
+const hasValidExtendedContent = (
+ extendedContent: MessageEvent["extended_content"],
+): boolean => {
+ if (!extendedContent || extendedContent.length === 0) {
+ return false;
+ }
+
+ return extendedContent.some(
+ (content) => content.type === "text" && content.text.trim().length > 0,
+ );
+};
+
+/**
+ * Determines if a Skill Ready event should be displayed for the given message event.
+ */
+const shouldShowSkillReadyEvent = (messageEvent: MessageEvent): boolean => {
+ const activatedSkills = getActivatedSkills(messageEvent);
+ const hasActivatedSkills = activatedSkills.length > 0;
+ const hasExtendedContent = hasValidExtendedContent(
+ messageEvent.extended_content,
+ );
+
+ return hasActivatedSkills && hasExtendedContent;
+};
+
+interface CommonProps {
+ microagentStatus?: MicroagentStatus | null;
+ microagentConversationId?: string;
+ microagentPRUrl?: string;
+ actions?: Array<{
+ icon: React.ReactNode;
+ onClick: () => void;
+ tooltip?: string;
+ }>;
+ isLastMessage: boolean;
+ isInLast10Actions: boolean;
+ config: unknown;
+ isCheckingFeedback: boolean;
+ feedbackData: { exists: boolean };
+ isFromPlanningAgent: boolean;
+}
+
+/**
+ * Renders a user message with its corresponding Skill Ready event.
+ */
+const renderUserMessageWithSkillReady = (
+ messageEvent: MessageEvent,
+ commonProps: CommonProps,
+ isLastMessage: boolean,
+): React.ReactElement => {
+ try {
+ const skillReadyEvent = createSkillReadyEvent(messageEvent);
+ return (
+ <>
+
+
+ >
+ );
+ } catch (error) {
+ // If skill ready event creation fails, just render the user message
+ // Failed to create skill ready event, fallback to user message
+ return (
+
+ );
+ }
+};
+
/* eslint-disable react/jsx-props-no-spreading */
export function EventMessage({
event,
@@ -51,6 +151,9 @@ export function EventMessage({
const feedbackData = { exists: false };
const isCheckingFeedback = false;
+ // Read isFromPlanningAgent directly from the event object
+ const isFromPlanningAgent = event.isFromPlanningAgent || false;
+
// Common props for components that need them
const commonProps = {
microagentStatus,
@@ -62,6 +165,7 @@ export function EventMessage({
config,
isCheckingFeedback,
feedbackData,
+ isFromPlanningAgent,
};
// Agent error events
@@ -114,10 +218,21 @@ export function EventMessage({
// Message events (user and assistant messages)
if (!isActionEvent(event) && !isObservationEvent(event)) {
- // This is a MessageEvent
+ const messageEvent = event as MessageEvent;
+
+ // Check if this is a user message that should display a Skill Ready event
+ if (isUserMessageEvent(event) && shouldShowSkillReadyEvent(messageEvent)) {
+ return renderUserMessageWithSkillReady(
+ messageEvent,
+ commonProps,
+ isLastMessage,
+ );
+ }
+
+ // Render normal message event (user or assistant)
return (
diff --git a/frontend/src/components/v1/chat/task-tracking/task-item.tsx b/frontend/src/components/v1/chat/task-tracking/task-item.tsx
new file mode 100644
index 000000000000..b25664a61140
--- /dev/null
+++ b/frontend/src/components/v1/chat/task-tracking/task-item.tsx
@@ -0,0 +1,56 @@
+import { useMemo } from "react";
+import { useTranslation } from "react-i18next";
+import { TaskItem as TaskItemType } from "#/types/v1/core/base/common";
+import CircleIcon from "#/icons/u-circle.svg?react";
+import CheckCircleIcon from "#/icons/u-check-circle.svg?react";
+import LoadingIcon from "#/icons/loading.svg?react";
+import { cn } from "#/utils/utils";
+import { Typography } from "#/ui/typography";
+import { I18nKey } from "#/i18n/declaration";
+
+interface TaskItemProps {
+ task: TaskItemType;
+}
+
+export function TaskItem({ task }: TaskItemProps) {
+ const { t } = useTranslation();
+
+ const icon = useMemo(() => {
+ switch (task.status) {
+ case "todo":
+ return ;
+ case "in_progress":
+ return (
+
+ );
+ case "done":
+ return ;
+ default:
+ return ;
+ }
+ }, [task.status]);
+
+ const isDoneStatus = task.status === "done";
+
+ return (
+
+
{icon}
+
+
+ {task.title}
+
+
+ {t(I18nKey.TASK_TRACKING_OBSERVATION$TASK_NOTES)}: {task.notes}
+
+
+
+ );
+}
diff --git a/frontend/src/components/v1/chat/task-tracking/task-list-section.tsx b/frontend/src/components/v1/chat/task-tracking/task-list-section.tsx
new file mode 100644
index 000000000000..aa3821036f4e
--- /dev/null
+++ b/frontend/src/components/v1/chat/task-tracking/task-list-section.tsx
@@ -0,0 +1,33 @@
+import { useTranslation } from "react-i18next";
+import { TaskItem } from "./task-item";
+import LessonPlanIcon from "#/icons/lesson-plan.svg?react";
+import { TaskItem as TaskItemType } from "#/types/v1/core/base/common";
+import { I18nKey } from "#/i18n/declaration";
+import { Typography } from "#/ui/typography";
+
+interface TaskListSectionProps {
+ taskList: TaskItemType[];
+}
+
+export function TaskListSection({ taskList }: TaskListSectionProps) {
+ const { t } = useTranslation();
+
+ return (
+
+ {/* Header Tabs */}
+
+
+
+ {t(I18nKey.COMMON$TASKS)}
+
+
+
+ {/* Task Items */}
+
+ {taskList.map((task, index) => (
+
+ ))}
+
+
+ );
+}
diff --git a/frontend/src/components/v1/chat/task-tracking/task-tracking-observation-content.tsx b/frontend/src/components/v1/chat/task-tracking/task-tracking-observation-content.tsx
new file mode 100644
index 000000000000..167429cae84a
--- /dev/null
+++ b/frontend/src/components/v1/chat/task-tracking/task-tracking-observation-content.tsx
@@ -0,0 +1,23 @@
+import React from "react";
+import { ObservationEvent } from "#/types/v1/core";
+import { TaskTrackerObservation } from "#/types/v1/core/base/observation";
+import { TaskListSection } from "./task-list-section";
+
+interface TaskTrackingObservationContentProps {
+ event: ObservationEvent;
+}
+
+export function TaskTrackingObservationContent({
+ event,
+}: TaskTrackingObservationContentProps): React.ReactNode {
+ const { observation } = event;
+ const { command, task_list: taskList } = observation;
+ const shouldShowTaskList = command === "plan" && taskList.length > 0;
+
+ return (
+
+ {/* Task List section - only show for 'plan' command */}
+ {shouldShowTaskList && }
+
+ );
+}
diff --git a/frontend/src/context/conversation-subscriptions-provider.tsx b/frontend/src/context/conversation-subscriptions-provider.tsx
index 74217cf51389..c83c0d703e7d 100644
--- a/frontend/src/context/conversation-subscriptions-provider.tsx
+++ b/frontend/src/context/conversation-subscriptions-provider.tsx
@@ -31,7 +31,13 @@ interface ConversationSubscriptionsContextType {
subscribeToConversation: (options: {
conversationId: string;
sessionApiKey: string | null;
- providersSet: ("github" | "gitlab" | "bitbucket" | "enterprise_sso")[];
+ providersSet: (
+ | "github"
+ | "gitlab"
+ | "bitbucket"
+ | "azure_devops"
+ | "enterprise_sso"
+ )[];
baseUrl: string;
socketPath?: string;
onEvent?: (event: unknown, conversationId: string) => void;
@@ -135,7 +141,13 @@ export function ConversationSubscriptionsProvider({
(options: {
conversationId: string;
sessionApiKey: string | null;
- providersSet: ("github" | "gitlab" | "bitbucket" | "enterprise_sso")[];
+ providersSet: (
+ | "github"
+ | "gitlab"
+ | "bitbucket"
+ | "azure_devops"
+ | "enterprise_sso"
+ )[];
baseUrl: string;
socketPath?: string;
onEvent?: (event: unknown, conversationId: string) => void;
diff --git a/frontend/src/contexts/conversation-websocket-context.tsx b/frontend/src/contexts/conversation-websocket-context.tsx
index a9f29fb42681..68c50f94990d 100644
--- a/frontend/src/contexts/conversation-websocket-context.tsx
+++ b/frontend/src/contexts/conversation-websocket-context.tsx
@@ -14,6 +14,7 @@ import { useErrorMessageStore } from "#/stores/error-message-store";
import { useOptimisticUserMessageStore } from "#/stores/optimistic-user-message-store";
import { useV1ConversationStateStore } from "#/stores/v1-conversation-state-store";
import { useCommandStore } from "#/state/command-store";
+import { useBrowserStore } from "#/stores/browser-store";
import {
isV1Event,
isAgentErrorEvent,
@@ -22,13 +23,27 @@ import {
isConversationStateUpdateEvent,
isFullStateConversationStateUpdateEvent,
isAgentStatusConversationStateUpdateEvent,
+ isStatsConversationStateUpdateEvent,
isExecuteBashActionEvent,
isExecuteBashObservationEvent,
+ isConversationErrorEvent,
+ isPlanningFileEditorObservationEvent,
+ isBrowserObservationEvent,
+ isBrowserNavigateActionEvent,
} from "#/types/v1/type-guards";
+import { ConversationStateUpdateEventStats } from "#/types/v1/core/events/conversation-state-event";
import { handleActionEventCacheInvalidation } from "#/utils/cache-utils";
import { buildWebSocketUrl } from "#/utils/websocket-url";
-import type { V1SendMessageRequest } from "#/api/conversation-service/v1-conversation-service.types";
+import type {
+ V1AppConversation,
+ V1SendMessageRequest,
+} from "#/api/conversation-service/v1-conversation-service.types";
import EventService from "#/api/event-service/event-service.api";
+import { useConversationStore } from "#/state/conversation-store";
+import { isBudgetOrCreditError } from "#/utils/error-handler";
+import { useTracking } from "#/hooks/use-tracking";
+import { useReadConversationFile } from "#/hooks/mutation/use-read-conversation-file";
+import useMetricsStore from "#/stores/metrics-store";
// eslint-disable-next-line @typescript-eslint/naming-convention
export type V1_WebSocketConnectionState =
@@ -52,30 +67,92 @@ export function ConversationWebSocketProvider({
conversationId,
conversationUrl,
sessionApiKey,
+ subConversations,
+ subConversationIds,
}: {
children: React.ReactNode;
conversationId?: string;
conversationUrl?: string | null;
sessionApiKey?: string | null;
+ subConversations?: V1AppConversation[];
+ subConversationIds?: string[];
}) {
- const [connectionState, setConnectionState] =
+ // Separate connection state tracking for each WebSocket
+ const [mainConnectionState, setMainConnectionState] =
useState("CONNECTING");
- // Track if we've ever successfully connected
+ const [planningConnectionState, setPlanningConnectionState] =
+ useState("CONNECTING");
+
+ // Track if we've ever successfully connected for each connection
// Don't show errors until after first successful connection
- const hasConnectedRef = React.useRef(false);
+ const hasConnectedRefMain = React.useRef(false);
+ const hasConnectedRefPlanning = React.useRef(false);
+
const queryClient = useQueryClient();
const { addEvent } = useEventStore();
const { setErrorMessage, removeErrorMessage } = useErrorMessageStore();
const { removeOptimisticUserMessage } = useOptimisticUserMessageStore();
const { setExecutionStatus } = useV1ConversationStateStore();
const { appendInput, appendOutput } = useCommandStore();
+ const { trackCreditLimitReached } = useTracking();
+
+ // History loading state - separate per connection
+ const [isLoadingHistoryMain, setIsLoadingHistoryMain] = useState(true);
+ const [isLoadingHistoryPlanning, setIsLoadingHistoryPlanning] =
+ useState(true);
+ const [expectedEventCountMain, setExpectedEventCountMain] = useState<
+ number | null
+ >(null);
+ const [expectedEventCountPlanning, setExpectedEventCountPlanning] = useState<
+ number | null
+ >(null);
+
+ const { conversationMode, setPlanContent } = useConversationStore();
+
+ // Hook for reading conversation file
+ const { mutate: readConversationFile } = useReadConversationFile();
+
+ // Separate received event count tracking per connection
+ const receivedEventCountRefMain = useRef(0);
+ const receivedEventCountRefPlanning = useRef(0);
- // History loading state
- const [isLoadingHistory, setIsLoadingHistory] = useState(true);
- const [expectedEventCount, setExpectedEventCount] = useState<number | null>(
- null,
+ // Track the latest PlanningFileEditorObservation event during history replay
+ // We'll only call the API once after history loading completes
+ const latestPlanningFileEventRef = useRef<{
+ path: string;
+ conversationId: string;
+ } | null>(null);
+
+ // Helper function to update metrics from stats event
+ const updateMetricsFromStats = useCallback(
+ (event: ConversationStateUpdateEventStats) => {
+ if (event.value.usage_to_metrics?.agent) {
+ const agentMetrics = event.value.usage_to_metrics.agent;
+ const metrics = {
+ cost: agentMetrics.accumulated_cost,
+ max_budget_per_task: agentMetrics.max_budget_per_task ?? null,
+ usage: agentMetrics.accumulated_token_usage
+ ? {
+ prompt_tokens:
+ agentMetrics.accumulated_token_usage.prompt_tokens,
+ completion_tokens:
+ agentMetrics.accumulated_token_usage.completion_tokens,
+ cache_read_tokens:
+ agentMetrics.accumulated_token_usage.cache_read_tokens,
+ cache_write_tokens:
+ agentMetrics.accumulated_token_usage.cache_write_tokens,
+ context_window:
+ agentMetrics.accumulated_token_usage.context_window,
+ per_turn_token:
+ agentMetrics.accumulated_token_usage.per_turn_token,
+ }
+ : null,
+ };
+ useMetricsStore.getState().setMetrics(metrics);
+ }
+ },
+ [],
);
- const receivedEventCountRef = useRef(0);
// Build WebSocket URL from props
// Only build URL if we have both conversationId and conversationUrl
@@ -88,40 +165,159 @@ export function ConversationWebSocketProvider({
return buildWebSocketUrl(conversationId, conversationUrl);
}, [conversationId, conversationUrl]);
- // Reset hasConnected flag and history loading state when conversation changes
+ const planningAgentWsUrl = useMemo(() => {
+ if (!subConversations?.length) {
+ return null;
+ }
+
+ // Currently, there is only one sub-conversation and it uses the planning agent.
+ const planningAgentConversation = subConversations[0];
+
+ if (
+ !planningAgentConversation?.id ||
+ !planningAgentConversation.conversation_url
+ ) {
+ return null;
+ }
+
+ return buildWebSocketUrl(
+ planningAgentConversation.id,
+ planningAgentConversation.conversation_url,
+ );
+ }, [subConversations]);
+
+ // Merged connection state - reflects combined status of both connections
+ const connectionState = useMemo(() => {
+ // If planning agent connection doesn't exist, use main connection state
+ if (!planningAgentWsUrl) {
+ return mainConnectionState;
+ }
+
+ // If either is connecting, merged state is connecting
+ if (
+ mainConnectionState === "CONNECTING" ||
+ planningConnectionState === "CONNECTING"
+ ) {
+ return "CONNECTING";
+ }
+
+ // If both are open, merged state is open
+ if (mainConnectionState === "OPEN" && planningConnectionState === "OPEN") {
+ return "OPEN";
+ }
+
+ // If both are closed, merged state is closed
+ if (
+ mainConnectionState === "CLOSED" &&
+ planningConnectionState === "CLOSED"
+ ) {
+ return "CLOSED";
+ }
+
+ // If either is closing, merged state is closing
+ if (
+ mainConnectionState === "CLOSING" ||
+ planningConnectionState === "CLOSING"
+ ) {
+ return "CLOSING";
+ }
+
+ // Default to closed if states don't match expected patterns
+ return "CLOSED";
+ }, [mainConnectionState, planningConnectionState, planningAgentWsUrl]);
+
useEffect(() => {
- hasConnectedRef.current = false;
- setIsLoadingHistory(true);
- setExpectedEventCount(null);
- receivedEventCountRef.current = 0;
- }, [conversationId]);
+ if (
+ expectedEventCountMain !== null &&
+ receivedEventCountRefMain.current >= expectedEventCountMain &&
+ isLoadingHistoryMain
+ ) {
+ setIsLoadingHistoryMain(false);
+ }
+ }, [expectedEventCountMain, isLoadingHistoryMain, receivedEventCountRefMain]);
- // Check if we've received all events when expectedEventCount becomes available
useEffect(() => {
if (
- expectedEventCount !== null &&
- receivedEventCountRef.current >= expectedEventCount &&
- isLoadingHistory
+ expectedEventCountPlanning !== null &&
+ receivedEventCountRefPlanning.current >= expectedEventCountPlanning &&
+ isLoadingHistoryPlanning
) {
- setIsLoadingHistory(false);
+ setIsLoadingHistoryPlanning(false);
}
- }, [expectedEventCount, isLoadingHistory]);
+ }, [
+ expectedEventCountPlanning,
+ isLoadingHistoryPlanning,
+ receivedEventCountRefPlanning,
+ ]);
+
+ // Call API once after history loading completes if we tracked any PlanningFileEditorObservation events
+ useEffect(() => {
+ if (!isLoadingHistoryPlanning && latestPlanningFileEventRef.current) {
+ const { path, conversationId: currentPlanningConversationId } =
+ latestPlanningFileEventRef.current;
+
+ readConversationFile(
+ {
+ conversationId: currentPlanningConversationId,
+ filePath: path,
+ },
+ {
+ onSuccess: (fileContent) => {
+ setPlanContent(fileContent);
+ },
+ onError: (error) => {
+ // eslint-disable-next-line no-console
+ console.warn("Failed to read conversation file:", error);
+ },
+ },
+ );
+
+ // Clear the ref after calling the API
+ latestPlanningFileEventRef.current = null;
+ }
+ }, [isLoadingHistoryPlanning, readConversationFile, setPlanContent]);
- const handleMessage = useCallback(
+ useEffect(() => {
+ hasConnectedRefPlanning.current = false;
+ setIsLoadingHistoryPlanning(!!subConversationIds?.length);
+ setExpectedEventCountPlanning(null);
+ receivedEventCountRefPlanning.current = 0;
+ // Reset the tracked event ref when sub-conversations change
+ latestPlanningFileEventRef.current = null;
+ }, [subConversationIds]);
+
+ // Merged loading history state - true if either connection is still loading
+ const isLoadingHistory = useMemo(
+ () => isLoadingHistoryMain || isLoadingHistoryPlanning,
+ [isLoadingHistoryMain, isLoadingHistoryPlanning],
+ );
+
+ // Reset hasConnected flags and history loading state when conversation changes
+ useEffect(() => {
+ hasConnectedRefMain.current = false;
+ setIsLoadingHistoryMain(true);
+ setExpectedEventCountMain(null);
+ receivedEventCountRefMain.current = 0;
+ // Reset the tracked event ref when conversation changes
+ latestPlanningFileEventRef.current = null;
+ }, [conversationId]);
+
+ // Separate message handlers for each connection
+ const handleMainMessage = useCallback(
(messageEvent: MessageEvent) => {
try {
const event = JSON.parse(messageEvent.data);
// Track received events for history loading (count ALL events from WebSocket)
// Always count when loading, even if we don't have the expected count yet
- if (isLoadingHistory) {
- receivedEventCountRef.current += 1;
+ if (isLoadingHistoryMain) {
+ receivedEventCountRefMain.current += 1;
if (
- expectedEventCount !== null &&
- receivedEventCountRef.current >= expectedEventCount
+ expectedEventCountMain !== null &&
+ receivedEventCountRefMain.current >= expectedEventCountMain
) {
- setIsLoadingHistory(false);
+ setIsLoadingHistoryMain(false);
}
}
@@ -129,9 +325,21 @@ export function ConversationWebSocketProvider({
if (isV1Event(event)) {
addEvent(event);
+ // Handle ConversationErrorEvent specifically
+ if (isConversationErrorEvent(event)) {
+ setErrorMessage(event.detail);
+ }
+
// Handle AgentErrorEvent specifically
if (isAgentErrorEvent(event)) {
setErrorMessage(event.error);
+
+ // Track credit limit reached if the error is budget-related
+ if (isBudgetOrCreditError(event.error)) {
+ trackCreditLimitReached({
+ conversationId: conversationId || "unknown",
+ });
+ }
}
// Clear optimistic user message when a user message is confirmed
@@ -159,6 +367,9 @@ export function ConversationWebSocketProvider({
if (isAgentStatusConversationStateUpdateEvent(event)) {
setExecutionStatus(event.value);
}
+ if (isStatsConversationStateUpdateEvent(event)) {
+ updateMetricsFromStats(event);
+ }
}
// Handle ExecuteBashAction events - add command as input to terminal
@@ -168,7 +379,28 @@ export function ConversationWebSocketProvider({
// Handle ExecuteBashObservation events - add output to terminal
if (isExecuteBashObservationEvent(event)) {
- appendOutput(event.observation.output);
+ // Extract text content from the observation content array
+ const textContent = event.observation.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("\n");
+ appendOutput(textContent);
+ }
+
+ // Handle BrowserObservation events - update browser store with screenshot
+ if (isBrowserObservationEvent(event)) {
+ const { screenshot_data: screenshotData } = event.observation;
+ if (screenshotData) {
+ const screenshotSrc = screenshotData.startsWith("data:")
+ ? screenshotData
+ : `data:image/png;base64,${screenshotData}`;
+ useBrowserStore.getState().setScreenshotSrc(screenshotSrc);
+ }
+ }
+
+ // Handle BrowserNavigateAction events - update browser store with URL
+ if (isBrowserNavigateActionEvent(event)) {
+ useBrowserStore.getState().setUrl(event.action.url);
}
}
} catch (error) {
@@ -178,8 +410,8 @@ export function ConversationWebSocketProvider({
},
[
addEvent,
- isLoadingHistory,
- expectedEventCount,
+ isLoadingHistoryMain,
+ expectedEventCountMain,
setErrorMessage,
removeOptimisticUserMessage,
queryClient,
@@ -187,10 +419,147 @@ export function ConversationWebSocketProvider({
setExecutionStatus,
appendInput,
appendOutput,
+ updateMetricsFromStats,
+ ],
+ );
+
+ const handlePlanningMessage = useCallback(
+ (messageEvent: MessageEvent) => {
+ try {
+ const event = JSON.parse(messageEvent.data);
+
+ // Track received events for history loading (count ALL events from WebSocket)
+ // Always count when loading, even if we don't have the expected count yet
+ if (isLoadingHistoryPlanning) {
+ receivedEventCountRefPlanning.current += 1;
+
+ if (
+ expectedEventCountPlanning !== null &&
+ receivedEventCountRefPlanning.current >= expectedEventCountPlanning
+ ) {
+ setIsLoadingHistoryPlanning(false);
+ }
+ }
+
+ // Use type guard to validate v1 event structure
+ if (isV1Event(event)) {
+ // Mark this event as coming from the planning agent
+ const eventWithPlanningFlag = {
+ ...event,
+ isFromPlanningAgent: true,
+ };
+ addEvent(eventWithPlanningFlag);
+
+ // Handle AgentErrorEvent specifically
+ if (isAgentErrorEvent(event)) {
+ setErrorMessage(event.error);
+ }
+
+ // Clear optimistic user message when a user message is confirmed
+ if (isUserMessageEvent(event)) {
+ removeOptimisticUserMessage();
+ }
+
+ // Handle cache invalidation for ActionEvent
+ if (isActionEvent(event)) {
+ const planningAgentConversation = subConversations?.[0];
+ const currentConversationId =
+ planningAgentConversation?.id || "test-conversation-id"; // TODO: Get from context
+ handleActionEventCacheInvalidation(
+ event,
+ currentConversationId,
+ queryClient,
+ );
+ }
+
+ // Handle conversation state updates
+ // TODO: Tests
+ if (isConversationStateUpdateEvent(event)) {
+ if (isFullStateConversationStateUpdateEvent(event)) {
+ setExecutionStatus(event.value.execution_status);
+ }
+ if (isAgentStatusConversationStateUpdateEvent(event)) {
+ setExecutionStatus(event.value);
+ }
+ if (isStatsConversationStateUpdateEvent(event)) {
+ updateMetricsFromStats(event);
+ }
+ }
+
+ // Handle ExecuteBashAction events - add command as input to terminal
+ if (isExecuteBashActionEvent(event)) {
+ appendInput(event.action.command);
+ }
+
+ // Handle ExecuteBashObservation events - add output to terminal
+ if (isExecuteBashObservationEvent(event)) {
+ // Extract text content from the observation content array
+ const textContent = event.observation.content
+ .filter((c) => c.type === "text")
+ .map((c) => c.text)
+ .join("\n");
+ appendOutput(textContent);
+ }
+
+ // Handle PlanningFileEditorObservation events - read and update plan content
+ if (isPlanningFileEditorObservationEvent(event)) {
+ const planningAgentConversation = subConversations?.[0];
+ const planningConversationId = planningAgentConversation?.id;
+
+ if (planningConversationId && event.observation.path) {
+ // During history replay, track the latest event but don't call API
+ // After history loading completes, we'll call the API once with the latest event
+ if (isLoadingHistoryPlanning) {
+ latestPlanningFileEventRef.current = {
+ path: event.observation.path,
+ conversationId: planningConversationId,
+ };
+ } else {
+ // History loading is complete - this is a new real-time event
+ // Call the API immediately for real-time updates
+ readConversationFile(
+ {
+ conversationId: planningConversationId,
+ filePath: event.observation.path,
+ },
+ {
+ onSuccess: (fileContent) => {
+ setPlanContent(fileContent);
+ },
+ onError: (error) => {
+ // eslint-disable-next-line no-console
+ console.warn("Failed to read conversation file:", error);
+ },
+ },
+ );
+ }
+ }
+ }
+ }
+ } catch (error) {
+ // eslint-disable-next-line no-console
+ console.warn("Failed to parse WebSocket message as JSON:", error);
+ }
+ },
+ [
+ addEvent,
+ isLoadingHistoryPlanning,
+ expectedEventCountPlanning,
+ setErrorMessage,
+ removeOptimisticUserMessage,
+ queryClient,
+ subConversations,
+ setExecutionStatus,
+ appendInput,
+ appendOutput,
+ readConversationFile,
+ setPlanContent,
+ updateMetricsFromStats,
],
);
- const websocketOptions: WebSocketHookOptions = useMemo(() => {
+ // Separate WebSocket options for main connection
+ const mainWebsocketOptions: WebSocketHookOptions = useMemo(() => {
const queryParams: Record<string, string | boolean> = {
resend_all: true,
};
@@ -204,57 +573,136 @@ export function ConversationWebSocketProvider({
queryParams,
reconnect: { enabled: true },
onOpen: async () => {
- setConnectionState("OPEN");
- hasConnectedRef.current = true; // Mark that we've successfully connected
+ setMainConnectionState("OPEN");
+ hasConnectedRefMain.current = true; // Mark that we've successfully connected
removeErrorMessage(); // Clear any previous error messages on successful connection
// Fetch expected event count for history loading detection
if (conversationId) {
try {
const count = await EventService.getEventCount(conversationId);
- setExpectedEventCount(count);
+ setExpectedEventCountMain(count);
// If no events expected, mark as loaded immediately
if (count === 0) {
- setIsLoadingHistory(false);
+ setIsLoadingHistoryMain(false);
}
} catch (error) {
// Fall back to marking as loaded to avoid infinite loading state
- setIsLoadingHistory(false);
+ setIsLoadingHistoryMain(false);
}
}
},
onClose: (event: CloseEvent) => {
- setConnectionState("CLOSED");
+ setMainConnectionState("CLOSED");
// Only show error message if we've previously connected successfully
// This prevents showing errors during initial connection attempts (e.g., when auto-starting a conversation)
- if (event.code !== 1000 && hasConnectedRef.current) {
+ if (event.code !== 1000 && hasConnectedRefMain.current) {
setErrorMessage(
`Connection lost: ${event.reason || "Unexpected disconnect"}`,
);
}
},
onError: () => {
- setConnectionState("CLOSED");
+ setMainConnectionState("CLOSED");
// Only show error message if we've previously connected successfully
- if (hasConnectedRef.current) {
+ if (hasConnectedRefMain.current) {
setErrorMessage("Failed to connect to server");
}
},
- onMessage: handleMessage,
+ onMessage: handleMainMessage,
};
}, [
- handleMessage,
+ handleMainMessage,
setErrorMessage,
removeErrorMessage,
sessionApiKey,
conversationId,
]);
+ // Separate WebSocket options for planning agent connection
+ const planningWebsocketOptions: WebSocketHookOptions = useMemo(() => {
+ const queryParams: Record<string, string | boolean> = {
+ resend_all: true,
+ };
+
+ // Add session_api_key if available
+ if (sessionApiKey) {
+ queryParams.session_api_key = sessionApiKey;
+ }
+
+ const planningAgentConversation = subConversations?.[0];
+
+ return {
+ queryParams,
+ reconnect: { enabled: true },
+ onOpen: async () => {
+ setPlanningConnectionState("OPEN");
+ hasConnectedRefPlanning.current = true; // Mark that we've successfully connected
+ removeErrorMessage(); // Clear any previous error messages on successful connection
+
+ // Fetch expected event count for history loading detection
+ if (planningAgentConversation?.id) {
+ try {
+ const count = await EventService.getEventCount(
+ planningAgentConversation.id,
+ );
+ setExpectedEventCountPlanning(count);
+
+ // If no events expected, mark as loaded immediately
+ if (count === 0) {
+ setIsLoadingHistoryPlanning(false);
+ }
+ } catch (error) {
+ // Fall back to marking as loaded to avoid infinite loading state
+ setIsLoadingHistoryPlanning(false);
+ }
+ }
+ },
+ onClose: (event: CloseEvent) => {
+ setPlanningConnectionState("CLOSED");
+ // Only show error message if we've previously connected successfully
+ // This prevents showing errors during initial connection attempts (e.g., when auto-starting a conversation)
+ if (event.code !== 1000 && hasConnectedRefPlanning.current) {
+ setErrorMessage(
+ `Connection lost: ${event.reason || "Unexpected disconnect"}`,
+ );
+ }
+ },
+ onError: () => {
+ setPlanningConnectionState("CLOSED");
+ // Only show error message if we've previously connected successfully
+ if (hasConnectedRefPlanning.current) {
+ setErrorMessage("Failed to connect to server");
+ }
+ },
+ onMessage: handlePlanningMessage,
+ };
+ }, [
+ handlePlanningMessage,
+ setErrorMessage,
+ removeErrorMessage,
+ sessionApiKey,
+ subConversations,
+ ]);
+
// Only attempt WebSocket connection when we have a valid URL
// This prevents connection attempts during task polling phase
const websocketUrl = wsUrl;
- const { socket } = useWebSocket(websocketUrl || "", websocketOptions);
+ const { socket: mainSocket } = useWebSocket(
+ websocketUrl || "",
+ mainWebsocketOptions,
+ );
+
+ const { socket: planningAgentSocket } = useWebSocket(
+ planningAgentWsUrl || "",
+ planningWebsocketOptions,
+ );
+
+ const socket = useMemo(
+ () => (conversationMode === "plan" ? planningAgentSocket : mainSocket),
+ [conversationMode, planningAgentSocket, mainSocket],
+ );
// V1 send message function via WebSocket
const sendMessage = useCallback(
@@ -278,33 +726,63 @@ export function ConversationWebSocketProvider({
[socket, setErrorMessage],
);
+ // Track main socket state changes
+ useEffect(() => {
+ // Only process socket updates if we have a valid URL and socket
+ if (mainSocket && wsUrl) {
+ // Update state based on socket readyState
+ const updateState = () => {
+ switch (mainSocket.readyState) {
+ case WebSocket.CONNECTING:
+ setMainConnectionState("CONNECTING");
+ break;
+ case WebSocket.OPEN:
+ setMainConnectionState("OPEN");
+ break;
+ case WebSocket.CLOSING:
+ setMainConnectionState("CLOSING");
+ break;
+ case WebSocket.CLOSED:
+ setMainConnectionState("CLOSED");
+ break;
+ default:
+ setMainConnectionState("CLOSED");
+ break;
+ }
+ };
+
+ updateState();
+ }
+ }, [mainSocket, wsUrl]);
+
+ // Track planning agent socket state changes
useEffect(() => {
// Only process socket updates if we have a valid URL and socket
- if (socket && wsUrl) {
+ if (planningAgentSocket && planningAgentWsUrl) {
// Update state based on socket readyState
const updateState = () => {
- switch (socket.readyState) {
+ switch (planningAgentSocket.readyState) {
case WebSocket.CONNECTING:
- setConnectionState("CONNECTING");
+ setPlanningConnectionState("CONNECTING");
break;
case WebSocket.OPEN:
- setConnectionState("OPEN");
+ setPlanningConnectionState("OPEN");
break;
case WebSocket.CLOSING:
- setConnectionState("CLOSING");
+ setPlanningConnectionState("CLOSING");
break;
case WebSocket.CLOSED:
- setConnectionState("CLOSED");
+ setPlanningConnectionState("CLOSED");
break;
default:
- setConnectionState("CLOSED");
+ setPlanningConnectionState("CLOSED");
break;
}
};
updateState();
}
- }, [socket, wsUrl]);
+ }, [planningAgentSocket, planningAgentWsUrl]);
const contextValue = useMemo(
() => ({ connectionState, sendMessage, isLoadingHistory }),
diff --git a/frontend/src/contexts/websocket-provider-wrapper.tsx b/frontend/src/contexts/websocket-provider-wrapper.tsx
index bf2a28d6b03c..d278a4655165 100644
--- a/frontend/src/contexts/websocket-provider-wrapper.tsx
+++ b/frontend/src/contexts/websocket-provider-wrapper.tsx
@@ -2,6 +2,7 @@ import React from "react";
import { WsClientProvider } from "#/context/ws-client-provider";
import { ConversationWebSocketProvider } from "#/contexts/conversation-websocket-context";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
+import { useSubConversations } from "#/hooks/query/use-sub-conversations";
interface WebSocketProviderWrapperProps {
children: React.ReactNode;
@@ -36,6 +37,15 @@ export function WebSocketProviderWrapper({
}: WebSocketProviderWrapperProps) {
// Get conversation data for V1 provider
const { data: conversation } = useActiveConversation();
+ // Get sub-conversation data for V1 provider
+ const { data: subConversations } = useSubConversations(
+ conversation?.sub_conversation_ids ?? [],
+ );
+
+ // Filter out null sub-conversations
+ const filteredSubConversations = subConversations?.filter(
+ (subConversation) => subConversation !== null,
+ );
if (version === 0) {
return (
@@ -51,6 +61,8 @@ export function WebSocketProviderWrapper({
conversationId={conversationId}
conversationUrl={conversation?.url}
sessionApiKey={conversation?.session_api_key}
+ subConversationIds={conversation?.sub_conversation_ids}
+ subConversations={filteredSubConversations}
>
{children}
diff --git a/frontend/src/hooks/mutation/use-create-conversation.ts b/frontend/src/hooks/mutation/use-create-conversation.ts
index 4baba328026d..8f6df2c27271 100644
--- a/frontend/src/hooks/mutation/use-create-conversation.ts
+++ b/frontend/src/hooks/mutation/use-create-conversation.ts
@@ -4,8 +4,8 @@ import V1ConversationService from "#/api/conversation-service/v1-conversation-se
import { SuggestedTask } from "#/utils/types";
import { Provider } from "#/types/settings";
import { CreateMicroagent, Conversation } from "#/api/open-hands.types";
-import { USE_V1_CONVERSATION_API } from "#/utils/feature-flags";
import { useTracking } from "#/hooks/use-tracking";
+import { useSettings } from "#/hooks/query/use-settings";
interface CreateConversationVariables {
query?: string;
@@ -17,6 +17,8 @@ interface CreateConversationVariables {
suggestedTask?: SuggestedTask;
conversationInstructions?: string;
createMicroagent?: CreateMicroagent;
+ parentConversationId?: string;
+ agentType?: "default" | "plan";
}
// Response type that combines both V1 and legacy responses
@@ -32,6 +34,7 @@ interface CreateConversationResponse extends Partial<Conversation> {
export const useCreateConversation = () => {
const queryClient = useQueryClient();
const { trackConversationCreated } = useTracking();
+ const { data: settings } = useSettings();
return useMutation({
mutationKey: ["create-conversation"],
@@ -44,9 +47,11 @@ export const useCreateConversation = () => {
suggestedTask,
conversationInstructions,
createMicroagent,
+ parentConversationId,
+ agentType,
} = variables;
- const useV1 = USE_V1_CONVERSATION_API() && !createMicroagent;
+ const useV1 = !!settings?.V1_ENABLED && !createMicroagent;
if (useV1) {
// Use V1 API - creates a conversation start task
@@ -57,6 +62,8 @@ export const useCreateConversation = () => {
repository?.branch,
conversationInstructions,
undefined, // trigger - will be set by backend
+ parentConversationId,
+ agentType,
);
// Return a special task ID that the frontend will recognize
diff --git a/frontend/src/hooks/mutation/use-read-conversation-file.ts b/frontend/src/hooks/mutation/use-read-conversation-file.ts
new file mode 100644
index 000000000000..5dd8c51eb965
--- /dev/null
+++ b/frontend/src/hooks/mutation/use-read-conversation-file.ts
@@ -0,0 +1,17 @@
+import { useMutation } from "@tanstack/react-query";
+import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";
+
+interface UseReadConversationFileVariables {
+ conversationId: string;
+ filePath?: string;
+}
+
+export const useReadConversationFile = () =>
+ useMutation({
+ mutationKey: ["read-conversation-file"],
+ mutationFn: async ({
+ conversationId,
+ filePath,
+ }: UseReadConversationFileVariables): Promise<string> =>
+ V1ConversationService.readConversationFile(conversationId, filePath),
+ });
diff --git a/frontend/src/hooks/mutation/use-save-settings.ts b/frontend/src/hooks/mutation/use-save-settings.ts
index c8a433e9dc39..099ab41ea425 100644
--- a/frontend/src/hooks/mutation/use-save-settings.ts
+++ b/frontend/src/hooks/mutation/use-save-settings.ts
@@ -35,6 +35,7 @@ const saveSettingsMutationFn = async (settings: Partial) => {
settings.GIT_USER_NAME?.trim() || DEFAULT_SETTINGS.GIT_USER_NAME,
git_user_email:
settings.GIT_USER_EMAIL?.trim() || DEFAULT_SETTINGS.GIT_USER_EMAIL,
+ v1_enabled: settings.V1_ENABLED,
};
await SettingsService.saveSettings(apiSettings);
diff --git a/frontend/src/hooks/query/use-balance.ts b/frontend/src/hooks/query/use-balance.ts
index 1d89454f74f0..1bc7075e9f6d 100644
--- a/frontend/src/hooks/query/use-balance.ts
+++ b/frontend/src/hooks/query/use-balance.ts
@@ -13,6 +13,6 @@ export const useBalance = () => {
enabled:
!isOnTosPage &&
config?.APP_MODE === "saas" &&
- config?.FEATURE_FLAGS.ENABLE_BILLING,
+ config?.FEATURE_FLAGS?.ENABLE_BILLING,
});
};
diff --git a/frontend/src/hooks/query/use-settings.ts b/frontend/src/hooks/query/use-settings.ts
index 74a516f4a6b4..c1769c8422e0 100644
--- a/frontend/src/hooks/query/use-settings.ts
+++ b/frontend/src/hooks/query/use-settings.ts
@@ -36,6 +36,7 @@ const getSettingsQueryFn = async (): Promise => {
GIT_USER_EMAIL:
apiSettings.git_user_email || DEFAULT_SETTINGS.GIT_USER_EMAIL,
IS_NEW_USER: false,
+ V1_ENABLED: apiSettings.v1_enabled ?? DEFAULT_SETTINGS.V1_ENABLED,
};
};
diff --git a/frontend/src/hooks/query/use-start-tasks.ts b/frontend/src/hooks/query/use-start-tasks.ts
index 833ce86258db..6af56f2296a9 100644
--- a/frontend/src/hooks/query/use-start-tasks.ts
+++ b/frontend/src/hooks/query/use-start-tasks.ts
@@ -1,6 +1,6 @@
import { useQuery } from "@tanstack/react-query";
import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";
-import { USE_V1_CONVERSATION_API } from "#/utils/feature-flags";
+import { useSettings } from "#/hooks/query/use-settings";
/**
* Hook to fetch in-progress V1 conversation start tasks
@@ -13,13 +13,17 @@ import { USE_V1_CONVERSATION_API } from "#/utils/feature-flags";
* @param limit Maximum number of tasks to return (max 100)
* @returns Query result with array of in-progress start tasks
*/
-export const useStartTasks = (limit = 10) =>
- useQuery({
+export const useStartTasks = (limit = 10) => {
+ const { data: settings } = useSettings();
+ const isV1Enabled = !!settings?.V1_ENABLED;
+
+ return useQuery({
queryKey: ["start-tasks", "search", limit],
queryFn: () => V1ConversationService.searchStartTasks(limit),
- enabled: USE_V1_CONVERSATION_API(),
+ enabled: isV1Enabled,
select: (tasks) =>
tasks.filter(
(task) => task.status !== "READY" && task.status !== "ERROR",
),
});
+};
diff --git a/frontend/src/hooks/query/use-sub-conversation-task-polling.ts b/frontend/src/hooks/query/use-sub-conversation-task-polling.ts
new file mode 100644
index 000000000000..e7dd29aae0ff
--- /dev/null
+++ b/frontend/src/hooks/query/use-sub-conversation-task-polling.ts
@@ -0,0 +1,72 @@
+import { useEffect } from "react";
+import { useQuery, useQueryClient } from "@tanstack/react-query";
+import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";
+
+/**
+ * Hook that polls V1 sub-conversation start tasks and invalidates parent conversation cache when ready.
+ *
+ * This hook:
+ * - Polls the V1 start task API every 3 seconds until status is READY or ERROR
+ * - Automatically invalidates the parent conversation cache when the task becomes READY
+ * - Exposes task status and details for UI components to show loading states and errors
+ *
+ * Use case:
+ * - When creating a sub-conversation (e.g., plan mode), track the task and refresh parent conversation
+ * data once the sub-conversation is ready
+ *
+ * @param taskId - The task ID to poll (from createConversation response)
+ * @param parentConversationId - The parent conversation ID to invalidate when ready
+ */
+export const useSubConversationTaskPolling = (
+ taskId: string | null,
+ parentConversationId: string | null,
+) => {
+ const queryClient = useQueryClient();
+
+ // Poll the task if we have both taskId and parentConversationId
+ const taskQuery = useQuery({
+ queryKey: ["sub-conversation-task", taskId],
+ queryFn: async () => {
+ if (!taskId) return null;
+ return V1ConversationService.getStartTask(taskId);
+ },
+ enabled: !!taskId && !!parentConversationId,
+ refetchInterval: (query) => {
+ const task = query.state.data;
+ if (!task) return false;
+
+ // Stop polling if ready or error
+ if (task.status === "READY" || task.status === "ERROR") {
+ return false;
+ }
+
+ // Poll every 3 seconds while task is in progress
+ return 3000;
+ },
+ retry: false,
+ });
+
+ // Invalidate parent conversation cache when task is ready
+ useEffect(() => {
+ const task = taskQuery.data;
+ if (
+ task?.status === "READY" &&
+ task.app_conversation_id &&
+ parentConversationId
+ ) {
+ // Invalidate the parent conversation to refetch with updated sub_conversation_ids
+ queryClient.invalidateQueries({
+ queryKey: ["user", "conversation", parentConversationId],
+ });
+ }
+ }, [taskQuery.data, parentConversationId, queryClient]);
+
+ return {
+ task: taskQuery.data,
+ taskStatus: taskQuery.data?.status,
+ taskDetail: taskQuery.data?.detail,
+ taskError: taskQuery.error,
+ isLoadingTask: taskQuery.isLoading,
+ subConversationId: taskQuery.data?.app_conversation_id,
+ };
+};
diff --git a/frontend/src/hooks/query/use-sub-conversations.ts b/frontend/src/hooks/query/use-sub-conversations.ts
new file mode 100644
index 000000000000..53e6c84d4526
--- /dev/null
+++ b/frontend/src/hooks/query/use-sub-conversations.ts
@@ -0,0 +1,39 @@
+import { useQuery } from "@tanstack/react-query";
+import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";
+import { V1AppConversation } from "#/api/conversation-service/v1-conversation-service.types";
+
+const FIVE_MINUTES = 1000 * 60 * 5;
+const FIFTEEN_MINUTES = 1000 * 60 * 15;
+
+/**
+ * React hook to fetch sub-conversations by their IDs
+ *
+ * @param subConversationIds Array of sub-conversation IDs to fetch
+ * @returns React Query result with sub-conversation data, loading, and error states
+ *
+ * @example
+ * ```tsx
+ * const { data: subConversations, isLoading, isError } = useSubConversations(
+ * conversation.sub_conversation_ids || []
+ * );
+ * ```
+ */
+export const useSubConversations = (
+ subConversationIds: string[] | null | undefined,
+) => {
+ const ids = subConversationIds || [];
+
+ return useQuery<(V1AppConversation | null)[]>({
+ queryKey: ["v1", "sub-conversations", ids],
+ queryFn: async () => {
+ if (ids.length === 0) {
+ return [];
+ }
+ return V1ConversationService.batchGetAppConversations(ids);
+ },
+ enabled: ids.length > 0,
+ staleTime: FIVE_MINUTES,
+ gcTime: FIFTEEN_MINUTES,
+ retry: false,
+ });
+};
diff --git a/frontend/src/hooks/use-handle-plan-click.ts b/frontend/src/hooks/use-handle-plan-click.ts
new file mode 100644
index 000000000000..9734bab8da86
--- /dev/null
+++ b/frontend/src/hooks/use-handle-plan-click.ts
@@ -0,0 +1,71 @@
+import { useCallback } from "react";
+import { useTranslation } from "react-i18next";
+import { I18nKey } from "#/i18n/declaration";
+import { useConversationStore } from "#/state/conversation-store";
+import { useActiveConversation } from "#/hooks/query/use-active-conversation";
+import { useCreateConversation } from "#/hooks/mutation/use-create-conversation";
+import { displaySuccessToast } from "#/utils/custom-toast-handlers";
+
+/**
+ * Custom hook that encapsulates the logic for handling plan creation.
+ * Returns a function that can be called to create a plan conversation and
+ * the pending state of the conversation creation.
+ *
+ * @returns An object containing handlePlanClick function and isCreatingConversation boolean
+ */
+export const useHandlePlanClick = () => {
+ const { t } = useTranslation();
+ const { setConversationMode, setSubConversationTaskId } =
+ useConversationStore();
+ const { data: conversation } = useActiveConversation();
+ const { mutate: createConversation, isPending: isCreatingConversation } =
+ useCreateConversation();
+
+ const handlePlanClick = useCallback(
+ (event?: React.MouseEvent | KeyboardEvent) => {
+ event?.preventDefault();
+ event?.stopPropagation();
+
+ // Set conversation mode to "plan" immediately
+ setConversationMode("plan");
+
+      // Skip creation when sub-conversations already exist or there is no active conversation
+ if (
+ (conversation?.sub_conversation_ids &&
+ conversation.sub_conversation_ids.length > 0) ||
+ !conversation?.conversation_id
+ ) {
+        // Do nothing if either condition holds (sub-conversations exist, or no conversation ID)
+ return;
+ }
+
+ // Create a new sub-conversation if we have a current conversation ID
+ createConversation(
+ {
+ parentConversationId: conversation.conversation_id,
+ agentType: "plan",
+ },
+ {
+ onSuccess: (data) => {
+ displaySuccessToast(
+ t(I18nKey.PLANNING_AGENTT$PLANNING_AGENT_INITIALIZED),
+ );
+ // Track the task ID to poll for sub-conversation creation
+ if (data.v1_task_id) {
+ setSubConversationTaskId(data.v1_task_id);
+ }
+ },
+ },
+ );
+ },
+ [
+ conversation,
+ createConversation,
+ setConversationMode,
+ setSubConversationTaskId,
+ t,
+ ],
+ );
+
+ return { handlePlanClick, isCreatingConversation };
+};
diff --git a/frontend/src/hooks/use-send-message.ts b/frontend/src/hooks/use-send-message.ts
index 1e1d627181d4..c6655b823076 100644
--- a/frontend/src/hooks/use-send-message.ts
+++ b/frontend/src/hooks/use-send-message.ts
@@ -41,13 +41,11 @@ export function useSendMessage() {
},
];
- // Add images if present
+ // Add images if present - using SDK's ImageContent format
if (args.image_urls && args.image_urls.length > 0) {
- args.image_urls.forEach((url) => {
- content.push({
- type: "image_url",
- image_url: { url },
- });
+ content.push({
+ type: "image",
+ image_urls: args.image_urls,
});
}
diff --git a/frontend/src/hooks/use-sync-posthog-consent.ts b/frontend/src/hooks/use-sync-posthog-consent.ts
new file mode 100644
index 000000000000..615aa9a1bf8f
--- /dev/null
+++ b/frontend/src/hooks/use-sync-posthog-consent.ts
@@ -0,0 +1,41 @@
+import React from "react";
+import { usePostHog } from "posthog-js/react";
+import { handleCaptureConsent } from "#/utils/handle-capture-consent";
+import { useSettings } from "./query/use-settings";
+
+/**
+ * Hook to sync PostHog opt-in/out state with backend setting on mount.
+ * This ensures that if the backend setting changes (e.g., via API or different client),
+ * the PostHog instance reflects the current user preference.
+ */
+export const useSyncPostHogConsent = () => {
+ const posthog = usePostHog();
+ const { data: settings } = useSettings();
+ const hasSyncedRef = React.useRef(false);
+
+ React.useEffect(() => {
+ // Only run once when both PostHog and settings are available
+ if (!posthog || settings === undefined || hasSyncedRef.current) {
+ return;
+ }
+
+ const backendConsent = settings.USER_CONSENTS_TO_ANALYTICS;
+
+ // Only sync if there's a backend preference set
+ if (backendConsent !== null) {
+ const posthogHasOptedIn = posthog.has_opted_in_capturing();
+ const posthogHasOptedOut = posthog.has_opted_out_capturing();
+
+ // Check if PostHog state is out of sync with backend
+ const needsSync =
+ (backendConsent === true && !posthogHasOptedIn) ||
+ (backendConsent === false && !posthogHasOptedOut);
+
+ if (needsSync) {
+ handleCaptureConsent(posthog, backendConsent);
+ }
+
+ hasSyncedRef.current = true;
+ }
+ }, [posthog, settings]);
+};
diff --git a/frontend/src/hooks/use-terminal.ts b/frontend/src/hooks/use-terminal.ts
index b5ffb6baf9f7..d01132a85fd4 100644
--- a/frontend/src/hooks/use-terminal.ts
+++ b/frontend/src/hooks/use-terminal.ts
@@ -44,7 +44,7 @@ export const useTerminal = () => {
new Terminal({
fontFamily: "Menlo, Monaco, 'Courier New', monospace",
fontSize: 14,
- scrollback: 1000,
+ scrollback: 10000,
scrollSensitivity: 1,
fastScrollModifier: "alt",
fastScrollSensitivity: 5,
@@ -62,6 +62,7 @@ export const useTerminal = () => {
terminal.current.open(ref.current);
// Hide cursor for read-only terminal using ANSI escape sequence
terminal.current.write("\x1b[?25l");
+ fitAddon.current?.fit();
}
}
};
diff --git a/frontend/src/hooks/use-tracking.ts b/frontend/src/hooks/use-tracking.ts
index 4b7959c1dd9c..0dfc0f0705b7 100644
--- a/frontend/src/hooks/use-tracking.ts
+++ b/frontend/src/hooks/use-tracking.ts
@@ -67,6 +67,38 @@ export const useTracking = () => {
});
};
+ const trackUserSignupCompleted = () => {
+ posthog.capture("user_signup_completed", {
+ signup_timestamp: new Date().toISOString(),
+ ...commonProperties,
+ });
+ };
+
+ const trackCreditsPurchased = ({
+ amountUsd,
+ stripeSessionId,
+ }: {
+ amountUsd: number;
+ stripeSessionId: string;
+ }) => {
+ posthog.capture("credits_purchased", {
+ amount_usd: amountUsd,
+ stripe_session_id: stripeSessionId,
+ ...commonProperties,
+ });
+ };
+
+ const trackCreditLimitReached = ({
+ conversationId,
+ }: {
+ conversationId: string;
+ }) => {
+ posthog.capture("credit_limit_reached", {
+ conversation_id: conversationId,
+ ...commonProperties,
+ });
+ };
+
return {
trackLoginButtonClick,
trackConversationCreated,
@@ -74,5 +106,8 @@ export const useTracking = () => {
trackPullButtonClick,
trackCreatePrButtonClick,
trackGitProviderConnected,
+ trackUserSignupCompleted,
+ trackCreditsPurchased,
+ trackCreditLimitReached,
};
};
diff --git a/frontend/src/i18n/declaration.ts b/frontend/src/i18n/declaration.ts
index f3fa1744e707..420709ef9b33 100644
--- a/frontend/src/i18n/declaration.ts
+++ b/frontend/src/i18n/declaration.ts
@@ -1,6 +1,11 @@
// this file generate by script, don't modify it manually!!!
export enum I18nKey {
MAINTENANCE$SCHEDULED_MESSAGE = "MAINTENANCE$SCHEDULED_MESSAGE",
+ AZURE_DEVOPS$CONNECT_ACCOUNT = "AZURE_DEVOPS$CONNECT_ACCOUNT",
+ GIT$AZURE_DEVOPS_TOKEN = "GIT$AZURE_DEVOPS_TOKEN",
+ GIT$AZURE_DEVOPS_HOST = "GIT$AZURE_DEVOPS_HOST",
+ GIT$AZURE_DEVOPS_HOST_PLACEHOLDER = "GIT$AZURE_DEVOPS_HOST_PLACEHOLDER",
+ GIT$AZURE_DEVOPS_TOKEN_HELP = "GIT$AZURE_DEVOPS_TOKEN_HELP",
MICROAGENT$NO_REPOSITORY_FOUND = "MICROAGENT$NO_REPOSITORY_FOUND",
MICROAGENT$ADD_TO_MICROAGENT = "MICROAGENT$ADD_TO_MICROAGENT",
MICROAGENT$WHAT_TO_ADD = "MICROAGENT$WHAT_TO_ADD",
@@ -117,6 +122,7 @@ export enum I18nKey {
SETTINGS$NAV_SECRETS = "SETTINGS$NAV_SECRETS",
SETTINGS$NAV_API_KEYS = "SETTINGS$NAV_API_KEYS",
SETTINGS$GITHUB = "SETTINGS$GITHUB",
+ SETTINGS$AZURE_DEVOPS = "SETTINGS$AZURE_DEVOPS",
SETTINGS$SLACK = "SETTINGS$SLACK",
SETTINGS$NAV_LLM = "SETTINGS$NAV_LLM",
GIT$MERGE_REQUEST = "GIT$MERGE_REQUEST",
@@ -440,6 +446,7 @@ export enum I18nKey {
STATUS$STARTING_RUNTIME = "STATUS$STARTING_RUNTIME",
STATUS$SETTING_UP_WORKSPACE = "STATUS$SETTING_UP_WORKSPACE",
STATUS$SETTING_UP_GIT_HOOKS = "STATUS$SETTING_UP_GIT_HOOKS",
+ STATUS$SETTING_UP_SKILLS = "STATUS$SETTING_UP_SKILLS",
ACCOUNT_SETTINGS_MODAL$DISCONNECT = "ACCOUNT_SETTINGS_MODAL$DISCONNECT",
ACCOUNT_SETTINGS_MODAL$SAVE = "ACCOUNT_SETTINGS_MODAL$SAVE",
ACCOUNT_SETTINGS_MODAL$CLOSE = "ACCOUNT_SETTINGS_MODAL$CLOSE",
@@ -937,10 +944,15 @@ export enum I18nKey {
AGENT_STATUS$WAITING_FOR_USER_CONFIRMATION = "AGENT_STATUS$WAITING_FOR_USER_CONFIRMATION",
COMMON$MORE_OPTIONS = "COMMON$MORE_OPTIONS",
COMMON$CREATE_A_PLAN = "COMMON$CREATE_A_PLAN",
+ COMMON$TASKS = "COMMON$TASKS",
COMMON$PLAN_MD = "COMMON$PLAN_MD",
COMMON$READ_MORE = "COMMON$READ_MORE",
COMMON$BUILD = "COMMON$BUILD",
COMMON$ASK = "COMMON$ASK",
COMMON$PLAN = "COMMON$PLAN",
COMMON$LET_S_WORK_ON_A_PLAN = "COMMON$LET_S_WORK_ON_A_PLAN",
+ COMMON$CODE_AGENT_DESCRIPTION = "COMMON$CODE_AGENT_DESCRIPTION",
+ COMMON$PLAN_AGENT_DESCRIPTION = "COMMON$PLAN_AGENT_DESCRIPTION",
+ PLANNING_AGENTT$PLANNING_AGENT_INITIALIZED = "PLANNING_AGENTT$PLANNING_AGENT_INITIALIZED",
+ OBSERVATION_MESSAGE$SKILL_READY = "OBSERVATION_MESSAGE$SKILL_READY",
}
diff --git a/frontend/src/i18n/translation.json b/frontend/src/i18n/translation.json
index 3765faee4f64..2278092e8ee3 100644
--- a/frontend/src/i18n/translation.json
+++ b/frontend/src/i18n/translation.json
@@ -15,6 +15,86 @@
"de": "Die geplante Wartung beginnt um {{time}}",
"uk": "Планове технічне обслуговування розпочнеться о {{time}}"
},
+ "AZURE_DEVOPS$CONNECT_ACCOUNT": {
+ "en": "Connect Azure DevOps Account",
+ "ja": "Azure DevOps アカウントを接続",
+ "zh-CN": "连接 Azure DevOps 账户",
+ "zh-TW": "連接 Azure DevOps 帳戶",
+ "ko-KR": "Azure DevOps 계정 연결",
+ "no": "Koble til Azure DevOps-konto",
+ "it": "Connetti account Azure DevOps",
+ "pt": "Conectar conta do Azure DevOps",
+ "es": "Conectar cuenta de Azure DevOps",
+ "ar": "ربط حساب Azure DevOps",
+ "fr": "Connecter le compte Azure DevOps",
+ "tr": "Azure DevOps hesabını bağla",
+ "de": "Azure DevOps-Konto verbinden",
+ "uk": "Підключити обліковий запис Azure DevOps"
+ },
+ "GIT$AZURE_DEVOPS_TOKEN": {
+ "en": "Azure DevOps Personal Access Token",
+ "ja": "Azure DevOps 個人用アクセス トークン",
+ "zh-CN": "Azure DevOps 个人访问令牌",
+ "zh-TW": "Azure DevOps 個人存取權杖",
+ "ko-KR": "Azure DevOps 개인 액세스 토큰",
+ "no": "Azure DevOps personlig tilgangstoken",
+ "it": "Token di accesso personale Azure DevOps",
+ "pt": "Token de acesso pessoal do Azure DevOps",
+ "es": "Token de acceso personal de Azure DevOps",
+ "ar": "رمز الوصول الشخصي لـ Azure DevOps",
+ "fr": "Jeton d'accès personnel Azure DevOps",
+ "tr": "Azure DevOps kişisel erişim belirteci",
+ "de": "Azure DevOps persönliches Zugriffstoken",
+ "uk": "Персональний токен доступу Azure DevOps"
+ },
+ "GIT$AZURE_DEVOPS_HOST": {
+ "en": "Azure DevOps Organization",
+ "ja": "Azure DevOps 組織",
+ "zh-CN": "Azure DevOps 组织",
+ "zh-TW": "Azure DevOps 組織",
+ "ko-KR": "Azure DevOps 조직",
+ "no": "Azure DevOps organisasjon",
+ "it": "Organizzazione Azure DevOps",
+ "pt": "Organização do Azure DevOps",
+ "es": "Organización de Azure DevOps",
+ "ar": "مؤسسة Azure DevOps",
+ "fr": "Organisation Azure DevOps",
+ "tr": "Azure DevOps kuruluş",
+ "de": "Azure DevOps Organisation",
+ "uk": "Організація Azure DevOps"
+ },
+ "GIT$AZURE_DEVOPS_HOST_PLACEHOLDER": {
+ "en": "organization",
+ "ja": "組織",
+ "zh-CN": "组织",
+ "zh-TW": "組織",
+ "ko-KR": "조직",
+ "no": "organisasjon",
+ "it": "organizzazione",
+ "pt": "organização",
+ "es": "organización",
+ "ar": "مؤسسة",
+ "fr": "organisation",
+ "tr": "kuruluş/proje",
+ "de": "organisation/projekt",
+ "uk": "організація/проект"
+ },
+ "GIT$AZURE_DEVOPS_TOKEN_HELP": {
+ "en": "How to create an Azure DevOps token",
+ "ja": "Azure DevOps トークンの作成方法",
+ "zh-CN": "如何创建 Azure DevOps 令牌",
+ "zh-TW": "如何創建 Azure DevOps 權杖",
+ "ko-KR": "Azure DevOps 토큰 생성 방법",
+ "no": "Hvordan lage et Azure DevOps-token",
+ "it": "Come creare un token Azure DevOps",
+ "pt": "Como criar um token do Azure DevOps",
+ "es": "Cómo crear un token de Azure DevOps",
+ "ar": "كيفية إنشاء رمز Azure DevOps",
+ "fr": "Comment créer un jeton Azure DevOps",
+ "tr": "Azure DevOps belirteci nasıl oluşturulur",
+ "de": "Wie man ein Azure DevOps-Token erstellt",
+ "uk": "Як створити токен Azure DevOps"
+ },
"MICROAGENT$NO_REPOSITORY_FOUND": {
"en": "No repository found to launch microagent",
"ja": "マイクロエージェントを起動するためのリポジトリが見つかりません",
@@ -1232,20 +1312,20 @@
"uk": "Невірний JSON"
},
"HOME$CONNECT_PROVIDER_MESSAGE": {
- "en": "To get started with suggested tasks, please connect your GitHub, GitLab, or Bitbucket account.",
- "ja": "提案されたタスクを始めるには、GitHub、GitLab、またはBitbucketアカウントを接続してください。",
- "zh-CN": "要开始使用建议的任务,请连接您的GitHub、GitLab或Bitbucket账户。",
- "zh-TW": "要開始使用建議的任務,請連接您的GitHub、GitLab或Bitbucket帳戶。",
- "ko-KR": "제안된 작업을 시작하려면 GitHub, GitLab 또는 Bitbucket 계정을 연결하세요.",
- "no": "For å komme i gang med foreslåtte oppgaver, vennligst koble til GitHub, GitLab eller Bitbucket-kontoen din.",
- "it": "Per iniziare con le attività suggerite, collega il tuo account GitHub, GitLab o Bitbucket.",
- "pt": "Para começar com tarefas sugeridas, conecte sua conta GitHub, GitLab ou Bitbucket.",
- "es": "Para comenzar con las tareas sugeridas, conecte su cuenta de GitHub, GitLab o Bitbucket.",
- "ar": "للبدء بالمهام المقترحة، يرجى ربط حساب GitHub أو GitLab أو Bitbucket الخاص بك.",
- "fr": "Pour commencer avec les tâches suggérées, veuillez connecter votre compte GitHub, GitLab ou Bitbucket.",
- "tr": "Önerilen görevlerle başlamak için lütfen GitHub, GitLab veya Bitbucket hesabınızı bağlayın.",
- "de": "Um mit vorgeschlagenen Aufgaben zu beginnen, verbinden Sie bitte Ihr GitHub-, GitLab- oder Bitbucket-Konto.",
- "uk": "Щоб розпочати роботу з запропонованими завданнями, підключіть свій обліковий запис GitHub, GitLab або Bitbucket."
+ "en": "To get started with suggested tasks, please connect your GitHub, GitLab, Bitbucket, or Azure DevOps account.",
+ "ja": "提案されたタスクを始めるには、GitHub、GitLab、Bitbucket、またはAzure DevOpsアカウントを接続してください。",
+ "zh-CN": "要开始使用建议的任务,请连接您的GitHub、GitLab、Bitbucket或Azure DevOps账户。",
+ "zh-TW": "要開始使用建議的任務,請連接您的GitHub、GitLab、Bitbucket或Azure DevOps帳戶。",
+ "ko-KR": "제안된 작업을 시작하려면 GitHub, GitLab, Bitbucket 또는 Azure DevOps 계정을 연결하세요.",
+ "no": "For å komme i gang med foreslåtte oppgaver, vennligst koble til GitHub, GitLab, Bitbucket eller Azure DevOps-kontoen din.",
+ "it": "Per iniziare con le attività suggerite, collega il tuo account GitHub, GitLab, Bitbucket o Azure DevOps.",
+ "pt": "Para começar com tarefas sugeridas, conecte sua conta GitHub, GitLab, Bitbucket ou Azure DevOps.",
+ "es": "Para comenzar con las tareas sugeridas, conecte su cuenta de GitHub, GitLab, Bitbucket o Azure DevOps.",
+ "ar": "للبدء بالمهام المقترحة، يرجى ربط حساب GitHub أو GitLab أو Bitbucket أو Azure DevOps الخاص بك.",
+ "fr": "Pour commencer avec les tâches suggérées, veuillez connecter votre compte GitHub, GitLab, Bitbucket ou Azure DevOps.",
+ "tr": "Önerilen görevlerle başlamak için lütfen GitHub, GitLab, Bitbucket veya Azure DevOps hesabınızı bağlayın.",
+ "de": "Um mit vorgeschlagenen Aufgaben zu beginnen, verbinden Sie bitte Ihr GitHub-, GitLab-, Bitbucket- oder Azure DevOps-Konto.",
+ "uk": "Щоб розпочати роботу з запропонованими завданнями, підключіть свій обліковий запис GitHub, GitLab, Bitbucket або Azure DevOps."
},
"HOME$LETS_START_BUILDING": {
"en": "Let's Start Building!",
@@ -1871,6 +1951,22 @@
"de": "GitHub",
"uk": "GitHub"
},
+ "SETTINGS$AZURE_DEVOPS": {
+ "en": "Azure DevOps",
+ "ja": "Azure DevOps",
+ "zh-CN": "Azure DevOps",
+ "zh-TW": "Azure DevOps",
+ "ko-KR": "Azure DevOps",
+ "no": "Azure DevOps",
+ "it": "Azure DevOps",
+ "pt": "Azure DevOps",
+ "es": "Azure DevOps",
+ "ar": "Azure DevOps",
+ "fr": "Azure DevOps",
+ "tr": "Azure DevOps",
+ "de": "Azure DevOps",
+ "uk": "Azure DevOps"
+ },
"SETTINGS$SLACK": {
"en": "Slack",
"ja": "Slack",
@@ -7039,6 +7135,22 @@
"ja": "git フックを設定中...",
"uk": "Налаштування git-хуків..."
},
+ "STATUS$SETTING_UP_SKILLS": {
+ "en": "Setting up skills...",
+ "zh-CN": "正在设置技能...",
+ "zh-TW": "正在設置技能...",
+ "de": "Fähigkeiten werden eingerichtet...",
+ "ko-KR": "기술을 설정하는 중...",
+ "no": "Setter opp ferdigheter...",
+ "it": "Configurazione delle competenze...",
+ "pt": "Configurando habilidades...",
+ "es": "Configurando habilidades...",
+ "ar": "جاري إعداد المهارات...",
+ "fr": "Configuration des compétences...",
+ "tr": "Yetenekler ayarlanıyor...",
+ "ja": "スキルを設定中...",
+ "uk": "Налаштування навичок..."
+ },
"ACCOUNT_SETTINGS_MODAL$DISCONNECT": {
"en": "Disconnect",
"es": "Desconectar",
@@ -14991,6 +15103,22 @@
"de": "Einen Plan erstellen",
"uk": "Створити план"
},
+ "COMMON$TASKS": {
+ "en": "Tasks",
+ "ja": "タスク",
+ "zh-CN": "任务",
+ "zh-TW": "任務",
+ "ko-KR": "작업",
+ "no": "Oppgaver",
+ "it": "Attività",
+ "pt": "Tarefas",
+ "es": "Tareas",
+ "ar": "مهام",
+ "fr": "Tâches",
+ "tr": "Görevler",
+ "de": "Aufgaben",
+ "uk": "Завдання"
+ },
"COMMON$PLAN_MD": {
"en": "Plan.md",
"ja": "Plan.md",
@@ -15086,5 +15214,69 @@
"tr": "Bir plan üzerinde çalışalım",
"de": "Lassen Sie uns an einem Plan arbeiten",
"uk": "Давайте розробимо план"
+ },
+ "COMMON$CODE_AGENT_DESCRIPTION": {
+ "en": "Write, edit, and debug with AI assistance in real time.",
+ "ja": "AIの支援をリアルタイムで受けながら、コードの作成、編集、デバッグを行いましょう。",
+ "zh-CN": "实时在 AI 协助下编写、编辑和调试。",
+ "zh-TW": "即時在 AI 協助下編寫、編輯和除錯。",
+ "ko-KR": "AI의 지원을 받아 실시간으로 작성, 편집 및 디버깅하세요.",
+ "no": "Skriv, rediger og feilsøk med AI-assistanse i sanntid.",
+ "it": "Scrivi, modifica e esegui il debug con assistenza AI in tempo reale.",
+ "pt": "Escreva, edite e depure com assistência de IA em tempo real.",
+ "es": "Escribe, edita y depura con ayuda de IA en tiempo real.",
+ "ar": "اكتب وعدّل وصحّح الأخطاء بمساعدة الذكاء الاصطناعي في الوقت الفعلي.",
+ "fr": "Rédigez, modifiez et déboguez avec l’aide de l’IA en temps réel.",
+ "tr": "AI desteğiyle gerçek zamanlı olarak yazın, düzenleyin ve hata ayıklayın.",
+ "de": "Schreiben, bearbeiten und debuggen Sie mit KI-Unterstützung in Echtzeit.",
+ "uk": "Пишіть, редагуйте та налагоджуйте з підтримкою ШІ у реальному часі."
+ },
+ "COMMON$PLAN_AGENT_DESCRIPTION": {
+ "en": "Outline goals, structure tasks, and map your next steps.",
+ "ja": "目標を明確にし、タスクを構造化し、次のステップを計画しましょう。",
+ "zh-CN": "概述目标、结构化任务,并规划下一步。",
+ "zh-TW": "概述目標、結構化任務,並規劃下一步。",
+ "ko-KR": "목표를 개요하고, 작업을 구조화하며, 다음 단계를 구상하세요.",
+ "no": "Skisser mål, strukturer oppgaver og planlegg dine neste steg.",
+ "it": "Definisci gli obiettivi, struttura le attività e pianifica i prossimi passi.",
+ "pt": "Esboce objetivos, estruture tarefas e trace seus próximos passos.",
+ "es": "Define objetivos, estructura tareas y planifica tus próximos pasos.",
+ "ar": "حدد الأهداف، نظم المهام، وارسم خطواتك التالية.",
+ "fr": "Dressez des objectifs, structurez vos tâches et planifiez vos prochaines étapes.",
+ "tr": "Hedefleri belirtin, görevleri yapılandırın ve sonraki adımlarınızı belirleyin.",
+ "de": "Umreißen Sie Ziele, strukturieren Sie Aufgaben und planen Sie Ihre nächsten Schritte.",
+ "uk": "Окресліть цілі, структуруйте завдання та сплануйте наступні кроки."
+ },
+ "PLANNING_AGENTT$PLANNING_AGENT_INITIALIZED": {
+ "en": "Planning agent initialized",
+ "ja": "プランニングエージェントが初期化されました",
+ "zh-CN": "规划代理已初始化",
+ "zh-TW": "規劃代理已初始化",
+ "ko-KR": "계획 에이전트가 초기화되었습니다",
+ "no": "Planleggingsagent er initialisert",
+ "it": "Agente di pianificazione inizializzato",
+ "pt": "Agente de planejamento inicializado",
+ "es": "Agente de planificación inicializado",
+ "ar": "تم تهيئة وكيل التخطيط",
+ "fr": "Agent de planification initialisé",
+ "tr": "Planlama ajanı başlatıldı",
+ "de": "Planungsagent wurde initialisiert",
+ "uk": "Агент планування ініціалізовано"
+ },
+ "OBSERVATION_MESSAGE$SKILL_READY": {
+ "en": "Skill Ready",
+ "ja": "スキル準備完了",
+ "zh-CN": "技能已就绪",
+ "zh-TW": "技能已就緒",
+ "ko-KR": "스킬 준비 완료",
+ "no": "Ferdighet klar",
+ "it": "Abilità pronta",
+ "pt": "Habilidade pronta",
+ "es": "Habilidad lista",
+ "ar": "المهارة جاهزة",
+ "fr": "Compétence prête",
+ "tr": "Yetenek hazır",
+ "de": "Fähigkeit bereit",
+ "uk": "Навичка готова"
}
}
diff --git a/frontend/src/icons/loading.svg b/frontend/src/icons/loading.svg
new file mode 100644
index 000000000000..2da678957f05
--- /dev/null
+++ b/frontend/src/icons/loading.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/src/icons/u-check-circle.svg b/frontend/src/icons/u-check-circle.svg
new file mode 100644
index 000000000000..e98e0c8f3714
--- /dev/null
+++ b/frontend/src/icons/u-check-circle.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/src/icons/u-circle.svg b/frontend/src/icons/u-circle.svg
new file mode 100644
index 000000000000..c562817d9b3e
--- /dev/null
+++ b/frontend/src/icons/u-circle.svg
@@ -0,0 +1,3 @@
+
+
+
diff --git a/frontend/src/mocks/mock-ws-helpers.ts b/frontend/src/mocks/mock-ws-helpers.ts
index c3205b77311b..ae4e214943fb 100644
--- a/frontend/src/mocks/mock-ws-helpers.ts
+++ b/frontend/src/mocks/mock-ws-helpers.ts
@@ -165,7 +165,7 @@ export const createMockExecuteBashActionEvent = (
* Creates a mock ExecuteBashObservation event for testing terminal output handling
*/
export const createMockExecuteBashObservationEvent = (
- output: string = "total 24\ndrwxr-xr-x 5 user staff 160 Jan 10 12:00 .",
+ content: string = "total 24\ndrwxr-xr-x 5 user staff 160 Jan 10 12:00 .",
command: string = "ls -la",
) => ({
id: "bash-obs-123",
@@ -175,7 +175,7 @@ export const createMockExecuteBashObservationEvent = (
tool_call_id: "bash-call-456",
observation: {
kind: "ExecuteBashObservation",
- output,
+ content: [{ type: "text", text: content }],
command,
exit_code: 0,
error: false,
@@ -184,3 +184,55 @@ export const createMockExecuteBashObservationEvent = (
},
action_id: "bash-action-123",
});
+
+/**
+ * Creates a mock BrowserObservation event for testing browser state handling
+ */
+export const createMockBrowserObservationEvent = (
+ screenshotData: string | null = "base64-screenshot-data",
+ output: string = "Browser action completed",
+ error: string | null = null,
+) => ({
+ id: "browser-obs-123",
+ timestamp: new Date().toISOString(),
+ source: "environment",
+ tool_name: "browser_navigate",
+ tool_call_id: "browser-call-456",
+ observation: {
+ kind: "BrowserObservation",
+ output,
+ error,
+ screenshot_data: screenshotData,
+ },
+ action_id: "browser-action-123",
+});
+
+/**
+ * Creates a mock BrowserNavigateAction event for testing browser URL extraction
+ */
+export const createMockBrowserNavigateActionEvent = (
+ url: string = "https://example.com",
+) => ({
+ id: "browser-action-123",
+ timestamp: new Date().toISOString(),
+ source: "agent",
+ thought: [{ type: "text", text: "Navigating to URL" }],
+ thinking_blocks: [],
+ action: {
+ kind: "BrowserNavigateAction",
+ url,
+ new_tab: false,
+ },
+ tool_name: "browser_navigate",
+ tool_call_id: "browser-call-456",
+ tool_call: {
+ id: "browser-call-456",
+ type: "function",
+ function: {
+ name: "browser_navigate",
+ arguments: JSON.stringify({ url, new_tab: false }),
+ },
+ },
+ llm_response_id: "llm-response-789",
+ security_risk: { level: "low" },
+});
diff --git a/frontend/src/routes/accept-tos.tsx b/frontend/src/routes/accept-tos.tsx
index 773f7ba2eeb6..f723f2a5f659 100644
--- a/frontend/src/routes/accept-tos.tsx
+++ b/frontend/src/routes/accept-tos.tsx
@@ -10,6 +10,7 @@ import { BrandButton } from "#/components/features/settings/brand-button";
import { handleCaptureConsent } from "#/utils/handle-capture-consent";
import { openHands } from "#/api/open-hands-axios";
import { ModalBackdrop } from "#/components/shared/modals/modal-backdrop";
+import { useTracking } from "#/hooks/use-tracking";
export default function AcceptTOS() {
const posthog = usePostHog();
@@ -17,6 +18,7 @@ export default function AcceptTOS() {
const navigate = useNavigate();
const [searchParams] = useSearchParams();
const [isTosAccepted, setIsTosAccepted] = React.useState(false);
+ const { trackUserSignupCompleted } = useTracking();
// Get the redirect URL from the query parameters
const redirectUrl = searchParams.get("redirect_url") || "/";
@@ -33,6 +35,9 @@ export default function AcceptTOS() {
});
},
onSuccess: (response) => {
+ // Track user signup completion
+ trackUserSignupCompleted();
+
// Get the redirect URL from the response
const finalRedirectUrl = response.data.redirect_url || redirectUrl;
diff --git a/frontend/src/routes/app-settings.tsx b/frontend/src/routes/app-settings.tsx
index 4206141cbe51..e825bb3e0fe4 100644
--- a/frontend/src/routes/app-settings.tsx
+++ b/frontend/src/routes/app-settings.tsx
@@ -239,18 +239,20 @@ function AppSettingsScreen() {
)}
-
+ {!settings?.V1_ENABLED && (
+
+ )}
diff --git a/frontend/src/routes/billing.tsx b/frontend/src/routes/billing.tsx
index fdd410f6c4e3..05d23fe276d6 100644
--- a/frontend/src/routes/billing.tsx
+++ b/frontend/src/routes/billing.tsx
@@ -7,21 +7,36 @@ import {
displaySuccessToast,
} from "#/utils/custom-toast-handlers";
import { I18nKey } from "#/i18n/declaration";
+import { useTracking } from "#/hooks/use-tracking";
function BillingSettingsScreen() {
const { t } = useTranslation();
const [searchParams, setSearchParams] = useSearchParams();
+ const { trackCreditsPurchased } = useTracking();
const checkoutStatus = searchParams.get("checkout");
React.useEffect(() => {
if (checkoutStatus === "success") {
+ // Get purchase details from URL params
+ const amount = searchParams.get("amount");
+ const sessionId = searchParams.get("session_id");
+
+ // Track credits purchased if we have the necessary data
+ if (amount && sessionId) {
+ trackCreditsPurchased({
+ amountUsd: parseFloat(amount),
+ stripeSessionId: sessionId,
+ });
+ }
+
displaySuccessToast(t(I18nKey.PAYMENT$SUCCESS));
+
+ setSearchParams({});
} else if (checkoutStatus === "cancel") {
displayErrorToast(t(I18nKey.PAYMENT$CANCELLED));
+ setSearchParams({});
}
-
- setSearchParams({});
- }, [checkoutStatus]);
+ }, [checkoutStatus, searchParams, setSearchParams, t, trackCreditsPurchased]);
return ;
}
diff --git a/frontend/src/routes/git-settings.tsx b/frontend/src/routes/git-settings.tsx
index 5f898c36a2d7..aff64afb55e6 100644
--- a/frontend/src/routes/git-settings.tsx
+++ b/frontend/src/routes/git-settings.tsx
@@ -7,6 +7,7 @@ import { useLogout } from "#/hooks/mutation/use-logout";
import { GitHubTokenInput } from "#/components/features/settings/git-settings/github-token-input";
import { GitLabTokenInput } from "#/components/features/settings/git-settings/gitlab-token-input";
import { BitbucketTokenInput } from "#/components/features/settings/git-settings/bitbucket-token-input";
+import { AzureDevOpsTokenInput } from "#/components/features/settings/git-settings/azure-devops-token-input";
import { ConfigureGitHubRepositoriesAnchor } from "#/components/features/settings/git-settings/configure-github-repositories-anchor";
import { InstallSlackAppAnchor } from "#/components/features/settings/git-settings/install-slack-app-anchor";
import { I18nKey } from "#/i18n/declaration";
@@ -37,6 +38,8 @@ function GitSettingsScreen() {
React.useState(false);
const [bitbucketTokenInputHasValue, setBitbucketTokenInputHasValue] =
React.useState(false);
+ const [azureDevOpsTokenInputHasValue, setAzureDevOpsTokenInputHasValue] =
+ React.useState(false);
const [githubHostInputHasValue, setGithubHostInputHasValue] =
React.useState(false);
@@ -44,15 +47,19 @@ function GitSettingsScreen() {
React.useState(false);
const [bitbucketHostInputHasValue, setBitbucketHostInputHasValue] =
React.useState(false);
+ const [azureDevOpsHostInputHasValue, setAzureDevOpsHostInputHasValue] =
+ React.useState(false);
const existingGithubHost = settings?.PROVIDER_TOKENS_SET.github;
const existingGitlabHost = settings?.PROVIDER_TOKENS_SET.gitlab;
const existingBitbucketHost = settings?.PROVIDER_TOKENS_SET.bitbucket;
+ const existingAzureDevOpsHost = settings?.PROVIDER_TOKENS_SET.azure_devops;
const isSaas = config?.APP_MODE === "saas";
const isGitHubTokenSet = providers.includes("github");
const isGitLabTokenSet = providers.includes("gitlab");
const isBitbucketTokenSet = providers.includes("bitbucket");
+ const isAzureDevOpsTokenSet = providers.includes("azure_devops");
const formAction = async (formData: FormData) => {
const disconnectButtonClicked =
@@ -67,16 +74,21 @@ function GitSettingsScreen() {
const gitlabToken = formData.get("gitlab-token-input")?.toString() || "";
const bitbucketToken =
formData.get("bitbucket-token-input")?.toString() || "";
+ const azureDevOpsToken =
+ formData.get("azure-devops-token-input")?.toString() || "";
const githubHost = formData.get("github-host-input")?.toString() || "";
const gitlabHost = formData.get("gitlab-host-input")?.toString() || "";
const bitbucketHost =
formData.get("bitbucket-host-input")?.toString() || "";
+ const azureDevOpsHost =
+ formData.get("azure-devops-host-input")?.toString() || "";
// Create providers object with all tokens
const providerTokens: Record = {
github: { token: githubToken, host: githubHost },
gitlab: { token: gitlabToken, host: gitlabHost },
bitbucket: { token: bitbucketToken, host: bitbucketHost },
+ azure_devops: { token: azureDevOpsToken, host: azureDevOpsHost },
};
saveGitProviders(
@@ -95,9 +107,11 @@ function GitSettingsScreen() {
setGithubTokenInputHasValue(false);
setGitlabTokenInputHasValue(false);
setBitbucketTokenInputHasValue(false);
+ setAzureDevOpsTokenInputHasValue(false);
setGithubHostInputHasValue(false);
setGitlabHostInputHasValue(false);
setBitbucketHostInputHasValue(false);
+ setAzureDevOpsHostInputHasValue(false);
},
},
);
@@ -107,9 +121,11 @@ function GitSettingsScreen() {
!githubTokenInputHasValue &&
!gitlabTokenInputHasValue &&
!bitbucketTokenInputHasValue &&
+ !azureDevOpsTokenInputHasValue &&
!githubHostInputHasValue &&
!gitlabHostInputHasValue &&
- !bitbucketHostInputHasValue;
+ !bitbucketHostInputHasValue &&
+ !azureDevOpsHostInputHasValue;
const shouldRenderExternalConfigureButtons = isSaas && config.APP_SLUG;
const shouldRenderProjectManagementIntegrations =
config?.FEATURE_FLAGS?.ENABLE_JIRA ||
@@ -196,6 +212,20 @@ function GitSettingsScreen() {
bitbucketHostSet={existingBitbucketHost}
/>
)}
+
+ {!isSaas && (
+ {
+ setAzureDevOpsTokenInputHasValue(!!value);
+ }}
+ onAzureDevOpsHostChange={(value) => {
+ setAzureDevOpsHostInputHasValue(!!value);
+ }}
+ azureDevOpsHostSet={existingAzureDevOpsHost}
+ />
+ )}
)}
@@ -211,7 +241,10 @@ function GitSettingsScreen() {
type="submit"
variant="secondary"
isDisabled={
- !isGitHubTokenSet && !isGitLabTokenSet && !isBitbucketTokenSet
+ !isGitHubTokenSet &&
+ !isGitLabTokenSet &&
+ !isBitbucketTokenSet &&
+ !isAzureDevOpsTokenSet
}
>
{t(I18nKey.GIT$DISCONNECT_TOKENS)}
diff --git a/frontend/src/routes/llm-settings.tsx b/frontend/src/routes/llm-settings.tsx
index ed89d03882bc..81c864fc2540 100644
--- a/frontend/src/routes/llm-settings.tsx
+++ b/frontend/src/routes/llm-settings.tsx
@@ -102,10 +102,25 @@ function LlmSettingsScreen() {
: (settings?.SECURITY_ANALYZER ?? DEFAULT_SETTINGS.SECURITY_ANALYZER),
);
+ const [selectedProvider, setSelectedProvider] = React.useState
(
+ null,
+ );
+
const modelsAndProviders = organizeModelsAndProviders(
resources?.models || [],
);
+ // Determine if we should hide the API key input and use OpenHands-managed key (when using OpenHands provider in SaaS mode)
+ const currentModel = currentSelectedModel || settings?.LLM_MODEL;
+ const isOpenHandsProvider =
+ (view === "basic" && selectedProvider === "openhands") ||
+ (view === "advanced" && currentModel?.startsWith("openhands/"));
+ const isSaasMode = config?.APP_MODE === "saas";
+ const shouldUseOpenHandsKey = isOpenHandsProvider && isSaasMode;
+
+ // Determine if we should hide the agent dropdown when V1 conversation API is enabled
+ const isV1Enabled = settings?.V1_ENABLED;
+
React.useEffect(() => {
const determineWhetherToToggleAdvancedSettings = () => {
if (resources && settings) {
@@ -196,10 +211,13 @@ function LlmSettingsScreen() {
const fullLlmModel = provider && model && `${provider}/${model}`;
+ // Use OpenHands-managed key for OpenHands provider in SaaS mode
+ const finalApiKey = shouldUseOpenHandsKey ? null : apiKey;
+
saveSettings(
{
LLM_MODEL: fullLlmModel,
- llm_api_key: apiKey || null,
+ llm_api_key: finalApiKey || null,
SEARCH_API_KEY: searchApiKey || "",
CONFIRMATION_MODE: confirmationMode,
SECURITY_ANALYZER:
@@ -244,11 +262,14 @@ function LlmSettingsScreen() {
.get("security-analyzer-input")
?.toString();
+ // Use OpenHands-managed key for OpenHands provider in SaaS mode
+ const finalApiKey = shouldUseOpenHandsKey ? null : apiKey;
+
saveSettings(
{
LLM_MODEL: model,
LLM_BASE_URL: baseUrl,
- llm_api_key: apiKey || null,
+ llm_api_key: finalApiKey || null,
SEARCH_API_KEY: searchApiKey || "",
AGENT: agent,
CONFIRMATION_MODE: confirmationMode,
@@ -282,7 +303,10 @@ function LlmSettingsScreen() {
});
};
- const handleModelIsDirty = (model: string | null) => {
+ const handleModelIsDirty = (
+ provider: string | null,
+ model: string | null,
+ ) => {
// openai providers are special case; see ModelSelector
// component for details
const modelIsDirty = model !== settings?.LLM_MODEL.replace("openai/", "");
@@ -293,6 +317,15 @@ function LlmSettingsScreen() {
// Track the currently selected model for help text display
setCurrentSelectedModel(model);
+ setSelectedProvider(provider);
+ };
+
+ const onDefaultValuesChanged = (
+ provider: string | null,
+ model: string | null,
+ ) => {
+ setSelectedProvider(provider);
+ setCurrentSelectedModel(model);
};
const handleApiKeyIsDirty = (apiKey: string) => {
@@ -463,6 +496,7 @@ function LlmSettingsScreen() {
models={modelsAndProviders}
currentModel={settings.LLM_MODEL || DEFAULT_OPENHANDS_MODEL}
onChange={handleModelIsDirty}
+ onDefaultValuesChanged={onDefaultValuesChanged}
wrapperClassName="!flex-col !gap-6"
/>
{(settings.LLM_MODEL?.startsWith("openhands/") ||
@@ -472,27 +506,31 @@ function LlmSettingsScreen() {
>
)}
- " : ""}
- onChange={handleApiKeyIsDirty}
- startContent={
- settings.LLM_API_KEY_SET && (
-
- )
- }
- />
+ {!shouldUseOpenHandsKey && (
+ <>
+ " : ""}
+ onChange={handleApiKeyIsDirty}
+ startContent={
+ settings.LLM_API_KEY_SET && (
+
+ )
+ }
+ />
-
+
+ >
+ )}
)}
@@ -527,26 +565,30 @@ function LlmSettingsScreen() {
onChange={handleBaseUrlIsDirty}
/>
-
" : ""}
- onChange={handleApiKeyIsDirty}
- startContent={
- settings.LLM_API_KEY_SET && (
-
- )
- }
- />
-
+ {!shouldUseOpenHandsKey && (
+ <>
+ " : ""}
+ onChange={handleApiKeyIsDirty}
+ startContent={
+ settings.LLM_API_KEY_SET && (
+
+ )
+ }
+ />
+
+ >
+ )}
{config?.APP_MODE !== "saas" && (
<>
@@ -573,21 +615,23 @@ function LlmSettingsScreen() {
href="https://tavily.com/"
/>
- ({
- key: agent,
- label: agent, // TODO: Add i18n support for agent names
- })) || []
- }
- defaultSelectedKey={settings.AGENT}
- isClearable={false}
- onInputChange={handleAgentIsDirty}
- wrapperClassName="w-full max-w-[680px]"
- />
+ {!isV1Enabled && (
+ ({
+ key: agent,
+ label: agent, // TODO: Add i18n support for agent names
+ })) || []
+ }
+ defaultSelectedKey={settings.AGENT}
+ isClearable={false}
+ onInputChange={handleAgentIsDirty}
+ wrapperClassName="w-full max-w-[680px]"
+ />
+ )}
>
)}
diff --git a/frontend/src/routes/planner-tab.tsx b/frontend/src/routes/planner-tab.tsx
index a3002c665119..2e5af229efbe 100644
--- a/frontend/src/routes/planner-tab.tsx
+++ b/frontend/src/routes/planner-tab.tsx
@@ -1,49 +1,31 @@
+import React from "react";
import { useTranslation } from "react-i18next";
-import Markdown from "react-markdown";
-import remarkGfm from "remark-gfm";
-import remarkBreaks from "remark-breaks";
import { I18nKey } from "#/i18n/declaration";
import LessonPlanIcon from "#/icons/lesson-plan.svg?react";
import { useConversationStore } from "#/state/conversation-store";
-import { code } from "#/components/features/markdown/code";
-import { ul, ol } from "#/components/features/markdown/list";
-import { paragraph } from "#/components/features/markdown/paragraph";
-import { anchor } from "#/components/features/markdown/anchor";
-import {
- h1,
- h2,
- h3,
- h4,
- h5,
- h6,
-} from "#/components/features/markdown/headings";
+import { useScrollToBottom } from "#/hooks/use-scroll-to-bottom";
+import { MarkdownRenderer } from "#/components/features/markdown/markdown-renderer";
+import { useHandlePlanClick } from "#/hooks/use-handle-plan-click";
function PlannerTab() {
const { t } = useTranslation();
+ const { scrollRef: scrollContainerRef, onChatBodyScroll } = useScrollToBottom(
+ React.useRef(null),
+ );
- const { planContent, setConversationMode } = useConversationStore();
+ const { planContent } = useConversationStore();
+ const { handlePlanClick } = useHandlePlanClick();
- if (planContent) {
+ if (planContent !== null && planContent !== undefined) {
return (
-
-
+ onChatBodyScroll(e.currentTarget)}
+ className="flex flex-col w-full h-full p-4 overflow-auto"
+ >
+
{planContent}
-
+
);
}
@@ -56,7 +38,7 @@ function PlannerTab() {
setConversationMode("plan")}
+ onClick={handlePlanClick}
className="flex w-[164px] h-[40px] p-2 justify-center items-center shrink-0 rounded-lg bg-white overflow-hidden text-black text-ellipsis font-sans text-[16px] not-italic font-normal leading-[20px] hover:cursor-pointer hover:opacity-80"
>
{t(I18nKey.COMMON$CREATE_A_PLAN)}
diff --git a/frontend/src/routes/root-layout.tsx b/frontend/src/routes/root-layout.tsx
index 930451dae9ce..264ae541c88d 100644
--- a/frontend/src/routes/root-layout.tsx
+++ b/frontend/src/routes/root-layout.tsx
@@ -25,6 +25,7 @@ import { useIsOnTosPage } from "#/hooks/use-is-on-tos-page";
import { useAutoLogin } from "#/hooks/use-auto-login";
import { useAuthCallback } from "#/hooks/use-auth-callback";
import { useReoTracking } from "#/hooks/use-reo-tracking";
+import { useSyncPostHogConsent } from "#/hooks/use-sync-posthog-consent";
import { LOCAL_STORAGE_KEYS } from "#/utils/local-storage";
import { EmailVerificationGuard } from "#/components/features/guards/email-verification-guard";
import { MaintenanceBanner } from "#/components/features/maintenance/maintenance-banner";
@@ -100,6 +101,9 @@ export default function MainApp() {
// Initialize Reo.dev tracking in SaaS mode
useReoTracking();
+ // Sync PostHog opt-in/out state with backend setting on mount
+ useSyncPostHogConsent();
+
React.useEffect(() => {
// Don't change language when on TOS page
if (!isOnTosPage && settings?.LANGUAGE) {
diff --git a/frontend/src/routes/settings.tsx b/frontend/src/routes/settings.tsx
index 19370245b330..2d7f7cb6b6a1 100644
--- a/frontend/src/routes/settings.tsx
+++ b/frontend/src/routes/settings.tsx
@@ -34,6 +34,15 @@ export const clientLoader = async ({ request }: Route.ClientLoaderArgs) => {
return redirect("/settings");
}
+ // If LLM settings are hidden and user tries to access the LLM settings page
+ if (config?.FEATURE_FLAGS?.HIDE_LLM_SETTINGS && pathname === "/settings") {
+ // Redirect to the first available settings page
+ if (isSaas) {
+ return redirect("/settings/user");
+ }
+ return redirect("/settings/mcp");
+ }
+
return null;
};
@@ -52,13 +61,24 @@ function SettingsScreen() {
} else {
items.push(...OSS_NAV_ITEMS);
}
+
+ // Filter out LLM settings if the feature flag is enabled
+ if (config?.FEATURE_FLAGS?.HIDE_LLM_SETTINGS) {
+ return items.filter((item) => item.to !== "/settings");
+ }
+
return items;
- }, [isSaas]);
+ }, [isSaas, config?.FEATURE_FLAGS?.HIDE_LLM_SETTINGS]);
// Current section title for the main content area
const currentSectionTitle = useMemo(() => {
const currentItem = navItems.find((item) => item.to === location.pathname);
- return currentItem ? currentItem.text : "SETTINGS$NAV_LLM";
+ if (currentItem) {
+ return currentItem.text;
+ }
+
+ // Default to the first available navigation item if current page is not found
+ return navItems.length > 0 ? navItems[0].text : "SETTINGS$TITLE";
}, [navItems, location.pathname]);
return (
diff --git a/frontend/src/services/settings.ts b/frontend/src/services/settings.ts
index f7cad15b43a9..7c648247d69e 100644
--- a/frontend/src/services/settings.ts
+++ b/frontend/src/services/settings.ts
@@ -31,6 +31,7 @@ export const DEFAULT_SETTINGS: Settings = {
},
GIT_USER_NAME: "openhands",
GIT_USER_EMAIL: "openhands@all-hands.dev",
+ V1_ENABLED: false,
};
/**
diff --git a/frontend/src/settings-service/settings.types.ts b/frontend/src/settings-service/settings.types.ts
index bdd1610f4923..c6d33a7ee5de 100644
--- a/frontend/src/settings-service/settings.types.ts
+++ b/frontend/src/settings-service/settings.types.ts
@@ -35,6 +35,7 @@ export type ApiSettings = {
email_verified?: boolean;
git_user_name?: string;
git_user_email?: string;
+ v1_enabled?: boolean;
};
export type PostApiSettings = ApiSettings & {
diff --git a/frontend/src/state/conversation-store.ts b/frontend/src/state/conversation-store.ts
index fc6868cc1acd..a8edd16f6a44 100644
--- a/frontend/src/state/conversation-store.ts
+++ b/frontend/src/state/conversation-store.ts
@@ -30,6 +30,7 @@ interface ConversationState {
hasRightPanelToggled: boolean;
planContent: string | null;
conversationMode: ConversationMode;
+ subConversationTaskId: string | null; // Task ID for sub-conversation creation
}
interface ConversationActions {
@@ -54,14 +55,54 @@ interface ConversationActions {
resetConversationState: () => void;
setHasRightPanelToggled: (hasRightPanelToggled: boolean) => void;
setConversationMode: (conversationMode: ConversationMode) => void;
+ setSubConversationTaskId: (taskId: string | null) => void;
+ setPlanContent: (planContent: string | null) => void;
}
type ConversationStore = ConversationState & ConversationActions;
-// Helper function to get initial right panel state from localStorage
+const getConversationIdFromLocation = (): string | null => {
+ if (typeof window === "undefined") {
+ return null;
+ }
+
+ const match = window.location.pathname.match(/\/conversations\/([^/]+)/);
+ return match ? match[1] : null;
+};
+
+const parseStoredBoolean = (value: string | null): boolean | null => {
+ if (value === null) {
+ return null;
+ }
+
+ try {
+ return JSON.parse(value);
+ } catch {
+ return null;
+ }
+};
+
const getInitialRightPanelState = (): boolean => {
- const stored = localStorage.getItem("conversation-right-panel-shown");
- return stored !== null ? JSON.parse(stored) : true;
+ if (typeof window === "undefined") {
+ return true;
+ }
+
+ const conversationId = getConversationIdFromLocation();
+ const keysToCheck = conversationId
+ ? [`conversation-right-panel-shown-${conversationId}`]
+ : [];
+
+ // Fallback to legacy global key for users who haven't switched tabs yet
+ keysToCheck.push("conversation-right-panel-shown");
+
+ for (const key of keysToCheck) {
+ const parsed = parseStoredBoolean(localStorage.getItem(key));
+ if (parsed !== null) {
+ return parsed;
+ }
+ }
+
+ return true;
};
export const useConversationStore = create()(
@@ -79,92 +120,9 @@ export const useConversationStore = create()(
submittedMessage: null,
shouldHideSuggestions: false,
hasRightPanelToggled: true,
- planContent: `
-# Improve Developer Onboarding and Examples
-
-## Overview
-
-Based on the analysis of Browser-Use's current documentation and examples, this plan addresses gaps in developer onboarding by creating a progressive learning path, troubleshooting resources, and practical examples that address real-world scenarios (like the LM Studio/local LLM integration issues encountered).
-
-## Current State Analysis
-
-**Strengths:**
-
-- Good quickstart documentation in \`docs/quickstart.mdx\`
-- Extensive examples across multiple categories (60+ example files)
-- Well-structured docs with multiple LLM provider examples
-- Active community support via Discord
-
-**Gaps Identified:**
-
-- No progressive tutorial series that builds complexity gradually
-- Limited troubleshooting documentation for common issues
-- Sparse comments in example files explaining what's happening
-- Local LLM setup (Ollama/LM Studio) not prominently featured
-- No "first 10 minutes" success path
-- Missing visual/conceptual architecture guides for beginners
-- Error messages don't always point to solutions
-
-## Proposed Improvements
-
-### 1. Create Interactive Tutorial Series (\`examples/tutorials/\`)
-
-**New folder structure:**
-
-\`\`\`
-examples/tutorials/
-├── README.md # Tutorial overview and prerequisites
-├── 00_hello_world.py # Absolute minimal example
-├── 01_your_first_search.py # Basic search with detailed comments
-├── 02_understanding_actions.py # How actions work
-├── 03_data_extraction_basics.py # Extract data step-by-step
-├── 04_error_handling.py # Common errors and solutions
-├── 05_custom_tools_intro.py # First custom tool
-├── 06_local_llm_setup.py # Ollama/LM Studio complete guide
-└── 07_debugging_tips.py # Debugging strategies
-\`\`\`
-
-**Key Features:**
-
-- Each file 50–80 lines max
-- Extensive inline comments explaining every concept
-- Clear learning objectives at the top of each file
-- "What you'll learn" and "Prerequisites" sections
-- Common pitfalls highlighted
-- Expected output shown in comments
-
-### 2. Troubleshooting Guide (\`docs/troubleshooting.mdx\`)
-
-**Sections:**
-
-- Installation issues (Chromium, dependencies, virtual environments)
-- LLM provider connection errors (API keys, timeouts, rate limits)
-- Local LLM setup (Ollama vs LM Studio, model compatibility)
-- Browser automation issues (element not found, timeout errors)
-- Common error messages with solutions
-- Performance optimization tips
-- When to ask for help (Discord/GitHub)
-
-**Format:**
-
-**Error: "LLM call timed out after 60 seconds"**
-
-**What it means:**
-The model took too long to respond
-
-**Common causes:**
-
-1. Model is too slow for the task
-2. LM Studio/Ollama not responding properly
-3. Complex page overwhelming the model
-
-**Solutions:**
-
-- Use flash_mode for faster execution
-- Try a faster model (Gemini Flash, GPT-4 Turbo Mini)
-- Simplify the task
-- Check model server logs`,
+ planContent: null,
conversationMode: "code",
+ subConversationTaskId: null,
// Actions
setIsRightPanelShown: (isRightPanelShown) =>
@@ -296,13 +254,28 @@ The model took too long to respond
set({ submittedMessage }, false, "setSubmittedMessage"),
resetConversationState: () =>
- set({ shouldHideSuggestions: false }, false, "resetConversationState"),
+ set(
+ {
+ shouldHideSuggestions: false,
+ conversationMode: "code",
+ subConversationTaskId: null,
+ planContent: null,
+ },
+ false,
+ "resetConversationState",
+ ),
setHasRightPanelToggled: (hasRightPanelToggled) =>
set({ hasRightPanelToggled }, false, "setHasRightPanelToggled"),
setConversationMode: (conversationMode) =>
set({ conversationMode }, false, "setConversationMode"),
+
+ setSubConversationTaskId: (subConversationTaskId) =>
+ set({ subConversationTaskId }, false, "setSubConversationTaskId"),
+
+ setPlanContent: (planContent) =>
+ set({ planContent }, false, "setPlanContent"),
}),
{
name: "conversation-store",
diff --git a/frontend/src/stores/use-event-store.ts b/frontend/src/stores/use-event-store.ts
index 307f4ced0d7b..2d8ecf0a3b6c 100644
--- a/frontend/src/stores/use-event-store.ts
+++ b/frontend/src/stores/use-event-store.ts
@@ -5,7 +5,9 @@ import { OpenHandsParsedEvent } from "#/types/core";
import { isV1Event } from "#/types/v1/type-guards";
// While we transition to v1 events, our store can handle both v0 and v1 events
-type OHEvent = OpenHandsEvent | OpenHandsParsedEvent;
+type OHEvent = (OpenHandsEvent | OpenHandsParsedEvent) & {
+ isFromPlanningAgent?: boolean;
+};
interface EventState {
events: OHEvent[];
diff --git a/frontend/src/types/core/actions.ts b/frontend/src/types/core/actions.ts
index 89852f16e31e..bb80971e3285 100644
--- a/frontend/src/types/core/actions.ts
+++ b/frontend/src/types/core/actions.ts
@@ -31,8 +31,7 @@ export interface CommandAction extends OpenHandsActionEvent<"run"> {
};
}
-export interface AssistantMessageAction
- extends OpenHandsActionEvent<"message"> {
+export interface AssistantMessageAction extends OpenHandsActionEvent<"message"> {
source: "agent";
args: {
thought: string;
@@ -87,8 +86,7 @@ export interface BrowseAction extends OpenHandsActionEvent<"browse"> {
};
}
-export interface BrowseInteractiveAction
- extends OpenHandsActionEvent<"browse_interactive"> {
+export interface BrowseInteractiveAction extends OpenHandsActionEvent<"browse_interactive"> {
source: "agent";
timeout: number;
args: {
@@ -162,8 +160,7 @@ export interface MCPAction extends OpenHandsActionEvent<"call_tool_mcp"> {
};
}
-export interface TaskTrackingAction
- extends OpenHandsActionEvent<"task_tracking"> {
+export interface TaskTrackingAction extends OpenHandsActionEvent<"task_tracking"> {
source: "agent";
args: {
command: string;
diff --git a/frontend/src/types/core/base.ts b/frontend/src/types/core/base.ts
index 4014d2bbb5fa..e305bf7d4d6d 100644
--- a/frontend/src/types/core/base.ts
+++ b/frontend/src/types/core/base.ts
@@ -30,14 +30,16 @@ interface OpenHandsBaseEvent {
timestamp: string; // ISO 8601
}
-export interface OpenHandsActionEvent
- extends OpenHandsBaseEvent {
+export interface OpenHandsActionEvent<
+ T extends OpenHandsEventType,
+> extends OpenHandsBaseEvent {
action: T;
args: Record;
}
-export interface OpenHandsObservationEvent
- extends OpenHandsBaseEvent {
+export interface OpenHandsObservationEvent<
+ T extends OpenHandsEventType,
+> extends OpenHandsBaseEvent {
cause: number;
observation: T;
content: string;
diff --git a/frontend/src/types/core/observations.ts b/frontend/src/types/core/observations.ts
index 01a73ec81beb..2741926fdaf7 100644
--- a/frontend/src/types/core/observations.ts
+++ b/frontend/src/types/core/observations.ts
@@ -1,8 +1,7 @@
import { AgentState } from "../agent-state";
import { OpenHandsObservationEvent } from "./base";
-export interface AgentStateChangeObservation
- extends OpenHandsObservationEvent<"agent_state_changed"> {
+export interface AgentStateChangeObservation extends OpenHandsObservationEvent<"agent_state_changed"> {
source: "agent";
extras: {
agent_state: AgentState;
@@ -19,8 +18,7 @@ export interface CommandObservation extends OpenHandsObservationEvent<"run"> {
};
}
-export interface IPythonObservation
- extends OpenHandsObservationEvent<"run_ipython"> {
+export interface IPythonObservation extends OpenHandsObservationEvent<"run_ipython"> {
source: "agent";
extras: {
code: string;
@@ -28,8 +26,7 @@ export interface IPythonObservation
};
}
-export interface DelegateObservation
- extends OpenHandsObservationEvent<"delegate"> {
+export interface DelegateObservation extends OpenHandsObservationEvent<"delegate"> {
source: "agent";
extras: {
outputs: Record;
@@ -53,8 +50,7 @@ export interface BrowseObservation extends OpenHandsObservationEvent<"browse"> {
};
}
-export interface BrowseInteractiveObservation
- extends OpenHandsObservationEvent<"browse_interactive"> {
+export interface BrowseInteractiveObservation extends OpenHandsObservationEvent<"browse_interactive"> {
source: "agent";
extras: {
url: string;
@@ -103,8 +99,7 @@ export interface ErrorObservation extends OpenHandsObservationEvent<"error"> {
};
}
-export interface AgentThinkObservation
- extends OpenHandsObservationEvent<"think"> {
+export interface AgentThinkObservation extends OpenHandsObservationEvent<"think"> {
source: "agent";
extras: {
thought: string;
@@ -141,14 +136,12 @@ export interface MCPObservation extends OpenHandsObservationEvent<"mcp"> {
};
}
-export interface UserRejectedObservation
- extends OpenHandsObservationEvent<"user_rejected"> {
+export interface UserRejectedObservation extends OpenHandsObservationEvent<"user_rejected"> {
source: "agent";
extras: Record;
}
-export interface TaskTrackingObservation
- extends OpenHandsObservationEvent<"task_tracking"> {
+export interface TaskTrackingObservation extends OpenHandsObservationEvent<"task_tracking"> {
source: "agent";
extras: {
command: string;
diff --git a/frontend/src/types/runtime-status.ts b/frontend/src/types/runtime-status.ts
index edc35f23bd39..f16933d616d5 100644
--- a/frontend/src/types/runtime-status.ts
+++ b/frontend/src/types/runtime-status.ts
@@ -5,6 +5,7 @@ export type RuntimeStatus =
| "STATUS$RUNTIME_STARTED"
| "STATUS$SETTING_UP_WORKSPACE"
| "STATUS$SETTING_UP_GIT_HOOKS"
+ | "STATUS$SETTING_UP_SKILLS"
| "STATUS$READY"
| "STATUS$ERROR"
| "STATUS$ERROR_RUNTIME_DISCONNECTED"
diff --git a/frontend/src/types/settings.ts b/frontend/src/types/settings.ts
index 1d9a54270563..22992881327b 100644
--- a/frontend/src/types/settings.ts
+++ b/frontend/src/types/settings.ts
@@ -2,6 +2,7 @@ export const ProviderOptions = {
github: "github",
gitlab: "gitlab",
bitbucket: "bitbucket",
+ azure_devops: "azure_devops",
enterprise_sso: "enterprise_sso",
} as const;
@@ -62,6 +63,7 @@ export type Settings = {
EMAIL_VERIFIED?: boolean;
GIT_USER_NAME?: string;
GIT_USER_EMAIL?: string;
+ V1_ENABLED?: boolean;
};
export type PostSettings = Settings & {
diff --git a/frontend/src/types/v1/core/base/action.ts b/frontend/src/types/v1/core/base/action.ts
index ce08d5a1b99a..8d3ec41bff48 100644
--- a/frontend/src/types/v1/core/base/action.ts
+++ b/frontend/src/types/v1/core/base/action.ts
@@ -41,6 +41,25 @@ export interface ExecuteBashAction extends ActionBase<"ExecuteBashAction"> {
reset: boolean;
}
+export interface TerminalAction extends ActionBase<"TerminalAction"> {
+ /**
+ * The terminal command to execute.
+ */
+ command: string;
+ /**
+ * If True, the command is an input to the running process. If False, the command is executed directly.
+ */
+ is_input: boolean;
+ /**
+ * Optional max time limit (seconds) for the command.
+ */
+ timeout: number | null;
+ /**
+ * If True, reset the terminal session before running the command.
+ */
+ reset: boolean;
+}
+
export interface FileEditorAction extends ActionBase<"FileEditorAction"> {
/**
* The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.
@@ -72,8 +91,7 @@ export interface FileEditorAction extends ActionBase<"FileEditorAction"> {
view_range: [number, number] | null;
}
-export interface StrReplaceEditorAction
- extends ActionBase<"StrReplaceEditorAction"> {
+export interface StrReplaceEditorAction extends ActionBase<"StrReplaceEditorAction"> {
/**
* The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.
*/
@@ -115,8 +133,7 @@ export interface TaskTrackerAction extends ActionBase<"TaskTrackerAction"> {
task_list: TaskItem[];
}
-export interface BrowserNavigateAction
- extends ActionBase<"BrowserNavigateAction"> {
+export interface BrowserNavigateAction extends ActionBase<"BrowserNavigateAction"> {
/**
* The URL to navigate to
*/
@@ -149,16 +166,14 @@ export interface BrowserTypeAction extends ActionBase<"BrowserTypeAction"> {
text: string;
}
-export interface BrowserGetStateAction
- extends ActionBase<"BrowserGetStateAction"> {
+export interface BrowserGetStateAction extends ActionBase<"BrowserGetStateAction"> {
/**
* Whether to include a screenshot of the current page. Default: False
*/
include_screenshot: boolean;
}
-export interface BrowserGetContentAction
- extends ActionBase<"BrowserGetContentAction"> {
+export interface BrowserGetContentAction extends ActionBase<"BrowserGetContentAction"> {
/**
* Whether to include links in the content (default: False)
*/
@@ -180,21 +195,18 @@ export interface BrowserGoBackAction extends ActionBase<"BrowserGoBackAction"> {
// No additional properties - this action has no parameters
}
-export interface BrowserListTabsAction
- extends ActionBase<"BrowserListTabsAction"> {
+export interface BrowserListTabsAction extends ActionBase<"BrowserListTabsAction"> {
// No additional properties - this action has no parameters
}
-export interface BrowserSwitchTabAction
- extends ActionBase<"BrowserSwitchTabAction"> {
+export interface BrowserSwitchTabAction extends ActionBase<"BrowserSwitchTabAction"> {
/**
* 4 Character Tab ID of the tab to switch to (from browser_list_tabs)
*/
tab_id: string;
}
-export interface BrowserCloseTabAction
- extends ActionBase<"BrowserCloseTabAction"> {
+export interface BrowserCloseTabAction extends ActionBase<"BrowserCloseTabAction"> {
/**
* 4 Character Tab ID of the tab to close (from browser_list_tabs)
*/
@@ -206,6 +218,7 @@ export type Action =
| FinishAction
| ThinkAction
| ExecuteBashAction
+ | TerminalAction
| FileEditorAction
| StrReplaceEditorAction
| TaskTrackerAction
diff --git a/frontend/src/types/v1/core/base/base.ts b/frontend/src/types/v1/core/base/base.ts
index 5925e8599d43..7704f1105de5 100644
--- a/frontend/src/types/v1/core/base/base.ts
+++ b/frontend/src/types/v1/core/base/base.ts
@@ -3,9 +3,11 @@ type EventType =
| "Finish"
| "Think"
| "ExecuteBash"
+ | "Terminal"
| "FileEditor"
| "StrReplaceEditor"
- | "TaskTracker";
+ | "TaskTracker"
+ | "PlanningFileEditor";
type ActionOnlyType =
| "BrowserNavigate"
@@ -24,7 +26,8 @@ type ObservationOnlyType = "Browser";
type ActionEventType = `${ActionOnlyType}Action` | `${EventType}Action`;
type ObservationEventType =
| `${ObservationOnlyType}Observation`
- | `${EventType}Observation`;
+ | `${EventType}Observation`
+ | "TerminalObservation";
export interface ActionBase {
kind: T;
diff --git a/frontend/src/types/v1/core/base/observation.ts b/frontend/src/types/v1/core/base/observation.ts
index 5433087548d6..42726c2b323b 100644
--- a/frontend/src/types/v1/core/base/observation.ts
+++ b/frontend/src/types/v1/core/base/observation.ts
@@ -6,8 +6,7 @@ import {
ImageContent,
} from "./common";
-export interface MCPToolObservation
- extends ObservationBase<"MCPToolObservation"> {
+export interface MCPToolObservation extends ObservationBase<"MCPToolObservation"> {
/**
* Content returned from the MCP tool converted to LLM Ready TextContent or ImageContent
*/
@@ -22,12 +21,15 @@ export interface MCPToolObservation
tool_name: string;
}
-export interface FinishObservation
- extends ObservationBase<"FinishObservation"> {
+export interface FinishObservation extends ObservationBase<"FinishObservation"> {
/**
- * Final message sent to the user
+ * Content returned from the finish action as a list of TextContent/ImageContent objects.
*/
- message: string;
+ content: Array;
+ /**
+ * Whether the finish action resulted in an error
+ */
+ is_error: boolean;
}
export interface ThinkObservation extends ObservationBase<"ThinkObservation"> {
@@ -37,8 +39,7 @@ export interface ThinkObservation extends ObservationBase<"ThinkObservation"> {
content: string;
}
-export interface BrowserObservation
- extends ObservationBase<"BrowserObservation"> {
+export interface BrowserObservation extends ObservationBase<"BrowserObservation"> {
/**
* The output message from the browser operation
*/
@@ -53,12 +54,11 @@ export interface BrowserObservation
screenshot_data: string | null;
}
-export interface ExecuteBashObservation
- extends ObservationBase<"ExecuteBashObservation"> {
+export interface ExecuteBashObservation extends ObservationBase<"ExecuteBashObservation"> {
/**
- * The raw output from the tool.
+ * Content returned from the tool as a list of TextContent/ImageContent objects.
*/
- output: string;
+ content: Array;
/**
* The bash command that was executed. Can be empty string if the observation is from a previous command that hit soft timeout and is not yet finished.
*/
@@ -81,8 +81,34 @@ export interface ExecuteBashObservation
metadata: CmdOutputMetadata;
}
-export interface FileEditorObservation
- extends ObservationBase<"FileEditorObservation"> {
+export interface TerminalObservation extends ObservationBase<"TerminalObservation"> {
+ /**
+ * Content returned from the terminal as a list of TextContent/ImageContent objects.
+ */
+ content: Array;
+ /**
+ * The bash command that was executed.
+ */
+ command: string | null;
+ /**
+ * The exit code of the command if it has finished.
+ */
+ exit_code: number | null;
+ /**
+ * Whether the command execution produced an error.
+ */
+ is_error: boolean;
+ /**
+ * Whether the command execution timed out.
+ */
+ timeout: boolean;
+ /**
+ * Additional metadata captured from the shell after command execution.
+ */
+ metadata: CmdOutputMetadata;
+}
+
+export interface FileEditorObservation extends ObservationBase<"FileEditorObservation"> {
/**
* The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.
*/
@@ -114,8 +140,7 @@ export interface FileEditorObservation
}
// Keep StrReplaceEditorObservation as a separate interface for backward compatibility
-export interface StrReplaceEditorObservation
- extends ObservationBase<"StrReplaceEditorObservation"> {
+export interface StrReplaceEditorObservation extends ObservationBase<"StrReplaceEditorObservation"> {
/**
* The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.
*/
@@ -146,8 +171,7 @@ export interface StrReplaceEditorObservation
error: string | null;
}
-export interface TaskTrackerObservation
- extends ObservationBase<"TaskTrackerObservation"> {
+export interface TaskTrackerObservation extends ObservationBase<"TaskTrackerObservation"> {
/**
* The formatted task list or status message.
*/
@@ -162,12 +186,45 @@ export interface TaskTrackerObservation
task_list: TaskItem[];
}
+export interface PlanningFileEditorObservation extends ObservationBase<"PlanningFileEditorObservation"> {
+ /**
+ * Content returned from the tool as a list of TextContent/ImageContent objects.
+ */
+ content: Array;
+ /**
+ * Whether the call resulted in an error.
+ */
+ is_error: boolean;
+ /**
+ * The commands to run. Allowed options are: `view`, `create`, `str_replace`, `insert`, `undo_edit`.
+ */
+ command: "view" | "create" | "str_replace" | "insert" | "undo_edit";
+ /**
+ * The file path that was edited.
+ */
+ path: string | null;
+ /**
+ * Indicates if the file previously existed. If not, it was created.
+ */
+ prev_exist: boolean;
+ /**
+ * The content of the file before the edit.
+ */
+ old_content: string | null;
+ /**
+ * The content of the file after the edit.
+ */
+ new_content: string | null;
+}
+
export type Observation =
| MCPToolObservation
| FinishObservation
| ThinkObservation
| BrowserObservation
| ExecuteBashObservation
+ | TerminalObservation
| FileEditorObservation
| StrReplaceEditorObservation
- | TaskTrackerObservation;
+ | TaskTrackerObservation
+ | PlanningFileEditorObservation;
diff --git a/frontend/src/types/v1/core/events/conversation-state-event.ts b/frontend/src/types/v1/core/events/conversation-state-event.ts
index 225dbfa08396..93679d667170 100644
--- a/frontend/src/types/v1/core/events/conversation-state-event.ts
+++ b/frontend/src/types/v1/core/events/conversation-state-event.ts
@@ -1,11 +1,63 @@
import { BaseEvent } from "../base/event";
import { V1ExecutionStatus } from "../base/common";
+/**
+ * Token usage metrics for LLM calls
+ */
+export interface TokenUsage {
+ model: string;
+ prompt_tokens: number;
+ completion_tokens: number;
+ cache_read_tokens: number;
+ cache_write_tokens: number;
+ reasoning_tokens: number;
+ context_window: number;
+ per_turn_token: number;
+ response_id: string;
+}
+
+/**
+ * LLM metrics for a specific component (agent or condenser)
+ */
+export interface LLMMetrics {
+ model_name: string;
+ accumulated_cost: number;
+ max_budget_per_task: number | null;
+ accumulated_token_usage: TokenUsage;
+ costs: Array<{
+ model: string;
+ cost: number;
+ timestamp: number;
+ }>;
+ response_latencies: Array<{
+ model: string;
+ latency: number;
+ response_id: string;
+ }>;
+ token_usages: TokenUsage[];
+}
+
+/**
+ * Usage metrics mapping for different components
+ */
+export interface UsageToMetrics {
+ agent: LLMMetrics;
+ condenser: LLMMetrics;
+}
+
+/**
+ * Stats containing usage metrics
+ */
+export interface ConversationStats {
+ usage_to_metrics: UsageToMetrics;
+}
+
/**
* Conversation state value types
*/
export interface ConversationState {
execution_status: V1ExecutionStatus;
+ stats?: ConversationStats;
// Add other conversation state fields here as needed
}
@@ -19,29 +71,52 @@ interface ConversationStateUpdateEventBase extends BaseEvent {
* Unique key for this state update event.
* Can be "full_state" for full state snapshots or field names for partial updates.
*/
- key: "full_state" | "execution_status"; // Extend with other keys as needed
+ key: "full_state" | "execution_status" | "stats"; // Extend with other keys as needed
/**
* Conversation state updates
*/
- value: ConversationState | V1ExecutionStatus;
+ value: ConversationState | V1ExecutionStatus | ConversationStats;
}
// Narrowed interfaces for full state update event
-export interface ConversationStateUpdateEventFullState
- extends ConversationStateUpdateEventBase {
+export interface ConversationStateUpdateEventFullState extends ConversationStateUpdateEventBase {
key: "full_state";
value: ConversationState;
}
// Narrowed interface for agent status update event
-export interface ConversationStateUpdateEventAgentStatus
- extends ConversationStateUpdateEventBase {
+export interface ConversationStateUpdateEventAgentStatus extends ConversationStateUpdateEventBase {
key: "execution_status";
value: V1ExecutionStatus;
}
+// Narrowed interface for stats update event
+export interface ConversationStateUpdateEventStats extends ConversationStateUpdateEventBase {
+ key: "stats";
+ value: ConversationStats;
+}
+
// Conversation state update event - contains conversation state updates
export type ConversationStateUpdateEvent =
| ConversationStateUpdateEventFullState
- | ConversationStateUpdateEventAgentStatus;
+ | ConversationStateUpdateEventAgentStatus
+ | ConversationStateUpdateEventStats;
+
+// Conversation error event - contains error information
+export interface ConversationErrorEvent extends BaseEvent {
+ /**
+ * The source is always "environment" for conversation error events
+ */
+ source: "environment";
+
+ /**
+ * Error code (e.g., "AuthenticationError")
+ */
+ code: string;
+
+ /**
+ * Detailed error message
+ */
+ detail: string;
+}
diff --git a/frontend/src/types/v1/core/events/observation-event.ts b/frontend/src/types/v1/core/events/observation-event.ts
index 62750d72898c..bf4e22a70983 100644
--- a/frontend/src/types/v1/core/events/observation-event.ts
+++ b/frontend/src/types/v1/core/events/observation-event.ts
@@ -21,8 +21,9 @@ export interface ObservationBaseEvent extends BaseEvent {
}
// Main observation event interface
-export interface ObservationEvent
- extends ObservationBaseEvent {
+export interface ObservationEvent<
+ T extends Observation = Observation,
+> extends ObservationBaseEvent {
/**
* The observation (tool call) sent to LLM
*/
diff --git a/frontend/src/types/v1/core/openhands-event.ts b/frontend/src/types/v1/core/openhands-event.ts
index 909f5221c0c8..4793c5a0ae5d 100644
--- a/frontend/src/types/v1/core/openhands-event.ts
+++ b/frontend/src/types/v1/core/openhands-event.ts
@@ -10,6 +10,7 @@ import {
CondensationRequestEvent,
CondensationSummaryEvent,
ConversationStateUpdateEvent,
+ ConversationErrorEvent,
PauseEvent,
} from "./events/index";
@@ -30,5 +31,6 @@ export type OpenHandsEvent =
| CondensationRequestEvent
| CondensationSummaryEvent
| ConversationStateUpdateEvent
+ | ConversationErrorEvent
// Control events
| PauseEvent;
diff --git a/frontend/src/types/v1/type-guards.ts b/frontend/src/types/v1/type-guards.ts
index bf409360c085..ee831ea489c5 100644
--- a/frontend/src/types/v1/type-guards.ts
+++ b/frontend/src/types/v1/type-guards.ts
@@ -3,7 +3,12 @@ import {
ObservationEvent,
BaseEvent,
ExecuteBashAction,
+ TerminalAction,
ExecuteBashObservation,
+ PlanningFileEditorObservation,
+ TerminalObservation,
+ BrowserObservation,
+ BrowserNavigateAction,
} from "./core";
import { AgentErrorEvent } from "./core/events/observation-event";
import { MessageEvent } from "./core/events/message-event";
@@ -12,6 +17,8 @@ import {
ConversationStateUpdateEvent,
ConversationStateUpdateEventAgentStatus,
ConversationStateUpdateEventFullState,
+ ConversationStateUpdateEventStats,
+ ConversationErrorEvent,
} from "./core/events/conversation-state-event";
import { SystemPromptEvent } from "./core/events/system-event";
import type { OpenHandsParsedEvent } from "../core/index";
@@ -97,17 +104,45 @@ export const isActionEvent = (event: OpenHandsEvent): event is ActionEvent =>
*/
export const isExecuteBashActionEvent = (
event: OpenHandsEvent,
-): event is ActionEvent =>
- isActionEvent(event) && event.action.kind === "ExecuteBashAction";
+): event is ActionEvent =>
+ isActionEvent(event) &&
+ (event.action.kind === "ExecuteBashAction" ||
+ event.action.kind === "TerminalAction");
/**
- * Type guard function to check if an observation event is an ExecuteBashObservation
+ * Type guard function to check if an observation event contains terminal output
*/
export const isExecuteBashObservationEvent = (
event: OpenHandsEvent,
-): event is ObservationEvent =>
+): event is ObservationEvent =>
isObservationEvent(event) &&
- event.observation.kind === "ExecuteBashObservation";
+ (event.observation.kind === "ExecuteBashObservation" ||
+ event.observation.kind === "TerminalObservation");
+
+/**
+ * Type guard function to check if an observation event is a PlanningFileEditorObservation
+ */
+export const isPlanningFileEditorObservationEvent = (
+ event: OpenHandsEvent,
+): event is ObservationEvent =>
+ isObservationEvent(event) &&
+ event.observation.kind === "PlanningFileEditorObservation";
+
+/**
+ * Type guard function to check if an observation event is a BrowserObservation
+ */
+export const isBrowserObservationEvent = (
+ event: OpenHandsEvent,
+): event is ObservationEvent =>
+ isObservationEvent(event) && event.observation.kind === "BrowserObservation";
+
+/**
+ * Type guard function to check if an action event is a BrowserNavigateAction
+ */
+export const isBrowserNavigateActionEvent = (
+ event: OpenHandsEvent,
+): event is ActionEvent =>
+ isActionEvent(event) && event.action.kind === "BrowserNavigateAction";
/**
* Type guard function to check if an event is a system prompt event
@@ -138,6 +173,18 @@ export const isAgentStatusConversationStateUpdateEvent = (
): event is ConversationStateUpdateEventAgentStatus =>
event.key === "execution_status";
+export const isStatsConversationStateUpdateEvent = (
+ event: ConversationStateUpdateEvent,
+): event is ConversationStateUpdateEventStats => event.key === "stats";
+
+/**
+ * Type guard function to check if an event is a conversation error event
+ */
+export const isConversationErrorEvent = (
+ event: OpenHandsEvent,
+): event is ConversationErrorEvent =>
+ "kind" in event && event.kind === "ConversationErrorEvent";
+
// =============================================================================
// TEMPORARY COMPATIBILITY TYPE GUARDS
// These will be removed once we fully migrate to V1 events
diff --git a/frontend/src/utils/error-handler.ts b/frontend/src/utils/error-handler.ts
index 385881e0ce01..d479853b6df0 100644
--- a/frontend/src/utils/error-handler.ts
+++ b/frontend/src/utils/error-handler.ts
@@ -50,3 +50,11 @@ export function showChatError({
status_update: true,
});
}
+
+/**
+ * Checks if an error message indicates a budget or credit limit issue
+ */
+export function isBudgetOrCreditError(errorMessage: string): boolean {
+ const lowerCaseError = errorMessage.toLowerCase();
+ return lowerCaseError.includes("budget") || lowerCaseError.includes("credit");
+}
diff --git a/frontend/src/utils/feature-flags.ts b/frontend/src/utils/feature-flags.ts
index acbe83d7d7e5..0f38a4d7eace 100644
--- a/frontend/src/utils/feature-flags.ts
+++ b/frontend/src/utils/feature-flags.ts
@@ -17,6 +17,4 @@ export const HIDE_LLM_SETTINGS = () => loadFeatureFlag("HIDE_LLM_SETTINGS");
export const VSCODE_IN_NEW_TAB = () => loadFeatureFlag("VSCODE_IN_NEW_TAB");
export const ENABLE_TRAJECTORY_REPLAY = () =>
loadFeatureFlag("TRAJECTORY_REPLAY");
-export const USE_V1_CONVERSATION_API = () =>
- loadFeatureFlag("USE_V1_CONVERSATION_API");
export const USE_PLANNING_AGENT = () => loadFeatureFlag("USE_PLANNING_AGENT");
diff --git a/frontend/src/utils/format-time-delta.ts b/frontend/src/utils/format-time-delta.ts
index 8f2425a234fe..6785d9c845cc 100644
--- a/frontend/src/utils/format-time-delta.ts
+++ b/frontend/src/utils/format-time-delta.ts
@@ -1,16 +1,45 @@
+/**
+ * Parses a date string as UTC if it doesn't have a timezone indicator.
+ * This fixes the issue where ISO strings without timezone info are interpreted as local time.
+ * @param dateString ISO 8601 date string
+ * @returns Date object parsed as UTC
+ *
+ * @example
+ * parseDateAsUTC("2025-12-01T11:53:37.273886"); // Parsed as UTC
+ * parseDateAsUTC("2025-12-01T11:53:37.273886Z"); // Already has timezone, parsed correctly
+ * parseDateAsUTC("2025-12-01T11:53:37+00:00"); // Already has timezone, parsed correctly
+ */
+const parseDateAsUTC = (dateString: string): Date => {
+ // Check if the string already has a timezone indicator
+ // Look for 'Z' (UTC), '+' (positive offset), or '-' after the time part (negative offset)
+ const hasTimezone =
+ dateString.includes("Z") || dateString.match(/[+-]\d{2}:\d{2}$/) !== null;
+
+ if (hasTimezone) {
+ // Already has timezone info, parse normally
+ return new Date(dateString);
+ }
+
+ // No timezone indicator - append 'Z' to force UTC parsing
+ return new Date(`${dateString}Z`);
+};
+
/**
* Formats a date into a compact string representing the time delta between the given date and the current date.
- * @param date The date to format
+ * @param date The date to format (Date object or ISO 8601 string)
* @returns A compact string representing the time delta between the given date and the current date
*
* @example
* // now is 2024-01-01T00:00:00Z
* formatTimeDelta(new Date("2023-12-31T23:59:59Z")); // "1s"
- * formatTimeDelta(new Date("2022-01-01T00:00:00Z")); // "2y"
+ * formatTimeDelta("2023-12-31T23:59:59Z"); // "1s"
+ * formatTimeDelta("2025-12-01T11:53:37.273886"); // Parsed as UTC automatically
*/
-export const formatTimeDelta = (date: Date) => {
+export const formatTimeDelta = (date: Date | string) => {
+ // Parse string dates as UTC if needed, or use Date object directly
+ const dateObj = typeof date === "string" ? parseDateAsUTC(date) : date;
const now = new Date();
- const delta = now.getTime() - date.getTime();
+ const delta = now.getTime() - dateObj.getTime();
const seconds = Math.floor(delta / 1000);
const minutes = Math.floor(seconds / 60);
diff --git a/frontend/src/utils/generate-auth-url.ts b/frontend/src/utils/generate-auth-url.ts
index 0d7990ad817c..0fdf71d5b162 100644
--- a/frontend/src/utils/generate-auth-url.ts
+++ b/frontend/src/utils/generate-auth-url.ts
@@ -1,6 +1,6 @@
/**
* Generates a URL to redirect to for OAuth authentication
- * @param identityProvider The identity provider to use (e.g., "github", "gitlab", "bitbucket")
+ * @param identityProvider The identity provider to use (e.g., "github", "gitlab", "bitbucket", "azure_devops")
* @param requestUrl The URL of the request
* @returns The URL to redirect to for OAuth
*/
diff --git a/frontend/src/utils/local-storage.ts b/frontend/src/utils/local-storage.ts
index ffdf14a164c3..45a8f924a7d2 100644
--- a/frontend/src/utils/local-storage.ts
+++ b/frontend/src/utils/local-storage.ts
@@ -8,12 +8,13 @@ export enum LoginMethod {
GITHUB = "github",
GITLAB = "gitlab",
BITBUCKET = "bitbucket",
+ AZURE_DEVOPS = "azure_devops",
ENTERPRISE_SSO = "enterprise_sso",
}
/**
* Set the login method in local storage
- * @param method The login method (github, gitlab, or bitbucket)
+ * @param method The login method (github, gitlab, bitbucket, or azure_devops)
*/
export const setLoginMethod = (method: LoginMethod): void => {
localStorage.setItem(LOCAL_STORAGE_KEYS.LOGIN_METHOD, method);
diff --git a/frontend/src/utils/reo.ts b/frontend/src/utils/reo.ts
index 9f76c98d314f..b2b8773ec804 100644
--- a/frontend/src/utils/reo.ts
+++ b/frontend/src/utils/reo.ts
@@ -4,6 +4,8 @@
* Using CDN approach for better TypeScript compatibility
*/
+import EventLogger from "./event-logger";
+
export interface ReoIdentity {
username: string;
type: "github" | "email";
@@ -41,7 +43,7 @@ class ReoService {
this.initialized = true;
}
} catch (error) {
- console.error("Failed to initialize Reo.dev tracking:", error);
+ EventLogger.error(`Failed to initialize Reo.dev tracking: ${error}`);
}
}
@@ -78,7 +80,7 @@ class ReoService {
*/
identify(identity: ReoIdentity): void {
if (!this.initialized) {
- console.warn("Reo.dev not initialized. Call init() first.");
+ EventLogger.warning("Reo.dev not initialized. Call init() first.");
return;
}
@@ -87,7 +89,7 @@ class ReoService {
window.Reo.identify(identity);
}
} catch (error) {
- console.error("Failed to identify user in Reo.dev:", error);
+ EventLogger.error(`Failed to identify user in Reo.dev: ${error}`);
}
}
diff --git a/frontend/src/utils/status.ts b/frontend/src/utils/status.ts
index 7b5ef5a126f4..e64820b291b9 100644
--- a/frontend/src/utils/status.ts
+++ b/frontend/src/utils/status.ts
@@ -5,6 +5,7 @@ import { ConversationStatus } from "#/types/conversation-status";
import { StatusMessage } from "#/types/message";
import { RuntimeStatus } from "#/types/runtime-status";
import { V1AppConversationStartTaskStatus } from "#/api/conversation-service/v1-conversation-service.types";
+import { isTaskPolling } from "./utils";
export enum IndicatorColor {
BLUE = "bg-blue-500",
@@ -105,10 +106,11 @@ export function getStatusCode(
runtimeStatus: RuntimeStatus | null,
agentState: AgentState | null,
taskStatus?: V1AppConversationStartTaskStatus | null,
+ subConversationTaskStatus?: V1AppConversationStartTaskStatus | null,
) {
// PRIORITY 1: Handle task error state (when start-tasks API returns ERROR)
// This must come first to prevent "Connecting..." from showing when task has errored
- if (taskStatus === "ERROR") {
+ if (taskStatus === "ERROR" || subConversationTaskStatus === "ERROR") {
return I18nKey.AGENT_STATUS$ERROR_OCCURRED;
}
@@ -147,7 +149,10 @@ export function getStatusCode(
if (webSocketStatus === "DISCONNECTED") {
return I18nKey.CHAT_INTERFACE$DISCONNECTED;
}
- if (webSocketStatus === "CONNECTING") {
+ if (
+ webSocketStatus === "CONNECTING" ||
+ isTaskPolling(subConversationTaskStatus)
+ ) {
return I18nKey.CHAT_INTERFACE$CONNECTING;
}
diff --git a/frontend/src/utils/utils.ts b/frontend/src/utils/utils.ts
index a7fe39e46334..69ff7aae5f01 100644
--- a/frontend/src/utils/utils.ts
+++ b/frontend/src/utils/utils.ts
@@ -182,6 +182,8 @@ export const shouldUseInstallationRepos = (
return true;
case "gitlab":
return false;
+ case "azure_devops":
+ return false;
case "github":
return app_mode === "saas";
default:
@@ -197,6 +199,8 @@ export const getGitProviderBaseUrl = (gitProvider: Provider): string => {
return "https://gitlab.com";
case "bitbucket":
return "https://bitbucket.org";
+ case "azure_devops":
+ return "https://dev.azure.com";
default:
return "";
}
@@ -210,6 +214,7 @@ export const getGitProviderBaseUrl = (gitProvider: Provider): string => {
export const getProviderName = (gitProvider: Provider) => {
if (gitProvider === "gitlab") return "GitLab";
if (gitProvider === "bitbucket") return "Bitbucket";
+ if (gitProvider === "azure_devops") return "Azure DevOps";
return "GitHub";
};
@@ -254,6 +259,15 @@ export const constructPullRequestUrl = (
return `${baseUrl}/${repositoryName}/-/merge_requests/${prNumber}`;
case "bitbucket":
return `${baseUrl}/${repositoryName}/pull-requests/${prNumber}`;
+ case "azure_devops": {
+ // Azure DevOps format: org/project/repo
+ const parts = repositoryName.split("/");
+ if (parts.length === 3) {
+ const [org, project, repo] = parts;
+ return `${baseUrl}/${org}/${project}/_git/${repo}/pullrequest/${prNumber}`;
+ }
+ return "";
+ }
default:
return "";
}
@@ -288,6 +302,15 @@ export const constructMicroagentUrl = (
return `${baseUrl}/${repositoryName}/-/blob/main/${microagentPath}`;
case "bitbucket":
return `${baseUrl}/${repositoryName}/src/main/${microagentPath}`;
+ case "azure_devops": {
+ // Azure DevOps format: org/project/repo
+ const parts = repositoryName.split("/");
+ if (parts.length === 3) {
+ const [org, project, repo] = parts;
+ return `${baseUrl}/${org}/${project}/_git/${repo}?path=/${microagentPath}&version=GBmain`;
+ }
+ return "";
+ }
default:
return "";
}
@@ -357,6 +380,15 @@ export const constructBranchUrl = (
return `${baseUrl}/${repositoryName}/-/tree/${branchName}`;
case "bitbucket":
return `${baseUrl}/${repositoryName}/src/${branchName}`;
+ case "azure_devops": {
+ // Azure DevOps format: org/project/repo
+ const parts = repositoryName.split("/");
+ if (parts.length === 3) {
+ const [org, project, repo] = parts;
+ return `${baseUrl}/${org}/${project}/_git/${repo}?version=GB${branchName}`;
+ }
+ return "";
+ }
default:
return "";
}
@@ -574,10 +606,15 @@ export const shouldIncludeRepository = (
* @returns The query string for searching OpenHands repositories
*/
export const getOpenHandsQuery = (provider: Provider | null): string => {
- if (provider === "gitlab") {
- return "openhands-config";
- }
- return ".openhands";
+ const providerRepositorySuffix: Record = {
+ gitlab: "openhands-config",
+ azure_devops: "openhands-config",
+ default: ".openhands",
+ } as const;
+
+ return provider && provider in providerRepositorySuffix
+ ? providerRepositorySuffix[provider]
+ : providerRepositorySuffix.default;
};
/**
@@ -589,12 +626,7 @@ export const getOpenHandsQuery = (provider: Provider | null): string => {
export const hasOpenHandsSuffix = (
repo: GitRepository,
provider: Provider | null,
-): boolean => {
- if (provider === "gitlab") {
- return repo.full_name.endsWith("/openhands-config");
- }
- return repo.full_name.endsWith("/.openhands");
-};
+): boolean => repo.full_name.endsWith(`/${getOpenHandsQuery(provider)}`);
/**
* Build headers for V1 API requests that require session authentication
@@ -611,6 +643,22 @@ export const buildSessionHeaders = (
return headers;
};
+/**
+ * Check if a task is currently being polled (loading state)
+ * @param taskStatus The task status string (e.g., "WORKING", "ERROR", "READY")
+ * @returns True if the task is in a loading state (not ERROR and not READY)
+ *
+ * @example
+ * isTaskPolling("WORKING") // Returns true
+ * isTaskPolling("PREPARING_REPOSITORY") // Returns true
+ * isTaskPolling("READY") // Returns false
+ * isTaskPolling("ERROR") // Returns false
+ * isTaskPolling(null) // Returns false
+ * isTaskPolling(undefined) // Returns false
+ */
+export const isTaskPolling = (taskStatus: string | null | undefined): boolean =>
+ !!taskStatus && taskStatus !== "ERROR" && taskStatus !== "READY";
+
/**
* Get the appropriate color based on agent status
* @param options Configuration object for status color calculation
diff --git a/frontend/src/utils/verified-models.ts b/frontend/src/utils/verified-models.ts
index 12453c6c8642..5b2e19e9efe4 100644
--- a/frontend/src/utils/verified-models.ts
+++ b/frontend/src/utils/verified-models.ts
@@ -59,6 +59,7 @@ export const VERIFIED_ANTHROPIC_MODELS = [
"claude-haiku-4-5-20251001",
"claude-opus-4-20250514",
"claude-opus-4-1-20250805",
+ "claude-opus-4-5-20251101",
];
// LiteLLM does not return the compatible Mistral models with the provider, so we list them here to set them ourselves
diff --git a/openhands-cli/.gitignore b/openhands-cli/.gitignore
deleted file mode 100644
index a83411f80e7b..000000000000
--- a/openhands-cli/.gitignore
+++ /dev/null
@@ -1,56 +0,0 @@
-# Python
-__pycache__/
-*.py[cod]
-*$py.class
-*.so
-.Python
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# Virtual Environment
-.env
-.venv
-env/
-venv/
-ENV/
-
-# IDE
-.idea/
-.vscode/
-*.swp
-*.swo
-
-# Testing
-.pytest_cache/
-.coverage
-htmlcov/
-.tox/
-.nox/
-.coverage.*
-coverage.xml
-*.cover
-.hypothesis/
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-# Note: We keep our custom spec file in version control
-# *.spec
-
-# Generated artifacts
-build
-
diff --git a/openhands-cli/Makefile b/openhands-cli/Makefile
deleted file mode 100644
index 0736a2852f91..000000000000
--- a/openhands-cli/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-.PHONY: help install install-dev test format clean run
-
-# Default target
-help:
- @echo "OpenHands CLI - Available commands:"
- @echo " install - Install the package"
- @echo " install-dev - Install with development dependencies"
- @echo " test - Run tests"
- @echo " format - Format code with ruff"
- @echo " clean - Clean build artifacts"
- @echo " run - Run the CLI"
-
-# Install the package
-install:
- uv sync
-
-# Install with development dependencies
-install-dev:
- uv sync --group dev
-
-# Run tests
-test:
- uv run pytest
-
-# Format code
-format:
- uv run ruff format openhands_cli/
-
-# Clean build artifacts
-clean:
- rm -rf .venv/
- find . -type d -name "__pycache__" -exec rm -rf {} +
- find . -type f -name "*.pyc" -delete
-
-# Run the CLI
-run:
- uv run openhands
-
-# Install UV if not present
-install-uv:
- @if ! command -v uv &> /dev/null; then \
- echo "Installing UV..."; \
- curl -LsSf https://astral.sh/uv/install.sh | sh; \
- else \
- echo "UV is already installed"; \
- fi
diff --git a/openhands-cli/README.md b/openhands-cli/README.md
deleted file mode 100644
index 07f49dcfc975..000000000000
--- a/openhands-cli/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# OpenHands V1 CLI
-
-A **lightweight, modern CLI** to interact with the OpenHands agent (powered by [OpenHands software-agent-sdk](https://github.com/OpenHands/software-agent-sdk)).
-
----
-
-## Quickstart
-
-- Prerequisites: Python 3.12+, curl
-- Install uv (package manager):
- ```bash
- curl -LsSf https://astral.sh/uv/install.sh | sh
- # Restart your shell so "uv" is on PATH, or follow the installer hint
- ```
-
-### Run the CLI locally
-```bash
-make install
-
-# Start the CLI
-make run
-# or
-uv run openhands
-```
-
-### Build a standalone executable
-```bash
-# Build (installs PyInstaller if needed)
-./build.sh --install-pyinstaller
-
-# The binary will be in dist/
-./dist/openhands # macOS/Linux
-# dist/openhands.exe # Windows
-```
diff --git a/openhands-cli/build.py b/openhands-cli/build.py
deleted file mode 100755
index 1b574294b104..000000000000
--- a/openhands-cli/build.py
+++ /dev/null
@@ -1,292 +0,0 @@
-#!/usr/bin/env python3
-"""
-Build script for OpenHands CLI using PyInstaller.
-
-This script packages the OpenHands CLI into a standalone executable binary
-using PyInstaller with the custom spec file.
-"""
-
-import argparse
-import os
-import select
-import shutil
-import subprocess
-import sys
-import time
-from pathlib import Path
-
-from openhands_cli.utils import get_default_cli_agent, get_llm_metadata
-from openhands_cli.locations import AGENT_SETTINGS_PATH, PERSISTENCE_DIR
-
-from openhands.sdk import LLM
-
-# =================================================
-# SECTION: Build Binary
-# =================================================
-
-
-def clean_build_directories() -> None:
- """Clean up previous build artifacts."""
- print('🧹 Cleaning up previous build artifacts...')
-
- build_dirs = ['build', 'dist', '__pycache__']
- for dir_name in build_dirs:
- if os.path.exists(dir_name):
- print(f' Removing {dir_name}/')
- shutil.rmtree(dir_name)
-
- # Clean up .pyc files
- for root, _dirs, files in os.walk('.'):
- for file in files:
- if file.endswith('.pyc'):
- os.remove(os.path.join(root, file))
-
- print('✅ Cleanup complete!')
-
-
-def check_pyinstaller() -> bool:
- """Check if PyInstaller is available."""
- try:
- subprocess.run(
- ['uv', 'run', 'pyinstaller', '--version'], check=True, capture_output=True
- )
- return True
- except (subprocess.CalledProcessError, FileNotFoundError):
- print(
- '❌ PyInstaller is not available. Use --install-pyinstaller flag or install manually with:'
- )
- print(' uv add --dev pyinstaller')
- return False
-
-
-def build_executable(
- spec_file: str = 'openhands.spec',
- clean: bool = True,
-) -> bool:
- """Build the executable using PyInstaller."""
- if clean:
- clean_build_directories()
-
- # Check if PyInstaller is available (installation is handled by build.sh)
- if not check_pyinstaller():
- return False
-
- print(f'🔨 Building executable using {spec_file}...')
-
- try:
- # Run PyInstaller with uv
- cmd = ['uv', 'run', 'pyinstaller', spec_file, '--clean']
-
- print(f'Running: {" ".join(cmd)}')
- subprocess.run(cmd, check=True, capture_output=True, text=True)
-
- print('✅ Build completed successfully!')
-
- # Check if the executable was created
- dist_dir = Path('dist')
- if dist_dir.exists():
- executables = list(dist_dir.glob('*'))
- if executables:
- print('📁 Executable(s) created in dist/:')
- for exe in executables:
- size = exe.stat().st_size / (1024 * 1024) # Size in MB
- print(f' - {exe.name} ({size:.1f} MB)')
- else:
- print('⚠️ No executables found in dist/ directory')
-
- return True
-
- except subprocess.CalledProcessError as e:
- print(f'❌ Build failed: {e}')
- if e.stdout:
- print('STDOUT:', e.stdout)
- if e.stderr:
- print('STDERR:', e.stderr)
- return False
-
-
-# =================================================
-# SECTION: Test and profile binary
-# =================================================
-
-WELCOME_MARKERS = ['welcome', 'openhands cli', 'type /help', 'available commands', '>']
-
-
-def _is_welcome(line: str) -> bool:
- s = line.strip().lower()
- return any(marker in s for marker in WELCOME_MARKERS)
-
-
-def test_executable(dummy_agent) -> bool:
- """Test the built executable, measuring boot time and total test time."""
- print('🧪 Testing the built executable...')
-
- spec_path = os.path.join(PERSISTENCE_DIR, AGENT_SETTINGS_PATH)
-
- specs_path = Path(os.path.expanduser(spec_path))
- if specs_path.exists():
- print(f'⚠️ Using existing settings at {specs_path}')
- else:
- print(f'💾 Creating dummy settings at {specs_path}')
- specs_path.parent.mkdir(parents=True, exist_ok=True)
- specs_path.write_text(dummy_agent.model_dump_json())
-
- exe_path = Path('dist/openhands')
- if not exe_path.exists():
- exe_path = Path('dist/openhands.exe')
- if not exe_path.exists():
- print('❌ Executable not found!')
- return False
-
- try:
- if os.name != 'nt':
- os.chmod(exe_path, 0o755)
-
- boot_start = time.time()
- proc = subprocess.Popen(
- [str(exe_path)],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True,
- bufsize=1,
- env={**os.environ},
- )
-
- # --- Wait for welcome ---
- deadline = boot_start + 60
- saw_welcome = False
- captured = []
-
- while time.time() < deadline:
- if proc.poll() is not None:
- break
- rlist, _, _ = select.select([proc.stdout], [], [], 0.2)
- if not rlist:
- continue
- line = proc.stdout.readline()
- if not line:
- continue
- captured.append(line)
- if _is_welcome(line):
- saw_welcome = True
- break
-
- if not saw_welcome:
- print('❌ Did not detect welcome prompt')
- try:
- proc.kill()
- except Exception:
- pass
- return False
-
- boot_end = time.time()
- print(f'⏱️ Boot to welcome: {boot_end - boot_start:.2f} seconds')
-
- # --- Run /help then /exit ---
- if proc.stdin is None:
- print('❌ stdin unavailable')
- proc.kill()
- return False
-
- proc.stdin.write('/help\n/exit\n')
- proc.stdin.flush()
- out, _ = proc.communicate(timeout=60)
-
- total_end = time.time()
- full_output = ''.join(captured) + (out or '')
-
- print(f'⏱️ End-to-end test time: {total_end - boot_start:.2f} seconds')
-
- if 'available commands' in full_output.lower():
- print('✅ Executable starts, welcome detected, and /help works')
- return True
- else:
- print('❌ /help output not found')
- print('Output preview:', full_output[-500:])
- return False
-
- except subprocess.TimeoutExpired:
- print('❌ Executable test timed out')
- try:
- proc.kill()
- except Exception:
- pass
- return False
- except Exception as e:
- print(f'❌ Error testing executable: {e}')
- try:
- proc.kill()
- except Exception:
- pass
- return False
-
-
-# =================================================
-# SECTION: Main
-# =================================================
-
-
-def main() -> int:
- """Main function."""
- parser = argparse.ArgumentParser(description='Build OpenHands CLI executable')
- parser.add_argument(
- '--spec', default='openhands.spec', help='PyInstaller spec file to use'
- )
- parser.add_argument(
- '--no-clean', action='store_true', help='Skip cleaning build directories'
- )
- parser.add_argument(
- '--no-test', action='store_true', help='Skip testing the built executable'
- )
- parser.add_argument(
- '--install-pyinstaller',
- action='store_true',
- help='Install PyInstaller using uv before building',
- )
-
- parser.add_argument(
- '--no-build', action='store_true', help='Skip testing the built executable'
- )
-
- args = parser.parse_args()
-
- print('🚀 OpenHands CLI Build Script')
- print('=' * 40)
-
- # Check if spec file exists
- if not os.path.exists(args.spec):
- print(f"❌ Spec file '{args.spec}' not found!")
- return 1
-
- # Build the executable
- if not args.no_build and not build_executable(args.spec, clean=not args.no_clean):
- return 1
-
- # Test the executable
- if not args.no_test:
- dummy_agent = get_default_cli_agent(
- llm=LLM(
- model='dummy-model',
- api_key='dummy-key',
- litellm_extra_body={"metadata": get_llm_metadata(model_name='dummy-model', llm_type='openhands')},
- )
- )
- if not test_executable(dummy_agent):
- print('❌ Executable test failed, build process failed')
- return 1
-
- print('\n🎉 Build process completed!')
- print("📁 Check the 'dist/' directory for your executable")
-
- return 0
-
-
-if __name__ == '__main__':
- try:
- sys.exit(main())
- except Exception as e:
- print(e)
- print('❌ Executable test failed')
- sys.exit(1)
-
diff --git a/openhands-cli/build.sh b/openhands-cli/build.sh
deleted file mode 100755
index 102a1bcb06b4..000000000000
--- a/openhands-cli/build.sh
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-#
-# Shell script wrapper for building OpenHands CLI executable.
-#
-# This script provides a simple interface to build the OpenHands CLI
-# using PyInstaller with uv package management.
-#
-
-set -e # Exit on any error
-
-echo "🚀 OpenHands CLI Build Script"
-echo "=============================="
-
-# Check if uv is available
-if ! command -v uv &> /dev/null; then
- echo "❌ uv is required but not found! Please install uv first."
- exit 1
-fi
-
-# Parse arguments to check for --install-pyinstaller
-INSTALL_PYINSTALLER=false
-PYTHON_ARGS=()
-
-for arg in "$@"; do
- case $arg in
- --install-pyinstaller)
- INSTALL_PYINSTALLER=true
- PYTHON_ARGS+=("$arg")
- ;;
- *)
- PYTHON_ARGS+=("$arg")
- ;;
- esac
-done
-
-# Install PyInstaller if requested
-if [ "$INSTALL_PYINSTALLER" = true ]; then
- echo "📦 Installing PyInstaller with uv..."
- if uv add --dev pyinstaller; then
- echo "✅ PyInstaller installed successfully with uv!"
- else
- echo "❌ Failed to install PyInstaller"
- exit 1
- fi
-fi
-
-# Run the Python build script using uv
-uv run python build.py "${PYTHON_ARGS[@]}"
diff --git a/openhands-cli/dev_config/python/.pre-commit-config.yaml b/openhands-cli/dev_config/python/.pre-commit-config.yaml
new file mode 100644
index 000000000000..fe3f137cea95
--- /dev/null
+++ b/openhands-cli/dev_config/python/.pre-commit-config.yaml
@@ -0,0 +1,56 @@
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v5.0.0
+ hooks:
+ - id: trailing-whitespace
+ exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
+ - id: end-of-file-fixer
+ exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
+ - id: check-yaml
+ args: ["--allow-multiple-documents"]
+ - id: debug-statements
+
+ - repo: https://github.com/tox-dev/pyproject-fmt
+ rev: v2.5.1
+ hooks:
+ - id: pyproject-fmt
+ - repo: https://github.com/abravalheri/validate-pyproject
+ rev: v0.24.1
+ hooks:
+ - id: validate-pyproject
+
+ - repo: https://github.com/astral-sh/ruff-pre-commit
+ # Ruff version.
+ rev: v0.11.8
+ hooks:
+ # Run the linter.
+ - id: ruff
+ entry: ruff check --config dev_config/python/ruff.toml
+ types_or: [python, pyi, jupyter]
+ args: [--fix, --unsafe-fixes]
+ exclude: ^(third_party/|enterprise/|openhands-cli/)
+ # Run the formatter.
+ - id: ruff-format
+ entry: ruff format --config dev_config/python/ruff.toml
+ types_or: [python, pyi, jupyter]
+ exclude: ^(third_party/|enterprise/|openhands-cli/)
+
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: v1.15.0
+ hooks:
+ - id: mypy
+ additional_dependencies:
+ [
+ types-requests,
+ types-setuptools,
+ types-pyyaml,
+ types-toml,
+ types-docker,
+ types-Markdown,
+ pydantic,
+ lxml,
+ ]
+ # To see gaps add `--html-report mypy-report/`
+ entry: mypy --config-file dev_config/python/mypy.ini openhands/
+ always_run: true
+ pass_filenames: false
diff --git a/openhands-cli/hooks/rthook_profile_imports.py b/openhands-cli/hooks/rthook_profile_imports.py
deleted file mode 100644
index 2175b51146a5..000000000000
--- a/openhands-cli/hooks/rthook_profile_imports.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import atexit
-import os
-import sys
-import time
-from collections import defaultdict
-
-ENABLE = os.getenv('IMPORT_PROFILING', '0') not in ('', '0', 'false', 'False')
-OUT = 'dist/import_profiler.csv'
-THRESHOLD_MS = float(os.getenv('IMPORT_PROFILING_THRESHOLD_MS', '0'))
-
-if ENABLE:
- timings = defaultdict(float) # module -> total seconds (first load only)
- counts = defaultdict(int) # module -> number of first-loads (should be 1)
- max_dur = defaultdict(float) # module -> max single load seconds
-
- try:
- import importlib._bootstrap as _bootstrap # type: ignore[attr-defined]
- except Exception:
- _bootstrap = None
-
- start_time = time.perf_counter()
-
- if _bootstrap is not None:
- _orig_find_and_load = _bootstrap._find_and_load
-
- def _timed_find_and_load(name, import_):
- preloaded = name in sys.modules # cache hit?
- t0 = time.perf_counter()
- try:
- return _orig_find_and_load(name, import_)
- finally:
- if not preloaded:
- dt = time.perf_counter() - t0
- timings[name] += dt
- counts[name] += 1
- if dt > max_dur[name]:
- max_dur[name] = dt
-
- _bootstrap._find_and_load = _timed_find_and_load
-
- @atexit.register
- def _dump_import_profile():
- def ms(s):
- return f'{s * 1000:.3f}'
-
- items = [
- (name, counts[name], timings[name], max_dur[name])
- for name in timings
- if timings[name] * 1000 >= THRESHOLD_MS
- ]
- items.sort(key=lambda x: x[2], reverse=True)
- try:
- with open(OUT, 'w', encoding='utf-8') as f:
- f.write('module,count,total_ms,max_ms\n')
- for name, cnt, tot_s, max_s in items:
- f.write(f'{name},{cnt},{ms(tot_s)},{ms(max_s)}\n')
- # brief summary
- if items:
- w = max(len(n) for n, *_ in items[:25])
- sys.stderr.write('\n=== Import Time Profile (first-load only) ===\n')
- sys.stderr.write(f'{"module".ljust(w)} count total_ms max_ms\n')
- for name, cnt, tot_s, max_s in items[:25]:
- sys.stderr.write(
- f'{name.ljust(w)} {str(cnt).rjust(5)} {ms(tot_s).rjust(8)} {ms(max_s).rjust(7)}\n'
- )
- sys.stderr.write(f'\nImport profile written to: {OUT}\n')
- except Exception as e:
- sys.stderr.write(f'[import-profiler] failed to write profile: {e}\n')
diff --git a/openhands-cli/openhands.spec b/openhands-cli/openhands.spec
deleted file mode 100644
index 909d1480a883..000000000000
--- a/openhands-cli/openhands.spec
+++ /dev/null
@@ -1,110 +0,0 @@
-# -*- mode: python ; coding: utf-8 -*-
-"""
-PyInstaller spec file for OpenHands CLI.
-
-This spec file configures PyInstaller to create a standalone executable
-for the OpenHands CLI application.
-"""
-
-from pathlib import Path
-import os
-import sys
-from PyInstaller.utils.hooks import (
- collect_submodules,
- collect_data_files,
- copy_metadata
-)
-
-
-
-# Get the project root directory (current working directory when running PyInstaller)
-project_root = Path.cwd()
-
-a = Analysis(
- ['openhands_cli/simple_main.py'],
- pathex=[str(project_root)],
- binaries=[],
- datas=[
- # Include any data files that might be needed
- # Add more data files here if needed in the future
- *collect_data_files('tiktoken'),
- *collect_data_files('tiktoken_ext'),
- *collect_data_files('litellm'),
- *collect_data_files('fastmcp'),
- *collect_data_files('mcp'),
- # Include all data files from openhands.sdk (templates, configs, etc.)
- *collect_data_files('openhands.sdk'),
- # Include package metadata for importlib.metadata
- *copy_metadata('fastmcp'),
- ],
- hiddenimports=[
- # Explicitly include modules that might not be detected automatically
- *collect_submodules('openhands_cli'),
- *collect_submodules('prompt_toolkit'),
- # Include OpenHands SDK submodules explicitly to avoid resolution issues
- *collect_submodules('openhands.sdk'),
- *collect_submodules('openhands.tools'),
- *collect_submodules('tiktoken'),
- *collect_submodules('tiktoken_ext'),
- *collect_submodules('litellm'),
- *collect_submodules('fastmcp'),
- # Include mcp but exclude CLI parts that require typer
- 'mcp.types',
- 'mcp.client',
- 'mcp.server',
- 'mcp.shared',
- 'openhands.tools.terminal',
- 'openhands.tools.str_replace_editor',
- 'openhands.tools.task_tracker',
- ],
- hookspath=[],
- hooksconfig={},
- runtime_hooks=[],
- # runtime_hooks=[str(project_root / "hooks" / "rthook_profile_imports.py")],
- excludes=[
- # Exclude unnecessary modules to reduce binary size
- 'tkinter',
- 'matplotlib',
- 'numpy',
- 'scipy',
- 'pandas',
- 'IPython',
- 'jupyter',
- 'notebook',
- # Exclude mcp CLI parts that cause issues
- 'mcp.cli',
- 'prompt_toolkit.contrib.ssh',
- 'fastmcp.cli',
- 'boto3',
- 'botocore',
- 'posthog',
- 'browser-use',
- 'openhands.tools.browser_use'
- ],
- noarchive=False,
- # IMPORTANT: do not use optimize=2 (-OO) because it strips docstrings used by PLY/bashlex grammar
- optimize=0,
-)
-pyz = PYZ(a.pure)
-
-exe = EXE(
- pyz,
- a.scripts,
- a.binaries,
- a.datas,
- [],
- name='openhands',
- debug=False,
- bootloader_ignore_signals=False,
- strip=True, # Strip debug symbols to reduce size
- upx=True, # Use UPX compression if available
- upx_exclude=[],
- runtime_tmpdir=None,
- console=True, # CLI application needs console
- disable_windowed_traceback=False,
- argv_emulation=False,
- target_arch=None,
- codesign_identity=None,
- entitlements_file=None,
- icon=None, # Add icon path here if you have one
-)
diff --git a/openhands-cli/openhands_cli/__init__.py b/openhands-cli/openhands_cli/__init__.py
deleted file mode 100644
index a354bd0e4695..000000000000
--- a/openhands-cli/openhands_cli/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""OpenHands package."""
-
-from importlib.metadata import version, PackageNotFoundError
-
-try:
- __version__ = version("openhands")
-except PackageNotFoundError:
- __version__ = "0.0.0"
diff --git a/openhands-cli/openhands_cli/agent_chat.py b/openhands-cli/openhands_cli/agent_chat.py
deleted file mode 100644
index e71efb7a6fd6..000000000000
--- a/openhands-cli/openhands_cli/agent_chat.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python3
-"""
-Agent chat functionality for OpenHands CLI.
-Provides a conversation interface with an AI agent using OpenHands patterns.
-"""
-
-import sys
-from datetime import datetime
-import uuid
-
-from openhands.sdk import (
- Message,
- TextContent,
-)
-from openhands.sdk.conversation.state import ConversationExecutionStatus
-from prompt_toolkit import print_formatted_text
-from prompt_toolkit.formatted_text import HTML
-
-from openhands_cli.runner import ConversationRunner
-from openhands_cli.setup import (
- MissingAgentSpec,
- setup_conversation,
- verify_agent_exists_or_setup_agent
-)
-from openhands_cli.tui.settings.mcp_screen import MCPScreen
-from openhands_cli.tui.settings.settings_screen import SettingsScreen
-from openhands_cli.tui.status import display_status
-from openhands_cli.tui.tui import (
- display_help,
- display_welcome,
-)
-from openhands_cli.user_actions import UserConfirmation, exit_session_confirmation
-from openhands_cli.user_actions.utils import get_session_prompter
-
-
-def _restore_tty() -> None:
- """
- Ensure terminal modes are reset in case prompt_toolkit cleanup didn't run.
- - Turn off application cursor keys (DECCKM): ESC[?1l
- - Turn off bracketed paste: ESC[?2004l
- """
- try:
- sys.stdout.write('\x1b[?1l\x1b[?2004l')
- sys.stdout.flush()
- except Exception:
- pass
-
-
-def _print_exit_hint(conversation_id: str) -> None:
- """Print a resume hint with the current conversation ID."""
- print_formatted_text(
- HTML(f'Conversation ID: {conversation_id} ')
- )
- print_formatted_text(
- HTML(
- f'Hint: run openhands --resume {conversation_id} '
- 'to resume this conversation.'
- )
- )
-
-
-
-def run_cli_entry(resume_conversation_id: str | None = None) -> None:
- """Run the agent chat session using the agent SDK.
-
-
- Raises:
- AgentSetupError: If agent setup fails
- KeyboardInterrupt: If user interrupts the session
- EOFError: If EOF is encountered
- """
-
- conversation_id = uuid.uuid4()
- if resume_conversation_id:
- try:
- conversation_id = uuid.UUID(resume_conversation_id)
- except ValueError as e:
- print_formatted_text(
- HTML(
- f"Warning: '{resume_conversation_id}' is not a valid UUID. "
- )
- )
- return
-
- try:
- initialized_agent = verify_agent_exists_or_setup_agent()
- except MissingAgentSpec:
- print_formatted_text(HTML('\nSetup is required to use OpenHands CLI. '))
- print_formatted_text(HTML('\nGoodbye! 👋 '))
- return
-
-
- display_welcome(conversation_id, bool(resume_conversation_id))
-
- # Track session start time for uptime calculation
- session_start_time = datetime.now()
-
- # Create conversation runner to handle state machine logic
- runner = None
- session = get_session_prompter()
-
- # Main chat loop
- while True:
- try:
- # Get user input
- user_input = session.prompt(
- HTML('> '),
- multiline=False,
- )
-
- if not user_input.strip():
- continue
-
- # Handle commands
- command = user_input.strip().lower()
-
- message = Message(
- role='user',
- content=[TextContent(text=user_input)],
- )
-
- if command == '/exit':
- exit_confirmation = exit_session_confirmation()
- if exit_confirmation == UserConfirmation.ACCEPT:
- print_formatted_text(HTML('\nGoodbye! 👋 '))
- _print_exit_hint(conversation_id)
- break
-
- elif command == '/settings':
- settings_screen = SettingsScreen(runner.conversation if runner else None)
- settings_screen.display_settings()
- continue
-
- elif command == '/mcp':
- mcp_screen = MCPScreen()
- mcp_screen.display_mcp_info(initialized_agent)
- continue
-
- elif command == '/clear':
- display_welcome(conversation_id)
- continue
-
- elif command == '/new':
- try:
- # Start a fresh conversation (no resume ID = new conversation)
- conversation_id = uuid.uuid4()
- runner = None
- conversation = None
- display_welcome(conversation_id, resume=False)
- print_formatted_text(
- HTML('✓ Started fresh conversation ')
- )
- continue
- except Exception as e:
- print_formatted_text(
- HTML(f'Error starting fresh conversation: {e} ')
- )
- continue
-
- elif command == '/help':
- display_help()
- continue
-
- elif command == '/status':
- display_status(conversation, session_start_time=session_start_time)
- continue
-
- elif command == '/confirm':
- runner.toggle_confirmation_mode()
- new_status = (
- 'enabled' if runner.is_confirmation_mode_active else 'disabled'
- )
- print_formatted_text(
- HTML(f'Confirmation mode {new_status} ')
- )
- continue
-
- elif command == '/resume':
- if not runner:
- print_formatted_text(
- HTML('No active conversation running... ')
- )
- continue
-
- conversation = runner.conversation
- if not (
- conversation.state.execution_status == ConversationExecutionStatus.PAUSED
- or conversation.state.execution_status
- == ConversationExecutionStatus.WAITING_FOR_CONFIRMATION
- ):
- print_formatted_text(
- HTML('No paused conversation to resume... ')
- )
- continue
-
- # Resume without new message
- message = None
-
- if not runner or not conversation:
- conversation = setup_conversation(conversation_id)
- runner = ConversationRunner(conversation)
- runner.process_message(message)
-
- print() # Add spacing
-
- except KeyboardInterrupt:
- exit_confirmation = exit_session_confirmation()
- if exit_confirmation == UserConfirmation.ACCEPT:
- print_formatted_text(HTML('\nGoodbye! 👋 '))
- _print_exit_hint(conversation_id)
- break
-
- # Clean up terminal state
- _restore_tty()
diff --git a/openhands-cli/openhands_cli/argparsers/main_parser.py b/openhands-cli/openhands_cli/argparsers/main_parser.py
deleted file mode 100644
index 6f28d1e63784..000000000000
--- a/openhands-cli/openhands_cli/argparsers/main_parser.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""Main argument parser for OpenHands CLI."""
-
-import argparse
-
-
-def create_main_parser() -> argparse.ArgumentParser:
- """Create the main argument parser with CLI as default and serve as subcommand.
-
- Returns:
- The configured argument parser
- """
- parser = argparse.ArgumentParser(
- description='OpenHands CLI - Terminal User Interface for OpenHands AI Agent',
- formatter_class=argparse.RawDescriptionHelpFormatter,
- epilog="""
-By default, OpenHands runs in CLI mode (terminal interface).
-Use 'serve' subcommand to launch the GUI server instead.
-
-Examples:
- openhands # Start CLI mode
- openhands --resume conversation-id # Resume a conversation in CLI mode
- openhands serve # Launch GUI server
- openhands serve --gpu # Launch GUI server with GPU support
-"""
- )
-
- # CLI arguments at top level (default mode)
- parser.add_argument(
- '--resume',
- type=str,
- help='Conversation ID to resume'
- )
-
- # Only serve as subcommand
- subparsers = parser.add_subparsers(
- dest='command',
- help='Additional commands'
- )
-
- # Add serve subcommand
- serve_parser = subparsers.add_parser(
- 'serve',
- help='Launch the OpenHands GUI server using Docker (web interface)'
- )
- serve_parser.add_argument(
- '--mount-cwd',
- action='store_true',
- help='Mount the current working directory in the Docker container'
- )
- serve_parser.add_argument(
- '--gpu',
- action='store_true',
- help='Enable GPU support in the Docker container'
- )
-
- return parser
\ No newline at end of file
diff --git a/openhands-cli/openhands_cli/argparsers/serve_parser.py b/openhands-cli/openhands_cli/argparsers/serve_parser.py
deleted file mode 100644
index dea991254809..000000000000
--- a/openhands-cli/openhands_cli/argparsers/serve_parser.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""Argument parser for serve subcommand."""
-
-import argparse
-
-
-def add_serve_parser(subparsers: argparse._SubParsersAction) -> argparse.ArgumentParser:
- """Add serve subcommand parser.
-
- Args:
- subparsers: The subparsers object to add the serve parser to
-
- Returns:
- The serve argument parser
- """
- serve_parser = subparsers.add_parser(
- 'serve',
- help='Launch the OpenHands GUI server using Docker (web interface)'
- )
- serve_parser.add_argument(
- '--mount-cwd',
- help='Mount the current working directory into the GUI server container',
- action='store_true',
- default=False,
- )
- serve_parser.add_argument(
- '--gpu',
- help='Enable GPU support by mounting all GPUs into the Docker container via nvidia-docker',
- action='store_true',
- default=False,
- )
- return serve_parser
\ No newline at end of file
diff --git a/openhands-cli/openhands_cli/gui_launcher.py b/openhands-cli/openhands_cli/gui_launcher.py
deleted file mode 100644
index b8724961239f..000000000000
--- a/openhands-cli/openhands_cli/gui_launcher.py
+++ /dev/null
@@ -1,220 +0,0 @@
-"""GUI launcher for OpenHands CLI."""
-
-import os
-import shutil
-import subprocess
-import sys
-from pathlib import Path
-
-from prompt_toolkit import print_formatted_text
-from prompt_toolkit.formatted_text import HTML
-from openhands_cli.locations import PERSISTENCE_DIR
-
-
-def _format_docker_command_for_logging(cmd: list[str]) -> str:
- """Format a Docker command for logging with grey color.
-
- Args:
- cmd (list[str]): The Docker command as a list of strings
-
- Returns:
- str: The formatted command string in grey HTML color
- """
- cmd_str = ' '.join(cmd)
- return f'Running Docker command: {cmd_str} '
-
-
-def check_docker_requirements() -> bool:
- """Check if Docker is installed and running.
-
- Returns:
- bool: True if Docker is available and running, False otherwise.
- """
- # Check if Docker is installed
- if not shutil.which('docker'):
- print_formatted_text(
- HTML('❌ Docker is not installed or not in PATH. ')
- )
- print_formatted_text(
- HTML(
- 'Please install Docker first: https://docs.docker.com/get-docker/ '
- )
- )
- return False
-
- # Check if Docker daemon is running
- try:
- result = subprocess.run(
- ['docker', 'info'], capture_output=True, text=True, timeout=10
- )
- if result.returncode != 0:
- print_formatted_text(
- HTML('❌ Docker daemon is not running. ')
- )
- print_formatted_text(
- HTML('Please start Docker and try again. ')
- )
- return False
- except (subprocess.TimeoutExpired, subprocess.SubprocessError) as e:
- print_formatted_text(
- HTML('❌ Failed to check Docker status. ')
- )
- print_formatted_text(HTML(f'Error: {e} '))
- return False
-
- return True
-
-
-def ensure_config_dir_exists() -> Path:
- """Ensure the OpenHands configuration directory exists and return its path."""
- path = Path(PERSISTENCE_DIR)
- path.mkdir(exist_ok=True, parents=True)
- return path
-
-
-def get_openhands_version() -> str:
- """Get the OpenHands version for Docker images.
-
- Returns:
- str: The version string to use for Docker images
- """
- # For now, use 'latest' as the default version
- # In the future, this could be read from a version file or environment variable
- return os.environ.get('OPENHANDS_VERSION', 'latest')
-
-
-def launch_gui_server(mount_cwd: bool = False, gpu: bool = False) -> None:
- """Launch the OpenHands GUI server using Docker.
-
- Args:
- mount_cwd: If True, mount the current working directory into the container.
- gpu: If True, enable GPU support by mounting all GPUs into the container via nvidia-docker.
- """
- print_formatted_text(
- HTML('🚀 Launching OpenHands GUI server... ')
- )
- print_formatted_text('')
-
- # Check Docker requirements
- if not check_docker_requirements():
- sys.exit(1)
-
- # Ensure config directory exists
- config_dir = ensure_config_dir_exists()
-
- # Get the current version for the Docker image
- version = get_openhands_version()
- runtime_image = f'docker.openhands.dev/openhands/runtime:{version}-nikolaik'
- app_image = f'docker.openhands.dev/openhands/openhands:{version}'
-
- print_formatted_text(HTML('Pulling required Docker images... '))
-
- # Pull the runtime image first
- pull_cmd = ['docker', 'pull', runtime_image]
- print_formatted_text(HTML(_format_docker_command_for_logging(pull_cmd)))
- try:
- subprocess.run(pull_cmd, check=True)
- except subprocess.CalledProcessError:
- print_formatted_text(
- HTML('❌ Failed to pull runtime image. ')
- )
- sys.exit(1)
-
- print_formatted_text('')
- print_formatted_text(
- HTML('✅ Starting OpenHands GUI server... ')
- )
- print_formatted_text(
- HTML('The server will be available at: http://localhost:3000 ')
- )
- print_formatted_text(HTML('Press Ctrl+C to stop the server. '))
- print_formatted_text('')
-
- # Build the Docker command
- docker_cmd = [
- 'docker',
- 'run',
- '-it',
- '--rm',
- '--pull=always',
- '-e',
- f'SANDBOX_RUNTIME_CONTAINER_IMAGE={runtime_image}',
- '-e',
- 'LOG_ALL_EVENTS=true',
- '-v',
- '/var/run/docker.sock:/var/run/docker.sock',
- '-v',
- f'{config_dir}:/.openhands',
- ]
-
- # Add GPU support if requested
- if gpu:
- print_formatted_text(
- HTML('🖥️ Enabling GPU support via nvidia-docker... ')
- )
- # Add the --gpus all flag to enable all GPUs
- docker_cmd.insert(2, '--gpus')
- docker_cmd.insert(3, 'all')
- # Add environment variable to pass GPU support to sandbox containers
- docker_cmd.extend(
- [
- '-e',
- 'SANDBOX_ENABLE_GPU=true',
- ]
- )
-
- # Add current working directory mount if requested
- if mount_cwd:
- cwd = Path.cwd()
- # Following the documentation at https://docs.all-hands.dev/usage/runtimes/docker#connecting-to-your-filesystem
- docker_cmd.extend(
- [
- '-e',
- f'SANDBOX_VOLUMES={cwd}:/workspace:rw',
- ]
- )
-
- # Set user ID for Unix-like systems only
- if os.name != 'nt': # Not Windows
- try:
- user_id = subprocess.check_output(['id', '-u'], text=True).strip()
- docker_cmd.extend(['-e', f'SANDBOX_USER_ID={user_id}'])
- except (subprocess.CalledProcessError, FileNotFoundError):
- # If 'id' command fails or doesn't exist, skip setting user ID
- pass
- # Print the folder that will be mounted to inform the user
- print_formatted_text(
- HTML(
- f'📂 Mounting current directory: {cwd} to /workspace '
- )
- )
-
- docker_cmd.extend(
- [
- '-p',
- '3000:3000',
- '--add-host',
- 'host.docker.internal:host-gateway',
- '--name',
- 'openhands-app',
- app_image,
- ]
- )
-
- try:
- # Log and run the Docker command
- print_formatted_text(HTML(_format_docker_command_for_logging(docker_cmd)))
- subprocess.run(docker_cmd, check=True)
- except subprocess.CalledProcessError as e:
- print_formatted_text('')
- print_formatted_text(
- HTML('❌ Failed to start OpenHands GUI server. ')
- )
- print_formatted_text(HTML(f'Error: {e} '))
- sys.exit(1)
- except KeyboardInterrupt:
- print_formatted_text('')
- print_formatted_text(
- HTML('✓ OpenHands GUI server stopped successfully. ')
- )
- sys.exit(0)
diff --git a/openhands-cli/openhands_cli/listeners/__init__.py b/openhands-cli/openhands_cli/listeners/__init__.py
deleted file mode 100644
index 76725db747dc..000000000000
--- a/openhands-cli/openhands_cli/listeners/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from openhands_cli.listeners.pause_listener import PauseListener
-
-__all__ = ['PauseListener']
diff --git a/openhands-cli/openhands_cli/listeners/pause_listener.py b/openhands-cli/openhands_cli/listeners/pause_listener.py
deleted file mode 100644
index bb18b9c7aab5..000000000000
--- a/openhands-cli/openhands_cli/listeners/pause_listener.py
+++ /dev/null
@@ -1,83 +0,0 @@
-import threading
-from collections.abc import Callable, Iterator
-from contextlib import contextmanager
-
-from prompt_toolkit import HTML, print_formatted_text
-from prompt_toolkit.input import Input, create_input
-from prompt_toolkit.keys import Keys
-
-from openhands.sdk import BaseConversation
-
-
-class PauseListener(threading.Thread):
- """Background key listener that triggers pause on Ctrl-P.
-
- Starts and stops around agent run() loops to avoid interfering with user prompts.
- """
-
- def __init__(
- self,
- on_pause: Callable,
- input_source: Input | None = None, # used to pipe inputs for unit tests
- ):
- super().__init__(daemon=True)
- self.on_pause = on_pause
- self._stop_event = threading.Event()
- self._pause_event = threading.Event()
- self._input = input_source or create_input()
-
- def _detect_pause_key_presses(self) -> bool:
- pause_detected = False
-
- for key_press in self._input.read_keys():
- pause_detected = pause_detected or key_press.key == Keys.ControlP
- pause_detected = pause_detected or key_press.key == Keys.ControlC
- pause_detected = pause_detected or key_press.key == Keys.ControlD
-
- return pause_detected
-
- def _execute_pause(self) -> None:
- self._pause_event.set() # Mark pause event occurred
- print_formatted_text(HTML(''))
- print_formatted_text(
- HTML('Pausing agent once step is completed... ')
- )
- try:
- self.on_pause()
- except Exception:
- pass
-
- def run(self) -> None:
- try:
- with self._input.raw_mode():
- # User hasn't paused and pause listener hasn't been shut down
- while not (self.is_paused() or self.is_stopped()):
- if self._detect_pause_key_presses():
- self._execute_pause()
- finally:
- try:
- self._input.close()
- except Exception:
- pass
-
- def stop(self) -> None:
- self._stop_event.set()
-
- def is_stopped(self) -> bool:
- return self._stop_event.is_set()
-
- def is_paused(self) -> bool:
- return self._pause_event.is_set()
-
-
-@contextmanager
-def pause_listener(
- conversation: BaseConversation, input_source: Input | None = None
-) -> Iterator[PauseListener]:
- """Ensure PauseListener always starts/stops cleanly."""
- listener = PauseListener(on_pause=conversation.pause, input_source=input_source)
- listener.start()
- try:
- yield listener
- finally:
- listener.stop()
diff --git a/openhands-cli/openhands_cli/locations.py b/openhands-cli/openhands_cli/locations.py
deleted file mode 100644
index fe01a30a2823..000000000000
--- a/openhands-cli/openhands_cli/locations.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import os
-
-# Configuration directory for storing agent settings and CLI configuration
-PERSISTENCE_DIR = os.path.expanduser('~/.openhands')
-CONVERSATIONS_DIR = os.path.join(PERSISTENCE_DIR, 'conversations')
-
-# Working directory for agent operations (current directory where CLI is run)
-WORK_DIR = os.getcwd()
-
-AGENT_SETTINGS_PATH = 'agent_settings.json'
-
-# MCP configuration file (relative to PERSISTENCE_DIR)
-MCP_CONFIG_FILE = 'mcp.json'
diff --git a/openhands-cli/openhands_cli/pt_style.py b/openhands-cli/openhands_cli/pt_style.py
deleted file mode 100644
index 3b4ade6c9ad6..000000000000
--- a/openhands-cli/openhands_cli/pt_style.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from prompt_toolkit.styles import Style, merge_styles
-from prompt_toolkit.styles.base import BaseStyle
-from prompt_toolkit.styles.defaults import default_ui_style
-
-# Centralized helper for CLI styles so we can safely merge our custom colors
-# with prompt_toolkit's default UI style. This preserves completion menu and
-# fuzzy-match visibility across different terminal themes (e.g., Ubuntu).
-
-COLOR_GOLD = '#FFD700'
-COLOR_GREY = '#808080'
-COLOR_AGENT_BLUE = '#4682B4' # Steel blue - readable on light/dark backgrounds
-
-
-def get_cli_style() -> BaseStyle:
- base = default_ui_style()
- custom = Style.from_dict(
- {
- 'gold': COLOR_GOLD,
- 'grey': COLOR_GREY,
- 'prompt': f'{COLOR_GOLD} bold',
- # Ensure good contrast for fuzzy matches on the selected completion row
- # across terminals/themes (e.g., Ubuntu GNOME, Alacritty, Kitty).
- # See https://github.com/OpenHands/OpenHands/issues/10330
- 'completion-menu.completion.current fuzzymatch.outside': 'fg:#ffffff bg:#888888',
- 'selected': COLOR_GOLD,
- 'risk-high': '#FF0000 bold', # Red bold for HIGH risk
- 'placeholder': '#888888 italic',
- }
- )
- return merge_styles([base, custom])
diff --git a/openhands-cli/openhands_cli/runner.py b/openhands-cli/openhands_cli/runner.py
deleted file mode 100644
index 0ef15acdff2f..000000000000
--- a/openhands-cli/openhands_cli/runner.py
+++ /dev/null
@@ -1,188 +0,0 @@
-from prompt_toolkit import HTML, print_formatted_text
-
-from openhands.sdk import BaseConversation, Message
-from openhands.sdk.conversation.state import (
- ConversationExecutionStatus,
- ConversationState,
-)
-from openhands.sdk.security.confirmation_policy import (
- AlwaysConfirm,
- ConfirmationPolicyBase,
- ConfirmRisky,
- NeverConfirm,
-)
-from openhands_cli.listeners.pause_listener import PauseListener, pause_listener
-from openhands_cli.user_actions import ask_user_confirmation
-from openhands_cli.user_actions.types import UserConfirmation
-from openhands_cli.setup import setup_conversation
-
-
-class ConversationRunner:
- """Handles the conversation state machine logic cleanly."""
-
- def __init__(self, conversation: BaseConversation):
- self.conversation = conversation
-
- @property
- def is_confirmation_mode_active(self):
- return self.conversation.is_confirmation_mode_active
-
- def toggle_confirmation_mode(self):
- new_confirmation_mode_state = not self.is_confirmation_mode_active
-
- self.conversation = setup_conversation(
- self.conversation.id,
- include_security_analyzer=new_confirmation_mode_state
- )
-
- if new_confirmation_mode_state:
- # Enable confirmation mode: set AlwaysConfirm policy
- self.set_confirmation_policy(AlwaysConfirm())
- else:
- # Disable confirmation mode: set NeverConfirm policy and remove security analyzer
- self.set_confirmation_policy(NeverConfirm())
-
- def set_confirmation_policy(
- self, confirmation_policy: ConfirmationPolicyBase
- ) -> None:
- self.conversation.set_confirmation_policy(confirmation_policy)
-
-
- def _start_listener(self) -> None:
- self.listener = PauseListener(on_pause=self.conversation.pause)
- self.listener.start()
-
- def _print_run_status(self) -> None:
- print_formatted_text('')
- if (
- self.conversation.state.execution_status
- == ConversationExecutionStatus.PAUSED
- ):
- print_formatted_text(
- HTML(
- 'Resuming paused conversation... (Press Ctrl-P to pause) '
- )
- )
-
- else:
- print_formatted_text(
- HTML(
- 'Agent running... (Press Ctrl-P to pause) '
- )
- )
- print_formatted_text('')
-
- def process_message(self, message: Message | None) -> None:
- """Process a user message through the conversation.
-
- Args:
- message: The user message to process
- """
-
- self._print_run_status()
-
- # Send message to conversation
- if message:
- self.conversation.send_message(message)
-
- if self.is_confirmation_mode_active:
- self._run_with_confirmation()
- else:
- self._run_without_confirmation()
-
- def _run_without_confirmation(self) -> None:
- with pause_listener(self.conversation):
- self.conversation.run()
-
- def _run_with_confirmation(self) -> None:
- # If agent was paused, resume with confirmation request
- if (
- self.conversation.state.execution_status
- == ConversationExecutionStatus.WAITING_FOR_CONFIRMATION
- ):
- user_confirmation = self._handle_confirmation_request()
- if user_confirmation == UserConfirmation.DEFER:
- return
-
- while True:
- with pause_listener(self.conversation) as listener:
- self.conversation.run()
-
- if listener.is_paused():
- break
-
- # In confirmation mode, agent either finishes or waits for user confirmation
- if (
- self.conversation.state.execution_status
- == ConversationExecutionStatus.FINISHED
- ):
- break
-
- elif (
- self.conversation.state.execution_status
- == ConversationExecutionStatus.WAITING_FOR_CONFIRMATION
- ):
- user_confirmation = self._handle_confirmation_request()
- if user_confirmation == UserConfirmation.DEFER:
- return
-
- else:
- raise Exception('Infinite loop')
-
-
- def _handle_confirmation_request(self) -> UserConfirmation:
- """Handle confirmation request from user.
-
- Returns:
- UserConfirmation indicating the user's choice
- """
-
- pending_actions = ConversationState.get_unmatched_actions(
- self.conversation.state.events
- )
- if not pending_actions:
- return UserConfirmation.ACCEPT
-
- result = ask_user_confirmation(
- pending_actions,
- isinstance(self.conversation.state.confirmation_policy, ConfirmRisky),
- )
- decision = result.decision
- policy_change = result.policy_change
-
- if decision == UserConfirmation.REJECT:
- self.conversation.reject_pending_actions(
- result.reason or 'User rejected the actions'
- )
- return decision
-
- if decision == UserConfirmation.DEFER:
- self.conversation.pause()
- return decision
-
- if isinstance(policy_change, NeverConfirm):
- print_formatted_text(
- HTML(
- 'Confirmation mode disabled. Agent will proceed without asking. '
- )
- )
-
- # Remove security analyzer when policy is never confirm
- self.toggle_confirmation_mode()
- return decision
-
- if isinstance(policy_change, ConfirmRisky):
- print_formatted_text(
- HTML(
- 'Security-based confirmation enabled. '
- 'LOW/MEDIUM risk actions will auto-confirm, HIGH risk actions will ask for confirmation. '
- )
- )
-
- # Keep security analyzer, change existing policy
- self.set_confirmation_policy(policy_change)
- return decision
-
- # Accept action without changing existing policies
- assert decision == UserConfirmation.ACCEPT
- return decision
diff --git a/openhands-cli/openhands_cli/setup.py b/openhands-cli/openhands_cli/setup.py
deleted file mode 100644
index 91b7ab946448..000000000000
--- a/openhands-cli/openhands_cli/setup.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import uuid
-
-from openhands.sdk.conversation import visualizer
-from openhands.sdk.security.llm_analyzer import LLMSecurityAnalyzer
-from prompt_toolkit import HTML, print_formatted_text
-
-from openhands.sdk import Agent, BaseConversation, Conversation, Workspace
-from openhands_cli.locations import CONVERSATIONS_DIR, WORK_DIR
-from openhands_cli.tui.settings.store import AgentStore
-from openhands.sdk.security.confirmation_policy import (
- AlwaysConfirm,
-)
-from openhands_cli.tui.settings.settings_screen import SettingsScreen
-from openhands_cli.tui.visualizer import CLIVisualizer
-
-# register tools
-from openhands.tools.terminal import TerminalTool
-from openhands.tools.file_editor import FileEditorTool
-from openhands.tools.task_tracker import TaskTrackerTool
-
-
-class MissingAgentSpec(Exception):
- """Raised when agent specification is not found or invalid."""
-
- pass
-
-
-
-def load_agent_specs(
- conversation_id: str | None = None,
-) -> Agent:
- agent_store = AgentStore()
- agent = agent_store.load(session_id=conversation_id)
- if not agent:
- raise MissingAgentSpec(
- 'Agent specification not found. Please configure your agent settings.'
- )
- return agent
-
-
-def verify_agent_exists_or_setup_agent() -> Agent:
- """Verify agent specs exists by attempting to load it.
-
- """
- settings_screen = SettingsScreen()
- try:
- agent = load_agent_specs()
- return agent
- except MissingAgentSpec:
- # For first-time users, show the full settings flow with choice between basic/advanced
- settings_screen.configure_settings(first_time=True)
-
-
- # Try once again after settings setup attempt
- return load_agent_specs()
-
-
-def setup_conversation(
- conversation_id: uuid,
- include_security_analyzer: bool = True
-) -> BaseConversation:
- """
- Setup the conversation with agent.
-
- Args:
- conversation_id: conversation ID to use. If not provided, a random UUID will be generated.
-
- Raises:
- MissingAgentSpec: If agent specification is not found or invalid.
- """
-
- print_formatted_text(
- HTML(f'Initializing agent... ')
- )
-
- agent = load_agent_specs(str(conversation_id))
-
-
-
- # Create conversation - agent context is now set in AgentStore.load()
- conversation: BaseConversation = Conversation(
- agent=agent,
- workspace=Workspace(working_dir=WORK_DIR),
- # Conversation will add / to this path
- persistence_dir=CONVERSATIONS_DIR,
- conversation_id=conversation_id,
- visualizer=CLIVisualizer
- )
-
- # Security analyzer is set though conversation API now
- if not include_security_analyzer:
- conversation.set_security_analyzer(None)
- else:
- conversation.set_security_analyzer(LLMSecurityAnalyzer())
- conversation.set_confirmation_policy(AlwaysConfirm())
-
- print_formatted_text(
- HTML(f'✓ Agent initialized with model: {agent.llm.model} ')
- )
- return conversation
-
diff --git a/openhands-cli/openhands_cli/simple_main.py b/openhands-cli/openhands_cli/simple_main.py
deleted file mode 100644
index 343d37a4d3c9..000000000000
--- a/openhands-cli/openhands_cli/simple_main.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-"""
-Simple main entry point for OpenHands CLI.
-This is a simplified version that demonstrates the TUI functionality.
-"""
-
-import logging
-import os
-import sys
-import warnings
-
-debug_env = os.getenv('DEBUG', 'false').lower()
-if debug_env != '1' and debug_env != 'true':
- logging.disable(logging.WARNING)
- warnings.filterwarnings('ignore')
-
-from prompt_toolkit import print_formatted_text
-from prompt_toolkit.formatted_text import HTML
-
-from openhands_cli.argparsers.main_parser import create_main_parser
-
-
-def main() -> None:
- """Main entry point for the OpenHands CLI.
-
- Raises:
- ImportError: If agent chat dependencies are missing
- Exception: On other error conditions
- """
- parser = create_main_parser()
- args = parser.parse_args()
-
- try:
- if args.command == 'serve':
- # Import gui_launcher only when needed
- from openhands_cli.gui_launcher import launch_gui_server
-
- launch_gui_server(mount_cwd=args.mount_cwd, gpu=args.gpu)
- else:
- # Default CLI behavior - no subcommand needed
- # Import agent_chat only when needed
- from openhands_cli.agent_chat import run_cli_entry
-
- # Start agent chat
- run_cli_entry(resume_conversation_id=args.resume)
- except KeyboardInterrupt:
- print_formatted_text(HTML('\nGoodbye! 👋 '))
- except EOFError:
- print_formatted_text(HTML('\nGoodbye! 👋 '))
- except Exception as e:
- print_formatted_text(HTML(f'Error: {e} '))
- import traceback
-
- traceback.print_exc()
- raise
-
-
-if __name__ == '__main__':
- main()
diff --git a/openhands-cli/openhands_cli/tui/__init__.py b/openhands-cli/openhands_cli/tui/__init__.py
deleted file mode 100644
index 00205468cb58..000000000000
--- a/openhands-cli/openhands_cli/tui/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from openhands_cli.tui.tui import DEFAULT_STYLE
-
-__all__ = [
- 'DEFAULT_STYLE',
-]
diff --git a/openhands-cli/openhands_cli/tui/settings/mcp_screen.py b/openhands-cli/openhands_cli/tui/settings/mcp_screen.py
deleted file mode 100644
index 8284f353b57d..000000000000
--- a/openhands-cli/openhands_cli/tui/settings/mcp_screen.py
+++ /dev/null
@@ -1,217 +0,0 @@
-import json
-from pathlib import Path
-from typing import Any
-
-from fastmcp.mcp_config import MCPConfig
-from openhands_cli.locations import MCP_CONFIG_FILE, PERSISTENCE_DIR
-from prompt_toolkit import HTML, print_formatted_text
-
-from openhands.sdk import Agent
-
-
-class MCPScreen:
- """
- MCP Screen
-
- 1. Display information about setting up MCP
- 2. See existing servers that are setup
- 3. Debug additional servers passed via mcp.json
- 4. Identify servers waiting to sync on session restart
- """
-
- # ---------- server spec handlers ----------
-
- def _check_server_specs_are_equal(
- self, first_server_spec, second_server_spec
- ) -> bool:
- first_stringified_server_spec = json.dumps(first_server_spec, sort_keys=True)
- second_stringified_server_spec = json.dumps(second_server_spec, sort_keys=True)
- return first_stringified_server_spec == second_stringified_server_spec
-
- def _check_mcp_config_status(self) -> dict:
- """Check the status of the MCP configuration file and return information about it."""
- config_path = Path(PERSISTENCE_DIR) / MCP_CONFIG_FILE
-
- if not config_path.exists():
- return {
- 'exists': False,
- 'valid': False,
- 'servers': {},
- 'message': f'MCP configuration file not found at ~/.openhands/{MCP_CONFIG_FILE}',
- }
-
- try:
- mcp_config = MCPConfig.from_file(config_path)
- servers = mcp_config.to_dict().get('mcpServers', {})
- return {
- 'exists': True,
- 'valid': True,
- 'servers': servers,
- 'message': f'Valid MCP configuration found with {len(servers)} server(s)',
- }
- except Exception as e:
- return {
- 'exists': True,
- 'valid': False,
- 'servers': {},
- 'message': f'Invalid MCP configuration file: {str(e)}',
- }
-
- # ---------- TUI helpers ----------
-
- def _get_mcp_server_diff(
- self,
- current: dict[str, Any],
- incoming: dict[str, Any],
- ) -> None:
- """
- Display a diff-style view:
-
- - Always show the MCP servers the agent is *currently* configured with
- - If there are incoming servers (from ~/.openhands/mcp.json),
- clearly show which ones are NEW (not in current) and which ones are CHANGED
- (same name but different config). Unchanged servers are not repeated.
- """
-
- print_formatted_text(HTML('Current Agent MCP Servers: '))
- if current:
- for name, cfg in current.items():
- self._render_server_summary(name, cfg, indent=2)
- else:
- print_formatted_text(
- HTML(' None configured on the current agent. ')
- )
- print_formatted_text('')
-
- # If no incoming, we're done
- if not incoming:
- print_formatted_text(
- HTML('No incoming servers detected for next restart. ')
- )
- print_formatted_text('')
- return
-
- # Compare names and configs
- current_names = set(current.keys())
- incoming_names = set(incoming.keys())
- new_servers = sorted(incoming_names - current_names)
-
- overriden_servers = []
- for name in sorted(incoming_names & current_names):
- if not self._check_server_specs_are_equal(current[name], incoming[name]):
- overriden_servers.append(name)
-
- # Display incoming section header
- print_formatted_text(
- HTML(
- 'Incoming Servers on Restart (from ~/.openhands/mcp.json): '
- )
- )
-
- if not new_servers and not overriden_servers:
- print_formatted_text(
- HTML(
- ' All configured servers match the current agent configuration. '
- )
- )
- print_formatted_text('')
- return
-
- if new_servers:
- print_formatted_text(HTML(' New servers (will be added): '))
- for name in new_servers:
- self._render_server_summary(name, incoming[name], indent=4)
-
- if overriden_servers:
- print_formatted_text(
- HTML(' Updated servers (configuration will change): ')
- )
- for name in overriden_servers:
- print_formatted_text(HTML(f' • {name} '))
- print_formatted_text(HTML(' Current: '))
- self._render_server_summary(None, current[name], indent=8)
- print_formatted_text(HTML(' Incoming: '))
- self._render_server_summary(None, incoming[name], indent=8)
-
- print_formatted_text('')
-
- def _render_server_summary(
- self, server_name: str | None, server_spec: dict[str, Any], indent: int = 2
- ) -> None:
- pad = ' ' * indent
-
- if server_name:
- print_formatted_text(HTML(f'{pad}• {server_name} '))
-
- if isinstance(server_spec, dict):
- if 'command' in server_spec:
- cmd = server_spec.get('command', '')
- args = server_spec.get('args', [])
- args_str = ' '.join(args) if args else ''
- print_formatted_text(HTML(f'{pad} Type: Command-based '))
- if cmd or args_str:
- print_formatted_text(
- HTML(f'{pad} Command: {cmd} {args_str} ')
- )
- elif 'url' in server_spec:
- url = server_spec.get('url', '')
- auth = server_spec.get('auth', 'none')
- print_formatted_text(HTML(f'{pad} Type: URL-based '))
- if url:
- print_formatted_text(HTML(f'{pad} URL: {url} '))
- print_formatted_text(HTML(f'{pad} Auth: {auth} '))
-
- def _display_information_header(self) -> None:
- print_formatted_text(
- HTML('MCP (Model Context Protocol) Configuration ')
- )
- print_formatted_text('')
- print_formatted_text(HTML('To get started: '))
- print_formatted_text(
- HTML(
- ' 1. Create the configuration file: ~/.openhands/mcp.json '
- )
- )
- print_formatted_text(
- HTML(
- ' 2. Add your MCP server configurations '
- 'https://gofastmcp.com/clients/client#configuration-format '
- )
- )
- print_formatted_text(
- HTML(' 3. Restart your OpenHands session to load the new configuration')
- )
- print_formatted_text('')
-
- # ---------- status + display entrypoint ----------
-
- def display_mcp_info(self, existing_agent: Agent) -> None:
- """Display comprehensive MCP configuration information."""
-
- self._display_information_header()
-
- # Always determine current & incoming first
- status = self._check_mcp_config_status()
- incoming_servers = status.get('servers', {}) if status.get('valid') else {}
- current_servers = existing_agent.mcp_config.get('mcpServers', {})
-
- # Show file status
- if not status['exists']:
- print_formatted_text(
- HTML('Status: Configuration file not found ')
- )
-
- elif not status['valid']:
- print_formatted_text(HTML(f'Status: {status["message"]} '))
- print_formatted_text('')
- print_formatted_text(
- HTML('Please check your configuration file format. ')
- )
- else:
- print_formatted_text(HTML(f'Status: {status["message"]} '))
-
- print_formatted_text('')
-
- # Always show the agent's current servers
- # Then show incoming (deduped and changes highlighted)
- self._get_mcp_server_diff(current_servers, incoming_servers)
diff --git a/openhands-cli/openhands_cli/tui/settings/settings_screen.py b/openhands-cli/openhands_cli/tui/settings/settings_screen.py
deleted file mode 100644
index 0db491fc3e80..000000000000
--- a/openhands-cli/openhands_cli/tui/settings/settings_screen.py
+++ /dev/null
@@ -1,212 +0,0 @@
-import os
-
-from openhands.sdk import LLM, BaseConversation, LLMSummarizingCondenser, LocalFileStore
-from prompt_toolkit import HTML, print_formatted_text
-from prompt_toolkit.shortcuts import print_container
-from prompt_toolkit.widgets import Frame, TextArea
-
-from openhands_cli.utils import get_default_cli_agent, get_llm_metadata
-from openhands_cli.locations import AGENT_SETTINGS_PATH, PERSISTENCE_DIR
-from openhands_cli.pt_style import COLOR_GREY
-from openhands_cli.tui.settings.store import AgentStore
-from openhands_cli.tui.utils import StepCounter
-from openhands_cli.user_actions.settings_action import (
- SettingsType,
- choose_llm_model,
- choose_llm_provider,
- choose_memory_condensation,
- prompt_api_key,
- prompt_base_url,
- prompt_custom_model,
- save_settings_confirmation,
- settings_type_confirmation,
-)
-
-
-class SettingsScreen:
- def __init__(self, conversation: BaseConversation | None = None):
- self.file_store = LocalFileStore(PERSISTENCE_DIR)
- self.agent_store = AgentStore()
- self.conversation = conversation
-
- def display_settings(self) -> None:
- agent_spec = self.agent_store.load()
- if not agent_spec:
- return
-
- llm = agent_spec.llm
- advanced_llm_settings = True if llm.base_url else False
-
- # Prepare labels and values based on settings
- labels_and_values = []
- if not advanced_llm_settings:
- # Attempt to determine provider, fallback if not directly available
- provider = llm.model.split('/')[0] if '/' in llm.model else 'Unknown'
-
- labels_and_values.extend(
- [
- (' LLM Provider', str(provider)),
- (' LLM Model', str(llm.model)),
- ]
- )
- else:
- labels_and_values.extend(
- [
- (' Custom Model', llm.model),
- (' Base URL', llm.base_url),
- ]
- )
- labels_and_values.extend(
- [
- (' API Key', '********' if llm.api_key else 'Not Set'),
- ]
- )
-
- if self.conversation:
- labels_and_values.extend([
- (
- ' Confirmation Mode',
- 'Enabled'
- if self.conversation.is_confirmation_mode_active
- else 'Disabled',
- )
- ])
-
- labels_and_values.extend([
- (
- ' Memory Condensation',
- 'Enabled' if agent_spec.condenser else 'Disabled',
- ),
- (
- ' Configuration File',
- os.path.join(PERSISTENCE_DIR, AGENT_SETTINGS_PATH),
- ),
- ]
- )
-
- # Calculate max widths for alignment
- # Ensure values are strings for len() calculation
- str_labels_and_values = [
- (label, str(value)) for label, value in labels_and_values
- ]
- max_label_width = (
- max(len(label) for label, _ in str_labels_and_values)
- if str_labels_and_values
- else 0
- )
-
- # Construct the summary text with aligned columns
- settings_lines = [
- f'{label + ":":<{max_label_width + 1}} {value:<}' # Changed value alignment to left (<)
- for label, value in str_labels_and_values
- ]
- settings_text = '\n'.join(settings_lines)
-
- container = Frame(
- TextArea(
- text=settings_text,
- read_only=True,
- style=COLOR_GREY,
- wrap_lines=True,
- ),
- title='Settings',
- style=f'fg:{COLOR_GREY}',
- )
-
- print_container(container)
-
- self.configure_settings()
-
- def configure_settings(self, first_time=False):
- try:
- settings_type = settings_type_confirmation(first_time=first_time)
- except KeyboardInterrupt:
- return
-
- if settings_type == SettingsType.BASIC:
- self.handle_basic_settings()
- elif settings_type == SettingsType.ADVANCED:
- self.handle_advanced_settings()
-
- def handle_basic_settings(self):
- step_counter = StepCounter(3)
- try:
- provider = choose_llm_provider(step_counter, escapable=True)
- llm_model = choose_llm_model(step_counter, provider, escapable=True)
- api_key = prompt_api_key(
- step_counter,
- provider,
- self.conversation.state.agent.llm.api_key
- if self.conversation
- else None,
- escapable=True,
- )
- save_settings_confirmation()
- except KeyboardInterrupt:
- print_formatted_text(HTML('\nCancelled settings change. '))
- return
-
- # Store the collected settings for persistence
- self._save_llm_settings(f'{provider}/{llm_model}', api_key)
-
- def handle_advanced_settings(self, escapable=True):
- """Handle advanced settings configuration with clean step-by-step flow."""
- step_counter = StepCounter(4)
- try:
- custom_model = prompt_custom_model(step_counter)
- base_url = prompt_base_url(step_counter)
- api_key = prompt_api_key(
- step_counter,
- custom_model.split('/')[0] if len(custom_model.split('/')) > 1 else '',
- self.conversation.state.agent.llm.api_key if self.conversation else None,
- escapable=escapable,
- )
- memory_condensation = choose_memory_condensation(step_counter)
-
- # Confirm save
- save_settings_confirmation()
- except KeyboardInterrupt:
- print_formatted_text(HTML('\nCancelled settings change. '))
- return
-
- # Store the collected settings for persistence
- self._save_advanced_settings(
- custom_model, base_url, api_key, memory_condensation
- )
-
- def _save_llm_settings(self, model, api_key, base_url: str | None = None) -> None:
- llm = LLM(
- model=model,
- api_key=api_key,
- base_url=base_url,
- usage_id='agent',
- litellm_extra_body={"metadata": get_llm_metadata(model_name=model, llm_type='agent')},
- )
-
- agent = self.agent_store.load()
- if not agent:
- agent = get_default_cli_agent(llm=llm)
-
- # Must update all LLMs
- agent = agent.model_copy(update={'llm': llm})
- condenser = LLMSummarizingCondenser(
- llm=llm.model_copy(
- update={"usage_id": "condenser"}
- )
- )
- agent = agent.model_copy(update={'condenser': condenser})
- self.agent_store.save(agent)
-
- def _save_advanced_settings(
- self, custom_model: str, base_url: str, api_key: str, memory_condensation: bool
- ):
- self._save_llm_settings(custom_model, api_key, base_url=base_url)
-
- agent_spec = self.agent_store.load()
- if not agent_spec:
- return
-
- if not memory_condensation:
- agent_spec.model_copy(update={'condenser': None})
-
- self.agent_store.save(agent_spec)
diff --git a/openhands-cli/openhands_cli/tui/settings/store.py b/openhands-cli/openhands_cli/tui/settings/store.py
deleted file mode 100644
index 018a7484e02e..000000000000
--- a/openhands-cli/openhands_cli/tui/settings/store.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# openhands_cli/settings/store.py
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Any
-
-from fastmcp.mcp_config import MCPConfig
-from openhands_cli.locations import (
- AGENT_SETTINGS_PATH,
- MCP_CONFIG_FILE,
- PERSISTENCE_DIR,
- WORK_DIR,
-)
-from openhands_cli.utils import get_llm_metadata
-from prompt_toolkit import HTML, print_formatted_text
-
-from openhands.sdk import Agent, AgentContext, LocalFileStore
-from openhands.sdk.context.condenser import LLMSummarizingCondenser
-from openhands.tools.preset.default import get_default_tools
-
-
-class AgentStore:
- """Single source of truth for persisting/retrieving AgentSpec."""
-
- def __init__(self) -> None:
- self.file_store = LocalFileStore(root=PERSISTENCE_DIR)
-
- def load_mcp_configuration(self) -> dict[str, Any]:
- try:
- mcp_config_path = Path(self.file_store.root) / MCP_CONFIG_FILE
- mcp_config = MCPConfig.from_file(mcp_config_path)
- return mcp_config.to_dict()['mcpServers']
- except Exception:
- return {}
-
- def load(self, session_id: str | None = None) -> Agent | None:
- try:
- str_spec = self.file_store.read(AGENT_SETTINGS_PATH)
- agent = Agent.model_validate_json(str_spec)
-
-
- # Temporary to remove security analyzer from agent specs
- # Security analyzer is set via conversation API now
- # Doing this so that deprecation warning is thrown only the first time running CLI
- if agent.security_analyzer:
- agent = agent.model_copy(
- update={"security_analyzer": None}
- )
- self.save(agent)
-
- # Update tools with most recent working directory
- updated_tools = get_default_tools(enable_browser=False)
-
- agent_context = AgentContext(
- system_message_suffix=f'You current working directory is: {WORK_DIR}',
- )
-
- mcp_config: dict = self.load_mcp_configuration()
-
- # Update LLM metadata with current information
- agent_llm_metadata = get_llm_metadata(
- model_name=agent.llm.model, llm_type='agent', session_id=session_id
- )
- updated_llm = agent.llm.model_copy(update={'litellm_extra_body': {'metadata': agent_llm_metadata}})
-
- condenser_updates = {}
- if agent.condenser and isinstance(agent.condenser, LLMSummarizingCondenser):
- condenser_updates['llm'] = agent.condenser.llm.model_copy(
- update={
- 'litellm_extra_body': {
- 'metadata': get_llm_metadata(
- model_name=agent.condenser.llm.model,
- llm_type='condenser',
- session_id=session_id,
- )
- }
- }
- )
-
- # Update tools and context
- agent = agent.model_copy(
- update={
- 'llm': updated_llm,
- 'tools': updated_tools,
- 'mcp_config': {'mcpServers': mcp_config} if mcp_config else {},
- 'agent_context': agent_context,
- 'condenser': agent.condenser.model_copy(update=condenser_updates)
- if agent.condenser
- else None,
- }
- )
-
- return agent
- except FileNotFoundError:
- return None
- except Exception:
- print_formatted_text(
- HTML('\nAgent configuration file is corrupted! ')
- )
- return None
-
- def save(self, agent: Agent) -> None:
- serialized_spec = agent.model_dump_json(context={'expose_secrets': True})
- self.file_store.write(AGENT_SETTINGS_PATH, serialized_spec)
diff --git a/openhands-cli/openhands_cli/tui/status.py b/openhands-cli/openhands_cli/tui/status.py
deleted file mode 100644
index 91d0ef0142be..000000000000
--- a/openhands-cli/openhands_cli/tui/status.py
+++ /dev/null
@@ -1,109 +0,0 @@
-"""Status display components for OpenHands CLI TUI."""
-
-from datetime import datetime
-
-from openhands.sdk import BaseConversation
-from prompt_toolkit import print_formatted_text
-from prompt_toolkit.formatted_text import HTML
-from prompt_toolkit.shortcuts import print_container
-from prompt_toolkit.widgets import Frame, TextArea
-
-
-def display_status(
- conversation: BaseConversation,
- session_start_time: datetime,
-) -> None:
- """Display detailed conversation status including metrics and uptime.
-
- Args:
- conversation: The conversation to display status for
- session_start_time: The session start time for uptime calculation
- """
- # Get conversation stats
- stats = conversation.conversation_stats.get_combined_metrics()
-
- # Calculate uptime from session start time
- now = datetime.now()
- diff = now - session_start_time
-
- # Format as hours, minutes, seconds
- total_seconds = int(diff.total_seconds())
- hours = total_seconds // 3600
- minutes = (total_seconds % 3600) // 60
- seconds = total_seconds % 60
- uptime_str = f"{hours}h {minutes}m {seconds}s"
-
- # Display conversation ID and uptime
- print_formatted_text(HTML(f'Conversation ID: {conversation.id} '))
- print_formatted_text(HTML(f'Uptime: {uptime_str} '))
- print_formatted_text('')
-
- # Calculate token metrics
- token_usage = stats.accumulated_token_usage
- total_input_tokens = token_usage.prompt_tokens if token_usage else 0
- total_output_tokens = token_usage.completion_tokens if token_usage else 0
- cache_hits = token_usage.cache_read_tokens if token_usage else 0
- cache_writes = token_usage.cache_write_tokens if token_usage else 0
- total_tokens = total_input_tokens + total_output_tokens
- total_cost = stats.accumulated_cost
-
- # Use prompt_toolkit containers for formatted display
- _display_usage_metrics_container(
- total_cost,
- total_input_tokens,
- total_output_tokens,
- cache_hits,
- cache_writes,
- total_tokens
- )
-
-
-def _display_usage_metrics_container(
- total_cost: float,
- total_input_tokens: int,
- total_output_tokens: int,
- cache_hits: int,
- cache_writes: int,
- total_tokens: int
-) -> None:
- """Display usage metrics using prompt_toolkit containers."""
- # Format values with proper formatting
- cost_str = f'${total_cost:.6f}'
- input_tokens_str = f'{total_input_tokens:,}'
- cache_read_str = f'{cache_hits:,}'
- cache_write_str = f'{cache_writes:,}'
- output_tokens_str = f'{total_output_tokens:,}'
- total_tokens_str = f'{total_tokens:,}'
-
- labels_and_values = [
- (' Total Cost (USD):', cost_str),
- ('', ''),
- (' Total Input Tokens:', input_tokens_str),
- (' Cache Hits:', cache_read_str),
- (' Cache Writes:', cache_write_str),
- (' Total Output Tokens:', output_tokens_str),
- ('', ''),
- (' Total Tokens:', total_tokens_str),
- ]
-
- # Calculate max widths for alignment
- max_label_width = max(len(label) for label, _ in labels_and_values)
- max_value_width = max(len(value) for _, value in labels_and_values)
-
- # Construct the summary text with aligned columns
- summary_lines = [
- f'{label:<{max_label_width}} {value:<{max_value_width}}'
- for label, value in labels_and_values
- ]
- summary_text = '\n'.join(summary_lines)
-
- container = Frame(
- TextArea(
- text=summary_text,
- read_only=True,
- wrap_lines=True,
- ),
- title='Usage Metrics',
- )
-
- print_container(container)
diff --git a/openhands-cli/openhands_cli/tui/tui.py b/openhands-cli/openhands_cli/tui/tui.py
deleted file mode 100644
index b966d877dbfc..000000000000
--- a/openhands-cli/openhands_cli/tui/tui.py
+++ /dev/null
@@ -1,100 +0,0 @@
-from collections.abc import Generator
-from uuid import UUID
-
-from prompt_toolkit import print_formatted_text
-from prompt_toolkit.completion import CompleteEvent, Completer, Completion
-from prompt_toolkit.document import Document
-from prompt_toolkit.formatted_text import HTML
-from prompt_toolkit.shortcuts import clear
-
-from openhands_cli import __version__
-from openhands_cli.pt_style import get_cli_style
-
-DEFAULT_STYLE = get_cli_style()
-
-# Available commands with descriptions
-COMMANDS = {
- '/exit': 'Exit the application',
- '/help': 'Display available commands',
- '/clear': 'Clear the screen',
- '/new': 'Start a fresh conversation',
- '/status': 'Display conversation details',
- '/confirm': 'Toggle confirmation mode on/off',
- '/resume': 'Resume a paused conversation',
- '/settings': 'Display and modify current settings',
- '/mcp': 'View MCP (Model Context Protocol) server configuration',
-}
-
-
-class CommandCompleter(Completer):
- """Custom completer for commands with interactive dropdown."""
-
- def get_completions(
- self, document: Document, complete_event: CompleteEvent
- ) -> Generator[Completion, None, None]:
- text = document.text_before_cursor.lstrip()
- if text.startswith('/'):
- for command, description in COMMANDS.items():
- if command.startswith(text):
- yield Completion(
- command,
- start_position=-len(text),
- display_meta=description,
- style='bg:ansidarkgray fg:gold',
- )
-
-
-def display_banner(conversation_id: str, resume: bool = False) -> None:
- print_formatted_text(
- HTML(r"""
- ___ _ _ _
- / _ \ _ __ ___ _ __ | | | | __ _ _ __ __| |___
- | | | | '_ \ / _ \ '_ \| |_| |/ _` | '_ \ / _` / __|
- | |_| | |_) | __/ | | | _ | (_| | | | | (_| \__ \
- \___ /| .__/ \___|_| |_|_| |_|\__,_|_| |_|\__,_|___/
- |_|
- """),
- style=DEFAULT_STYLE,
- )
-
- print_formatted_text('')
- if not resume:
- print_formatted_text(
- HTML(f'Initialized conversation {conversation_id} ')
- )
- else:
- print_formatted_text(
- HTML(f'Resumed conversation {conversation_id} ')
- )
- print_formatted_text('')
-
-
-def display_help() -> None:
- """Display help information about available commands."""
- print_formatted_text('')
- print_formatted_text(HTML('🤖 OpenHands CLI Help '))
- print_formatted_text(HTML('Available commands: '))
- print_formatted_text('')
-
- for command, description in COMMANDS.items():
- print_formatted_text(HTML(f' {command} - {description}'))
-
- print_formatted_text('')
- print_formatted_text(HTML('Tips: '))
- print_formatted_text(' • Type / and press Tab to see command suggestions')
- print_formatted_text(' • Use arrow keys to navigate through suggestions')
- print_formatted_text(' • Press Enter to select a command')
- print_formatted_text('')
-
-
-def display_welcome(conversation_id: UUID, resume: bool = False) -> None:
- """Display welcome message."""
- clear()
- display_banner(str(conversation_id), resume)
- print_formatted_text(HTML("Let's start building! "))
- print_formatted_text(
- HTML(
- 'What do you want to build? Type /help for help '
- )
- )
- print()
diff --git a/openhands-cli/openhands_cli/tui/utils.py b/openhands-cli/openhands_cli/tui/utils.py
deleted file mode 100644
index fbf620022326..000000000000
--- a/openhands-cli/openhands_cli/tui/utils.py
+++ /dev/null
@@ -1,14 +0,0 @@
-class StepCounter:
- """Automatically manages step numbering for settings flows."""
-
- def __init__(self, total_steps: int):
- self.current_step = 0
- self.total_steps = total_steps
-
- def next_step(self, prompt: str) -> str:
- """Get the next step prompt with automatic numbering."""
- self.current_step += 1
- return f'(Step {self.current_step}/{self.total_steps}) {prompt}'
-
- def existing_step(self, prompt: str) -> str:
- return f'(Step {self.current_step}/{self.total_steps}) {prompt}'
diff --git a/openhands-cli/openhands_cli/tui/visualizer.py b/openhands-cli/openhands_cli/tui/visualizer.py
deleted file mode 100644
index efcdb338bd85..000000000000
--- a/openhands-cli/openhands_cli/tui/visualizer.py
+++ /dev/null
@@ -1,312 +0,0 @@
-import re
-
-from rich.console import Console
-from rich.panel import Panel
-from rich.text import Text
-
-from openhands.sdk.conversation.visualizer.base import (
- ConversationVisualizerBase,
-)
-from openhands.sdk.event import (
- ActionEvent,
- AgentErrorEvent,
- MessageEvent,
- ObservationEvent,
- PauseEvent,
- SystemPromptEvent,
- UserRejectObservation,
-)
-from openhands.sdk.event.base import Event
-from openhands.sdk.event.condenser import Condensation
-
-
-# These are external inputs
-_OBSERVATION_COLOR = "yellow"
-_MESSAGE_USER_COLOR = "gold3"
-_PAUSE_COLOR = "bright_yellow"
-# These are internal system stuff
-_SYSTEM_COLOR = "magenta"
-_THOUGHT_COLOR = "bright_black"
-_ERROR_COLOR = "red"
-# These are agent actions
-_ACTION_COLOR = "blue"
-_MESSAGE_ASSISTANT_COLOR = _ACTION_COLOR
-
-DEFAULT_HIGHLIGHT_REGEX = {
- r"^Reasoning:": f"bold {_THOUGHT_COLOR}",
- r"^Thought:": f"bold {_THOUGHT_COLOR}",
- r"^Action:": f"bold {_ACTION_COLOR}",
- r"^Arguments:": f"bold {_ACTION_COLOR}",
- r"^Tool:": f"bold {_OBSERVATION_COLOR}",
- r"^Result:": f"bold {_OBSERVATION_COLOR}",
- r"^Rejection Reason:": f"bold {_ERROR_COLOR}",
- # Markdown-style
- r"\*\*(.*?)\*\*": "bold",
- r"\*(.*?)\*": "italic",
-}
-
-_PANEL_PADDING = (1, 1)
-
-
-class CLIVisualizer(ConversationVisualizerBase):
- """Handles visualization of conversation events with Rich formatting.
-
- Provides Rich-formatted output with panels and complete content display.
- """
-
- _console: Console
- _skip_user_messages: bool
- _highlight_patterns: dict[str, str]
-
- def __init__(
- self,
- name: str | None = None,
- highlight_regex: dict[str, str] | None = DEFAULT_HIGHLIGHT_REGEX,
- skip_user_messages: bool = False,
- ):
- """Initialize the visualizer.
-
- Args:
- name: Optional name to prefix in panel titles to identify
- which agent/conversation is speaking.
- highlight_regex: Dictionary mapping regex patterns to Rich color styles
- for highlighting keywords in the visualizer.
- For example: {"Reasoning:": "bold blue",
- "Thought:": "bold green"}
- skip_user_messages: If True, skip displaying user messages. Useful for
- scenarios where user input is not relevant to show.
- """
- super().__init__(
- name=name,
- )
- self._console = Console()
- self._skip_user_messages = skip_user_messages
- self._highlight_patterns = highlight_regex or {}
-
- def on_event(self, event: Event) -> None:
- """Main event handler that displays events with Rich formatting."""
- panel = self._create_event_panel(event)
- if panel:
- self._console.print(panel)
- self._console.print() # Add spacing between events
-
- def _apply_highlighting(self, text: Text) -> Text:
- """Apply regex-based highlighting to text content.
-
- Args:
- text: The Rich Text object to highlight
-
- Returns:
- A new Text object with highlighting applied
- """
- if not self._highlight_patterns:
- return text
-
- # Create a copy to avoid modifying the original
- highlighted = text.copy()
-
- # Apply each pattern using Rich's built-in highlight_regex method
- for pattern, style in self._highlight_patterns.items():
- pattern_compiled = re.compile(pattern, re.MULTILINE)
- highlighted.highlight_regex(pattern_compiled, style)
-
- return highlighted
-
- def _create_event_panel(self, event: Event) -> Panel | None:
- """Create a Rich Panel for the event with appropriate styling."""
- # Use the event's visualize property for content
- content = event.visualize
-
- if not content.plain.strip():
- return None
-
- # Apply highlighting if configured
- if self._highlight_patterns:
- content = self._apply_highlighting(content)
-
- # Don't emit system prompt in CLI
- if isinstance(event, SystemPromptEvent):
- title = f"[bold {_SYSTEM_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"System Prompt[/bold {_SYSTEM_COLOR}]"
- return None
- elif isinstance(event, ActionEvent):
- # Check if action is None (non-executable)
- title = f"[bold {_ACTION_COLOR}]"
- if self._name:
- title += f"{self._name} "
- if event.action is None:
- title += f"Agent Action (Not Executed)[/bold {_ACTION_COLOR}]"
- else:
- title += f"Agent Action[/bold {_ACTION_COLOR}]"
- return Panel(
- content,
- title=title,
- subtitle=self._format_metrics_subtitle(),
- border_style=_ACTION_COLOR,
- padding=_PANEL_PADDING,
- expand=True,
- )
- elif isinstance(event, ObservationEvent):
- title = f"[bold {_OBSERVATION_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"Observation[/bold {_OBSERVATION_COLOR}]"
- return Panel(
- content,
- title=title,
- border_style=_OBSERVATION_COLOR,
- padding=_PANEL_PADDING,
- expand=True,
- )
- elif isinstance(event, UserRejectObservation):
- title = f"[bold {_ERROR_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"User Rejected Action[/bold {_ERROR_COLOR}]"
- return Panel(
- content,
- title=title,
- border_style=_ERROR_COLOR,
- padding=_PANEL_PADDING,
- expand=True,
- )
- elif isinstance(event, MessageEvent):
- if (
- self._skip_user_messages
- and event.llm_message
- and event.llm_message.role == "user"
- ):
- return
- assert event.llm_message is not None
- # Role-based styling
- role_colors = {
- "user": _MESSAGE_USER_COLOR,
- "assistant": _MESSAGE_ASSISTANT_COLOR,
- }
- role_color = role_colors.get(event.llm_message.role, "white")
-
- # "User Message To [Name] Agent" for user
- # "Message from [Name] Agent" for agent
- agent_name = f"{self._name} " if self._name else ""
-
- if event.llm_message.role == "user":
- title_text = (
- f"[bold {role_color}]User Message to "
- f"{agent_name}Agent[/bold {role_color}]"
- )
- else:
- title_text = (
- f"[bold {role_color}]Message from "
- f"{agent_name}Agent[/bold {role_color}]"
- )
- return Panel(
- content,
- title=title_text,
- subtitle=self._format_metrics_subtitle(),
- border_style=role_color,
- padding=_PANEL_PADDING,
- expand=True,
- )
- elif isinstance(event, AgentErrorEvent):
- title = f"[bold {_ERROR_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"Agent Error[/bold {_ERROR_COLOR}]"
- return Panel(
- content,
- title=title,
- subtitle=self._format_metrics_subtitle(),
- border_style=_ERROR_COLOR,
- padding=_PANEL_PADDING,
- expand=True,
- )
- elif isinstance(event, PauseEvent):
- title = f"[bold {_PAUSE_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"User Paused[/bold {_PAUSE_COLOR}]"
- return Panel(
- content,
- title=title,
- border_style=_PAUSE_COLOR,
- padding=_PANEL_PADDING,
- expand=True,
- )
- elif isinstance(event, Condensation):
- title = f"[bold {_SYSTEM_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"Condensation[/bold {_SYSTEM_COLOR}]"
- return Panel(
- content,
- title=title,
- subtitle=self._format_metrics_subtitle(),
- border_style=_SYSTEM_COLOR,
- expand=True,
- )
- else:
- # Fallback panel for unknown event types
- title = f"[bold {_ERROR_COLOR}]"
- if self._name:
- title += f"{self._name} "
- title += f"UNKNOWN Event: {event.__class__.__name__}[/bold {_ERROR_COLOR}]"
- return Panel(
- content,
- title=title,
- subtitle=f"({event.source})",
- border_style=_ERROR_COLOR,
- padding=_PANEL_PADDING,
- expand=True,
- )
-
- def _format_metrics_subtitle(self) -> str | None:
- """Format LLM metrics as a visually appealing subtitle string with icons,
- colors, and k/m abbreviations using conversation stats."""
- stats = self.conversation_stats
- if not stats:
- return None
-
- combined_metrics = stats.get_combined_metrics()
- if not combined_metrics or not combined_metrics.accumulated_token_usage:
- return None
-
- usage = combined_metrics.accumulated_token_usage
- cost = combined_metrics.accumulated_cost or 0.0
-
- # helper: 1234 -> "1.2K", 1200000 -> "1.2M"
- def abbr(n: int | float) -> str:
- n = int(n or 0)
- if n >= 1_000_000_000:
- val, suffix = n / 1_000_000_000, "B"
- elif n >= 1_000_000:
- val, suffix = n / 1_000_000, "M"
- elif n >= 1_000:
- val, suffix = n / 1_000, "K"
- else:
- return str(n)
- return f"{val:.2f}".rstrip("0").rstrip(".") + suffix
-
- input_tokens = abbr(usage.prompt_tokens or 0)
- output_tokens = abbr(usage.completion_tokens or 0)
-
- # Cache hit rate (prompt + cache)
- prompt = usage.prompt_tokens or 0
- cache_read = usage.cache_read_tokens or 0
- cache_rate = f"{(cache_read / prompt * 100):.2f}%" if prompt > 0 else "N/A"
- reasoning_tokens = usage.reasoning_tokens or 0
-
- # Cost
- cost_str = f"{cost:.4f}" if cost > 0 else "0.00"
-
- # Build with fixed color scheme
- parts: list[str] = []
- parts.append(f"[cyan]↑ input {input_tokens}[/cyan]")
- parts.append(f"[magenta]cache hit {cache_rate}[/magenta]")
- if reasoning_tokens > 0:
- parts.append(f"[yellow] reasoning {abbr(reasoning_tokens)}[/yellow]")
- parts.append(f"[blue]↓ output {output_tokens}[/blue]")
- parts.append(f"[green]$ {cost_str}[/green]")
-
- return "Tokens: " + " • ".join(parts)
diff --git a/openhands-cli/openhands_cli/user_actions/__init__.py b/openhands-cli/openhands_cli/user_actions/__init__.py
deleted file mode 100644
index 9bfa461c6a8e..000000000000
--- a/openhands-cli/openhands_cli/user_actions/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-from openhands_cli.user_actions.agent_action import ask_user_confirmation
-from openhands_cli.user_actions.exit_session import (
- exit_session_confirmation,
-)
-from openhands_cli.user_actions.settings_action import (
- choose_llm_provider,
- settings_type_confirmation,
-)
-from openhands_cli.user_actions.types import UserConfirmation
-
-__all__ = [
- 'ask_user_confirmation',
- 'exit_session_confirmation',
- 'UserConfirmation',
- 'settings_type_confirmation',
- 'choose_llm_provider',
-]
diff --git a/openhands-cli/openhands_cli/user_actions/agent_action.py b/openhands-cli/openhands_cli/user_actions/agent_action.py
deleted file mode 100644
index 630cae7f522b..000000000000
--- a/openhands-cli/openhands_cli/user_actions/agent_action.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import html
-from prompt_toolkit import HTML, print_formatted_text
-
-from openhands.sdk.security.confirmation_policy import (
- ConfirmRisky,
- NeverConfirm,
- SecurityRisk,
-)
-from openhands_cli.user_actions.types import ConfirmationResult, UserConfirmation
-from openhands_cli.user_actions.utils import cli_confirm, cli_text_input
-
-
-def ask_user_confirmation(
- pending_actions: list, using_risk_based_policy: bool = False
-) -> ConfirmationResult:
- """Ask user to confirm pending actions.
-
- Args:
- pending_actions: List of pending actions from the agent
-
- Returns:
- ConfirmationResult with decision, optional policy_change, and reason
- """
-
- if not pending_actions:
- return ConfirmationResult(decision=UserConfirmation.ACCEPT)
-
- print_formatted_text(
- HTML(
- f'🔍 Agent created {len(pending_actions)} action(s) and is waiting for confirmation: '
- )
- )
-
- for i, action in enumerate(pending_actions, 1):
- tool_name = getattr(action, 'tool_name', '[unknown tool]')
- action_content = (
- str(getattr(action, 'action', ''))[:100].replace('\n', ' ')
- or '[unknown action]'
- )
- print_formatted_text(
- HTML(f' {i}. {tool_name}: {html.escape(action_content)}... ')
- )
-
- question = 'Choose an option:'
- options = [
- 'Yes, proceed',
- 'Reject',
- "Always proceed (don't ask again)",
- ]
-
- if not using_risk_based_policy:
- options.append('Auto-confirm LOW/MEDIUM risk, ask for HIGH risk')
-
- try:
- index = cli_confirm(question, options, escapable=True)
- except (EOFError, KeyboardInterrupt):
- print_formatted_text(HTML('\nNo input received; pausing agent. '))
- return ConfirmationResult(decision=UserConfirmation.DEFER)
-
- if index == 0:
- return ConfirmationResult(decision=UserConfirmation.ACCEPT)
- elif index == 1:
- # Handle "Reject" option with optional reason
- try:
- reason = cli_text_input('Reason (and let OpenHands know why): ').strip()
- except (EOFError, KeyboardInterrupt):
- return ConfirmationResult(decision=UserConfirmation.DEFER)
-
- return ConfirmationResult(decision=UserConfirmation.REJECT, reason=reason)
- elif index == 2:
- return ConfirmationResult(
- decision=UserConfirmation.ACCEPT, policy_change=NeverConfirm()
- )
- elif index == 3:
- return ConfirmationResult(
- decision=UserConfirmation.ACCEPT,
- policy_change=ConfirmRisky(threshold=SecurityRisk.HIGH),
- )
-
- return ConfirmationResult(decision=UserConfirmation.REJECT)
diff --git a/openhands-cli/openhands_cli/user_actions/exit_session.py b/openhands-cli/openhands_cli/user_actions/exit_session.py
deleted file mode 100644
index c624d5209b45..000000000000
--- a/openhands-cli/openhands_cli/user_actions/exit_session.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from openhands_cli.user_actions.types import UserConfirmation
-from openhands_cli.user_actions.utils import cli_confirm
-
-
-def exit_session_confirmation() -> UserConfirmation:
- """
- Ask user to confirm exiting session.
- """
-
- question = 'Terminate session?'
- options = ['Yes, proceed', 'No, dismiss']
- index = cli_confirm(question, options) # Blocking UI, not escapable
-
- options_mapping = {
- 0: UserConfirmation.ACCEPT, # User accepts termination session
- 1: UserConfirmation.REJECT, # User does not terminate session
- }
- return options_mapping.get(index, UserConfirmation.REJECT)
diff --git a/openhands-cli/openhands_cli/user_actions/settings_action.py b/openhands-cli/openhands_cli/user_actions/settings_action.py
deleted file mode 100644
index e41e08bdb079..000000000000
--- a/openhands-cli/openhands_cli/user_actions/settings_action.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from enum import Enum
-
-from openhands.sdk.llm import UNVERIFIED_MODELS_EXCLUDING_BEDROCK, VERIFIED_MODELS
-from prompt_toolkit.completion import FuzzyWordCompleter
-from pydantic import SecretStr
-
-from openhands_cli.tui.utils import StepCounter
-from openhands_cli.user_actions.utils import (
- NonEmptyValueValidator,
- cli_confirm,
- cli_text_input,
-)
-
-
-class SettingsType(Enum):
- BASIC = 'basic'
- ADVANCED = 'advanced'
-
-
-def settings_type_confirmation(first_time: bool = False) -> SettingsType:
- question = (
- '\nWelcome to OpenHands! Let\'s configure your LLM settings.\n'
- 'Choose your preferred setup method:'
- )
- choices = [
- 'LLM (Basic)',
- 'LLM (Advanced)'
- ]
- if not first_time:
- question = 'Which settings would you like to modify?'
- choices.append('Go back')
-
-
- index = cli_confirm(question, choices, escapable=True)
-
- if choices[index] == 'Go back':
- raise KeyboardInterrupt
-
- options_map = {0: SettingsType.BASIC, 1: SettingsType.ADVANCED}
-
- return options_map.get(index)
-
-
-def choose_llm_provider(step_counter: StepCounter, escapable=True) -> str:
- question = step_counter.next_step(
- 'Select LLM Provider (TAB for options, CTRL-c to cancel): '
- )
- options = (
- list(VERIFIED_MODELS.keys()).copy()
- + list(UNVERIFIED_MODELS_EXCLUDING_BEDROCK.keys()).copy()
- )
- alternate_option = 'Select another provider'
-
- display_options = options[:4] + [alternate_option]
-
- index = cli_confirm(question, display_options, escapable=escapable)
- chosen_option = display_options[index]
- if display_options[index] != alternate_option:
- return chosen_option
-
- question = step_counter.existing_step(
- 'Type LLM Provider (TAB to complete, CTRL-c to cancel): '
- )
- return cli_text_input(
- question, escapable=True, completer=FuzzyWordCompleter(options, WORD=True)
- )
-
-
-def choose_llm_model(step_counter: StepCounter, provider: str, escapable=True) -> str:
- """Choose LLM model using spec-driven approach. Return (model, deferred)."""
-
- models = VERIFIED_MODELS.get(
- provider, []
- ) + UNVERIFIED_MODELS_EXCLUDING_BEDROCK.get(provider, [])
-
- if provider == 'openhands':
- question = (
- step_counter.next_step('Select Available OpenHands Model:\n')
- + 'LLM usage is billed at the providers’ rates with no markup. Details: https://docs.all-hands.dev/usage/llms/openhands-llms'
- )
- else:
- question = step_counter.next_step(
- 'Select LLM Model (TAB for options, CTRL-c to cancel): '
- )
- alternate_option = 'Select another model'
- display_options = models[:4] + [alternate_option]
- index = cli_confirm(question, display_options, escapable=escapable)
- chosen_option = display_options[index]
-
- if chosen_option != alternate_option:
- return chosen_option
-
- question = step_counter.existing_step(
- 'Type model id (TAB to complete, CTRL-c to cancel): '
- )
-
- return cli_text_input(
- question, escapable=True, completer=FuzzyWordCompleter(models, WORD=True)
- )
-
-
-def prompt_api_key(
- step_counter: StepCounter,
- provider: str,
- existing_api_key: SecretStr | None = None,
- escapable=True,
-) -> str:
- helper_text = (
- '\nYou can find your OpenHands LLM API Key in the API Keys tab of OpenHands Cloud: '
- 'https://app.all-hands.dev/settings/api-keys\n'
- if provider == 'openhands'
- else ''
- )
-
- if existing_api_key:
- masked_key = existing_api_key.get_secret_value()[:3] + '***'
- question = f'Enter API Key [{masked_key}] (CTRL-c to cancel, ENTER to keep current, type new to change): '
- # For existing keys, allow empty input to keep current key
- validator = None
- else:
- question = 'Enter API Key (CTRL-c to cancel): '
- # For new keys, require non-empty input
- validator = NonEmptyValueValidator()
-
- question = helper_text + step_counter.next_step(question)
- user_input = cli_text_input(
- question, escapable=escapable, validator=validator, is_password=True
- )
-
- # If user pressed ENTER with existing key (empty input), return the existing key
- if existing_api_key and not user_input.strip():
- return existing_api_key.get_secret_value()
-
- return user_input
-
-
-# Advanced settings functions
-def prompt_custom_model(step_counter: StepCounter, escapable=True) -> str:
- """Prompt for custom model name."""
- question = step_counter.next_step('Custom Model (CTRL-c to cancel): ')
- return cli_text_input(question, escapable=escapable)
-
-
-def prompt_base_url(step_counter: StepCounter, escapable=True) -> str:
- """Prompt for base URL."""
- question = step_counter.next_step('Base URL (CTRL-c to cancel): ')
- return cli_text_input(
- question, escapable=escapable, validator=NonEmptyValueValidator()
- )
-
-
-def choose_memory_condensation(step_counter: StepCounter, escapable=True) -> bool:
- """Choose memory condensation setting."""
- question = step_counter.next_step('Memory Condensation (CTRL-c to cancel): ')
- choices = ['Enable', 'Disable']
-
- index = cli_confirm(question, choices, escapable=escapable)
- return index == 0 # True for Enable, False for Disable
-
-
-def save_settings_confirmation() -> bool:
- """Prompt user to confirm saving settings."""
- question = 'Save new settings? (They will take effect after restart)'
- discard = 'No, discard'
- options = ['Yes, save', discard]
-
- index = cli_confirm(question, options, escapable=True)
- if options[index] == discard:
- raise KeyboardInterrupt
-
- return options[index]
diff --git a/openhands-cli/openhands_cli/user_actions/types.py b/openhands-cli/openhands_cli/user_actions/types.py
deleted file mode 100644
index 472f5b02d03c..000000000000
--- a/openhands-cli/openhands_cli/user_actions/types.py
+++ /dev/null
@@ -1,18 +0,0 @@
-from enum import Enum
-from typing import Optional
-
-from pydantic import BaseModel
-
-from openhands.sdk.security.confirmation_policy import ConfirmationPolicyBase
-
-
-class UserConfirmation(Enum):
- ACCEPT = 'accept'
- REJECT = 'reject'
- DEFER = 'defer'
-
-
-class ConfirmationResult(BaseModel):
- decision: UserConfirmation
- policy_change: Optional[ConfirmationPolicyBase] = None
- reason: str = ''
diff --git a/openhands-cli/openhands_cli/user_actions/utils.py b/openhands-cli/openhands_cli/user_actions/utils.py
deleted file mode 100644
index bf27a7782c13..000000000000
--- a/openhands-cli/openhands_cli/user_actions/utils.py
+++ /dev/null
@@ -1,199 +0,0 @@
-from prompt_toolkit import HTML, PromptSession
-from prompt_toolkit.application import Application
-from prompt_toolkit.completion import Completer
-from prompt_toolkit.input.base import Input
-from prompt_toolkit.key_binding import KeyBindings
-from prompt_toolkit.key_binding.key_processor import KeyPressEvent
-from prompt_toolkit.layout.containers import HSplit, Window
-from prompt_toolkit.layout.controls import FormattedTextControl
-from prompt_toolkit.layout.dimension import Dimension
-from prompt_toolkit.layout.layout import Layout
-from prompt_toolkit.output.base import Output
-from prompt_toolkit.shortcuts import prompt
-from prompt_toolkit.validation import ValidationError, Validator
-
-from openhands_cli.tui import DEFAULT_STYLE
-from openhands_cli.tui.tui import CommandCompleter
-
-
-def build_keybindings(
- choices: list[str], selected: list[int], escapable: bool
-) -> KeyBindings:
- """Create keybindings for the confirm UI. Split for testability."""
- kb = KeyBindings()
-
- @kb.add('up')
- def _handle_up(event: KeyPressEvent) -> None:
- selected[0] = (selected[0] - 1) % len(choices)
-
- @kb.add('down')
- def _handle_down(event: KeyPressEvent) -> None:
- selected[0] = (selected[0] + 1) % len(choices)
-
- @kb.add('enter')
- def _handle_enter(event: KeyPressEvent) -> None:
- event.app.exit(result=selected[0])
-
- if escapable:
-
- @kb.add('c-c') # Ctrl+C
- def _handle_hard_interrupt(event: KeyPressEvent) -> None:
- event.app.exit(exception=KeyboardInterrupt())
-
- @kb.add('c-p') # Ctrl+P
- def _handle_pause_interrupt(event: KeyPressEvent) -> None:
- event.app.exit(exception=KeyboardInterrupt())
-
- @kb.add('escape') # Escape key
- def _handle_escape(event: KeyPressEvent) -> None:
- event.app.exit(exception=KeyboardInterrupt())
-
- return kb
-
-
-def build_layout(question: str, choices: list[str], selected_ref: list[int]) -> Layout:
- """Create the layout for the confirm UI. Split for testability."""
-
- def get_choice_text() -> list[tuple[str, str]]:
- lines: list[tuple[str, str]] = []
- lines.append(('class:question', f'{question}\n\n'))
- for i, choice in enumerate(choices):
- is_selected = i == selected_ref[0]
- prefix = '> ' if is_selected else ' '
- style = 'class:selected' if is_selected else 'class:unselected'
- lines.append((style, f'{prefix}{choice}\n'))
- return lines
-
- content_window = Window(
- FormattedTextControl(get_choice_text),
- always_hide_cursor=True,
- height=Dimension(max=8),
- )
- return Layout(HSplit([content_window]))
-
-
-def cli_confirm(
- question: str = 'Are you sure?',
- choices: list[str] | None = None,
- initial_selection: int = 0,
- escapable: bool = False,
- input: Input | None = None, # strictly for unit testing
- output: Output | None = None, # strictly for unit testing
-) -> int:
- """Display a confirmation prompt with the given question and choices.
-
- Returns the index of the selected choice.
- """
- if choices is None:
- choices = ['Yes', 'No']
- selected = [initial_selection] # Using list to allow modification in closure
-
- kb = build_keybindings(choices, selected, escapable)
- layout = build_layout(question, choices, selected)
-
- app = Application(
- layout=layout,
- key_bindings=kb,
- style=DEFAULT_STYLE,
- full_screen=False,
- input=input,
- output=output,
- )
-
- return int(app.run(in_thread=True))
-
-
-def cli_text_input(
- question: str,
- escapable: bool = True,
- completer: Completer | None = None,
- validator: Validator = None,
- is_password: bool = False,
-) -> str:
- """Prompt user to enter text input with optional validation.
-
- Args:
- question: The prompt question to display
- escapable: Whether the user can escape with Ctrl+C or Ctrl+P
- completer: Optional completer for tab completion
- validator: Optional callable that takes a string and returns True if valid.
- If validation fails, the callable should display error messages
- and the user will be reprompted.
-
- Returns:
- The validated user input string (stripped of whitespace)
- """
-
- kb = KeyBindings()
-
- if escapable:
-
- @kb.add('c-c')
- def _(event: KeyPressEvent) -> None:
- event.app.exit(exception=KeyboardInterrupt())
-
- @kb.add('c-p')
- def _(event: KeyPressEvent) -> None:
- event.app.exit(exception=KeyboardInterrupt())
-
- @kb.add('enter')
- def _handle_enter(event: KeyPressEvent):
- event.app.exit(result=event.current_buffer.text)
-
- reason = str(
- prompt(
- question,
- style=DEFAULT_STYLE,
- key_bindings=kb,
- completer=completer,
- is_password=is_password,
- validator=validator,
- )
- )
- return reason.strip()
-
-
-def get_session_prompter(
- input: Input | None = None, # strictly for unit testing
- output: Output | None = None, # strictly for unit testing
-) -> PromptSession:
- bindings = KeyBindings()
-
- @bindings.add('\\', 'enter')
- def _(event: KeyPressEvent) -> None:
- # Typing '\' + Enter forces a newline regardless
- event.current_buffer.insert_text('\n')
-
- @bindings.add('enter')
- def _handle_enter(event: KeyPressEvent):
- event.app.exit(result=event.current_buffer.text)
-
- @bindings.add('c-c')
- def _keyboard_interrupt(event: KeyPressEvent):
- event.app.exit(exception=KeyboardInterrupt())
-
- session = PromptSession(
- completer=CommandCompleter(),
- key_bindings=bindings,
- prompt_continuation=lambda width, line_number, is_soft_wrap: '...',
- multiline=True,
- input=input,
- output=output,
- style=DEFAULT_STYLE,
- placeholder=HTML(
- ''
- 'Type your message… (tip: press \\ + Enter to insert a newline)'
- ' '
- ),
- )
-
- return session
-
-
-class NonEmptyValueValidator(Validator):
- def validate(self, document):
- text = document.text
- if not text:
- raise ValidationError(
- message='API key cannot be empty. Please enter a valid API key.'
- )
diff --git a/openhands-cli/openhands_cli/utils.py b/openhands-cli/openhands_cli/utils.py
deleted file mode 100644
index 50571cd7beaa..000000000000
--- a/openhands-cli/openhands_cli/utils.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""Utility functions for LLM configuration in OpenHands CLI."""
-
-import os
-from typing import Any
-from openhands.tools.preset import get_default_agent
-from openhands.sdk import LLM
-
-def get_llm_metadata(
- model_name: str,
- llm_type: str,
- session_id: str | None = None,
- user_id: str | None = None,
-) -> dict[str, Any]:
- """
- Generate LLM metadata for OpenHands CLI.
-
- Args:
- model_name: Name of the LLM model
- agent_name: Name of the agent (defaults to "openhands")
- session_id: Optional session identifier
- user_id: Optional user identifier
-
- Returns:
- Dictionary containing metadata for LLM initialization
- """
- # Import here to avoid circular imports
- openhands_sdk_version: str = 'n/a'
- try:
- import openhands.sdk
-
- openhands_sdk_version = openhands.sdk.__version__
- except (ModuleNotFoundError, AttributeError):
- pass
-
- openhands_tools_version: str = 'n/a'
- try:
- import openhands.tools
-
- openhands_tools_version = openhands.tools.__version__
- except (ModuleNotFoundError, AttributeError):
- pass
-
- metadata = {
- 'trace_version': openhands_sdk_version,
- 'tags': [
- 'app:openhands',
- f'model:{model_name}',
- f'type:{llm_type}',
- f'web_host:{os.environ.get("WEB_HOST", "unspecified")}',
- f'openhands_sdk_version:{openhands_sdk_version}',
- f'openhands_tools_version:{openhands_tools_version}',
- ],
- }
- if session_id is not None:
- metadata['session_id'] = session_id
- if user_id is not None:
- metadata['trace_user_id'] = user_id
- return metadata
-
-
-def get_default_cli_agent(
- llm: LLM
-):
- agent = get_default_agent(
- llm=llm,
- cli_mode=True
- )
-
- return agent
diff --git a/openhands-cli/pyproject.toml b/openhands-cli/pyproject.toml
index 7d2e600e2a56..2d0e101d5fc5 100644
--- a/openhands-cli/pyproject.toml
+++ b/openhands-cli/pyproject.toml
@@ -24,8 +24,6 @@ dependencies = [
"typer>=0.17.4",
]
-scripts = { openhands = "openhands_cli.simple_main:main" }
-
[dependency-groups]
# Hatchling wheel target: include the package directory
dev = [
@@ -46,9 +44,6 @@ dev = [
[tool.hatch.metadata]
allow-direct-references = true
-[tool.hatch.build.targets.wheel]
-packages = [ "openhands_cli" ]
-
# uv source pins for internal packages
[tool.black]
@@ -86,12 +81,6 @@ line_length = 88
relative_files = true
omit = [ "tests/*", "**/test_*" ]
-[tool.coverage.paths]
-source = [
- "openhands_cli/",
- "openhands-cli/openhands_cli/",
-]
-
[tool.mypy]
python_version = "3.12"
warn_return_any = true
diff --git a/openhands-cli/tests/__init__.py b/openhands-cli/tests/__init__.py
deleted file mode 100644
index 91a8d7c4bbad..000000000000
--- a/openhands-cli/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-"""Tests for OpenHands CLI."""
diff --git a/openhands-cli/tests/commands/test_confirm_command.py b/openhands-cli/tests/commands/test_confirm_command.py
deleted file mode 100644
index 95c8d0a4e007..000000000000
--- a/openhands-cli/tests/commands/test_confirm_command.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python3
-
-from unittest.mock import MagicMock, patch, call
-import pytest
-
-from openhands_cli.runner import ConversationRunner
-from openhands.sdk.security.confirmation_policy import AlwaysConfirm, NeverConfirm
-
-CONV_ID = "test-conversation-id"
-
-
-# ---------- Helpers ----------
-def make_conv(enabled: bool) -> MagicMock:
- """Return a conversation mock in enabled/disabled confirmation mode."""
- m = MagicMock()
- m.id = CONV_ID
- m.agent.security_analyzer = MagicMock() if enabled else None
- m.confirmation_policy_active = enabled
- m.is_confirmation_mode_active = enabled
- return m
-
-
-@pytest.fixture
-def runner_disabled() -> ConversationRunner:
- """Runner starting with confirmation mode disabled."""
- return ConversationRunner(make_conv(enabled=False))
-
-
-@pytest.fixture
-def runner_enabled() -> ConversationRunner:
- """Runner starting with confirmation mode enabled."""
- return ConversationRunner(make_conv(enabled=True))
-
-
-# ---------- Core toggle behavior (parametrized) ----------
-@pytest.mark.parametrize(
- "start_enabled, include_security_analyzer, expected_enabled, expected_policy_cls",
- [
- # disabled -> enable
- (False, True, True, AlwaysConfirm),
- # enabled -> disable
- (True, False, False, NeverConfirm),
- ],
-)
-def test_toggle_confirmation_mode_transitions(
- start_enabled, include_security_analyzer, expected_enabled, expected_policy_cls
-):
- # Arrange: pick starting runner & prepare the target conversation
- runner = ConversationRunner(make_conv(enabled=start_enabled))
- target_conv = make_conv(enabled=expected_enabled)
-
- with patch("openhands_cli.runner.setup_conversation", return_value=target_conv) as mock_setup:
- # Act
- runner.toggle_confirmation_mode()
-
- # Assert state
- assert runner.is_confirmation_mode_active is expected_enabled
- assert runner.conversation is target_conv
-
- # Assert setup called with same conversation ID + correct analyzer flag
- mock_setup.assert_called_once_with(CONV_ID, include_security_analyzer=include_security_analyzer)
-
- # Assert policy applied to the *new* conversation
- target_conv.set_confirmation_policy.assert_called_once()
- assert isinstance(target_conv.set_confirmation_policy.call_args.args[0], expected_policy_cls)
-
-
-# ---------- Conversation ID is preserved across multiple toggles ----------
-def test_maintains_conversation_id_across_toggles(runner_disabled: ConversationRunner):
- enabled_conv = make_conv(enabled=True)
- disabled_conv = make_conv(enabled=False)
-
- with patch("openhands_cli.runner.setup_conversation") as mock_setup:
- mock_setup.side_effect = [enabled_conv, disabled_conv]
-
- # Toggle on, then off
- runner_disabled.toggle_confirmation_mode()
- runner_disabled.toggle_confirmation_mode()
-
- assert runner_disabled.conversation.id == CONV_ID
- mock_setup.assert_has_calls(
- [
- call(CONV_ID, include_security_analyzer=True),
- call(CONV_ID, include_security_analyzer=False),
- ],
- any_order=False,
- )
-
-
-# ---------- Idempotency under rapid alternating toggles ----------
-def test_rapid_alternating_toggles_produce_expected_states(runner_disabled: ConversationRunner):
- enabled_conv = make_conv(enabled=True)
- disabled_conv = make_conv(enabled=False)
-
- with patch("openhands_cli.runner.setup_conversation") as mock_setup:
- mock_setup.side_effect = [enabled_conv, disabled_conv, enabled_conv, disabled_conv]
-
- # Start disabled
- assert runner_disabled.is_confirmation_mode_active is False
-
- # Enable, Disable, Enable, Disable
- runner_disabled.toggle_confirmation_mode()
- assert runner_disabled.is_confirmation_mode_active is True
-
- runner_disabled.toggle_confirmation_mode()
- assert runner_disabled.is_confirmation_mode_active is False
-
- runner_disabled.toggle_confirmation_mode()
- assert runner_disabled.is_confirmation_mode_active is True
-
- runner_disabled.toggle_confirmation_mode()
- assert runner_disabled.is_confirmation_mode_active is False
-
- mock_setup.assert_has_calls(
- [
- call(CONV_ID, include_security_analyzer=True),
- call(CONV_ID, include_security_analyzer=False),
- call(CONV_ID, include_security_analyzer=True),
- call(CONV_ID, include_security_analyzer=False),
- ],
- any_order=False,
- )
diff --git a/openhands-cli/tests/commands/test_new_command.py b/openhands-cli/tests/commands/test_new_command.py
deleted file mode 100644
index a02f69f49b21..000000000000
--- a/openhands-cli/tests/commands/test_new_command.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""Tests for the /new command functionality."""
-
-from unittest.mock import MagicMock, patch
-from uuid import UUID
-
-from prompt_toolkit.input.defaults import create_pipe_input
-from prompt_toolkit.output.defaults import DummyOutput
-
-from openhands_cli.setup import (
- MissingAgentSpec,
- verify_agent_exists_or_setup_agent,
-)
-from openhands_cli.user_actions import UserConfirmation
-
-
-@patch("openhands_cli.setup.load_agent_specs")
-def test_verify_agent_exists_or_setup_agent_success(mock_load_agent_specs):
- """Test that verify_agent_exists_or_setup_agent returns agent successfully."""
- # Mock the agent object
- mock_agent = MagicMock()
- mock_load_agent_specs.return_value = mock_agent
-
- # Call the function
- result = verify_agent_exists_or_setup_agent()
-
- # Verify the result
- assert result == mock_agent
- mock_load_agent_specs.assert_called_once_with()
-
-
-@patch("openhands_cli.setup.SettingsScreen")
-@patch("openhands_cli.setup.load_agent_specs")
-def test_verify_agent_exists_or_setup_agent_missing_agent_spec(
- mock_load_agent_specs, mock_settings_screen_class
-):
- """Test that verify_agent_exists_or_setup_agent handles MissingAgentSpec exception."""
- # Mock the SettingsScreen instance
- mock_settings_screen = MagicMock()
- mock_settings_screen_class.return_value = mock_settings_screen
-
- # Mock load_agent_specs to raise MissingAgentSpec on first call, then succeed
- mock_agent = MagicMock()
- mock_load_agent_specs.side_effect = [
- MissingAgentSpec("Agent spec missing"),
- mock_agent,
- ]
-
- # Call the function
- result = verify_agent_exists_or_setup_agent()
-
- # Verify the result
- assert result == mock_agent
- # Should be called twice: first fails, second succeeds
- assert mock_load_agent_specs.call_count == 2
- # Settings screen should be called once with first_time=True (new behavior)
- mock_settings_screen.configure_settings.assert_called_once_with(first_time=True)
-
-
-@patch("openhands_cli.agent_chat.exit_session_confirmation")
-@patch("openhands_cli.agent_chat.get_session_prompter")
-@patch("openhands_cli.agent_chat.setup_conversation")
-@patch("openhands_cli.agent_chat.verify_agent_exists_or_setup_agent")
-@patch("openhands_cli.agent_chat.ConversationRunner")
-def test_new_command_resets_confirmation_mode(
- mock_runner_cls,
- mock_verify_agent,
- mock_setup_conversation,
- mock_get_session_prompter,
- mock_exit_confirm,
-):
- # Auto-accept the exit prompt to avoid interactive UI and EOFError
- mock_exit_confirm.return_value = UserConfirmation.ACCEPT
-
- # Mock agent verification to succeed
- mock_agent = MagicMock()
- mock_verify_agent.return_value = mock_agent
-
- # Mock conversation - only one is created when /new is called
- conv1 = MagicMock()
- conv1.id = UUID("aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa")
- mock_setup_conversation.return_value = conv1
-
- # One runner instance for the conversation
- runner1 = MagicMock()
- runner1.is_confirmation_mode_active = True
- mock_runner_cls.return_value = runner1
-
- # Real session fed by a pipe (no interactive confirmation now)
- from openhands_cli.user_actions.utils import (
- get_session_prompter as real_get_session_prompter,
- )
-
- with create_pipe_input() as pipe:
- output = DummyOutput()
- session = real_get_session_prompter(input=pipe, output=output)
- mock_get_session_prompter.return_value = session
-
- from openhands_cli.agent_chat import run_cli_entry
-
- # Trigger /new
- # First user message should trigger runner creation
- # Then /exit (exit will be auto-accepted)
- for ch in "/new\rhello\r/exit\r":
- pipe.send_text(ch)
-
- run_cli_entry(None)
-
- # Assert we created one runner for the conversation when a message was processed after /new
- assert mock_runner_cls.call_count == 1
- assert mock_runner_cls.call_args_list[0].args[0] is conv1
diff --git a/openhands-cli/tests/commands/test_resume_command.py b/openhands-cli/tests/commands/test_resume_command.py
deleted file mode 100644
index af9a040f184d..000000000000
--- a/openhands-cli/tests/commands/test_resume_command.py
+++ /dev/null
@@ -1,147 +0,0 @@
-"""Tests for the /resume command functionality."""
-
-from unittest.mock import MagicMock, patch
-from uuid import UUID
-import pytest
-from prompt_toolkit.input.defaults import create_pipe_input
-from prompt_toolkit.output.defaults import DummyOutput
-
-from openhands.sdk.conversation.state import ConversationExecutionStatus
-from openhands_cli.user_actions import UserConfirmation
-
-
-# ---------- Fixtures & helpers ----------
-
-@pytest.fixture
-def mock_agent():
- """Mock agent for verification."""
- return MagicMock()
-
-
-@pytest.fixture
-def mock_conversation():
- """Mock conversation with default settings."""
- conv = MagicMock()
- conv.id = UUID('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
- return conv
-
-
-@pytest.fixture
-def mock_runner():
- """Mock conversation runner."""
- return MagicMock()
-
-
-def run_resume_command_test(commands, agent_status=None, expect_runner_created=True):
- """Helper function to run resume command tests with common setup."""
- with patch('openhands_cli.agent_chat.exit_session_confirmation') as mock_exit_confirm, \
- patch('openhands_cli.agent_chat.get_session_prompter') as mock_get_session_prompter, \
- patch('openhands_cli.agent_chat.setup_conversation') as mock_setup_conversation, \
- patch('openhands_cli.agent_chat.verify_agent_exists_or_setup_agent') as mock_verify_agent, \
- patch('openhands_cli.agent_chat.ConversationRunner') as mock_runner_cls:
-
- # Auto-accept the exit prompt to avoid interactive UI
- mock_exit_confirm.return_value = UserConfirmation.ACCEPT
-
- # Mock agent verification to succeed
- mock_agent = MagicMock()
- mock_verify_agent.return_value = mock_agent
-
- # Mock conversation setup
- conv = MagicMock()
- conv.id = UUID('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
- if agent_status:
- conv.state.execution_status = agent_status
- mock_setup_conversation.return_value = conv
-
- # Mock runner
- runner = MagicMock()
- runner.conversation = conv
- mock_runner_cls.return_value = runner
-
- # Real session fed by a pipe
- from openhands_cli.user_actions.utils import get_session_prompter as real_get_session_prompter
- with create_pipe_input() as pipe:
- output = DummyOutput()
- session = real_get_session_prompter(input=pipe, output=output)
- mock_get_session_prompter.return_value = session
-
- from openhands_cli.agent_chat import run_cli_entry
-
- # Send commands
- for ch in commands:
- pipe.send_text(ch)
-
- # Capture printed output
- with patch('openhands_cli.agent_chat.print_formatted_text') as mock_print:
- run_cli_entry(None)
-
- return mock_runner_cls, runner, mock_print
-
-
-# ---------- Warning tests (parametrized) ----------
-
-@pytest.mark.parametrize(
- "commands,expected_warning,expect_runner_created",
- [
- # No active conversation - /resume immediately
- ("/resume\r/exit\r", "No active conversation running", False),
- # Conversation exists but not in paused state - send message first, then /resume
- ("hello\r/resume\r/exit\r", "No paused conversation to resume", True),
- ],
-)
-def test_resume_command_warnings(commands, expected_warning, expect_runner_created):
- """Test /resume command shows appropriate warnings."""
- # Set agent status to FINISHED for the "conversation exists but not paused" test
- agent_status = ConversationExecutionStatus.FINISHED if expect_runner_created else None
-
- mock_runner_cls, runner, mock_print = run_resume_command_test(
- commands, agent_status=agent_status, expect_runner_created=expect_runner_created
- )
-
- # Verify warning message was printed
- warning_calls = [call for call in mock_print.call_args_list
- if expected_warning in str(call)]
- assert len(warning_calls) > 0, f"Expected warning about {expected_warning}"
-
- # Verify runner creation expectation
- if expect_runner_created:
- assert mock_runner_cls.call_count == 1
- runner.process_message.assert_called()
- else:
- assert mock_runner_cls.call_count == 0
-
-
-# ---------- Successful resume tests (parametrized) ----------
-
-@pytest.mark.parametrize(
- "agent_status",
- [
- ConversationExecutionStatus.PAUSED,
- ConversationExecutionStatus.WAITING_FOR_CONFIRMATION,
- ],
-)
-def test_resume_command_successful_resume(agent_status):
- """Test /resume command successfully resumes paused/waiting conversations."""
- commands = "hello\r/resume\r/exit\r"
-
- mock_runner_cls, runner, mock_print = run_resume_command_test(
- commands, agent_status=agent_status, expect_runner_created=True
- )
-
- # Verify runner was created and process_message was called
- assert mock_runner_cls.call_count == 1
-
- # Verify process_message was called twice: once with the initial message, once with None for resume
- assert runner.process_message.call_count == 2
-
- # Check the calls to process_message
- calls = runner.process_message.call_args_list
-
- # First call should have a message (the "hello" message)
- first_call_args = calls[0][0]
- assert first_call_args[0] is not None, "First call should have a message"
-
- # Second call should have None (the /resume command)
- second_call_args = calls[1][0]
- assert second_call_args[0] is None, "Second call should have None message for resume"
diff --git a/openhands-cli/tests/commands/test_settings_command.py b/openhands-cli/tests/commands/test_settings_command.py
deleted file mode 100644
index b822242517fa..000000000000
--- a/openhands-cli/tests/commands/test_settings_command.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""Test for the /settings command functionality."""
-
-from unittest.mock import MagicMock, patch
-from prompt_toolkit.input.defaults import create_pipe_input
-from prompt_toolkit.output.defaults import DummyOutput
-
-from openhands_cli.agent_chat import run_cli_entry
-from openhands_cli.user_actions import UserConfirmation
-
-
-@patch('openhands_cli.agent_chat.exit_session_confirmation')
-@patch('openhands_cli.agent_chat.get_session_prompter')
-@patch('openhands_cli.agent_chat.setup_conversation')
-@patch('openhands_cli.agent_chat.verify_agent_exists_or_setup_agent')
-@patch('openhands_cli.agent_chat.ConversationRunner')
-@patch('openhands_cli.agent_chat.SettingsScreen')
-def test_settings_command_works_without_conversation(
- mock_settings_screen_class,
- mock_runner_cls,
- mock_verify_agent,
- mock_setup_conversation,
- mock_get_session_prompter,
- mock_exit_confirm,
-):
- """Test that /settings command works when no conversation is active (bug fix scenario)."""
- # Auto-accept the exit prompt to avoid interactive UI
- mock_exit_confirm.return_value = UserConfirmation.ACCEPT
-
- # Mock agent verification to succeed
- mock_agent = MagicMock()
- mock_verify_agent.return_value = mock_agent
-
- # Mock the SettingsScreen instance
- mock_settings_screen = MagicMock()
- mock_settings_screen_class.return_value = mock_settings_screen
-
- # No runner initially (simulates starting CLI without a conversation)
- mock_runner_cls.return_value = None
-
- # Real session fed by a pipe
- from openhands_cli.user_actions.utils import get_session_prompter as real_get_session_prompter
- with create_pipe_input() as pipe:
- output = DummyOutput()
- session = real_get_session_prompter(input=pipe, output=output)
- mock_get_session_prompter.return_value = session
-
- # Trigger /settings, then /exit (exit will be auto-accepted)
- for ch in "/settings\r/exit\r":
- pipe.send_text(ch)
-
- run_cli_entry(None)
-
- # Assert SettingsScreen was created with None conversation (the bug fix)
- mock_settings_screen_class.assert_called_once_with(None)
-
- # Assert display_settings was called (settings screen was shown)
- mock_settings_screen.display_settings.assert_called_once()
\ No newline at end of file
diff --git a/openhands-cli/tests/commands/test_status_command.py b/openhands-cli/tests/commands/test_status_command.py
deleted file mode 100644
index a8f0c778cd8e..000000000000
--- a/openhands-cli/tests/commands/test_status_command.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""Simplified tests for the /status command functionality."""
-
-from datetime import datetime, timedelta
-from uuid import uuid4
-from unittest.mock import Mock, patch
-
-import pytest
-
-from openhands_cli.tui.status import display_status
-from openhands.sdk.llm.utils.metrics import Metrics, TokenUsage
-
-
-# ---------- Fixtures & helpers ----------
-
-@pytest.fixture
-def conversation():
- """Minimal conversation with empty events and pluggable stats."""
- conv = Mock()
- conv.id = uuid4()
- conv.state = Mock(events=[])
- conv.conversation_stats = Mock()
- return conv
-
-
-def make_metrics(cost=None, usage=None) -> Metrics:
- m = Metrics()
- if cost is not None:
- m.accumulated_cost = cost
- m.accumulated_token_usage = usage
- return m
-
-
-def call_display_status(conversation, session_start):
- """Call display_status with prints patched; return (mock_pf, mock_pc, text)."""
- with patch('openhands_cli.tui.status.print_formatted_text') as pf, \
- patch('openhands_cli.tui.status.print_container') as pc:
- display_status(conversation, session_start_time=session_start)
- # First container call; extract the Frame/TextArea text
- container = pc.call_args_list[0][0][0]
- text = getattr(container.body, "text", "")
- return pf, pc, str(text)
-
-
-# ---------- Tests ----------
-
-def test_display_status_box_title(conversation):
- session_start = datetime.now()
- conversation.conversation_stats.get_combined_metrics.return_value = make_metrics()
-
- with patch('openhands_cli.tui.status.print_formatted_text') as pf, \
- patch('openhands_cli.tui.status.print_container') as pc:
- display_status(conversation, session_start_time=session_start)
-
- assert pf.called and pc.called
-
- container = pc.call_args_list[0][0][0]
- assert hasattr(container, "title")
- assert "Usage Metrics" in container.title
-
-
-@pytest.mark.parametrize(
- "delta,expected",
- [
- (timedelta(seconds=0), "0h 0m"),
- (timedelta(minutes=5, seconds=30), "5m"),
- (timedelta(hours=1, minutes=30, seconds=45), "1h 30m"),
- (timedelta(hours=2, minutes=15, seconds=30), "2h 15m"),
- ],
-)
-def test_display_status_uptime(conversation, delta, expected):
- session_start = datetime.now() - delta
- conversation.conversation_stats.get_combined_metrics.return_value = make_metrics()
-
- with patch('openhands_cli.tui.status.print_formatted_text') as pf, \
- patch('openhands_cli.tui.status.print_container'):
- display_status(conversation, session_start_time=session_start)
- # uptime is printed in the 2nd print_formatted_text call
- uptime_call_str = str(pf.call_args_list[1])
- assert expected in uptime_call_str
- # conversation id appears in the first print call
- id_call_str = str(pf.call_args_list[0])
- assert str(conversation.id) in id_call_str
-
-
-@pytest.mark.parametrize(
- "cost,usage,expecteds",
- [
- # Empty/zero case
- (None, None, ["$0.000000", "0", "0", "0", "0", "0"]),
- # Only cost, usage=None
- (0.05, None, ["$0.050000", "0", "0", "0", "0", "0"]),
- # Full metrics
- (
- 0.123456,
- TokenUsage(
- prompt_tokens=1500,
- completion_tokens=800,
- cache_read_tokens=200,
- cache_write_tokens=100,
- ),
- ["$0.123456", "1,500", "800", "200", "100", "2,300"],
- ),
- # Larger numbers (comprehensive)
- (
- 1.234567,
- TokenUsage(
- prompt_tokens=5000,
- completion_tokens=3000,
- cache_read_tokens=500,
- cache_write_tokens=250,
- ),
- ["$1.234567", "5,000", "3,000", "500", "250", "8,000"],
- ),
- ],
-)
-def test_display_status_metrics(conversation, cost, usage, expecteds):
- session_start = datetime.now()
- conversation.conversation_stats.get_combined_metrics.return_value = make_metrics(cost, usage)
-
- pf, pc, text = call_display_status(conversation, session_start)
-
- assert pf.called and pc.called
- for expected in expecteds:
- assert expected in text
diff --git a/openhands-cli/tests/conftest.py b/openhands-cli/tests/conftest.py
deleted file mode 100644
index 454b14cb3adc..000000000000
--- a/openhands-cli/tests/conftest.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from unittest.mock import patch
-
-import pytest
-
-
-# Fixture: mock_verified_models - Simplified model data
-@pytest.fixture
-def mock_verified_models():
- with (
- patch(
- 'openhands_cli.user_actions.settings_action.VERIFIED_MODELS',
- {
- 'openai': ['gpt-4o', 'gpt-4o-mini'],
- 'anthropic': ['claude-3-5-sonnet', 'claude-3-5-haiku'],
- },
- ),
- patch(
- 'openhands_cli.user_actions.settings_action.UNVERIFIED_MODELS_EXCLUDING_BEDROCK',
- {
- 'openai': ['gpt-custom'],
- 'anthropic': [],
- 'custom': ['my-model'],
- },
- ),
- ):
- yield
-
-
-# Fixture: mock_cli_interactions - Reusable CLI mock patterns
-@pytest.fixture
-def mock_cli_interactions():
- class Mocks:
- def __init__(self):
- self.p_confirm = patch(
- 'openhands_cli.user_actions.settings_action.cli_confirm'
- )
- self.p_text = patch(
- 'openhands_cli.user_actions.settings_action.cli_text_input'
- )
- self.cli_confirm = None
- self.cli_text_input = None
-
- def start(self):
- self.cli_confirm = self.p_confirm.start()
- self.cli_text_input = self.p_text.start()
- return self
-
- def stop(self):
- self.p_confirm.stop()
- self.p_text.stop()
-
- mocks = Mocks().start()
- try:
- yield mocks
- finally:
- mocks.stop()
diff --git a/openhands-cli/tests/settings/test_api_key_preservation.py b/openhands-cli/tests/settings/test_api_key_preservation.py
deleted file mode 100644
index 29fa3c405d68..000000000000
--- a/openhands-cli/tests/settings/test_api_key_preservation.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""Test for API key preservation bug when updating settings."""
-
-from unittest.mock import patch
-import pytest
-from pydantic import SecretStr
-
-from openhands_cli.user_actions.settings_action import prompt_api_key
-from openhands_cli.tui.utils import StepCounter
-
-
-def test_api_key_preservation_when_user_presses_enter():
- """Test that API key is preserved when user presses ENTER to keep current key.
-
- This test replicates the bug where API keys disappear when updating settings.
- When a user presses ENTER to keep the current API key, the function should
- return the existing API key, not an empty string.
- """
- step_counter = StepCounter(1)
- existing_api_key = SecretStr("sk-existing-key-123")
-
- # Mock cli_text_input to return empty string (simulating user pressing ENTER)
- with patch('openhands_cli.user_actions.settings_action.cli_text_input', return_value=''):
- result = prompt_api_key(
- step_counter=step_counter,
- provider='openai',
- existing_api_key=existing_api_key,
- escapable=True
- )
-
- # The bug: result is empty string instead of the existing key
- # This test will fail initially, demonstrating the bug
- assert result == existing_api_key.get_secret_value(), (
- f"Expected existing API key '{existing_api_key.get_secret_value()}' "
- f"but got '{result}'. API key should be preserved when user presses ENTER."
- )
-
-
-def test_api_key_update_when_user_enters_new_key():
- """Test that API key is updated when user enters a new key."""
- step_counter = StepCounter(1)
- existing_api_key = SecretStr("sk-existing-key-123")
- new_api_key = "sk-new-key-456"
-
- # Mock cli_text_input to return new API key
- with patch('openhands_cli.user_actions.settings_action.cli_text_input', return_value=new_api_key):
- result = prompt_api_key(
- step_counter=step_counter,
- provider='openai',
- existing_api_key=existing_api_key,
- escapable=True
- )
-
- # Should return the new API key
- assert result == new_api_key
-
-
diff --git a/openhands-cli/tests/settings/test_default_agent_security_analyzer.py b/openhands-cli/tests/settings/test_default_agent_security_analyzer.py
deleted file mode 100644
index 61ab9b2a2feb..000000000000
--- a/openhands-cli/tests/settings/test_default_agent_security_analyzer.py
+++ /dev/null
@@ -1,117 +0,0 @@
-"""Test that first-time settings screen usage creates a default agent and conversation with security analyzer."""
-
-from unittest.mock import patch
-import pytest
-from openhands_cli.tui.settings.settings_screen import SettingsScreen
-from openhands_cli.user_actions.settings_action import SettingsType
-from openhands.sdk import LLM, Conversation, Workspace
-from openhands.sdk.security.llm_analyzer import LLMSecurityAnalyzer
-from pydantic import SecretStr
-
-
-def test_first_time_settings_creates_default_agent_and_conversation_with_security_analyzer():
- """Test that using the settings screen for the first time creates a default agent and conversation with security analyzer."""
-
- # Create a settings screen instance (no conversation initially)
- screen = SettingsScreen(conversation=None)
-
- # Mock all the user interaction steps to simulate first-time setup
- with (
- patch(
- 'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
- return_value=SettingsType.BASIC,
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
- return_value='openai',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_model',
- return_value='gpt-4o-mini',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_api_key',
- return_value='sk-test-key-123',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
- return_value=True,
- ),
- ):
- # Run the settings configuration workflow
- screen.configure_settings(first_time=True)
-
- # Load the saved agent from the store
- saved_agent = screen.agent_store.load()
-
- # Verify that an agent was created and saved
- assert saved_agent is not None, "Agent should be created and saved after first-time settings configuration"
-
- # Verify that the agent has the expected LLM configuration
- assert saved_agent.llm.model == 'openai/gpt-4o-mini', f"Expected model 'openai/gpt-4o-mini', got '{saved_agent.llm.model}'"
- assert saved_agent.llm.api_key.get_secret_value() == 'sk-test-key-123', "API key should match the provided value"
-
- # Test that a conversation can be created with the agent and security analyzer can be set
- conversation = Conversation(agent=saved_agent, workspace=Workspace(working_dir='/tmp'))
-
- # Set security analyzer using the new API
- security_analyzer = LLMSecurityAnalyzer()
- conversation.set_security_analyzer(security_analyzer)
-
- # Verify that the security analyzer was set correctly
- assert conversation.state.security_analyzer is not None, "Conversation should have a security analyzer"
- assert conversation.state.security_analyzer.kind == 'LLMSecurityAnalyzer', f"Expected security analyzer kind 'LLMSecurityAnalyzer', got '{conversation.state.security_analyzer.kind}'"
-
-
-def test_first_time_settings_with_advanced_configuration():
- """Test that advanced settings also create a default agent and conversation with security analyzer."""
-
- screen = SettingsScreen(conversation=None)
-
- with (
- patch(
- 'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
- return_value=SettingsType.ADVANCED,
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_custom_model',
- return_value='anthropic/claude-3-5-sonnet',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_base_url',
- return_value='https://api.anthropic.com',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_api_key',
- return_value='sk-ant-test-key',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_memory_condensation',
- return_value=True,
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
- return_value=True,
- ),
- ):
- screen.configure_settings(first_time=True)
-
- saved_agent = screen.agent_store.load()
-
- # Verify agent creation
- assert saved_agent is not None, "Agent should be created with advanced settings"
-
- # Verify advanced settings were applied
- assert saved_agent.llm.model == 'anthropic/claude-3-5-sonnet', "Custom model should be set"
- assert saved_agent.llm.base_url == 'https://api.anthropic.com', "Base URL should be set"
-
- # Test that a conversation can be created with the agent and security analyzer can be set
- conversation = Conversation(agent=saved_agent, workspace=Workspace(working_dir='/tmp'))
-
- # Set security analyzer using the new API
- security_analyzer = LLMSecurityAnalyzer()
- conversation.set_security_analyzer(security_analyzer)
-
- # Verify that the security analyzer was set correctly
- assert conversation.state.security_analyzer is not None, "Conversation should have a security analyzer"
- assert conversation.state.security_analyzer.kind == 'LLMSecurityAnalyzer', "Security analyzer should be LLMSecurityAnalyzer"
\ No newline at end of file
diff --git a/openhands-cli/tests/settings/test_first_time_user_settings.py b/openhands-cli/tests/settings/test_first_time_user_settings.py
deleted file mode 100644
index 64d4a59a2fb5..000000000000
--- a/openhands-cli/tests/settings/test_first_time_user_settings.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from unittest.mock import patch
-from openhands_cli.agent_chat import run_cli_entry
-import pytest
-
-
-@patch("openhands_cli.agent_chat.print_formatted_text")
-@patch("openhands_cli.tui.settings.settings_screen.save_settings_confirmation")
-@patch("openhands_cli.tui.settings.settings_screen.prompt_api_key")
-@patch("openhands_cli.tui.settings.settings_screen.choose_llm_model")
-@patch("openhands_cli.tui.settings.settings_screen.choose_llm_provider")
-@patch("openhands_cli.tui.settings.settings_screen.settings_type_confirmation")
-@patch("openhands_cli.tui.settings.store.AgentStore.load")
-@pytest.mark.parametrize("interrupt_step", ["settings_type", "provider", "model", "api_key", "save"])
-def test_first_time_users_can_escape_settings_flow_and_exit_app(
- mock_agentstore_load,
- mock_type,
- mock_provider,
- mock_model,
- mock_api_key,
- mock_save,
- mock_print,
- interrupt_step,
-):
- """Test that KeyboardInterrupt is handled at each step of basic settings."""
-
- # Force first-time user: no saved agent
- mock_agentstore_load.return_value = None
-
- # Happy path defaults
- mock_type.return_value = "basic"
- mock_provider.return_value = "openai"
- mock_model.return_value = "gpt-4o-mini"
- mock_api_key.return_value = "sk-test"
- mock_save.return_value = True
-
- # Inject KeyboardInterrupt at the specified step
- if interrupt_step == "settings_type":
- mock_type.side_effect = KeyboardInterrupt()
- elif interrupt_step == "provider":
- mock_provider.side_effect = KeyboardInterrupt()
- elif interrupt_step == "model":
- mock_model.side_effect = KeyboardInterrupt()
- elif interrupt_step == "api_key":
- mock_api_key.side_effect = KeyboardInterrupt()
- elif interrupt_step == "save":
- mock_save.side_effect = KeyboardInterrupt()
-
- # Run
- run_cli_entry()
-
- # Assert graceful messaging
- calls = [call.args[0] for call in mock_print.call_args_list]
- assert any("Setup is required" in str(c) for c in calls)
- assert any("Goodbye!" in str(c) for c in calls)
diff --git a/openhands-cli/tests/settings/test_mcp_settings_reconciliation.py b/openhands-cli/tests/settings/test_mcp_settings_reconciliation.py
deleted file mode 100644
index 65a5687335c8..000000000000
--- a/openhands-cli/tests/settings/test_mcp_settings_reconciliation.py
+++ /dev/null
@@ -1,114 +0,0 @@
-"""Minimal tests: mcp.json overrides persisted agent MCP servers."""
-
-import json
-from pathlib import Path
-from unittest.mock import patch
-import pytest
-from pydantic import SecretStr
-
-from openhands.sdk import Agent, LLM
-from openhands_cli.locations import MCP_CONFIG_FILE, AGENT_SETTINGS_PATH
-from openhands_cli.tui.settings.store import AgentStore
-
-
-# ---------------------- tiny helpers ----------------------
-
-def write_json(path: Path, obj: dict) -> None:
- path.write_text(json.dumps(obj))
-
-
-def write_agent(root: Path, agent: Agent) -> None:
- (root / AGENT_SETTINGS_PATH).write_text(
- agent.model_dump_json(context={"expose_secrets": True})
- )
-
-
-# ---------------------- fixtures ----------------------
-
-@pytest.fixture
-def persistence_dir(tmp_path, monkeypatch) -> Path:
- # Create root dir and point AgentStore at it
- root = tmp_path / "openhands"
- root.mkdir()
- monkeypatch.setattr("openhands_cli.tui.settings.store.PERSISTENCE_DIR", str(root))
- return root
-
-
-@pytest.fixture
-def agent_store() -> AgentStore:
- return AgentStore()
-
-
-# ---------------------- tests ----------------------
-
-@patch("openhands_cli.tui.settings.store.get_default_tools", return_value=[])
-@patch("openhands_cli.tui.settings.store.get_llm_metadata", return_value={})
-def test_load_overrides_persisted_mcp_with_mcp_json_file(
- mock_meta,
- mock_tools,
- persistence_dir,
- agent_store
-):
- """If agent has MCP servers, mcp.json must replace them entirely."""
- # Persist an agent that already contains MCP servers
- persisted_agent = Agent(
- llm=LLM(model="gpt-4", api_key=SecretStr("k"), usage_id="svc"),
- tools=[],
- mcp_config={
- "mcpServers": {
- "persistent_server": {"command": "python", "args": ["-m", "old_server"]}
- }
- },
- )
- write_agent(persistence_dir, persisted_agent)
-
- # Create mcp.json with different servers (this must fully override)
- write_json(
- persistence_dir / MCP_CONFIG_FILE,
- {
- "mcpServers": {
- "file_server": {"command": "uvx", "args": ["mcp-server-fetch"]}
- }
- },
- )
-
- loaded = agent_store.load()
- assert loaded is not None
- # Expect ONLY the MCP json file's config
- assert loaded.mcp_config == {
- "mcpServers": {
- "file_server": {
- "command": "uvx",
- "args": ["mcp-server-fetch"],
- "env": {},
- "transport": "stdio",
- }
- }
- }
-
-
-@patch("openhands_cli.tui.settings.store.get_default_tools", return_value=[])
-@patch("openhands_cli.tui.settings.store.get_llm_metadata", return_value={})
-def test_load_when_mcp_file_missing_ignores_persisted_mcp(
- mock_meta,
- mock_tools,
- persistence_dir,
- agent_store
-):
- """If mcp.json is absent, loaded agent.mcp_config should be empty (persisted MCP ignored)."""
- persisted_agent = Agent(
- llm=LLM(model="gpt-4", api_key=SecretStr("k"), usage_id="svc"),
- tools=[],
- mcp_config={
- "mcpServers": {
- "persistent_server": {"command": "python", "args": ["-m", "old_server"]}
- }
- },
- )
- write_agent(persistence_dir, persisted_agent)
-
- # No mcp.json created
-
- loaded = agent_store.load()
- assert loaded is not None
- assert loaded.mcp_config == {} # persisted MCP is ignored if file is missin
diff --git a/openhands-cli/tests/settings/test_settings_input.py b/openhands-cli/tests/settings/test_settings_input.py
deleted file mode 100644
index 744ba0cdee71..000000000000
--- a/openhands-cli/tests/settings/test_settings_input.py
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/usr/bin/env python3
-"""
-Core Settings Logic tests
-"""
-
-from typing import Any
-from unittest.mock import MagicMock
-
-import pytest
-from openhands_cli.user_actions.settings_action import (
- NonEmptyValueValidator,
- SettingsType,
- choose_llm_model,
- choose_llm_provider,
- prompt_api_key,
- settings_type_confirmation,
-)
-from prompt_toolkit.completion import FuzzyWordCompleter
-from prompt_toolkit.validation import ValidationError
-from pydantic import SecretStr
-
-# -------------------------------
-# Settings type selection
-# -------------------------------
-
-
-def test_settings_type_selection(mock_cli_interactions: Any) -> None:
- mocks = mock_cli_interactions
-
- # Basic
- mocks.cli_confirm.return_value = 0
- assert settings_type_confirmation() == SettingsType.BASIC
-
- # Cancel/Go back
- mocks.cli_confirm.return_value = 2
- with pytest.raises(KeyboardInterrupt):
- settings_type_confirmation()
-
-
-# -------------------------------
-# Provider selection flows
-# -------------------------------
-
-
-def test_provider_selection_with_predefined_options(
- mock_verified_models: Any, mock_cli_interactions: Any
-) -> None:
- from openhands_cli.tui.utils import StepCounter
-
- mocks = mock_cli_interactions
- # first option among display_options is index 0
- mocks.cli_confirm.return_value = 0
- step_counter = StepCounter(1)
- result = choose_llm_provider(step_counter)
- assert result == 'openai'
-
-
-def test_provider_selection_with_custom_input(
- mock_verified_models: Any, mock_cli_interactions: Any
-) -> None:
- from openhands_cli.tui.utils import StepCounter
-
- mocks = mock_cli_interactions
- # Due to overlapping provider keys between VERIFIED and UNVERIFIED in fixture,
- # display_options contains 4 providers (with duplicates) + alternate at index 4
- mocks.cli_confirm.return_value = 4
- mocks.cli_text_input.return_value = 'my-provider'
- step_counter = StepCounter(1)
- result = choose_llm_provider(step_counter)
- assert result == 'my-provider'
-
- # Verify fuzzy completer passed
- _, kwargs = mocks.cli_text_input.call_args
- assert isinstance(kwargs['completer'], FuzzyWordCompleter)
-
-
-# -------------------------------
-# Model selection flows
-# -------------------------------
-
-
-def test_model_selection_flows(
- mock_verified_models: Any, mock_cli_interactions: Any
-) -> None:
- from openhands_cli.tui.utils import StepCounter
-
- mocks = mock_cli_interactions
-
- # Direct pick from predefined list
- mocks.cli_confirm.return_value = 0
- step_counter = StepCounter(1)
- result = choose_llm_model(step_counter, 'openai')
- assert result in ['gpt-4o']
-
- # Choose custom model via input
- mocks.cli_confirm.return_value = 4 # for provider with >=4 models this would be alt; in our data openai has 3 -> alt index is 3
- mocks.cli_text_input.return_value = 'custom-model'
- # Adjust to actual alt index produced by code (len(models[:4]) yields 3 + 1 alt -> index 3)
- mocks.cli_confirm.return_value = 3
- step_counter2 = StepCounter(1)
- result2 = choose_llm_model(step_counter2, 'openai')
- assert result2 == 'custom-model'
-
-
-# -------------------------------
-# API key validation and prompting
-# -------------------------------
-
-
-def test_api_key_validation_and_prompting(mock_cli_interactions: Any) -> None:
- # Validator standalone
- validator = NonEmptyValueValidator()
- doc = MagicMock()
- doc.text = 'sk-abc'
- validator.validate(doc)
-
- doc_empty = MagicMock()
- doc_empty.text = ''
- with pytest.raises(ValidationError):
- validator.validate(doc_empty)
-
- # Prompting for new key enforces validator
- from openhands_cli.tui.utils import StepCounter
-
- mocks = mock_cli_interactions
- mocks.cli_text_input.return_value = 'sk-new'
- step_counter = StepCounter(1)
- new_key = prompt_api_key(step_counter, 'provider')
- assert new_key == 'sk-new'
- assert mocks.cli_text_input.call_args[1]['validator'] is not None
-
- # Prompting with existing key shows mask and no validator
- mocks.cli_text_input.reset_mock()
- mocks.cli_text_input.return_value = 'sk-updated'
- existing = SecretStr('sk-existing-123')
- step_counter2 = StepCounter(1)
- updated = prompt_api_key(step_counter2, 'provider', existing)
- assert updated == 'sk-updated'
- assert mocks.cli_text_input.call_args[1]['validator'] is None
- assert 'sk-***' in mocks.cli_text_input.call_args[0][0]
diff --git a/openhands-cli/tests/settings/test_settings_workflow.py b/openhands-cli/tests/settings/test_settings_workflow.py
deleted file mode 100644
index 157b3cddaddd..000000000000
--- a/openhands-cli/tests/settings/test_settings_workflow.py
+++ /dev/null
@@ -1,210 +0,0 @@
-import json
-from pathlib import Path
-from unittest.mock import MagicMock, patch
-
-import pytest
-from openhands_cli.tui.settings.settings_screen import SettingsScreen
-from openhands_cli.tui.settings.store import AgentStore
-from openhands_cli.user_actions.settings_action import SettingsType
-from openhands_cli.utils import get_default_cli_agent
-from pydantic import SecretStr
-
-from openhands.sdk import LLM, Conversation, LocalFileStore
-
-
-def read_json(path: Path) -> dict:
- with open(path, 'r') as f:
- return json.load(f)
-
-
-def make_screen_with_conversation(model='openai/gpt-4o-mini', api_key='sk-xyz'):
- llm = LLM(model=model, api_key=SecretStr(api_key), usage_id='test-service')
- # Conversation(agent) signature may vary across versions; adapt if needed:
- from openhands.sdk.agent import Agent
-
- agent = Agent(llm=llm, tools=[])
- conv = Conversation(agent)
- return SettingsScreen(conversation=conv)
-
-
-def seed_file(path: Path, model: str = 'openai/gpt-4o-mini', api_key: str = 'sk-old'):
- store = AgentStore()
- store.file_store = LocalFileStore(root=str(path))
- agent = get_default_cli_agent(
- llm=LLM(model=model, api_key=SecretStr(api_key), usage_id='test-service')
- )
- store.save(agent)
-
-
-def test_llm_settings_save_and_load(tmp_path: Path):
- """Test that the settings screen can save basic LLM settings."""
- screen = SettingsScreen(conversation=None)
-
- # Mock the spec store to verify settings are saved
- with patch.object(screen.agent_store, 'save') as mock_save:
- screen._save_llm_settings(model='openai/gpt-4o-mini', api_key='sk-test-123')
-
- # Verify that save was called
- mock_save.assert_called_once()
-
- # Get the agent spec that was saved
- saved_spec = mock_save.call_args[0][0]
- assert saved_spec.llm.model == 'openai/gpt-4o-mini'
- assert saved_spec.llm.api_key.get_secret_value() == 'sk-test-123'
-
-
-def test_first_time_setup_workflow(tmp_path: Path):
- """Test that the basic settings workflow completes without errors."""
- screen = SettingsScreen()
-
- with (
- patch(
- 'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
- return_value=SettingsType.BASIC,
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
- return_value='openai',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_model',
- return_value='gpt-4o-mini',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_api_key',
- return_value='sk-first',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
- return_value=True,
- ),
- ):
- # The workflow should complete without errors
- screen.configure_settings()
-
- # Since the current implementation doesn't save to file, we just verify the workflow completed
- assert True # If we get here, the workflow completed successfully
-
-
-def test_update_existing_settings_workflow(tmp_path: Path):
- """Test that the settings update workflow completes without errors."""
- settings_path = tmp_path / 'agent_settings.json'
- seed_file(settings_path, model='openai/gpt-4o-mini', api_key='sk-old')
- screen = make_screen_with_conversation(model='openai/gpt-4o-mini', api_key='sk-old')
-
- with (
- patch(
- 'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
- return_value=SettingsType.BASIC,
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
- return_value='anthropic',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_model',
- return_value='claude-3-5-sonnet',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_api_key',
- return_value='sk-updated',
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
- return_value=True,
- ),
- ):
- # The workflow should complete without errors
- screen.configure_settings()
-
- # Since the current implementation doesn't save to file, we just verify the workflow completed
- assert True # If we get here, the workflow completed successfully
-
-
-def test_all_llms_in_agent_are_updated():
- """Test that modifying LLM settings creates multiple LLMs with same API key but different usage_ids."""
- # Create a screen with existing agent settings
- screen = SettingsScreen(conversation=None)
- initial_llm = LLM(model='openai/gpt-3.5-turbo', api_key=SecretStr('sk-initial'), usage_id='test-service')
- initial_agent = get_default_cli_agent(llm=initial_llm)
-
- # Mock the agent store to return the initial agent and capture the save call
- with (
- patch.object(screen.agent_store, 'load', return_value=initial_agent),
- patch.object(screen.agent_store, 'save') as mock_save
- ):
- # Modify the LLM settings with new API key
- screen._save_llm_settings(model='openai/gpt-4o-mini', api_key='sk-updated-123')
- mock_save.assert_called_once()
-
- # Get the saved agent from the mock
- saved_agent = mock_save.call_args[0][0]
- all_llms = list(saved_agent.get_all_llms())
- assert len(all_llms) >= 2, f"Expected at least 2 LLMs, got {len(all_llms)}"
-
- # Verify all LLMs have the same API key
- api_keys = [llm.api_key.get_secret_value() for llm in all_llms]
- assert all(api_key == 'sk-updated-123' for api_key in api_keys), \
- f"Not all LLMs have the same API key: {api_keys}"
-
- # Verify none of the usage_id attributes match
- usage_ids = [llm.usage_id for llm in all_llms]
- assert len(set(usage_ids)) == len(usage_ids), \
- f"Some usage_ids are duplicated: {usage_ids}"
-
-
-@pytest.mark.parametrize(
- 'step_to_cancel',
- ['type', 'provider', 'model', 'apikey', 'save'],
-)
-def test_workflow_cancellation_at_each_step(tmp_path: Path, step_to_cancel: str):
- screen = make_screen_with_conversation()
-
- # Base happy-path patches
- patches = {
- 'settings_type_confirmation': MagicMock(return_value=SettingsType.BASIC),
- 'choose_llm_provider': MagicMock(return_value='openai'),
- 'choose_llm_model': MagicMock(return_value='gpt-4o-mini'),
- 'prompt_api_key': MagicMock(return_value='sk-new'),
- 'save_settings_confirmation': MagicMock(return_value=True),
- }
-
- # Turn one step into a cancel
- if step_to_cancel == 'type':
- patches['settings_type_confirmation'].side_effect = KeyboardInterrupt()
- elif step_to_cancel == 'provider':
- patches['choose_llm_provider'].side_effect = KeyboardInterrupt()
- elif step_to_cancel == 'model':
- patches['choose_llm_model'].side_effect = KeyboardInterrupt()
- elif step_to_cancel == 'apikey':
- patches['prompt_api_key'].side_effect = KeyboardInterrupt()
- elif step_to_cancel == 'save':
- patches['save_settings_confirmation'].side_effect = KeyboardInterrupt()
-
- with (
- patch(
- 'openhands_cli.tui.settings.settings_screen.settings_type_confirmation',
- patches['settings_type_confirmation'],
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_provider',
- patches['choose_llm_provider'],
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.choose_llm_model',
- patches['choose_llm_model'],
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.prompt_api_key',
- patches['prompt_api_key'],
- ),
- patch(
- 'openhands_cli.tui.settings.settings_screen.save_settings_confirmation',
- patches['save_settings_confirmation'],
- ),
- patch.object(screen.agent_store, 'save') as mock_save,
- ):
- screen.configure_settings()
-
- # No settings should be saved on cancel
- mock_save.assert_not_called()
diff --git a/openhands-cli/tests/test_confirmation_mode.py b/openhands-cli/tests/test_confirmation_mode.py
deleted file mode 100644
index e5832e752283..000000000000
--- a/openhands-cli/tests/test_confirmation_mode.py
+++ /dev/null
@@ -1,511 +0,0 @@
-#!/usr/bin/env python3
-"""
-Tests for confirmation mode functionality in OpenHands CLI.
-"""
-
-import os
-import uuid
-from concurrent.futures import ThreadPoolExecutor
-from typing import Any
-from unittest.mock import ANY, MagicMock, patch
-
-import pytest
-from openhands_cli.runner import ConversationRunner
-from openhands_cli.setup import MissingAgentSpec, setup_conversation
-from openhands_cli.user_actions import agent_action, ask_user_confirmation, utils
-from openhands_cli.user_actions.types import ConfirmationResult, UserConfirmation
-from prompt_toolkit.input.defaults import create_pipe_input
-from prompt_toolkit.output.defaults import DummyOutput
-
-from openhands.sdk import Action
-from openhands.sdk.security.confirmation_policy import (
- AlwaysConfirm,
- ConfirmRisky,
- NeverConfirm,
- SecurityRisk,
-)
-from tests.utils import _send_keys
-
-
-class MockAction(Action):
- """Mock action schema for testing."""
-
- command: str
-
-
-class TestConfirmationMode:
- """Test suite for confirmation mode functionality."""
-
- def test_setup_conversation_creates_conversation(self) -> None:
- """Test that setup_conversation creates a conversation successfully."""
- with patch.dict(os.environ, {'LLM_MODEL': 'test-model'}):
- with (
- patch('openhands_cli.setup.Conversation') as mock_conversation_class,
- patch('openhands_cli.setup.AgentStore') as mock_agent_store_class,
- patch('openhands_cli.setup.print_formatted_text') as mock_print,
- patch('openhands_cli.setup.HTML'),
- patch('openhands_cli.setup.uuid') as mock_uuid,
- patch('openhands_cli.setup.CLIVisualizer') as mock_visualizer,
- ):
- # Mock dependencies
- mock_conversation_id = MagicMock()
- mock_uuid.uuid4.return_value = mock_conversation_id
-
- # Mock AgentStore
- mock_agent_store_instance = MagicMock()
- mock_agent_instance = MagicMock()
- mock_agent_instance.llm.model = 'test-model'
- mock_agent_store_instance.load.return_value = mock_agent_instance
- mock_agent_store_class.return_value = mock_agent_store_instance
-
- # Mock Conversation constructor to return a mock conversation
- mock_conversation_instance = MagicMock()
- mock_conversation_class.return_value = mock_conversation_instance
-
- result = setup_conversation(mock_conversation_id)
-
- # Verify conversation was created and returned
- assert result == mock_conversation_instance
- mock_agent_store_class.assert_called_once()
- mock_agent_store_instance.load.assert_called_once()
- mock_conversation_class.assert_called_once_with(
- agent=mock_agent_instance,
- workspace=ANY,
- persistence_dir=ANY,
- conversation_id=mock_conversation_id,
- visualizer=mock_visualizer
- )
-
- def test_setup_conversation_raises_missing_agent_spec(self) -> None:
- """Test that setup_conversation raises MissingAgentSpec when agent is not found."""
- with (
- patch('openhands_cli.setup.AgentStore') as mock_agent_store_class,
- ):
- # Mock AgentStore to return None (no agent found)
- mock_agent_store_instance = MagicMock()
- mock_agent_store_instance.load.return_value = None
- mock_agent_store_class.return_value = mock_agent_store_instance
-
- # Should raise MissingAgentSpec
- with pytest.raises(MissingAgentSpec) as exc_info:
- setup_conversation(uuid.uuid4())
-
- assert 'Agent specification not found' in str(exc_info.value)
- mock_agent_store_class.assert_called_once()
- mock_agent_store_instance.load.assert_called_once()
-
- def test_conversation_runner_set_confirmation_mode(self) -> None:
- """Test that ConversationRunner can set confirmation policy."""
-
- mock_conversation = MagicMock()
- mock_conversation.confirmation_policy_active = False
- mock_conversation.is_confirmation_mode_active = False
- runner = ConversationRunner(mock_conversation)
-
- # Test enabling confirmation mode
- runner.set_confirmation_policy(AlwaysConfirm())
- mock_conversation.set_confirmation_policy.assert_called_with(AlwaysConfirm())
-
- # Test disabling confirmation mode
- runner.set_confirmation_policy(NeverConfirm())
- mock_conversation.set_confirmation_policy.assert_called_with(NeverConfirm())
-
- def test_conversation_runner_initial_state(self) -> None:
- """Test that ConversationRunner starts with confirmation mode disabled."""
-
- mock_conversation = MagicMock()
- mock_conversation.confirmation_policy_active = False
- mock_conversation.is_confirmation_mode_active = False
- runner = ConversationRunner(mock_conversation)
-
- # Verify initial state
- assert runner.is_confirmation_mode_active is False
-
- def test_ask_user_confirmation_empty_actions(self) -> None:
- """Test that ask_user_confirmation returns ACCEPT for empty actions list."""
- result = ask_user_confirmation([])
- assert isinstance(result, ConfirmationResult)
- assert result.decision == UserConfirmation.ACCEPT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
- assert result.policy_change is None
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_yes(self, mock_cli_confirm: Any) -> None:
- """Test that ask_user_confirmation returns ACCEPT when user selects yes."""
- mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'ls -la'
-
- result = ask_user_confirmation([mock_action])
- assert isinstance(result, ConfirmationResult)
- assert result.decision == UserConfirmation.ACCEPT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
- assert result.policy_change is None
-
- @patch('openhands_cli.user_actions.agent_action.cli_text_input')
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_no(self, mock_cli_confirm: Any, mock_cli_text_input: Any) -> None:
- """Test that ask_user_confirmation returns REJECT when user selects reject without reason."""
- mock_cli_confirm.return_value = 1 # Second option (Reject)
- mock_cli_text_input.return_value = '' # Empty reason (reject without reason)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'rm -rf /'
-
- result = ask_user_confirmation([mock_action])
- assert isinstance(result, ConfirmationResult)
- assert result.decision == UserConfirmation.REJECT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
- assert result.policy_change is None
- mock_cli_text_input.assert_called_once_with('Reason (and let OpenHands know why): ')
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_y_shorthand(self, mock_cli_confirm: Any) -> None:
- """Test that ask_user_confirmation accepts first option as yes."""
- mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo hello'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.ACCEPT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
-
- @patch('openhands_cli.user_actions.agent_action.cli_text_input')
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_n_shorthand(self, mock_cli_confirm: Any, mock_cli_text_input: Any) -> None:
- """Test that ask_user_confirmation accepts second option as reject."""
- mock_cli_confirm.return_value = 1 # Second option (Reject)
- mock_cli_text_input.return_value = '' # Empty reason (reject without reason)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'dangerous command'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.REJECT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
- mock_cli_text_input.assert_called_once_with('Reason (and let OpenHands know why): ')
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_invalid_then_yes(
- self, mock_cli_confirm: Any
- ) -> None:
- """Test that ask_user_confirmation handles selection and accepts yes."""
- mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.ACCEPT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
- assert mock_cli_confirm.call_count == 1
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_keyboard_interrupt(
- self, mock_cli_confirm: Any
- ) -> None:
- """Test that ask_user_confirmation handles KeyboardInterrupt gracefully."""
- mock_cli_confirm.side_effect = KeyboardInterrupt()
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.DEFER
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_eof_error(self, mock_cli_confirm: Any) -> None:
- """Test that ask_user_confirmation handles EOFError gracefully."""
- mock_cli_confirm.side_effect = EOFError()
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.DEFER
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
-
- def test_ask_user_confirmation_multiple_actions(self) -> None:
- """Test that ask_user_confirmation displays multiple actions correctly."""
- with (
- patch(
- 'openhands_cli.user_actions.agent_action.cli_confirm'
- ) as mock_cli_confirm,
- patch(
- 'openhands_cli.user_actions.agent_action.print_formatted_text'
- ) as mock_print,
- ):
- mock_cli_confirm.return_value = 0 # First option (Yes, proceed)
-
- mock_action1 = MagicMock()
- mock_action1.tool_name = 'bash'
- mock_action1.action = 'ls -la'
-
- mock_action2 = MagicMock()
- mock_action2.tool_name = 'str_replace_editor'
- mock_action2.action = 'create file.txt'
-
- result = ask_user_confirmation([mock_action1, mock_action2])
- assert isinstance(result, ConfirmationResult)
- assert result.decision == UserConfirmation.ACCEPT
- assert result.reason == ''
- assert result.policy_change is None
-
- # Verify that both actions were displayed
- assert mock_print.call_count >= 3 # Header + 2 actions
-
- @patch('openhands_cli.user_actions.agent_action.cli_text_input')
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_no_with_reason(
- self, mock_cli_confirm: Any, mock_cli_text_input: Any
- ) -> None:
- """Test that ask_user_confirmation returns REJECT when user selects 'Reject' and provides a reason."""
- mock_cli_confirm.return_value = 1 # Second option (Reject)
- mock_cli_text_input.return_value = 'This action is too risky'
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'rm -rf /'
-
- result = ask_user_confirmation([mock_action])
- assert isinstance(result, ConfirmationResult)
- assert result.decision == UserConfirmation.REJECT
- assert result.reason == 'This action is too risky'
- assert result.policy_change is None
- mock_cli_text_input.assert_called_once_with('Reason (and let OpenHands know why): ')
-
- @patch('openhands_cli.user_actions.agent_action.cli_text_input')
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_no_with_reason_cancelled(
- self, mock_cli_confirm: Any, mock_cli_text_input: Any
- ) -> None:
- """Test that ask_user_confirmation falls back to DEFER when reason input is cancelled."""
- mock_cli_confirm.return_value = 1 # Second option (Reject)
- mock_cli_text_input.side_effect = KeyboardInterrupt() # User cancelled reason input
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'dangerous command'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.DEFER
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert result.policy_change is None
- mock_cli_text_input.assert_called_once_with('Reason (and let OpenHands know why): ')
-
- @patch('openhands_cli.user_actions.agent_action.cli_text_input')
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_reject_empty_reason(
- self, mock_cli_confirm: Any, mock_cli_text_input: Any
- ) -> None:
- """Test that ask_user_confirmation handles empty reason input correctly."""
- mock_cli_confirm.return_value = 1 # Second option (Reject)
- mock_cli_text_input.return_value = ' ' # Whitespace-only reason (should be treated as empty)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'dangerous command'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.REJECT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == '' # Should be empty after stripping whitespace
- assert result.policy_change is None
- mock_cli_text_input.assert_called_once_with('Reason (and let OpenHands know why): ')
-
- def test_user_confirmation_is_escapable_e2e(
- self, monkeypatch: pytest.MonkeyPatch
- ) -> None:
- """E2E: non-escapable should ignore Ctrl-C/Ctrl-P/Esc; only Enter returns."""
- real_cli_confirm = utils.cli_confirm
-
- with create_pipe_input() as pipe:
- output = DummyOutput()
-
- def wrapper(
- question: str,
- choices: list[str] | None = None,
- initial_selection: int = 0,
- escapable: bool = False,
- **extra: object,
- ) -> int:
- # keep original params; inject test IO
- return real_cli_confirm(
- question=question,
- choices=choices,
- initial_selection=initial_selection,
- escapable=escapable,
- input=pipe,
- output=output,
- )
-
- # Patch the symbol the caller uses
- monkeypatch.setattr(agent_action, 'cli_confirm', wrapper, raising=True)
-
- with ThreadPoolExecutor(max_workers=1) as ex:
- fut = ex.submit(
- ask_user_confirmation, [MockAction(command='echo hello world')]
- )
-
- _send_keys(pipe, '\x03') # Ctrl-C (ignored)
- result = fut.result(timeout=2.0)
- assert isinstance(result, ConfirmationResult)
- assert (
- result.decision == UserConfirmation.DEFER
- ) # escaped confirmation view
- assert result.reason == ''
- assert result.policy_change is None
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_always_accept(self, mock_cli_confirm: Any) -> None:
- """Test that ask_user_confirmation returns ACCEPT with NeverConfirm policy when user selects third option."""
- mock_cli_confirm.return_value = 2 # Third option (Always proceed)
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
-
- result = ask_user_confirmation([mock_action])
- assert result.decision == UserConfirmation.ACCEPT
- assert isinstance(result, ConfirmationResult)
- assert result.reason == ''
- assert isinstance(result.policy_change, NeverConfirm)
-
- def test_conversation_runner_handles_always_accept(self) -> None:
- """Test that ConversationRunner disables confirmation mode when NeverConfirm policy is returned."""
- mock_conversation = MagicMock()
- mock_conversation.confirmation_policy_active = True
- mock_conversation.is_confirmation_mode_active = True
- runner = ConversationRunner(mock_conversation)
-
- # Enable confirmation mode first
- runner.set_confirmation_policy(AlwaysConfirm())
- assert runner.is_confirmation_mode_active is True
-
- # Mock get_unmatched_actions to return some actions
- with patch(
- 'openhands_cli.runner.ConversationState.get_unmatched_actions'
- ) as mock_get_actions:
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
- mock_get_actions.return_value = [mock_action]
-
- # Mock ask_user_confirmation to return ACCEPT with NeverConfirm policy
- with patch('openhands_cli.runner.ask_user_confirmation') as mock_ask:
- mock_ask.return_value = ConfirmationResult(
- decision=UserConfirmation.ACCEPT,
- reason='',
- policy_change=NeverConfirm(),
- )
-
- # Mock print_formatted_text to avoid output during test
- with patch('openhands_cli.runner.print_formatted_text'):
- # Mock setup_conversation to avoid real conversation creation
- with patch('openhands_cli.runner.setup_conversation') as mock_setup:
- # Return a new mock conversation with confirmation mode disabled
- new_mock_conversation = MagicMock()
- new_mock_conversation.id = mock_conversation.id
- new_mock_conversation.is_confirmation_mode_active = False
- mock_setup.return_value = new_mock_conversation
-
- result = runner._handle_confirmation_request()
-
- # Verify that confirmation mode was disabled
- assert result == UserConfirmation.ACCEPT
- # Should have called setup_conversation to toggle confirmation mode
- mock_setup.assert_called_once_with(
- mock_conversation.id, include_security_analyzer=False
- )
- # Should have called set_confirmation_policy with NeverConfirm on new conversation
- new_mock_conversation.set_confirmation_policy.assert_called_with(
- NeverConfirm()
- )
-
- @patch('openhands_cli.user_actions.agent_action.cli_confirm')
- def test_ask_user_confirmation_auto_confirm_safe(
- self, mock_cli_confirm: Any
- ) -> None:
- """Test that ask_user_confirmation returns ACCEPT with policy_change when user selects fourth option."""
- mock_cli_confirm.return_value = (
- 3 # Fourth option (Auto-confirm LOW/MEDIUM, ask for HIGH)
- )
-
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
-
- result = ask_user_confirmation([mock_action])
- assert isinstance(result, ConfirmationResult)
- assert result.decision == UserConfirmation.ACCEPT
- assert result.reason == ''
- assert result.policy_change is not None
- assert isinstance(result.policy_change, ConfirmRisky)
- assert result.policy_change.threshold == SecurityRisk.HIGH
-
- def test_conversation_runner_handles_auto_confirm_safe(self) -> None:
- """Test that ConversationRunner sets ConfirmRisky policy when policy_change is provided."""
- mock_conversation = MagicMock()
- mock_conversation.confirmation_policy_active = True
- mock_conversation.is_confirmation_mode_active = True
- runner = ConversationRunner(mock_conversation)
-
- # Enable confirmation mode first
- runner.set_confirmation_policy(AlwaysConfirm())
- assert runner.is_confirmation_mode_active is True
-
- # Mock get_unmatched_actions to return some actions
- with patch(
- 'openhands_cli.runner.ConversationState.get_unmatched_actions'
- ) as mock_get_actions:
- mock_action = MagicMock()
- mock_action.tool_name = 'bash'
- mock_action.action = 'echo test'
- mock_get_actions.return_value = [mock_action]
-
- # Mock ask_user_confirmation to return ConfirmationResult with policy_change
- with patch('openhands_cli.runner.ask_user_confirmation') as mock_ask:
- expected_policy = ConfirmRisky(threshold=SecurityRisk.HIGH)
- mock_ask.return_value = ConfirmationResult(
- decision=UserConfirmation.ACCEPT,
- reason='',
- policy_change=expected_policy,
- )
-
- # Mock print_formatted_text to avoid output during test
- with patch('openhands_cli.runner.print_formatted_text'):
- result = runner._handle_confirmation_request()
-
- # Verify that security-based confirmation policy was set
- assert result == UserConfirmation.ACCEPT
- # Should set ConfirmRisky policy with HIGH threshold
- mock_conversation.set_confirmation_policy.assert_called_with(
- expected_policy
- )
diff --git a/openhands-cli/tests/test_conversation_runner.py b/openhands-cli/tests/test_conversation_runner.py
deleted file mode 100644
index ebc7f04c4400..000000000000
--- a/openhands-cli/tests/test_conversation_runner.py
+++ /dev/null
@@ -1,155 +0,0 @@
-from typing import Any, Self
-from unittest.mock import patch
-
-import pytest
-from openhands_cli.runner import ConversationRunner
-from openhands_cli.user_actions.types import UserConfirmation
-from pydantic import ConfigDict, SecretStr, model_validator
-
-from openhands.sdk import Conversation, ConversationCallbackType, LocalConversation
-from openhands.sdk.agent.base import AgentBase
-from openhands.sdk.conversation import ConversationState
-from openhands.sdk.conversation.state import ConversationExecutionStatus
-from openhands.sdk.llm import LLM
-from openhands.sdk.security.confirmation_policy import AlwaysConfirm, NeverConfirm
-from unittest.mock import MagicMock
-
-class FakeLLM(LLM):
- @model_validator(mode='after')
- def _set_env_side_effects(self) -> Self:
- return self
-
-
-def default_config() -> dict[str, Any]:
- return {
- 'model': 'gpt-4o',
- 'api_key': SecretStr('test_key'),
- 'num_retries': 2,
- 'retry_min_wait': 1,
- 'retry_max_wait': 2,
- }
-
-
-class FakeAgent(AgentBase):
- model_config = ConfigDict(frozen=False)
- step_count: int = 0
- finish_on_step: int | None = None
-
- def init_state(
- self, state: ConversationState, on_event: ConversationCallbackType
- ) -> None:
- pass
-
- def step(
- self, conversation: LocalConversation, on_event: ConversationCallbackType
- ) -> None:
- self.step_count += 1
- if self.step_count == self.finish_on_step:
- conversation.state.execution_status = ConversationExecutionStatus.FINISHED
-
-
-@pytest.fixture()
-def agent() -> FakeAgent:
- llm = LLM(**default_config(), usage_id='test-service')
- return FakeAgent(llm=llm, tools=[])
-
-
-class TestConversationRunner:
- @pytest.mark.parametrize(
- 'agent_status', [ConversationExecutionStatus.RUNNING, ConversationExecutionStatus.PAUSED]
- )
- def test_non_confirmation_mode_runs_once(
- self, agent: FakeAgent, agent_status: ConversationExecutionStatus
- ) -> None:
- """
- 1. Confirmation mode is not on
- 2. Process message resumes paused conversation or continues running conversation
- """
-
- convo = Conversation(agent)
- convo.max_iteration_per_run = 1
- convo.state.execution_status = agent_status
- cr = ConversationRunner(convo)
- cr.set_confirmation_policy(NeverConfirm())
- cr.process_message(message=None)
-
- assert agent.step_count == 1
- assert (
- convo.state.execution_status != ConversationExecutionStatus.PAUSED
- )
-
- @pytest.mark.parametrize(
- 'confirmation, final_status, expected_run_calls',
- [
- # Case 1: Agent waiting for confirmation; user DEFERS -> early return, no run()
- (
- UserConfirmation.DEFER,
- ConversationExecutionStatus.WAITING_FOR_CONFIRMATION,
- 0,
- ),
- # Case 2: Agent waiting for confirmation; user ACCEPTS -> run() once, break (finished=True)
- (
- UserConfirmation.ACCEPT,
- ConversationExecutionStatus.FINISHED,
- 1,
- ),
- ],
- )
- def test_confirmation_mode_waiting_and_user_decision_controls_run(
- self,
- agent: FakeAgent,
- confirmation: UserConfirmation,
- final_status: ConversationExecutionStatus,
- expected_run_calls: int,
- ) -> None:
- """
- 1. Agent may be paused but is waiting for consent on actions
- 2. If paused, we should have asked for confirmation on action
- 3. If not paused, we should still ask for confirmation on actions
- 4. If deferred no run call to agent should be made
- 5. If accepted, run call to agent should be made
- """
- if final_status == ConversationExecutionStatus.FINISHED:
- agent.finish_on_step = 1
-
- convo = Conversation(agent)
-
- # Set security analyzer using the new API to enable confirmation mode
- convo.set_security_analyzer(MagicMock())
-
- convo.state.execution_status = (
- ConversationExecutionStatus.WAITING_FOR_CONFIRMATION
- )
- cr = ConversationRunner(convo)
- cr.set_confirmation_policy(AlwaysConfirm())
-
- with patch.object(
- cr, '_handle_confirmation_request', return_value=confirmation
- ) as mock_confirmation_request:
- cr.process_message(message=None)
-
- mock_confirmation_request.assert_called_once()
- assert agent.step_count == expected_run_calls
- assert convo.state.execution_status == final_status
-
- def test_confirmation_mode_not_waiting__runs_once_when_finished_true(
- self, agent: FakeAgent
- ) -> None:
- """
- 1. Agent was not waiting
- 2. Agent finished without any actions
- 3. Conversation should finished without asking user for instructions
- """
- agent.finish_on_step = 1
- convo = Conversation(agent)
- convo.state.execution_status = ConversationExecutionStatus.PAUSED
-
- cr = ConversationRunner(convo)
- cr.set_confirmation_policy(AlwaysConfirm())
-
- with patch.object(cr, '_handle_confirmation_request') as _mock_h:
- cr.process_message(message=None)
-
- # No confirmation was needed up front; we still expect exactly one run.
- assert agent.step_count == 1
- _mock_h.assert_not_called()
diff --git a/openhands-cli/tests/test_directory_separation.py b/openhands-cli/tests/test_directory_separation.py
deleted file mode 100644
index 444583455ff3..000000000000
--- a/openhands-cli/tests/test_directory_separation.py
+++ /dev/null
@@ -1,70 +0,0 @@
-"""Tests to demonstrate the fix for WORK_DIR and PERSISTENCE_DIR separation."""
-
-import os
-from unittest.mock import MagicMock, patch
-
-from openhands_cli.locations import PERSISTENCE_DIR, WORK_DIR
-from openhands_cli.tui.settings.store import AgentStore
-
-from openhands.sdk import LLM, Agent, Tool
-
-
-class TestDirectorySeparation:
- """Test that WORK_DIR and PERSISTENCE_DIR are properly separated."""
-
- def test_work_dir_and_persistence_dir_are_different(self):
- """Test that WORK_DIR and PERSISTENCE_DIR are separate directories."""
- # WORK_DIR should be the current working directory
- assert WORK_DIR == os.getcwd()
-
- # PERSISTENCE_DIR should be ~/.openhands
- expected_config_dir = os.path.expanduser('~/.openhands')
- assert PERSISTENCE_DIR == expected_config_dir
-
- # They should be different
- assert WORK_DIR != PERSISTENCE_DIR
-
- def test_agent_store_uses_persistence_dir(self):
- """Test that AgentStore uses PERSISTENCE_DIR for file storage."""
- agent_store = AgentStore()
- assert agent_store.file_store.root == PERSISTENCE_DIR
-
-
-class TestToolFix:
- """Test that tool specs are replaced with default tools using current directory."""
-
- def test_tools_replaced_with_default_tools_on_load(self):
- """Test that entire tools list is replaced with default tools when loading agent."""
- # Create a mock agent with different tools and working directories
- mock_agent = Agent(
- llm=LLM(model='test/model', api_key='test-key', usage_id='test-service'),
- tools=[
- Tool(name='BashTool'),
- Tool(name='FileEditorTool'),
- Tool(name='TaskTrackerTool'),
- ],
- )
-
- # Mock the file store to return our test agent
- with patch(
- 'openhands_cli.tui.settings.store.LocalFileStore'
- ) as mock_file_store:
- mock_store_instance = MagicMock()
- mock_file_store.return_value = mock_store_instance
- mock_store_instance.read.return_value = mock_agent.model_dump_json()
-
- agent_store = AgentStore()
- loaded_agent = agent_store.load()
-
- # Verify the agent was loaded
- assert loaded_agent is not None
-
- # Verify that tools are replaced with default tools
- assert (
- len(loaded_agent.tools) == 3
- ) # BashTool, FileEditorTool, TaskTrackerTool
-
- tool_names = [tool.name for tool in loaded_agent.tools]
- assert 'terminal' in tool_names
- assert 'file_editor' in tool_names
- assert 'task_tracker' in tool_names
diff --git a/openhands-cli/tests/test_exit_session_confirmation.py b/openhands-cli/tests/test_exit_session_confirmation.py
deleted file mode 100644
index 8525b7d05b09..000000000000
--- a/openhands-cli/tests/test_exit_session_confirmation.py
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python3
-"""
-Tests for exit_session_confirmation functionality in OpenHands CLI.
-"""
-
-from collections.abc import Iterator
-from concurrent.futures import ThreadPoolExecutor
-from unittest.mock import MagicMock, patch
-
-import pytest
-from openhands_cli.user_actions import (
- exit_session,
- exit_session_confirmation,
- utils,
-)
-from openhands_cli.user_actions.types import UserConfirmation
-from prompt_toolkit.input.defaults import create_pipe_input
-from prompt_toolkit.output.defaults import DummyOutput
-
-from tests.utils import _send_keys
-
-QUESTION = 'Terminate session?'
-OPTIONS = ['Yes, proceed', 'No, dismiss']
-
-
-@pytest.fixture()
-def confirm_patch() -> Iterator[MagicMock]:
- """Patch cli_confirm once per test and yield the mock."""
- with patch('openhands_cli.user_actions.exit_session.cli_confirm') as m:
- yield m
-
-
-def _assert_called_once_with_defaults(mock_cli_confirm: MagicMock) -> None:
- """Ensure the question/options are correct and 'escapable' is not enabled."""
- mock_cli_confirm.assert_called_once()
- args, kwargs = mock_cli_confirm.call_args
- # Positional args
- assert args == (QUESTION, OPTIONS)
- # Should not opt into escapable mode
- assert 'escapable' not in kwargs or kwargs['escapable'] is False
-
-
-class TestExitSessionConfirmation:
- """Test suite for exit_session_confirmation functionality."""
-
- @pytest.mark.parametrize(
- 'index,expected',
- [
- (0, UserConfirmation.ACCEPT), # Yes
- (1, UserConfirmation.REJECT), # No
- (999, UserConfirmation.REJECT), # Invalid => default reject
- (-1, UserConfirmation.REJECT), # Negative => default reject
- ],
- )
- def test_index_mapping(
- self, confirm_patch: MagicMock, index: int, expected: UserConfirmation
- ) -> None:
- """All index-to-result mappings, including invalid/negative, in one place."""
- confirm_patch.return_value = index
-
- result = exit_session_confirmation()
-
- assert isinstance(result, UserConfirmation)
- assert result == expected
- _assert_called_once_with_defaults(confirm_patch)
-
- def test_exit_session_confirmation_non_escapable_e2e(
- self, monkeypatch: pytest.MonkeyPatch
- ) -> None:
- """E2E: non-escapable should ignore Ctrl-C/Ctrl-P/Esc; only Enter returns."""
- real_cli_confirm = utils.cli_confirm
-
- with create_pipe_input() as pipe:
- output = DummyOutput()
-
- def wrapper(
- question: str,
- choices: list[str] | None = None,
- initial_selection: int = 0,
- escapable: bool = False,
- **extra: object,
- ) -> int:
- # keep original params; inject test IO
- return real_cli_confirm(
- question=question,
- choices=choices,
- initial_selection=initial_selection,
- escapable=escapable,
- input=pipe,
- output=output,
- )
-
- # Patch the symbol the caller uses
- monkeypatch.setattr(exit_session, 'cli_confirm', wrapper, raising=True)
-
- with ThreadPoolExecutor(max_workers=1) as ex:
- fut = ex.submit(exit_session_confirmation)
-
- _send_keys(pipe, '\x03') # Ctrl-C (ignored)
- _send_keys(pipe, '\x10') # Ctrl-P (ignored)
- _send_keys(pipe, '\x1b') # Esc (ignored)
-
- _send_keys(pipe, '\x1b[B') # Arrow Down to "No, dismiss"
- _send_keys(pipe, '\r') # Enter
-
- result = fut.result(timeout=2.0)
- assert result == UserConfirmation.REJECT
diff --git a/openhands-cli/tests/test_gui_launcher.py b/openhands-cli/tests/test_gui_launcher.py
deleted file mode 100644
index 7bf036e91a8d..000000000000
--- a/openhands-cli/tests/test_gui_launcher.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""Tests for GUI launcher functionality."""
-
-import os
-import subprocess
-from pathlib import Path
-from unittest.mock import MagicMock, patch
-
-import pytest
-
-from openhands_cli.gui_launcher import (
- _format_docker_command_for_logging,
- check_docker_requirements,
- get_openhands_version,
- launch_gui_server,
-)
-
-
-class TestFormatDockerCommand:
- """Test the Docker command formatting function."""
-
- @pytest.mark.parametrize(
- "cmd,expected",
- [
- (
- ['docker', 'run', 'hello-world'],
- 'Running Docker command: docker run hello-world ',
- ),
- (
- ['docker', 'run', '-it', '--rm', '-p', '3000:3000', 'openhands:latest'],
- 'Running Docker command: docker run -it --rm -p 3000:3000 openhands:latest ',
- ),
- ([], 'Running Docker command: '),
- ],
- )
- def test_format_docker_command(self, cmd, expected):
- """Test formatting Docker commands."""
- result = _format_docker_command_for_logging(cmd)
- assert result == expected
-
-
-class TestCheckDockerRequirements:
- """Test Docker requirements checking."""
-
- @pytest.mark.parametrize(
- "which_return,run_side_effect,expected_result,expected_print_count",
- [
- # Docker not installed
- (None, None, False, 2),
- # Docker daemon not running
- ('/usr/bin/docker', MagicMock(returncode=1), False, 2),
- # Docker timeout
- ('/usr/bin/docker', subprocess.TimeoutExpired('docker info', 10), False, 2),
- # Docker available
- ('/usr/bin/docker', MagicMock(returncode=0), True, 0),
- ],
- )
- @patch('shutil.which')
- @patch('subprocess.run')
- def test_docker_requirements(
- self, mock_run, mock_which, which_return, run_side_effect, expected_result, expected_print_count
- ):
- """Test Docker requirements checking scenarios."""
- mock_which.return_value = which_return
- if run_side_effect is not None:
- if isinstance(run_side_effect, Exception):
- mock_run.side_effect = run_side_effect
- else:
- mock_run.return_value = run_side_effect
-
- with patch('openhands_cli.gui_launcher.print_formatted_text') as mock_print:
- result = check_docker_requirements()
-
- assert result is expected_result
- assert mock_print.call_count == expected_print_count
-
-
-class TestGetOpenHandsVersion:
- """Test version retrieval."""
-
- @pytest.mark.parametrize(
- "env_value,expected",
- [
- (None, 'latest'), # No environment variable set
- ('1.2.3', '1.2.3'), # Environment variable set
- ],
- )
- def test_version_retrieval(self, env_value, expected):
- """Test version retrieval from environment."""
- if env_value:
- os.environ['OPENHANDS_VERSION'] = env_value
- result = get_openhands_version()
- assert result == expected
-
-
-class TestLaunchGuiServer:
- """Test GUI server launching."""
-
- @patch('openhands_cli.gui_launcher.check_docker_requirements')
- @patch('openhands_cli.gui_launcher.print_formatted_text')
- def test_launch_gui_server_docker_not_available(self, mock_print, mock_check_docker):
- """Test that launch_gui_server exits when Docker is not available."""
- mock_check_docker.return_value = False
-
- with pytest.raises(SystemExit) as exc_info:
- launch_gui_server()
-
- assert exc_info.value.code == 1
-
- @pytest.mark.parametrize(
- "pull_side_effect,run_side_effect,expected_exit_code,mount_cwd,gpu",
- [
- # Docker pull failure
- (subprocess.CalledProcessError(1, 'docker pull'), None, 1, False, False),
- # Docker run failure
- (MagicMock(returncode=0), subprocess.CalledProcessError(1, 'docker run'), 1, False, False),
- # KeyboardInterrupt during run
- (MagicMock(returncode=0), KeyboardInterrupt(), 0, False, False),
- # Success with mount_cwd
- (MagicMock(returncode=0), MagicMock(returncode=0), None, True, False),
- # Success with GPU
- (MagicMock(returncode=0), MagicMock(returncode=0), None, False, True),
- ],
- )
- @patch('openhands_cli.gui_launcher.check_docker_requirements')
- @patch('openhands_cli.gui_launcher.ensure_config_dir_exists')
- @patch('openhands_cli.gui_launcher.get_openhands_version')
- @patch('subprocess.run')
- @patch('subprocess.check_output')
- @patch('pathlib.Path.cwd')
- @patch('openhands_cli.gui_launcher.print_formatted_text')
- def test_launch_gui_server_scenarios(
- self,
- mock_print,
- mock_cwd,
- mock_check_output,
- mock_run,
- mock_version,
- mock_config_dir,
- mock_check_docker,
- pull_side_effect,
- run_side_effect,
- expected_exit_code,
- mount_cwd,
- gpu,
- ):
- """Test various GUI server launch scenarios."""
- # Setup mocks
- mock_check_docker.return_value = True
- mock_config_dir.return_value = Path('/home/user/.openhands')
- mock_version.return_value = 'latest'
- mock_check_output.return_value = '1000\n'
- mock_cwd.return_value = Path('/current/dir')
-
- # Configure subprocess.run side effects
- side_effects = []
- if pull_side_effect is not None:
- if isinstance(pull_side_effect, Exception):
- side_effects.append(pull_side_effect)
- else:
- side_effects.append(pull_side_effect)
-
- if run_side_effect is not None:
- if isinstance(run_side_effect, Exception):
- side_effects.append(run_side_effect)
- else:
- side_effects.append(run_side_effect)
-
- mock_run.side_effect = side_effects
-
- # Test the function
- if expected_exit_code is not None:
- with pytest.raises(SystemExit) as exc_info:
- launch_gui_server(mount_cwd=mount_cwd, gpu=gpu)
- assert exc_info.value.code == expected_exit_code
- else:
- # Should not raise SystemExit for successful cases
- launch_gui_server(mount_cwd=mount_cwd, gpu=gpu)
-
- # Verify subprocess.run was called correctly
- assert mock_run.call_count == 2 # Pull and run commands
-
- # Check pull command
- pull_call = mock_run.call_args_list[0]
- pull_cmd = pull_call[0][0]
- assert pull_cmd[0:3] == ['docker', 'pull', 'docker.openhands.dev/openhands/runtime:latest-nikolaik']
-
- # Check run command
- run_call = mock_run.call_args_list[1]
- run_cmd = run_call[0][0]
- assert run_cmd[0:2] == ['docker', 'run']
-
- if mount_cwd:
- assert 'SANDBOX_VOLUMES=/current/dir:/workspace:rw' in ' '.join(run_cmd)
- assert 'SANDBOX_USER_ID=1000' in ' '.join(run_cmd)
-
- if gpu:
- assert '--gpus' in run_cmd
- assert 'all' in run_cmd
- assert 'SANDBOX_ENABLE_GPU=true' in ' '.join(run_cmd)
diff --git a/openhands-cli/tests/test_main.py b/openhands-cli/tests/test_main.py
deleted file mode 100644
index 2e2a4a47cab3..000000000000
--- a/openhands-cli/tests/test_main.py
+++ /dev/null
@@ -1,154 +0,0 @@
-"""Tests for main entry point functionality."""
-
-import sys
-from types import SimpleNamespace
-from unittest.mock import MagicMock, patch
-
-import pytest
-from openhands_cli import simple_main
-from openhands_cli.simple_main import main
-
-
-
-class TestMainEntryPoint:
- """Test the main entry point behavior."""
-
- @patch('openhands_cli.agent_chat.run_cli_entry')
- @patch('sys.argv', ['openhands'])
- def test_main_starts_agent_chat_directly(
- self, mock_run_agent_chat: MagicMock
- ) -> None:
- """Test that main() starts agent chat directly when setup succeeds."""
- # Mock run_cli_entry to raise KeyboardInterrupt to exit gracefully
- mock_run_agent_chat.side_effect = KeyboardInterrupt()
-
- # Should complete without raising an exception (graceful exit)
- simple_main.main()
-
- # Should call run_cli_entry with no resume conversation ID
- mock_run_agent_chat.assert_called_once_with(resume_conversation_id=None)
-
- @patch('openhands_cli.agent_chat.run_cli_entry')
- @patch('sys.argv', ['openhands'])
- def test_main_handles_import_error(self, mock_run_agent_chat: MagicMock) -> None:
- """Test that main() handles ImportError gracefully."""
- mock_run_agent_chat.side_effect = ImportError('Missing dependency')
-
- # Should raise ImportError (re-raised after handling)
- with pytest.raises(ImportError) as exc_info:
- simple_main.main()
-
- assert str(exc_info.value) == 'Missing dependency'
-
- @patch('openhands_cli.agent_chat.run_cli_entry')
- @patch('sys.argv', ['openhands'])
- def test_main_handles_keyboard_interrupt(
- self, mock_run_agent_chat: MagicMock
- ) -> None:
- """Test that main() handles KeyboardInterrupt gracefully."""
- # Mock run_cli_entry to raise KeyboardInterrupt
- mock_run_agent_chat.side_effect = KeyboardInterrupt()
-
- # Should complete without raising an exception (graceful exit)
- simple_main.main()
-
- @patch('openhands_cli.agent_chat.run_cli_entry')
- @patch('sys.argv', ['openhands'])
- def test_main_handles_eof_error(self, mock_run_agent_chat: MagicMock) -> None:
- """Test that main() handles EOFError gracefully."""
- # Mock run_cli_entry to raise EOFError
- mock_run_agent_chat.side_effect = EOFError()
-
- # Should complete without raising an exception (graceful exit)
- simple_main.main()
-
- @patch('openhands_cli.agent_chat.run_cli_entry')
- @patch('sys.argv', ['openhands'])
- def test_main_handles_general_exception(
- self, mock_run_agent_chat: MagicMock
- ) -> None:
- """Test that main() handles general exceptions."""
- mock_run_agent_chat.side_effect = Exception('Unexpected error')
-
- # Should raise Exception (re-raised after handling)
- with pytest.raises(Exception) as exc_info:
- simple_main.main()
-
- assert str(exc_info.value) == 'Unexpected error'
-
- @patch('openhands_cli.agent_chat.run_cli_entry')
- @patch('sys.argv', ['openhands', '--resume', 'test-conversation-id'])
- def test_main_with_resume_argument(self, mock_run_agent_chat: MagicMock) -> None:
- """Test that main() passes resume conversation ID when provided."""
- # Mock run_cli_entry to raise KeyboardInterrupt to exit gracefully
- mock_run_agent_chat.side_effect = KeyboardInterrupt()
-
- # Should complete without raising an exception (graceful exit)
- simple_main.main()
-
- # Should call run_cli_entry with the provided resume conversation ID
- mock_run_agent_chat.assert_called_once_with(
- resume_conversation_id='test-conversation-id'
- )
-
-
-
-
-@pytest.mark.parametrize(
- "argv,expected_kwargs",
- [
- (['openhands'], {"resume_conversation_id": None}),
- (['openhands', '--resume', 'test-id'], {"resume_conversation_id": 'test-id'}),
- ],
-)
-def test_main_cli_calls_run_cli_entry(monkeypatch, argv, expected_kwargs):
- # Patch sys.argv since main() takes no params
- monkeypatch.setattr(sys, "argv", argv, raising=False)
-
- called = {}
- fake_agent_chat = SimpleNamespace(
- run_cli_entry=lambda **kw: called.setdefault("kwargs", kw)
- )
- # Provide the symbol that main() will import
- monkeypatch.setitem(sys.modules, "openhands_cli.agent_chat", fake_agent_chat)
-
- # Execute (no SystemExit expected on success)
- main()
- assert called["kwargs"] == expected_kwargs
-
-
-@pytest.mark.parametrize(
- "argv,expected_kwargs",
- [
- (['openhands', 'serve'], {"mount_cwd": False, "gpu": False}),
- (['openhands', 'serve', '--mount-cwd'], {"mount_cwd": True, "gpu": False}),
- (['openhands', 'serve', '--gpu'], {"mount_cwd": False, "gpu": True}),
- (['openhands', 'serve', '--mount-cwd', '--gpu'], {"mount_cwd": True, "gpu": True}),
- ],
-)
-def test_main_serve_calls_launch_gui_server(monkeypatch, argv, expected_kwargs):
- monkeypatch.setattr(sys, "argv", argv, raising=False)
-
- called = {}
- fake_gui = SimpleNamespace(
- launch_gui_server=lambda **kw: called.setdefault("kwargs", kw)
- )
- monkeypatch.setitem(sys.modules, "openhands_cli.gui_launcher", fake_gui)
-
- main()
- assert called["kwargs"] == expected_kwargs
-
-
-@pytest.mark.parametrize(
- "argv,expected_exit_code",
- [
- (['openhands', 'invalid-command'], 2), # argparse error
- (['openhands', '--help'], 0), # top-level help
- (['openhands', 'serve', '--help'], 0), # subcommand help
- ],
-)
-def test_help_and_invalid(monkeypatch, argv, expected_exit_code):
- monkeypatch.setattr(sys, "argv", argv, raising=False)
- with pytest.raises(SystemExit) as exc:
- main()
- assert exc.value.code == expected_exit_code
diff --git a/openhands-cli/tests/test_mcp_config_validation.py b/openhands-cli/tests/test_mcp_config_validation.py
deleted file mode 100644
index b5497681923b..000000000000
--- a/openhands-cli/tests/test_mcp_config_validation.py
+++ /dev/null
@@ -1,206 +0,0 @@
-"""Parametrized tests for MCP configuration screen functionality."""
-
-import json
-from pathlib import Path
-from unittest.mock import patch
-
-import pytest
-from openhands_cli.locations import MCP_CONFIG_FILE
-from openhands_cli.tui.settings.mcp_screen import MCPScreen
-
-from openhands.sdk import LLM, Agent
-
-
-@pytest.fixture
-def persistence_dir(tmp_path, monkeypatch):
- """Patch PERSISTENCE_DIR to tmp and return the directory Path."""
- monkeypatch.setattr(
- 'openhands_cli.tui.settings.mcp_screen.PERSISTENCE_DIR',
- str(tmp_path),
- raising=True,
- )
- return tmp_path
-
-
-def _create_agent(mcp_config=None) -> Agent:
- if mcp_config is None:
- mcp_config = {}
- return Agent(
- llm=LLM(model='test-model', api_key='test-key', usage_id='test-service'),
- tools=[],
- mcp_config=mcp_config,
- )
-
-
-def _maybe_write_mcp_file(dirpath: Path, file_content):
- """Write mcp.json if file_content is provided.
-
- file_content:
- - None -> do not create file (missing)
- - "INVALID"-> write invalid JSON
- - dict -> dump as JSON
- """
- if file_content is None:
- return
- cfg_path = dirpath / MCP_CONFIG_FILE
- if file_content == 'INVALID':
- cfg_path.write_text('{"invalid": json content}')
- else:
- cfg_path.write_text(json.dumps(file_content))
-
-
-# Shared "always expected" help text snippets
-ALWAYS_EXPECTED = [
- 'MCP (Model Context Protocol) Configuration',
- 'To get started:',
- '~/.openhands/mcp.json',
- 'https://gofastmcp.com/clients/client#configuration-format',
- 'Restart your OpenHands session',
-]
-
-
-CASES = [
- # Agent has an existing server; should list "Current Agent MCP Servers"
- dict(
- id='agent_has_existing',
- agent_mcp={
- 'mcpServers': {
- 'existing_server': {
- 'command': 'python',
- 'args': ['-m', 'existing_server'],
- }
- }
- },
- file_content=None, # no incoming file
- expected=[
- 'Current Agent MCP Servers:',
- 'existing_server',
- ],
- unexpected=[],
- ),
- # Agent has none; should show "None configured on the current agent"
- dict(
- id='agent_has_none',
- agent_mcp={},
- file_content=None,
- expected=[
- 'Current Agent MCP Servers:',
- 'None configured on the current agent',
- ],
- unexpected=[],
- ),
- # New servers present only in mcp.json
- dict(
- id='new_servers_on_restart',
- agent_mcp={},
- file_content={
- 'mcpServers': {
- 'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']},
- 'notion': {'url': 'https://mcp.notion.com/mcp', 'auth': 'oauth'},
- }
- },
- expected=[
- 'Incoming Servers on Restart',
- 'New servers (will be added):',
- 'fetch',
- 'notion',
- ],
- unexpected=[],
- ),
- # Overriding/updating servers present in both agent and mcp.json (but different config)
- dict(
- id='overriding_servers_on_restart',
- agent_mcp={
- 'mcpServers': {
- 'fetch': {'command': 'python', 'args': ['-m', 'old_fetch_server']}
- }
- },
- file_content={
- 'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}}
- },
- expected=[
- 'Incoming Servers on Restart',
- 'Updated servers (configuration will change):',
- 'fetch',
- 'Current:',
- 'Incoming:',
- ],
- unexpected=[],
- ),
- # All servers already synced (matching config)
- dict(
- id='already_synced',
- agent_mcp={
- 'mcpServers': {
- 'fetch': {
- 'command': 'uvx',
- 'args': ['mcp-server-fetch'],
- 'env': {},
- 'transport': 'stdio',
- }
- }
- },
- file_content={
- 'mcpServers': {'fetch': {'command': 'uvx', 'args': ['mcp-server-fetch']}}
- },
- expected=[
- 'Incoming Servers on Restart',
- 'All configured servers match the current agent configuration',
- ],
- unexpected=[],
- ),
- # Invalid JSON file handling
- dict(
- id='invalid_json_file',
- agent_mcp={},
- file_content='INVALID',
- expected=[
- 'Invalid MCP configuration file',
- 'Please check your configuration file format',
- ],
- unexpected=[],
- ),
- # Missing JSON file handling
- dict(
- id='missing_json_file',
- agent_mcp={},
- file_content=None, # explicitly missing
- expected=[
- 'Configuration file not found',
- 'No incoming servers detected for next restart',
- ],
- unexpected=[],
- ),
-]
-
-
-@pytest.mark.parametrize('case', CASES, ids=[c['id'] for c in CASES])
-@patch('openhands_cli.tui.settings.mcp_screen.print_formatted_text')
-def test_display_mcp_info_parametrized(mock_print, case, persistence_dir):
- """Table-driven test for MCPScreen.display_mcp_info covering all scenarios."""
- # Arrange
- agent = _create_agent(case['agent_mcp'])
- _maybe_write_mcp_file(persistence_dir, case['file_content'])
- screen = MCPScreen()
-
- # Act
- screen.display_mcp_info(agent)
-
- # Gather output
- all_calls = [str(call_args) for call_args in mock_print.call_args_list]
- content = ' '.join(all_calls)
-
- # Invariants: help instructions should always be present
- for snippet in ALWAYS_EXPECTED:
- assert snippet in content, f'Missing help snippet: {snippet}'
-
- # Scenario-specific expectations
- for snippet in case['expected']:
- assert snippet in content, (
- f'Expected snippet not found for case {case["id"]}: {snippet}'
- )
-
- for snippet in case.get('unexpected', []):
- assert snippet not in content, (
- f'Unexpected snippet found for case {case["id"]}: {snippet}'
- )
diff --git a/openhands-cli/tests/test_pause_listener.py b/openhands-cli/tests/test_pause_listener.py
deleted file mode 100644
index 0471b0b80d87..000000000000
--- a/openhands-cli/tests/test_pause_listener.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python3
-"""
-Tests for pause listener in OpenHands CLI.
-"""
-
-import time
-from unittest.mock import MagicMock
-
-from openhands_cli.listeners.pause_listener import PauseListener, pause_listener
-from prompt_toolkit.input.defaults import create_pipe_input
-
-from openhands.sdk import Conversation
-
-
-class TestPauseListener:
- """Test suite for PauseListener class."""
-
- def test_pause_listener_stop(self) -> None:
- """Test PauseListener stop functionality."""
- mock_callback = MagicMock()
- listener = PauseListener(on_pause=mock_callback)
-
- listener.start()
-
- # Initially not paused
- assert not listener.is_paused()
- assert listener.is_alive()
-
- # Stop the listener
- listener.stop()
-
- # Listner was shutdown not paused
- assert not listener.is_paused()
- assert listener.is_stopped()
-
- def test_pause_listener_context_manager(self) -> None:
- """Test pause_listener context manager."""
- mock_conversation = MagicMock(spec=Conversation)
- mock_conversation.pause = MagicMock()
-
- with create_pipe_input() as pipe:
- with pause_listener(mock_conversation, pipe) as listener:
- assert isinstance(listener, PauseListener)
- assert listener.on_pause == mock_conversation.pause
- # Listener should be started (daemon thread)
- assert listener.is_alive()
- assert not listener.is_paused()
- pipe.send_text('\x10') # Ctrl-P
- time.sleep(0.1)
- assert listener.is_paused()
-
- assert listener.is_stopped()
- assert not listener.is_alive()
diff --git a/openhands-cli/tests/test_session_prompter.py b/openhands-cli/tests/test_session_prompter.py
deleted file mode 100644
index befb1efdfbe5..000000000000
--- a/openhands-cli/tests/test_session_prompter.py
+++ /dev/null
@@ -1,106 +0,0 @@
-import time
-from concurrent.futures import ThreadPoolExecutor
-from typing import Optional
-
-import pytest
-from openhands_cli.user_actions.utils import get_session_prompter
-from prompt_toolkit.formatted_text import HTML
-from prompt_toolkit.input.defaults import create_pipe_input
-from prompt_toolkit.output.defaults import DummyOutput
-
-from tests.utils import _send_keys
-
-
-def _run_prompt_and_type(
- prompt_text: str,
- keys: str,
- *,
- expect_exception: Optional[type[BaseException]] = None,
- timeout: float = 2.0,
- settle: float = 0.05,
-) -> str | None:
- """
- Helper to:
- 1) create a pipe + session,
- 2) start session.prompt in a background thread,
- 3) send keys, and
- 4) return the result or raise the expected exception.
-
- Returns:
- - The prompt result (str) if no exception expected.
- - None if an exception is expected and raised.
- """
- with create_pipe_input() as pipe:
- session = get_session_prompter(input=pipe, output=DummyOutput())
- with ThreadPoolExecutor(max_workers=1) as ex:
- fut = ex.submit(session.prompt, HTML(prompt_text))
- # Allow the prompt loop to start consuming input
- time.sleep(settle)
- _send_keys(pipe, keys)
- if expect_exception:
- with pytest.raises(expect_exception):
- fut.result(timeout=timeout)
- return None
- return fut.result(timeout=timeout)
-
-
-@pytest.mark.parametrize(
- 'desc,keys,expected',
- [
- ('basic single line', 'hello world\r', 'hello world'),
- ('empty input', '\r', ''),
- (
- 'single multiline via backslash-enter',
- 'line 1\\\rline 2\r',
- 'line 1\nline 2',
- ),
- (
- 'multiple multiline segments',
- 'first line\\\rsecond line\\\rthird line\r',
- 'first line\nsecond line\nthird line',
- ),
- (
- 'backslash-only newline then text',
- '\\\rafter newline\r',
- '\nafter newline',
- ),
- (
- 'mixed content (code-like)',
- "def function():\\\r return 'hello'\\\r # end of function\r",
- "def function():\n return 'hello'\n # end of function",
- ),
- (
- 'whitespace preservation (including blank line)',
- ' indented line\\\r\\\r more indented\r',
- ' indented line\n\n more indented',
- ),
- (
- 'special characters',
- 'echo \'hello world\'\\\rgrep -n "pattern" file.txt\r',
- 'echo \'hello world\'\ngrep -n "pattern" file.txt',
- ),
- ],
-)
-def test_get_session_prompter_scenarios(desc, keys, expected):
- """Covers most behaviors via parametrization to reduce duplication."""
- result = _run_prompt_and_type('> ', keys)
- assert result == expected
-
-
-def test_get_session_prompter_keyboard_interrupt():
- """Focused test for Ctrl+C behavior."""
- _run_prompt_and_type('> ', '\x03', expect_exception=KeyboardInterrupt)
-
-
-def test_get_session_prompter_default_parameters():
- """Lightweight sanity check for default construction."""
- session = get_session_prompter()
- assert session is not None
- assert session.multiline is True
- assert session.key_bindings is not None
- assert session.completer is not None
-
- # Prompt continuation should be callable and return the expected string
- cont = session.prompt_continuation
- assert callable(cont)
- assert cont(80, 1, False) == '...'
diff --git a/openhands-cli/tests/test_tui.py b/openhands-cli/tests/test_tui.py
deleted file mode 100644
index 067bef177c85..000000000000
--- a/openhands-cli/tests/test_tui.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""Tests for TUI functionality."""
-
-from openhands_cli.tui.tui import COMMANDS, CommandCompleter
-from prompt_toolkit.completion import CompleteEvent
-from prompt_toolkit.document import Document
-
-
-class TestCommandCompleter:
- """Test the CommandCompleter class."""
-
- def test_command_completion_with_slash(self) -> None:
- """Test that commands are completed when starting with /."""
- completer = CommandCompleter()
- document = Document('/')
- completions = list(completer.get_completions(document, CompleteEvent()))
-
- # Should return all available commands
- assert len(completions) == len(COMMANDS)
-
- # Check that all commands are included
- completion_texts = [c.text for c in completions]
- for command in COMMANDS.keys():
- assert command in completion_texts
-
- def test_command_completion_partial_match(self) -> None:
- """Test that partial command matches work correctly."""
- completer = CommandCompleter()
- document = Document('/ex')
- completions = list(completer.get_completions(document, CompleteEvent()))
-
- # Should return only /exit
- assert len(completions) == 1
- assert completions[0].text == '/exit'
- # display_meta is a FormattedText object, so we need to check its content
- # Extract the text from FormattedText
- meta_text = completions[0].display_meta
- if hasattr(meta_text, '_formatted_text'):
- # Extract text from FormattedText
- text_content = ''.join([item[1] for item in meta_text._formatted_text])
- else:
- text_content = str(meta_text)
- assert COMMANDS['/exit'] in text_content
-
- def test_command_completion_no_slash(self) -> None:
- """Test that no completions are returned without /."""
- completer = CommandCompleter()
- document = Document('help')
- completions = list(completer.get_completions(document, CompleteEvent()))
-
- # Should return no completions
- assert len(completions) == 0
-
- def test_command_completion_no_match(self) -> None:
- """Test that no completions are returned for non-matching commands."""
- completer = CommandCompleter()
- document = Document('/nonexistent')
- completions = list(completer.get_completions(document, CompleteEvent()))
-
- # Should return no completions
- assert len(completions) == 0
-
- def test_command_completion_styling(self) -> None:
- """Test that completions have proper styling."""
- completer = CommandCompleter()
- document = Document('/help')
- completions = list(completer.get_completions(document, CompleteEvent()))
-
- assert len(completions) == 1
- completion = completions[0]
- assert completion.style == 'bg:ansidarkgray fg:gold'
- assert completion.start_position == -5 # Length of "/help"
-
-
-def test_commands_dict() -> None:
- """Test that COMMANDS dictionary contains expected commands."""
- expected_commands = {
- '/exit',
- '/help',
- '/clear',
- '/new',
- '/status',
- '/confirm',
- '/resume',
- '/settings',
- '/mcp',
- }
- assert set(COMMANDS.keys()) == expected_commands
-
- # Check that all commands have descriptions
- for command, description in COMMANDS.items():
- assert isinstance(command, str)
- assert command.startswith('/')
- assert isinstance(description, str)
- assert len(description) > 0
diff --git a/openhands-cli/tests/utils.py b/openhands-cli/tests/utils.py
deleted file mode 100644
index d0a7f12d11de..000000000000
--- a/openhands-cli/tests/utils.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import time
-
-from prompt_toolkit.input import PipeInput
-
-
-def _send_keys(pipe: PipeInput, text: str, delay: float = 0.05) -> None:
- """Helper: small delay then send keys to avoid race with app.run()."""
- time.sleep(delay)
- pipe.send_text(text)
diff --git a/openhands-cli/tests/visualizer/test_visualizer.py b/openhands-cli/tests/visualizer/test_visualizer.py
deleted file mode 100644
index 92ead3643ab5..000000000000
--- a/openhands-cli/tests/visualizer/test_visualizer.py
+++ /dev/null
@@ -1,238 +0,0 @@
-"""Tests for the conversation visualizer and event visualization."""
-
-import json
-
-from rich.text import Text
-
-from openhands_cli.tui.visualizer import (
- CLIVisualizer,
-)
-from openhands.sdk.event import (
- ActionEvent,
- SystemPromptEvent,
- UserRejectObservation,
-)
-from openhands.sdk.llm import (
- MessageToolCall,
- TextContent,
-)
-from openhands.sdk.tool import Action
-
-
-class VisualizerMockAction(Action):
- """Mock action for testing."""
-
- command: str = "test command"
- working_dir: str = "/tmp"
-
-
-class VisualizerCustomAction(Action):
- """Custom action with overridden visualize method."""
-
- task_list: list[dict] = []
-
- @property
- def visualize(self) -> Text:
- """Custom visualization for task tracker."""
- content = Text()
- content.append("Task Tracker Action\n", style="bold")
- content.append(f"Tasks: {len(self.task_list)}")
- for i, task in enumerate(self.task_list):
- content.append(f"\n {i + 1}. {task.get('title', 'Untitled')}")
- return content
-
-
-def create_tool_call(
- call_id: str, function_name: str, arguments: dict
-) -> MessageToolCall:
- """Helper to create a MessageToolCall."""
- return MessageToolCall(
- id=call_id,
- name=function_name,
- arguments=json.dumps(arguments),
- origin="completion",
- )
-
-
-def test_conversation_visualizer_initialization():
- """Test DefaultConversationVisualizer can be initialized."""
- visualizer = CLIVisualizer()
- assert visualizer is not None
- assert hasattr(visualizer, "on_event")
- assert hasattr(visualizer, "_create_event_panel")
-
-
-def test_visualizer_event_panel_creation():
- """Test that visualizer creates panels for different event types."""
- conv_viz = CLIVisualizer()
-
- # Test with a simple action event
- action = VisualizerMockAction(command="test")
- tool_call = create_tool_call("call_1", "test", {})
- action_event = ActionEvent(
- thought=[TextContent(text="Testing")],
- action=action,
- tool_name="test",
- tool_call_id="call_1",
- tool_call=tool_call,
- llm_response_id="response_1",
- )
- panel = conv_viz._create_event_panel(action_event)
- assert panel is not None
- assert hasattr(panel, "renderable")
-
-
-def test_visualizer_action_event_with_none_action_panel():
- """ActionEvent with action=None should render as 'Agent Action (Not Executed)'."""
- visualizer = CLIVisualizer()
- tc = create_tool_call("call_ne_1", "missing_fn", {})
- action_event = ActionEvent(
- thought=[TextContent(text="...")],
- tool_call=tc,
- tool_name=tc.name,
- tool_call_id=tc.id,
- llm_response_id="resp_viz_1",
- action=None,
- )
- panel = visualizer._create_event_panel(action_event)
- assert panel is not None
- # Ensure it doesn't fall back to UNKNOWN
- assert "UNKNOWN Event" not in str(panel.title)
- # And uses the 'Agent Action (Not Executed)' title
- assert "Agent Action (Not Executed)" in str(panel.title)
-
-
-def test_visualizer_user_reject_observation_panel():
- """UserRejectObservation should render a dedicated panel."""
- visualizer = CLIVisualizer()
- event = UserRejectObservation(
- tool_name="demo_tool",
- tool_call_id="fc_call_1",
- action_id="action_1",
- rejection_reason="User rejected the proposed action.",
- )
-
- panel = visualizer._create_event_panel(event)
- assert panel is not None
- title = str(panel.title)
- assert "UNKNOWN Event" not in title
- assert "User Rejected Action" in title
- # ensure the reason is part of the renderable text
- renderable = panel.renderable
- assert isinstance(renderable, Text)
- assert "User rejected the proposed action." in renderable.plain
-
-
-def test_metrics_formatting():
- """Test metrics subtitle formatting."""
- from unittest.mock import MagicMock
-
- from openhands.sdk.conversation.conversation_stats import ConversationStats
- from openhands.sdk.llm.utils.metrics import Metrics
-
- # Create conversation stats with metrics
- conversation_stats = ConversationStats()
-
- # Create metrics and add to conversation stats
- metrics = Metrics(model_name="test-model")
- metrics.add_cost(0.0234)
- metrics.add_token_usage(
- prompt_tokens=1500,
- completion_tokens=500,
- cache_read_tokens=300,
- cache_write_tokens=0,
- reasoning_tokens=200,
- context_window=8000,
- response_id="test_response",
- )
-
- # Add metrics to conversation stats
- conversation_stats.usage_to_metrics["test_usage"] = metrics
-
- # Create visualizer and initialize with mock state
- visualizer = CLIVisualizer()
- mock_state = MagicMock()
- mock_state.stats = conversation_stats
- visualizer.initialize(mock_state)
-
- # Test the metrics subtitle formatting
- subtitle = visualizer._format_metrics_subtitle()
- assert subtitle is not None
- assert "1.5K" in subtitle # Input tokens abbreviated (trailing zeros removed)
- assert "500" in subtitle # Output tokens
- assert "20.00%" in subtitle # Cache hit rate
- assert "200" in subtitle # Reasoning tokens
- assert "0.0234" in subtitle # Cost
-
-
-def test_metrics_abbreviation_formatting():
- """Test number abbreviation with various edge cases."""
- from unittest.mock import MagicMock
-
- from openhands.sdk.conversation.conversation_stats import ConversationStats
- from openhands.sdk.llm.utils.metrics import Metrics
-
- test_cases = [
- # (input_tokens, expected_abbr)
- (999, "999"), # Below threshold
- (1000, "1K"), # Exact K boundary, trailing zeros removed
- (1500, "1.5K"), # K with one decimal, trailing zero removed
- (89080, "89.08K"), # K with two decimals (regression test for bug)
- (89000, "89K"), # K with trailing zeros removed
- (1000000, "1M"), # Exact M boundary
- (1234567, "1.23M"), # M with decimals
- (1000000000, "1B"), # Exact B boundary
- ]
-
- for tokens, expected in test_cases:
- stats = ConversationStats()
- metrics = Metrics(model_name="test-model")
- metrics.add_token_usage(
- prompt_tokens=tokens,
- completion_tokens=100,
- cache_read_tokens=0,
- cache_write_tokens=0,
- reasoning_tokens=0,
- context_window=8000,
- response_id="test",
- )
- stats.usage_to_metrics["test"] = metrics
-
- visualizer = CLIVisualizer()
- mock_state = MagicMock()
- mock_state.stats = stats
- visualizer.initialize(mock_state)
- subtitle = visualizer._format_metrics_subtitle()
-
- assert subtitle is not None, f"Failed for {tokens}"
- assert expected in subtitle, (
- f"Expected '{expected}' in subtitle for {tokens}, got: {subtitle}"
- )
-
-
-def test_event_base_fallback_visualize():
- """Test that Event provides fallback visualization."""
- from openhands.sdk.event.base import Event
- from openhands.sdk.event.types import SourceType
-
- class UnknownEvent(Event):
- source: SourceType = "agent"
-
- event = UnknownEvent()
-
- conv_viz = CLIVisualizer()
- panel = conv_viz._create_event_panel(event)
-
- assert "UNKNOWN Event" in str(panel.title)
-
-
-def test_visualizer_does_not_render_system_prompt():
- """Test that Event provides fallback visualization."""
- system_prompt_event = SystemPromptEvent(
- source="agent",
- system_prompt=TextContent(text="dummy"),
- tools=[]
- )
- conv_viz = CLIVisualizer()
- panel = conv_viz._create_event_panel(system_prompt_event)
- assert panel is None
diff --git a/openhands/README.md b/openhands/README.md
index 5864a39b0e12..93f06f26b3ba 100644
--- a/openhands/README.md
+++ b/openhands/README.md
@@ -2,8 +2,7 @@
This directory contains the core components of OpenHands.
-This diagram provides an overview of the roles of each component and how they communicate and collaborate.
-
+For an overview of the system architecture, see the [architecture documentation](https://docs.openhands.dev/usage/architecture/backend) (v0 backend architecture).
## Classes
diff --git a/openhands/agenthub/codeact_agent/prompts/system_prompt.j2 b/openhands/agenthub/codeact_agent/prompts/system_prompt.j2
index 71478749fa88..9be08842d43c 100644
--- a/openhands/agenthub/codeact_agent/prompts/system_prompt.j2
+++ b/openhands/agenthub/codeact_agent/prompts/system_prompt.j2
@@ -72,7 +72,7 @@ Your primary role is to assist users by executing commands, modifying code, and
-* When interacting with external services like GitHub, GitLab, or Bitbucket, use their respective APIs instead of browser-based interactions whenever possible.
+* When interacting with external services like GitHub, GitLab, Bitbucket, or Azure DevOps, use their respective APIs instead of browser-based interactions whenever possible.
* Only resort to browser-based interactions with these services if specifically requested by the user or if the required operation cannot be performed via API.
diff --git a/openhands/app_server/app_conversation/app_conversation_info_service.py b/openhands/app_server/app_conversation/app_conversation_info_service.py
index 1bbd06531b3a..8e9f1ffe6828 100644
--- a/openhands/app_server/app_conversation/app_conversation_info_service.py
+++ b/openhands/app_server/app_conversation/app_conversation_info_service.py
@@ -9,6 +9,7 @@
AppConversationSortOrder,
)
from openhands.app_server.services.injector import Injector
+from openhands.sdk.event import ConversationStateUpdateEvent
from openhands.sdk.utils.models import DiscriminatedUnionMixin
@@ -26,6 +27,7 @@ async def search_app_conversation_info(
sort_order: AppConversationSortOrder = AppConversationSortOrder.CREATED_AT_DESC,
page_id: str | None = None,
limit: int = 100,
+ include_sub_conversations: bool = False,
) -> AppConversationInfoPage:
"""Search for sandboxed conversations."""
@@ -67,6 +69,19 @@ async def delete_app_conversation_info(self, conversation_id: UUID) -> bool:
Returns True if the conversation was deleted successfully, False otherwise.
"""
+ @abstractmethod
+ async def get_sub_conversation_ids(
+ self, parent_conversation_id: UUID
+ ) -> list[UUID]:
+ """Get all sub-conversation IDs for a given parent conversation.
+
+ Args:
+ parent_conversation_id: The ID of the parent conversation
+
+ Returns:
+ List of sub-conversation IDs
+ """
+
# Mutators
@abstractmethod
@@ -78,6 +93,19 @@ async def save_app_conversation_info(
Return the stored info
"""
+ @abstractmethod
+ async def process_stats_event(
+ self,
+ event: ConversationStateUpdateEvent,
+ conversation_id: UUID,
+ ) -> None:
+ """Process a stats event and update conversation statistics.
+
+ Args:
+ event: The ConversationStateUpdateEvent with key='stats'
+ conversation_id: The ID of the conversation to update
+ """
+
class AppConversationInfoServiceInjector(
DiscriminatedUnionMixin, Injector[AppConversationInfoService], ABC
diff --git a/openhands/app_server/app_conversation/app_conversation_models.py b/openhands/app_server/app_conversation/app_conversation_models.py
index 1b2f201dcd82..0c7ef99ce504 100644
--- a/openhands/app_server/app_conversation/app_conversation_models.py
+++ b/openhands/app_server/app_conversation/app_conversation_models.py
@@ -1,6 +1,6 @@
from datetime import datetime
from enum import Enum
-from uuid import uuid4
+from uuid import UUID, uuid4
from pydantic import BaseModel, Field
@@ -16,6 +16,13 @@
from openhands.storage.data_models.conversation_metadata import ConversationTrigger
+class AgentType(Enum):
+ """Agent type for conversation."""
+
+ DEFAULT = 'default'
+ PLAN = 'plan'
+
+
class AppConversationInfo(BaseModel):
"""Conversation info which does not contain status."""
@@ -34,6 +41,9 @@ class AppConversationInfo(BaseModel):
metrics: MetricsSnapshot | None = None
+ parent_conversation_id: OpenHandsUUID | None = None
+ sub_conversation_ids: list[OpenHandsUUID] = Field(default_factory=list)
+
created_at: datetime = Field(default_factory=utc_now)
updated_at: datetime = Field(default_factory=utc_now)
@@ -87,7 +97,9 @@ class AppConversationStartRequest(BaseModel):
"""
sandbox_id: str | None = Field(default=None)
+ conversation_id: UUID | None = Field(default=None)
initial_message: SendMessageRequest | None = None
+ system_message_suffix: str | None = None
processors: list[EventCallbackProcessor] | None = Field(default=None)
llm_model: str | None = None
@@ -98,6 +110,8 @@ class AppConversationStartRequest(BaseModel):
title: str | None = None
trigger: ConversationTrigger | None = None
pr_number: list[int] = Field(default_factory=list)
+ parent_conversation_id: OpenHandsUUID | None = None
+ agent_type: AgentType = Field(default=AgentType.DEFAULT)
class AppConversationStartTaskStatus(Enum):
@@ -106,6 +120,7 @@ class AppConversationStartTaskStatus(Enum):
PREPARING_REPOSITORY = 'PREPARING_REPOSITORY'
RUNNING_SETUP_SCRIPT = 'RUNNING_SETUP_SCRIPT'
SETTING_UP_GIT_HOOKS = 'SETTING_UP_GIT_HOOKS'
+ SETTING_UP_SKILLS = 'SETTING_UP_SKILLS'
STARTING_CONVERSATION = 'STARTING_CONVERSATION'
READY = 'READY'
ERROR = 'ERROR'
diff --git a/openhands/app_server/app_conversation/app_conversation_router.py b/openhands/app_server/app_conversation/app_conversation_router.py
index 83596b64a572..bf82840e96b2 100644
--- a/openhands/app_server/app_conversation/app_conversation_router.py
+++ b/openhands/app_server/app_conversation/app_conversation_router.py
@@ -1,7 +1,9 @@
"""Sandboxed Conversation router for OpenHands Server."""
import asyncio
+import os
import sys
+import tempfile
from datetime import datetime
from typing import Annotated, AsyncGenerator
from uuid import UUID
@@ -49,9 +51,21 @@ async def anext(async_iterator):
depends_app_conversation_start_task_service,
depends_db_session,
depends_httpx_client,
+ depends_sandbox_service,
+ depends_sandbox_spec_service,
depends_user_context,
get_app_conversation_service,
)
+from openhands.app_server.sandbox.sandbox_models import (
+ AGENT_SERVER,
+ SandboxStatus,
+)
+from openhands.app_server.sandbox.sandbox_service import SandboxService
+from openhands.app_server.sandbox.sandbox_spec_service import SandboxSpecService
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
+from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
router = APIRouter(prefix='/app-conversations', tags=['Conversations'])
app_conversation_service_dependency = depends_app_conversation_service()
@@ -61,6 +75,8 @@ async def anext(async_iterator):
user_context_dependency = depends_user_context()
db_session_dependency = depends_db_session()
httpx_client_dependency = depends_httpx_client()
+sandbox_service_dependency = depends_sandbox_service()
+sandbox_spec_service_dependency = depends_sandbox_spec_service()
# Read methods
@@ -99,6 +115,12 @@ async def search_app_conversations(
lte=100,
),
] = 100,
+ include_sub_conversations: Annotated[
+ bool,
+ Query(
+ title='If True, include sub-conversations in the results. If False (default), exclude all sub-conversations.'
+ ),
+ ] = False,
app_conversation_service: AppConversationService = (
app_conversation_service_dependency
),
@@ -114,6 +136,7 @@ async def search_app_conversations(
updated_at__lt=updated_at__lt,
page_id=page_id,
limit=limit,
+ include_sub_conversations=include_sub_conversations,
)
@@ -193,7 +216,8 @@ async def stream_app_conversation_start(
user_context: UserContext = user_context_dependency,
) -> list[AppConversationStartTask]:
"""Start an app conversation start task and stream updates from it.
- Leaves the connection open until either the conversation starts or there was an error"""
+ Leaves the connection open until either the conversation starts or there was an error
+ """
response = StreamingResponse(
_stream_app_conversation_start(request, user_context),
media_type='application/json',
@@ -207,6 +231,10 @@ async def search_app_conversation_start_tasks(
UUID | None,
Query(title='Filter by conversation ID equal to this value'),
] = None,
+ created_at__gte: Annotated[
+ datetime | None,
+ Query(title='Filter by created_at greater than or equal to this datetime'),
+ ] = None,
sort_order: Annotated[
AppConversationStartTaskSortOrder,
Query(title='Sort order for the results'),
@@ -233,6 +261,7 @@ async def search_app_conversation_start_tasks(
return (
await app_conversation_start_task_service.search_app_conversation_start_tasks(
conversation_id__eq=conversation_id__eq,
+ created_at__gte=created_at__gte,
sort_order=sort_order,
page_id=page_id,
limit=limit,
@@ -246,6 +275,10 @@ async def count_app_conversation_start_tasks(
UUID | None,
Query(title='Filter by conversation ID equal to this value'),
] = None,
+ created_at__gte: Annotated[
+ datetime | None,
+ Query(title='Filter by created_at greater than or equal to this datetime'),
+ ] = None,
app_conversation_start_task_service: AppConversationStartTaskService = (
app_conversation_start_task_service_dependency
),
@@ -253,6 +286,7 @@ async def count_app_conversation_start_tasks(
"""Count conversation start tasks matching the given filters."""
return await app_conversation_start_task_service.count_app_conversation_start_tasks(
conversation_id__eq=conversation_id__eq,
+ created_at__gte=created_at__gte,
)
@@ -271,6 +305,101 @@ async def batch_get_app_conversation_start_tasks(
return start_tasks
+@router.get('/{conversation_id}/file')
+async def read_conversation_file(
+ conversation_id: UUID,
+ file_path: Annotated[
+ str,
+ Query(title='Path to the file to read within the sandbox workspace'),
+ ] = '/workspace/project/PLAN.md',
+ app_conversation_service: AppConversationService = (
+ app_conversation_service_dependency
+ ),
+ sandbox_service: SandboxService = sandbox_service_dependency,
+ sandbox_spec_service: SandboxSpecService = sandbox_spec_service_dependency,
+) -> str:
+ """Read a file from a specific conversation's sandbox workspace.
+
+ Returns the content of the file at the specified path if it exists, otherwise returns an empty string.
+
+ Args:
+ conversation_id: The UUID of the conversation
+ file_path: Path to the file to read within the sandbox workspace
+
+ Returns:
+ The content of the file or an empty string if the file doesn't exist
+ """
+ # Get the conversation info
+ conversation = await app_conversation_service.get_app_conversation(conversation_id)
+ if not conversation:
+ return ''
+
+ # Get the sandbox info
+ sandbox = await sandbox_service.get_sandbox(conversation.sandbox_id)
+ if not sandbox or sandbox.status != SandboxStatus.RUNNING:
+ return ''
+
+ # Get the sandbox spec to find the working directory
+ sandbox_spec = await sandbox_spec_service.get_sandbox_spec(sandbox.sandbox_spec_id)
+ if not sandbox_spec:
+ return ''
+
+ # Get the agent server URL
+ if not sandbox.exposed_urls:
+ return ''
+
+ agent_server_url = None
+ for exposed_url in sandbox.exposed_urls:
+ if exposed_url.name == AGENT_SERVER:
+ agent_server_url = exposed_url.url
+ break
+
+ if not agent_server_url:
+ return ''
+
+ agent_server_url = replace_localhost_hostname_for_docker(agent_server_url)
+
+ # Create remote workspace
+ remote_workspace = AsyncRemoteWorkspace(
+ host=agent_server_url,
+ api_key=sandbox.session_api_key,
+ working_dir=sandbox_spec.working_dir,
+ )
+
+ # Read the file at the specified path
+ temp_file_path = None
+ try:
+ # Create a temporary file path to download the remote file
+ with tempfile.NamedTemporaryFile(mode='w+b', delete=False) as temp_file:
+ temp_file_path = temp_file.name
+
+ # Download the file from remote system
+ result = await remote_workspace.file_download(
+ source_path=file_path,
+ destination_path=temp_file_path,
+ )
+
+ if result.success:
+ # Read the content from the temporary file
+ with open(temp_file_path, 'rb') as f:
+ content = f.read()
+ # Decode bytes to string
+ return content.decode('utf-8')
+ except Exception:
+ # If there's any error reading the file, return empty string
+ pass
+ finally:
+ # Clean up the temporary file
+ if temp_file_path:
+ try:
+ os.unlink(temp_file_path)
+ except Exception:
+ # Ignore errors during cleanup
+ pass
+
+ return ''
+
+
async def _consume_remaining(
async_iter, db_session: AsyncSession, httpx_client: httpx.AsyncClient
):
diff --git a/openhands/app_server/app_conversation/app_conversation_service.py b/openhands/app_server/app_conversation/app_conversation_service.py
index d910856c7692..8d6c6775a8a3 100644
--- a/openhands/app_server/app_conversation/app_conversation_service.py
+++ b/openhands/app_server/app_conversation/app_conversation_service.py
@@ -11,6 +11,7 @@
AppConversationStartRequest,
AppConversationStartTask,
)
+from openhands.app_server.sandbox.sandbox_models import SandboxInfo
from openhands.app_server.services.injector import Injector
from openhands.sdk.utils.models import DiscriminatedUnionMixin
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
@@ -30,6 +31,7 @@ async def search_app_conversations(
sort_order: AppConversationSortOrder = AppConversationSortOrder.CREATED_AT_DESC,
page_id: str | None = None,
limit: int = 100,
+ include_sub_conversations: bool = False,
) -> AppConversationPage:
"""Search for sandboxed conversations."""
@@ -90,6 +92,7 @@ async def start_app_conversation(
async def run_setup_scripts(
self,
task: AppConversationStartTask,
+ sandbox: SandboxInfo,
workspace: AsyncRemoteWorkspace,
) -> AsyncGenerator[AppConversationStartTask, None]:
"""Run the setup scripts for the project and yield status updates"""
diff --git a/openhands/app_server/app_conversation/app_conversation_service_base.py b/openhands/app_server/app_conversation/app_conversation_service_base.py
new file mode 100644
index 000000000000..f524167524f1
--- /dev/null
+++ b/openhands/app_server/app_conversation/app_conversation_service_base.py
@@ -0,0 +1,381 @@
+import logging
+import os
+import tempfile
+from abc import ABC
+from dataclasses import dataclass
+from pathlib import Path
+from typing import AsyncGenerator
+
+import base62
+
+from openhands.app_server.app_conversation.app_conversation_models import (
+ AgentType,
+ AppConversationStartTask,
+ AppConversationStartTaskStatus,
+)
+from openhands.app_server.app_conversation.app_conversation_service import (
+ AppConversationService,
+)
+from openhands.app_server.app_conversation.skill_loader import (
+ load_global_skills,
+ load_repo_skills,
+ load_sandbox_skills,
+ merge_skills,
+)
+from openhands.app_server.sandbox.sandbox_models import SandboxInfo
+from openhands.app_server.user.user_context import UserContext
+from openhands.sdk import Agent
+from openhands.sdk.context.agent_context import AgentContext
+from openhands.sdk.context.condenser import LLMSummarizingCondenser
+from openhands.sdk.context.skills import load_user_skills
+from openhands.sdk.llm import LLM
+from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
+
+_logger = logging.getLogger(__name__)
+PRE_COMMIT_HOOK = '.git/hooks/pre-commit'
+PRE_COMMIT_LOCAL = '.git/hooks/pre-commit.local'
+
+
+@dataclass
+class AppConversationServiceBase(AppConversationService, ABC):
+ """App Conversation service which adds git specific functionality.
+
+ Sets up repositories and installs hooks"""
+
+ init_git_in_empty_workspace: bool
+ user_context: UserContext
+
+ async def _load_and_merge_all_skills(
+ self,
+ sandbox: SandboxInfo,
+ remote_workspace: AsyncRemoteWorkspace,
+ selected_repository: str | None,
+ working_dir: str,
+ ) -> list:
+ """Load skills from all sources and merge them.
+
+ This method handles all errors gracefully and will return an empty list
+ if skill loading fails completely.
+
+        Args:
+            sandbox: SandboxInfo supplying sandbox-level skills
+            remote_workspace: AsyncRemoteWorkspace for loading repo skills
+            selected_repository: repo name or None; working_dir: workdir path
+
+ Returns:
+ List of merged Skill objects from all sources, or empty list on failure
+ """
+ try:
+ _logger.debug('Loading skills for V1 conversation')
+
+ # Load skills from all sources
+ sandbox_skills = load_sandbox_skills(sandbox)
+ global_skills = load_global_skills()
+ # Load user skills from ~/.openhands/skills/ directory
+ # Uses the SDK's load_user_skills() function which handles loading from
+ # ~/.openhands/skills/ and ~/.openhands/microagents/ (for backward compatibility)
+ try:
+ user_skills = load_user_skills()
+ _logger.info(
+ f'Loaded {len(user_skills)} user skills: {[s.name for s in user_skills]}'
+ )
+ except Exception as e:
+ _logger.warning(f'Failed to load user skills: {str(e)}')
+ user_skills = []
+ repo_skills = await load_repo_skills(
+ remote_workspace, selected_repository, working_dir
+ )
+
+ # Merge all skills (later lists override earlier ones)
+ all_skills = merge_skills(
+ [sandbox_skills, global_skills, user_skills, repo_skills]
+ )
+
+ _logger.info(
+ f'Loaded {len(all_skills)} total skills: {[s.name for s in all_skills]}'
+ )
+
+ return all_skills
+ except Exception as e:
+ _logger.warning(f'Failed to load skills: {e}', exc_info=True)
+ # Return empty list on failure - skills will be loaded again later if needed
+ return []
+
+ def _create_agent_with_skills(self, agent, skills: list):
+ """Create or update agent with skills in its context.
+
+ Args:
+ agent: The agent to update
+ skills: List of Skill objects to add to agent context
+
+ Returns:
+ Updated agent with skills in context
+ """
+ if agent.agent_context:
+ # Merge with existing context
+ existing_skills = agent.agent_context.skills
+ all_skills = merge_skills([skills, existing_skills])
+ agent = agent.model_copy(
+ update={
+ 'agent_context': agent.agent_context.model_copy(
+ update={'skills': all_skills}
+ )
+ }
+ )
+ else:
+ # Create new context
+ agent_context = AgentContext(skills=skills)
+ agent = agent.model_copy(update={'agent_context': agent_context})
+
+ return agent
+
+ async def _load_skills_and_update_agent(
+ self,
+ sandbox: SandboxInfo,
+ agent: Agent,
+ remote_workspace: AsyncRemoteWorkspace,
+ selected_repository: str | None,
+ working_dir: str,
+ ):
+ """Load all skills and update agent with them.
+
+        Args:
+            sandbox: SandboxInfo supplying sandbox-level skills
+            agent: The agent to update
+            remote_workspace: AsyncRemoteWorkspace for loading repo skills
+            selected_repository: repo name or None; working_dir: workdir path
+
+ Returns:
+ Updated agent with skills loaded into context
+ """
+ # Load and merge all skills
+ all_skills = await self._load_and_merge_all_skills(
+ sandbox, remote_workspace, selected_repository, working_dir
+ )
+
+ # Update agent with skills
+ agent = self._create_agent_with_skills(agent, all_skills)
+
+ return agent
+
+ async def run_setup_scripts(
+ self,
+ task: AppConversationStartTask,
+ sandbox: SandboxInfo,
+ workspace: AsyncRemoteWorkspace,
+ ) -> AsyncGenerator[AppConversationStartTask, None]:
+ task.status = AppConversationStartTaskStatus.PREPARING_REPOSITORY
+ yield task
+ await self.clone_or_init_git_repo(task, workspace)
+
+ task.status = AppConversationStartTaskStatus.RUNNING_SETUP_SCRIPT
+ yield task
+ await self.maybe_run_setup_script(workspace)
+
+ task.status = AppConversationStartTaskStatus.SETTING_UP_GIT_HOOKS
+ yield task
+ await self.maybe_setup_git_hooks(workspace)
+
+ task.status = AppConversationStartTaskStatus.SETTING_UP_SKILLS
+ yield task
+ await self._load_and_merge_all_skills(
+ sandbox,
+ workspace,
+ task.request.selected_repository,
+ workspace.working_dir,
+ )
+
+ async def _configure_git_user_settings(
+ self,
+ workspace: AsyncRemoteWorkspace,
+ ) -> None:
+ """Configure git global user settings from user preferences.
+
+ Reads git_user_name and git_user_email from user settings and
+ configures them as git global settings in the workspace.
+
+ Args:
+ workspace: The remote workspace to configure git settings in.
+ """
+ try:
+ user_info = await self.user_context.get_user_info()
+
+ if user_info.git_user_name:
+ cmd = f'git config --global user.name "{user_info.git_user_name}"'
+ result = await workspace.execute_command(cmd, workspace.working_dir)
+ if result.exit_code:
+ _logger.warning(f'Git config user.name failed: {result.stderr}')
+ else:
+ _logger.info(
+ f'Git configured with user.name={user_info.git_user_name}'
+ )
+
+ if user_info.git_user_email:
+ cmd = f'git config --global user.email "{user_info.git_user_email}"'
+ result = await workspace.execute_command(cmd, workspace.working_dir)
+ if result.exit_code:
+ _logger.warning(f'Git config user.email failed: {result.stderr}')
+ else:
+ _logger.info(
+ f'Git configured with user.email={user_info.git_user_email}'
+ )
+ except Exception as e:
+ _logger.warning(f'Failed to configure git user settings: {e}')
+
+ async def clone_or_init_git_repo(
+ self,
+ task: AppConversationStartTask,
+ workspace: AsyncRemoteWorkspace,
+ ):
+ request = task.request
+
+ # Create the projects directory if it does not exist yet
+ parent = Path(workspace.working_dir).parent
+ result = await workspace.execute_command(
+ f'mkdir {workspace.working_dir}', parent
+ )
+ if result.exit_code:
+ _logger.warning(f'mkdir failed: {result.stderr}')
+
+ # Configure git user settings from user preferences
+ await self._configure_git_user_settings(workspace)
+
+ if not request.selected_repository:
+ if self.init_git_in_empty_workspace:
+ _logger.debug('Initializing a new git repository in the workspace.')
+ cmd = (
+ 'git init && git config --global '
+ f'--add safe.directory {workspace.working_dir}'
+ )
+ result = await workspace.execute_command(cmd, workspace.working_dir)
+ if result.exit_code:
+ _logger.warning(f'Git init failed: {result.stderr}')
+ else:
+ _logger.info('Not initializing a new git repository.')
+ return
+
+ remote_repo_url: str = await self.user_context.get_authenticated_git_url(
+ request.selected_repository
+ )
+ if not remote_repo_url:
+ raise ValueError('Missing either Git token or valid repository')
+
+ dir_name = request.selected_repository.split('/')[-1]
+
+ # Clone the repo - this is the slow part!
+ clone_command = f'git clone {remote_repo_url} {dir_name}'
+ result = await workspace.execute_command(
+ clone_command, workspace.working_dir, 120
+ )
+ if result.exit_code:
+ _logger.warning(f'Git clone failed: {result.stderr}')
+
+ # Checkout the appropriate branch
+ if request.selected_branch:
+ checkout_command = f'git checkout {request.selected_branch}'
+ else:
+ # Generate a random branch name to avoid conflicts
+ random_str = base62.encodebytes(os.urandom(16))
+ openhands_workspace_branch = f'openhands-workspace-{random_str}'
+ checkout_command = f'git checkout -b {openhands_workspace_branch}'
+ git_dir = Path(workspace.working_dir) / dir_name
+ result = await workspace.execute_command(checkout_command, git_dir)
+ if result.exit_code:
+ _logger.warning(f'Git checkout failed: {result.stderr}')
+
+ async def maybe_run_setup_script(
+ self,
+ workspace: AsyncRemoteWorkspace,
+ ):
+ """Run .openhands/setup.sh if it exists in the workspace or repository."""
+ setup_script = workspace.working_dir + '/.openhands/setup.sh'
+
+ await workspace.execute_command(
+ f'chmod +x {setup_script} && source {setup_script}', timeout=600
+ )
+
+ # TODO: Does this need to be done?
+ # Add the action to the event stream as an ENVIRONMENT event
+ # source = EventSource.ENVIRONMENT
+ # self.event_stream.add_event(action, source)
+
+ async def maybe_setup_git_hooks(
+ self,
+ workspace: AsyncRemoteWorkspace,
+ ):
+ """Set up git hooks if .openhands/pre-commit.sh exists in the workspace or repository."""
+ command = 'mkdir -p .git/hooks && chmod +x .openhands/pre-commit.sh'
+ result = await workspace.execute_command(command, workspace.working_dir)
+ if result.exit_code:
+ return
+
+ # Check if there's an existing pre-commit hook
+ with tempfile.TemporaryFile(mode='w+t') as temp_file:
+ result = workspace.file_download(PRE_COMMIT_HOOK, str(temp_file))
+ if result.get('success'):
+ _logger.info('Preserving existing pre-commit hook')
+ # an existing pre-commit hook exists
+ if 'This hook was installed by OpenHands' not in temp_file.read():
+ # Move the existing hook to pre-commit.local
+ command = (
+ f'mv {PRE_COMMIT_HOOK} {PRE_COMMIT_LOCAL} &&'
+ f'chmod +x {PRE_COMMIT_LOCAL}'
+ )
+ result = await workspace.execute_command(
+ command, workspace.working_dir
+ )
+ if result.exit_code != 0:
+ _logger.error(
+ f'Failed to preserve existing pre-commit hook: {result.stderr}',
+ )
+ return
+
+ # write the pre-commit hook
+ await workspace.file_upload(
+ source_path=Path(__file__).parent / 'git' / 'pre-commit.sh',
+ destination_path=PRE_COMMIT_HOOK,
+ )
+
+ # Make the pre-commit hook executable
+ result = await workspace.execute_command(f'chmod +x {PRE_COMMIT_HOOK}')
+ if result.exit_code:
+ _logger.error(f'Failed to make pre-commit hook executable: {result.stderr}')
+ return
+
+ _logger.info('Git pre-commit hook installed successfully')
+
+ def _create_condenser(
+ self,
+ llm: LLM,
+ agent_type: AgentType,
+ condenser_max_size: int | None,
+ ) -> LLMSummarizingCondenser:
+ """Create a condenser based on user settings and agent type.
+
+ Args:
+ llm: The LLM instance to use for condensation
+ agent_type: Type of agent (PLAN or DEFAULT)
+ condenser_max_size: condenser_max_size setting
+
+ Returns:
+ Configured LLMSummarizingCondenser instance
+ """
+ # LLMSummarizingCondenser has defaults: max_size=120, keep_first=4
+ condenser_kwargs = {
+ 'llm': llm.model_copy(
+ update={
+ 'usage_id': (
+ 'condenser'
+ if agent_type == AgentType.DEFAULT
+ else 'planning_condenser'
+ )
+ }
+ ),
+ }
+ # Only override max_size if user has a custom value
+ if condenser_max_size is not None:
+ condenser_kwargs['max_size'] = condenser_max_size
+
+ condenser = LLMSummarizingCondenser(**condenser_kwargs)
+
+ return condenser
diff --git a/openhands/app_server/app_conversation/app_conversation_start_task_service.py b/openhands/app_server/app_conversation/app_conversation_start_task_service.py
index 05229411f5bf..230b26cd8ff7 100644
--- a/openhands/app_server/app_conversation/app_conversation_start_task_service.py
+++ b/openhands/app_server/app_conversation/app_conversation_start_task_service.py
@@ -1,5 +1,6 @@
import asyncio
from abc import ABC, abstractmethod
+from datetime import datetime
from uuid import UUID
from openhands.app_server.app_conversation.app_conversation_models import (
@@ -18,6 +19,7 @@ class AppConversationStartTaskService(ABC):
async def search_app_conversation_start_tasks(
self,
conversation_id__eq: UUID | None = None,
+ created_at__gte: datetime | None = None,
sort_order: AppConversationStartTaskSortOrder = AppConversationStartTaskSortOrder.CREATED_AT_DESC,
page_id: str | None = None,
limit: int = 100,
@@ -28,6 +30,7 @@ async def search_app_conversation_start_tasks(
async def count_app_conversation_start_tasks(
self,
conversation_id__eq: UUID | None = None,
+ created_at__gte: datetime | None = None,
) -> int:
"""Count conversation start tasks."""
diff --git a/openhands/app_server/app_conversation/git_app_conversation_service.py b/openhands/app_server/app_conversation/git_app_conversation_service.py
deleted file mode 100644
index 57a7965dca6e..000000000000
--- a/openhands/app_server/app_conversation/git_app_conversation_service.py
+++ /dev/null
@@ -1,164 +0,0 @@
-import logging
-import os
-import tempfile
-from abc import ABC
-from dataclasses import dataclass
-from pathlib import Path
-from typing import AsyncGenerator
-
-import base62
-
-from openhands.app_server.app_conversation.app_conversation_models import (
- AppConversationStartTask,
- AppConversationStartTaskStatus,
-)
-from openhands.app_server.app_conversation.app_conversation_service import (
- AppConversationService,
-)
-from openhands.app_server.user.user_context import UserContext
-from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
-
-_logger = logging.getLogger(__name__)
-PRE_COMMIT_HOOK = '.git/hooks/pre-commit'
-PRE_COMMIT_LOCAL = '.git/hooks/pre-commit.local'
-
-
-@dataclass
-class GitAppConversationService(AppConversationService, ABC):
- """App Conversation service which adds git specific functionality.
-
- Sets up repositories and installs hooks"""
-
- init_git_in_empty_workspace: bool
- user_context: UserContext
-
- async def run_setup_scripts(
- self,
- task: AppConversationStartTask,
- workspace: AsyncRemoteWorkspace,
- ) -> AsyncGenerator[AppConversationStartTask, None]:
- task.status = AppConversationStartTaskStatus.PREPARING_REPOSITORY
- yield task
- await self.clone_or_init_git_repo(task, workspace)
-
- task.status = AppConversationStartTaskStatus.RUNNING_SETUP_SCRIPT
- yield task
- await self.maybe_run_setup_script(workspace)
-
- task.status = AppConversationStartTaskStatus.SETTING_UP_GIT_HOOKS
- yield task
- await self.maybe_setup_git_hooks(workspace)
-
- async def clone_or_init_git_repo(
- self,
- task: AppConversationStartTask,
- workspace: AsyncRemoteWorkspace,
- ):
- request = task.request
-
- # Create the projects directory if it does not exist yet
- parent = Path(workspace.working_dir).parent
- result = await workspace.execute_command(
- f'mkdir {workspace.working_dir}', parent
- )
- if result.exit_code:
- _logger.warning(f'mkdir failed: {result.stderr}')
-
- if not request.selected_repository:
- if self.init_git_in_empty_workspace:
- _logger.debug('Initializing a new git repository in the workspace.')
- cmd = (
- 'git init && git config --global '
- f'--add safe.directory {workspace.working_dir}'
- )
- result = await workspace.execute_command(cmd, workspace.working_dir)
- if result.exit_code:
- _logger.warning(f'Git init failed: {result.stderr}')
- else:
- _logger.info('Not initializing a new git repository.')
- return
-
- remote_repo_url: str = await self.user_context.get_authenticated_git_url(
- request.selected_repository
- )
- if not remote_repo_url:
- raise ValueError('Missing either Git token or valid repository')
-
- dir_name = request.selected_repository.split('/')[-1]
-
- # Clone the repo - this is the slow part!
- clone_command = f'git clone {remote_repo_url} {dir_name}'
- result = await workspace.execute_command(clone_command, workspace.working_dir)
- if result.exit_code:
- _logger.warning(f'Git clone failed: {result.stderr}')
-
- # Checkout the appropriate branch
- if request.selected_branch:
- checkout_command = f'git checkout {request.selected_branch}'
- else:
- # Generate a random branch name to avoid conflicts
- random_str = base62.encodebytes(os.urandom(16))
- openhands_workspace_branch = f'openhands-workspace-{random_str}'
- checkout_command = f'git checkout -b {openhands_workspace_branch}'
- await workspace.execute_command(checkout_command, workspace.working_dir)
-
- async def maybe_run_setup_script(
- self,
- workspace: AsyncRemoteWorkspace,
- ):
- """Run .openhands/setup.sh if it exists in the workspace or repository."""
- setup_script = workspace.working_dir + '/.openhands/setup.sh'
-
- await workspace.execute_command(
- f'chmod +x {setup_script} && source {setup_script}', timeout=600
- )
-
- # TODO: Does this need to be done?
- # Add the action to the event stream as an ENVIRONMENT event
- # source = EventSource.ENVIRONMENT
- # self.event_stream.add_event(action, source)
-
- async def maybe_setup_git_hooks(
- self,
- workspace: AsyncRemoteWorkspace,
- ):
- """Set up git hooks if .openhands/pre-commit.sh exists in the workspace or repository."""
- command = 'mkdir -p .git/hooks && chmod +x .openhands/pre-commit.sh'
- result = await workspace.execute_command(command, workspace.working_dir)
- if result.exit_code:
- return
-
- # Check if there's an existing pre-commit hook
- with tempfile.TemporaryFile(mode='w+t') as temp_file:
- result = workspace.file_download(PRE_COMMIT_HOOK, str(temp_file))
- if result.get('success'):
- _logger.info('Preserving existing pre-commit hook')
- # an existing pre-commit hook exists
- if 'This hook was installed by OpenHands' not in temp_file.read():
- # Move the existing hook to pre-commit.local
- command = (
- f'mv {PRE_COMMIT_HOOK} {PRE_COMMIT_LOCAL} &&'
- f'chmod +x {PRE_COMMIT_LOCAL}'
- )
- result = await workspace.execute_command(
- command, workspace.working_dir
- )
- if result.exit_code != 0:
- _logger.error(
- f'Failed to preserve existing pre-commit hook: {result.stderr}',
- )
- return
-
- # write the pre-commit hook
- await workspace.file_upload(
- source_path=Path(__file__).parent / 'git' / 'pre-commit.sh',
- destination_path=PRE_COMMIT_HOOK,
- )
-
- # Make the pre-commit hook executable
- result = await workspace.execute_command(f'chmod +x {PRE_COMMIT_HOOK}')
- if result.exit_code:
- _logger.error(f'Failed to make pre-commit hook executable: {result.stderr}')
- return
-
- _logger.info('Git pre-commit hook installed successfully')
diff --git a/openhands/app_server/app_conversation/live_status_app_conversation_service.py b/openhands/app_server/app_conversation/live_status_app_conversation_service.py
index cc10d254e708..2f04bf9a7112 100644
--- a/openhands/app_server/app_conversation/live_status_app_conversation_service.py
+++ b/openhands/app_server/app_conversation/live_status_app_conversation_service.py
@@ -4,12 +4,12 @@
from dataclasses import dataclass
from datetime import datetime, timedelta
from time import time
-from typing import AsyncGenerator, Sequence
+from typing import Any, AsyncGenerator, Sequence
from uuid import UUID, uuid4
import httpx
from fastapi import Request
-from pydantic import Field, TypeAdapter
+from pydantic import Field, SecretStr, TypeAdapter
from openhands.agent_server.models import (
ConversationInfo,
@@ -21,6 +21,7 @@
AppConversationInfoService,
)
from openhands.app_server.app_conversation.app_conversation_models import (
+ AgentType,
AppConversation,
AppConversationInfo,
AppConversationPage,
@@ -33,12 +34,12 @@
AppConversationService,
AppConversationServiceInjector,
)
+from openhands.app_server.app_conversation.app_conversation_service_base import (
+ AppConversationServiceBase,
+)
from openhands.app_server.app_conversation.app_conversation_start_task_service import (
AppConversationStartTaskService,
)
-from openhands.app_server.app_conversation.git_app_conversation_service import (
- GitAppConversationService,
-)
from openhands.app_server.app_conversation.sql_app_conversation_info_service import (
SQLAppConversationInfoService,
)
@@ -62,22 +63,32 @@
from openhands.app_server.services.injector import InjectorState
from openhands.app_server.services.jwt_service import JwtService
from openhands.app_server.user.user_context import UserContext
+from openhands.app_server.user.user_models import UserInfo
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
from openhands.experiments.experiment_manager import ExperimentManagerImpl
from openhands.integrations.provider import ProviderType
-from openhands.sdk import LocalWorkspace
+from openhands.sdk import Agent, AgentContext, LocalWorkspace
from openhands.sdk.conversation.secret_source import LookupSecret, StaticSecret
from openhands.sdk.llm import LLM
from openhands.sdk.security.confirmation_policy import AlwaysConfirm
from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
-from openhands.tools.preset.default import get_default_agent
+from openhands.server.types import AppMode
+from openhands.tools.preset.default import (
+ get_default_tools,
+)
+from openhands.tools.preset.planning import (
+ format_plan_structure,
+ get_planning_tools,
+)
_conversation_info_type_adapter = TypeAdapter(list[ConversationInfo | None])
_logger = logging.getLogger(__name__)
-GIT_TOKEN = 'GIT_TOKEN'
@dataclass
-class LiveStatusAppConversationService(GitAppConversationService):
+class LiveStatusAppConversationService(AppConversationServiceBase):
"""AppConversationService which combines live status info from the sandbox with stored data."""
user_context: UserContext
@@ -91,7 +102,11 @@ class LiveStatusAppConversationService(GitAppConversationService):
sandbox_startup_poll_frequency: int
httpx_client: httpx.AsyncClient
web_url: str | None
+ openhands_provider_base_url: str | None
access_token_hard_timeout: timedelta | None
+ app_mode: str | None = None
+ keycloak_auth_cookie: str | None = None
+ tavily_api_key: str | None = None
async def search_app_conversations(
self,
@@ -103,6 +118,7 @@ async def search_app_conversations(
sort_order: AppConversationSortOrder = AppConversationSortOrder.CREATED_AT_DESC,
page_id: str | None = None,
limit: int = 20,
+ include_sub_conversations: bool = False,
) -> AppConversationPage:
"""Search for sandboxed conversations."""
page = await self.app_conversation_info_service.search_app_conversation_info(
@@ -114,6 +130,7 @@ async def search_app_conversations(
sort_order=sort_order,
page_id=page_id,
limit=limit,
+ include_sub_conversations=include_sub_conversations,
)
conversations: list[AppConversation] = await self._build_app_conversations(
page.items
@@ -168,6 +185,20 @@ async def _start_app_conversation(
) -> AsyncGenerator[AppConversationStartTask, None]:
# Create and yield the start task
user_id = await self.user_context.get_user_id()
+
+ # Validate and inherit from parent conversation if provided
+ if request.parent_conversation_id:
+ parent_info = (
+ await self.app_conversation_info_service.get_app_conversation_info(
+ request.parent_conversation_id
+ )
+ )
+ if parent_info is None:
+ raise ValueError(
+ f'Parent conversation not found: {request.parent_conversation_id}'
+ )
+ self._inherit_configuration_from_parent(request, parent_info)
+
task = AppConversationStartTask(
created_by_user_id=user_id,
request=request,
@@ -192,20 +223,29 @@ async def _start_app_conversation(
assert sandbox_spec is not None
# Run setup scripts
- workspace = AsyncRemoteWorkspace(
+ remote_workspace = AsyncRemoteWorkspace(
host=agent_server_url,
api_key=sandbox.session_api_key,
working_dir=sandbox_spec.working_dir,
)
- async for updated_task in self.run_setup_scripts(task, workspace):
+ async for updated_task in self.run_setup_scripts(
+ task, sandbox, remote_workspace
+ ):
yield updated_task
# Build the start request
start_conversation_request = (
await self._build_start_conversation_request_for_user(
+ sandbox,
request.initial_message,
+ request.system_message_suffix,
request.git_provider,
sandbox_spec.working_dir,
+ request.agent_type,
+ request.llm_model,
+ request.conversation_id,
+ remote_workspace=remote_workspace,
+ selected_repository=request.selected_repository,
)
)
@@ -224,6 +264,7 @@ async def _start_app_conversation(
headers={'X-Session-API-Key': sandbox.session_api_key},
timeout=self.sandbox_startup_timeout,
)
+
response.raise_for_status()
info = ConversationInfo.model_validate(response.json())
@@ -241,28 +282,31 @@ async def _start_app_conversation(
git_provider=request.git_provider,
trigger=request.trigger,
pr_number=request.pr_number,
+ parent_conversation_id=request.parent_conversation_id,
)
await self.app_conversation_info_service.save_app_conversation_info(
app_conversation_info
)
# Setup default processors
- processors = request.processors
- if processors is None:
- processors = [SetTitleCallbackProcessor()]
+ processors = request.processors or []
+
+ # Always ensure SetTitleCallbackProcessor is included
+ has_set_title_processor = any(
+ isinstance(processor, SetTitleCallbackProcessor)
+ for processor in processors
+ )
+ if not has_set_title_processor:
+ processors.append(SetTitleCallbackProcessor())
# Save processors
- await asyncio.gather(
- *[
- self.event_callback_service.save_event_callback(
- EventCallback(
- conversation_id=info.id,
- processor=processor,
- )
+ for processor in processors:
+ await self.event_callback_service.save_event_callback(
+ EventCallback(
+ conversation_id=info.id,
+ processor=processor,
)
- for processor in processors
- ]
- )
+ )
# Update the start task
task.status = AppConversationStartTaskStatus.READY
@@ -450,58 +494,254 @@ def _get_agent_server_url(self, sandbox: SandboxInfo) -> str:
for exposed_url in exposed_urls
if exposed_url.name == AGENT_SERVER
)
+ agent_server_url = replace_localhost_hostname_for_docker(agent_server_url)
return agent_server_url
- async def _build_start_conversation_request_for_user(
- self,
- initial_message: SendMessageRequest | None,
- git_provider: ProviderType | None,
- working_dir: str,
- ) -> StartConversationRequest:
- user = await self.user_context.get_user_info()
+ def _inherit_configuration_from_parent(
+ self, request: AppConversationStartRequest, parent_info: AppConversationInfo
+ ) -> None:
+ """Inherit configuration from parent conversation if not explicitly provided.
+
+ This ensures sub-conversations automatically inherit:
+ - Sandbox ID (to share the same workspace/environment)
+ - Git parameters (repository, branch, provider)
+ - LLM model
+
+ Args:
+ request: The conversation start request to modify
+ parent_info: The parent conversation info to inherit from
+ """
+ # Inherit sandbox_id from parent to share the same workspace/environment
+ if not request.sandbox_id:
+ request.sandbox_id = parent_info.sandbox_id
+
+ # Inherit git parameters from parent if not provided
+ if not request.selected_repository:
+ request.selected_repository = parent_info.selected_repository
+ if not request.selected_branch:
+ request.selected_branch = parent_info.selected_branch
+ if not request.git_provider:
+ request.git_provider = parent_info.git_provider
+
+ # Inherit LLM model from parent if not provided
+ if not request.llm_model and parent_info.llm_model:
+ request.llm_model = parent_info.llm_model
+
+ async def _setup_secrets_for_git_providers(self, user: UserInfo) -> dict:
+ """Set up secrets for all git provider authentication.
- # Set up a secret for the git token
+ Args:
+ user: User information containing authentication details
+
+ Returns:
+ Dictionary of secrets for the conversation
+ """
secrets = await self.user_context.get_secrets()
- if git_provider:
+
+ # Get all provider tokens from user authentication
+ provider_tokens = await self.user_context.get_provider_tokens()
+ if not provider_tokens:
+ return secrets
+
+ # Create secrets for each provider token
+ for provider_type, provider_token in provider_tokens.items():
+ if not provider_token.token:
+ continue
+
+ secret_name = f'{provider_type.name}_TOKEN'
+
if self.web_url:
- # If there is a web url, then we create an access token to access it.
- # For security reasons, we are explicit here - only this user, and
- # only this provider, with a timeout
+ # Create an access token for web-based authentication
access_token = self.jwt_service.create_jws_token(
payload={
'user_id': user.id,
- 'provider_type': git_provider.value,
+ 'provider_type': provider_type.value,
},
expires_in=self.access_token_hard_timeout,
)
- secrets[GIT_TOKEN] = LookupSecret(
+ headers = {'X-Access-Token': access_token}
+
+ # Include keycloak_auth cookie in headers if app_mode is SaaS
+ if self.app_mode == 'saas' and self.keycloak_auth_cookie:
+ headers['Cookie'] = f'keycloak_auth={self.keycloak_auth_cookie}'
+
+ secrets[secret_name] = LookupSecret(
url=self.web_url + '/api/v1/webhooks/secrets',
- headers={'X-Access-Token': access_token},
+ headers=headers,
)
else:
- # If there is no URL specified where the sandbox can access the app server
- # then we supply a static secret with the most recent value. Depending
- # on the type, this may eventually expire.
- static_token = await self.user_context.get_latest_token(git_provider)
+ # Use static token for environments without web URL access
+ static_token = await self.user_context.get_latest_token(provider_type)
if static_token:
- secrets[GIT_TOKEN] = StaticSecret(value=static_token)
+ secrets[secret_name] = StaticSecret(value=static_token)
- workspace = LocalWorkspace(working_dir=working_dir)
+ return secrets
+
+ async def _configure_llm_and_mcp(
+ self, user: UserInfo, llm_model: str | None
+ ) -> tuple[LLM, dict]:
+ """Configure LLM and MCP (Model Context Protocol) settings.
+
+ Args:
+ user: User information containing LLM preferences
+ llm_model: Optional specific model to use, falls back to user default
+ Returns:
+ Tuple of (configured LLM instance, MCP config dictionary)
+ """
+ # Configure LLM
+ model = llm_model or user.llm_model
+ base_url = user.llm_base_url
+ if model and model.startswith('openhands/'):
+ base_url = user.llm_base_url or self.openhands_provider_base_url
llm = LLM(
- model=user.llm_model,
- base_url=user.llm_base_url,
+ model=model,
+ base_url=base_url,
api_key=user.llm_api_key,
usage_id='agent',
)
- agent = get_default_agent(llm=llm)
- conversation_id = uuid4()
+ # Configure MCP
+ mcp_config: dict[str, Any] = {}
+ if self.web_url:
+ mcp_url = f'{self.web_url}/mcp/mcp'
+ mcp_config = {
+ 'default': {
+ 'url': mcp_url,
+ }
+ }
+
+ # Add API key if available
+ mcp_api_key = await self.user_context.get_mcp_api_key()
+ if mcp_api_key:
+ mcp_config['default']['headers'] = {
+ 'X-Session-API-Key': mcp_api_key,
+ }
+
+ # Get the actual API key values, prioritizing user's key over service key
+ user_search_key = None
+ if user.search_api_key:
+ key_value = user.search_api_key.get_secret_value()
+ if key_value and key_value.strip():
+ user_search_key = key_value
+
+ service_tavily_key = None
+ if self.tavily_api_key:
+ # tavily_api_key is already a string (extracted in the factory method)
+ if self.tavily_api_key.strip():
+ service_tavily_key = self.tavily_api_key
+
+ tavily_api_key = user_search_key or service_tavily_key
+
+ if tavily_api_key:
+ _logger.info('Adding search engine to MCP config')
+ mcp_config['tavily'] = {
+ 'url': f'https://mcp.tavily.com/mcp/?tavilyApiKey={tavily_api_key}'
+ }
+ else:
+ _logger.info('No search engine API key found, skipping search engine')
+
+ return llm, mcp_config
+
+ def _create_agent_with_context(
+ self,
+ llm: LLM,
+ agent_type: AgentType,
+ system_message_suffix: str | None,
+ mcp_config: dict,
+ condenser_max_size: int | None,
+ ) -> Agent:
+ """Create an agent with appropriate tools and context based on agent type.
+
+ Args:
+ llm: Configured LLM instance
+ agent_type: Type of agent to create (PLAN or DEFAULT)
+ system_message_suffix: Optional suffix for system messages
+ mcp_config: MCP configuration dictionary
+ condenser_max_size: condenser_max_size setting
+
+ Returns:
+ Configured Agent instance with context
+ """
+ # Create condenser with user's settings
+ condenser = self._create_condenser(llm, agent_type, condenser_max_size)
+
+ # Create agent based on type
+ if agent_type == AgentType.PLAN:
+ agent = Agent(
+ llm=llm,
+ tools=get_planning_tools(),
+ system_prompt_filename='system_prompt_planning.j2',
+ system_prompt_kwargs={'plan_structure': format_plan_structure()},
+ condenser=condenser,
+ security_analyzer=None,
+ mcp_config=mcp_config,
+ )
+ else:
+ agent = Agent(
+ llm=llm,
+ tools=get_default_tools(enable_browser=True),
+ system_prompt_kwargs={'cli_mode': False},
+ condenser=condenser,
+ mcp_config=mcp_config,
+ )
+
+ # Add agent context
+ agent_context = AgentContext(system_message_suffix=system_message_suffix)
+ agent = agent.model_copy(update={'agent_context': agent_context})
+
+ return agent
+
+ async def _finalize_conversation_request(
+ self,
+ agent: Agent,
+ conversation_id: UUID | None,
+ user: UserInfo,
+ workspace: LocalWorkspace,
+ initial_message: SendMessageRequest | None,
+ secrets: dict,
+ sandbox: SandboxInfo,
+ remote_workspace: AsyncRemoteWorkspace | None,
+ selected_repository: str | None,
+ working_dir: str,
+ ) -> StartConversationRequest:
+ """Finalize the conversation request with experiment variants and skills.
+
+ Args:
+ agent: The configured agent
+ conversation_id: Optional conversation ID, generates new one if None
+ user: User information
+ workspace: Local workspace instance
+ initial_message: Optional initial message for the conversation
+ secrets: Dictionary of secrets for authentication
+ sandbox: Sandbox information
+ remote_workspace: Optional remote workspace for skills loading
+ selected_repository: Optional repository name
+ working_dir: Working directory path
+
+ Returns:
+ Complete StartConversationRequest ready for use
+ """
+ # Generate conversation ID if not provided
+ conversation_id = conversation_id or uuid4()
+
+ # Apply experiment variants
agent = ExperimentManagerImpl.run_agent_variant_tests__v1(
user.id, conversation_id, agent
)
- start_conversation_request = StartConversationRequest(
+ # Load and merge skills if remote workspace is available
+ if remote_workspace:
+ try:
+ agent = await self._load_skills_and_update_agent(
+ sandbox, agent, remote_workspace, selected_repository, working_dir
+ )
+ except Exception as e:
+ _logger.warning(f'Failed to load skills: {e}', exc_info=True)
+ # Continue without skills - don't fail conversation startup
+
+ # Create and return the final request
+ return StartConversationRequest(
conversation_id=conversation_id,
agent=agent,
workspace=workspace,
@@ -511,7 +751,55 @@ async def _build_start_conversation_request_for_user(
initial_message=initial_message,
secrets=secrets,
)
- return start_conversation_request
+
+ async def _build_start_conversation_request_for_user(
+ self,
+ sandbox: SandboxInfo,
+ initial_message: SendMessageRequest | None,
+ system_message_suffix: str | None,
+ git_provider: ProviderType | None,
+ working_dir: str,
+ agent_type: AgentType = AgentType.DEFAULT,
+ llm_model: str | None = None,
+ conversation_id: UUID | None = None,
+ remote_workspace: AsyncRemoteWorkspace | None = None,
+ selected_repository: str | None = None,
+ ) -> StartConversationRequest:
+ """Build a complete conversation request for a user.
+
+ This method orchestrates the creation of a conversation request by:
+ 1. Setting up git provider secrets
+ 2. Configuring LLM and MCP settings
+ 3. Creating an agent with appropriate context
+ 4. Finalizing the request with skills and experiment variants
+ """
+ user = await self.user_context.get_user_info()
+ workspace = LocalWorkspace(working_dir=working_dir)
+
+ # Set up secrets for all git providers
+ secrets = await self._setup_secrets_for_git_providers(user)
+
+ # Configure LLM and MCP
+ llm, mcp_config = await self._configure_llm_and_mcp(user, llm_model)
+
+ # Create agent with context
+ agent = self._create_agent_with_context(
+ llm, agent_type, system_message_suffix, mcp_config, user.condenser_max_size
+ )
+
+ # Finalize and return the conversation request
+ return await self._finalize_conversation_request(
+ agent,
+ conversation_id,
+ user,
+ workspace,
+ initial_message,
+ secrets,
+ sandbox,
+ remote_workspace,
+ selected_repository,
+ working_dir,
+ )
async def update_agent_server_conversation_title(
self,
@@ -564,6 +852,8 @@ async def update_agent_server_conversation_title(
async def delete_app_conversation(self, conversation_id: UUID) -> bool:
"""Delete a V1 conversation and all its associated data.
+ This method will also cascade delete all sub-conversations of the parent.
+
Args:
conversation_id: The UUID of the conversation to delete.
"""
@@ -587,6 +877,10 @@ async def delete_app_conversation(self, conversation_id: UUID) -> bool:
)
return False
+ # Delete all sub-conversations first (to maintain referential integrity)
+ await self._delete_sub_conversations(conversation_id)
+
+ # Now delete the parent conversation
# Delete from agent server if sandbox is running
await self._delete_from_agent_server(app_conversation)
@@ -602,6 +896,41 @@ async def delete_app_conversation(self, conversation_id: UUID) -> bool:
)
return False
+ async def _delete_sub_conversations(self, parent_conversation_id: UUID) -> None:
+ """Delete all sub-conversations of a parent conversation.
+
+ This method handles errors gracefully, continuing to delete remaining
+ sub-conversations even if one fails.
+
+ Args:
+ parent_conversation_id: The UUID of the parent conversation.
+ """
+ sub_conversation_ids = (
+ await self.app_conversation_info_service.get_sub_conversation_ids(
+ parent_conversation_id
+ )
+ )
+
+ for sub_id in sub_conversation_ids:
+ try:
+ sub_conversation = await self.get_app_conversation(sub_id)
+ if sub_conversation:
+ # Delete from agent server if sandbox is running
+ await self._delete_from_agent_server(sub_conversation)
+ # Delete from database
+ await self._delete_from_database(sub_conversation)
+ _logger.info(
+ f'Successfully deleted sub-conversation {sub_id}',
+ extra={'conversation_id': str(sub_id)},
+ )
+ except Exception as e:
+ # Log error but continue deleting remaining sub-conversations
+ _logger.warning(
+ f'Error deleting sub-conversation {sub_id}: {e}',
+ extra={'conversation_id': str(sub_id)},
+ exc_info=True,
+ )
+
async def _delete_from_agent_server(
self, app_conversation: AppConversation
) -> None:
@@ -675,6 +1004,10 @@ class LiveStatusAppConversationServiceInjector(AppConversationServiceInjector):
'be retrieved by a sandboxed conversation.'
),
)
+ tavily_api_key: SecretStr | None = Field(
+ default=None,
+ description='The Tavily Search API key to add to MCP integration',
+ )
async def inject(
self, state: InjectorState, request: Request | None = None
@@ -717,6 +1050,29 @@ async def inject(
if isinstance(sandbox_service, DockerSandboxService):
web_url = f'http://host.docker.internal:{sandbox_service.host_port}'
+ # Get app_mode and keycloak_auth cookie for SaaS mode
+ app_mode = None
+ keycloak_auth_cookie = None
+ try:
+ from openhands.server.shared import server_config
+
+ app_mode = (
+ server_config.app_mode.value if server_config.app_mode else None
+ )
+ if request and server_config.app_mode == AppMode.SAAS:
+ keycloak_auth_cookie = request.cookies.get('keycloak_auth')
+ except (ImportError, AttributeError):
+ # If server_config is not available (e.g., in tests), continue without it
+ pass
+
+ # We supply the global tavily key only if the app mode is not SAAS, where
+ # currently the search endpoints are patched into the app server instead
+ # so the tavily key does not need to be shared
+ if self.tavily_api_key and app_mode != AppMode.SAAS:
+ tavily_api_key = self.tavily_api_key.get_secret_value()
+ else:
+ tavily_api_key = None
+
yield LiveStatusAppConversationService(
init_git_in_empty_workspace=self.init_git_in_empty_workspace,
user_context=user_context,
@@ -730,5 +1086,9 @@ async def inject(
sandbox_startup_poll_frequency=self.sandbox_startup_poll_frequency,
httpx_client=httpx_client,
web_url=web_url,
+ openhands_provider_base_url=config.openhands_provider_base_url,
access_token_hard_timeout=access_token_hard_timeout,
+ app_mode=app_mode,
+ keycloak_auth_cookie=keycloak_auth_cookie,
+ tavily_api_key=tavily_api_key,
)
diff --git a/openhands/app_server/app_conversation/skill_loader.py b/openhands/app_server/app_conversation/skill_loader.py
new file mode 100644
index 000000000000..d8fca7cfc3a9
--- /dev/null
+++ b/openhands/app_server/app_conversation/skill_loader.py
@@ -0,0 +1,348 @@
+"""Utilities for loading skills for V1 conversations.
+
+This module provides functions to load skills from various sources:
+- Global skills from OpenHands/skills/
+- User skills from ~/.openhands/skills/
+- Repository-level skills from the workspace
+
+All skills are used in V1 conversations.
+"""
+
+import logging
+import os
+from pathlib import Path
+
+import openhands
+from openhands.app_server.sandbox.sandbox_models import SandboxInfo
+from openhands.sdk.context.skills import Skill
+from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
+
+_logger = logging.getLogger(__name__)
+
+# Path to global skills directory
+GLOBAL_SKILLS_DIR = os.path.join(
+ os.path.dirname(os.path.dirname(openhands.__file__)),
+ 'skills',
+)
+WORK_HOSTS_SKILL = """The user has access to the following hosts for accessing a web application,
+each of which has a corresponding port:"""
+
+
+def _find_and_load_global_skill_files(skill_dir: Path) -> list[Skill]:
+ """Find and load all .md files from the global skills directory.
+
+ Args:
+ skill_dir: Path to the global skills directory
+
+ Returns:
+ List of Skill objects loaded from the files (excluding README.md)
+ """
+ skills = []
+
+ try:
+ # Find all .md files in the directory (excluding README.md)
+ md_files = [f for f in skill_dir.glob('*.md') if f.name.lower() != 'readme.md']
+
+ # Load skills from the found files
+ for file_path in md_files:
+ try:
+ skill = Skill.load(file_path, skill_dir)
+ skills.append(skill)
+ _logger.debug(f'Loaded global skill: {skill.name} from {file_path}')
+ except Exception as e:
+ _logger.warning(
+ f'Failed to load global skill from {file_path}: {str(e)}'
+ )
+
+ except Exception as e:
+ _logger.debug(f'Failed to find global skill files: {str(e)}')
+
+ return skills
+
+
+def load_sandbox_skills(sandbox: SandboxInfo) -> list[Skill]:
+ """Load skills specific to the sandbox, including exposed ports / urls."""
+ if not sandbox.exposed_urls:
+ return []
+ urls = [url for url in sandbox.exposed_urls if url.name.startswith('WORKER_')]
+ if not urls:
+ return []
+ content_list = [WORK_HOSTS_SKILL]
+ for url in urls:
+ content_list.append(f'* {url.url} (port {url.port})')
+ content = '\n'.join(content_list)
+ return [Skill(name='work_hosts', content=content, trigger=None)]
+
+
+def load_global_skills() -> list[Skill]:
+ """Load global skills from OpenHands/skills/ directory.
+
+ Returns:
+ List of Skill objects loaded from global skills directory.
+ Returns empty list if directory doesn't exist or on errors.
+ """
+ skill_dir = Path(GLOBAL_SKILLS_DIR)
+
+ # Check if directory exists
+ if not skill_dir.exists():
+ _logger.debug(f'Global skills directory does not exist: {skill_dir}')
+ return []
+
+ try:
+ _logger.info(f'Loading global skills from {skill_dir}')
+
+ # Find and load all .md files from the directory
+ skills = _find_and_load_global_skill_files(skill_dir)
+
+ _logger.info(f'Loaded {len(skills)} global skills: {[s.name for s in skills]}')
+
+ return skills
+
+ except Exception as e:
+ _logger.warning(f'Failed to load global skills: {str(e)}')
+ return []
+
+
+def _determine_repo_root(working_dir: str, selected_repository: str | None) -> str:
+ """Determine the repository root directory.
+
+ Args:
+ working_dir: Base working directory path
+ selected_repository: Repository name (e.g., 'owner/repo') or None
+
+ Returns:
+ Path to the repository root directory
+ """
+ if selected_repository:
+ repo_name = selected_repository.split('/')[-1]
+ return f'{working_dir}/{repo_name}'
+ return working_dir
+
+
+async def _read_file_from_workspace(
+ workspace: AsyncRemoteWorkspace, file_path: str, working_dir: str
+) -> str | None:
+ """Read file content from remote workspace.
+
+ Args:
+ workspace: AsyncRemoteWorkspace to execute commands
+ file_path: Path to the file to read
+ working_dir: Working directory for command execution
+
+ Returns:
+ File content as string, or None if file doesn't exist or read fails
+ """
+ try:
+ result = await workspace.execute_command(
+ f'cat {file_path}', cwd=working_dir, timeout=10.0
+ )
+ if result.exit_code == 0 and result.stdout.strip():
+ return result.stdout
+ return None
+ except Exception as e:
+ _logger.debug(f'Failed to read file {file_path}: {str(e)}')
+ return None
+
+
+async def _load_special_files(
+ workspace: AsyncRemoteWorkspace, repo_root: str, working_dir: str
+) -> list[Skill]:
+ """Load special skill files from repository root.
+
+ Loads: .cursorrules, agents.md, agent.md
+
+ Args:
+ workspace: AsyncRemoteWorkspace to execute commands
+ repo_root: Path to repository root directory
+ working_dir: Working directory for command execution
+
+ Returns:
+ List of Skill objects loaded from special files
+ """
+ skills = []
+ special_files = ['.cursorrules', 'agents.md', 'agent.md']
+
+ for filename in special_files:
+        file_path = f'{repo_root}/{filename}'
+ content = await _read_file_from_workspace(workspace, file_path, working_dir)
+
+ if content:
+ try:
+ # Use simple string path to avoid Path filesystem operations
+ skill = Skill.load(path=filename, skill_dir=None, file_content=content)
+ skills.append(skill)
+ _logger.debug(f'Loaded special file skill: {skill.name}')
+ except Exception as e:
+                _logger.warning(f'Failed to create skill from {filename}: {str(e)}')
+
+ return skills
+
+
+async def _find_and_load_skill_md_files(
+ workspace: AsyncRemoteWorkspace, skill_dir: str, working_dir: str
+) -> list[Skill]:
+ """Find and load all .md files from a skills directory in the workspace.
+
+ Args:
+ workspace: AsyncRemoteWorkspace to execute commands
+ skill_dir: Path to skills directory
+ working_dir: Working directory for command execution
+
+ Returns:
+ List of Skill objects loaded from the files (excluding README.md)
+ """
+ skills = []
+
+ try:
+ # Find all .md files in the directory
+ result = await workspace.execute_command(
+ f"find {skill_dir} -type f -name '*.md' 2>/dev/null || true",
+ cwd=working_dir,
+ timeout=10.0,
+ )
+
+ if result.exit_code == 0 and result.stdout.strip():
+ file_paths = [
+ f.strip()
+ for f in result.stdout.strip().split('\n')
+ if f.strip() and 'README.md' not in f
+ ]
+
+ # Load skills from the found files
+ for file_path in file_paths:
+ content = await _read_file_from_workspace(
+ workspace, file_path, working_dir
+ )
+
+ if content:
+ # Calculate relative path for skill name
+ rel_path = file_path.replace(f'{skill_dir}/', '')
+ try:
+ # Use simple string path to avoid Path filesystem operations
+ skill = Skill.load(
+ path=rel_path, skill_dir=None, file_content=content
+ )
+ skills.append(skill)
+ _logger.debug(f'Loaded repo skill: {skill.name}')
+ except Exception as e:
+ _logger.warning(
+ f'Failed to create skill from {rel_path}: {str(e)}'
+ )
+
+ except Exception as e:
+ _logger.debug(f'Failed to find skill files in {skill_dir}: {str(e)}')
+
+ return skills
+
+
+def _merge_repo_skills_with_precedence(
+ special_skills: list[Skill],
+ skills_dir_skills: list[Skill],
+ microagents_dir_skills: list[Skill],
+) -> list[Skill]:
+ """Merge repository skills with precedence order.
+
+ Precedence (highest to lowest):
+ 1. Special files (repo root)
+ 2. .openhands/skills/ directory
+ 3. .openhands/microagents/ directory (backward compatibility)
+
+ Args:
+ special_skills: Skills from special files in repo root
+ skills_dir_skills: Skills from .openhands/skills/ directory
+ microagents_dir_skills: Skills from .openhands/microagents/ directory
+
+ Returns:
+ Deduplicated list of skills with proper precedence
+ """
+ # Use a dict to deduplicate by name, with earlier sources taking precedence
+ skills_by_name = {}
+ for skill in special_skills + skills_dir_skills + microagents_dir_skills:
+ # Only add if not already present (earlier sources win)
+ if skill.name not in skills_by_name:
+ skills_by_name[skill.name] = skill
+
+ return list(skills_by_name.values())
+
+
+async def load_repo_skills(
+ workspace: AsyncRemoteWorkspace,
+ selected_repository: str | None,
+ working_dir: str,
+) -> list[Skill]:
+ """Load repository-level skills from the workspace.
+
+ Loads skills from:
+ 1. Special files in repo root: .cursorrules, agents.md, agent.md
+ 2. .md files in .openhands/skills/ directory (preferred)
+ 3. .md files in .openhands/microagents/ directory (for backward compatibility)
+
+ Args:
+ workspace: AsyncRemoteWorkspace to execute commands in the sandbox
+ selected_repository: Repository name (e.g., 'owner/repo') or None
+ working_dir: Working directory path
+
+ Returns:
+ List of Skill objects loaded from repository.
+ Returns empty list on errors.
+ """
+ try:
+ # Determine repository root directory
+ repo_root = _determine_repo_root(working_dir, selected_repository)
+ _logger.info(f'Loading repo skills from {repo_root}')
+
+ # Load special files from repo root
+ special_skills = await _load_special_files(workspace, repo_root, working_dir)
+
+ # Load .md files from .openhands/skills/ directory (preferred)
+ skills_dir = f'{repo_root}/.openhands/skills'
+ skills_dir_skills = await _find_and_load_skill_md_files(
+ workspace, skills_dir, working_dir
+ )
+
+ # Load .md files from .openhands/microagents/ directory (backward compatibility)
+ microagents_dir = f'{repo_root}/.openhands/microagents'
+ microagents_dir_skills = await _find_and_load_skill_md_files(
+ workspace, microagents_dir, working_dir
+ )
+
+ # Merge all loaded skills with proper precedence
+ all_skills = _merge_repo_skills_with_precedence(
+ special_skills, skills_dir_skills, microagents_dir_skills
+ )
+
+ _logger.info(
+ f'Loaded {len(all_skills)} repo skills: {[s.name for s in all_skills]}'
+ )
+
+ return all_skills
+
+ except Exception as e:
+ _logger.warning(f'Failed to load repo skills: {str(e)}')
+ return []
+
+
+def merge_skills(skill_lists: list[list[Skill]]) -> list[Skill]:
+ """Merge multiple skill lists, avoiding duplicates by name.
+
+ Later lists take precedence over earlier lists for duplicate names.
+
+ Args:
+ skill_lists: List of skill lists to merge
+
+ Returns:
+ Deduplicated list of skills with later lists overriding earlier ones
+ """
+ skills_by_name = {}
+
+ for skill_list in skill_lists:
+ for skill in skill_list:
+ if skill.name in skills_by_name:
+ _logger.debug(
+ f'Overriding skill "{skill.name}" from earlier source with later source'
+ )
+ skills_by_name[skill.name] = skill
+
+ result = list(skills_by_name.values())
+ _logger.debug(f'Merged skills: {[s.name for s in result]}')
+ return result
diff --git a/openhands/app_server/app_conversation/sql_app_conversation_info_service.py b/openhands/app_server/app_conversation/sql_app_conversation_info_service.py
index 09904114851e..83e2d1915b47 100644
--- a/openhands/app_server/app_conversation/sql_app_conversation_info_service.py
+++ b/openhands/app_server/app_conversation/sql_app_conversation_info_service.py
@@ -45,6 +45,8 @@
create_json_type_decorator,
)
from openhands.integrations.provider import ProviderType
+from openhands.sdk.conversation.conversation_stats import ConversationStats
+from openhands.sdk.event import ConversationStateUpdateEvent
from openhands.sdk.llm import MetricsSnapshot
from openhands.sdk.llm.utils.metrics import TokenUsage
from openhands.storage.data_models.conversation_metadata import ConversationTrigger
@@ -88,6 +90,7 @@ class StoredConversationMetadata(Base): # type: ignore
conversation_version = Column(String, nullable=False, default='V0', index=True)
sandbox_id = Column(String, nullable=True, index=True)
+ parent_conversation_id = Column(String, nullable=True, index=True)
@dataclass
@@ -110,10 +113,18 @@ async def search_app_conversation_info(
sort_order: AppConversationSortOrder = AppConversationSortOrder.CREATED_AT_DESC,
page_id: str | None = None,
limit: int = 100,
+ include_sub_conversations: bool = False,
) -> AppConversationInfoPage:
"""Search for sandboxed conversations without permission checks."""
query = await self._secure_select()
+ # Conditionally exclude sub-conversations based on the parameter
+ if not include_sub_conversations:
+ # Exclude sub-conversations (only include top-level conversations)
+ query = query.where(
+ StoredConversationMetadata.parent_conversation_id.is_(None)
+ )
+
query = self._apply_filters(
query=query,
title__contains=title__contains,
@@ -231,6 +242,26 @@ def _apply_filters(
query = query.where(*conditions)
return query
+ async def get_sub_conversation_ids(
+ self, parent_conversation_id: UUID
+ ) -> list[UUID]:
+ """Get all sub-conversation IDs for a given parent conversation.
+
+ Args:
+ parent_conversation_id: The ID of the parent conversation
+
+ Returns:
+ List of sub-conversation IDs
+ """
+ query = await self._secure_select()
+ query = query.where(
+ StoredConversationMetadata.parent_conversation_id
+ == str(parent_conversation_id)
+ )
+ result_set = await self.db_session.execute(query)
+ rows = result_set.scalars().all()
+ return [UUID(row.conversation_id) for row in rows]
+
async def get_app_conversation_info(
self, conversation_id: UUID
) -> AppConversationInfo | None:
@@ -241,7 +272,9 @@ async def get_app_conversation_info(
result_set = await self.db_session.execute(query)
result = result_set.scalar_one_or_none()
if result:
- return self._to_info(result)
+ # Fetch sub-conversation IDs
+ sub_conversation_ids = await self.get_sub_conversation_ids(conversation_id)
+ return self._to_info(result, sub_conversation_ids=sub_conversation_ids)
return None
async def batch_get_app_conversation_info(
@@ -260,8 +293,13 @@ async def batch_get_app_conversation_info(
results: list[AppConversationInfo | None] = []
for conversation_id in conversation_id_strs:
info = info_by_id.get(conversation_id)
+ sub_conversation_ids = await self.get_sub_conversation_ids(
+ UUID(conversation_id)
+ )
if info:
- results.append(self._to_info(info))
+ results.append(
+ self._to_info(info, sub_conversation_ids=sub_conversation_ids)
+ )
else:
results.append(None)
@@ -277,7 +315,7 @@ async def save_app_conversation_info(
)
result = await self.db_session.execute(query)
existing = result.scalar_one_or_none()
- assert existing is None or existing.created_by_user_id == user_id
+ assert existing is None or existing.user_id == user_id
metrics = info.metrics or MetricsSnapshot()
usage = metrics.accumulated_token_usage or TokenUsage()
@@ -307,12 +345,141 @@ async def save_app_conversation_info(
llm_model=info.llm_model,
conversation_version='V1',
sandbox_id=info.sandbox_id,
+ parent_conversation_id=(
+ str(info.parent_conversation_id)
+ if info.parent_conversation_id
+ else None
+ ),
)
await self.db_session.merge(stored)
await self.db_session.commit()
return info
+ async def update_conversation_statistics(
+ self, conversation_id: UUID, stats: ConversationStats
+ ) -> None:
+ """Update conversation statistics from stats event data.
+
+ Args:
+ conversation_id: The ID of the conversation to update
+ stats: ConversationStats object containing usage_to_metrics data from stats event
+ """
+ # Extract agent metrics from usage_to_metrics
+ usage_to_metrics = stats.usage_to_metrics
+ agent_metrics = usage_to_metrics.get('agent')
+
+ if not agent_metrics:
+ logger.debug(
+ 'No agent metrics found in stats for conversation %s', conversation_id
+ )
+ return
+
+ # Query existing record using secure select (filters for V1 and user if available)
+ query = await self._secure_select()
+ query = query.where(
+ StoredConversationMetadata.conversation_id == str(conversation_id)
+ )
+ result = await self.db_session.execute(query)
+ stored = result.scalar_one_or_none()
+
+ if not stored:
+ logger.debug(
+ 'Conversation %s not found or not accessible, skipping statistics update',
+ conversation_id,
+ )
+ return
+
+ # Extract accumulated_cost and max_budget_per_task from Metrics object
+ accumulated_cost = agent_metrics.accumulated_cost
+ max_budget_per_task = agent_metrics.max_budget_per_task
+
+ # Extract accumulated_token_usage from Metrics object
+ accumulated_token_usage = agent_metrics.accumulated_token_usage
+ if accumulated_token_usage:
+ prompt_tokens = accumulated_token_usage.prompt_tokens
+ completion_tokens = accumulated_token_usage.completion_tokens
+ cache_read_tokens = accumulated_token_usage.cache_read_tokens
+ cache_write_tokens = accumulated_token_usage.cache_write_tokens
+ reasoning_tokens = accumulated_token_usage.reasoning_tokens
+ context_window = accumulated_token_usage.context_window
+ per_turn_token = accumulated_token_usage.per_turn_token
+ else:
+ prompt_tokens = None
+ completion_tokens = None
+ cache_read_tokens = None
+ cache_write_tokens = None
+ reasoning_tokens = None
+ context_window = None
+ per_turn_token = None
+
+ # Update fields only if values are provided (not None)
+ if accumulated_cost is not None:
+ stored.accumulated_cost = accumulated_cost
+ if max_budget_per_task is not None:
+ stored.max_budget_per_task = max_budget_per_task
+ if prompt_tokens is not None:
+ stored.prompt_tokens = prompt_tokens
+ if completion_tokens is not None:
+ stored.completion_tokens = completion_tokens
+ if cache_read_tokens is not None:
+ stored.cache_read_tokens = cache_read_tokens
+ if cache_write_tokens is not None:
+ stored.cache_write_tokens = cache_write_tokens
+ if reasoning_tokens is not None:
+ stored.reasoning_tokens = reasoning_tokens
+ if context_window is not None:
+ stored.context_window = context_window
+ if per_turn_token is not None:
+ stored.per_turn_token = per_turn_token
+
+ # Update last_updated_at timestamp
+ stored.last_updated_at = utc_now()
+
+ await self.db_session.commit()
+
+ async def process_stats_event(
+ self,
+ event: ConversationStateUpdateEvent,
+ conversation_id: UUID,
+ ) -> None:
+ """Process a stats event and update conversation statistics.
+
+ Args:
+ event: The ConversationStateUpdateEvent with key='stats'
+ conversation_id: The ID of the conversation to update
+ """
+ try:
+ # Parse event value into ConversationStats model for type safety
+ # event.value can be a dict (from JSON deserialization) or a ConversationStats object
+ event_value = event.value
+ conversation_stats: ConversationStats | None = None
+
+ if isinstance(event_value, ConversationStats):
+ # Already a ConversationStats object
+ conversation_stats = event_value
+ elif isinstance(event_value, dict):
+ # Parse dict into ConversationStats model
+ # This validates the structure and ensures type safety
+ conversation_stats = ConversationStats.model_validate(event_value)
+ elif hasattr(event_value, 'usage_to_metrics'):
+ # Handle objects with usage_to_metrics attribute (e.g., from tests)
+ # Convert to dict first, then validate
+ stats_dict = {'usage_to_metrics': event_value.usage_to_metrics}
+ conversation_stats = ConversationStats.model_validate(stats_dict)
+
+ if conversation_stats and conversation_stats.usage_to_metrics:
+ # Pass ConversationStats object directly for type safety
+ await self.update_conversation_statistics(
+ conversation_id, conversation_stats
+ )
+ except Exception:
+ logger.exception(
+ 'Error updating conversation statistics for conversation %s',
+ conversation_id,
+ stack_info=True,
+ )
+
async def _secure_select(self):
query = select(StoredConversationMetadata).where(
StoredConversationMetadata.conversation_version == 'V1'
@@ -324,7 +491,11 @@ async def _secure_select(self):
)
return query
- def _to_info(self, stored: StoredConversationMetadata) -> AppConversationInfo:
+ def _to_info(
+ self,
+ stored: StoredConversationMetadata,
+ sub_conversation_ids: list[UUID] | None = None,
+ ) -> AppConversationInfo:
# V1 conversations should always have a sandbox_id
sandbox_id = stored.sandbox_id
assert sandbox_id is not None
@@ -364,6 +535,12 @@ def _to_info(self, stored: StoredConversationMetadata) -> AppConversationInfo:
pr_number=stored.pr_number,
llm_model=stored.llm_model,
metrics=metrics,
+ parent_conversation_id=(
+ UUID(stored.parent_conversation_id)
+ if stored.parent_conversation_id
+ else None
+ ),
+ sub_conversation_ids=sub_conversation_ids or [],
created_at=created_at,
updated_at=updated_at,
)
diff --git a/openhands/app_server/app_conversation/sql_app_conversation_start_task_service.py b/openhands/app_server/app_conversation/sql_app_conversation_start_task_service.py
index 91b48ab78127..4913e795bb62 100644
--- a/openhands/app_server/app_conversation/sql_app_conversation_start_task_service.py
+++ b/openhands/app_server/app_conversation/sql_app_conversation_start_task_service.py
@@ -18,6 +18,7 @@
import logging
from dataclasses import dataclass
+from datetime import datetime
from typing import AsyncGenerator
from uuid import UUID
@@ -75,6 +76,7 @@ class SQLAppConversationStartTaskService(AppConversationStartTaskService):
async def search_app_conversation_start_tasks(
self,
conversation_id__eq: UUID | None = None,
+ created_at__gte: datetime | None = None,
sort_order: AppConversationStartTaskSortOrder = AppConversationStartTaskSortOrder.CREATED_AT_DESC,
page_id: str | None = None,
limit: int = 100,
@@ -95,6 +97,12 @@ async def search_app_conversation_start_tasks(
== conversation_id__eq
)
+ # Apply created_at__gte filter
+ if created_at__gte is not None:
+ query = query.where(
+ StoredAppConversationStartTask.created_at >= created_at__gte
+ )
+
# Add sort order
if sort_order == AppConversationStartTaskSortOrder.CREATED_AT:
query = query.order_by(StoredAppConversationStartTask.created_at)
@@ -139,6 +147,7 @@ async def search_app_conversation_start_tasks(
async def count_app_conversation_start_tasks(
self,
conversation_id__eq: UUID | None = None,
+ created_at__gte: datetime | None = None,
) -> int:
"""Count conversation start tasks."""
query = select(func.count(StoredAppConversationStartTask.id))
@@ -156,6 +165,12 @@ async def count_app_conversation_start_tasks(
== conversation_id__eq
)
+ # Apply created_at__gte filter
+ if created_at__gte is not None:
+ query = query.where(
+ StoredAppConversationStartTask.created_at >= created_at__gte
+ )
+
result = await self.session.execute(query)
count = result.scalar()
return count or 0
diff --git a/openhands/app_server/app_lifespan/alembic/versions/003.py b/openhands/app_server/app_lifespan/alembic/versions/003.py
new file mode 100644
index 000000000000..6879b4358f7b
--- /dev/null
+++ b/openhands/app_server/app_lifespan/alembic/versions/003.py
@@ -0,0 +1,41 @@
+"""add parent_conversation_id to conversation_metadata
+
+Revision ID: 003
+Revises: 002
+Create Date: 2025-11-06 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = '003'
+down_revision: Union[str, None] = '002'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """Upgrade schema."""
+ op.add_column(
+ 'conversation_metadata',
+ sa.Column('parent_conversation_id', sa.String(), nullable=True),
+ )
+ op.create_index(
+ op.f('ix_conversation_metadata_parent_conversation_id'),
+ 'conversation_metadata',
+ ['parent_conversation_id'],
+ unique=False,
+ )
+
+
+def downgrade() -> None:
+ """Downgrade schema."""
+ op.drop_index(
+ op.f('ix_conversation_metadata_parent_conversation_id'),
+ table_name='conversation_metadata',
+ )
+ op.drop_column('conversation_metadata', 'parent_conversation_id')
diff --git a/openhands/app_server/config.py b/openhands/app_server/config.py
index 2dd50d7fa714..3c40806af015 100644
--- a/openhands/app_server/config.py
+++ b/openhands/app_server/config.py
@@ -6,9 +6,11 @@
import httpx
from fastapi import Depends, Request
-from pydantic import Field
+from pydantic import Field, SecretStr
from sqlalchemy.ext.asyncio import AsyncSession
+# Import the event_callback module to ensure all processors are registered
+import openhands.app_server.event_callback # noqa: F401
from openhands.agent_server.env_parser import from_env
from openhands.app_server.app_conversation.app_conversation_info_service import (
AppConversationInfoService,
@@ -72,6 +74,11 @@ def get_default_web_url() -> str | None:
return f'https://{web_host}'
+def get_openhands_provider_base_url() -> str | None:
+ """Return the base URL for the OpenHands provider, if configured."""
+ return os.getenv('OPENHANDS_PROVIDER_BASE_URL') or None
+
+
def _get_default_lifespan():
# Check legacy parameters for saas mode. If we are in SAAS mode do not apply
# OSS alembic migrations
@@ -86,6 +93,10 @@ class AppServerConfig(OpenHandsModel):
default_factory=get_default_web_url,
description='The URL where OpenHands is running (e.g., http://localhost:3000)',
)
+ openhands_provider_base_url: str | None = Field(
+ default_factory=get_openhands_provider_base_url,
+ description='Base URL for the OpenHands provider',
+ )
# Dependency Injection Injectors
event: EventServiceInjector | None = None
event_callback: EventCallbackServiceInjector | None = None
@@ -183,7 +194,13 @@ def config_from_env() -> AppServerConfig:
)
if config.app_conversation is None:
- config.app_conversation = LiveStatusAppConversationServiceInjector()
+ tavily_api_key = None
+ tavily_api_key_str = os.getenv('TAVILY_API_KEY') or os.getenv('SEARCH_API_KEY')
+ if tavily_api_key_str:
+ tavily_api_key = SecretStr(tavily_api_key_str)
+ config.app_conversation = LiveStatusAppConversationServiceInjector(
+ tavily_api_key=tavily_api_key
+ )
if config.user is None:
config.user = AuthUserContextInjector()
diff --git a/openhands/app_server/event_callback/__init__.py b/openhands/app_server/event_callback/__init__.py
new file mode 100644
index 000000000000..41be0a732049
--- /dev/null
+++ b/openhands/app_server/event_callback/__init__.py
@@ -0,0 +1,21 @@
+"""Event callback system for OpenHands.
+
+This module provides the event callback system that allows processors to be
+registered and executed when specific events occur during conversations.
+
+All callback processors must be imported here to ensure they are registered
+with the discriminated union system used by Pydantic for validation.
+"""
+
+# Import base classes and processors without circular dependencies
+from .event_callback_models import EventCallbackProcessor, LoggingCallbackProcessor
+from .github_v1_callback_processor import GithubV1CallbackProcessor
+
+# Note: SetTitleCallbackProcessor is not imported here to avoid circular imports
+# It will be registered when imported elsewhere in the application
+
+__all__ = [
+ 'EventCallbackProcessor',
+ 'LoggingCallbackProcessor',
+ 'GithubV1CallbackProcessor',
+]
diff --git a/openhands/app_server/event_callback/github_v1_callback_processor.py b/openhands/app_server/event_callback/github_v1_callback_processor.py
new file mode 100644
index 000000000000..1a83bed9c0cd
--- /dev/null
+++ b/openhands/app_server/event_callback/github_v1_callback_processor.py
@@ -0,0 +1,296 @@
+import logging
+import os
+from typing import Any
+from uuid import UUID
+
+import httpx
+from github import Github, GithubIntegration
+from pydantic import Field
+
+from openhands.agent_server.models import AskAgentRequest, AskAgentResponse
+from openhands.app_server.event_callback.event_callback_models import (
+ EventCallback,
+ EventCallbackProcessor,
+)
+from openhands.app_server.event_callback.event_callback_result_models import (
+ EventCallbackResult,
+ EventCallbackResultStatus,
+)
+from openhands.app_server.event_callback.util import (
+ ensure_conversation_found,
+ ensure_running_sandbox,
+ get_agent_server_url_from_sandbox,
+ get_conversation_url,
+ get_prompt_template,
+)
+from openhands.sdk import Event
+from openhands.sdk.event import ConversationStateUpdateEvent
+
+_logger = logging.getLogger(__name__)
+
+
+class GithubV1CallbackProcessor(EventCallbackProcessor):
+ """Callback processor for GitHub V1 integrations."""
+
+ github_view_data: dict[str, Any] = Field(default_factory=dict)
+ should_request_summary: bool = Field(default=True)
+ should_extract: bool = Field(default=True)
+ inline_pr_comment: bool = Field(default=False)
+
+ async def __call__(
+ self,
+ conversation_id: UUID,
+ callback: EventCallback,
+ event: Event,
+ ) -> EventCallbackResult | None:
+ """Process events for GitHub V1 integration."""
+
+ # Only handle ConversationStateUpdateEvent
+ if not isinstance(event, ConversationStateUpdateEvent):
+ return None
+
+ # Only act when execution has finished
+ if not (event.key == 'execution_status' and event.value == 'finished'):
+ return None
+
+ _logger.info('[GitHub V1] Callback agent state was %s', event)
+ _logger.info(
+ '[GitHub V1] Should request summary: %s', self.should_request_summary
+ )
+
+ if not self.should_request_summary:
+ return None
+
+ self.should_request_summary = False
+
+ try:
+ summary = await self._request_summary(conversation_id)
+ await self._post_summary_to_github(summary)
+
+ return EventCallbackResult(
+ status=EventCallbackResultStatus.SUCCESS,
+ event_callback_id=callback.id,
+ event_id=event.id,
+ conversation_id=conversation_id,
+ detail=summary,
+ )
+ except Exception as e:
+ _logger.exception('[GitHub V1] Error processing callback: %s', e)
+
+ # Only try to post error to GitHub if we have basic requirements
+ try:
+ # Check if we have installation ID and credentials before posting
+ if (
+ self.github_view_data.get('installation_id')
+ and os.getenv('GITHUB_APP_CLIENT_ID')
+ and os.getenv('GITHUB_APP_PRIVATE_KEY')
+ ):
+ await self._post_summary_to_github(
+ f'OpenHands encountered an error: **{str(e)}**.\n\n'
+ f'[See the conversation]({get_conversation_url().format(conversation_id)})'
+                        ' for more information.'
+ )
+ except Exception as post_error:
+ _logger.warning(
+ '[GitHub V1] Failed to post error message to GitHub: %s', post_error
+ )
+
+ return EventCallbackResult(
+ status=EventCallbackResultStatus.ERROR,
+ event_callback_id=callback.id,
+ event_id=event.id,
+ conversation_id=conversation_id,
+ detail=str(e),
+ )
+
+ # -------------------------------------------------------------------------
+ # GitHub helpers
+ # -------------------------------------------------------------------------
+
+ def _get_installation_access_token(self) -> str:
+ installation_id = self.github_view_data.get('installation_id')
+
+ if not installation_id:
+ raise ValueError(
+ f'Missing installation ID for GitHub payload: {self.github_view_data}'
+ )
+
+ github_app_client_id = os.getenv('GITHUB_APP_CLIENT_ID', '').strip()
+ github_app_private_key = os.getenv('GITHUB_APP_PRIVATE_KEY', '').replace(
+ '\\n', '\n'
+ )
+
+ if not github_app_client_id or not github_app_private_key:
+ raise ValueError('GitHub App credentials are not configured')
+
+ github_integration = GithubIntegration(
+ github_app_client_id,
+ github_app_private_key,
+ )
+ token_data = github_integration.get_access_token(installation_id)
+ return token_data.token
+
+ async def _post_summary_to_github(self, summary: str) -> None:
+ """Post a summary comment to the configured GitHub issue."""
+ installation_token = self._get_installation_access_token()
+
+ if not installation_token:
+ raise RuntimeError('Missing GitHub credentials')
+
+ full_repo_name = self.github_view_data['full_repo_name']
+ issue_number = self.github_view_data['issue_number']
+
+ if self.inline_pr_comment:
+ with Github(installation_token) as github_client:
+ repo = github_client.get_repo(full_repo_name)
+ pr = repo.get_pull(issue_number)
+ pr.create_review_comment_reply(
+                    comment_id=self.github_view_data['comment_id'], body=summary
+ )
+ return
+
+ with Github(installation_token) as github_client:
+ repo = github_client.get_repo(full_repo_name)
+ issue = repo.get_issue(number=issue_number)
+ issue.create_comment(summary)
+
+ # -------------------------------------------------------------------------
+ # Agent / sandbox helpers
+ # -------------------------------------------------------------------------
+
+ async def _ask_question(
+ self,
+ httpx_client: httpx.AsyncClient,
+ agent_server_url: str,
+ conversation_id: UUID,
+ session_api_key: str,
+ message_content: str,
+ ) -> str:
+ """Send a message to the agent server via the V1 API and return response text."""
+ send_message_request = AskAgentRequest(question=message_content)
+
+ url = (
+ f'{agent_server_url.rstrip("/")}'
+ f'/api/conversations/{conversation_id}/ask_agent'
+ )
+ headers = {'X-Session-API-Key': session_api_key}
+ payload = send_message_request.model_dump()
+
+ try:
+ response = await httpx_client.post(
+ url,
+ json=payload,
+ headers=headers,
+ timeout=30.0,
+ )
+ response.raise_for_status()
+
+ agent_response = AskAgentResponse.model_validate(response.json())
+ return agent_response.response
+
+ except httpx.HTTPStatusError as e:
+ error_detail = f'HTTP {e.response.status_code} error'
+ try:
+ error_body = e.response.text
+ if error_body:
+ error_detail += f': {error_body}'
+ except Exception: # noqa: BLE001
+ pass
+
+ _logger.error(
+ '[GitHub V1] HTTP error sending message to %s: %s. '
+ 'Request payload: %s. Response headers: %s',
+ url,
+ error_detail,
+ payload,
+ dict(e.response.headers),
+ exc_info=True,
+ )
+ raise Exception(f'Failed to send message to agent server: {error_detail}')
+
+ except httpx.TimeoutException:
+ error_detail = f'Request timeout after 30 seconds to {url}'
+ _logger.error(
+ '[GitHub V1] %s. Request payload: %s',
+ error_detail,
+ payload,
+ exc_info=True,
+ )
+ raise Exception(error_detail)
+
+ except httpx.RequestError as e:
+ error_detail = f'Request error to {url}: {str(e)}'
+ _logger.error(
+ '[GitHub V1] %s. Request payload: %s',
+ error_detail,
+ payload,
+ exc_info=True,
+ )
+ raise Exception(error_detail)
+
+ # -------------------------------------------------------------------------
+ # Summary orchestration
+ # -------------------------------------------------------------------------
+
+ async def _request_summary(self, conversation_id: UUID) -> str:
+ """
+ Ask the agent to produce a summary of its work and return the agent response.
+
+ NOTE: This method now returns a string (the agent server's response text)
+ and raises exceptions on errors. The wrapping into EventCallbackResult
+ is handled by __call__.
+ """
+ # Import services within the method to avoid circular imports
+ from openhands.app_server.config import (
+ get_app_conversation_info_service,
+ get_httpx_client,
+ get_sandbox_service,
+ )
+ from openhands.app_server.services.injector import InjectorState
+ from openhands.app_server.user.specifiy_user_context import (
+ ADMIN,
+ USER_CONTEXT_ATTR,
+ )
+
+ # Create injector state for dependency injection
+ state = InjectorState()
+ setattr(state, USER_CONTEXT_ATTR, ADMIN)
+
+ async with (
+ get_app_conversation_info_service(state) as app_conversation_info_service,
+ get_sandbox_service(state) as sandbox_service,
+ get_httpx_client(state) as httpx_client,
+ ):
+ # 1. Conversation lookup
+ app_conversation_info = ensure_conversation_found(
+ await app_conversation_info_service.get_app_conversation_info(
+ conversation_id
+ ),
+ conversation_id,
+ )
+
+ # 2. Sandbox lookup + validation
+ sandbox = ensure_running_sandbox(
+ await sandbox_service.get_sandbox(app_conversation_info.sandbox_id),
+ app_conversation_info.sandbox_id,
+ )
+
+ assert sandbox.session_api_key is not None, (
+ f'No session API key for sandbox: {sandbox.id}'
+ )
+
+ # 3. URL + instruction
+ agent_server_url = get_agent_server_url_from_sandbox(sandbox)
+            # NOTE: duplicate call removed; agent_server_url is resolved above
+
+ # Prepare message based on agent state
+ message_content = get_prompt_template('summary_prompt.j2')
+
+ # Ask the agent and return the response text
+ return await self._ask_question(
+ httpx_client=httpx_client,
+ agent_server_url=agent_server_url,
+ conversation_id=conversation_id,
+ session_api_key=sandbox.session_api_key,
+ message_content=message_content,
+ )
diff --git a/openhands/app_server/event_callback/set_title_callback_processor.py b/openhands/app_server/event_callback/set_title_callback_processor.py
index 92373dbff0cd..071d28b5f973 100644
--- a/openhands/app_server/event_callback/set_title_callback_processor.py
+++ b/openhands/app_server/event_callback/set_title_callback_processor.py
@@ -15,6 +15,9 @@
)
from openhands.app_server.services.injector import InjectorState
from openhands.app_server.user.specifiy_user_context import ADMIN, USER_CONTEXT_ATTR
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
from openhands.sdk import Event, MessageEvent
_logger = logging.getLogger(__name__)
@@ -53,8 +56,13 @@ async def __call__(
conversation_id
)
assert app_conversation is not None
+ app_conversation_url = app_conversation.conversation_url
+ assert app_conversation_url is not None
+ app_conversation_url = replace_localhost_hostname_for_docker(
+ app_conversation_url
+ )
response = await httpx_client.post(
- f'{app_conversation.conversation_url}/generate_title',
+ f'{app_conversation_url}/generate_title',
headers={
'X-Session-API-Key': app_conversation.session_api_key,
},
diff --git a/openhands/app_server/event_callback/sql_event_callback_service.py b/openhands/app_server/event_callback/sql_event_callback_service.py
index 37e5bce111d2..c45416c37c78 100644
--- a/openhands/app_server/event_callback/sql_event_callback_service.py
+++ b/openhands/app_server/event_callback/sql_event_callback_service.py
@@ -6,7 +6,6 @@
import asyncio
import logging
from dataclasses import dataclass
-from datetime import datetime
from typing import AsyncGenerator
from uuid import UUID
@@ -15,6 +14,7 @@
from sqlalchemy import Column, Enum, String, and_, func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
+from openhands.agent_server.utils import utc_now
from openhands.app_server.event_callback.event_callback_models import (
CreateEventCallbackRequest,
EventCallback,
@@ -177,7 +177,7 @@ async def search_event_callbacks(
return EventCallbackPage(items=callbacks, next_page_id=next_page_id)
async def save_event_callback(self, event_callback: EventCallback) -> EventCallback:
- event_callback.updated_at = datetime.now()
+ event_callback.updated_at = utc_now()
stored_callback = StoredEventCallback(**event_callback.model_dump())
await self.db_session.merge(stored_callback)
return event_callback
@@ -209,6 +209,10 @@ async def execute_callbacks(self, conversation_id: UUID, event: Event) -> None:
for callback in callbacks
]
)
+
+        # Persist any changes the callbacks may have made to themselves
+ for callback in callbacks:
+ await self.save_event_callback(callback)
await self.db_session.commit()
async def execute_callback(
diff --git a/openhands/app_server/event_callback/util.py b/openhands/app_server/event_callback/util.py
new file mode 100644
index 000000000000..1c9e56893545
--- /dev/null
+++ b/openhands/app_server/event_callback/util.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+from uuid import UUID
+
+from openhands.app_server.sandbox.sandbox_models import (
+ AGENT_SERVER,
+ SandboxInfo,
+ SandboxStatus,
+)
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
+
+if TYPE_CHECKING:
+ from openhands.app_server.app_conversation.app_conversation_models import (
+ AppConversationInfo,
+ )
+
+
+def get_conversation_url() -> str:
+ from openhands.app_server.config import get_global_config
+
+ web_url = get_global_config().web_url
+ conversation_prefix = 'conversations/{}'
+ conversation_url = f'{web_url}/{conversation_prefix}'
+ return conversation_url
+
+
+def ensure_conversation_found(
+ app_conversation_info: AppConversationInfo | None, conversation_id: UUID
+) -> AppConversationInfo:
+ """Ensure conversation info exists, otherwise raise a clear error."""
+ if not app_conversation_info:
+ raise RuntimeError(f'Conversation not found: {conversation_id}')
+ return app_conversation_info
+
+
+def ensure_running_sandbox(sandbox: SandboxInfo | None, sandbox_id: str) -> SandboxInfo:
+ """Ensure sandbox exists, is running, and has a session API key."""
+ if not sandbox:
+ raise RuntimeError(f'Sandbox not found: {sandbox_id}')
+
+ if sandbox.status != SandboxStatus.RUNNING:
+ raise RuntimeError(f'Sandbox not running: {sandbox_id}')
+
+ if not sandbox.session_api_key:
+ raise RuntimeError(f'No session API key for sandbox: {sandbox.id}')
+
+ return sandbox
+
+
+def get_agent_server_url_from_sandbox(sandbox: SandboxInfo) -> str:
+ """Return the agent server URL from sandbox exposed URLs."""
+ exposed_urls = sandbox.exposed_urls
+ if not exposed_urls:
+ raise RuntimeError(f'No exposed URLs configured for sandbox {sandbox.id!r}')
+
+ try:
+ agent_server_url = next(
+ exposed_url.url
+ for exposed_url in exposed_urls
+ if exposed_url.name == AGENT_SERVER
+ )
+ except StopIteration:
+ raise RuntimeError(
+ f'No {AGENT_SERVER!r} URL found for sandbox {sandbox.id!r}'
+ ) from None
+
+ return replace_localhost_hostname_for_docker(agent_server_url)
+
+
+def get_prompt_template(template_name: str) -> str:
+ from jinja2 import Environment, FileSystemLoader
+
+ jinja_env = Environment(
+ loader=FileSystemLoader('openhands/integrations/templates/resolver/')
+ )
+ summary_instruction_template = jinja_env.get_template(template_name)
+ summary_instruction = summary_instruction_template.render()
+ return summary_instruction
diff --git a/openhands/app_server/event_callback/webhook_router.py b/openhands/app_server/event_callback/webhook_router.py
index 498ebd2fd26c..ac9812764d0d 100644
--- a/openhands/app_server/event_callback/webhook_router.py
+++ b/openhands/app_server/event_callback/webhook_router.py
@@ -6,9 +6,10 @@
import pkgutil
from uuid import UUID
-from fastapi import APIRouter, Depends, HTTPException, status
+from fastapi import APIRouter, Depends, HTTPException, Response, status
from fastapi.security import APIKeyHeader
from jwt import InvalidTokenError
+from pydantic import SecretStr
from openhands import tools # type: ignore[attr-defined]
from openhands.agent_server.models import ConversationInfo, Success
@@ -33,6 +34,7 @@
from openhands.app_server.sandbox.sandbox_service import SandboxService
from openhands.app_server.services.injector import InjectorState
from openhands.app_server.services.jwt_service import JwtService
+from openhands.app_server.user.auth_user_context import AuthUserContext
from openhands.app_server.user.specifiy_user_context import (
USER_CONTEXT_ATTR,
SpecifyUserContext,
@@ -41,6 +43,11 @@
from openhands.app_server.user.user_context import UserContext
from openhands.integrations.provider import ProviderType
from openhands.sdk import Event
+from openhands.sdk.event import ConversationStateUpdateEvent
+from openhands.server.user_auth.default_user_auth import DefaultUserAuth
+from openhands.server.user_auth.user_auth import (
+ get_for_user as get_user_auth_for_user,
+)
router = APIRouter(prefix='/webhooks', tags=['Webhooks'])
sandbox_service_dependency = depends_sandbox_service()
@@ -138,6 +145,13 @@ async def on_event(
*[event_service.save_event(conversation_id, event) for event in events]
)
+ # Process stats events for V1 conversations
+ for event in events:
+ if isinstance(event, ConversationStateUpdateEvent) and event.key == 'stats':
+ await app_conversation_info_service.process_stats_event(
+ event, conversation_id
+ )
+
asyncio.create_task(
_run_callbacks_in_bg_and_close(
conversation_id, app_conversation_info.created_by_user_id, events
@@ -154,23 +168,34 @@ async def on_event(
async def get_secret(
access_token: str = Depends(APIKeyHeader(name='X-Access-Token', auto_error=False)),
jwt_service: JwtService = jwt_dependency,
-) -> str:
+) -> Response:
"""Given an access token, retrieve a user secret. The access token
is limited by user and provider type, and may include a timeout, limiting
the damage in the event that a token is ever leaked"""
try:
payload = jwt_service.verify_jws_token(access_token)
user_id = payload['user_id']
- provider_type = ProviderType[payload['provider_type']]
- user_injector = config.user
- assert user_injector is not None
- user_context = await user_injector.get_for_user(user_id)
- secret = None
- if user_context:
- secret = await user_context.get_latest_token(provider_type)
+ provider_type = ProviderType(payload['provider_type'])
+
+ # Get UserAuth for the user_id
+ if user_id:
+ user_auth = await get_user_auth_for_user(user_id)
+ else:
+ # OSS mode - use default user auth
+ user_auth = DefaultUserAuth()
+
+ # Create UserContext directly
+ user_context = AuthUserContext(user_auth=user_auth)
+
+ secret = await user_context.get_latest_token(provider_type)
if secret is None:
raise HTTPException(404, 'No such provider')
- return secret
+ if isinstance(secret, SecretStr):
+ secret_value = secret.get_secret_value()
+ else:
+ secret_value = secret
+
+ return Response(content=secret_value, media_type='text/plain')
except InvalidTokenError:
raise HTTPException(status.HTTP_401_UNAUTHORIZED)
diff --git a/openhands/app_server/sandbox/docker_sandbox_service.py b/openhands/app_server/sandbox/docker_sandbox_service.py
index 0f6ea51916bf..d7fe0b726d15 100644
--- a/openhands/app_server/sandbox/docker_sandbox_service.py
+++ b/openhands/app_server/sandbox/docker_sandbox_service.py
@@ -32,6 +32,9 @@
)
from openhands.app_server.sandbox.sandbox_spec_service import SandboxSpecService
from openhands.app_server.services.injector import InjectorState
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
_logger = logging.getLogger(__name__)
SESSION_API_KEY_VARIABLE = 'OH_SESSION_API_KEYS_0'
@@ -159,6 +162,7 @@ async def _container_to_sandbox_info(self, container) -> SandboxInfo | None:
ExposedUrl(
name=exposed_port.name,
url=url,
+ port=host_port,
)
)
@@ -185,6 +189,9 @@ async def _container_to_checked_sandbox_info(self, container) -> SandboxInfo | N
if exposed_url.name == AGENT_SERVER
)
try:
+ # When running in Docker, replace localhost hostname with host.docker.internal for internal requests
+ app_server_url = replace_localhost_hostname_for_docker(app_server_url)
+
response = await self.httpx_client.get(
f'{app_server_url}{self.health_check_path}'
)
@@ -192,7 +199,7 @@ async def _container_to_checked_sandbox_info(self, container) -> SandboxInfo | N
except asyncio.CancelledError:
raise
except Exception as exc:
- _logger.info(f'Sandbox server not running: {exc}')
+ _logger.info(f'Sandbox server not running: {app_server_url} : {exc}')
sandbox_info.status = SandboxStatus.ERROR
sandbox_info.exposed_urls = None
sandbox_info.session_api_key = None
@@ -210,7 +217,9 @@ async def search_sandboxes(
sandboxes = []
for container in all_containers:
- if container.name.startswith(self.container_name_prefix):
+ if container.name and container.name.startswith(
+ self.container_name_prefix
+ ):
sandbox_info = await self._container_to_checked_sandbox_info(
container
)
diff --git a/openhands/app_server/sandbox/docker_sandbox_spec_service.py b/openhands/app_server/sandbox/docker_sandbox_spec_service.py
index cd42cbfe702a..b7a9553e7e5e 100644
--- a/openhands/app_server/sandbox/docker_sandbox_spec_service.py
+++ b/openhands/app_server/sandbox/docker_sandbox_spec_service.py
@@ -42,6 +42,8 @@ def get_default_sandbox_specs():
'LOG_JSON': 'true',
'OH_CONVERSATIONS_PATH': '/workspace/conversations',
'OH_BASH_EVENTS_DIR': '/workspace/bash_events',
+ 'PYTHONUNBUFFERED': '1',
+ 'ENV_LOG_LEVEL': '20',
},
working_dir='/workspace/project',
)
@@ -81,10 +83,49 @@ async def pull_spec_if_missing(self, spec: SandboxSpecInfo):
try:
docker_client.images.get(spec.id)
except docker.errors.ImageNotFound:
- _logger.info(f'⬇️ Pulling Docker Image: {spec.id}')
- # Pull in a background thread to prevent locking up the main runloop
- loop = asyncio.get_running_loop()
- await loop.run_in_executor(None, docker_client.images.pull, spec.id)
- _logger.info(f'⬇️ Finished Pulling Docker Image: {spec.id}')
+ _logger.info(f'⬇️ Pulling Docker Image: {spec.id}')
+ await self._pull_with_progress_logging(docker_client, spec.id)
+ _logger.info(f'⬇️ Finished Pulling Docker Image: {spec.id}')
except docker.errors.APIError as exc:
raise SandboxError(f'Error Getting Docker Image: {spec.id}') from exc
+
+ async def _pull_with_progress_logging(
+ self, docker_client: docker.DockerClient, image_id: str
+ ):
+ """Pull Docker image with periodic progress logging every 5 seconds."""
+ # Event to signal when pull is complete
+ pull_complete = asyncio.Event()
+
+ async def periodic_logger():
+ """Log progress message every 5 seconds until pull is complete."""
+ while not pull_complete.is_set():
+ try:
+ await asyncio.wait_for(pull_complete.wait(), timeout=5.0)
+ break # Pull completed
+ except asyncio.TimeoutError:
+ # 5 seconds elapsed, log progress message
+ _logger.info(f'🔄 Downloading Docker Image: {image_id}...')
+
+ async def pull_image():
+ """Perform the actual Docker image pull."""
+ try:
+ loop = asyncio.get_running_loop()
+ await loop.run_in_executor(None, docker_client.images.pull, image_id)
+ finally:
+ pull_complete.set()
+
+ # Run both tasks concurrently
+ logger_task = asyncio.create_task(periodic_logger())
+ pull_task = asyncio.create_task(pull_image())
+
+ try:
+ # Wait for pull to complete
+ await pull_task
+ finally:
+ # Ensure logger task is cancelled if still running
+ if not logger_task.done():
+ logger_task.cancel()
+ try:
+ await logger_task
+ except asyncio.CancelledError:
+ pass
diff --git a/openhands/app_server/sandbox/process_sandbox_service.py b/openhands/app_server/sandbox/process_sandbox_service.py
index 955f6368bccd..716c2e1b1916 100644
--- a/openhands/app_server/sandbox/process_sandbox_service.py
+++ b/openhands/app_server/sandbox/process_sandbox_service.py
@@ -37,6 +37,9 @@
from openhands.app_server.sandbox.sandbox_spec_models import SandboxSpecInfo
from openhands.app_server.sandbox.sandbox_spec_service import SandboxSpecService
from openhands.app_server.services.injector import InjectorState
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
_logger = logging.getLogger(__name__)
@@ -158,9 +161,10 @@ async def _wait_for_server_ready(self, port: int, timeout: int = 30) -> bool:
start_time = time.time()
while time.time() - start_time < timeout:
try:
- response = await self.httpx_client.get(
- f'http://localhost:{port}/alive', timeout=5.0
+ url = replace_localhost_hostname_for_docker(
+ f'http://localhost:{port}/alive'
)
+ response = await self.httpx_client.get(url, timeout=5.0)
if response.status_code == 200:
data = response.json()
if data.get('status') == 'ok':
@@ -199,15 +203,16 @@ async def _process_to_sandbox_info(
if status == SandboxStatus.RUNNING:
# Check if server is actually responding
try:
- response = await self.httpx_client.get(
- f'http://localhost:{process_info.port}{self.health_check_path}',
- timeout=5.0,
+ url = replace_localhost_hostname_for_docker(
+ f'http://localhost:{process_info.port}{self.health_check_path}'
)
+ response = await self.httpx_client.get(url, timeout=5.0)
if response.status_code == 200:
exposed_urls = [
ExposedUrl(
name=AGENT_SERVER,
url=f'http://localhost:{process_info.port}',
+ port=process_info.port,
),
]
session_api_key = process_info.session_api_key
diff --git a/openhands/app_server/sandbox/remote_sandbox_service.py b/openhands/app_server/sandbox/remote_sandbox_service.py
index c7d444c4ec7c..dfa029462e41 100644
--- a/openhands/app_server/sandbox/remote_sandbox_service.py
+++ b/openhands/app_server/sandbox/remote_sandbox_service.py
@@ -64,6 +64,10 @@
'starting': SandboxStatus.STARTING,
'error': SandboxStatus.ERROR,
}
+AGENT_SERVER_PORT = 60000
+VSCODE_PORT = 60001
+WORKER_1_PORT = 12000
+WORKER_2_PORT = 12001
class StoredRemoteSandbox(Base): # type: ignore
@@ -138,17 +142,29 @@ async def _to_sandbox_info(
exposed_urls = []
url = runtime.get('url', None)
if url:
- exposed_urls.append(ExposedUrl(name=AGENT_SERVER, url=url))
+ exposed_urls.append(
+ ExposedUrl(name=AGENT_SERVER, url=url, port=AGENT_SERVER_PORT)
+ )
vscode_url = (
_build_service_url(url, 'vscode')
+ f'/?tkn={session_api_key}&folder=%2Fworkspace%2Fproject'
)
- exposed_urls.append(ExposedUrl(name=VSCODE, url=vscode_url))
exposed_urls.append(
- ExposedUrl(name=WORKER_1, url=_build_service_url(url, 'work-1'))
+ ExposedUrl(name=VSCODE, url=vscode_url, port=VSCODE_PORT)
+ )
+ exposed_urls.append(
+ ExposedUrl(
+ name=WORKER_1,
+ url=_build_service_url(url, 'work-1'),
+ port=WORKER_1_PORT,
+ )
)
exposed_urls.append(
- ExposedUrl(name=WORKER_2, url=_build_service_url(url, 'work-2'))
+ ExposedUrl(
+ name=WORKER_2,
+ url=_build_service_url(url, 'work-2'),
+ port=WORKER_2_PORT,
+ )
)
else:
exposed_urls = None
@@ -318,7 +334,6 @@ async def start_sandbox(self, sandbox_spec_id: str | None = None) -> SandboxInfo
created_at=utc_now(),
)
self.db_session.add(stored_sandbox)
- await self.db_session.commit()
# Prepare environment variables
environment = await self._init_environment(sandbox_spec, sandbox_id)
@@ -407,7 +422,6 @@ async def delete_sandbox(self, sandbox_id: str) -> bool:
if not stored_sandbox:
return False
await self.db_session.delete(stored_sandbox)
- await self.db_session.commit()
runtime_data = await self._get_runtime(sandbox_id)
response = await self._send_runtime_api_request(
'POST',
diff --git a/openhands/app_server/sandbox/sandbox_models.py b/openhands/app_server/sandbox/sandbox_models.py
index a2157bac4157..948e5c17c24b 100644
--- a/openhands/app_server/sandbox/sandbox_models.py
+++ b/openhands/app_server/sandbox/sandbox_models.py
@@ -20,6 +20,7 @@ class ExposedUrl(BaseModel):
name: str
url: str
+ port: int
# Standard names
diff --git a/openhands/app_server/sandbox/sandbox_spec_service.py b/openhands/app_server/sandbox/sandbox_spec_service.py
index fd091ca130bd..3f1ff6b4d165 100644
--- a/openhands/app_server/sandbox/sandbox_spec_service.py
+++ b/openhands/app_server/sandbox/sandbox_spec_service.py
@@ -11,7 +11,7 @@
# The version of the agent server to use for deployments.
# Typically this will be the same as the values from the pyproject.toml
-AGENT_SERVER_IMAGE = 'ghcr.io/openhands/agent-server:f3c0c19-python'
+AGENT_SERVER_IMAGE = 'ghcr.io/openhands/agent-server:37c4b35-python'
class SandboxSpecService(ABC):
diff --git a/openhands/app_server/user/auth_user_context.py b/openhands/app_server/user/auth_user_context.py
index 53612364f5a3..8ea95036f498 100644
--- a/openhands/app_server/user/auth_user_context.py
+++ b/openhands/app_server/user/auth_user_context.py
@@ -9,7 +9,11 @@
from openhands.app_server.user.specifiy_user_context import USER_CONTEXT_ATTR
from openhands.app_server.user.user_context import UserContext, UserContextInjector
from openhands.app_server.user.user_models import UserInfo
-from openhands.integrations.provider import ProviderHandler, ProviderType
+from openhands.integrations.provider import (
+ PROVIDER_TOKEN_TYPE,
+ ProviderHandler,
+ ProviderType,
+)
from openhands.sdk.conversation.secret_source import SecretSource, StaticSecret
from openhands.server.user_auth.user_auth import UserAuth, get_user_auth
@@ -44,6 +48,9 @@ async def get_user_info(self) -> UserInfo:
self._user_info = user_info
return user_info
+ async def get_provider_tokens(self) -> PROVIDER_TOKEN_TYPE | None:
+ return await self.user_auth.get_provider_tokens()
+
async def get_provider_handler(self):
provider_handler = self._provider_handler
if not provider_handler:
@@ -78,6 +85,10 @@ async def get_secrets(self) -> dict[str, SecretSource]:
return results
+ async def get_mcp_api_key(self) -> str | None:
+ mcp_api_key = await self.user_auth.get_mcp_api_key()
+ return mcp_api_key
+
USER_ID_ATTR = 'user_id'
diff --git a/openhands/app_server/user/specifiy_user_context.py b/openhands/app_server/user/specifiy_user_context.py
index 0855b447bf69..87e2d74da20b 100644
--- a/openhands/app_server/user/specifiy_user_context.py
+++ b/openhands/app_server/user/specifiy_user_context.py
@@ -5,7 +5,7 @@
from openhands.app_server.errors import OpenHandsError
from openhands.app_server.user.user_context import UserContext
from openhands.app_server.user.user_models import UserInfo
-from openhands.integrations.provider import ProviderType
+from openhands.integrations.provider import PROVIDER_TOKEN_TYPE, ProviderType
from openhands.sdk.conversation.secret_source import SecretSource
@@ -24,12 +24,18 @@ async def get_user_info(self) -> UserInfo:
async def get_authenticated_git_url(self, repository: str) -> str:
raise NotImplementedError()
+ async def get_provider_tokens(self) -> PROVIDER_TOKEN_TYPE | None:
+ raise NotImplementedError()
+
async def get_latest_token(self, provider_type: ProviderType) -> str | None:
raise NotImplementedError()
async def get_secrets(self) -> dict[str, SecretSource]:
raise NotImplementedError()
+ async def get_mcp_api_key(self) -> str | None:
+ raise NotImplementedError()
+
USER_CONTEXT_ATTR = 'user_context'
ADMIN = SpecifyUserContext(user_id=None)
diff --git a/openhands/app_server/user/user_context.py b/openhands/app_server/user/user_context.py
index 75fe957160f7..02c0ba8aaf47 100644
--- a/openhands/app_server/user/user_context.py
+++ b/openhands/app_server/user/user_context.py
@@ -4,7 +4,7 @@
from openhands.app_server.user.user_models import (
UserInfo,
)
-from openhands.integrations.provider import ProviderType
+from openhands.integrations.provider import PROVIDER_TOKEN_TYPE, ProviderType
from openhands.sdk.conversation.secret_source import SecretSource
from openhands.sdk.utils.models import DiscriminatedUnionMixin
@@ -26,6 +26,10 @@ async def get_user_info(self) -> UserInfo:
async def get_authenticated_git_url(self, repository: str) -> str:
"""Get the provider tokens for the user"""
+ @abstractmethod
+ async def get_provider_tokens(self) -> PROVIDER_TOKEN_TYPE | None:
+ """Get the latest tokens for all provider types"""
+
@abstractmethod
async def get_latest_token(self, provider_type: ProviderType) -> str | None:
"""Get the latest token for the provider type given"""
@@ -34,6 +38,10 @@ async def get_latest_token(self, provider_type: ProviderType) -> str | None:
async def get_secrets(self) -> dict[str, SecretSource]:
"""Get custom secrets and github provider secrets for the conversation."""
+ @abstractmethod
+ async def get_mcp_api_key(self) -> str | None:
+ """Get an MCP API Key."""
+
class UserContextInjector(DiscriminatedUnionMixin, Injector[UserContext], ABC):
"""Injector for user contexts."""
diff --git a/openhands/app_server/utils/docker_utils.py b/openhands/app_server/utils/docker_utils.py
new file mode 100644
index 000000000000..03821c3974c3
--- /dev/null
+++ b/openhands/app_server/utils/docker_utils.py
@@ -0,0 +1,32 @@
+from urllib.parse import urlparse, urlunparse
+
+from openhands.utils.environment import is_running_in_docker
+
+
+def replace_localhost_hostname_for_docker(
+ url: str, replacement: str = 'host.docker.internal'
+) -> str:
+ """Replace localhost hostname in URL with the specified replacement when running in Docker.
+
+ This function only performs the replacement when the code is running inside a Docker
+ container. When not running in Docker, it returns the original URL unchanged.
+
+ Only replaces the hostname if it's exactly 'localhost', preserving all other
+ parts of the URL including port, path, query parameters, etc.
+
+ Args:
+ url: The URL to process
+ replacement: The hostname to replace localhost with (default: 'host.docker.internal')
+
+ Returns:
+ URL with localhost hostname replaced if running in Docker and hostname is localhost,
+ otherwise returns the original URL unchanged
+ """
+ if not is_running_in_docker():
+ return url
+ parsed = urlparse(url)
+ if parsed.hostname == 'localhost':
+ # Replace only the hostname part, preserving port and everything else
+ netloc = parsed.netloc.replace('localhost', replacement, 1)
+ return urlunparse(parsed._replace(netloc=netloc))
+ return url
diff --git a/openhands/app_server/utils/encryption_key.py b/openhands/app_server/utils/encryption_key.py
index 5815bce20e58..62224e1da166 100644
--- a/openhands/app_server/utils/encryption_key.py
+++ b/openhands/app_server/utils/encryption_key.py
@@ -1,3 +1,4 @@
+import hashlib
import os
from datetime import datetime
from pathlib import Path
@@ -30,8 +31,14 @@ def get_default_encryption_keys(workspace_dir: Path) -> list[EncryptionKey]:
"""Generate default encryption keys."""
master_key = os.getenv('JWT_SECRET')
if master_key:
+ # Derive a deterministic key ID from the secret itself.
+ # This ensures all pods using the same JWT_SECRET get the same key ID,
+ # which is critical for multi-pod deployments where tokens may be
+ # created by one pod and verified by another.
+ key_id = base62.encodebytes(hashlib.sha256(master_key.encode()).digest())
return [
EncryptionKey(
+ id=key_id,
key=SecretStr(master_key),
active=True,
notes='jwt secret master key',
diff --git a/openhands/app_server/utils/import_utils.py b/openhands/app_server/utils/import_utils.py
index 930db99e7ffe..325416d30901 100644
--- a/openhands/app_server/utils/import_utils.py
+++ b/openhands/app_server/utils/import_utils.py
@@ -71,7 +71,7 @@ def get_impl(cls: type[T], impl_name: str | None) -> type[T]:
Common Use Cases:
- Server components (ConversationService, UserAuth, etc.)
- Storage implementations (ConversationStore, SettingsStore, etc.)
- - Service integrations (GitHub, GitLab, Bitbucket services)
+ - Service integrations (GitHub, GitLab, Bitbucket, Azure DevOps services)
The implementation is cached to avoid repeated imports of the same class.
"""
diff --git a/openhands/controller/agent_controller.py b/openhands/controller/agent_controller.py
index e9616c66b5fa..958e5cb34837 100644
--- a/openhands/controller/agent_controller.py
+++ b/openhands/controller/agent_controller.py
@@ -877,7 +877,7 @@ async def _step(self) -> None:
# Synchronize spend across all llm services with the budget flag
self.state_tracker.sync_budget_flag_with_metrics()
- if self._is_stuck():
+ if self.agent.config.enable_stuck_detection and self._is_stuck():
await self._react_to_exception(
AgentStuckInLoopError('Agent got stuck in a loop')
)
diff --git a/openhands/core/config/README.md b/openhands/core/config/README.md
index c612a0824403..b16fac1fb5a7 100644
--- a/openhands/core/config/README.md
+++ b/openhands/core/config/README.md
@@ -32,6 +32,7 @@ The `load_from_env` function in the config package is responsible for loading co
export LLM_API_KEY='your_api_key_here'
export LLM_MODEL='gpt-4'
export AGENT_MEMORY_ENABLED='true'
+export AGENT_ENABLE_STUCK_DETECTION='false' # Disable loop detection
export SANDBOX_TIMEOUT='300'
```
diff --git a/openhands/core/config/agent_config.py b/openhands/core/config/agent_config.py
index 3c506c9382d2..b9b5873e9e29 100644
--- a/openhands/core/config/agent_config.py
+++ b/openhands/core/config/agent_config.py
@@ -51,6 +51,8 @@ class AgentConfig(BaseModel):
"""Whether to enable SoM (Set of Marks) visual browsing."""
enable_plan_mode: bool = Field(default=True)
"""Whether to enable plan mode, which uses the long horizon system message and add the new tool - task_tracker - for planning, tracking and executing complex tasks."""
+ enable_stuck_detection: bool = Field(default=True)
+ """Whether to enable stuck/loop detection. When disabled, the agent will not automatically detect and recover from loops."""
condenser: CondenserConfig = Field(
# The default condenser is set to the conversation window condenser -- if
# we use NoOp and the conversation hits the LLM context length limit,
diff --git a/openhands/integrations/azure_devops/azure_devops_service.py b/openhands/integrations/azure_devops/azure_devops_service.py
new file mode 100644
index 000000000000..8d719cbb5439
--- /dev/null
+++ b/openhands/integrations/azure_devops/azure_devops_service.py
@@ -0,0 +1,249 @@
+import os
+from typing import Any
+
+import httpx
+from pydantic import SecretStr
+
+from openhands.integrations.azure_devops.service.branches import (
+ AzureDevOpsBranchesMixin,
+)
+from openhands.integrations.azure_devops.service.features import (
+ AzureDevOpsFeaturesMixin,
+)
+from openhands.integrations.azure_devops.service.prs import AzureDevOpsPRsMixin
+from openhands.integrations.azure_devops.service.repos import AzureDevOpsReposMixin
+from openhands.integrations.azure_devops.service.resolver import (
+ AzureDevOpsResolverMixin,
+)
+from openhands.integrations.azure_devops.service.work_items import (
+ AzureDevOpsWorkItemsMixin,
+)
+from openhands.integrations.protocols.http_client import HTTPClient
+from openhands.integrations.service_types import (
+ BaseGitService,
+ GitService,
+ ProviderType,
+ RequestMethod,
+)
+from openhands.utils.import_utils import get_impl
+
+
+class AzureDevOpsServiceImpl(
+ AzureDevOpsResolverMixin,
+ AzureDevOpsReposMixin,
+ AzureDevOpsBranchesMixin,
+ AzureDevOpsPRsMixin,
+ AzureDevOpsWorkItemsMixin,
+ AzureDevOpsFeaturesMixin,
+ BaseGitService,
+ HTTPClient,
+ GitService,
+):
+ """Azure DevOps service implementation using modular mixins.
+
+ This class inherits functionality from specialized mixins:
+ - AzureDevOpsResolverMixin: PR/work item comment resolution
+ - AzureDevOpsReposMixin: Repository operations
+ - AzureDevOpsBranchesMixin: Branch operations
+ - AzureDevOpsPRsMixin: Pull request operations
+ - AzureDevOpsWorkItemsMixin: Work item operations (unique to Azure DevOps)
+ - AzureDevOpsFeaturesMixin: Microagents, suggested tasks, user info
+
+ This is an extension point in OpenHands that allows applications to customize Azure DevOps
+ integration behavior. Applications can substitute their own implementation by:
+ 1. Creating a class that inherits from GitService
+ 2. Implementing all required methods
+ 3. Setting OPENHANDS_AZURE_DEVOPS_SERVICE_CLS environment variable
+
+ The class is instantiated via get_impl() at module load time.
+ """
+
+ token: SecretStr = SecretStr('')
+ refresh = False
+ organization: str = ''
+
+ def __init__(
+ self,
+ user_id: str | None = None,
+ external_auth_id: str | None = None,
+ external_auth_token: SecretStr | None = None,
+ token: SecretStr | None = None,
+ external_token_manager: bool = False,
+ base_domain: str | None = None,
+ ):
+ self.user_id = user_id
+ self.external_token_manager = external_token_manager
+
+ if token:
+ self.token = token
+
+ if base_domain:
+ # Parse organization from base_domain
+ # Strip URL prefix if present (e.g., "https://dev.azure.com/org" -> "org")
+ domain_path = base_domain
+ if '://' in domain_path:
+ # Remove protocol and domain, keep only path
+ domain_path = domain_path.split('://', 1)[1]
+ if '/' in domain_path:
+ domain_path = domain_path.split('/', 1)[1]
+
+ # Format expected: organization (e.g., "contoso")
+ # Take first part only (in case user still enters org/project)
+ parts = domain_path.split('/')
+ if len(parts) >= 1:
+ self.organization = parts[0]
+
+ async def get_installations(self) -> list[str]:
+ """Get Azure DevOps organizations.
+
+ For Azure DevOps, 'installations' are equivalent to organizations.
+ Since authentication is per-organization, return the current organization.
+ """
+ return [self.organization]
+
+ @property
+ def provider(self) -> str:
+ return ProviderType.AZURE_DEVOPS.value
+
+ @property
+ def base_url(self) -> str:
+ """Get the base URL for Azure DevOps API calls."""
+ return f'https://dev.azure.com/{self.organization}'
+
+ @staticmethod
+ def _is_oauth_token(token: str) -> bool:
+ """Check if a token is an OAuth JWT token (from SSO) vs a PAT.
+
+ OAuth tokens from Azure AD/Entra ID are JWTs with the format:
+ header.payload.signature (three base64url-encoded parts separated by dots)
+
+ PATs are opaque tokens without this structure.
+
+ Args:
+ token: The token string to check
+
+ Returns:
+ True if the token appears to be a JWT (OAuth), False if it's a PAT
+ """
+ # JWTs have exactly 3 parts separated by dots
+ parts = token.split('.')
+ return len(parts) == 3 and all(len(part) > 0 for part in parts)
+
+ async def _get_azure_devops_headers(self) -> dict[str, Any]:
+ """Retrieve the Azure DevOps authentication headers.
+
+ Supports two authentication methods:
+ 1. OAuth 2.0 (Bearer token) - Used for SSO/SaaS mode with Keycloak/Azure AD
+ 2. Personal Access Token (Basic auth) - Used for self-hosted mode
+
+ The method automatically detects the token type:
+ - OAuth tokens are JWTs (header.payload.signature format) -> uses Bearer auth
+ - PATs are opaque strings -> uses Basic auth
+
+ Returns:
+ dict: HTTP headers with appropriate Authorization header
+ """
+ if not self.token:
+ latest_token = await self.get_latest_token()
+ if latest_token:
+ self.token = latest_token
+
+ token_value = self.token.get_secret_value()
+
+ # Detect token type and use appropriate authentication method
+ if self._is_oauth_token(token_value):
+ # OAuth 2.0 access token from SSO (Azure AD/Keycloak broker)
+ # Use Bearer authentication as per OAuth 2.0 spec
+ auth_header = f'Bearer {token_value}'
+ else:
+ # Personal Access Token (PAT) for self-hosted deployments
+ # Use Basic authentication with empty username and PAT as password
+ import base64
+
+ auth_str = base64.b64encode(f':{token_value}'.encode()).decode()
+ auth_header = f'Basic {auth_str}'
+
+ return {
+ 'Authorization': auth_header,
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+ async def _get_headers(self) -> dict[str, Any]:
+ """Retrieve the Azure DevOps headers."""
+ return await self._get_azure_devops_headers()
+
+ def _has_token_expired(self, status_code: int) -> bool:
+ return status_code == 401
+
+ async def get_latest_token(self) -> SecretStr | None:
+ return self.token
+
+ async def _make_request(
+ self,
+ url: str,
+ params: dict | None = None,
+ method: RequestMethod = RequestMethod.GET,
+ ) -> tuple[Any, dict]:
+ try:
+ async with httpx.AsyncClient() as client:
+ azure_devops_headers = await self._get_azure_devops_headers()
+
+ # Make initial request
+ response = await self.execute_request(
+ client=client,
+ url=url,
+ headers=azure_devops_headers,
+ params=params,
+ method=method,
+ )
+
+ # Handle token refresh if needed
+ if self.refresh and self._has_token_expired(response.status_code):
+ await self.get_latest_token()
+ azure_devops_headers = await self._get_azure_devops_headers()
+ response = await self.execute_request(
+ client=client,
+ url=url,
+ headers=azure_devops_headers,
+ params=params,
+ method=method,
+ )
+
+ response.raise_for_status()
+ headers = {}
+ if 'Link' in response.headers:
+ headers['Link'] = response.headers['Link']
+
+ return response.json(), headers
+
+ except httpx.HTTPStatusError as e:
+ raise self.handle_http_status_error(e)
+ except httpx.HTTPError as e:
+ raise self.handle_http_error(e)
+
+ def _parse_repository(self, repository: str) -> tuple[str, str, str]:
+ """Parse repository string into organization, project, and repo name.
+
+ Args:
+ repository: Repository string in format organization/project/repo
+
+ Returns:
+ Tuple of (organization, project, repo_name)
+ """
+ parts = repository.split('/')
+ if len(parts) < 3:
+ raise ValueError(
+ f'Invalid repository format: {repository}. Expected format: organization/project/repo'
+ )
+ return parts[0], parts[1], parts[2]
+
+
+# Dynamic class loading to support custom implementations (e.g., SaaS)
+azure_devops_service_cls = os.environ.get(
+ 'OPENHANDS_AZURE_DEVOPS_SERVICE_CLS',
+ 'openhands.integrations.azure_devops.azure_devops_service.AzureDevOpsServiceImpl',
+)
+AzureDevOpsServiceImpl = get_impl( # type: ignore[misc]
+ AzureDevOpsServiceImpl, azure_devops_service_cls
+)
diff --git a/openhands/integrations/azure_devops/service/__init__.py b/openhands/integrations/azure_devops/service/__init__.py
new file mode 100644
index 000000000000..fe8e72aec9f6
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/__init__.py
@@ -0,0 +1 @@
+# Azure DevOps Service mixins
diff --git a/openhands/integrations/azure_devops/service/base.py b/openhands/integrations/azure_devops/service/base.py
new file mode 100644
index 000000000000..438308b2827b
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/base.py
@@ -0,0 +1,67 @@
+from abc import abstractmethod
+from typing import Any
+from urllib.parse import quote
+
+from pydantic import SecretStr
+
+from openhands.integrations.protocols.http_client import HTTPClient
+from openhands.integrations.service_types import (
+ BaseGitService,
+ RequestMethod,
+)
+
+
+class AzureDevOpsMixinBase(BaseGitService, HTTPClient):
+ """Declares common attributes and method signatures used across Azure DevOps mixins."""
+
+ organization: str
+
+ @property
+ @abstractmethod
+ def base_url(self) -> str:
+ """Get the base URL for Azure DevOps API calls."""
+ ...
+
+ async def _get_headers(self) -> dict:
+ """Retrieve the Azure DevOps token from settings store to construct the headers."""
+ if not self.token:
+ latest_token = await self.get_latest_token()
+ if latest_token:
+ self.token = latest_token
+
+ return {
+ 'Authorization': f'Bearer {self.token.get_secret_value() if self.token else ""}',
+ 'Content-Type': 'application/json',
+ }
+
+ async def get_latest_token(self) -> SecretStr | None: # type: ignore[override]
+ return self.token
+
+ async def _make_request(
+ self,
+ url: str,
+ params: dict | None = None,
+ method: RequestMethod = RequestMethod.GET,
+ ) -> tuple[Any, dict]: # type: ignore[override]
+ """Make HTTP request to Azure DevOps API."""
+ raise NotImplementedError('Implemented in AzureDevOpsServiceImpl')
+
+ def _parse_repository(self, repository: str) -> tuple[str, str, str]:
+ """Parse repository string into organization, project, and repo name."""
+ raise NotImplementedError('Implemented in AzureDevOpsServiceImpl')
+
+ def _truncate_comment(self, comment: str, max_length: int = 1000) -> str:
+ """Truncate comment to max length."""
+ raise NotImplementedError('Implemented in AzureDevOpsServiceImpl')
+
+ @staticmethod
+ def _encode_url_component(component: str) -> str:
+ """URL-encode a component for use in Azure DevOps API URLs.
+
+ Args:
+ component: The string component to encode (e.g., repo name, project name, org name)
+
+ Returns:
+ URL-encoded string with spaces and special characters properly encoded
+ """
+ return quote(component, safe='')
diff --git a/openhands/integrations/azure_devops/service/branches.py b/openhands/integrations/azure_devops/service/branches.py
new file mode 100644
index 000000000000..84fbe3f7f0f1
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/branches.py
@@ -0,0 +1,195 @@
+"""Branch operations for Azure DevOps integration."""
+
+from openhands.integrations.azure_devops.service.base import AzureDevOpsMixinBase
+from openhands.integrations.service_types import Branch, PaginatedBranchesResponse
+
+
+class AzureDevOpsBranchesMixin(AzureDevOpsMixinBase):
+ """Mixin for Azure DevOps branch operations."""
+
+ async def get_branches(self, repository: str) -> list[Branch]:
+ """Get branches for a repository."""
+ # Parse repository string: organization/project/repo
+ parts = repository.split('/')
+ if len(parts) < 3:
+ raise ValueError(
+ f'Invalid repository format: {repository}. Expected format: organization/project/repo'
+ )
+
+ org = parts[0]
+ project = parts[1]
+ repo_name = parts[2]
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo_name)
+
+ url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/refs?api-version=7.1&filter=heads/'
+
+ # Set maximum branches to fetch
+ MAX_BRANCHES = 1000
+
+ response, _ = await self._make_request(url)
+ branches_data = response.get('value', [])
+
+ all_branches = []
+
+ for branch_data in branches_data:
+ # Extract branch name from the ref (e.g., "refs/heads/main" -> "main")
+ name = branch_data.get('name', '').replace('refs/heads/', '')
+
+ # Get the commit details for this branch
+ object_id = branch_data.get('objectId', '')
+ commit_url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/commits/{object_id}?api-version=7.1'
+ commit_data, _ = await self._make_request(commit_url)
+
+ # Check if the branch is protected
+ name_enc = self._encode_url_component(name)
+ policy_url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/policy/configurations?api-version=7.1&repositoryId={repo_enc}&refName=refs/heads/{name_enc}'
+ policy_data, _ = await self._make_request(policy_url)
+ is_protected = len(policy_data.get('value', [])) > 0
+
+ branch = Branch(
+ name=name,
+ commit_sha=object_id,
+ protected=is_protected,
+ last_push_date=commit_data.get('committer', {}).get('date'),
+ )
+ all_branches.append(branch)
+
+ if len(all_branches) >= MAX_BRANCHES:
+ break
+
+ return all_branches
+
+ async def get_paginated_branches(
+ self, repository: str, page: int = 1, per_page: int = 30
+ ) -> PaginatedBranchesResponse:
+ """Get branches for a repository with pagination."""
+ # Parse repository string: organization/project/repo
+ parts = repository.split('/')
+ if len(parts) < 3:
+ raise ValueError(
+ f'Invalid repository format: {repository}. Expected format: organization/project/repo'
+ )
+
+ org = parts[0]
+ project = parts[1]
+ repo_name = parts[2]
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo_name)
+
+ # First, get the repository to get its ID
+ repo_url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}?api-version=7.1'
+ repo_data, _ = await self._make_request(repo_url)
+ repo_id = repo_data.get(
+ 'id', repo_name
+ ) # Fall back to repo_name if ID not found
+
+ url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/refs?api-version=7.1&filter=heads/'
+
+ response, _ = await self._make_request(url)
+ branches_data = response.get('value', [])
+
+ # Calculate pagination
+ start_idx = (page - 1) * per_page
+ end_idx = start_idx + per_page
+ paginated_data = branches_data[start_idx:end_idx]
+
+ branches: list[Branch] = []
+ for branch_data in paginated_data:
+ # Extract branch name from the ref (e.g., "refs/heads/main" -> "main")
+ name = branch_data.get('name', '').replace('refs/heads/', '')
+
+ # Get the commit details for this branch
+ object_id = branch_data.get('objectId', '')
+ commit_url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/commits/{object_id}?api-version=7.1'
+ commit_data, _ = await self._make_request(commit_url)
+
+ # Check if the branch is protected using repository ID
+ name_enc = self._encode_url_component(name)
+ policy_url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/policy/configurations?api-version=7.1&repositoryId={repo_id}&refName=refs/heads/{name_enc}'
+ policy_data, _ = await self._make_request(policy_url)
+ is_protected = len(policy_data.get('value', [])) > 0
+
+ branch = Branch(
+ name=name,
+ commit_sha=object_id,
+ protected=is_protected,
+ last_push_date=commit_data.get('committer', {}).get('date'),
+ )
+ branches.append(branch)
+
+ # Determine if there's a next page
+ has_next_page = end_idx < len(branches_data)
+
+ return PaginatedBranchesResponse(
+ branches=branches,
+ has_next_page=has_next_page,
+ current_page=page,
+ per_page=per_page,
+ )
+
+ async def search_branches(
+ self, repository: str, query: str, per_page: int = 30
+ ) -> list[Branch]:
+ """Search for branches within a repository."""
+ # Parse repository string: organization/project/repo
+ parts = repository.split('/')
+ if len(parts) < 3:
+ raise ValueError(
+ f'Invalid repository format: {repository}. Expected format: organization/project/repo'
+ )
+
+ org = parts[0]
+ project = parts[1]
+ repo_name = parts[2]
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo_name)
+
+ url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/refs?api-version=7.1&filter=heads/'
+
+ try:
+ response, _ = await self._make_request(url)
+ branches_data = response.get('value', [])
+
+ # Filter branches by query
+ filtered_branches = []
+ for branch_data in branches_data:
+ # Extract branch name from the ref (e.g., "refs/heads/main" -> "main")
+ name = branch_data.get('name', '').replace('refs/heads/', '')
+
+ # Check if query matches branch name
+ if query.lower() in name.lower():
+ object_id = branch_data.get('objectId', '')
+
+ # Get commit details for this branch
+ commit_url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/commits/{object_id}?api-version=7.1'
+ try:
+ commit_data, _ = await self._make_request(commit_url)
+ last_push_date = commit_data.get('committer', {}).get('date')
+ except Exception:
+ last_push_date = None
+
+ branch = Branch(
+ name=name,
+ commit_sha=object_id,
+ protected=False, # Skip protected check for search to improve performance
+ last_push_date=last_push_date,
+ )
+ filtered_branches.append(branch)
+
+ if len(filtered_branches) >= per_page:
+ break
+
+ return filtered_branches
+ except Exception:
+ # Return empty list on error instead of None
+ return []
diff --git a/openhands/integrations/azure_devops/service/features.py b/openhands/integrations/azure_devops/service/features.py
new file mode 100644
index 000000000000..9f74f21a3abb
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/features.py
@@ -0,0 +1,223 @@
+"""Feature operations for Azure DevOps integration (microagents, suggested tasks, user)."""
+
+from openhands.core.logger import openhands_logger as logger
+from openhands.integrations.azure_devops.service.base import AzureDevOpsMixinBase
+from openhands.integrations.service_types import (
+ MicroagentContentResponse,
+ ProviderType,
+ RequestMethod,
+ SuggestedTask,
+ TaskType,
+ User,
+)
+
+
+class AzureDevOpsFeaturesMixin(AzureDevOpsMixinBase):
+ """Mixin for Azure DevOps feature operations (microagents, suggested tasks, user info)."""
+
+ async def get_user(self) -> User:
+ """Get the authenticated user's information."""
+ url = f'{self.base_url}/_apis/connectionData?api-version=7.1-preview.1'
+ response, _ = await self._make_request(url)
+
+ # Extract authenticated user details
+ authenticated_user = response.get('authenticatedUser', {})
+ user_id = authenticated_user.get('id', '')
+ display_name = authenticated_user.get('providerDisplayName', '')
+
+ # Get descriptor for potential additional details
+ authenticated_user.get('descriptor', '')
+
+ return User(
+ id=str(user_id),
+ login=display_name,
+ avatar_url='',
+ name=display_name,
+ email='',
+ company=None,
+ )
+
+ async def get_suggested_tasks(self) -> list[SuggestedTask]:
+ """Get suggested tasks for the authenticated user across all repositories."""
+ # Azure DevOps requires querying each project separately for PRs and work items
+ # Since we no longer specify a single project, we need to query all projects
+ # Get all projects first
+ projects_url = f'{self.base_url}/_apis/projects?api-version=7.1'
+ projects_response, _ = await self._make_request(projects_url)
+ projects = projects_response.get('value', [])
+
+ # Get user info
+ user = await self.get_user()
+ tasks = []
+
+ # Query each project for pull requests and work items
+ for project in projects:
+ project_name = project.get('name')
+
+ try:
+ # URL-encode project name to handle spaces and special characters
+ project_enc = self._encode_url_component(project_name)
+
+ # Get pull requests created by the user in this project
+ url = f'{self.base_url}/{project_enc}/_apis/git/pullrequests?api-version=7.1&searchCriteria.creatorId={user.id}&searchCriteria.status=active'
+ response, _ = await self._make_request(url)
+
+ pull_requests = response.get('value', [])
+
+ for pr in pull_requests:
+ repo_name = pr.get('repository', {}).get('name', '')
+ pr_id = pr.get('pullRequestId')
+ title = pr.get('title', '')
+
+ # Check for merge conflicts
+ if pr.get('mergeStatus') == 'conflicts':
+ tasks.append(
+ SuggestedTask(
+ git_provider=ProviderType.AZURE_DEVOPS,
+ task_type=TaskType.MERGE_CONFLICTS,
+ repo=f'{self.organization}/{project_name}/{repo_name}',
+ issue_number=pr_id,
+ title=title,
+ )
+ )
+ # Check for failing checks
+ elif pr.get('status') == 'failed':
+ tasks.append(
+ SuggestedTask(
+ git_provider=ProviderType.AZURE_DEVOPS,
+ task_type=TaskType.FAILING_CHECKS,
+ repo=f'{self.organization}/{project_name}/{repo_name}',
+ issue_number=pr_id,
+ title=title,
+ )
+ )
+ # Check for unresolved comments
+ elif pr.get('hasUnresolvedComments', False):
+ tasks.append(
+ SuggestedTask(
+ git_provider=ProviderType.AZURE_DEVOPS,
+ task_type=TaskType.UNRESOLVED_COMMENTS,
+ repo=f'{self.organization}/{project_name}/{repo_name}',
+ issue_number=pr_id,
+ title=title,
+ )
+ )
+
+ # Get work items assigned to the user in this project
+ work_items_url = (
+ f'{self.base_url}/{project_enc}/_apis/wit/wiql?api-version=7.1'
+ )
+ wiql_query = {
+ 'query': "SELECT [System.Id], [System.Title], [System.State] FROM WorkItems WHERE [System.AssignedTo] = @me AND [System.State] = 'Active'"
+ }
+
+ work_items_response, _ = await self._make_request(
+ url=work_items_url, params=wiql_query, method=RequestMethod.POST
+ )
+
+ work_item_references = work_items_response.get('workItems', [])
+
+ # Get details for each work item
+ for work_item_ref in work_item_references:
+ work_item_id = work_item_ref.get('id')
+ work_item_url = f'{self.base_url}/{project_enc}/_apis/wit/workitems/{work_item_id}?api-version=7.1'
+ work_item, _ = await self._make_request(work_item_url)
+
+ title = work_item.get('fields', {}).get('System.Title', '')
+
+ tasks.append(
+ SuggestedTask(
+ git_provider=ProviderType.AZURE_DEVOPS,
+ task_type=TaskType.OPEN_ISSUE,
+ repo=f'{self.organization}/{project_name}',
+ issue_number=work_item_id,
+ title=title,
+ )
+ )
+ except Exception:
+ # Skip projects that fail (e.g., no access, no work items enabled)
+ continue
+
+ return tasks
+
+ async def _get_cursorrules_url(self, repository: str) -> str:
+ """Get the URL for checking .cursorrules file in Azure DevOps."""
+ org, project, repo = self._parse_repository(repository)
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+ return f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/items?path=/.cursorrules&api-version=7.1'
+
+ async def _get_microagents_directory_url(
+ self, repository: str, microagents_path: str
+ ) -> str:
+ """Get the URL for checking microagents directory in Azure DevOps.
+
+ Note: For org-level microagents (e.g., 'org/.openhands'), Azure DevOps doesn't support
+ this concept, so we raise ValueError to let the caller fall back to other providers.
+ """
+ parts = repository.split('/')
+ if len(parts) < 3:
+ # Azure DevOps doesn't support org-level configs, only full repo paths
+ raise ValueError(
+ f'Invalid repository format: {repository}. Expected format: organization/project/repo'
+ )
+ org, project, repo = parts[0], parts[1], parts[2]
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+ return f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/items?path=/{microagents_path}&recursionLevel=OneLevel&api-version=7.1'
+
+ def _get_microagents_directory_params(self, microagents_path: str) -> dict | None:
+ """Get parameters for the microagents directory request. Return None if no parameters needed."""
+ return None
+
+ def _is_valid_microagent_file(self, item: dict) -> bool:
+ """Check if an item represents a valid microagent file in Azure DevOps."""
+ return (
+ not item.get('isFolder', False)
+ and item.get('path', '').endswith('.md')
+ and not item.get('path', '').endswith('README.md')
+ )
+
+ def _get_file_name_from_item(self, item: dict) -> str:
+ """Extract file name from directory item in Azure DevOps."""
+ path = item.get('path', '')
+ return path.split('/')[-1] if path else ''
+
+ def _get_file_path_from_item(self, item: dict, microagents_path: str) -> str:
+ """Extract file path from directory item in Azure DevOps."""
+ return item.get('path', '').lstrip('/')
+
+ async def get_microagent_content(
+ self, repository: str, file_path: str
+ ) -> MicroagentContentResponse:
+ """Get content of a specific microagent file.
+
+ Args:
+ repository: Repository name in Azure DevOps format 'org/project/repo'
+ file_path: Path to the microagent file
+
+ Returns:
+ MicroagentContentResponse with parsed content and triggers
+ """
+ org, project, repo = self._parse_repository(repository)
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/items?path={file_path}&api-version=7.1'
+
+ try:
+ response, _ = await self._make_request(url)
+ content = (
+ response if isinstance(response, str) else response.get('content', '')
+ )
+
+ # Parse the content using the base class method
+ return self._parse_microagent_content(content, file_path)
+ except Exception as e:
+ logger.warning(f'Failed to fetch microagent content from {file_path}: {e}')
+ raise
diff --git a/openhands/integrations/azure_devops/service/prs.py b/openhands/integrations/azure_devops/service/prs.py
new file mode 100644
index 000000000000..c4cfefe09c6e
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/prs.py
@@ -0,0 +1,321 @@
+"""Pull request operations for Azure DevOps integration."""
+
+from datetime import datetime
+
+from openhands.core.logger import openhands_logger as logger
+from openhands.integrations.azure_devops.service.base import AzureDevOpsMixinBase
+from openhands.integrations.service_types import Comment, RequestMethod
+
+
+class AzureDevOpsPRsMixin(AzureDevOpsMixinBase):
+ """Mixin for Azure DevOps pull request operations."""
+
+ def _truncate_comment(self, comment: str, max_length: int = 1000) -> str:
+ """Truncate comment to max length."""
+ if len(comment) <= max_length:
+ return comment
+ return comment[:max_length] + '...'
+
+ async def add_pr_thread(
+ self,
+ repository: str,
+ pr_number: int,
+ comment_text: str,
+ status: str = 'active',
+ ) -> dict:
+ """Create a new thread (comment) in an Azure DevOps pull request.
+
+ Azure DevOps uses 'threads' concept where each thread contains comments.
+ This creates a new thread with a single comment for general PR discussion.
+
+ API Reference: https://learn.microsoft.com/en-us/rest/api/azure/devops/git/pull-request-threads/create
+
+ Args:
+ repository: Repository name in format "organization/project/repo"
+ pr_number: The pull request number
+ comment_text: The comment text to post
+ status: Thread status ('active', 'fixed', 'wontFix', 'closed', 'byDesign', 'pending')
+
+ Returns:
+ API response with created thread information
+
+ Raises:
+ HTTPException: If the API request fails
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests/{pr_number}/threads?api-version=7.1'
+
+ # Create thread payload with a comment
+ # Reference: https://learn.microsoft.com/en-us/rest/api/azure/devops/git/pull-request-threads/create
+ payload = {
+ 'comments': [
+ {
+ 'parentCommentId': 0,
+ 'content': comment_text,
+ 'commentType': 1, # 1 = text comment
+ }
+ ],
+ 'status': status,
+ }
+
+ response, _ = await self._make_request(
+ url=url, params=payload, method=RequestMethod.POST
+ )
+
+ logger.info(f'Created PR thread in {repository}#{pr_number}')
+ return response
+
+ async def add_pr_comment_to_thread(
+ self,
+ repository: str,
+ pr_number: int,
+ thread_id: int,
+ comment_text: str,
+ ) -> dict:
+ """Add a comment to an existing thread in an Azure DevOps pull request.
+
+ API Reference: https://learn.microsoft.com/en-us/rest/api/azure/devops/git/pull-request-thread-comments/create
+
+ Args:
+ repository: Repository name in format "organization/project/repo"
+ pr_number: The pull request number
+ thread_id: The thread ID to add the comment to
+ comment_text: The comment text to post
+
+ Returns:
+ API response with created comment information
+
+ Raises:
+ HTTPException: If the API request fails
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests/{pr_number}/threads/{thread_id}/comments?api-version=7.1'
+
+ payload = {
+ 'content': comment_text,
+ 'parentCommentId': 1, # Reply to the thread's root comment
+ 'commentType': 1, # 1 = text comment
+ }
+
+ response, _ = await self._make_request(
+ url=url, params=payload, method=RequestMethod.POST
+ )
+
+ logger.info(
+ f'Added comment to thread {thread_id} in PR {repository}#{pr_number}'
+ )
+ return response
+
+ async def get_pr_threads(self, repository: str, pr_number: int) -> list[dict]:
+ """Get all threads (comment conversations) for a pull request.
+
+ API Reference: https://learn.microsoft.com/en-us/rest/api/azure/devops/git/pull-request-threads/list
+
+ Args:
+ repository: Repository name in format "organization/project/repo"
+ pr_number: The pull request number
+
+ Returns:
+ List of thread objects containing comments
+
+ Raises:
+ HTTPException: If the API request fails
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests/{pr_number}/threads?api-version=7.1'
+
+ response, _ = await self._make_request(url)
+
+ return response.get('value', [])
+
+ async def get_pr_comments(
+ self, repository: str, pr_number: int, max_comments: int = 100
+ ) -> list[Comment]:
+ """Get all comments from all threads in a pull request.
+
+ Retrieves all threads and extracts comments from them, converting to Comment objects.
+
+ Args:
+ repository: Repository name in format "organization/project/repo"
+ pr_number: The pull request number
+ max_comments: Maximum number of comments to return
+
+ Returns:
+ List of Comment objects sorted by creation date
+ """
+ threads = await self.get_pr_threads(repository, pr_number)
+
+ all_comments: list[Comment] = []
+
+ for thread in threads:
+ comments_data = thread.get('comments', [])
+
+ for comment_data in comments_data:
+ # Extract author information
+ author_info = comment_data.get('author', {})
+ author = author_info.get('displayName', 'unknown')
+
+ # Parse dates
+ created_at = (
+ datetime.fromisoformat(
+ comment_data.get('publishedDate', '').replace('Z', '+00:00')
+ )
+ if comment_data.get('publishedDate')
+ else datetime.fromtimestamp(0)
+ )
+
+ updated_at = (
+ datetime.fromisoformat(
+ comment_data.get('lastUpdatedDate', '').replace('Z', '+00:00')
+ )
+ if comment_data.get('lastUpdatedDate')
+ else created_at
+ )
+
+ # Check if it's a system comment
+ is_system = comment_data.get('commentType', 1) != 1 # 1 = text comment
+
+ comment = Comment(
+ id=str(comment_data.get('id', 0)),
+ body=self._truncate_comment(comment_data.get('content', '')),
+ author=author,
+ created_at=created_at,
+ updated_at=updated_at,
+ system=is_system,
+ )
+
+ all_comments.append(comment)
+
+ # Sort by creation date and limit
+ all_comments.sort(key=lambda c: c.created_at)
+ return all_comments[:max_comments]
+
+ async def create_pr(
+ self,
+ repo_name: str,
+ source_branch: str,
+ target_branch: str,
+ title: str,
+ body: str | None = None,
+ draft: bool = False,
+ ) -> str:
+ """Creates a pull request in Azure DevOps.
+
+ Args:
+ repo_name: The repository name in format "organization/project/repo"
+ source_branch: The source branch name
+ target_branch: The target branch name
+ title: The title of the pull request
+ body: The description of the pull request
+ draft: Whether to create a draft pull request
+
+ Returns:
+ The URL of the created pull request
+ """
+ # Parse repository string: organization/project/repo
+ parts = repo_name.split('/')
+ if len(parts) < 3:
+ raise ValueError(
+ f'Invalid repository format: {repo_name}. Expected format: organization/project/repo'
+ )
+
+ org = parts[0]
+ project = parts[1]
+ repo = parts[2]
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests?api-version=7.1'
+
+ # Set default body if none provided
+ if not body:
+ body = f'Merging changes from {source_branch} into {target_branch}'
+
+ payload = {
+ 'sourceRefName': f'refs/heads/{source_branch}',
+ 'targetRefName': f'refs/heads/{target_branch}',
+ 'title': title,
+ 'description': body,
+ 'isDraft': draft,
+ }
+
+ response, _ = await self._make_request(
+ url=url, params=payload, method=RequestMethod.POST
+ )
+
+ # Return the web URL of the created PR
+ pr_id = response.get('pullRequestId')
+ return f'https://dev.azure.com/{org_enc}/{project_enc}/_git/{repo_enc}/pullrequest/{pr_id}'
+
+ async def get_pr_details(self, repository: str, pr_number: int) -> dict:
+ """Get detailed information about a specific pull request.
+
+ Args:
+ repository: Repository name in Azure DevOps format 'org/project/repo'
+ pr_number: The pull request number
+
+ Returns:
+ Raw API response from Azure DevOps
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests/{pr_number}?api-version=7.1'
+
+ response, _ = await self._make_request(url)
+ return response
+
+ async def is_pr_open(self, repository: str, pr_number: int) -> bool:
+ """Check if a PR is still active (not closed/merged).
+
+ Args:
+ repository: Repository name in Azure DevOps format 'org/project/repo'
+ pr_number: The PR number to check
+
+ Returns:
+ True if PR is active (open), False if closed/merged/abandoned
+ """
+ try:
+ pr_details = await self.get_pr_details(repository, pr_number)
+ status = pr_details.get('status', '').lower()
+ # Azure DevOps PR statuses: active, abandoned, completed
+ return status == 'active'
+ except Exception as e:
+ logger.warning(
+ f'Failed to check PR status for {repository}#{pr_number}: {e}'
+ )
+ return False
+
+ async def add_pr_reaction(
+ self, repository: str, pr_number: int, reaction_type: str = ':thumbsup:'
+ ) -> dict:
+ org, project, repo = self._parse_repository(repository)
+ comment_text = f'{reaction_type} OpenHands is processing this PR...'
+ return await self.add_pr_thread(
+ repository, pr_number, comment_text, status='closed'
+ )
diff --git a/openhands/integrations/azure_devops/service/repos.py b/openhands/integrations/azure_devops/service/repos.py
new file mode 100644
index 000000000000..ac7930acda9a
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/repos.py
@@ -0,0 +1,178 @@
+"""Repository operations for Azure DevOps integration."""
+
+from openhands.integrations.azure_devops.service.base import AzureDevOpsMixinBase
+from openhands.integrations.service_types import ProviderType, Repository
+from openhands.server.types import AppMode
+
+
+class AzureDevOpsReposMixin(AzureDevOpsMixinBase):
+ """Mixin for Azure DevOps repository operations."""
+
+ async def search_repositories(
+ self,
+ query: str,
+ per_page: int = 30,
+ sort: str = 'updated',
+ order: str = 'desc',
+ public: bool = False,
+ app_mode: AppMode = AppMode.OSS,
+ ) -> list[Repository]:
+ """Search for repositories in Azure DevOps."""
+ # Get all repositories across all projects in the organization
+ url = f'{self.base_url}/_apis/git/repositories?api-version=7.1'
+
+ response, _ = await self._make_request(url)
+
+ # Filter repositories by query if provided
+ repos = response.get('value', [])
+ if query:
+ repos = [
+ repo for repo in repos if query.lower() in repo.get('name', '').lower()
+ ]
+
+ # Limit to per_page
+ repos = repos[:per_page]
+
+ return [
+ Repository(
+ id=str(repo.get('id')),
+ full_name=f'{self.organization}/{repo.get("project", {}).get("name", "")}/{repo.get("name")}',
+ git_provider=ProviderType.AZURE_DEVOPS,
+ is_public=False, # Azure DevOps repos are private by default
+ )
+ for repo in repos
+ ]
+
+ async def get_repositories(self, sort: str, app_mode: AppMode) -> list[Repository]:
+ """Get repositories for the authenticated user."""
+ MAX_REPOS = 1000
+
+ # Get all projects first
+ projects_url = f'{self.base_url}/_apis/projects?api-version=7.1'
+ projects_response, _ = await self._make_request(projects_url)
+ projects = projects_response.get('value', [])
+
+ all_repos = []
+
+ # For each project, get its repositories
+ for project in projects:
+ project_name = project.get('name')
+ project_enc = self._encode_url_component(project_name)
+ repos_url = (
+ f'{self.base_url}/{project_enc}/_apis/git/repositories?api-version=7.1'
+ )
+ repos_response, _ = await self._make_request(repos_url)
+ repos = repos_response.get('value', [])
+
+ for repo in repos:
+ all_repos.append(
+ {
+ 'id': repo.get('id'),
+ 'name': repo.get('name'),
+ 'project_name': project_name,
+ 'updated_date': repo.get('lastUpdateTime'),
+ }
+ )
+
+ if len(all_repos) >= MAX_REPOS:
+ break
+
+ if len(all_repos) >= MAX_REPOS:
+ break
+
+ # Sort repositories based on the sort parameter
+ if sort == 'updated':
+ all_repos.sort(key=lambda r: r.get('updated_date', ''), reverse=True)
+ elif sort == 'name':
+ all_repos.sort(key=lambda r: r.get('name', '').lower())
+
+ return [
+ Repository(
+ id=str(repo.get('id')),
+ full_name=f'{self.organization}/{repo.get("project_name")}/{repo.get("name")}',
+ git_provider=ProviderType.AZURE_DEVOPS,
+ is_public=False, # Azure DevOps repos are private by default
+ )
+ for repo in all_repos[:MAX_REPOS]
+ ]
+
+ async def get_all_repositories(
+ self, sort: str, app_mode: AppMode
+ ) -> list[Repository]:
+ """Get repositories for the authenticated user (alias for get_repositories)."""
+ return await self.get_repositories(sort, app_mode)
+
+ def _parse_repository_response(
+ self, repo: dict, project_name: str, link_header: str | None = None
+ ) -> Repository:
+ """Parse an Azure DevOps API repository response into a Repository object.
+
+ Args:
+ repo: Repository data from Azure DevOps API
+ project_name: The project name the repository belongs to
+ link_header: Optional link header for pagination
+
+ Returns:
+ Repository object
+ """
+ return Repository(
+ id=str(repo.get('id')),
+ full_name=f'{self.organization}/{project_name}/{repo.get("name")}',
+ git_provider=ProviderType.AZURE_DEVOPS,
+ is_public=False, # Azure DevOps repos are private by default
+ link_header=link_header,
+ )
+
+ async def get_paginated_repos(
+ self,
+ page: int,
+ per_page: int,
+ sort: str,
+ installation_id: str | None,
+ query: str | None = None,
+ ) -> list[Repository]:
+ """Get a page of repositories for the authenticated user."""
+ # Get all repos first, then paginate manually
+ # Azure DevOps doesn't have native pagination for repositories
+ all_repos = await self.get_repositories(sort, AppMode.SAAS)
+
+ # Calculate pagination
+ start_idx = (page - 1) * per_page
+ end_idx = start_idx + per_page
+
+ # Filter by query if provided
+ if query:
+ query_lower = query.lower()
+ all_repos = [
+ repo for repo in all_repos if query_lower in repo.full_name.lower()
+ ]
+
+ return all_repos[start_idx:end_idx]
+
+ async def get_repository_details_from_repo_name(
+ self, repository: str
+ ) -> Repository:
+ """Gets all repository details from repository name.
+
+ Args:
+ repository: Repository name in format 'organization/project/repo'
+
+ Returns:
+ Repository object with details
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'https://dev.azure.com/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}?api-version=7.1'
+ response, _ = await self._make_request(url)
+
+ return Repository(
+ id=str(response.get('id')),
+ full_name=f'{org}/{project}/{repo}',
+ git_provider=ProviderType.AZURE_DEVOPS,
+ is_public=False, # Azure DevOps repos are private by default
+ )
diff --git a/openhands/integrations/azure_devops/service/resolver.py b/openhands/integrations/azure_devops/service/resolver.py
new file mode 100644
index 000000000000..10bfeaa4b656
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/resolver.py
@@ -0,0 +1,166 @@
+from datetime import datetime
+
+from openhands.core.logger import openhands_logger as logger
+from openhands.integrations.azure_devops.service.base import AzureDevOpsMixinBase
+from openhands.integrations.service_types import Comment
+
+
+class AzureDevOpsResolverMixin(AzureDevOpsMixinBase):
+ """Helper methods used for the Azure DevOps Resolver."""
+
+ async def get_issue_or_pr_title_and_body(
+ self, repository: str, issue_number: int
+ ) -> tuple[str, str]:
+ """Get the title and body of a pull request or work item.
+
+ First attempts to get as a PR, then falls back to work item if not found.
+
+ Args:
+ repository: Repository name in format 'organization/project/repo'
+ issue_number: The PR number or work item ID
+
+ Returns:
+ A tuple of (title, body)
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ # Try to get as a pull request first
+ try:
+ pr_url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests/{issue_number}?api-version=7.1'
+ response, _ = await self._make_request(pr_url)
+ title = response.get('title') or ''
+ body = response.get('description') or ''
+ return title, body
+ except Exception as pr_error:
+ logger.debug(f'Failed to get as PR: {pr_error}, trying as work item')
+
+ # Fall back to work item
+ try:
+ wi_url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/wit/workitems/{issue_number}?api-version=7.1'
+ response, _ = await self._make_request(wi_url)
+ fields = response.get('fields', {})
+ title = fields.get('System.Title') or ''
+ body = fields.get('System.Description') or ''
+ return title, body
+ except Exception as wi_error:
+ logger.error(f'Failed to get as work item: {wi_error}')
+ return '', ''
+
+ async def get_issue_or_pr_comments(
+ self, repository: str, issue_number: int, max_comments: int = 10
+ ) -> list[Comment]:
+ """Get comments for a pull request or work item.
+
+ First attempts to get PR comments, then falls back to work item comments if not found.
+
+ Args:
+ repository: Repository name in format 'organization/project/repo'
+ issue_number: The PR number or work item ID
+ max_comments: Maximum number of comments to return
+
+ Returns:
+ List of Comment objects ordered by creation date
+ """
+ # Try to get PR comments first
+ try:
+ comments = await self.get_pr_comments( # type: ignore[attr-defined]
+ repository, issue_number, max_comments
+ )
+ if comments:
+ return comments
+ except Exception as pr_error:
+ logger.debug(f'Failed to get PR comments: {pr_error}, trying work item')
+
+ # Fall back to work item comments
+ try:
+ return await self.get_work_item_comments( # type: ignore[attr-defined]
+ repository, issue_number, max_comments
+ )
+ except Exception as wi_error:
+ logger.error(f'Failed to get work item comments: {wi_error}')
+ return []
+
+ async def get_review_thread_comments(
+ self,
+ thread_id: int,
+ repository: str,
+ pr_number: int,
+ max_comments: int = 10,
+ ) -> list[Comment]:
+ """Get all comments in a specific PR review thread.
+
+ Azure DevOps organizes PR comments into threads. This method retrieves
+ all comments from a specific thread.
+
+ Args:
+ thread_id: The thread ID
+ repository: Repository name in format 'organization/project/repo'
+ pr_number: Pull request number
+ max_comments: Maximum number of comments to return
+
+ Returns:
+ List of Comment objects representing the thread
+ """
+ org, project, repo = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+ repo_enc = self._encode_url_component(repo)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/git/repositories/{repo_enc}/pullrequests/{pr_number}/threads/{thread_id}?api-version=7.1'
+
+ try:
+ response, _ = await self._make_request(url)
+ comments_data = response.get('comments', [])
+
+ all_comments: list[Comment] = []
+
+ for comment_data in comments_data:
+ # Extract author information
+ author_info = comment_data.get('author', {})
+ author = author_info.get('displayName', 'unknown')
+
+ # Parse dates
+ created_at = (
+ datetime.fromisoformat(
+ comment_data.get('publishedDate', '').replace('Z', '+00:00')
+ )
+ if comment_data.get('publishedDate')
+ else datetime.fromtimestamp(0)
+ )
+
+ updated_at = (
+ datetime.fromisoformat(
+ comment_data.get('lastUpdatedDate', '').replace('Z', '+00:00')
+ )
+ if comment_data.get('lastUpdatedDate')
+ else created_at
+ )
+
+ # Check if it's a system comment
+ is_system = comment_data.get('commentType', 1) != 1 # 1 = text comment
+
+ comment = Comment(
+ id=str(comment_data.get('id', 0)),
+ body=self._truncate_comment(comment_data.get('content', '')),
+ author=author,
+ created_at=created_at,
+ updated_at=updated_at,
+ system=is_system,
+ )
+
+ all_comments.append(comment)
+
+ # Sort by creation date and limit
+ all_comments.sort(key=lambda c: c.created_at)
+ return all_comments[:max_comments]
+
+ except Exception as error:
+ logger.error(f'Failed to get thread {thread_id} comments: {error}')
+ return []
diff --git a/openhands/integrations/azure_devops/service/work_items.py b/openhands/integrations/azure_devops/service/work_items.py
new file mode 100644
index 000000000000..fc4a6b6eff71
--- /dev/null
+++ b/openhands/integrations/azure_devops/service/work_items.py
@@ -0,0 +1,129 @@
+"""Work item operations for Azure DevOps integration."""
+
+from datetime import datetime
+
+from openhands.core.logger import openhands_logger as logger
+from openhands.integrations.azure_devops.service.base import AzureDevOpsMixinBase
+from openhands.integrations.service_types import Comment, RequestMethod
+
+
+class AzureDevOpsWorkItemsMixin(AzureDevOpsMixinBase):
+ """Mixin for Azure DevOps work item operations.
+
+ Work Items are unique to Azure DevOps and represent tasks, bugs, user stories, etc.
+ in Azure Boards. This mixin provides methods to interact with work item comments.
+ """
+
+ def _truncate_comment(self, comment: str, max_length: int = 1000) -> str:
+ """Truncate comment to max length."""
+ if len(comment) <= max_length:
+ return comment
+ return comment[:max_length] + '...'
+
+ async def add_work_item_comment(
+ self, repository: str, work_item_id: int, comment_text: str
+ ) -> dict:
+ """Add a comment to an Azure DevOps work item.
+
+ API Reference: https://learn.microsoft.com/en-us/rest/api/azure/devops/wit/comments/add-comment
+
+ Args:
+ repository: Repository name in format "organization/project/repo" (project extracted)
+ work_item_id: The work item ID
+ comment_text: The comment text to post
+
+ Returns:
+ API response with created comment information
+
+ Raises:
+ HTTPException: If the API request fails
+ """
+ org, project, _ = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/wit/workItems/{work_item_id}/comments?api-version=7.1-preview.4'
+
+ payload = {
+ 'text': comment_text,
+ }
+
+ response, _ = await self._make_request(
+ url=url, params=payload, method=RequestMethod.POST
+ )
+
+ logger.info(f'Added comment to work item {work_item_id} in project {project}')
+ return response
+
+ async def get_work_item_comments(
+ self, repository: str, work_item_id: int, max_comments: int = 100
+ ) -> list[Comment]:
+ """Get all comments from a work item.
+
+ API Reference: https://learn.microsoft.com/en-us/rest/api/azure/devops/wit/comments/get-comments
+
+ Args:
+ repository: Repository name in format "organization/project/repo" (project extracted)
+ work_item_id: The work item ID
+ max_comments: Maximum number of comments to return
+
+ Returns:
+ List of Comment objects sorted by creation date
+ """
+ org, project, _ = self._parse_repository(repository)
+
+ # URL-encode components to handle spaces and special characters
+ org_enc = self._encode_url_component(org)
+ project_enc = self._encode_url_component(project)
+
+ url = f'{self.base_url}/{org_enc}/{project_enc}/_apis/wit/workItems/{work_item_id}/comments?api-version=7.1-preview.4'
+
+ response, _ = await self._make_request(url)
+
+ comments_data = response.get('comments', [])
+ all_comments: list[Comment] = []
+
+ for comment_data in comments_data:
+ # Extract author information
+ author_info = comment_data.get('createdBy', {})
+ author = author_info.get('displayName', 'unknown')
+
+ # Parse dates
+ created_at = (
+ datetime.fromisoformat(
+ comment_data.get('createdDate', '').replace('Z', '+00:00')
+ )
+ if comment_data.get('createdDate')
+ else datetime.fromtimestamp(0)
+ )
+
+ modified_at = (
+ datetime.fromisoformat(
+ comment_data.get('modifiedDate', '').replace('Z', '+00:00')
+ )
+ if comment_data.get('modifiedDate')
+ else created_at
+ )
+
+ comment = Comment(
+ id=str(comment_data.get('id', 0)),
+ body=self._truncate_comment(comment_data.get('text', '')),
+ author=author,
+ created_at=created_at,
+ updated_at=modified_at,
+ system=False,
+ )
+
+ all_comments.append(comment)
+
+ # Sort by creation date and limit
+ all_comments.sort(key=lambda c: c.created_at)
+ return all_comments[:max_comments]
+
+ async def add_work_item_reaction(
+ self, repository: str, work_item_id: int, reaction_type: str = ':thumbsup:'
+ ) -> dict:
+ comment_text = f'{reaction_type} OpenHands is processing this work item...'
+ return await self.add_work_item_comment(repository, work_item_id, comment_text)
diff --git a/openhands/integrations/protocols/http_client.py b/openhands/integrations/protocols/http_client.py
index 21ec1857e059..5b12da029e6b 100644
--- a/openhands/integrations/protocols/http_client.py
+++ b/openhands/integrations/protocols/http_client.py
@@ -20,7 +20,7 @@ class HTTPClient(ABC):
"""Abstract base class defining the HTTP client interface for Git service integrations.
This class abstracts the common HTTP client functionality needed by all
- Git service providers (GitHub, GitLab, BitBucket) while keeping inheritance in place.
+ Git service providers (GitHub, GitLab, Bitbucket, Azure DevOps) while keeping inheritance in place.
"""
# Default attributes (subclasses may override)
diff --git a/openhands/integrations/provider.py b/openhands/integrations/provider.py
index 09c1ae7e11e1..c260f23ee026 100644
--- a/openhands/integrations/provider.py
+++ b/openhands/integrations/provider.py
@@ -1,8 +1,10 @@
from __future__ import annotations
import os
+from collections.abc import Mapping
from types import MappingProxyType
-from typing import Annotated, Any, Coroutine, Literal, cast, overload
+from typing import Any, Coroutine, Literal, cast, overload
+from urllib.parse import quote
import httpx
from pydantic import (
@@ -10,13 +12,15 @@
ConfigDict,
Field,
SecretStr,
- WithJsonSchema,
)
from openhands.core.logger import openhands_logger as logger
from openhands.events.action.action import Action
from openhands.events.action.commands import CmdRunAction
from openhands.events.stream import EventStream
+from openhands.integrations.azure_devops.azure_devops_service import (
+ AzureDevOpsServiceImpl,
+)
from openhands.integrations.bitbucket.bitbucket_service import BitBucketServiceImpl
from openhands.integrations.github.github_service import GithubServiceImpl
from openhands.integrations.gitlab.gitlab_service import GitLabServiceImpl
@@ -91,16 +95,8 @@ def from_value(cls, secret_value: CustomSecret | dict[str, str]) -> CustomSecret
raise ValueError('Unsupport Provider token type')
-PROVIDER_TOKEN_TYPE = MappingProxyType[ProviderType, ProviderToken]
-CUSTOM_SECRETS_TYPE = MappingProxyType[str, CustomSecret]
-PROVIDER_TOKEN_TYPE_WITH_JSON_SCHEMA = Annotated[
- PROVIDER_TOKEN_TYPE,
- WithJsonSchema({'type': 'object', 'additionalProperties': {'type': 'string'}}),
-]
-CUSTOM_SECRETS_TYPE_WITH_JSON_SCHEMA = Annotated[
- CUSTOM_SECRETS_TYPE,
- WithJsonSchema({'type': 'object', 'additionalProperties': {'type': 'string'}}),
-]
+PROVIDER_TOKEN_TYPE = Mapping[ProviderType, ProviderToken]
+CUSTOM_SECRETS_TYPE = Mapping[str, CustomSecret]
class ProviderHandler:
@@ -109,6 +105,7 @@ class ProviderHandler:
ProviderType.GITHUB: 'github.com',
ProviderType.GITLAB: 'gitlab.com',
ProviderType.BITBUCKET: 'bitbucket.org',
+ ProviderType.AZURE_DEVOPS: 'dev.azure.com',
}
def __init__(
@@ -129,6 +126,7 @@ def __init__(
ProviderType.GITHUB: GithubServiceImpl,
ProviderType.GITLAB: GitLabServiceImpl,
ProviderType.BITBUCKET: BitBucketServiceImpl,
+ ProviderType.AZURE_DEVOPS: AzureDevOpsServiceImpl,
}
self.external_auth_id = external_auth_id
@@ -214,6 +212,17 @@ async def get_bitbucket_workspaces(self) -> list[str]:
return []
+ async def get_azure_devops_organizations(self) -> list[str]:
+ service = cast(
+ InstallationsService, self.get_service(ProviderType.AZURE_DEVOPS)
+ )
+ try:
+ return await service.get_installations()
+ except Exception as e:
+ logger.warning(f'Failed to get azure devops organizations {e}')
+
+ return []
+
async def get_repositories(
self,
sort: str,
@@ -658,8 +667,10 @@ async def get_authenticated_git_url(
domain = self.PROVIDER_DOMAINS[provider]
# If provider tokens are provided, use the host from the token if available
+ # Note: For Azure DevOps, don't use the host field as it may contain org/project path
if self.provider_tokens and provider in self.provider_tokens:
- domain = self.provider_tokens[provider].host or domain
+ if provider != ProviderType.AZURE_DEVOPS:
+ domain = self.provider_tokens[provider].host or domain
# Try to use token if available, otherwise use public URL
if self.provider_tokens and provider in self.provider_tokens:
@@ -678,6 +689,63 @@ async def get_authenticated_git_url(
else:
# Access token format: use x-token-auth
remote_url = f'https://x-token-auth:{token_value}@{domain}/{repo_name}.git'
+ elif provider == ProviderType.AZURE_DEVOPS:
+ # Azure DevOps uses PAT with Basic auth
+ # Format: https://{anything}:{PAT}@dev.azure.com/{org}/{project}/_git/{repo}
+ # The username can be anything (it's ignored), but cannot be empty
+ # We use the org name as the username for clarity
+ # repo_name is in format: org/project/repo
+ logger.info(
+ f'[Azure DevOps] Constructing authenticated git URL for repository: {repo_name}'
+ )
+ logger.debug(f'[Azure DevOps] Original domain: {domain}')
+ logger.debug(
+ f'[Azure DevOps] Token available: {bool(token_value)}, '
+ f'Token length: {len(token_value) if token_value else 0}'
+ )
+
+ # Remove domain prefix if it exists in domain variable
+ clean_domain = domain.replace('https://', '').replace('http://', '')
+ logger.debug(f'[Azure DevOps] Cleaned domain: {clean_domain}')
+
+ parts = repo_name.split('/')
+ logger.debug(
+ f'[Azure DevOps] Repository parts: {parts} (length: {len(parts)})'
+ )
+
+ if len(parts) >= 3:
+ org, project, repo = parts[0], parts[1], parts[2]
+ logger.info(
+ f'[Azure DevOps] Parsed repository - org: {org}, project: {project}, repo: {repo}'
+ )
+ # URL-encode org, project, and repo to handle spaces and special characters
+ org_encoded = quote(org, safe='')
+ project_encoded = quote(project, safe='')
+ repo_encoded = quote(repo, safe='')
+ logger.debug(
+ f'[Azure DevOps] URL-encoded parts - org: {org_encoded}, project: {project_encoded}, repo: {repo_encoded}'
+ )
+ # Use org name as username (it's ignored by Azure DevOps but required for git)
+ remote_url = f'https://{org}:***@{clean_domain}/{org_encoded}/{project_encoded}/_git/{repo_encoded}'
+ logger.info(
+ f'[Azure DevOps] Constructed git URL (token masked): {remote_url}'
+ )
+ # Set the actual URL with token
+ remote_url = f'https://{org}:{token_value}@{clean_domain}/{org_encoded}/{project_encoded}/_git/{repo_encoded}'
+ else:
+ # Fallback if format is unexpected
+ logger.warning(
+ f'[Azure DevOps] Unexpected repository format: {repo_name}. '
+ f'Expected org/project/repo (3 parts), got {len(parts)} parts. '
+ 'Using fallback URL format.'
+ )
+ remote_url = (
+ f'https://user:{token_value}@{clean_domain}/{repo_name}.git'
+ )
+ logger.warning(
+ f'[Azure DevOps] Fallback URL constructed (token masked): '
+ f'https://user:***@{clean_domain}/{repo_name}.git'
+ )
else:
# GitHub
remote_url = f'https://{token_value}@{domain}/{repo_name}.git'
diff --git a/openhands/integrations/service_types.py b/openhands/integrations/service_types.py
index cfc48390591c..cf76e404791c 100644
--- a/openhands/integrations/service_types.py
+++ b/openhands/integrations/service_types.py
@@ -21,6 +21,7 @@ class ProviderType(Enum):
GITHUB = 'github'
GITLAB = 'gitlab'
BITBUCKET = 'bitbucket'
+ AZURE_DEVOPS = 'azure_devops'
ENTERPRISE_SSO = 'enterprise_sso'
diff --git a/openhands/integrations/templates/resolver/azure_devops/issue_conversation_instructions.j2 b/openhands/integrations/templates/resolver/azure_devops/issue_conversation_instructions.j2
new file mode 100644
index 000000000000..c9d22c59b16b
--- /dev/null
+++ b/openhands/integrations/templates/resolver/azure_devops/issue_conversation_instructions.j2
@@ -0,0 +1,41 @@
+{% if issue_number %}
+You are requested to fix work item #{{ issue_number }}: "{{ issue_title }}" in an Azure DevOps repository.
+A comment on the work item has been addressed to you.
+{% else %}
+Your task is to fix the work item: "{{ issue_title }}".
+{% endif %}
+
+# Work Item Description
+{{ issue_body }}
+
+{% if previous_comments %}
+# Previous Comments
+For reference, here are the previous comments on the work item:
+
+{% for comment in previous_comments %}
+- @{{ comment.author }} said:
+{{ comment.body }}
+{% if not loop.last %}{{ '\n\n' }}{% endif %}
+{% endfor %}
+{% endif %}
+
+# Guidelines
+
+1. Review the task carefully.
+2. For all changes to actual application code (e.g. in Python or Javascript), add an appropriate test to the testing directory to make sure that the work item has been fixed
+3. Run the tests, and if they pass you are done!
+4. You do NOT need to write new tests if there are only changes to documentation or configuration files.
+
+# Final Checklist
+Re-read the work item title, description, and comments and make sure that you have successfully implemented all requirements.
+
+Use the Azure DevOps token and Azure DevOps REST APIs to:
+
+1. Create a new branch using `openhands/` as a prefix (e.g. `openhands/update-readme`)
+2. Commit your changes with a clear commit message
+3. Push the branch to Azure DevOps
+4. Use the `create_pr` tool to open a new pull request
+5. The PR description should:
+ - Mention that it "fixes" or "closes" the work item number
+ - Include a clear summary of the changes
+ - Reference any related work items
diff --git a/openhands/integrations/templates/resolver/azure_devops/issue_prompt.j2 b/openhands/integrations/templates/resolver/azure_devops/issue_prompt.j2
new file mode 100644
index 000000000000..0309c21a021d
--- /dev/null
+++ b/openhands/integrations/templates/resolver/azure_devops/issue_prompt.j2
@@ -0,0 +1,5 @@
+{% if issue_comment %}
+{{ issue_comment }}
+{% else %}
+Please fix work item #{{ issue_number }}.
+{% endif %}
diff --git a/openhands/integrations/templates/resolver/azure_devops/pr_update_conversation_instructions.j2 b/openhands/integrations/templates/resolver/azure_devops/pr_update_conversation_instructions.j2
new file mode 100644
index 000000000000..8b9d184e38f2
--- /dev/null
+++ b/openhands/integrations/templates/resolver/azure_devops/pr_update_conversation_instructions.j2
@@ -0,0 +1,38 @@
+You are checked out to branch {{ branch_name }}, which has an open PR #{{ pr_number }}: "{{ pr_title }}".
+A comment on the PR has been addressed to you.
+
+# PR Description
+{{ pr_body }}
+
+{% if comments %}
+# Previous Comments
+You may find these other comments relevant:
+{% for comment in comments %}
+- @{{ comment.author }} said at {{ comment.created_at }}:
+{{ comment.body }}
+{% if not loop.last %}{{ '\n\n' }}{% endif %}
+{% endfor %}
+{% endif %}
+
+{% if file_location %}
+# Comment location
+The comment is in the file `{{ file_location }}` on line #{{ line_number }}.
+{% endif %}
+
+# Steps to Handle the Comment
+
+## Understand the PR Context
+Use the Azure DevOps token and Azure DevOps REST APIs to:
+ 1. Retrieve the diff against the target branch to understand the changes
+ 2. Fetch the PR description and any linked work items for context
+
+## Process the Comment
+If it's a question:
+ 1. Answer the question asked
+ 2. DO NOT leave any comments on the PR
+
+If it requests a code update:
+ 1. Modify the code accordingly in the current branch
+ 2. Commit your changes with a clear commit message
+ 3. Push the changes to Azure DevOps to update the PR
+ 4. DO NOT leave any comments on the PR
diff --git a/openhands/integrations/templates/resolver/azure_devops/pr_update_prompt.j2 b/openhands/integrations/templates/resolver/azure_devops/pr_update_prompt.j2
new file mode 100644
index 000000000000..987ac3ac59a5
--- /dev/null
+++ b/openhands/integrations/templates/resolver/azure_devops/pr_update_prompt.j2
@@ -0,0 +1 @@
+{{ pr_comment }}
diff --git a/openhands/integrations/utils.py b/openhands/integrations/utils.py
index eb4114b049fc..c3a9ee344cc7 100644
--- a/openhands/integrations/utils.py
+++ b/openhands/integrations/utils.py
@@ -1,6 +1,9 @@
from pydantic import SecretStr
from openhands.core.logger import openhands_logger as logger
+from openhands.integrations.azure_devops.azure_devops_service import (
+ AzureDevOpsServiceImpl as AzureDevOpsService,
+)
from openhands.integrations.bitbucket.bitbucket_service import BitBucketService
from openhands.integrations.github.github_service import GitHubService
from openhands.integrations.gitlab.gitlab_service import GitLabService
@@ -10,8 +13,7 @@
async def validate_provider_token(
token: SecretStr, base_domain: str | None = None
) -> ProviderType | None:
- """Determine whether a token is for GitHub, GitLab, or Bitbucket by attempting to get user info
- from the services.
+ """Determine whether a token is for GitHub, GitLab, Bitbucket, or Azure DevOps by attempting to get user info from the services.
Args:
token: The token to check
@@ -21,6 +23,7 @@ async def validate_provider_token(
'github' if it's a GitHub token
'gitlab' if it's a GitLab token
'bitbucket' if it's a Bitbucket token
+ 'azure_devops' if it's an Azure DevOps token
None if the token is invalid for all services
"""
# Skip validation for empty tokens
@@ -45,7 +48,7 @@ async def validate_provider_token(
except Exception as e:
gitlab_error = e
- # Try Bitbucket last
+ # Try Bitbucket next
bitbucket_error = None
try:
bitbucket_service = BitBucketService(token=token, base_domain=base_domain)
@@ -54,8 +57,17 @@ async def validate_provider_token(
except Exception as e:
bitbucket_error = e
+ # Try Azure DevOps last
+ azure_devops_error = None
+ try:
+ azure_devops_service = AzureDevOpsService(token=token, base_domain=base_domain)
+ await azure_devops_service.get_user()
+ return ProviderType.AZURE_DEVOPS
+ except Exception as e:
+ azure_devops_error = e
+
logger.debug(
- f'Failed to validate token: {github_error} \n {gitlab_error} \n {bitbucket_error}'
+ f'Failed to validate token: {github_error} \n {gitlab_error} \n {bitbucket_error} \n {azure_devops_error}'
)
return None
diff --git a/openhands/llm/fn_call_converter.py b/openhands/llm/fn_call_converter.py
index 7de88245162e..826b278dc480 100644
--- a/openhands/llm/fn_call_converter.py
+++ b/openhands/llm/fn_call_converter.py
@@ -421,16 +421,12 @@ def convert_tool_call_to_string(tool_call: dict) -> str:
f'Failed to parse arguments as JSON. Arguments: {tool_call["function"]["arguments"]}'
) from e
for param_name, param_value in args.items():
- is_multiline = isinstance(param_value, str) and '\n' in param_value
+ # Don't add extra newlines - keep parameter value as-is
ret += f''
- if is_multiline:
- ret += '\n'
if isinstance(param_value, list) or isinstance(param_value, dict):
ret += json.dumps(param_value)
else:
ret += f'{param_value}'
- if is_multiline:
- ret += '\n'
ret += ' \n'
ret += ''
return ret
diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index f97669309c56..150fa54925c1 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -188,13 +188,15 @@ def __init__(
if 'claude-opus-4-1' in self.config.model.lower():
kwargs['thinking'] = {'type': 'disabled'}
- # Anthropic constraint: Opus models cannot accept both temperature and top_p
+ # Anthropic constraint: Opus 4.1, Opus 4.5, and Sonnet 4 models cannot accept both temperature and top_p
# Prefer temperature (drop top_p) if both are specified.
_model_lower = self.config.model.lower()
- # Limit to Opus 4.1 specifically to avoid changing behavior of other Anthropic models
- if ('claude-opus-4-1' in _model_lower) and (
- 'temperature' in kwargs and 'top_p' in kwargs
- ):
+ # Apply to Opus 4.1, Opus 4.5, and Sonnet 4 models to avoid API errors
+ if (
+ ('claude-opus-4-1' in _model_lower)
+ or ('claude-opus-4-5' in _model_lower)
+ or ('claude-sonnet-4' in _model_lower)
+ ) and ('temperature' in kwargs and 'top_p' in kwargs):
kwargs.pop('top_p', None)
# Add completion_kwargs if present
diff --git a/openhands/llm/model_features.py b/openhands/llm/model_features.py
index 954cee00fde2..78cbd84a222a 100644
--- a/openhands/llm/model_features.py
+++ b/openhands/llm/model_features.py
@@ -69,6 +69,9 @@ class ModelFeatures:
'claude-3.5-haiku*',
'claude-3-5-haiku*',
'claude-sonnet-4*',
+ 'anthropic.claude-sonnet-4*',
+ 'us.anthropic.claude-sonnet-4*',
+ 'global.anthropic.claude-sonnet-4*',
'claude-opus-4*',
# OpenAI families
'gpt-4o*',
@@ -80,12 +83,16 @@ class ModelFeatures:
'o4-mini*',
# Google Gemini
'gemini-2.5-pro*',
+ 'gemini-3*',
+ # Groq models (via groq/ provider prefix)
+ 'groq/*',
# Others
'kimi-k2-0711-preview',
'kimi-k2-instruct',
'qwen3-coder*',
'qwen3-coder-480b-a35b-instruct',
'deepseek-chat',
+ 'grok-code-fast-1',
]
REASONING_EFFORT_PATTERNS: list[str] = [
@@ -117,6 +124,9 @@ class ModelFeatures:
'claude-3-haiku-20240307',
'claude-3-opus-20240229',
'claude-sonnet-4*',
+ 'anthropic.claude-sonnet-4*',
+ 'us.anthropic.claude-sonnet-4*',
+ 'global.anthropic.claude-sonnet-4*',
'claude-opus-4*',
]
@@ -128,6 +138,8 @@ class ModelFeatures:
'grok-code-fast-1',
# DeepSeek R1 family
'deepseek-r1-0528*',
+ # Azure GPT-5 family
+ 'azure/gpt-5*',
]
diff --git a/openhands/memory/memory.py b/openhands/memory/memory.py
index fc1fa2233962..3ba7fc5d39a4 100644
--- a/openhands/memory/memory.py
+++ b/openhands/memory/memory.py
@@ -32,7 +32,7 @@
GLOBAL_MICROAGENTS_DIR = os.path.join(
os.path.dirname(os.path.dirname(openhands.__file__)),
- 'microagents',
+ 'skills',
)
USER_MICROAGENTS_DIR = Path.home() / '.openhands' / 'microagents'
@@ -77,7 +77,7 @@ def __init__(
self.conversation_instructions: ConversationInstructions | None = None
# Load global microagents (Knowledge + Repo)
- # from typically OpenHands/microagents (i.e., the PUBLIC microagents)
+ # from typically OpenHands/skills (i.e., the PUBLIC microagents)
self._load_global_microagents()
# Load user microagents from ~/.openhands/microagents/
@@ -172,33 +172,50 @@ def _on_workspace_context_recall(
):
obs = RecallObservation(
recall_type=RecallType.WORKSPACE_CONTEXT,
- repo_name=self.repository_info.repo_name
- if self.repository_info and self.repository_info.repo_name is not None
- else '',
- repo_directory=self.repository_info.repo_directory
- if self.repository_info
- and self.repository_info.repo_directory is not None
- else '',
- repo_branch=self.repository_info.branch_name
- if self.repository_info and self.repository_info.branch_name is not None
- else '',
+ repo_name=(
+ self.repository_info.repo_name
+ if self.repository_info
+ and self.repository_info.repo_name is not None
+ else ''
+ ),
+ repo_directory=(
+ self.repository_info.repo_directory
+ if self.repository_info
+ and self.repository_info.repo_directory is not None
+ else ''
+ ),
+ repo_branch=(
+ self.repository_info.branch_name
+ if self.repository_info
+ and self.repository_info.branch_name is not None
+ else ''
+ ),
repo_instructions=repo_instructions if repo_instructions else '',
- runtime_hosts=self.runtime_info.available_hosts
- if self.runtime_info and self.runtime_info.available_hosts is not None
- else {},
- additional_agent_instructions=self.runtime_info.additional_agent_instructions
- if self.runtime_info
- and self.runtime_info.additional_agent_instructions is not None
- else '',
+ runtime_hosts=(
+ self.runtime_info.available_hosts
+ if self.runtime_info
+ and self.runtime_info.available_hosts is not None
+ else {}
+ ),
+ additional_agent_instructions=(
+ self.runtime_info.additional_agent_instructions
+ if self.runtime_info
+ and self.runtime_info.additional_agent_instructions is not None
+ else ''
+ ),
microagent_knowledge=microagent_knowledge,
content='Added workspace context',
date=self.runtime_info.date if self.runtime_info is not None else '',
- custom_secrets_descriptions=self.runtime_info.custom_secrets_descriptions
- if self.runtime_info is not None
- else {},
- conversation_instructions=self.conversation_instructions.content
- if self.conversation_instructions is not None
- else '',
+ custom_secrets_descriptions=(
+ self.runtime_info.custom_secrets_descriptions
+ if self.runtime_info is not None
+ else {}
+ ),
+ conversation_instructions=(
+ self.conversation_instructions.content
+ if self.conversation_instructions is not None
+ else ''
+ ),
working_dir=self.runtime_info.working_dir if self.runtime_info else '',
)
return obs
diff --git a/openhands/resolver/README.md b/openhands/resolver/README.md
index 0bcd5a230799..8c09ce854b0d 100644
--- a/openhands/resolver/README.md
+++ b/openhands/resolver/README.md
@@ -1,9 +1,9 @@
-# OpenHands GitHub, GitLab & Bitbucket Issue Resolver 🙌
+# OpenHands GitHub, GitLab, Bitbucket & Azure DevOps Issue Resolver 🙌
-Need help resolving a GitHub, GitLab, or Bitbucket issue but don't have the time to do it yourself? Let an AI agent help you out!
+Need help resolving a GitHub, GitLab, Bitbucket, or Azure DevOps issue but don't have the time to do it yourself? Let an AI agent help you out!
This tool allows you to use open-source AI agents based on [OpenHands](https://github.com/openhands/openhands)
-to attempt to resolve GitHub, GitLab, and Bitbucket issues automatically. While it can handle multiple issues, it's primarily designed
+to attempt to resolve GitHub, GitLab, Bitbucket, and Azure DevOps issues automatically. While it can handle multiple issues, it's primarily designed
to help you resolve one issue at a time with high quality.
Getting started is simple - just follow the instructions below.
@@ -74,7 +74,7 @@ If you prefer to run the resolver programmatically instead of using GitHub Actio
pip install openhands-ai
```
-2. Create a GitHub, GitLab, or Bitbucket access token:
+2. Create a GitHub, GitLab, Bitbucket, or Azure DevOps access token:
- Create a GitHub access token
- Visit [GitHub's token settings](https://github.com/settings/personal-access-tokens/new)
- Create a fine-grained token with these scopes:
@@ -103,6 +103,13 @@ pip install openhands-ai
- 'Issues: Read'
- 'Issues: Write'
+ - Create an Azure DevOps access token
+ - Visit [Azure DevOps token settings](https://dev.azure.com/{organization}/_usersSettings/tokens)
+ - Create a personal access token with these scopes:
+ - 'Code: Read & Write'
+ - 'Work Items: Read & Write'
+ - 'Pull Request: Read & Write'
+
3. Set up environment variables:
```bash
@@ -122,6 +129,11 @@ export GIT_USERNAME="your-gitlab-username" # Optional, defaults to token owner
export BITBUCKET_TOKEN="your-bitbucket-token"
export GIT_USERNAME="your-bitbucket-username" # Optional, defaults to token owner
+# Azure DevOps credentials if you're using an Azure DevOps repo
+
+export AZURE_DEVOPS_TOKEN="your-azure-devops-token"
+export GIT_USERNAME="your-azure-devops-username" # Optional, defaults to token owner
+
# LLM configuration
export LLM_MODEL="anthropic/claude-sonnet-4-20250514" # Recommended
diff --git a/openhands/resolver/interfaces/azure_devops.py b/openhands/resolver/interfaces/azure_devops.py
new file mode 100644
index 000000000000..94d15d729699
--- /dev/null
+++ b/openhands/resolver/interfaces/azure_devops.py
@@ -0,0 +1,427 @@
+import base64
+import re
+from typing import Any
+
+import httpx
+
+from openhands.resolver.interfaces.issue import (
+ Issue,
+ IssueHandlerInterface,
+ ReviewThread,
+)
+
+
+class AzureDevOpsIssueHandler(IssueHandlerInterface):
+ def __init__(
+ self,
+ token: str,
+ organization: str,
+ project: str,
+ repository: str,
+ ):
+ self.token = token
+ self.organization = organization
+ self.project = project
+ self.repository = repository
+ self.owner = f'{organization}/{project}'
+ self.base_api_url = f'https://dev.azure.com/{organization}/{project}/_apis'
+ self.repo_api_url = f'{self.base_api_url}/git/repositories/{repository}'
+ self.work_items_api_url = f'{self.base_api_url}/wit'
+ self.default_branch = 'main'
+
+ def set_owner(self, owner: str) -> None:
+ """Set the owner of the repository."""
+ self.owner = owner
+ parts = owner.split('/')
+ if len(parts) >= 2:
+ self.organization = parts[0]
+ self.project = parts[1]
+ self.base_api_url = (
+ f'https://dev.azure.com/{self.organization}/{self.project}/_apis'
+ )
+ self.repo_api_url = (
+ f'{self.base_api_url}/git/repositories/{self.repository}'
+ )
+ self.work_items_api_url = f'{self.base_api_url}/wit'
+
+ def get_headers(self) -> dict[str, str]:
+ """Get the headers for the Azure DevOps API."""
+ auth_str = base64.b64encode(f':{self.token}'.encode()).decode()
+ return {
+ 'Authorization': f'Basic {auth_str}',
+ 'Content-Type': 'application/json',
+ 'Accept': 'application/json',
+ }
+
+ def download_issues(self) -> list[Any]:
+ """Download issues from Azure DevOps."""
+ # Use WIQL to query for active work items
+ wiql_url = f'{self.work_items_api_url}/wiql?api-version=7.1'
+ wiql_query = {
+ 'query': "SELECT [System.Id], [System.Title], [System.State] FROM WorkItems WHERE [System.State] = 'Active' ORDER BY [System.CreatedDate] DESC"
+ }
+
+ response = httpx.post(wiql_url, headers=self.get_headers(), json=wiql_query)
+ response.raise_for_status()
+
+ work_item_references = response.json().get('workItems', [])
+
+ # Get details for each work item
+ work_items = []
+ for work_item_ref in work_item_references:
+ work_item_id = work_item_ref.get('id')
+ work_item_url = f'{self.work_items_api_url}/workitems/{work_item_id}?api-version=7.1&$expand=all'
+
+ item_response = httpx.get(work_item_url, headers=self.get_headers())
+ item_response.raise_for_status()
+
+ work_items.append(item_response.json())
+
+ return work_items
+
+ def get_issue_comments(
+ self, issue_number: int, comment_id: int | None = None
+ ) -> list[str] | None:
+ """Get comments for an issue."""
+ comments_url = f'{self.work_items_api_url}/workitems/{issue_number}/comments?api-version=7.1-preview.3'
+
+ response = httpx.get(comments_url, headers=self.get_headers())
+ response.raise_for_status()
+
+ comments_data = response.json().get('comments', [])
+
+ if comment_id is not None:
+ # Return a specific comment
+ for comment in comments_data:
+ if comment.get('id') == comment_id:
+ return [comment.get('text', '')]
+ return None
+
+ # Return all comments
+ return [comment.get('text', '') for comment in comments_data]
+
+ def get_base_url(self) -> str:
+ """Get the base URL for the Azure DevOps repository."""
+ return f'https://dev.azure.com/{self.organization}/{self.project}'
+
+ def get_branch_url(self, branch_name: str) -> str:
+ """Get the URL for a branch."""
+ return f'{self.get_base_url()}/_git/{self.repository}?version=GB{branch_name}'
+
+ def get_download_url(self) -> str:
+ """Get the download URL for the repository."""
+ return f'{self.get_base_url()}/_git/{self.repository}'
+
+ def get_clone_url(self) -> str:
+ """Get the clone URL for the repository."""
+ return f'https://dev.azure.com/{self.organization}/{self.project}/_git/{self.repository}'
+
+ def get_pull_url(self, pr_number: int) -> str:
+ """Get the URL for a pull request."""
+ return f'{self.get_base_url()}/_git/{self.repository}/pullrequest/{pr_number}'
+
+ def get_graphql_url(self) -> str:
+ """Get the GraphQL URL for Azure DevOps."""
+ return f'https://dev.azure.com/{self.organization}/_apis/graphql?api-version=7.1-preview.1'
+
+ def get_compare_url(self, branch_name: str) -> str:
+ """Get the URL to compare branches."""
+ return f'{self.get_base_url()}/_git/{self.repository}/branches?baseVersion=GB{self.default_branch}&targetVersion=GB{branch_name}&_a=files'
+
+ def get_branch_name(self, base_branch_name: str) -> str:
+ """Generate a branch name for a new pull request."""
+ return f'openhands/issue-{base_branch_name}'
+
+ def get_default_branch_name(self) -> str:
+ """Get the default branch name for the repository."""
+ # Get repository details to find the default branch
+ response = httpx.get(
+ f'{self.repo_api_url}?api-version=7.1', headers=self.get_headers()
+ )
+ response.raise_for_status()
+
+ repo_data = response.json()
+ default_branch = repo_data.get('defaultBranch', 'refs/heads/main')
+
+ # Remove 'refs/heads/' prefix
+ return default_branch.replace('refs/heads/', '')
+
+ def branch_exists(self, branch_name: str) -> bool:
+ """Check if a branch exists."""
+ # List all branches and check if the branch exists
+ response = httpx.get(
+ f'{self.repo_api_url}/refs?filter=heads/{branch_name}&api-version=7.1',
+ headers=self.get_headers(),
+ )
+ response.raise_for_status()
+
+ refs = response.json().get('value', [])
+ return len(refs) > 0
+
+ def reply_to_comment(self, pr_number: int, comment_id: str, reply: str) -> None:
+ """Reply to a comment on a pull request."""
+ # Get the thread ID from the comment ID
+ threads_url = (
+ f'{self.repo_api_url}/pullRequests/{pr_number}/threads?api-version=7.1'
+ )
+
+ response = httpx.get(threads_url, headers=self.get_headers())
+ response.raise_for_status()
+
+ threads = response.json().get('value', [])
+ thread_id = None
+
+ for thread in threads:
+ for comment in thread.get('comments', []):
+ if str(comment.get('id')) == comment_id:
+ thread_id = thread.get('id')
+ break
+ if thread_id:
+ break
+
+ if not thread_id:
+ raise ValueError(f'Comment ID {comment_id} not found in PR {pr_number}')
+
+ # Add a comment to the thread
+ comment_url = f'{self.repo_api_url}/pullRequests/{pr_number}/threads/{thread_id}/comments?api-version=7.1'
+
+ comment_data = {
+ 'content': reply,
+ 'parentCommentId': int(comment_id),
+ }
+
+ response = httpx.post(
+ comment_url, headers=self.get_headers(), json=comment_data
+ )
+ response.raise_for_status()
+
+ def send_comment_msg(self, issue_number: int, msg: str) -> None:
+ """Send a comment to an issue."""
+ comment_url = f'{self.work_items_api_url}/workitems/{issue_number}/comments?api-version=7.1-preview.3'
+
+ comment_data = {
+ 'text': msg,
+ }
+
+ response = httpx.post(
+ comment_url, headers=self.get_headers(), json=comment_data
+ )
+ response.raise_for_status()
+
+ def get_authorize_url(self) -> str:
+ """Get the authorization URL for Azure DevOps."""
+ return 'https://app.vsaex.visualstudio.com/app/register'
+
+ def create_pull_request(self, data: dict[str, Any] | None = None) -> dict[str, Any]:
+ """Create a pull request."""
+ if data is None:
+ data = {}
+
+ source_branch = data.get('source_branch')
+ target_branch = data.get('target_branch', self.default_branch)
+ title = data.get('title', 'Pull request created by OpenHands')
+ description = data.get('description', '')
+
+ pr_data = {
+ 'sourceRefName': f'refs/heads/{source_branch}',
+ 'targetRefName': f'refs/heads/{target_branch}',
+ 'title': title,
+ 'description': description,
+ }
+
+ response = httpx.post(
+ f'{self.repo_api_url}/pullrequests?api-version=7.1',
+ headers=self.get_headers(),
+ json=pr_data,
+ )
+ response.raise_for_status()
+
+ pr_response = response.json()
+
+ return {
+ 'id': pr_response.get('pullRequestId'),
+ 'number': pr_response.get('pullRequestId'),
+ 'url': pr_response.get('url'),
+ }
+
+ def request_reviewers(self, reviewer: str, pr_number: int) -> None:
+ """Request reviewers for a pull request."""
+ # Get the reviewer's ID
+ reviewer_url = f'https://vssps.dev.azure.com/{self.organization}/_apis/graph/users?api-version=7.1-preview.1'
+
+ response = httpx.get(reviewer_url, headers=self.get_headers())
+ response.raise_for_status()
+
+ users = response.json().get('value', [])
+ reviewer_id = None
+
+ for user in users:
+ if (
+ user.get('displayName') == reviewer
+ or user.get('mailAddress') == reviewer
+ ):
+ reviewer_id = user.get('descriptor')
+ break
+
+ if not reviewer_id:
+ raise ValueError(f'Reviewer {reviewer} not found')
+
+ # Add reviewer to the pull request
+ reviewers_url = f'{self.repo_api_url}/pullRequests/{pr_number}/reviewers/{reviewer_id}?api-version=7.1'
+
+ reviewer_data = {
+ 'vote': 0, # No vote yet
+ }
+
+ response = httpx.put(
+ reviewers_url, headers=self.get_headers(), json=reviewer_data
+ )
+ response.raise_for_status()
+
+ def get_context_from_external_issues_references(
+ self,
+ closing_issues: list[str],
+ closing_issue_numbers: list[int],
+ issue_body: str,
+ review_comments: list[str] | None,
+ review_threads: list[ReviewThread],
+ thread_comments: list[str] | None,
+ ) -> list[str]:
+ """Get context from external issue references."""
+ context = []
+
+ # Add issue body
+ if issue_body:
+ context.append(f'Issue description:\n{issue_body}')
+
+ # Add thread comments
+ if thread_comments:
+ context.append('Thread comments:\n' + '\n'.join(thread_comments))
+
+ # Add review comments
+ if review_comments:
+ context.append('Review comments:\n' + '\n'.join(review_comments))
+
+ # Add review threads
+ if review_threads:
+ for thread in review_threads:
+ context.append(
+ f'Review thread for files {", ".join(thread.files)}:\n{thread.comment}'
+ )
+
+ return context
+
+ def get_converted_issues(
+ self, issue_numbers: list[int] | None = None, comment_id: int | None = None
+ ) -> list[Issue]:
+ """Download issues from Azure DevOps and convert them to the Issue model."""
+ if issue_numbers is None:
+ # Download all issues
+ work_items = self.download_issues()
+ else:
+ # Download specific issues
+ work_items = []
+ for issue_number in issue_numbers:
+ work_item_url = f'{self.work_items_api_url}/workitems/{issue_number}?api-version=7.1&$expand=all'
+
+ response = httpx.get(work_item_url, headers=self.get_headers())
+ response.raise_for_status()
+
+ work_items.append(response.json())
+
+ issues = []
+ for work_item in work_items:
+ # Get basic issue information
+ issue_number = work_item.get('id')
+ title = work_item.get('fields', {}).get('System.Title', '')
+ description = work_item.get('fields', {}).get('System.Description', '')
+
+ # Get comments
+ thread_comments = self.get_issue_comments(issue_number, comment_id)
+
+ # Check if this is a pull request work item
+ is_pr = False
+ pr_number = None
+ head_branch = None
+ base_branch = None
+
+ # Look for PR links in the work item relations
+ for relation in work_item.get('relations', []):
+ if relation.get(
+ 'rel'
+ ) == 'ArtifactLink' and 'pullrequest' in relation.get('url', ''):
+ is_pr = True
+ # Extract PR number from URL
+ pr_url = relation.get('url', '')
+ pr_match = re.search(r'pullRequests/(\d+)', pr_url)
+ if pr_match:
+ pr_number = int(pr_match.group(1))
+ break
+
+ # If this is a PR, get the branch information
+ if is_pr and pr_number:
+ pr_url = f'{self.repo_api_url}/pullRequests/{pr_number}?api-version=7.1'
+
+ pr_response = httpx.get(pr_url, headers=self.get_headers())
+ pr_response.raise_for_status()
+
+ pr_data = pr_response.json()
+ head_branch = pr_data.get('sourceRefName', '').replace(
+ 'refs/heads/', ''
+ )
+ base_branch = pr_data.get('targetRefName', '').replace(
+ 'refs/heads/', ''
+ )
+
+ # Get PR review comments
+ review_comments = []
+ review_threads = []
+
+ threads_url = f'{self.repo_api_url}/pullRequests/{pr_number}/threads?api-version=7.1'
+
+ threads_response = httpx.get(threads_url, headers=self.get_headers())
+ threads_response.raise_for_status()
+
+ threads = threads_response.json().get('value', [])
+
+ for thread in threads:
+ thread_comments = [
+ comment.get('content', '')
+ for comment in thread.get('comments', [])
+ ]
+ review_comments.extend(thread_comments)
+
+ # Get files associated with this thread
+ thread_files = []
+ if thread.get('threadContext', {}).get('filePath'):
+ thread_files.append(
+ thread.get('threadContext', {}).get('filePath')
+ )
+
+ if thread_comments:
+ review_threads.append(
+ ReviewThread(
+ comment='\n'.join(thread_comments),
+ files=thread_files,
+ )
+ )
+
+ # Create the Issue object
+ issue = Issue(
+ owner=self.owner,
+ repo=self.repository,
+ number=issue_number,
+ title=title,
+ body=description,
+ thread_comments=thread_comments,
+ closing_issues=None,
+ review_comments=review_comments if is_pr else None,
+ review_threads=review_threads if is_pr else None,
+ thread_ids=None,
+ head_branch=head_branch,
+ base_branch=base_branch,
+ )
+
+ issues.append(issue)
+
+ return issues
diff --git a/openhands/resolver/interfaces/issue.py b/openhands/resolver/interfaces/issue.py
index e293d9095050..c491acfd898c 100644
--- a/openhands/resolver/interfaces/issue.py
+++ b/openhands/resolver/interfaces/issue.py
@@ -121,5 +121,5 @@ def get_context_from_external_issues_references(
def get_converted_issues(
self, issue_numbers: list[int] | None = None, comment_id: int | None = None
) -> list[Issue]:
- """Download issues from the git provider (GitHub, GitLab, or Bitbucket)."""
+ """Download issues from the git provider (GitHub, GitLab, Bitbucket, or Azure DevOps)."""
pass
diff --git a/openhands/resolver/issue_handler_factory.py b/openhands/resolver/issue_handler_factory.py
index 528644ed2184..45b927f69614 100644
--- a/openhands/resolver/issue_handler_factory.py
+++ b/openhands/resolver/issue_handler_factory.py
@@ -1,5 +1,6 @@
from openhands.core.config import LLMConfig
from openhands.integrations.provider import ProviderType
+from openhands.resolver.interfaces.azure_devops import AzureDevOpsIssueHandler
from openhands.resolver.interfaces.bitbucket import (
BitbucketIssueHandler,
BitbucketPRHandler,
@@ -68,6 +69,26 @@ def create(self) -> ServiceContextIssue | ServiceContextPR:
),
self.llm_config,
)
+ elif self.platform == ProviderType.AZURE_DEVOPS:
+ # Parse owner as organization/project
+ parts = self.owner.split('/')
+ if len(parts) < 2:
+ raise ValueError(
+ f'Invalid Azure DevOps owner format: {self.owner}. Expected format: organization/project'
+ )
+
+ organization = parts[0]
+ project = parts[1]
+
+ return ServiceContextIssue(
+ AzureDevOpsIssueHandler(
+ self.token,
+ organization,
+ project,
+ self.repo,
+ ),
+ self.llm_config,
+ )
else:
raise ValueError(f'Unsupported platform: {self.platform}')
elif self.issue_type == 'pr':
@@ -104,6 +125,27 @@ def create(self) -> ServiceContextIssue | ServiceContextPR:
),
self.llm_config,
)
+ elif self.platform == ProviderType.AZURE_DEVOPS:
+ # Parse owner as organization/project
+ parts = self.owner.split('/')
+ if len(parts) < 2:
+ raise ValueError(
+ f'Invalid Azure DevOps owner format: {self.owner}. Expected format: organization/project'
+ )
+
+ organization = parts[0]
+ project = parts[1]
+
+ # For now, use the same handler for both issues and PRs
+ return ServiceContextPR(
+ AzureDevOpsIssueHandler(
+ self.token,
+ organization,
+ project,
+ self.repo,
+ ),
+ self.llm_config,
+ )
else:
raise ValueError(f'Unsupported platform: {self.platform}')
else:
diff --git a/openhands/resolver/issue_resolver.py b/openhands/resolver/issue_resolver.py
index 143553818ed2..155c5ce2e177 100644
--- a/openhands/resolver/issue_resolver.py
+++ b/openhands/resolver/issue_resolver.py
@@ -81,6 +81,7 @@ def __init__(self, args: Namespace) -> None:
or os.getenv('GITHUB_TOKEN')
or os.getenv('GITLAB_TOKEN')
or os.getenv('BITBUCKET_TOKEN')
+ or os.getenv('AZURE_DEVOPS_TOKEN')
)
username = args.username if args.username else os.getenv('GIT_USERNAME')
if not username:
@@ -130,6 +131,8 @@ def __init__(self, args: Namespace) -> None:
else 'gitlab.com'
if platform == ProviderType.GITLAB
else 'bitbucket.org'
+ if platform == ProviderType.BITBUCKET
+ else 'dev.azure.com'
)
self.output_dir = args.output_dir
diff --git a/openhands/resolver/resolve_issue.py b/openhands/resolver/resolve_issue.py
index bd050386e52f..cdd6d6494326 100644
--- a/openhands/resolver/resolve_issue.py
+++ b/openhands/resolver/resolve_issue.py
@@ -122,7 +122,7 @@ def int_or_none(value: str) -> int | None:
'--base-domain',
type=str,
default=None,
- help='Base domain for the git server (defaults to "github.com" for GitHub, "gitlab.com" for GitLab, and "bitbucket.org" for Bitbucket)',
+ help='Base domain for the git server (defaults to "github.com" for GitHub, "gitlab.com" for GitLab, "bitbucket.org" for Bitbucket, and "dev.azure.com" for Azure DevOps)',
)
my_args = parser.parse_args()
diff --git a/openhands/resolver/send_pull_request.py b/openhands/resolver/send_pull_request.py
index 047592c9cc79..d6dd4830db64 100644
--- a/openhands/resolver/send_pull_request.py
+++ b/openhands/resolver/send_pull_request.py
@@ -11,6 +11,7 @@
from openhands.core.logger import openhands_logger as logger
from openhands.integrations.service_types import ProviderType
from openhands.llm.llm import LLM
+from openhands.resolver.interfaces.azure_devops import AzureDevOpsIssueHandler
from openhands.resolver.interfaces.bitbucket import BitbucketIssueHandler
from openhands.resolver.interfaces.github import GithubIssueHandler
from openhands.resolver.interfaces.gitlab import GitlabIssueHandler
@@ -247,7 +248,7 @@ def send_pull_request(
git_user_name: str = 'openhands',
git_user_email: str = 'openhands@all-hands.dev',
) -> str:
- """Send a pull request to a GitHub, GitLab, or Bitbucket repository.
+ """Send a pull request to a GitHub, GitLab, Bitbucket, or Azure DevOps repository.
Args:
issue: The issue to send the pull request for
@@ -261,7 +262,7 @@ def send_pull_request(
target_branch: The target branch to create the pull request against (defaults to repository default branch)
reviewer: The username of the reviewer to assign
pr_title: Custom title for the pull request (optional)
- base_domain: The base domain for the git server (defaults to "github.com" for GitHub, "gitlab.com" for GitLab, and "bitbucket.org" for Bitbucket)
+ base_domain: The base domain for the git server (defaults to "github.com" for GitHub, "gitlab.com" for GitLab, "bitbucket.org" for Bitbucket, and "dev.azure.com" for Azure DevOps)
"""
if pr_type not in ['branch', 'draft', 'ready']:
raise ValueError(f'Invalid pr_type: {pr_type}')
@@ -272,6 +273,8 @@ def send_pull_request(
base_domain = 'github.com'
elif platform == ProviderType.GITLAB:
base_domain = 'gitlab.com'
+ elif platform == ProviderType.AZURE_DEVOPS:
+ base_domain = 'dev.azure.com'
else: # platform == ProviderType.BITBUCKET
base_domain = 'bitbucket.org'
@@ -294,6 +297,13 @@ def send_pull_request(
),
None,
)
+ elif platform == ProviderType.AZURE_DEVOPS:
+ # For Azure DevOps, owner is "organization/project"
+ organization, project = issue.owner.split('/')
+ handler = ServiceContextIssue(
+ AzureDevOpsIssueHandler(token, organization, project, issue.repo),
+ None,
+ )
else:
raise ValueError(f'Unsupported platform: {platform}')
@@ -413,13 +423,19 @@ def update_existing_pull_request(
llm_config: The LLM configuration to use for summarizing changes.
comment_message: The main message to post as a comment on the PR.
additional_message: The additional messages to post as a comment on the PR in json list format.
- base_domain: The base domain for the git server (defaults to "github.com" for GitHub and "gitlab.com" for GitLab)
+ base_domain: The base domain for the git server (defaults to "github.com" for GitHub, "gitlab.com" for GitLab, and "dev.azure.com" for Azure DevOps)
"""
# Set up headers and base URL for GitHub or GitLab API
# Determine default base_domain based on platform
if base_domain is None:
- base_domain = 'github.com' if platform == ProviderType.GITHUB else 'gitlab.com'
+ base_domain = (
+ 'github.com'
+ if platform == ProviderType.GITHUB
+ else 'gitlab.com'
+ if platform == ProviderType.GITLAB
+ else 'dev.azure.com'
+ )
handler = None
if platform == ProviderType.GITHUB:
@@ -427,7 +443,14 @@ def update_existing_pull_request(
GithubIssueHandler(issue.owner, issue.repo, token, username, base_domain),
llm_config,
)
- else: # platform == Platform.GITLAB
+ elif platform == ProviderType.AZURE_DEVOPS:
+ # For Azure DevOps, owner is "organization/project"
+ organization, project = issue.owner.split('/')
+ handler = ServiceContextIssue(
+ AzureDevOpsIssueHandler(token, organization, project, issue.repo),
+ llm_config,
+ )
+ else: # platform == ProviderType.GITLAB
handler = ServiceContextIssue(
GitlabIssueHandler(issue.owner, issue.repo, token, username, base_domain),
llm_config,
@@ -519,7 +542,13 @@ def process_single_issue(
) -> None:
# Determine default base_domain based on platform
if base_domain is None:
- base_domain = 'github.com' if platform == ProviderType.GITHUB else 'gitlab.com'
+ base_domain = (
+ 'github.com'
+ if platform == ProviderType.GITHUB
+ else 'gitlab.com'
+ if platform == ProviderType.GITLAB
+ else 'dev.azure.com'
+ )
if not resolver_output.success and not send_on_failure:
logger.info(
f'Issue {resolver_output.issue.number} was not successfully resolved. Skipping PR creation.'
@@ -587,7 +616,7 @@ def process_single_issue(
def main() -> None:
parser = argparse.ArgumentParser(
- description='Send a pull request to Github or Gitlab.'
+ description='Send a pull request to Github, Gitlab, or Azure DevOps.'
)
parser.add_argument(
'--selected-repo',
@@ -664,7 +693,7 @@ def main() -> None:
parser.add_argument(
'--reviewer',
type=str,
- help='GitHub or GitLab username of the person to request review from',
+ help='GitHub, GitLab, or Azure DevOps username of the person to request review from',
default=None,
)
parser.add_argument(
@@ -677,7 +706,7 @@ def main() -> None:
'--base-domain',
type=str,
default=None,
- help='Base domain for the git server (defaults to "github.com" for GitHub and "gitlab.com" for GitLab)',
+ help='Base domain for the git server (defaults to "github.com" for GitHub, "gitlab.com" for GitLab, and "dev.azure.com" for Azure DevOps)',
)
parser.add_argument(
'--git-user-name',
@@ -693,10 +722,15 @@ def main() -> None:
)
my_args = parser.parse_args()
- token = my_args.token or os.getenv('GITHUB_TOKEN') or os.getenv('GITLAB_TOKEN')
+ token = (
+ my_args.token
+ or os.getenv('GITHUB_TOKEN')
+ or os.getenv('GITLAB_TOKEN')
+ or os.getenv('AZURE_DEVOPS_TOKEN')
+ )
if not token:
raise ValueError(
- 'token is not set, set via --token or GITHUB_TOKEN or GITLAB_TOKEN environment variable.'
+ 'token is not set, set via --token or GITHUB_TOKEN, GITLAB_TOKEN, or AZURE_DEVOPS_TOKEN environment variable.'
)
username = my_args.username if my_args.username else os.getenv('GIT_USERNAME')
diff --git a/openhands/resolver/utils.py b/openhands/resolver/utils.py
index 527727fba300..cc92223937e8 100644
--- a/openhands/resolver/utils.py
+++ b/openhands/resolver/utils.py
@@ -16,7 +16,7 @@
async def identify_token(token: str, base_domain: str | None) -> ProviderType:
- """Identifies whether a token belongs to GitHub, GitLab, or Bitbucket.
+ """Identifies whether a token belongs to GitHub, GitLab, Bitbucket, or Azure DevOps.
Parameters:
token (str): The personal access token to check.
base_domain (str): Custom base domain for provider (e.g GitHub Enterprise)
diff --git a/openhands/runtime/base.py b/openhands/runtime/base.py
index 2c271cc8f177..5eb5429f71ca 100644
--- a/openhands/runtime/base.py
+++ b/openhands/runtime/base.py
@@ -700,6 +700,29 @@ def _is_gitlab_repository(self, repo_name: str) -> bool:
# This is a safe fallback since we'll just use the default .openhands
return False
+ def _is_azure_devops_repository(self, repo_name: str) -> bool:
+ """Check if a repository is hosted on Azure DevOps.
+
+ Args:
+ repo_name: Repository name (e.g., "org/project/repo")
+
+ Returns:
+ True if the repository is hosted on Azure DevOps, False otherwise
+ """
+ try:
+ provider_handler = ProviderHandler(
+ self.git_provider_tokens or MappingProxyType({})
+ )
+ repository = call_async_from_sync(
+ provider_handler.verify_repo_provider,
+ GENERAL_TIMEOUT,
+ repo_name,
+ )
+ return repository.git_provider == ProviderType.AZURE_DEVOPS
+ except Exception:
+ # If we can't determine the provider, assume it's not Azure DevOps
+ return False
+
def get_microagents_from_org_or_user(
self, selected_repository: str
) -> list[BaseMicroagent]:
@@ -713,6 +736,9 @@ def get_microagents_from_org_or_user(
since GitLab doesn't support repository names starting with non-alphanumeric
characters.
+ For Azure DevOps repositories, it will use org/openhands-config/openhands-config
+ format to match Azure DevOps's three-part repository structure (org/project/repo).
+
Args:
selected_repository: The repository path (e.g., "github.com/acme-co/api")
@@ -735,24 +761,35 @@ def get_microagents_from_org_or_user(
)
return loaded_microagents
- # Extract the domain and org/user name
- org_name = repo_parts[-2]
+ # Determine repository type
+ is_azure_devops = self._is_azure_devops_repository(selected_repository)
+ is_gitlab = self._is_gitlab_repository(selected_repository)
+
+ # Extract the org/user name
+ # Azure DevOps format: org/project/repo (3 parts) - extract org (first part)
+ # GitHub/GitLab/Bitbucket format: owner/repo (2 parts) - extract owner (first part)
+ if is_azure_devops and len(repo_parts) >= 3:
+ org_name = repo_parts[0] # Get org from org/project/repo
+ else:
+ org_name = repo_parts[-2] # Get owner from owner/repo
+
self.log(
'info',
f'Extracted org/user name: {org_name}',
)
-
- # Determine if this is a GitLab repository
- is_gitlab = self._is_gitlab_repository(selected_repository)
self.log(
'debug',
- f'Repository type detection - is_gitlab: {is_gitlab}',
+ f'Repository type detection - is_gitlab: {is_gitlab}, is_azure_devops: {is_azure_devops}',
)
- # For GitLab, use openhands-config (since .openhands is not a valid repo name)
+ # For GitLab and Azure DevOps, use openhands-config (since .openhands is not a valid repo name)
# For other providers, use .openhands
if is_gitlab:
org_openhands_repo = f'{org_name}/openhands-config'
+ elif is_azure_devops:
+ # Azure DevOps format: org/project/repo
+ # For org-level config, use: org/openhands-config/openhands-config
+ org_openhands_repo = f'{org_name}/openhands-config/openhands-config'
else:
org_openhands_repo = f'{org_name}/.openhands'
diff --git a/openhands/runtime/builder/docker.py b/openhands/runtime/builder/docker.py
index 39a7982cd518..5f0fb2027b26 100644
--- a/openhands/runtime/builder/docker.py
+++ b/openhands/runtime/builder/docker.py
@@ -19,8 +19,11 @@ def __init__(self, docker_client: docker.DockerClient):
version_info = self.docker_client.version()
server_version = version_info.get('Version', '').replace('-', '.')
+ components = version_info.get('Components')
self.is_podman = (
- version_info.get('Components')[0].get('Name').startswith('Podman')
+ components is not None
+ and len(components) > 0
+ and components[0].get('Name', '').startswith('Podman')
)
if (
tuple(map(int, server_version.split('.')[:2])) < (18, 9)
@@ -79,8 +82,11 @@ def build(
self.docker_client = docker.from_env()
version_info = self.docker_client.version()
server_version = version_info.get('Version', '').split('+')[0].replace('-', '.')
+ components = version_info.get('Components')
self.is_podman = (
- version_info.get('Components')[0].get('Name').startswith('Podman')
+ components is not None
+ and len(components) > 0
+ and components[0].get('Name', '').startswith('Podman')
)
if tuple(map(int, server_version.split('.'))) < (18, 9) and not self.is_podman:
raise AgentRuntimeBuildError(
diff --git a/openhands/runtime/impl/docker/containers.py b/openhands/runtime/impl/docker/containers.py
index 25764b027488..32a5ba1353e2 100644
--- a/openhands/runtime/impl/docker/containers.py
+++ b/openhands/runtime/impl/docker/containers.py
@@ -7,7 +7,7 @@ def stop_all_containers(prefix: str) -> None:
containers = docker_client.containers.list(all=True)
for container in containers:
try:
- if container.name.startswith(prefix):
+ if container.name and container.name.startswith(prefix):
container.stop()
except docker.errors.APIError:
pass
diff --git a/openhands/runtime/utils/runtime_build.py b/openhands/runtime/utils/runtime_build.py
index 3b5281871ada..b4cb807b71e3 100644
--- a/openhands/runtime/utils/runtime_build.py
+++ b/openhands/runtime/utils/runtime_build.py
@@ -247,9 +247,9 @@ def build_runtime_image_in_folder(
lock_tag=lock_tag,
# Only tag the versioned image if we are building from scratch.
# This avoids too much layers when you lay one image on top of another multiple times
- versioned_tag=versioned_tag
- if build_from == BuildFromImageType.SCRATCH
- else None,
+ versioned_tag=(
+ versioned_tag if build_from == BuildFromImageType.SCRATCH else None
+ ),
platform=platform,
extra_build_args=extra_build_args,
)
@@ -282,10 +282,8 @@ def prep_build_folder(
),
)
- # Copy the 'microagents' directory (Microagents)
- shutil.copytree(
- Path(project_root, 'microagents'), Path(build_folder, 'code', 'microagents')
- )
+ # Copy the 'skills' directory (Skills)
+ shutil.copytree(Path(project_root, 'skills'), Path(build_folder, 'code', 'skills'))
# Copy pyproject.toml and poetry.lock files
for file in ['pyproject.toml', 'poetry.lock']:
diff --git a/openhands/runtime/utils/runtime_templates/Dockerfile.j2 b/openhands/runtime/utils/runtime_templates/Dockerfile.j2
index 51c0de535f3c..421f5acbdaa5 100644
--- a/openhands/runtime/utils/runtime_templates/Dockerfile.j2
+++ b/openhands/runtime/utils/runtime_templates/Dockerfile.j2
@@ -336,8 +336,8 @@ RUN \
# ================================================================
RUN if [ -d /openhands/code/openhands ]; then rm -rf /openhands/code/openhands; fi
COPY --chown=openhands:openhands ./code/pyproject.toml ./code/poetry.lock /openhands/code/
-RUN if [ -d /openhands/code/microagents ]; then rm -rf /openhands/code/microagents; fi
-COPY --chown=openhands:openhands ./code/microagents /openhands/code/microagents
+RUN if [ -d /openhands/code/skills ]; then rm -rf /openhands/code/skills; fi
+COPY --chown=openhands:openhands ./code/skills /openhands/code/skills
COPY --chown=openhands:openhands ./code/openhands /openhands/code/openhands
RUN chmod a+rwx /openhands/code/openhands/__init__.py && \
chown -R openhands:openhands /openhands/code
diff --git a/openhands/server/data_models/conversation_info.py b/openhands/server/data_models/conversation_info.py
index f4c4a77809a6..78af0e3dc121 100644
--- a/openhands/server/data_models/conversation_info.py
+++ b/openhands/server/data_models/conversation_info.py
@@ -28,3 +28,4 @@ class ConversationInfo:
created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
pr_number: list[int] = field(default_factory=list)
conversation_version: str = 'V0'
+ sub_conversation_ids: list[str] = field(default_factory=list)
diff --git a/openhands/server/routes/git.py b/openhands/server/routes/git.py
index 753cf6f12f16..a6807a2e2a4a 100644
--- a/openhands/server/routes/git.py
+++ b/openhands/server/routes/git.py
@@ -53,6 +53,8 @@ async def get_user_installations(
return await client.get_github_installations()
elif provider == ProviderType.BITBUCKET:
return await client.get_bitbucket_workspaces()
+ elif provider == ProviderType.AZURE_DEVOPS:
+ return await client.get_azure_devops_organizations()
else:
return JSONResponse(
content=f"Provider {provider} doesn't support installations",
diff --git a/openhands/server/routes/manage_conversations.py b/openhands/server/routes/manage_conversations.py
index 8984f79e8c49..babbc48654cf 100644
--- a/openhands/server/routes/manage_conversations.py
+++ b/openhands/server/routes/manage_conversations.py
@@ -1,3 +1,4 @@
+import asyncio
import base64
import itertools
import json
@@ -5,12 +6,15 @@
import re
import uuid
from datetime import datetime, timedelta, timezone
+from typing import Annotated
import base62
-from fastapi import APIRouter, Depends, status
+import httpx
+from fastapi import APIRouter, Depends, Query, Request, status
from fastapi.responses import JSONResponse
from jinja2 import Environment, FileSystemLoader
from pydantic import BaseModel, ConfigDict, Field
+from sqlalchemy.ext.asyncio import AsyncSession
from openhands.app_server.app_conversation.app_conversation_info_service import (
AppConversationInfoService,
@@ -24,9 +28,15 @@
from openhands.app_server.config import (
depends_app_conversation_info_service,
depends_app_conversation_service,
+ depends_db_session,
+ depends_httpx_client,
depends_sandbox_service,
)
from openhands.app_server.sandbox.sandbox_service import SandboxService
+from openhands.app_server.services.db_session_injector import set_db_session_keep_open
+from openhands.app_server.services.httpx_client_injector import (
+ set_httpx_client_keep_open,
+)
from openhands.core.config.llm_config import LLMConfig
from openhands.core.config.mcp_config import MCPConfig
from openhands.core.logger import openhands_logger as logger
@@ -99,6 +109,8 @@
app_conversation_service_dependency = depends_app_conversation_service()
app_conversation_info_service_dependency = depends_app_conversation_info_service()
sandbox_service_dependency = depends_sandbox_service()
+db_session_dependency = depends_db_session()
+httpx_client_dependency = depends_httpx_client()
def _filter_conversations_by_age(
@@ -304,6 +316,12 @@ async def search_conversations(
limit: int = 20,
selected_repository: str | None = None,
conversation_trigger: ConversationTrigger | None = None,
+ include_sub_conversations: Annotated[
+ bool,
+ Query(
+ title='If True, include sub-conversations in the results. If False (default), exclude all sub-conversations.'
+ ),
+ ] = False,
conversation_store: ConversationStore = Depends(get_conversation_store),
app_conversation_service: AppConversationService = app_conversation_service_dependency,
) -> ConversationInfoResultSet:
@@ -338,6 +356,7 @@ async def search_conversations(
limit=limit,
# Apply age filter at the service level if possible
created_at__gte=age_filter_date,
+ include_sub_conversations=include_sub_conversations,
)
# Convert V1 conversations to ConversationInfo format
@@ -467,16 +486,26 @@ async def get_conversation(
@app.delete('/conversations/{conversation_id}')
async def delete_conversation(
+ request: Request,
conversation_id: str = Depends(validate_conversation_id),
user_id: str | None = Depends(get_user_id),
app_conversation_service: AppConversationService = app_conversation_service_dependency,
+ app_conversation_info_service: AppConversationInfoService = app_conversation_info_service_dependency,
sandbox_service: SandboxService = sandbox_service_dependency,
+ db_session: AsyncSession = db_session_dependency,
+ httpx_client: httpx.AsyncClient = httpx_client_dependency,
) -> bool:
+ set_db_session_keep_open(request.state, True)
+ set_httpx_client_keep_open(request.state, True)
+
# Try V1 conversation first
v1_result = await _try_delete_v1_conversation(
conversation_id,
app_conversation_service,
+ app_conversation_info_service,
sandbox_service,
+ db_session,
+ httpx_client,
)
if v1_result is not None:
return v1_result
@@ -488,23 +517,40 @@ async def delete_conversation(
async def _try_delete_v1_conversation(
conversation_id: str,
app_conversation_service: AppConversationService,
+ app_conversation_info_service: AppConversationInfoService,
sandbox_service: SandboxService,
+ db_session: AsyncSession,
+ httpx_client: httpx.AsyncClient,
) -> bool | None:
"""Try to delete a V1 conversation. Returns None if not a V1 conversation."""
result = None
try:
conversation_uuid = uuid.UUID(conversation_id)
# Check if it's a V1 conversation by trying to get it
- app_conversation = await app_conversation_service.get_app_conversation(
- conversation_uuid
+ app_conversation_info = (
+ await app_conversation_info_service.get_app_conversation_info(
+ conversation_uuid
+ )
)
- if app_conversation:
+ if app_conversation_info:
# This is a V1 conversation, delete it using the app conversation service
# Pass the conversation ID for secure deletion
result = await app_conversation_service.delete_app_conversation(
- app_conversation.id
+ app_conversation_info.id
+ )
+
+ # Manually commit so that the conversation will vanish from the list
+ await db_session.commit()
+
+ # Delete the sandbox in the background
+ asyncio.create_task(
+ _delete_sandbox_and_close_connections(
+ sandbox_service,
+ app_conversation_info.sandbox_id,
+ db_session,
+ httpx_client,
+ )
)
- await sandbox_service.delete_sandbox(app_conversation.sandbox_id)
except (ValueError, TypeError):
# Not a valid UUID, continue with V0 logic
pass
@@ -515,6 +561,24 @@ async def _try_delete_v1_conversation(
return result
+async def _delete_sandbox_and_close_connections(
+ sandbox_service: SandboxService,
+ sandbox_id: str,
+ db_session: AsyncSession,
+ httpx_client: httpx.AsyncClient,
+):
+ try:
+ await sandbox_service.delete_sandbox(sandbox_id)
+ await db_session.commit()
+ finally:
+ await asyncio.gather(
+ *[
+ db_session.aclose(),
+ httpx_client.aclose(),
+ ]
+ )
+
+
async def _delete_v0_conversation(conversation_id: str, user_id: str | None) -> bool:
"""Delete a V0 conversation using the legacy logic."""
conversation_store = await ConversationStoreImpl.get_instance(config, user_id)
@@ -1157,6 +1221,7 @@ async def _fetch_v1_conversations_safe(
app_conversation_service: App conversation service for V1
v1_page_id: Page ID for V1 pagination
limit: Maximum number of results
+ include_sub_conversations: If True, include sub-conversations in results
Returns:
Tuple of (v1_conversations, v1_next_page_id)
@@ -1432,4 +1497,7 @@ def _to_conversation_info(app_conversation: AppConversation) -> ConversationInfo
created_at=app_conversation.created_at,
pr_number=app_conversation.pr_number,
conversation_version='V1',
+ sub_conversation_ids=[
+ sub_id.hex for sub_id in app_conversation.sub_conversation_ids
+ ],
)
diff --git a/openhands/server/routes/mcp.py b/openhands/server/routes/mcp.py
index b6426bffb1ab..929c66af5b9e 100644
--- a/openhands/server/routes/mcp.py
+++ b/openhands/server/routes/mcp.py
@@ -8,6 +8,9 @@
from pydantic import Field
from openhands.core.logger import openhands_logger as logger
+from openhands.integrations.azure_devops.azure_devops_service import (
+ AzureDevOpsServiceImpl,
+)
from openhands.integrations.bitbucket.bitbucket_service import BitBucketServiceImpl
from openhands.integrations.github.github_service import GithubServiceImpl
from openhands.integrations.gitlab.gitlab_service import GitLabServiceImpl
@@ -286,3 +289,70 @@ async def create_bitbucket_pr(
raise ToolError(str(error))
return response
+
+
+@mcp_server.tool()
+async def create_azure_devops_pr(
+ repo_name: Annotated[
+ str, Field(description='Azure DevOps repository (organization/project/repo)')
+ ],
+ source_branch: Annotated[str, Field(description='Source branch on repo')],
+ target_branch: Annotated[str, Field(description='Target branch on repo')],
+ title: Annotated[
+ str,
+ Field(
+ description='PR Title. Start title with `DRAFT:` or `WIP:` if applicable.'
+ ),
+ ],
+ description: Annotated[str | None, Field(description='PR description')],
+) -> str:
+ """Open a PR in Azure DevOps"""
+ logger.info('Calling OpenHands MCP create_azure_devops_pr')
+
+ request = get_http_request()
+ headers = request.headers
+ conversation_id = headers.get('X-OpenHands-ServerConversation-ID', None)
+
+ provider_tokens = await get_provider_tokens(request)
+ access_token = await get_access_token(request)
+ user_id = await get_user_id(request)
+
+ azure_devops_token = (
+ provider_tokens.get(ProviderType.AZURE_DEVOPS, ProviderToken())
+ if provider_tokens
+ else ProviderToken()
+ )
+
+ azure_devops_service = AzureDevOpsServiceImpl(
+ user_id=azure_devops_token.user_id,
+ external_auth_id=user_id,
+ external_auth_token=access_token,
+ token=azure_devops_token.token,
+ base_domain=azure_devops_token.host,
+ )
+
+ try:
+ description = await get_conversation_link(
+ azure_devops_service, conversation_id, description or ''
+ )
+ except Exception as e:
+ logger.warning(f'Failed to append conversation link: {e}')
+
+ try:
+ response = await azure_devops_service.create_pr(
+ repo_name=repo_name,
+ source_branch=source_branch,
+ target_branch=target_branch,
+ title=title,
+ body=description,
+ )
+
+ if conversation_id and user_id:
+ await save_pr_metadata(user_id, conversation_id, response)
+
+ except Exception as e:
+ error = f'Error creating pull request: {e}'
+ logger.error(error)
+ raise ToolError(str(error))
+
+ return response
diff --git a/openhands/server/services/conversation_service.py b/openhands/server/services/conversation_service.py
index 927e55ce5831..ac2e06b8cdd1 100644
--- a/openhands/server/services/conversation_service.py
+++ b/openhands/server/services/conversation_service.py
@@ -7,7 +7,7 @@
from openhands.events.action.message import MessageAction
from openhands.experiments.experiment_manager import ExperimentManagerImpl
from openhands.integrations.provider import (
- CUSTOM_SECRETS_TYPE_WITH_JSON_SCHEMA,
+ CUSTOM_SECRETS_TYPE,
PROVIDER_TOKEN_TYPE,
ProviderToken,
)
@@ -73,7 +73,7 @@ async def initialize_conversation(
async def start_conversation(
user_id: str | None,
git_provider_tokens: PROVIDER_TOKEN_TYPE | None,
- custom_secrets: CUSTOM_SECRETS_TYPE_WITH_JSON_SCHEMA | None,
+ custom_secrets: CUSTOM_SECRETS_TYPE | None,
initial_user_msg: str | None,
image_urls: list[str] | None,
replay_json: str | None,
@@ -164,7 +164,7 @@ async def start_conversation(
async def create_new_conversation(
user_id: str | None,
git_provider_tokens: PROVIDER_TOKEN_TYPE | None,
- custom_secrets: CUSTOM_SECRETS_TYPE_WITH_JSON_SCHEMA | None,
+ custom_secrets: CUSTOM_SECRETS_TYPE | None,
selected_repository: str | None,
selected_branch: str | None,
initial_user_msg: str | None,
diff --git a/openhands/server/session/conversation_init_data.py b/openhands/server/session/conversation_init_data.py
index cdf76db97702..c1bf660c2840 100644
--- a/openhands/server/session/conversation_init_data.py
+++ b/openhands/server/session/conversation_init_data.py
@@ -1,4 +1,7 @@
-from pydantic import ConfigDict, Field
+from collections.abc import Mapping
+from types import MappingProxyType
+
+from pydantic import ConfigDict, Field, field_validator
from openhands.integrations.provider import CUSTOM_SECRETS_TYPE, PROVIDER_TOKEN_TYPE
from openhands.integrations.service_types import ProviderType
@@ -19,3 +22,17 @@ class ConversationInitData(Settings):
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
+
+ @field_validator('git_provider_tokens', 'custom_secrets')
+ @classmethod
+ def immutable_validator(cls, value: Mapping | None) -> MappingProxyType | None:
+ """Ensure git_provider_tokens and custom_secrets are always MappingProxyType.
+
+ This validator converts any Mapping (including dict) to MappingProxyType,
+ ensuring type safety and immutability. If the value is None, it returns None.
+ """
+ if value is None:
+ return None
+ if isinstance(value, MappingProxyType):
+ return value
+ return MappingProxyType(value)
diff --git a/openhands/server/user_auth/default_user_auth.py b/openhands/server/user_auth/default_user_auth.py
index 2e0a7b5af992..8bc79af1561e 100644
--- a/openhands/server/user_auth/default_user_auth.py
+++ b/openhands/server/user_auth/default_user_auth.py
@@ -88,6 +88,9 @@ async def get_provider_tokens(self) -> PROVIDER_TOKEN_TYPE | None:
return None
return user_secrets.provider_tokens
+ async def get_mcp_api_key(self) -> str | None:
+ return None
+
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
user_auth = DefaultUserAuth()
diff --git a/openhands/server/user_auth/user_auth.py b/openhands/server/user_auth/user_auth.py
index e370d3247438..c61c9ceb8bfb 100644
--- a/openhands/server/user_auth/user_auth.py
+++ b/openhands/server/user_auth/user_auth.py
@@ -75,6 +75,10 @@ async def get_secrets(self) -> Secrets | None:
def get_auth_type(self) -> AuthType | None:
return None
+ @abstractmethod
+ async def get_mcp_api_key(self) -> str | None:
+ """Get an mcp api key for the user"""
+
@classmethod
@abstractmethod
async def get_instance(cls, request: Request) -> UserAuth:
diff --git a/openhands/storage/data_models/secrets.py b/openhands/storage/data_models/secrets.py
index ce5302e754af..69b60e9730d0 100644
--- a/openhands/storage/data_models/secrets.py
+++ b/openhands/storage/data_models/secrets.py
@@ -1,3 +1,4 @@
+from collections.abc import Mapping
from types import MappingProxyType
from typing import Any
@@ -7,6 +8,7 @@
Field,
SerializationInfo,
field_serializer,
+ field_validator,
model_validator,
)
from pydantic.json import pydantic_encoder
@@ -14,9 +16,7 @@
from openhands.events.stream import EventStream
from openhands.integrations.provider import (
CUSTOM_SECRETS_TYPE,
- CUSTOM_SECRETS_TYPE_WITH_JSON_SCHEMA,
PROVIDER_TOKEN_TYPE,
- PROVIDER_TOKEN_TYPE_WITH_JSON_SCHEMA,
CustomSecret,
ProviderToken,
)
@@ -24,11 +24,11 @@
class Secrets(BaseModel):
- provider_tokens: PROVIDER_TOKEN_TYPE_WITH_JSON_SCHEMA = Field(
+ provider_tokens: PROVIDER_TOKEN_TYPE = Field(
default_factory=lambda: MappingProxyType({})
)
- custom_secrets: CUSTOM_SECRETS_TYPE_WITH_JSON_SCHEMA = Field(
+ custom_secrets: CUSTOM_SECRETS_TYPE = Field(
default_factory=lambda: MappingProxyType({})
)
@@ -38,6 +38,11 @@ class Secrets(BaseModel):
arbitrary_types_allowed=True,
)
+ @field_validator('provider_tokens', 'custom_secrets')
+ @classmethod
+ def immutable_validator(cls, value: Mapping) -> MappingProxyType:
+ return MappingProxyType(value)
+
@field_serializer('provider_tokens')
def provider_tokens_serializer(
self, provider_tokens: PROVIDER_TOKEN_TYPE, info: SerializationInfo
diff --git a/openhands/storage/data_models/settings.py b/openhands/storage/data_models/settings.py
index 72785c1822ab..0dc9b99e6238 100644
--- a/openhands/storage/data_models/settings.py
+++ b/openhands/storage/data_models/settings.py
@@ -1,5 +1,7 @@
from __future__ import annotations
+import os
+
from pydantic import (
BaseModel,
ConfigDict,
@@ -48,6 +50,7 @@ class Settings(BaseModel):
email_verified: bool | None = None
git_user_name: str | None = None
git_user_email: str | None = None
+ v1_enabled: bool | None = Field(default=bool(os.getenv('V1_ENABLED') == '1'))
model_config = ConfigDict(
validate_assignment=True,
diff --git a/openhands/utils/README.md b/openhands/utils/README.md
index 634a9b142460..37b208dcf5cb 100644
--- a/openhands/utils/README.md
+++ b/openhands/utils/README.md
@@ -57,6 +57,7 @@ OpenHands provides several components that can be extended:
3. Service Integrations:
- GitHub service
- GitLab service
+ - Azure DevOps service
### Implementation Details
diff --git a/openhands/utils/import_utils.py b/openhands/utils/import_utils.py
index 905904c3ff55..de2bd0ca8519 100644
--- a/openhands/utils/import_utils.py
+++ b/openhands/utils/import_utils.py
@@ -66,7 +66,7 @@ def get_impl(cls: type[T], impl_name: str | None) -> type[T]:
Common Use Cases:
- Server components (ConversationManager, UserAuth, etc.)
- Storage implementations (ConversationStore, SettingsStore, etc.)
- - Service integrations (GitHub, GitLab, Bitbucket services)
+ - Service integrations (GitHub, GitLab, Bitbucket, Azure DevOps services)
The implementation is cached to avoid repeated imports of the same class.
"""
diff --git a/poetry.lock b/poetry.lock
index b1d96842994d..06e67f3ca271 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -254,14 +254,14 @@ files = [
[[package]]
name = "anthropic"
-version = "0.72.0"
+version = "0.75.0"
description = "The official Python library for the anthropic API"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main"]
files = [
- {file = "anthropic-0.72.0-py3-none-any.whl", hash = "sha256:0e9f5a7582f038cab8efbb4c959e49ef654a56bfc7ba2da51b5a7b8a84de2e4d"},
- {file = "anthropic-0.72.0.tar.gz", hash = "sha256:8971fe76dcffc644f74ac3883069beb1527641115ae0d6eb8fa21c1ce4082f7a"},
+ {file = "anthropic-0.75.0-py3-none-any.whl", hash = "sha256:ea8317271b6c15d80225a9f3c670152746e88805a7a61e14d4a374577164965b"},
+ {file = "anthropic-0.75.0.tar.gz", hash = "sha256:e8607422f4ab616db2ea5baacc215dd5f028da99ce2f022e33c7c535b29f3dfb"},
]
[package.dependencies]
@@ -1205,34 +1205,37 @@ botocore = ["botocore"]
[[package]]
name = "browser-use"
-version = "0.8.0"
+version = "0.10.1"
description = "Make websites accessible for AI agents"
optional = false
python-versions = "<4.0,>=3.11"
groups = ["main"]
files = [
- {file = "browser_use-0.8.0-py3-none-any.whl", hash = "sha256:b7c299e38ec1c1aec42a236cc6ad2268a366226940d6ff9d88ed461afd5a1cc3"},
- {file = "browser_use-0.8.0.tar.gz", hash = "sha256:2136eb3251424f712a08ee379c9337237c2f93b29b566807db599cf94e6abb5e"},
+ {file = "browser_use-0.10.1-py3-none-any.whl", hash = "sha256:96e603bfc71098175342cdcb0592519e6f244412e740f0254e4389fdd82a977f"},
+ {file = "browser_use-0.10.1.tar.gz", hash = "sha256:5f211ecfdf1f9fd186160f10df70dedd661821231e30f1bce40939787abab223"},
]
[package.dependencies]
aiohttp = "3.12.15"
-anthropic = ">=0.68.1,<1.0.0"
+anthropic = ">=0.72.1,<1.0.0"
anyio = ">=4.9.0"
authlib = ">=1.6.0"
bubus = ">=1.5.6"
-cdp-use = ">=1.4.0"
+cdp-use = ">=1.4.4"
+click = ">=8.1.8"
+cloudpickle = ">=3.1.1"
google-api-core = ">=2.25.0"
google-api-python-client = ">=2.174.0"
google-auth = ">=2.40.3"
google-auth-oauthlib = ">=1.2.2"
-google-genai = ">=1.29.0,<2.0.0"
+google-genai = ">=1.50.0,<2.0.0"
groq = ">=0.30.0"
-html2text = ">=2025.4.15"
httpx = ">=0.28.1"
+inquirerpy = ">=0.3.4"
+markdownify = ">=1.2.0"
mcp = ">=1.10.1"
ollama = ">=0.5.1"
-openai = ">=1.99.2,<2.0.0"
+openai = ">=2.7.2,<3.0.0"
pillow = ">=11.2.1"
portalocker = ">=2.7.0,<3.0.0"
posthog = ">=3.7.0"
@@ -1241,19 +1244,24 @@ pydantic = ">=2.11.5"
pyobjc = {version = ">=11.0", markers = "platform_system == \"darwin\""}
pyotp = ">=2.9.0"
pypdf = ">=5.7.0"
+python-docx = ">=1.2.0"
python-dotenv = ">=1.0.1"
reportlab = ">=4.0.0"
requests = ">=2.32.3"
+rich = ">=14.0.0"
screeninfo = {version = ">=0.8.1", markers = "platform_system != \"darwin\""}
typing-extensions = ">=4.12.2"
uuid7 = ">=0.1.0"
[package.extras]
-all = ["agentmail (==0.0.59)", "boto3 (>=1.38.45)", "botocore (>=1.37.23)", "click (>=8.1.8)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)", "rich (>=14.0.0)", "textual (>=3.2.0)"]
+all = ["agentmail (==0.0.59)", "boto3 (>=1.38.45)", "botocore (>=1.37.23)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)", "oci (>=2.126.4)", "textual (>=3.2.0)"]
aws = ["boto3 (>=1.38.45)"]
-cli = ["click (>=8.1.8)", "rich (>=14.0.0)", "textual (>=3.2.0)"]
-eval = ["anyio (>=4.9.0)", "browserbase (==1.4.0)", "datamodel-code-generator (>=0.26.0)", "hyperbrowser (==0.47.0)", "lmnr[all] (==0.7.17)", "psutil (>=7.0.0)"]
+cli = ["textual (>=3.2.0)"]
+cli-oci = ["oci (>=2.126.4)", "textual (>=3.2.0)"]
+code = ["matplotlib (>=3.9.0)", "numpy (>=2.3.2)", "pandas (>=2.2.0)", "tabulate (>=0.9.0)"]
+eval = ["anyio (>=4.9.0)", "datamodel-code-generator (>=0.26.0)", "lmnr[all] (==0.7.17)", "psutil (>=7.0.0)"]
examples = ["agentmail (==0.0.59)", "botocore (>=1.37.23)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)"]
+oci = ["oci (>=2.126.4)"]
video = ["imageio[ffmpeg] (>=2.37.0)", "numpy (>=2.3.2)"]
[[package]]
@@ -1494,14 +1502,14 @@ files = [
[[package]]
name = "cdp-use"
-version = "1.4.3"
+version = "1.4.4"
description = "Type safe generator/client library for CDP"
optional = false
python-versions = ">=3.11"
groups = ["main"]
files = [
- {file = "cdp_use-1.4.3-py3-none-any.whl", hash = "sha256:c48664604470c2579aa1e677c3e3e7e24c4f300c54804c093d935abb50479ecd"},
- {file = "cdp_use-1.4.3.tar.gz", hash = "sha256:9029c04bdc49fbd3939d2bf1988ad8d88e260729c7d5e35c2f6c87591f5a10e9"},
+ {file = "cdp_use-1.4.4-py3-none-any.whl", hash = "sha256:e37e80e067db2653d6fdf953d4ff9e5d80d75daa27b7c6d48c0261cccbef73e1"},
+ {file = "cdp_use-1.4.4.tar.gz", hash = "sha256:330a848b517006eb9ad1dc468aa6434d913cf0c6918610760c36c3fdfdba0fab"},
]
[package.dependencies]
@@ -2408,6 +2416,21 @@ wrapt = ">=1.10,<2"
[package.extras]
dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
+[[package]]
+name = "deprecation"
+version = "2.1.0"
+description = "A library to handle automated deprecations"
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+ {file = "deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a"},
+ {file = "deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff"},
+]
+
+[package.dependencies]
+packaging = "*"
+
[[package]]
name = "dill"
version = "0.3.8"
@@ -3787,28 +3810,28 @@ testing = ["pytest"]
[[package]]
name = "google-genai"
-version = "1.45.0"
+version = "1.53.0"
description = "GenAI Python SDK"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.10"
groups = ["main"]
files = [
- {file = "google_genai-1.45.0-py3-none-any.whl", hash = "sha256:e755295063e5fd5a4c44acff782a569e37fa8f76a6c75d0ede3375c70d916b7f"},
- {file = "google_genai-1.45.0.tar.gz", hash = "sha256:96ec32ae99a30b5a1b54cb874b577ec6e41b5d5b808bf0f10ed4620e867f9386"},
+ {file = "google_genai-1.53.0-py3-none-any.whl", hash = "sha256:65a3f99e5c03c372d872cda7419f5940e723374bb12a2f3ffd5e3e56e8eb2094"},
+ {file = "google_genai-1.53.0.tar.gz", hash = "sha256:938a26d22f3fd32c6eeeb4276ef204ef82884e63af9842ce3eac05ceb39cbd8d"},
]
[package.dependencies]
anyio = ">=4.8.0,<5.0.0"
-google-auth = ">=2.14.1,<3.0.0"
+google-auth = {version = ">=2.14.1,<3.0.0", extras = ["requests"]}
httpx = ">=0.28.1,<1.0.0"
-pydantic = ">=2.0.0,<3.0.0"
+pydantic = ">=2.9.0,<3.0.0"
requests = ">=2.28.1,<3.0.0"
tenacity = ">=8.2.3,<9.2.0"
typing-extensions = ">=4.11.0,<5.0.0"
websockets = ">=13.0.0,<15.1.0"
[package.extras]
-aiohttp = ["aiohttp (<4.0.0)"]
+aiohttp = ["aiohttp (<3.13.3)"]
local-tokenizer = ["protobuf", "sentencepiece (>=0.2.0)"]
[[package]]
@@ -3976,67 +3999,71 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
[[package]]
name = "grpcio"
-version = "1.72.1"
+version = "1.67.1"
description = "HTTP/2-based RPC framework"
optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
groups = ["main"]
files = [
- {file = "grpcio-1.72.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:ce2706ff37be7a6de68fbc4c3f8dde247cab48cc70fee5fedfbc9cd923b4ee5a"},
- {file = "grpcio-1.72.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:7db9e15ee7618fbea748176a67d347f3100fa92d36acccd0e7eeb741bc82f72a"},
- {file = "grpcio-1.72.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:8d6e7764181ba4a8b74aa78c98a89c9f3441068ebcee5d6f14c44578214e0be3"},
- {file = "grpcio-1.72.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:237bb619ba33594006025e6f114f62e60d9563afd6f8e89633ee384868e26687"},
- {file = "grpcio-1.72.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7f1d8a442fd242aa432c8e1b8411c79ebc409dad2c637614d726e226ce9ed0c"},
- {file = "grpcio-1.72.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f2359bd4bba85bf94fd9ab8802671b9637a6803bb673d221157a11523a52e6a8"},
- {file = "grpcio-1.72.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3269cfca37570a420a57a785f2a5d4234c5b12aced55f8843dafced2d3f8c9a6"},
- {file = "grpcio-1.72.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:06c023d86398714d6257194c21f2bc0b58a53ce45cee87dd3c54c7932c590e17"},
- {file = "grpcio-1.72.1-cp310-cp310-win32.whl", hash = "sha256:06dbe54eeea5f9dfb3e7ca2ff66c715ff5fc96b07a1feb322122fe14cb42f6aa"},
- {file = "grpcio-1.72.1-cp310-cp310-win_amd64.whl", hash = "sha256:ba593aa2cd52f4468ba29668c83f893d88c128198d6b1273ca788ef53e3ae5fe"},
- {file = "grpcio-1.72.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:4e112c083f90c330b0eaa78a633fb206d49c20c443926e827f8cac9eb9d2ea32"},
- {file = "grpcio-1.72.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:c6f7e3275832adab7384193f78b8c1a98b82541562fa08d7244e8a6b4b5c78a4"},
- {file = "grpcio-1.72.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:dd03c8847c47ef7ac5455aafdfb5e553ecf84f228282bd6106762b379f27c25c"},
- {file = "grpcio-1.72.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7497dbdf220b88b66004e2630fb2b1627df5e279db970d3cc20f70d39dce978d"},
- {file = "grpcio-1.72.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c2cde3ae8ae901317c049394ed8d3c6964de6b814ae65fc68636a7337b63aa"},
- {file = "grpcio-1.72.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7a66cef4bc1db81a54108a849e95650da640c9bc1901957bf7d3b1eeb3251ee8"},
- {file = "grpcio-1.72.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:fc0435ad45d540597f78978e3fd5515b448193f51f9065fb67dda566336e0f5f"},
- {file = "grpcio-1.72.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:524bad78d610fa1f9f316d47b3aab1ff89d438ba952ee34e3e335ca80a27ba96"},
- {file = "grpcio-1.72.1-cp311-cp311-win32.whl", hash = "sha256:409ee0abf7e74bbf88941046142452cf3d1f3863d34e11e8fd2b07375170c730"},
- {file = "grpcio-1.72.1-cp311-cp311-win_amd64.whl", hash = "sha256:ea483e408fac55569c11158c3e6d6d6a8c3b0f798b68f1c10db9b22c5996e19b"},
- {file = "grpcio-1.72.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:65a5ef28e5852bd281c6d01a923906e8036736e95e370acab8626fcbec041e67"},
- {file = "grpcio-1.72.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:9e5c594a6c779d674204fb9bdaa1e7b71666ff10b34a62e7769fc6868b5d7511"},
- {file = "grpcio-1.72.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:d324f4bdb990d852d79b38c59a12d24fcd47cf3b1a38f2e4d2b6d0b1031bc818"},
- {file = "grpcio-1.72.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:841db55dd29cf2f4121b853b2f89813a1b6175163fbb92c5945fb1b0ca259ef2"},
- {file = "grpcio-1.72.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00da930aa2711b955a538e835096aa365a4b7f2701bdc2ce1febb242a103f8a1"},
- {file = "grpcio-1.72.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4b657773480267fbb7ad733fa85abc103c52ab62e5bc97791faf82c53836eefc"},
- {file = "grpcio-1.72.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a08b483f17a6abca2578283a7ae3aa8d4d90347242b0de2898bdb27395c3f20b"},
- {file = "grpcio-1.72.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:299f3ea4e03c1d0548f4a174b48d612412f92c667f2100e30a079ab76fdaa813"},
- {file = "grpcio-1.72.1-cp312-cp312-win32.whl", hash = "sha256:addc721a3708ff789da1bf69876018dc730c1ec9d3d3cb6912776a00c535a5bc"},
- {file = "grpcio-1.72.1-cp312-cp312-win_amd64.whl", hash = "sha256:22ea2aa92a60dff231ba5fcd7f0220a33c2218e556009996f858eeafe294d1c2"},
- {file = "grpcio-1.72.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:294be6e9c323a197434569a41e0fb5b5aa0962fd5d55a3dc890ec5df985f611a"},
- {file = "grpcio-1.72.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:41ec164dac8df2862f67457d9cdf8d8f8b6a4ca475a3ed1ba6547fff98d93717"},
- {file = "grpcio-1.72.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:761736f75c6ddea3732d97eaabe70c616271f5f542a8be95515135fdd1a638f6"},
- {file = "grpcio-1.72.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:082003cb93618964c111c70d69b60ac0dc6566d4c254c9b2a775faa2965ba8f8"},
- {file = "grpcio-1.72.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8660f736da75424949c14f7c8b1ac60a25b2f37cabdec95181834b405373e8a7"},
- {file = "grpcio-1.72.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:2ada1abe2ad122b42407b2bfd79d6706a4940d4797f44bd740f5c98ca1ecda9b"},
- {file = "grpcio-1.72.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:0db2766d0c482ee740abbe7d00a06cc4fb54f7e5a24d3cf27c3352be18a2b1e8"},
- {file = "grpcio-1.72.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4bdb404d9c2187260b34e2b22783c204fba8a9023a166cf77376190d9cf5a08"},
- {file = "grpcio-1.72.1-cp313-cp313-win32.whl", hash = "sha256:bb64722c3124c906a5b66e50a90fd36442642f653ba88a24f67d08e94bca59f3"},
- {file = "grpcio-1.72.1-cp313-cp313-win_amd64.whl", hash = "sha256:329cc6ff5b431df9614340d3825b066a1ff0a5809a01ba2e976ef48c65a0490b"},
- {file = "grpcio-1.72.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:8941b83addd503c1982090b4631804d0ff1edbbc6c85c9c20ed503b1dc65fef9"},
- {file = "grpcio-1.72.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:d29b80290c5eda561a4c291d6d5b4315a2a5095ab37061118d6e0781858aca0a"},
- {file = "grpcio-1.72.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:4ca56d955564db749c9c6d75e9c4c777854e22b2482d247fb6c5a02d5f28ea78"},
- {file = "grpcio-1.72.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b08a3ef14d2b01eef13882c6d3a2d8fb5fcd73db81bd1e3ab69d4ee75215433a"},
- {file = "grpcio-1.72.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7df49801b3b323e4a21047979e3834cd286b32ee5ceee46f5217826274721f"},
- {file = "grpcio-1.72.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9717617ba2ff65c058ef53b0d5e50f03e8350f0c5597f93bb5c980a31db990c8"},
- {file = "grpcio-1.72.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:212db80b1e8aa7792d51269bfb32164e2333a9bb273370ace3ed2a378505cb01"},
- {file = "grpcio-1.72.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a0d19947d4480af5f363f077f221e665931f479e2604280ac4eafe6daa71f77"},
- {file = "grpcio-1.72.1-cp39-cp39-win32.whl", hash = "sha256:7622ef647dc911ed010a817d9be501df4ae83495b8e5cdd35b555bdcf3880a3e"},
- {file = "grpcio-1.72.1-cp39-cp39-win_amd64.whl", hash = "sha256:f8d8fa7cd2a7f1b4207e215dec8bc07f1202682d9a216ebe028185c15faece30"},
- {file = "grpcio-1.72.1.tar.gz", hash = "sha256:87f62c94a40947cec1a0f91f95f5ba0aa8f799f23a1d42ae5be667b6b27b959c"},
+ {file = "grpcio-1.67.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:8b0341d66a57f8a3119b77ab32207072be60c9bf79760fa609c5609f2deb1f3f"},
+ {file = "grpcio-1.67.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:f5a27dddefe0e2357d3e617b9079b4bfdc91341a91565111a21ed6ebbc51b22d"},
+ {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:43112046864317498a33bdc4797ae6a268c36345a910de9b9c17159d8346602f"},
+ {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9b929f13677b10f63124c1a410994a401cdd85214ad83ab67cc077fc7e480f0"},
+ {file = "grpcio-1.67.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7d1797a8a3845437d327145959a2c0c47c05947c9eef5ff1a4c80e499dcc6fa"},
+ {file = "grpcio-1.67.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:0489063974d1452436139501bf6b180f63d4977223ee87488fe36858c5725292"},
+ {file = "grpcio-1.67.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9fd042de4a82e3e7aca44008ee2fb5da01b3e5adb316348c21980f7f58adc311"},
+ {file = "grpcio-1.67.1-cp310-cp310-win32.whl", hash = "sha256:638354e698fd0c6c76b04540a850bf1db27b4d2515a19fcd5cf645c48d3eb1ed"},
+ {file = "grpcio-1.67.1-cp310-cp310-win_amd64.whl", hash = "sha256:608d87d1bdabf9e2868b12338cd38a79969eaf920c89d698ead08f48de9c0f9e"},
+ {file = "grpcio-1.67.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:7818c0454027ae3384235a65210bbf5464bd715450e30a3d40385453a85a70cb"},
+ {file = "grpcio-1.67.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ea33986b70f83844cd00814cee4451055cd8cab36f00ac64a31f5bb09b31919e"},
+ {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c7a01337407dd89005527623a4a72c5c8e2894d22bead0895306b23c6695698f"},
+ {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80b866f73224b0634f4312a4674c1be21b2b4afa73cb20953cbbb73a6b36c3cc"},
+ {file = "grpcio-1.67.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9fff78ba10d4250bfc07a01bd6254a6d87dc67f9627adece85c0b2ed754fa96"},
+ {file = "grpcio-1.67.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8a23cbcc5bb11ea7dc6163078be36c065db68d915c24f5faa4f872c573bb400f"},
+ {file = "grpcio-1.67.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1a65b503d008f066e994f34f456e0647e5ceb34cfcec5ad180b1b44020ad4970"},
+ {file = "grpcio-1.67.1-cp311-cp311-win32.whl", hash = "sha256:e29ca27bec8e163dca0c98084040edec3bc49afd10f18b412f483cc68c712744"},
+ {file = "grpcio-1.67.1-cp311-cp311-win_amd64.whl", hash = "sha256:786a5b18544622bfb1e25cc08402bd44ea83edfb04b93798d85dca4d1a0b5be5"},
+ {file = "grpcio-1.67.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:267d1745894200e4c604958da5f856da6293f063327cb049a51fe67348e4f953"},
+ {file = "grpcio-1.67.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:85f69fdc1d28ce7cff8de3f9c67db2b0ca9ba4449644488c1e0303c146135ddb"},
+ {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f26b0b547eb8d00e195274cdfc63ce64c8fc2d3e2d00b12bf468ece41a0423a0"},
+ {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4422581cdc628f77302270ff839a44f4c24fdc57887dc2a45b7e53d8fc2376af"},
+ {file = "grpcio-1.67.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7616d2ded471231c701489190379e0c311ee0a6c756f3c03e6a62b95a7146e"},
+ {file = "grpcio-1.67.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8a00efecde9d6fcc3ab00c13f816313c040a28450e5e25739c24f432fc6d3c75"},
+ {file = "grpcio-1.67.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:699e964923b70f3101393710793289e42845791ea07565654ada0969522d0a38"},
+ {file = "grpcio-1.67.1-cp312-cp312-win32.whl", hash = "sha256:4e7b904484a634a0fff132958dabdb10d63e0927398273917da3ee103e8d1f78"},
+ {file = "grpcio-1.67.1-cp312-cp312-win_amd64.whl", hash = "sha256:5721e66a594a6c4204458004852719b38f3d5522082be9061d6510b455c90afc"},
+ {file = "grpcio-1.67.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:aa0162e56fd10a5547fac8774c4899fc3e18c1aa4a4759d0ce2cd00d3696ea6b"},
+ {file = "grpcio-1.67.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:beee96c8c0b1a75d556fe57b92b58b4347c77a65781ee2ac749d550f2a365dc1"},
+ {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:a93deda571a1bf94ec1f6fcda2872dad3ae538700d94dc283c672a3b508ba3af"},
+ {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e6f255980afef598a9e64a24efce87b625e3e3c80a45162d111a461a9f92955"},
+ {file = "grpcio-1.67.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e838cad2176ebd5d4a8bb03955138d6589ce9e2ce5d51c3ada34396dbd2dba8"},
+ {file = "grpcio-1.67.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:a6703916c43b1d468d0756c8077b12017a9fcb6a1ef13faf49e67d20d7ebda62"},
+ {file = "grpcio-1.67.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:917e8d8994eed1d86b907ba2a61b9f0aef27a2155bca6cbb322430fc7135b7bb"},
+ {file = "grpcio-1.67.1-cp313-cp313-win32.whl", hash = "sha256:e279330bef1744040db8fc432becc8a727b84f456ab62b744d3fdb83f327e121"},
+ {file = "grpcio-1.67.1-cp313-cp313-win_amd64.whl", hash = "sha256:fa0c739ad8b1996bd24823950e3cb5152ae91fca1c09cc791190bf1627ffefba"},
+ {file = "grpcio-1.67.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:178f5db771c4f9a9facb2ab37a434c46cb9be1a75e820f187ee3d1e7805c4f65"},
+ {file = "grpcio-1.67.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f3e49c738396e93b7ba9016e153eb09e0778e776df6090c1b8c91877cc1c426"},
+ {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:24e8a26dbfc5274d7474c27759b54486b8de23c709d76695237515bc8b5baeab"},
+ {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b6c16489326d79ead41689c4b84bc40d522c9a7617219f4ad94bc7f448c5085"},
+ {file = "grpcio-1.67.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60e6a4dcf5af7bbc36fd9f81c9f372e8ae580870a9e4b6eafe948cd334b81cf3"},
+ {file = "grpcio-1.67.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:95b5f2b857856ed78d72da93cd7d09b6db8ef30102e5e7fe0961fe4d9f7d48e8"},
+ {file = "grpcio-1.67.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b49359977c6ec9f5d0573ea4e0071ad278ef905aa74e420acc73fd28ce39e9ce"},
+ {file = "grpcio-1.67.1-cp38-cp38-win32.whl", hash = "sha256:f5b76ff64aaac53fede0cc93abf57894ab2a7362986ba22243d06218b93efe46"},
+ {file = "grpcio-1.67.1-cp38-cp38-win_amd64.whl", hash = "sha256:804c6457c3cd3ec04fe6006c739579b8d35c86ae3298ffca8de57b493524b771"},
+ {file = "grpcio-1.67.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:a25bdea92b13ff4d7790962190bf6bf5c4639876e01c0f3dda70fc2769616335"},
+ {file = "grpcio-1.67.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:cdc491ae35a13535fd9196acb5afe1af37c8237df2e54427be3eecda3653127e"},
+ {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:85f862069b86a305497e74d0dc43c02de3d1d184fc2c180993aa8aa86fbd19b8"},
+ {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ec74ef02010186185de82cc594058a3ccd8d86821842bbac9873fd4a2cf8be8d"},
+ {file = "grpcio-1.67.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01f616a964e540638af5130469451cf580ba8c7329f45ca998ab66e0c7dcdb04"},
+ {file = "grpcio-1.67.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:299b3d8c4f790c6bcca485f9963b4846dd92cf6f1b65d3697145d005c80f9fe8"},
+ {file = "grpcio-1.67.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:60336bff760fbb47d7e86165408126f1dded184448e9a4c892189eb7c9d3f90f"},
+ {file = "grpcio-1.67.1-cp39-cp39-win32.whl", hash = "sha256:5ed601c4c6008429e3d247ddb367fe8c7259c355757448d7c1ef7bd4a6739e8e"},
+ {file = "grpcio-1.67.1-cp39-cp39-win_amd64.whl", hash = "sha256:5db70d32d6703b89912af16d6d45d78406374a8b8ef0d28140351dd0ec610e98"},
+ {file = "grpcio-1.67.1.tar.gz", hash = "sha256:3dc2ed4cabea4dc14d5e708c2b426205956077cc5de419b4d4079315017e9732"},
]
[package.extras]
-protobuf = ["grpcio-tools (>=1.72.1)"]
+protobuf = ["grpcio-tools (>=1.67.1)"]
[[package]]
name = "grpcio-status"
@@ -4419,6 +4446,25 @@ files = [
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
]
+[[package]]
+name = "inquirerpy"
+version = "0.3.4"
+description = "Python port of Inquirer.js (A collection of common interactive command-line user interfaces)"
+optional = false
+python-versions = ">=3.7,<4.0"
+groups = ["main"]
+files = [
+ {file = "InquirerPy-0.3.4-py3-none-any.whl", hash = "sha256:c65fdfbac1fa00e3ee4fb10679f4d3ed7a012abf4833910e63c295827fe2a7d4"},
+ {file = "InquirerPy-0.3.4.tar.gz", hash = "sha256:89d2ada0111f337483cb41ae31073108b2ec1e618a49d7110b0d7ade89fc197e"},
+]
+
+[package.dependencies]
+pfzy = ">=0.3.1,<0.4.0"
+prompt-toolkit = ">=3.0.1,<4.0.0"
+
+[package.extras]
+docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst-parser (>=0.15.1,<0.16.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.4.0,<0.5.0)"]
+
[[package]]
name = "installer"
version = "0.7.0"
@@ -5594,25 +5640,26 @@ types-tqdm = "*"
[[package]]
name = "litellm"
-version = "1.77.7"
+version = "1.80.7"
description = "Library to easily interface with LLM API providers"
optional = false
-python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
+python-versions = "<4.0,>=3.9"
groups = ["main"]
files = [
- {file = "litellm-1.77.7-py3-none-any.whl", hash = "sha256:1b3a1b17bd521a0ad25226fb62a912602c803922aabb4a16adf83834673be574"},
- {file = "litellm-1.77.7.tar.gz", hash = "sha256:e3398fb2575b98726e787c0a1481daed5938d58cafdcd96fbca80c312221af3e"},
+ {file = "litellm-1.80.7-py3-none-any.whl", hash = "sha256:f7d993f78c1e0e4e1202b2a925cc6540b55b6e5fb055dd342d88b145ab3102ed"},
+ {file = "litellm-1.80.7.tar.gz", hash = "sha256:3977a8d195aef842d01c18bf9e22984829363c6a4b54daf9a43c9dd9f190b42c"},
]
[package.dependencies]
aiohttp = ">=3.10"
click = "*"
fastuuid = ">=0.13.0"
+grpcio = ">=1.62.3,<1.68.0"
httpx = ">=0.23.0"
importlib-metadata = ">=6.8.0"
jinja2 = ">=3.1.2,<4.0.0"
jsonschema = ">=4.22.0,<5.0.0"
-openai = ">=1.99.5"
+openai = ">=2.8.0"
pydantic = ">=2.5.0,<3.0.0"
python-dotenv = ">=0.2.0"
tiktoken = ">=0.7.0"
@@ -5620,10 +5667,10 @@ tokenizers = "*"
[package.extras]
caching = ["diskcache (>=5.6.1,<6.0.0)"]
-extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-iam (>=2.19.1,<3.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "redisvl (>=0.4.1,<0.5.0) ; python_version >= \"3.9\" and python_version < \"3.14\"", "resend (>=0.8.0,<0.9.0)"]
+extra-proxy = ["azure-identity (>=1.15.0,<2.0.0) ; python_version >= \"3.9\"", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-iam (>=2.19.1,<3.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "redisvl (>=0.4.1,<0.5.0) ; python_version >= \"3.9\" and python_version < \"3.14\"", "resend (>=0.8.0)"]
mlflow = ["mlflow (>3.1.4) ; python_version >= \"3.10\""]
-proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "azure-identity (>=1.15.0,<2.0.0)", "azure-storage-blob (>=12.25.1,<13.0.0)", "backoff", "boto3 (==1.36.0)", "cryptography", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.20)", "litellm-proxy-extras (==0.2.25)", "mcp (>=1.10.0,<2.0.0) ; python_version >= \"3.10\"", "orjson (>=3.9.7,<4.0.0)", "polars (>=1.31.0,<2.0.0) ; python_version >= \"3.10\"", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0) ; sys_platform != \"win32\"", "websockets (>=13.1.0,<14.0.0)"]
-semantic-router = ["semantic-router ; python_version >= \"3.9\""]
+proxy = ["PyJWT (>=2.10.1,<3.0.0) ; python_version >= \"3.9\"", "apscheduler (>=3.10.4,<4.0.0)", "azure-identity (>=1.15.0,<2.0.0) ; python_version >= \"3.9\"", "azure-storage-blob (>=12.25.1,<13.0.0)", "backoff", "boto3 (==1.36.0)", "cryptography", "fastapi (>=0.120.1)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=23.0.0,<24.0.0)", "litellm-enterprise (==0.1.22)", "litellm-proxy-extras (==0.4.9)", "mcp (>=1.21.2,<2.0.0) ; python_version >= \"3.10\"", "orjson (>=3.9.7,<4.0.0)", "polars (>=1.31.0,<2.0.0) ; python_version >= \"3.10\"", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rich (==13.7.1)", "rq", "soundfile (>=0.12.1,<0.13.0)", "uvicorn (>=0.31.1,<0.32.0)", "uvloop (>=0.21.0,<0.22.0) ; sys_platform != \"win32\"", "websockets (>=15.0.1,<16.0.0)"]
+semantic-router = ["semantic-router (>=0.1.12) ; python_version >= \"3.9\" and python_version < \"3.14\""]
utils = ["numpydoc"]
[[package]]
@@ -5768,8 +5815,11 @@ files = [
{file = "lxml-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:7ce1a171ec325192c6a636b64c94418e71a1964f56d002cc28122fceff0b6121"},
{file = "lxml-5.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:795f61bcaf8770e1b37eec24edf9771b307df3af74d1d6f27d812e15a9ff3872"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:29f451a4b614a7b5b6c2e043d7b64a15bd8304d7e767055e8ab68387a8cacf4e"},
+ {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:891f7f991a68d20c75cb13c5c9142b2a3f9eb161f1f12a9489c82172d1f133c0"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa412a82e460571fad592d0f93ce9935a20090029ba08eca05c614f99b0cc92"},
+ {file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ac7ba71f9561cd7d7b55e1ea5511543c0282e2b6450f122672a2694621d63b7e"},
{file = "lxml-5.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:c5d32f5284012deaccd37da1e2cd42f081feaa76981f0eaa474351b68df813c5"},
+ {file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:ce31158630a6ac85bddd6b830cffd46085ff90498b397bd0a259f59d27a12188"},
{file = "lxml-5.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:31e63621e073e04697c1b2d23fcb89991790eef370ec37ce4d5d469f40924ed6"},
{file = "lxml-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:be2ba4c3c5b7900246a8f866580700ef0d538f2ca32535e991027bdaba944063"},
{file = "lxml-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:09846782b1ef650b321484ad429217f5154da4d6e786636c38e434fa32e94e49"},
@@ -5890,14 +5940,14 @@ testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
name = "markdownify"
-version = "1.1.0"
+version = "1.2.2"
description = "Convert HTML to markdown."
optional = false
python-versions = "*"
groups = ["main"]
files = [
- {file = "markdownify-1.1.0-py3-none-any.whl", hash = "sha256:32a5a08e9af02c8a6528942224c91b933b4bd2c7d078f9012943776fc313eeef"},
- {file = "markdownify-1.1.0.tar.gz", hash = "sha256:449c0bbbf1401c5112379619524f33b63490a8fa479456d41de9dc9e37560ebd"},
+ {file = "markdownify-1.2.2-py3-none-any.whl", hash = "sha256:3f02d3cc52714084d6e589f70397b6fc9f2f3a8531481bf35e8cc39f975e186a"},
+ {file = "markdownify-1.2.2.tar.gz", hash = "sha256:b274f1b5943180b031b699b199cbaeb1e2ac938b75851849a31fd0c3d6603d09"},
]
[package.dependencies]
@@ -7173,28 +7223,28 @@ pydantic = ">=2.9"
[[package]]
name = "openai"
-version = "1.99.9"
+version = "2.8.0"
description = "The official Python library for the openai API"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
groups = ["main", "evaluation"]
files = [
- {file = "openai-1.99.9-py3-none-any.whl", hash = "sha256:9dbcdb425553bae1ac5d947147bebbd630d91bbfc7788394d4c4f3a35682ab3a"},
- {file = "openai-1.99.9.tar.gz", hash = "sha256:f2082d155b1ad22e83247c3de3958eb4255b20ccf4a1de2e6681b6957b554e92"},
+ {file = "openai-2.8.0-py3-none-any.whl", hash = "sha256:ba975e347f6add2fe13529ccb94d54a578280e960765e5224c34b08d7e029ddf"},
+ {file = "openai-2.8.0.tar.gz", hash = "sha256:4851908f6d6fcacbd47ba659c5ac084f7725b752b6bfa1e948b6fbfc111a6bad"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
-jiter = ">=0.4.0,<1"
+jiter = ">=0.10.0,<1"
pydantic = ">=1.9.0,<3"
sniffio = "*"
tqdm = ">4"
typing-extensions = ">=4.11,<5"
[package.extras]
-aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"]
+aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
@@ -7329,14 +7379,14 @@ llama = ["llama-index (>=0.12.29,<0.13.0)", "llama-index-core (>=0.12.29,<0.13.0
[[package]]
name = "openhands-agent-server"
-version = "1.1.0"
+version = "1.4.1"
description = "OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = [
- {file = "openhands_agent_server-1.1.0-py3-none-any.whl", hash = "sha256:59a856883df23488c0723e47655ef21649a321fcd4709a25a4690866eff6ac88"},
- {file = "openhands_agent_server-1.1.0.tar.gz", hash = "sha256:e39bebd39afd45cfcfd765005e7c4e5409e46678bd7612ae20bae79f7057b935"},
+ {file = "openhands_agent_server-1.4.1-py3-none-any.whl", hash = "sha256:1e621d15215a48e2398e23c58a791347f06c215c2344053aeb26b562c34a44ee"},
+ {file = "openhands_agent_server-1.4.1.tar.gz", hash = "sha256:03010a5c8d63bbd5b088458eb75308ef16559018140d75a3644ae5bbc3531bbf"},
]
[package.dependencies]
@@ -7344,6 +7394,7 @@ aiosqlite = ">=0.19"
alembic = ">=1.13"
docker = ">=7.1,<8"
fastapi = ">=0.104"
+openhands-sdk = "*"
pydantic = ">=2"
sqlalchemy = ">=2"
uvicorn = ">=0.31.1"
@@ -7352,20 +7403,21 @@ wsproto = ">=1.2.0"
[[package]]
name = "openhands-sdk"
-version = "1.1.0"
+version = "1.4.1"
description = "OpenHands SDK - Core functionality for building AI agents"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = [
- {file = "openhands_sdk-1.1.0-py3-none-any.whl", hash = "sha256:4a984ce1687a48cf99a67fdf3d37b116f8b2840743d4807810b5024af6a1d57e"},
- {file = "openhands_sdk-1.1.0.tar.gz", hash = "sha256:855e0d8f3657205e4119e50520c17e65b3358b1a923f7a051a82512a54bf426c"},
+ {file = "openhands_sdk-1.4.1-py3-none-any.whl", hash = "sha256:70e453eab7f9ab6b705198c2615fdd844b21e14b29d78afaf62724f4a440bcdc"},
+ {file = "openhands_sdk-1.4.1.tar.gz", hash = "sha256:37365de25ed57cf8cc2a8003ab4d7a1fe2a40b49c8e8da84a3f1ea2b522eddf2"},
]
[package.dependencies]
+deprecation = ">=2.1.0"
fastmcp = ">=2.11.3"
httpx = ">=0.27.0"
-litellm = ">=1.77.7.dev9"
+litellm = ">=1.80.7"
lmnr = ">=0.7.20"
pydantic = ">=2.11.7"
python-frontmatter = ">=1.1.0"
@@ -7378,14 +7430,14 @@ boto3 = ["boto3 (>=1.35.0)"]
[[package]]
name = "openhands-tools"
-version = "1.1.0"
+version = "1.4.1"
description = "OpenHands Tools - Runtime tools for AI agents"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = [
- {file = "openhands_tools-1.1.0-py3-none-any.whl", hash = "sha256:767d6746f05edade49263aa24450a037485a3dc23379f56917ef19aad22033f9"},
- {file = "openhands_tools-1.1.0.tar.gz", hash = "sha256:c2fadaa4f4e16e9a3df5781ea847565dcae7171584f09ef7c0e1d97c8dfc83f6"},
+ {file = "openhands_tools-1.4.1-py3-none-any.whl", hash = "sha256:8f40189a08bf80eb4a33219ee9ccc528f9c6c4f2d5c9ab807b06c3f3fe21a612"},
+ {file = "openhands_tools-1.4.1.tar.gz", hash = "sha256:4c0caf87f520a207d9035191c77b7b5c53eeec996350a24ffaf7f740a6566b22"},
]
[package.dependencies]
@@ -7397,6 +7449,7 @@ func-timeout = ">=4.3.5"
libtmux = ">=0.46.2"
openhands-sdk = "*"
pydantic = ">=2.11.7"
+tom-swe = ">=1.0.3"
[[package]]
name = "openpyxl"
@@ -7909,6 +7962,21 @@ files = [
[package.dependencies]
ptyprocess = ">=0.5"
+[[package]]
+name = "pfzy"
+version = "0.3.4"
+description = "Python port of the fzy fuzzy string matching algorithm"
+optional = false
+python-versions = ">=3.7,<4.0"
+groups = ["main"]
+files = [
+ {file = "pfzy-0.3.4-py3-none-any.whl", hash = "sha256:5f50d5b2b3207fa72e7ec0ef08372ef652685470974a107d0d4999fc5a903a96"},
+ {file = "pfzy-0.3.4.tar.gz", hash = "sha256:717ea765dd10b63618e7298b2d98efd819e0b30cd5905c9707223dceeb94b3f1"},
+]
+
+[package.extras]
+docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst-parser (>=0.15.1,<0.16.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.4.0,<0.5.0)"]
+
[[package]]
name = "pg8000"
version = "1.31.5"
@@ -14950,6 +15018,31 @@ dev = ["tokenizers[testing]"]
docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests", "ruff"]
+[[package]]
+name = "tom-swe"
+version = "1.0.3"
+description = "Theory of Mind modeling for Software Engineering assistants"
+optional = false
+python-versions = ">=3.10"
+groups = ["main"]
+files = [
+ {file = "tom_swe-1.0.3-py3-none-any.whl", hash = "sha256:7b1172b29eb5c8fb7f1975016e7b6a238511b9ac2a7a980bd400dcb4e29773f2"},
+ {file = "tom_swe-1.0.3.tar.gz", hash = "sha256:57c97d0104e563f15bd39edaf2aa6ac4c3e9444afd437fb92458700d22c6c0f5"},
+]
+
+[package.dependencies]
+jinja2 = ">=3.0.0"
+json-repair = ">=0.1.0"
+litellm = ">=1.0.0"
+pydantic = ">=2.0.0"
+python-dotenv = ">=1.0.0"
+tiktoken = ">=0.8.0"
+tqdm = ">=4.65.0"
+
+[package.extras]
+dev = ["aiofiles (>=23.0.0)", "black (>=22.0.0)", "datasets (>=2.0.0)", "fastapi (>=0.104.0)", "httpx (>=0.25.0)", "huggingface-hub (>=0.0.0)", "isort (>=5.0.0)", "mypy (>=1.0.0)", "numpy (>=1.24.0)", "pandas (>=2.0.0)", "pre-commit (>=3.6.0)", "pytest (>=7.0.0)", "pytest-cov (>=6.2.1)", "rich (>=13.0.0)", "ruff (>=0.3.0)", "typing-extensions (>=4.0.0)", "uvicorn (>=0.24.0)"]
+search = ["bm25s (>=0.2.0)", "pystemmer (>=2.2.0)"]
+
[[package]]
name = "toml"
version = "0.10.2"
@@ -16729,4 +16822,4 @@ third-party-runtimes = ["daytona", "e2b-code-interpreter", "modal", "runloop-api
[metadata]
lock-version = "2.1"
python-versions = "^3.12,<3.14"
-content-hash = "0fe5bab6aeb5ebce4588b30cfcf491af4cc9d9b9cd5160e67c8a055d9db276fc"
+content-hash = "c208fcc692f74540f7b6e822136002dd0f079a3d8d1b93227a5bb07a7f4432cb"
diff --git a/pyproject.toml b/pyproject.toml
index 28b7b03f9afc..764751f2bce4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -20,14 +20,14 @@ packages = [
]
include = [
"openhands/integrations/vscode/openhands-vscode-0.0.1.vsix",
- "microagents/**/*",
+ "skills/**/*",
]
build = "build_vscode.py" # Build VSCode extension during Poetry build
[tool.poetry.dependencies]
python = "^3.12,<3.14"
-litellm = ">=1.74.3, <1.78.0, !=1.64.4, !=1.67.*" # avoid 1.64.4 (known bug) & 1.67.* (known bug #10272)
-openai = "1.99.9" # Pin due to litellm incompatibility with >=1.100.0 (BerriAI/litellm#13711)
+litellm = ">=1.74.3, <=1.80.7, !=1.64.4, !=1.67.*" # avoid 1.64.4 (known bug) & 1.67.* (known bug #10272)
+openai = "2.8.0" # Pinned to a release compatible with the litellm constraint above (see BerriAI/litellm#13711)
aiohttp = ">=3.9.0,!=3.11.13" # Pin to avoid yanked version 3.11.13
google-genai = "*" # To use litellm with Gemini Pro API
google-api-python-client = "^2.164.0" # For Google Sheets API
@@ -113,16 +113,17 @@ e2b-code-interpreter = { version = "^2.0.0", optional = true }
pybase62 = "^1.0.0"
# V1 dependencies
-#openhands-agent-server = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-agent-server", rev = "f3c0c19cd134fbda84e07f152897a6d61e1e46c5" }
-#openhands-sdk = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-sdk", rev = "f3c0c19cd134fbda84e07f152897a6d61e1e46c5" }
-#openhands-tools = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-tools", rev = "f3c0c19cd134fbda84e07f152897a6d61e1e46c5" }
-openhands-sdk = "1.1.0"
-openhands-agent-server = "1.1.0"
-openhands-tools = "1.1.0"
+#openhands-agent-server = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-agent-server", rev = "15f565b8ac38876e40dc05c08e2b04ccaae4a66d" }
+#openhands-sdk = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-sdk", rev = "15f565b8ac38876e40dc05c08e2b04ccaae4a66d" }
+#openhands-tools = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-tools", rev = "15f565b8ac38876e40dc05c08e2b04ccaae4a66d" }
+openhands-sdk = "1.4.1"
+openhands-agent-server = "1.4.1"
+openhands-tools = "1.4.1"
python-jose = { version = ">=3.3", extras = [ "cryptography" ] }
sqlalchemy = { extras = [ "asyncio" ], version = "^2.0.40" }
pg8000 = "^1.31.5"
asyncpg = "^0.30.0"
+deprecation = "^2.1.0"
lmnr = "^0.7.20"
[tool.poetry.extras]
diff --git a/microagents/README.md b/skills/README.md
similarity index 58%
rename from microagents/README.md
rename to skills/README.md
index 97e920e535f1..f819415e7054 100644
--- a/microagents/README.md
+++ b/skills/README.md
@@ -1,82 +1,107 @@
-# OpenHands Microagents
+# OpenHands Skills
-Microagents are specialized prompts that enhance OpenHands with domain-specific knowledge and task-specific workflows. They help developers by providing expert guidance, automating common tasks, and ensuring consistent practices across projects. Each microagent is designed to excel in a specific area, from Git operations to code review processes.
+Skills are specialized prompts that enhance OpenHands with domain-specific knowledge and task-specific workflows. They help developers by providing expert guidance, automating common tasks, and ensuring consistent practices across projects. Each skill is designed to excel in a specific area, from Git operations to code review processes.
-## Sources of Microagents
+## Terminology Note
-OpenHands loads microagents from two sources:
+**Version 0 (V0)**: The term "microagents" continues to be used for V0 conversations. V0 is the current stable version of OpenHands.
+
+**Version 1 (V1)**: The term "skills" is used for V1 conversations. V1 UI and app server have not yet been released, but the codebase has been updated to use "skills" terminology in preparation for the V1 release.
+
+This directory (`OpenHands/skills/`) contains shareable skills that will be used in V1 conversations. For V0 conversations, the system continues to use microagents from the same underlying files.
+
+## Sources of Skills/Microagents
+
+OpenHands loads skills (V1) or microagents (V0) from two sources:
+
+### 1. Shareable Skills/Microagents (Public)
+
+This directory (`OpenHands/skills/`) contains shareable skills (V1) or microagents (V0) that are:
-### 1. Shareable Microagents (Public)
-This directory (`OpenHands/microagents/`) contains shareable microagents that are:
- Available to all OpenHands users
- Maintained in the OpenHands repository
- Perfect for reusable knowledge and common workflows
+- Used as "skills" in V1 conversations and "microagents" in V0 conversations
Directory structure:
+
```
-OpenHands/microagents/
+OpenHands/skills/
├── # Keyword-triggered expertise
│ ├── git.md # Git operations
│ ├── testing.md # Testing practices
│ └── docker.md # Docker guidelines
-└── # These microagents are always loaded
+└── # These skills/microagents are always loaded
├── pr_review.md # PR review process
├── bug_fix.md # Bug fixing workflow
└── feature.md # Feature implementation
```
### 2. Repository Instructions (Private)
-Each repository can have its own instructions in `.openhands/microagents/repo.md`. These instructions are:
+
+Each repository can have its own instructions in `.openhands/microagents/` (V0) or `.openhands/skills/` (V1). These instructions are:
+
- Private to that repository
- Automatically loaded when working with that repository
- Perfect for repository-specific guidelines and team practices
+- V1 supports both `.openhands/skills/` (preferred) and `.openhands/microagents/` (backward compatibility)
Example repository structure:
+
```
your-repository/
└── .openhands/
- └── microagents/
+ ├── skills/ # V1: Preferred location for repository-specific skills
+ │ └── repo.md # Repository-specific instructions
+ │   └── ... # Private skills that are only available inside this repo
+ └── microagents/ # V0: Current location (also supported in V1 for backward compatibility)
└── repo.md # Repository-specific instructions
└── ... # Private micro-agents that are only available inside this repo
```
-
## Loading Order
When OpenHands works with a repository, it:
-1. Loads repository-specific instructions from `.openhands/microagents/repo.md` if present
+
+1. Loads repository-specific instructions from `.openhands/microagents/repo.md` (V0) or `.openhands/skills/` (V1) if present
2. Loads relevant knowledge agents based on keywords in conversations
-## Types of Microagents
+**Note**: V1 also supports loading from `.openhands/microagents/` for backward compatibility.
-Most microagents use markdown files with YAML frontmatter. For repository agents (repo.md), the frontmatter is optional - if not provided, the file will be loaded with default settings as a repository agent.
+## Types of Skills/Microagents
+Most skills/microagents use markdown files with YAML frontmatter. For repository agents (repo.md), the frontmatter is optional - if not provided, the file will be loaded with default settings as a repository agent.
### 1. Knowledge Agents
Knowledge agents provide specialized expertise that's triggered by keywords in conversations. They help with:
+
- Language best practices
- Framework guidelines
- Common patterns
- Tool usage
Key characteristics:
+
- **Trigger-based**: Activated by specific keywords in conversations
- **Context-aware**: Provide relevant advice based on file types and content
- **Reusable**: Knowledge can be applied across multiple projects
- **Versioned**: Support multiple versions of tools/frameworks
-You can see an example of a knowledge-based agent in [OpenHands's github microagent](https://github.com/OpenHands/OpenHands/tree/main/microagents/github.md).
+You can see an example of a knowledge-based agent in [OpenHands's github skill](https://github.com/OpenHands/OpenHands/tree/main/skills/github.md).
### 2. Repository Agents
Repository agents provide repository-specific knowledge and guidelines. They are:
-- Loaded from `.openhands/microagents/repo.md`
+
+- Loaded from `.openhands/microagents/repo.md` (V0) or `.openhands/skills/` directory (V1)
+- V1 also supports `.openhands/microagents/` for backward compatibility
- Specific to individual repositories
- Automatically activated for their repository
- Perfect for team practices and project conventions
Key features:
+
- **Project-specific**: Contains guidelines unique to the repository
- **Team-focused**: Enforces team conventions and practices
- **Always active**: Automatically loaded for the repository
@@ -84,18 +109,17 @@ Key features:
You can see an example of a repo agent in [the agent for the OpenHands repo itself](https://github.com/OpenHands/OpenHands/blob/main/.openhands/microagents/repo.md).
-
## Contributing
### When to Contribute
1. **Knowledge Agents** - When you have:
+
- Language/framework best practices
- Tool usage patterns
- Common problem solutions
- General development guidelines
-
2. **Repository Agents** - When you need:
- Project-specific guidelines
- Team conventions and practices
@@ -105,13 +129,13 @@ You can see an example of a repo agent in [the agent for the OpenHands repo itse
### Best Practices
1. **For Knowledge Agents**:
+
- Choose distinctive triggers
- Focus on one area of expertise
- Include practical examples
- Use file patterns when relevant
- Keep knowledge general and reusable
-
2. **For Repository Agents**:
- Document clear setup instructions
- Include repository structure details
@@ -126,12 +150,11 @@ You can see an example of a repo agent in [the agent for the OpenHands repo itse
### Submission Process
1. Create your agent file in the appropriate directory:
- - `microagents/` for expertise (public, shareable)
- - Note: Repository-specific agents should remain in their respective repositories' `.openhands/microagents/` directory
+ - `skills/` for expertise (public, shareable)
+ - Note: Repository-specific agents should remain in their respective repositories' `.openhands/skills/` (V1) or `.openhands/microagents/` (V0) directory
2. Test thoroughly
3. Submit a pull request to OpenHands
-
## License
-All microagents are subject to the same license as OpenHands. See the root LICENSE file for details.
+All skills/microagents are subject to the same license as OpenHands. See the root LICENSE file for details.
diff --git a/microagents/add_agent.md b/skills/add_agent.md
similarity index 93%
rename from microagents/add_agent.md
rename to skills/add_agent.md
index e3fccd19eb42..a8a49befe663 100644
--- a/microagents/add_agent.md
+++ b/skills/add_agent.md
@@ -37,4 +37,4 @@ When creating a new microagent:
For detailed information, see:
- [Microagents Overview](https://docs.all-hands.dev/usage/prompting/microagents-overview)
-- [Example GitHub Microagent](https://github.com/OpenHands/OpenHands/blob/main/microagents/github.md)
+- [Example GitHub Skill](https://github.com/OpenHands/OpenHands/blob/main/skills/github.md)
diff --git a/microagents/add_repo_inst.md b/skills/add_repo_inst.md
similarity index 100%
rename from microagents/add_repo_inst.md
rename to skills/add_repo_inst.md
diff --git a/microagents/address_pr_comments.md b/skills/address_pr_comments.md
similarity index 100%
rename from microagents/address_pr_comments.md
rename to skills/address_pr_comments.md
diff --git a/skills/agent-builder.md b/skills/agent-builder.md
new file mode 100644
index 000000000000..a1b7931e9e04
--- /dev/null
+++ b/skills/agent-builder.md
@@ -0,0 +1,39 @@
+---
+name: agent_sdk_builder
+version: 1.0.0
+author: openhands
+agent: CodeActAgent
+triggers:
+ - /agent-builder
+inputs:
+ - name: INITIAL_PROMPT
+ description: "Initial SDK requirements"
+---
+
+# Agent Builder and Interviewer Role
+
+You are an expert requirements gatherer and agent builder. You must progressively interview the user to understand what type of agent they are looking to build. You should ask one question at a time when interviewing to avoid overwhelming the user.
+
+Please refer to the user's initial prompt: {INITIAL_PROMPT}
+
+If {INITIAL_PROMPT} is blank, your first interview question should be: "Please provide a brief description of the type of agent you are looking to build."
+
+# Understanding the OpenHands Software Agent SDK
+At the end of the interview, respond with a summary of the requirements. Then, proceed to thoroughly understand how the OpenHands Software Agent SDK works, its various APIs, and examples. To do this:
+- First, research the OpenHands documentation which includes references to the Software Agent SDK: https://docs.openhands.dev/llms.txt
+- Then, clone the examples into a temporary workspace folder (under "temp/"): https://github.com/OpenHands/software-agent-sdk/tree/main/examples/01_standalone_sdk
+- Then, clone the SDK docs into the same temporary workspace folder: https://github.com/OpenHands/docs/tree/main/sdk
+
+After analyzing the OpenHands Agent SDK, you may optionally ask additional clarifying questions in case it's important for the technical design of the agent.
+
+# Generating the SDK Plan
+You can then proceed to build a technical implementation plan based on the user requirements and your understanding of how the OpenHands Agent SDK works.
+- The plan should be stored in "plan/SDK_PLAN.md" from the root of the workspace.
+- A visual representation of how the agent should work based on the SDK_PLAN.md. This should look like a flow diagram with nodes and edges. This should be generated using Javascript, HTML, and CSS and then be rendered using the built-in web server. Store this in the plan/ directory.
+
+# Implementing the Plan
+After the plan is generated, please ask the user if they are ready to generate the SDK implementation. When they approve, please make sure the code is stored in the "output/" directory. Make sure the code provides logging that a user can see in the terminal. Ideally, the SDK is a single python file.
+
+Additional guidelines:
+- Users can configure their LLM API Key using an environment variable named "LLM_API_KEY"
+- Unless otherwise specified, default to this model: openhands/claude-sonnet-4-20250514. This is configurable through the LLM_BASE_MODEL environment variable.
diff --git a/microagents/agent_memory.md b/skills/agent_memory.md
similarity index 100%
rename from microagents/agent_memory.md
rename to skills/agent_memory.md
diff --git a/skills/azure_devops.md b/skills/azure_devops.md
new file mode 100644
index 000000000000..5064cc9bd47f
--- /dev/null
+++ b/skills/azure_devops.md
@@ -0,0 +1,52 @@
+---
+name: azure_devops
+type: knowledge
+version: 1.0.0
+agent: CodeActAgent
+triggers:
+- azure_devops
+- azure
+---
+
+You have access to an environment variable, `AZURE_DEVOPS_TOKEN`, which allows you to interact with
+the Azure DevOps API.
+
+
+You can use `curl` with the `AZURE_DEVOPS_TOKEN` to interact with Azure DevOps's API.
+ALWAYS use the Azure DevOps API for operations instead of a web browser.
+
+
+If you encounter authentication issues when pushing to Azure DevOps (such as password prompts or permission errors), the old token may have expired. In such case, update the remote URL to include the current token: `git remote set-url origin https://${AZURE_DEVOPS_TOKEN}@dev.azure.com/organization/project/_git/repository`
+
+Here are some instructions for pushing, but ONLY do this if the user asks you to:
+* NEVER push directly to the `main` or `master` branch
+* Git config (username and email) is pre-set. Do not modify.
+* You may already be on a branch starting with `openhands-workspace`. Create a new branch with a better name before pushing.
+* Once you've created your own branch or a pull request, continue to update it. Do NOT create a new one unless you are explicitly asked to. Update the PR title and description as necessary, but don't change the branch name.
+* Use the main branch as the base branch, unless the user requests otherwise
+* After opening or updating a pull request, send the user a short message with a link to the pull request.
+* Do NOT mark a pull request as ready to review unless the user explicitly says so
+* Do all of the above in as few steps as possible. E.g. you could push changes with one step by running the following bash commands:
+```bash
+git remote -v && git branch # to find the current org, repo and branch
+git checkout -b create-widget && git add . && git commit -m "Create widget" && git push -u origin create-widget
+```
+
+## Azure DevOps API Usage
+
+When working with Azure DevOps API, you need to use Basic authentication with your Personal Access Token (PAT). The username is ignored (empty string), and the password is the PAT.
+
+Here's how to authenticate with curl:
+```bash
+# Convert PAT to base64
+AUTH=$(echo -n ":$AZURE_DEVOPS_TOKEN" | base64)
+
+# Make API call
+curl -H "Authorization: Basic $AUTH" -H "Content-Type: application/json" https://dev.azure.com/{organization}/{project}/_apis/git/repositories?api-version=7.1
+```
+
+Common API endpoints:
+- List repositories: `https://dev.azure.com/{organization}/{project}/_apis/git/repositories?api-version=7.1`
+- Get repository details: `https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}?api-version=7.1`
+- List pull requests: `https://dev.azure.com/{organization}/{project}/_apis/git/pullrequests?api-version=7.1`
+- Create pull request: `https://dev.azure.com/{organization}/{project}/_apis/git/repositories/{repositoryId}/pullrequests?api-version=7.1` (POST)
diff --git a/microagents/bitbucket.md b/skills/bitbucket.md
similarity index 100%
rename from microagents/bitbucket.md
rename to skills/bitbucket.md
diff --git a/microagents/code-review.md b/skills/code-review.md
similarity index 100%
rename from microagents/code-review.md
rename to skills/code-review.md
diff --git a/microagents/codereview-roasted.md b/skills/codereview-roasted.md
similarity index 100%
rename from microagents/codereview-roasted.md
rename to skills/codereview-roasted.md
diff --git a/microagents/default-tools.md b/skills/default-tools.md
similarity index 100%
rename from microagents/default-tools.md
rename to skills/default-tools.md
diff --git a/microagents/docker.md b/skills/docker.md
similarity index 100%
rename from microagents/docker.md
rename to skills/docker.md
diff --git a/microagents/fix-py-line-too-long.md b/skills/fix-py-line-too-long.md
similarity index 100%
rename from microagents/fix-py-line-too-long.md
rename to skills/fix-py-line-too-long.md
diff --git a/microagents/fix_test.md b/skills/fix_test.md
similarity index 100%
rename from microagents/fix_test.md
rename to skills/fix_test.md
diff --git a/microagents/flarglebargle.md b/skills/flarglebargle.md
similarity index 100%
rename from microagents/flarglebargle.md
rename to skills/flarglebargle.md
diff --git a/microagents/github.md b/skills/github.md
similarity index 100%
rename from microagents/github.md
rename to skills/github.md
diff --git a/microagents/gitlab.md b/skills/gitlab.md
similarity index 100%
rename from microagents/gitlab.md
rename to skills/gitlab.md
diff --git a/microagents/kubernetes.md b/skills/kubernetes.md
similarity index 100%
rename from microagents/kubernetes.md
rename to skills/kubernetes.md
diff --git a/microagents/npm.md b/skills/npm.md
similarity index 100%
rename from microagents/npm.md
rename to skills/npm.md
diff --git a/microagents/onboarding.md b/skills/onboarding.md
similarity index 100%
rename from microagents/onboarding.md
rename to skills/onboarding.md
diff --git a/microagents/pdflatex.md b/skills/pdflatex.md
similarity index 100%
rename from microagents/pdflatex.md
rename to skills/pdflatex.md
diff --git a/microagents/security.md b/skills/security.md
similarity index 100%
rename from microagents/security.md
rename to skills/security.md
diff --git a/microagents/ssh.md b/skills/ssh.md
similarity index 100%
rename from microagents/ssh.md
rename to skills/ssh.md
diff --git a/microagents/swift-linux.md b/skills/swift-linux.md
similarity index 100%
rename from microagents/swift-linux.md
rename to skills/swift-linux.md
diff --git a/microagents/update_pr_description.md b/skills/update_pr_description.md
similarity index 100%
rename from microagents/update_pr_description.md
rename to skills/update_pr_description.md
diff --git a/microagents/update_test.md b/skills/update_test.md
similarity index 100%
rename from microagents/update_test.md
rename to skills/update_test.md
diff --git a/tests/unit/app_server/test_app_conversation_service_base.py b/tests/unit/app_server/test_app_conversation_service_base.py
new file mode 100644
index 000000000000..a179a11c2448
--- /dev/null
+++ b/tests/unit/app_server/test_app_conversation_service_base.py
@@ -0,0 +1,628 @@
+"""Unit tests for git functionality in AppConversationServiceBase.
+
+This module tests the git-related functionality, specifically the clone_or_init_git_repo method
+and the recent bug fixes for git checkout operations.
+"""
+
+import subprocess
+from unittest.mock import AsyncMock, MagicMock, Mock, patch
+
+import pytest
+
+from openhands.app_server.app_conversation.app_conversation_models import AgentType
+from openhands.app_server.app_conversation.app_conversation_service_base import (
+ AppConversationServiceBase,
+)
+from openhands.app_server.user.user_context import UserContext
+
+
+class MockUserInfo:
+ """Mock class for UserInfo to simulate user settings."""
+
+ def __init__(
+ self, git_user_name: str | None = None, git_user_email: str | None = None
+ ):
+ self.git_user_name = git_user_name
+ self.git_user_email = git_user_email
+
+
+class MockCommandResult:
+ """Mock class for command execution result."""
+
+ def __init__(self, exit_code: int = 0, stderr: str = ''):
+ self.exit_code = exit_code
+ self.stderr = stderr
+
+
+class MockWorkspace:
+ """Mock class for AsyncRemoteWorkspace."""
+
+ def __init__(self, working_dir: str = '/workspace'):
+ self.working_dir = working_dir
+ self.execute_command = AsyncMock(return_value=MockCommandResult())
+
+
+class MockAppConversationServiceBase:
+ """Mock class to test git functionality without complex dependencies."""
+
+ def __init__(self):
+ self.logger = MagicMock()
+
+ async def clone_or_init_git_repo(
+ self,
+ workspace_path: str,
+ repo_url: str,
+ branch: str = 'main',
+ timeout: int = 300,
+ ) -> bool:
+ """Clone or initialize a git repository.
+
+ This is a simplified version of the actual method for testing purposes.
+ """
+ try:
+ # Try to clone the repository
+ clone_result = subprocess.run(
+ ['git', 'clone', '--branch', branch, repo_url, workspace_path],
+ capture_output=True,
+ text=True,
+ timeout=timeout,
+ )
+
+ if clone_result.returncode == 0:
+ self.logger.info(
+ f'Successfully cloned repository {repo_url} to {workspace_path}'
+ )
+ return True
+
+ # If clone fails, try to checkout the branch
+ checkout_result = subprocess.run(
+ ['git', 'checkout', branch],
+ cwd=workspace_path,
+ capture_output=True,
+ text=True,
+ timeout=timeout,
+ )
+
+ if checkout_result.returncode == 0:
+ self.logger.info(f'Successfully checked out branch {branch}')
+ return True
+ else:
+ self.logger.error(
+ f'Failed to checkout branch {branch}: {checkout_result.stderr}'
+ )
+ return False
+
+ except subprocess.TimeoutExpired:
+ self.logger.error(f'Git operation timed out after {timeout} seconds')
+ return False
+ except Exception as e:
+ self.logger.error(f'Git operation failed: {str(e)}')
+ return False
+
+
+@pytest.fixture
+def service():
+ """Create a mock service instance for testing."""
+ return MockAppConversationServiceBase()
+
+
+@pytest.mark.asyncio
+async def test_clone_or_init_git_repo_successful_clone(service):
+ """Test successful git clone operation."""
+ with patch('subprocess.run') as mock_run:
+ # Mock successful clone
+ mock_run.return_value = MagicMock(returncode=0, stderr='', stdout='Cloning...')
+
+ result = await service.clone_or_init_git_repo(
+ workspace_path='/tmp/test_repo',
+ repo_url='https://github.com/test/repo.git',
+ branch='main',
+ timeout=300,
+ )
+
+ assert result is True
+ mock_run.assert_called_once_with(
+ [
+ 'git',
+ 'clone',
+ '--branch',
+ 'main',
+ 'https://github.com/test/repo.git',
+ '/tmp/test_repo',
+ ],
+ capture_output=True,
+ text=True,
+ timeout=300,
+ )
+ service.logger.info.assert_called_with(
+ 'Successfully cloned repository https://github.com/test/repo.git to /tmp/test_repo'
+ )
+
+
+@pytest.mark.asyncio
+async def test_clone_or_init_git_repo_clone_fails_checkout_succeeds(service):
+ """Test git clone fails but checkout succeeds."""
+ with patch('subprocess.run') as mock_run:
+ # Mock clone failure, then checkout success
+ mock_run.side_effect = [
+ MagicMock(returncode=1, stderr='Clone failed', stdout=''), # Clone fails
+ MagicMock(
+ returncode=0, stderr='', stdout='Switched to branch'
+ ), # Checkout succeeds
+ ]
+
+ result = await service.clone_or_init_git_repo(
+ workspace_path='/tmp/test_repo',
+ repo_url='https://github.com/test/repo.git',
+ branch='feature-branch',
+ timeout=300,
+ )
+
+ assert result is True
+ assert mock_run.call_count == 2
+
+ # Check clone call
+ mock_run.assert_any_call(
+ [
+ 'git',
+ 'clone',
+ '--branch',
+ 'feature-branch',
+ 'https://github.com/test/repo.git',
+ '/tmp/test_repo',
+ ],
+ capture_output=True,
+ text=True,
+ timeout=300,
+ )
+
+ # Check checkout call
+ mock_run.assert_any_call(
+ ['git', 'checkout', 'feature-branch'],
+ cwd='/tmp/test_repo',
+ capture_output=True,
+ text=True,
+ timeout=300,
+ )
+
+ service.logger.info.assert_called_with(
+ 'Successfully checked out branch feature-branch'
+ )
+
+
+@pytest.mark.asyncio
+async def test_clone_or_init_git_repo_both_operations_fail(service):
+ """Test both git clone and checkout operations fail."""
+ with patch('subprocess.run') as mock_run:
+ # Mock both operations failing
+ mock_run.side_effect = [
+ MagicMock(returncode=1, stderr='Clone failed', stdout=''), # Clone fails
+ MagicMock(
+ returncode=1, stderr='Checkout failed', stdout=''
+ ), # Checkout fails
+ ]
+
+ result = await service.clone_or_init_git_repo(
+ workspace_path='/tmp/test_repo',
+ repo_url='https://github.com/test/repo.git',
+ branch='nonexistent-branch',
+ timeout=300,
+ )
+
+ assert result is False
+ assert mock_run.call_count == 2
+ service.logger.error.assert_called_with(
+ 'Failed to checkout branch nonexistent-branch: Checkout failed'
+ )
+
+
+@pytest.mark.asyncio
+async def test_clone_or_init_git_repo_timeout(service):
+ """Test git operation timeout."""
+ with patch('subprocess.run') as mock_run:
+ # Mock timeout exception
+ mock_run.side_effect = subprocess.TimeoutExpired(
+ cmd=['git', 'clone'], timeout=300
+ )
+
+ result = await service.clone_or_init_git_repo(
+ workspace_path='/tmp/test_repo',
+ repo_url='https://github.com/test/repo.git',
+ branch='main',
+ timeout=300,
+ )
+
+ assert result is False
+ service.logger.error.assert_called_with(
+ 'Git operation timed out after 300 seconds'
+ )
+
+
+@pytest.mark.asyncio
+async def test_clone_or_init_git_repo_exception(service):
+ """Test git operation with unexpected exception."""
+ with patch('subprocess.run') as mock_run:
+ # Mock unexpected exception
+ mock_run.side_effect = Exception('Unexpected error')
+
+ result = await service.clone_or_init_git_repo(
+ workspace_path='/tmp/test_repo',
+ repo_url='https://github.com/test/repo.git',
+ branch='main',
+ timeout=300,
+ )
+
+ assert result is False
+ service.logger.error.assert_called_with(
+ 'Git operation failed: Unexpected error'
+ )
+
+
+@pytest.mark.asyncio
+async def test_clone_or_init_git_repo_custom_timeout(service):
+ """Test git operation with custom timeout."""
+ with patch('subprocess.run') as mock_run:
+ # Mock successful clone with custom timeout
+ mock_run.return_value = MagicMock(returncode=0, stderr='', stdout='Cloning...')
+
+ result = await service.clone_or_init_git_repo(
+ workspace_path='/tmp/test_repo',
+ repo_url='https://github.com/test/repo.git',
+ branch='main',
+ timeout=600, # Custom timeout
+ )
+
+ assert result is True
+ mock_run.assert_called_once_with(
+ [
+ 'git',
+ 'clone',
+ '--branch',
+ 'main',
+ 'https://github.com/test/repo.git',
+ '/tmp/test_repo',
+ ],
+ capture_output=True,
+ text=True,
+ timeout=600, # Verify custom timeout is used
+ )
+
+
+@patch(
+ 'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
+)
+def test_create_condenser_default_agent_with_none_max_size(mock_condenser_class):
+ """Test _create_condenser for DEFAULT agent with condenser_max_size = None uses default."""
+ # Arrange
+ mock_user_context = Mock(spec=UserContext)
+ with patch.object(
+ AppConversationServiceBase,
+ '__abstractmethods__',
+ set(),
+ ):
+ service = AppConversationServiceBase(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ )
+ mock_llm = MagicMock()
+ mock_llm_copy = MagicMock()
+ mock_llm_copy.usage_id = 'condenser'
+ mock_llm.model_copy.return_value = mock_llm_copy
+ mock_condenser_instance = MagicMock()
+ mock_condenser_class.return_value = mock_condenser_instance
+
+ # Act
+ service._create_condenser(mock_llm, AgentType.DEFAULT, None)
+
+ # Assert
+ mock_condenser_class.assert_called_once()
+ call_kwargs = mock_condenser_class.call_args[1]
+ # When condenser_max_size is None, max_size should not be passed (uses SDK default of 120)
+ assert 'max_size' not in call_kwargs
+ # keep_first is never passed (uses SDK default of 4)
+ assert 'keep_first' not in call_kwargs
+ assert call_kwargs['llm'].usage_id == 'condenser'
+ mock_llm.model_copy.assert_called_once()
+
+
+@patch(
+ 'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
+)
+def test_create_condenser_default_agent_with_custom_max_size(mock_condenser_class):
+ """Test _create_condenser for DEFAULT agent with custom condenser_max_size."""
+ # Arrange
+ mock_user_context = Mock(spec=UserContext)
+ with patch.object(
+ AppConversationServiceBase,
+ '__abstractmethods__',
+ set(),
+ ):
+ service = AppConversationServiceBase(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ )
+ mock_llm = MagicMock()
+ mock_llm_copy = MagicMock()
+ mock_llm_copy.usage_id = 'condenser'
+ mock_llm.model_copy.return_value = mock_llm_copy
+ mock_condenser_instance = MagicMock()
+ mock_condenser_class.return_value = mock_condenser_instance
+
+ # Act
+ service._create_condenser(mock_llm, AgentType.DEFAULT, 150)
+
+ # Assert
+ mock_condenser_class.assert_called_once()
+ call_kwargs = mock_condenser_class.call_args[1]
+ assert call_kwargs['max_size'] == 150 # Custom value should be used
+ # keep_first is never passed (uses SDK default of 4)
+ assert 'keep_first' not in call_kwargs
+ assert call_kwargs['llm'].usage_id == 'condenser'
+ mock_llm.model_copy.assert_called_once()
+
+
+@patch(
+ 'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
+)
+def test_create_condenser_plan_agent_with_none_max_size(mock_condenser_class):
+ """Test _create_condenser for PLAN agent with condenser_max_size = None uses default."""
+ # Arrange
+ mock_user_context = Mock(spec=UserContext)
+ with patch.object(
+ AppConversationServiceBase,
+ '__abstractmethods__',
+ set(),
+ ):
+ service = AppConversationServiceBase(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ )
+ mock_llm = MagicMock()
+ mock_llm_copy = MagicMock()
+ mock_llm_copy.usage_id = 'planning_condenser'
+ mock_llm.model_copy.return_value = mock_llm_copy
+ mock_condenser_instance = MagicMock()
+ mock_condenser_class.return_value = mock_condenser_instance
+
+ # Act
+ service._create_condenser(mock_llm, AgentType.PLAN, None)
+
+ # Assert
+ mock_condenser_class.assert_called_once()
+ call_kwargs = mock_condenser_class.call_args[1]
+ # When condenser_max_size is None, max_size should not be passed (uses SDK default of 120)
+ assert 'max_size' not in call_kwargs
+ # keep_first is never passed (uses SDK default of 4)
+ assert 'keep_first' not in call_kwargs
+ assert call_kwargs['llm'].usage_id == 'planning_condenser'
+ mock_llm.model_copy.assert_called_once()
+
+
+@patch(
+ 'openhands.app_server.app_conversation.app_conversation_service_base.LLMSummarizingCondenser'
+)
+def test_create_condenser_plan_agent_with_custom_max_size(mock_condenser_class):
+ """Test _create_condenser for PLAN agent with custom condenser_max_size."""
+ # Arrange
+ mock_user_context = Mock(spec=UserContext)
+ with patch.object(
+ AppConversationServiceBase,
+ '__abstractmethods__',
+ set(),
+ ):
+ service = AppConversationServiceBase(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ )
+ mock_llm = MagicMock()
+ mock_llm_copy = MagicMock()
+ mock_llm_copy.usage_id = 'planning_condenser'
+ mock_llm.model_copy.return_value = mock_llm_copy
+ mock_condenser_instance = MagicMock()
+ mock_condenser_class.return_value = mock_condenser_instance
+
+ # Act
+ service._create_condenser(mock_llm, AgentType.PLAN, 200)
+
+ # Assert
+ mock_condenser_class.assert_called_once()
+ call_kwargs = mock_condenser_class.call_args[1]
+ assert call_kwargs['max_size'] == 200 # Custom value should be used
+ # keep_first is never passed (uses SDK default of 4)
+ assert 'keep_first' not in call_kwargs
+ assert call_kwargs['llm'].usage_id == 'planning_condenser'
+ mock_llm.model_copy.assert_called_once()
+
+
+# =============================================================================
+# Tests for _configure_git_user_settings
+# =============================================================================
+
+
+def _create_service_with_mock_user_context(user_info: MockUserInfo) -> tuple:
+ """Create a mock service with the actual _configure_git_user_settings method.
+
+ Uses MagicMock for the service but binds the real method for testing.
+
+ Returns a tuple of (service, mock_user_context) for testing.
+ """
+ mock_user_context = MagicMock()
+ mock_user_context.get_user_info = AsyncMock(return_value=user_info)
+
+ # Create a simple mock service and set required attribute
+ service = MagicMock()
+ service.user_context = mock_user_context
+
+ # Bind the actual method from the real class to test real implementation
+ service._configure_git_user_settings = (
+ lambda workspace: AppConversationServiceBase._configure_git_user_settings(
+ service, workspace
+ )
+ )
+
+ return service, mock_user_context
+
+
+@pytest.fixture
+def mock_workspace():
+ """Create a mock workspace instance for testing."""
+ return MockWorkspace(working_dir='/workspace/project')
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_both_name_and_email(mock_workspace):
+ """Test configuring both git user name and email."""
+ user_info = MockUserInfo(
+ git_user_name='Test User', git_user_email='test@example.com'
+ )
+ service, mock_user_context = _create_service_with_mock_user_context(user_info)
+
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify get_user_info was called
+ mock_user_context.get_user_info.assert_called_once()
+
+ # Verify both git config commands were executed
+ assert mock_workspace.execute_command.call_count == 2
+
+ # Check git config user.name call
+ mock_workspace.execute_command.assert_any_call(
+ 'git config --global user.name "Test User"', '/workspace/project'
+ )
+
+ # Check git config user.email call
+ mock_workspace.execute_command.assert_any_call(
+ 'git config --global user.email "test@example.com"', '/workspace/project'
+ )
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_only_name(mock_workspace):
+ """Test configuring only git user name."""
+ user_info = MockUserInfo(git_user_name='Test User', git_user_email=None)
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify only user.name was configured
+ assert mock_workspace.execute_command.call_count == 1
+ mock_workspace.execute_command.assert_called_once_with(
+ 'git config --global user.name "Test User"', '/workspace/project'
+ )
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_only_email(mock_workspace):
+ """Test configuring only git user email."""
+ user_info = MockUserInfo(git_user_name=None, git_user_email='test@example.com')
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify only user.email was configured
+ assert mock_workspace.execute_command.call_count == 1
+ mock_workspace.execute_command.assert_called_once_with(
+ 'git config --global user.email "test@example.com"', '/workspace/project'
+ )
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_neither_set(mock_workspace):
+ """Test when neither git user name nor email is set."""
+ user_info = MockUserInfo(git_user_name=None, git_user_email=None)
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify no git config commands were executed
+ mock_workspace.execute_command.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_empty_strings(mock_workspace):
+ """Test when git user name and email are empty strings."""
+ user_info = MockUserInfo(git_user_name='', git_user_email='')
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Empty strings are falsy, so no commands should be executed
+ mock_workspace.execute_command.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_get_user_info_fails(mock_workspace):
+ """Test handling of exception when get_user_info fails."""
+ user_info = MockUserInfo()
+ service, mock_user_context = _create_service_with_mock_user_context(user_info)
+ mock_user_context.get_user_info = AsyncMock(
+ side_effect=Exception('User info error')
+ )
+
+ # Should not raise exception, just log warning
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify no git config commands were executed
+ mock_workspace.execute_command.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_name_command_fails(mock_workspace):
+ """Test handling when git config user.name command fails."""
+ user_info = MockUserInfo(
+ git_user_name='Test User', git_user_email='test@example.com'
+ )
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ # Make the first command fail (user.name), second succeed (user.email)
+ mock_workspace.execute_command = AsyncMock(
+ side_effect=[
+ MockCommandResult(exit_code=1, stderr='Permission denied'),
+ MockCommandResult(exit_code=0),
+ ]
+ )
+
+ # Should not raise exception
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify both commands were still attempted
+ assert mock_workspace.execute_command.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_email_command_fails(mock_workspace):
+ """Test handling when git config user.email command fails."""
+ user_info = MockUserInfo(
+ git_user_name='Test User', git_user_email='test@example.com'
+ )
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ # Make the first command succeed (user.name), second fail (user.email)
+ mock_workspace.execute_command = AsyncMock(
+ side_effect=[
+ MockCommandResult(exit_code=0),
+ MockCommandResult(exit_code=1, stderr='Permission denied'),
+ ]
+ )
+
+ # Should not raise exception
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify both commands were still attempted
+ assert mock_workspace.execute_command.call_count == 2
+
+
+@pytest.mark.asyncio
+async def test_configure_git_user_settings_special_characters_in_name(mock_workspace):
+ """Test git user name with special characters."""
+ user_info = MockUserInfo(
+ git_user_name="Test O'Brien", git_user_email='test@example.com'
+ )
+ service, _ = _create_service_with_mock_user_context(user_info)
+
+ await service._configure_git_user_settings(mock_workspace)
+
+ # Verify the name is passed with special characters
+ mock_workspace.execute_command.assert_any_call(
+ 'git config --global user.name "Test O\'Brien"', '/workspace/project'
+ )
diff --git a/tests/unit/app_server/test_docker_sandbox_service.py b/tests/unit/app_server/test_docker_sandbox_service.py
index f79988773a86..d428b3b664cb 100644
--- a/tests/unit/app_server/test_docker_sandbox_service.py
+++ b/tests/unit/app_server/test_docker_sandbox_service.py
@@ -697,10 +697,41 @@ async def test_container_to_sandbox_info_invalid_created_time(self, service):
assert result is not None
assert isinstance(result.created_at, datetime)
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
async def test_container_to_checked_sandbox_info_health_check_success(
- self, service, mock_running_container
+ self, mock_is_docker, service, mock_running_container
+ ):
+ """Test health check success when running in Docker."""
+ # Setup
+ service.httpx_client.get.return_value.raise_for_status.return_value = None
+
+ # Execute
+ result = await service._container_to_checked_sandbox_info(
+ mock_running_container
+ )
+
+ # Verify
+ assert result is not None
+ assert result.status == SandboxStatus.RUNNING
+ assert result.exposed_urls is not None
+ assert result.session_api_key == 'session_key_123'
+
+ # Verify health check was called with Docker-internal URL
+ service.httpx_client.get.assert_called_once_with(
+ 'http://host.docker.internal:12345/health'
+ )
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=False,
+ )
+ async def test_container_to_checked_sandbox_info_health_check_success_not_in_docker(
+ self, mock_is_docker, service, mock_running_container
):
- """Test health check success."""
+ """Test health check success when not running in Docker."""
# Setup
service.httpx_client.get.return_value.raise_for_status.return_value = None
@@ -715,7 +746,7 @@ async def test_container_to_checked_sandbox_info_health_check_success(
assert result.exposed_urls is not None
assert result.session_api_key == 'session_key_123'
- # Verify health check was called
+ # Verify health check was called with original localhost URL
service.httpx_client.get.assert_called_once_with(
'http://localhost:12345/health'
)
diff --git a/tests/unit/app_server/test_docker_sandbox_spec_service_injector.py b/tests/unit/app_server/test_docker_sandbox_spec_service_injector.py
index 1df987c56fde..059cc27e8ac6 100644
--- a/tests/unit/app_server/test_docker_sandbox_spec_service_injector.py
+++ b/tests/unit/app_server/test_docker_sandbox_spec_service_injector.py
@@ -447,3 +447,85 @@ async def test_pull_if_missing_flag_reset_after_first_inject(
# Verify no Docker operations were performed
mock_get_docker_client.assert_not_called()
mock_docker_client.images.get.assert_not_called()
+
+ @patch('openhands.app_server.sandbox.docker_sandbox_spec_service.get_docker_client')
+ @patch('openhands.app_server.sandbox.docker_sandbox_spec_service._logger')
+ async def test_pull_with_progress_logging(
+ self, mock_logger, mock_get_docker_client, sample_spec
+ ):
+ """Test that periodic progress logging occurs during image pull."""
+ # Setup
+ mock_docker_client = MagicMock()
+ mock_get_docker_client.return_value = mock_docker_client
+ mock_docker_client.images.get.side_effect = ImageNotFound('Image not found')
+
+ # Create a future that will be resolved after some delay to simulate slow pull
+ pull_future = asyncio.Future()
+
+ async def delayed_pull_completion():
+ # Wait for multiple logging intervals to pass
+ await asyncio.sleep(12) # 12 seconds = 2 logging intervals (5s each)
+ pull_future.set_result(MagicMock())
+
+ # Start the delayed completion task
+ asyncio.create_task(delayed_pull_completion())
+
+ # Mock the executor to return our delayed future
+ with patch('asyncio.get_running_loop') as mock_get_loop:
+ mock_loop = MagicMock()
+ mock_get_loop.return_value = mock_loop
+ mock_loop.run_in_executor.return_value = pull_future
+
+ injector = DockerSandboxSpecServiceInjector()
+
+ # Execute
+ await injector.pull_spec_if_missing(sample_spec)
+
+ # Verify that progress logging occurred
+ # Should have initial pull message, progress messages, and completion message
+ progress_calls = [
+ call
+ for call in mock_logger.info.call_args_list
+ if '🔄 Downloading Docker Image:' in str(call)
+ ]
+
+ # Should have at least 2 progress log messages (every 5 seconds for 12 seconds)
+ assert len(progress_calls) >= 2
+
+ # Verify the progress message format
+ for call in progress_calls:
+ assert '🔄 Downloading Docker Image: test-image:latest...' in str(call)
+
+ @patch('openhands.app_server.sandbox.docker_sandbox_spec_service.get_docker_client')
+ @patch('openhands.app_server.sandbox.docker_sandbox_spec_service._logger')
+ async def test_pull_with_progress_logging_fast_pull(
+ self, mock_logger, mock_get_docker_client, sample_spec
+ ):
+ """Test that no progress logging occurs for fast pulls (< 5 seconds)."""
+ # Setup
+ mock_docker_client = MagicMock()
+ mock_get_docker_client.return_value = mock_docker_client
+ mock_docker_client.images.get.side_effect = ImageNotFound('Image not found')
+
+ # Mock fast pull (completes immediately)
+ with patch('asyncio.get_running_loop') as mock_get_loop:
+ mock_loop = MagicMock()
+ mock_get_loop.return_value = mock_loop
+ fast_future = asyncio.Future()
+ fast_future.set_result(MagicMock())
+ mock_loop.run_in_executor.return_value = fast_future
+
+ injector = DockerSandboxSpecServiceInjector()
+
+ # Execute
+ await injector.pull_spec_if_missing(sample_spec)
+
+ # Verify that no progress logging occurred (only start/end messages)
+ progress_calls = [
+ call
+ for call in mock_logger.info.call_args_list
+ if '🔄 Downloading Docker Image:' in str(call)
+ ]
+
+ # Should have no progress log messages for fast pulls
+ assert len(progress_calls) == 0
diff --git a/tests/unit/app_server/test_docker_utils.py b/tests/unit/app_server/test_docker_utils.py
new file mode 100644
index 000000000000..127c6dfc6e2b
--- /dev/null
+++ b/tests/unit/app_server/test_docker_utils.py
@@ -0,0 +1,297 @@
+from unittest.mock import patch
+
+from openhands.app_server.utils.docker_utils import (
+ replace_localhost_hostname_for_docker,
+)
+
+
+class TestReplaceLocalhostHostnameForDocker:
+ """Test cases for replace_localhost_hostname_for_docker function."""
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_replace_localhost_basic_in_docker(self, mock_is_docker):
+ """Test basic localhost replacement when running in Docker."""
+ # Basic HTTP URL
+ result = replace_localhost_hostname_for_docker('http://localhost:8080')
+ assert result == 'http://host.docker.internal:8080'
+
+ # HTTPS URL
+ result = replace_localhost_hostname_for_docker('https://localhost:443')
+ assert result == 'https://host.docker.internal:443'
+
+ # No port specified
+ result = replace_localhost_hostname_for_docker('http://localhost')
+ assert result == 'http://host.docker.internal'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=False,
+ )
+ def test_replace_localhost_basic_not_in_docker(self, mock_is_docker):
+ """Test that localhost is NOT replaced when not running in Docker."""
+ # Basic HTTP URL
+ result = replace_localhost_hostname_for_docker('http://localhost:8080')
+ assert result == 'http://localhost:8080'
+
+ # HTTPS URL
+ result = replace_localhost_hostname_for_docker('https://localhost:443')
+ assert result == 'https://localhost:443'
+
+ # No port specified
+ result = replace_localhost_hostname_for_docker('http://localhost')
+ assert result == 'http://localhost'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_replace_localhost_with_path_and_query(self, mock_is_docker):
+ """Test localhost replacement preserving path and query parameters."""
+ # With path
+ result = replace_localhost_hostname_for_docker(
+ 'http://localhost:3000/api/health'
+ )
+ assert result == 'http://host.docker.internal:3000/api/health'
+
+ # With query parameters containing localhost
+ result = replace_localhost_hostname_for_docker(
+ 'http://localhost:8080/path?param=localhost&other=value'
+ )
+ assert (
+ result
+ == 'http://host.docker.internal:8080/path?param=localhost&other=value'
+ )
+
+ # With path containing localhost
+ result = replace_localhost_hostname_for_docker(
+ 'http://localhost:9000/localhost/endpoint'
+ )
+ assert result == 'http://host.docker.internal:9000/localhost/endpoint'
+
+ # With fragment
+ result = replace_localhost_hostname_for_docker(
+ 'http://localhost:8080/path#localhost'
+ )
+ assert result == 'http://host.docker.internal:8080/path#localhost'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_replace_localhost_with_authentication(self, mock_is_docker):
+ """Test localhost replacement with authentication in URL."""
+ result = replace_localhost_hostname_for_docker(
+ 'http://user:pass@localhost:8080/path'
+ )
+ assert result == 'http://user:pass@host.docker.internal:8080/path'
+
+ result = replace_localhost_hostname_for_docker(
+ 'https://admin:secret@localhost:443/admin'
+ )
+ assert result == 'https://admin:secret@host.docker.internal:443/admin'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_replace_localhost_different_protocols(self, mock_is_docker):
+ """Test localhost replacement with different protocols."""
+ # FTP
+ result = replace_localhost_hostname_for_docker('ftp://localhost:21/files')
+ assert result == 'ftp://host.docker.internal:21/files'
+
+ # WebSocket
+ result = replace_localhost_hostname_for_docker('ws://localhost:8080/socket')
+ assert result == 'ws://host.docker.internal:8080/socket'
+
+ # WebSocket Secure
+ result = replace_localhost_hostname_for_docker(
+ 'wss://localhost:443/secure-socket'
+ )
+ assert result == 'wss://host.docker.internal:443/secure-socket'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_no_replacement_for_non_localhost(self, mock_is_docker):
+ """Test that non-localhost hostnames are not replaced even when in Docker."""
+ # IP address
+ result = replace_localhost_hostname_for_docker('http://127.0.0.1:8080')
+ assert result == 'http://127.0.0.1:8080'
+
+ # Different hostname
+ result = replace_localhost_hostname_for_docker('http://example.com:8080')
+ assert result == 'http://example.com:8080'
+
+ # Hostname containing localhost but not exact match
+ result = replace_localhost_hostname_for_docker('http://mylocalhost:8080')
+ assert result == 'http://mylocalhost:8080'
+
+ # Subdomain of localhost
+ result = replace_localhost_hostname_for_docker('http://api.localhost:8080')
+ assert result == 'http://api.localhost:8080'
+
+ # localhost as subdomain
+ result = replace_localhost_hostname_for_docker(
+ 'http://localhost.example.com:8080'
+ )
+ assert result == 'http://localhost.example.com:8080'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_custom_replacement_hostname(self, mock_is_docker):
+ """Test using custom replacement hostname."""
+ result = replace_localhost_hostname_for_docker(
+ 'http://localhost:8080', 'custom.host'
+ )
+ assert result == 'http://custom.host:8080'
+
+ result = replace_localhost_hostname_for_docker(
+ 'https://localhost:443/path', 'internal.docker'
+ )
+ assert result == 'https://internal.docker:443/path'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_edge_cases_in_docker(self, mock_is_docker):
+ """Test edge cases and malformed URLs when in Docker."""
+ # Empty string
+ result = replace_localhost_hostname_for_docker('')
+ assert result == ''
+
+ # Malformed URL (no protocol)
+ result = replace_localhost_hostname_for_docker('localhost:8080')
+ assert result == 'localhost:8080'
+
+ # Just hostname
+ result = replace_localhost_hostname_for_docker('localhost')
+ assert result == 'localhost'
+
+ # URL with no hostname
+ result = replace_localhost_hostname_for_docker('http://:8080/path')
+ assert result == 'http://:8080/path'
+
+ # Invalid URL structure
+ result = replace_localhost_hostname_for_docker('not-a-url')
+ assert result == 'not-a-url'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=False,
+ )
+ def test_edge_cases_not_in_docker(self, mock_is_docker):
+ """Test edge cases and malformed URLs when not in Docker."""
+ # Empty string
+ result = replace_localhost_hostname_for_docker('')
+ assert result == ''
+
+ # Malformed URL (no protocol)
+ result = replace_localhost_hostname_for_docker('localhost:8080')
+ assert result == 'localhost:8080'
+
+ # Just hostname
+ result = replace_localhost_hostname_for_docker('localhost')
+ assert result == 'localhost'
+
+ # URL with no hostname
+ result = replace_localhost_hostname_for_docker('http://:8080/path')
+ assert result == 'http://:8080/path'
+
+ # Invalid URL structure
+ result = replace_localhost_hostname_for_docker('not-a-url')
+ assert result == 'not-a-url'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_complex_urls(self, mock_is_docker):
+ """Test complex URL scenarios."""
+ # Multiple query parameters and fragments
+ complex_url = 'http://localhost:8080/api/v1/health?timeout=30&retry=3&host=localhost#section'
+ result = replace_localhost_hostname_for_docker(complex_url)
+ expected = 'http://host.docker.internal:8080/api/v1/health?timeout=30&retry=3&host=localhost#section'
+ assert result == expected
+
+ # URL with encoded characters
+ encoded_url = (
+ 'http://localhost:8080/path%20with%20spaces?param=value%20with%20spaces'
+ )
+ result = replace_localhost_hostname_for_docker(encoded_url)
+ expected = 'http://host.docker.internal:8080/path%20with%20spaces?param=value%20with%20spaces'
+ assert result == expected
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_integration_with_docker_detection_in_docker(self, mock_is_docker):
+ """Test integration scenario similar to actual usage when in Docker."""
+ # Simulate the actual usage pattern in the code
+ app_server_url = 'http://localhost:35375'
+
+ # This is how it's used in the actual code
+ internal_url = replace_localhost_hostname_for_docker(app_server_url)
+
+ assert internal_url == 'http://host.docker.internal:35375'
+
+ # Test with health check path appended
+ health_check_url = f'{internal_url}/health'
+ assert health_check_url == 'http://host.docker.internal:35375/health'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=False,
+ )
+ def test_integration_with_docker_detection_not_in_docker(self, mock_is_docker):
+ """Test integration scenario similar to actual usage when not in Docker."""
+ # Simulate the actual usage pattern in the code
+ app_server_url = 'http://localhost:35375'
+
+ # This is how it's used in the actual code
+ internal_url = replace_localhost_hostname_for_docker(app_server_url)
+
+ # Should return original URL when not in Docker
+ assert internal_url == 'http://localhost:35375'
+
+ # Test with health check path appended
+ health_check_url = f'{internal_url}/health'
+ assert health_check_url == 'http://localhost:35375/health'
+
+ @patch(
+ 'openhands.app_server.utils.docker_utils.is_running_in_docker',
+ return_value=True,
+ )
+ def test_preserves_original_url_structure(self, mock_is_docker):
+ """Test that all URL components are preserved correctly."""
+ original_url = 'https://user:pass@localhost:8443/api/v1/endpoint?param1=value1&param2=value2#fragment'
+ result = replace_localhost_hostname_for_docker(original_url)
+ expected = 'https://user:pass@host.docker.internal:8443/api/v1/endpoint?param1=value1&param2=value2#fragment'
+
+ assert result == expected
+
+ # Verify each component is preserved
+ from urllib.parse import urlparse
+
+ original_parsed = urlparse(original_url)
+ result_parsed = urlparse(result)
+
+ assert original_parsed.scheme == result_parsed.scheme
+ assert original_parsed.username == result_parsed.username
+ assert original_parsed.password == result_parsed.password
+ assert original_parsed.port == result_parsed.port
+ assert original_parsed.path == result_parsed.path
+ assert original_parsed.query == result_parsed.query
+ assert original_parsed.fragment == result_parsed.fragment
+
+ # Only hostname should be different
+ assert original_parsed.hostname == 'localhost'
+ assert result_parsed.hostname == 'host.docker.internal'
diff --git a/tests/unit/app_server/test_github_v1_callback_processor.py b/tests/unit/app_server/test_github_v1_callback_processor.py
new file mode 100644
index 000000000000..acf958a8e3d1
--- /dev/null
+++ b/tests/unit/app_server/test_github_v1_callback_processor.py
@@ -0,0 +1,771 @@
+"""
+Tests for the GithubV1CallbackProcessor.
+
+Covers:
+- Event filtering
+- Successful summary + GitHub posting
+- Inline PR comments
+- Error conditions (missing IDs/credentials, conversation/sandbox issues)
+- Agent server HTTP/timeout errors
+- Low-level helper methods
+"""
+
+import os
+from unittest.mock import AsyncMock, MagicMock, patch
+from uuid import uuid4
+
+import httpx
+import pytest
+
+from openhands.app_server.app_conversation.app_conversation_models import (
+ AppConversationInfo,
+)
+from openhands.app_server.event_callback.event_callback_models import EventCallback
+from openhands.app_server.event_callback.event_callback_result_models import (
+ EventCallbackResultStatus,
+)
+from openhands.app_server.event_callback.github_v1_callback_processor import (
+ GithubV1CallbackProcessor,
+)
+from openhands.app_server.sandbox.sandbox_models import (
+ ExposedUrl,
+ SandboxInfo,
+ SandboxStatus,
+)
+from openhands.events.action.message import MessageAction
+from openhands.sdk.event import ConversationStateUpdateEvent
+
+# ---------------------------------------------------------------------------
+# Fixtures
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture
+def github_callback_processor():
+ return GithubV1CallbackProcessor(
+ github_view_data={
+ 'installation_id': 12345,
+ 'full_repo_name': 'test-owner/test-repo',
+ 'issue_number': 42,
+ },
+ should_request_summary=True,
+ should_extract=True,
+ inline_pr_comment=False,
+ )
+
+
+@pytest.fixture
+def github_callback_processor_inline():
+ return GithubV1CallbackProcessor(
+ github_view_data={
+ 'installation_id': 12345,
+ 'full_repo_name': 'test-owner/test-repo',
+ 'issue_number': 42,
+ 'comment_id': 'comment_123',
+ },
+ should_request_summary=True,
+ should_extract=True,
+ inline_pr_comment=True,
+ )
+
+
+@pytest.fixture
+def conversation_state_update_event():
+ return ConversationStateUpdateEvent(key='execution_status', value='finished')
+
+
+@pytest.fixture
+def wrong_event():
+ return MessageAction(content='Hello world')
+
+
+@pytest.fixture
+def wrong_state_event():
+ return ConversationStateUpdateEvent(key='execution_status', value='running')
+
+
+@pytest.fixture
+def event_callback():
+ return EventCallback(
+ id=uuid4(),
+ conversation_id=uuid4(),
+ processor=GithubV1CallbackProcessor(),
+ event_kind='ConversationStateUpdateEvent',
+ )
+
+
+@pytest.fixture
+def mock_app_conversation_info():
+ return AppConversationInfo(
+ conversation_id=uuid4(),
+ sandbox_id='sandbox_123',
+ title='Test Conversation',
+ created_by_user_id='test_user_123',
+ )
+
+
+@pytest.fixture
+def mock_sandbox_info():
+ return SandboxInfo(
+ id='sandbox_123',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test_api_key',
+ created_by_user_id='test_user_123',
+ sandbox_spec_id='spec_123',
+ exposed_urls=[
+ ExposedUrl(name='AGENT_SERVER', url='http://localhost:8000', port=8000),
+ ],
+ )
+
+
+# ---------------------------------------------------------------------------
+# Helper for common service mocks
+# ---------------------------------------------------------------------------
+
+
+async def _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ app_conversation_info,
+ sandbox_info,
+ agent_response_text='Test summary from agent',
+):
+ # app_conversation_info_service
+ mock_app_conversation_info_service = AsyncMock()
+ mock_app_conversation_info_service.get_app_conversation_info.return_value = (
+ app_conversation_info
+ )
+ mock_get_app_conversation_info_service.return_value.__aenter__.return_value = (
+ mock_app_conversation_info_service
+ )
+
+ # sandbox_service
+ mock_sandbox_service = AsyncMock()
+ mock_sandbox_service.get_sandbox.return_value = sandbox_info
+ mock_get_sandbox_service.return_value.__aenter__.return_value = mock_sandbox_service
+
+ # httpx_client
+ mock_httpx_client = AsyncMock()
+ mock_response = MagicMock()
+ mock_response.json.return_value = {'response': agent_response_text}
+ mock_response.raise_for_status.return_value = None
+ mock_httpx_client.post.return_value = mock_response
+ mock_get_httpx_client.return_value.__aenter__.return_value = mock_httpx_client
+
+ return mock_httpx_client
+
+
+# ---------------------------------------------------------------------------
+# Tests
+# ---------------------------------------------------------------------------
+
+
+class TestGithubV1CallbackProcessor:
+ async def test_call_with_wrong_event_type(
+ self, github_callback_processor, wrong_event, event_callback
+ ):
+ result = await github_callback_processor(
+ conversation_id=uuid4(),
+ callback=event_callback,
+ event=wrong_event,
+ )
+ assert result is None
+
+ async def test_call_with_wrong_state_event(
+ self, github_callback_processor, wrong_state_event, event_callback
+ ):
+ result = await github_callback_processor(
+ conversation_id=uuid4(),
+ callback=event_callback,
+ event=wrong_state_event,
+ )
+ assert result is None
+
+ async def test_call_should_request_summary_false(
+ self, github_callback_processor, conversation_state_update_event, event_callback
+ ):
+ github_callback_processor.should_request_summary = False
+
+ result = await github_callback_processor(
+ conversation_id=uuid4(),
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+ assert result is None
+
+ # ------------------------------------------------------------------ #
+ # Successful paths
+ # ------------------------------------------------------------------ #
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key',
+ },
+ )
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.get_prompt_template'
+ )
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.GithubIntegration'
+ )
+ @patch('openhands.app_server.event_callback.github_v1_callback_processor.Github')
+ async def test_successful_callback_execution(
+ self,
+ mock_github,
+ mock_github_integration,
+ mock_get_prompt_template,
+ mock_get_httpx_client,
+ mock_get_sandbox_service,
+ mock_get_app_conversation_info_service,
+ github_callback_processor,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ conversation_id = uuid4()
+
+ # Common service mocks
+ mock_httpx_client = await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+
+ mock_get_prompt_template.return_value = 'Please provide a summary'
+
+ # GitHub integration
+ mock_token_data = MagicMock()
+ mock_token_data.token = 'test_access_token'
+ mock_integration_instance = MagicMock()
+ mock_integration_instance.get_access_token.return_value = mock_token_data
+ mock_github_integration.return_value = mock_integration_instance
+
+ # GitHub API
+ mock_github_client = MagicMock()
+ mock_repo = MagicMock()
+ mock_issue = MagicMock()
+ mock_repo.get_issue.return_value = mock_issue
+ mock_github_client.get_repo.return_value = mock_repo
+ mock_github.return_value.__enter__.return_value = mock_github_client
+
+ result = await github_callback_processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.SUCCESS
+ assert result.event_callback_id == event_callback.id
+ assert result.event_id == conversation_state_update_event.id
+ assert result.conversation_id == conversation_id
+ assert result.detail == 'Test summary from agent'
+ assert github_callback_processor.should_request_summary is False
+
+ mock_github_integration.assert_called_once_with(
+ 'test_client_id', 'test_private_key'
+ )
+ mock_integration_instance.get_access_token.assert_called_once_with(12345)
+
+ mock_github.assert_called_once_with('test_access_token')
+ mock_github_client.get_repo.assert_called_once_with('test-owner/test-repo')
+ mock_repo.get_issue.assert_called_once_with(number=42)
+ mock_issue.create_comment.assert_called_once_with('Test summary from agent')
+
+ mock_httpx_client.post.assert_called_once()
+ url_arg, kwargs = mock_httpx_client.post.call_args
+ url = url_arg[0] if url_arg else kwargs['url']
+ assert 'ask_agent' in url
+ assert kwargs['headers']['X-Session-API-Key'] == 'test_api_key'
+ assert kwargs['json']['question'] == 'Please provide a summary'
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key',
+ },
+ )
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.get_prompt_template'
+ )
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.GithubIntegration'
+ )
+ @patch('openhands.app_server.event_callback.github_v1_callback_processor.Github')
+ async def test_successful_inline_pr_comment(
+ self,
+ mock_github,
+ mock_github_integration,
+ mock_get_prompt_template,
+ mock_get_httpx_client,
+ mock_get_sandbox_service,
+ mock_get_app_conversation_info_service,
+ github_callback_processor_inline,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ conversation_id = uuid4()
+
+ await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+
+ mock_get_prompt_template.return_value = 'Please provide a summary'
+
+ mock_token_data = MagicMock()
+ mock_token_data.token = 'test_access_token'
+ mock_integration_instance = MagicMock()
+ mock_integration_instance.get_access_token.return_value = mock_token_data
+ mock_github_integration.return_value = mock_integration_instance
+
+ mock_github_client = MagicMock()
+ mock_repo = MagicMock()
+ mock_pr = MagicMock()
+ mock_repo.get_pull.return_value = mock_pr
+ mock_github_client.get_repo.return_value = mock_repo
+ mock_github.return_value.__enter__.return_value = mock_github_client
+
+ result = await github_callback_processor_inline(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.SUCCESS
+
+ mock_repo.get_pull.assert_called_once_with(42)
+ mock_pr.create_review_comment_reply.assert_called_once_with(
+ comment_id='comment_123', body='Test summary from agent'
+ )
+
+ # ------------------------------------------------------------------ #
+ # Error paths
+ # ------------------------------------------------------------------ #
+
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ async def test_missing_installation_id(
+ self,
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ processor = GithubV1CallbackProcessor(
+ github_view_data={}, should_request_summary=True
+ )
+ conversation_id = uuid4()
+
+ await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+
+ result = await processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.ERROR
+ assert 'Missing installation ID' in result.detail
+
+ @patch.dict(os.environ, {}, clear=True)
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ async def test_missing_github_credentials(
+ self,
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ github_callback_processor,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ conversation_id = uuid4()
+
+ await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+
+ result = await github_callback_processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.ERROR
+ assert 'GitHub App credentials are not configured' in result.detail
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key',
+ },
+ )
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ async def test_sandbox_not_running(
+ self,
+ mock_get_sandbox_service,
+ mock_get_app_conversation_info_service,
+ github_callback_processor,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ ):
+ conversation_id = uuid4()
+
+ mock_app_conversation_info_service = AsyncMock()
+ mock_app_conversation_info_service.get_app_conversation_info.return_value = (
+ mock_app_conversation_info
+ )
+ mock_get_app_conversation_info_service.return_value.__aenter__.return_value = (
+ mock_app_conversation_info_service
+ )
+
+ non_running_sandbox = SandboxInfo(
+ id='sandbox_123',
+ status=SandboxStatus.PAUSED,
+ session_api_key='test_api_key',
+ created_by_user_id='test_user_123',
+ sandbox_spec_id='spec_123',
+ )
+ mock_sandbox_service = AsyncMock()
+ mock_sandbox_service.get_sandbox.return_value = non_running_sandbox
+ mock_get_sandbox_service.return_value.__aenter__.return_value = (
+ mock_sandbox_service
+ )
+
+ result = await github_callback_processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.ERROR
+ assert 'Sandbox not running' in result.detail
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key',
+ },
+ )
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.get_prompt_template'
+ )
+ async def test_agent_server_http_error(
+ self,
+ mock_get_prompt_template,
+ mock_get_httpx_client,
+ mock_get_sandbox_service,
+ mock_get_app_conversation_info_service,
+ github_callback_processor,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ conversation_id = uuid4()
+
+ # Set up happy path except httpx
+ await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+
+ mock_get_prompt_template.return_value = 'Please provide a summary'
+
+ mock_httpx_client = mock_get_httpx_client.return_value.__aenter__.return_value
+ mock_response = MagicMock()
+ mock_response.status_code = 500
+ mock_response.text = 'Internal Server Error'
+ mock_response.headers = {}
+ mock_error = httpx.HTTPStatusError(
+ 'HTTP 500 error', request=MagicMock(), response=mock_response
+ )
+ mock_httpx_client.post.side_effect = mock_error
+
+ result = await github_callback_processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.ERROR
+ assert 'Failed to send message to agent server' in result.detail
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key',
+ },
+ )
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.get_prompt_template'
+ )
+ async def test_agent_server_timeout(
+ self,
+ mock_get_prompt_template,
+ mock_get_httpx_client,
+ mock_get_sandbox_service,
+ mock_get_app_conversation_info_service,
+ github_callback_processor,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ conversation_id = uuid4()
+
+ await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+
+ mock_get_prompt_template.return_value = 'Please provide a summary'
+
+ mock_httpx_client = mock_get_httpx_client.return_value.__aenter__.return_value
+ mock_httpx_client.post.side_effect = httpx.TimeoutException('Request timeout')
+
+ result = await github_callback_processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.ERROR
+ assert 'Request timeout after 30 seconds' in result.detail
+
+ # ------------------------------------------------------------------ #
+ # Low-level helper tests
+ # ------------------------------------------------------------------ #
+
+ def test_get_installation_access_token_missing_id(self):
+ processor = GithubV1CallbackProcessor(github_view_data={})
+
+ with pytest.raises(ValueError, match='Missing installation ID'):
+ processor._get_installation_access_token()
+
+ @patch.dict(os.environ, {}, clear=True)
+ def test_get_installation_access_token_missing_credentials(
+ self, github_callback_processor
+ ):
+ with pytest.raises(
+ ValueError, match='GitHub App credentials are not configured'
+ ):
+ github_callback_processor._get_installation_access_token()
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key\\nwith_newlines',
+ },
+ )
+ @patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.GithubIntegration'
+ )
+ def test_get_installation_access_token_success(
+ self, mock_github_integration, github_callback_processor
+ ):
+ mock_token_data = MagicMock()
+ mock_token_data.token = 'test_access_token'
+ mock_integration_instance = MagicMock()
+ mock_integration_instance.get_access_token.return_value = mock_token_data
+ mock_github_integration.return_value = mock_integration_instance
+
+ token = github_callback_processor._get_installation_access_token()
+
+ assert token == 'test_access_token'
+ mock_github_integration.assert_called_once_with(
+ 'test_client_id', 'test_private_key\nwith_newlines'
+ )
+ mock_integration_instance.get_access_token.assert_called_once_with(12345)
+
+ @patch('openhands.app_server.event_callback.github_v1_callback_processor.Github')
+ async def test_post_summary_to_github_issue_comment(
+ self, mock_github, github_callback_processor
+ ):
+ mock_github_client = MagicMock()
+ mock_repo = MagicMock()
+ mock_issue = MagicMock()
+ mock_repo.get_issue.return_value = mock_issue
+ mock_github_client.get_repo.return_value = mock_repo
+ mock_github.return_value.__enter__.return_value = mock_github_client
+
+ with patch.object(
+ github_callback_processor,
+ '_get_installation_access_token',
+ return_value='test_token',
+ ):
+ await github_callback_processor._post_summary_to_github('Test summary')
+
+ mock_github.assert_called_once_with('test_token')
+ mock_github_client.get_repo.assert_called_once_with('test-owner/test-repo')
+ mock_repo.get_issue.assert_called_once_with(number=42)
+ mock_issue.create_comment.assert_called_once_with('Test summary')
+
+ @patch('openhands.app_server.event_callback.github_v1_callback_processor.Github')
+ async def test_post_summary_to_github_pr_comment(
+ self, mock_github, github_callback_processor_inline
+ ):
+ mock_github_client = MagicMock()
+ mock_repo = MagicMock()
+ mock_pr = MagicMock()
+ mock_repo.get_pull.return_value = mock_pr
+ mock_github_client.get_repo.return_value = mock_repo
+ mock_github.return_value.__enter__.return_value = mock_github_client
+
+ with patch.object(
+ github_callback_processor_inline,
+ '_get_installation_access_token',
+ return_value='test_token',
+ ):
+ await github_callback_processor_inline._post_summary_to_github(
+ 'Test summary'
+ )
+
+ mock_github.assert_called_once_with('test_token')
+ mock_github_client.get_repo.assert_called_once_with('test-owner/test-repo')
+ mock_repo.get_pull.assert_called_once_with(42)
+ mock_pr.create_review_comment_reply.assert_called_once_with(
+ comment_id='comment_123', body='Test summary'
+ )
+
+ async def test_post_summary_to_github_missing_token(
+ self, github_callback_processor
+ ):
+ with patch.object(
+ github_callback_processor, '_get_installation_access_token', return_value=''
+ ):
+ with pytest.raises(RuntimeError, match='Missing GitHub credentials'):
+ await github_callback_processor._post_summary_to_github('Test summary')
+
+ @patch.dict(
+ os.environ,
+ {
+ 'GITHUB_APP_CLIENT_ID': 'test_client_id',
+ 'GITHUB_APP_PRIVATE_KEY': 'test_private_key',
+ 'WEB_HOST': 'test.example.com',
+ },
+ )
+ @patch('openhands.app_server.config.get_httpx_client')
+ @patch('openhands.app_server.config.get_sandbox_service')
+ @patch('openhands.app_server.config.get_app_conversation_info_service')
+ async def test_exception_handling_posts_error_to_github(
+ self,
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ github_callback_processor,
+ conversation_state_update_event,
+ event_callback,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ ):
+ conversation_id = uuid4()
+
+ # happy-ish path, except httpx error
+ mock_httpx_client = await _setup_happy_path_services(
+ mock_get_app_conversation_info_service,
+ mock_get_sandbox_service,
+ mock_get_httpx_client,
+ mock_app_conversation_info,
+ mock_sandbox_info,
+ )
+ mock_httpx_client.post.side_effect = Exception('Simulated agent server error')
+
+ with (
+ patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.GithubIntegration'
+ ) as mock_github_integration,
+ patch(
+ 'openhands.app_server.event_callback.github_v1_callback_processor.Github'
+ ) as mock_github,
+ ):
+ mock_integration = MagicMock()
+ mock_github_integration.return_value = mock_integration
+ mock_integration.get_access_token.return_value.token = 'test_token'
+
+ mock_gh = MagicMock()
+ mock_github.return_value.__enter__.return_value = mock_gh
+ mock_repo = MagicMock()
+ mock_issue = MagicMock()
+ mock_repo.get_issue.return_value = mock_issue
+ mock_gh.get_repo.return_value = mock_repo
+
+ result = await github_callback_processor(
+ conversation_id=conversation_id,
+ callback=event_callback,
+ event=conversation_state_update_event,
+ )
+
+ assert result is not None
+ assert result.status == EventCallbackResultStatus.ERROR
+ assert 'Simulated agent server error' in result.detail
+
+ mock_issue.create_comment.assert_called_once()
+ call_args = mock_issue.create_comment.call_args
+ error_comment = call_args[1].get('body') or call_args[0][0]
+ assert (
+ 'OpenHands encountered an error: **Simulated agent server error**'
+ in error_comment
+ )
+ assert f'conversations/{conversation_id}' in error_comment
+ assert 'for more information.' in error_comment
diff --git a/tests/unit/app_server/test_live_status_app_conversation_service.py b/tests/unit/app_server/test_live_status_app_conversation_service.py
new file mode 100644
index 000000000000..1dabdfa88a5b
--- /dev/null
+++ b/tests/unit/app_server/test_live_status_app_conversation_service.py
@@ -0,0 +1,825 @@
+"""Unit tests for the methods in LiveStatusAppConversationService."""
+
+from unittest.mock import AsyncMock, Mock, patch
+from uuid import UUID, uuid4
+
+import pytest
+
+from openhands.agent_server.models import SendMessageRequest, StartConversationRequest
+from openhands.app_server.app_conversation.app_conversation_models import AgentType
+from openhands.app_server.app_conversation.live_status_app_conversation_service import (
+ LiveStatusAppConversationService,
+)
+from openhands.app_server.sandbox.sandbox_models import SandboxInfo, SandboxStatus
+from openhands.app_server.user.user_context import UserContext
+from openhands.integrations.provider import ProviderType
+from openhands.sdk import Agent
+from openhands.sdk.conversation.secret_source import LookupSecret, StaticSecret
+from openhands.sdk.llm import LLM
+from openhands.sdk.workspace import LocalWorkspace
+from openhands.sdk.workspace.remote.async_remote_workspace import AsyncRemoteWorkspace
+from openhands.server.types import AppMode
+
+
+class TestLiveStatusAppConversationService:
+ """Test cases for the methods in LiveStatusAppConversationService."""
+
+ def setup_method(self):
+ """Set up test fixtures."""
+ # Create mock dependencies
+ self.mock_user_context = Mock(spec=UserContext)
+ self.mock_user_auth = Mock()
+ self.mock_user_context.user_auth = self.mock_user_auth
+ self.mock_jwt_service = Mock()
+ self.mock_sandbox_service = Mock()
+ self.mock_sandbox_spec_service = Mock()
+ self.mock_app_conversation_info_service = Mock()
+ self.mock_app_conversation_start_task_service = Mock()
+ self.mock_event_callback_service = Mock()
+ self.mock_httpx_client = Mock()
+
+ # Create service instance
+ self.service = LiveStatusAppConversationService(
+ init_git_in_empty_workspace=True,
+ user_context=self.mock_user_context,
+ app_conversation_info_service=self.mock_app_conversation_info_service,
+ app_conversation_start_task_service=self.mock_app_conversation_start_task_service,
+ event_callback_service=self.mock_event_callback_service,
+ sandbox_service=self.mock_sandbox_service,
+ sandbox_spec_service=self.mock_sandbox_spec_service,
+ jwt_service=self.mock_jwt_service,
+ sandbox_startup_timeout=30,
+ sandbox_startup_poll_frequency=1,
+ httpx_client=self.mock_httpx_client,
+ web_url='https://test.example.com',
+ openhands_provider_base_url='https://provider.example.com',
+ access_token_hard_timeout=None,
+ app_mode='test',
+ keycloak_auth_cookie=None,
+ )
+
+ # Mock user info
+ self.mock_user = Mock()
+ self.mock_user.id = 'test_user_123'
+ self.mock_user.llm_model = 'gpt-4'
+ self.mock_user.llm_base_url = 'https://api.openai.com/v1'
+ self.mock_user.llm_api_key = 'test_api_key'
+ self.mock_user.confirmation_mode = False
+ self.mock_user.search_api_key = None # Default to None
+ self.mock_user.condenser_max_size = None # Default to None
+ self.mock_user.llm_base_url = 'https://api.openai.com/v1'
+
+ # Mock sandbox
+ self.mock_sandbox = Mock(spec=SandboxInfo)
+ self.mock_sandbox.id = uuid4()
+ self.mock_sandbox.status = SandboxStatus.RUNNING
+
+ @pytest.mark.asyncio
+ async def test_setup_secrets_for_git_providers_no_provider_tokens(self):
+ """Test _setup_secrets_for_git_providers with no provider tokens."""
+ # Arrange
+ base_secrets = {'existing': 'secret'}
+ self.mock_user_context.get_secrets.return_value = base_secrets
+ self.mock_user_context.get_provider_tokens = AsyncMock(return_value=None)
+
+ # Act
+ result = await self.service._setup_secrets_for_git_providers(self.mock_user)
+
+ # Assert
+ assert result == base_secrets
+ self.mock_user_context.get_secrets.assert_called_once()
+ self.mock_user_context.get_provider_tokens.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_setup_secrets_for_git_providers_with_web_url(self):
+ """Test _setup_secrets_for_git_providers with web URL (creates access token)."""
+ # Arrange
+ from pydantic import SecretStr
+
+ from openhands.integrations.provider import ProviderToken
+
+ base_secrets = {}
+ self.mock_user_context.get_secrets.return_value = base_secrets
+ self.mock_jwt_service.create_jws_token.return_value = 'test_access_token'
+
+ # Mock provider tokens
+ provider_tokens = {
+ ProviderType.GITHUB: ProviderToken(token=SecretStr('github_token')),
+ ProviderType.GITLAB: ProviderToken(token=SecretStr('gitlab_token')),
+ }
+ self.mock_user_context.get_provider_tokens = AsyncMock(
+ return_value=provider_tokens
+ )
+
+ # Act
+ result = await self.service._setup_secrets_for_git_providers(self.mock_user)
+
+ # Assert
+ assert 'GITHUB_TOKEN' in result
+ assert 'GITLAB_TOKEN' in result
+ assert isinstance(result['GITHUB_TOKEN'], LookupSecret)
+ assert isinstance(result['GITLAB_TOKEN'], LookupSecret)
+ assert (
+ result['GITHUB_TOKEN'].url
+ == 'https://test.example.com/api/v1/webhooks/secrets'
+ )
+ assert result['GITHUB_TOKEN'].headers['X-Access-Token'] == 'test_access_token'
+
+ # Should be called twice, once for each provider
+ assert self.mock_jwt_service.create_jws_token.call_count == 2
+
+ @pytest.mark.asyncio
+ async def test_setup_secrets_for_git_providers_with_saas_mode(self):
+ """Test _setup_secrets_for_git_providers with SaaS mode (includes keycloak cookie)."""
+ # Arrange
+ from pydantic import SecretStr
+
+ from openhands.integrations.provider import ProviderToken
+
+ self.service.app_mode = 'saas'
+ self.service.keycloak_auth_cookie = 'test_cookie'
+ base_secrets = {}
+ self.mock_user_context.get_secrets.return_value = base_secrets
+ self.mock_jwt_service.create_jws_token.return_value = 'test_access_token'
+
+ # Mock provider tokens
+ provider_tokens = {
+ ProviderType.GITLAB: ProviderToken(token=SecretStr('gitlab_token')),
+ }
+ self.mock_user_context.get_provider_tokens = AsyncMock(
+ return_value=provider_tokens
+ )
+
+ # Act
+ result = await self.service._setup_secrets_for_git_providers(self.mock_user)
+
+ # Assert
+ assert 'GITLAB_TOKEN' in result
+ lookup_secret = result['GITLAB_TOKEN']
+ assert isinstance(lookup_secret, LookupSecret)
+ assert 'Cookie' in lookup_secret.headers
+ assert lookup_secret.headers['Cookie'] == 'keycloak_auth=test_cookie'
+
+ @pytest.mark.asyncio
+ async def test_setup_secrets_for_git_providers_without_web_url(self):
+ """Test _setup_secrets_for_git_providers without web URL (uses static token)."""
+ # Arrange
+ from pydantic import SecretStr
+
+ from openhands.integrations.provider import ProviderToken
+
+ self.service.web_url = None
+ base_secrets = {}
+ self.mock_user_context.get_secrets.return_value = base_secrets
+ self.mock_user_context.get_latest_token.return_value = 'static_token_value'
+
+ # Mock provider tokens
+ provider_tokens = {
+ ProviderType.GITHUB: ProviderToken(token=SecretStr('github_token')),
+ }
+ self.mock_user_context.get_provider_tokens = AsyncMock(
+ return_value=provider_tokens
+ )
+
+ # Act
+ result = await self.service._setup_secrets_for_git_providers(self.mock_user)
+
+ # Assert
+ assert 'GITHUB_TOKEN' in result
+ assert isinstance(result['GITHUB_TOKEN'], StaticSecret)
+ assert result['GITHUB_TOKEN'].value.get_secret_value() == 'static_token_value'
+ self.mock_user_context.get_latest_token.assert_called_once_with(
+ ProviderType.GITHUB
+ )
+
+ @pytest.mark.asyncio
+ async def test_setup_secrets_for_git_providers_no_static_token(self):
+ """Test _setup_secrets_for_git_providers when no static token is available."""
+ # Arrange
+ from pydantic import SecretStr
+
+ from openhands.integrations.provider import ProviderToken
+
+ self.service.web_url = None
+ base_secrets = {}
+ self.mock_user_context.get_secrets.return_value = base_secrets
+ self.mock_user_context.get_latest_token.return_value = None
+
+ # Mock provider tokens
+ provider_tokens = {
+ ProviderType.GITHUB: ProviderToken(token=SecretStr('github_token')),
+ }
+ self.mock_user_context.get_provider_tokens = AsyncMock(
+ return_value=provider_tokens
+ )
+
+ # Act
+ result = await self.service._setup_secrets_for_git_providers(self.mock_user)
+
+ # Assert
+ assert 'GITHUB_TOKEN' not in result
+ assert result == base_secrets
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_with_custom_model(self):
+ """Test _configure_llm_and_mcp with custom LLM model."""
+ # Arrange
+ custom_model = 'gpt-3.5-turbo'
+ self.mock_user_context.get_mcp_api_key.return_value = 'mcp_api_key'
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, custom_model
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert llm.model == custom_model
+ assert llm.base_url == self.mock_user.llm_base_url
+ assert llm.api_key.get_secret_value() == self.mock_user.llm_api_key
+ assert llm.usage_id == 'agent'
+
+ assert 'default' in mcp_config
+ assert mcp_config['default']['url'] == 'https://test.example.com/mcp/mcp'
+ assert mcp_config['default']['headers']['X-Session-API-Key'] == 'mcp_api_key'
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_openhands_model_prefers_user_base_url(self):
+ """openhands/* model uses user.llm_base_url when provided."""
+ # Arrange
+ self.mock_user.llm_model = 'openhands/special'
+ self.mock_user.llm_base_url = 'https://user-llm.example.com'
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, _ = await self.service._configure_llm_and_mcp(
+ self.mock_user, self.mock_user.llm_model
+ )
+
+ # Assert
+ assert llm.base_url == 'https://user-llm.example.com'
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_openhands_model_uses_provider_default(self):
+ """openhands/* model falls back to configured provider base URL."""
+ # Arrange
+ self.mock_user.llm_model = 'openhands/default'
+ self.mock_user.llm_base_url = None
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, _ = await self.service._configure_llm_and_mcp(
+ self.mock_user, self.mock_user.llm_model
+ )
+
+ # Assert
+ assert llm.base_url == 'https://provider.example.com'
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_openhands_model_no_base_urls(self):
+ """openhands/* model sets base_url to None when no sources available."""
+ # Arrange
+ self.mock_user.llm_model = 'openhands/default'
+ self.mock_user.llm_base_url = None
+ self.service.openhands_provider_base_url = None
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, _ = await self.service._configure_llm_and_mcp(
+ self.mock_user, self.mock_user.llm_model
+ )
+
+ # Assert
+ assert llm.base_url is None
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_non_openhands_model_ignores_provider(self):
+ """Non-openhands model ignores provider base URL and uses user base URL."""
+ # Arrange
+ self.mock_user.llm_model = 'gpt-4'
+ self.mock_user.llm_base_url = 'https://user-llm.example.com'
+ self.service.openhands_provider_base_url = 'https://provider.example.com'
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, _ = await self.service._configure_llm_and_mcp(self.mock_user, None)
+
+ # Assert
+ assert llm.base_url == 'https://user-llm.example.com'
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_with_user_default_model(self):
+ """Test _configure_llm_and_mcp using user's default model."""
+ # Arrange
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert llm.model == self.mock_user.llm_model
+ assert 'default' in mcp_config
+ assert 'headers' not in mcp_config['default']
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_without_web_url(self):
+ """Test _configure_llm_and_mcp without web URL (no MCP config)."""
+ # Arrange
+ self.service.web_url = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert mcp_config == {}
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_tavily_with_user_search_api_key(self):
+ """Test _configure_llm_and_mcp adds tavily when user has search_api_key."""
+ # Arrange
+ from pydantic import SecretStr
+
+ self.mock_user.search_api_key = SecretStr('user_search_key')
+ self.mock_user_context.get_mcp_api_key.return_value = 'mcp_api_key'
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'default' in mcp_config
+ assert 'tavily' in mcp_config
+ assert (
+ mcp_config['tavily']['url']
+ == 'https://mcp.tavily.com/mcp/?tavilyApiKey=user_search_key'
+ )
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_tavily_with_env_tavily_key(self):
+ """Test _configure_llm_and_mcp adds tavily when service has tavily_api_key."""
+ # Arrange
+ self.service.tavily_api_key = 'env_tavily_key'
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'default' in mcp_config
+ assert 'tavily' in mcp_config
+ assert (
+ mcp_config['tavily']['url']
+ == 'https://mcp.tavily.com/mcp/?tavilyApiKey=env_tavily_key'
+ )
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_tavily_user_key_takes_precedence(self):
+ """Test _configure_llm_and_mcp user search_api_key takes precedence over env key."""
+ # Arrange
+ from pydantic import SecretStr
+
+ self.mock_user.search_api_key = SecretStr('user_search_key')
+ self.service.tavily_api_key = 'env_tavily_key'
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'tavily' in mcp_config
+ assert (
+ mcp_config['tavily']['url']
+ == 'https://mcp.tavily.com/mcp/?tavilyApiKey=user_search_key'
+ )
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_no_tavily_without_keys(self):
+ """Test _configure_llm_and_mcp does not add tavily when no keys are available."""
+ # Arrange
+ self.mock_user.search_api_key = None
+ self.service.tavily_api_key = None
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'default' in mcp_config
+ assert 'tavily' not in mcp_config
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_saas_mode_no_tavily_without_user_key(self):
+ """Test _configure_llm_and_mcp does not add tavily in SAAS mode without user search_api_key.
+
+ In SAAS mode, the global tavily_api_key should not be passed to the service instance,
+ so tavily should only be added if the user has their own search_api_key.
+ """
+ # Arrange - simulate SAAS mode where no global tavily key is available
+ self.service.app_mode = AppMode.SAAS.value
+ self.service.tavily_api_key = None # In SAAS mode, this should be None
+ self.mock_user.search_api_key = None
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'default' in mcp_config
+ assert 'tavily' not in mcp_config
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_saas_mode_with_user_search_key(self):
+ """Test _configure_llm_and_mcp adds tavily in SAAS mode when user has search_api_key.
+
+ Even in SAAS mode, if the user has their own search_api_key, tavily should be added.
+ """
+ # Arrange - simulate SAAS mode with user having their own search key
+ from pydantic import SecretStr
+
+ self.service.app_mode = AppMode.SAAS.value
+ self.service.tavily_api_key = None # In SAAS mode, this should be None
+ self.mock_user.search_api_key = SecretStr('user_search_key')
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'default' in mcp_config
+ assert 'tavily' in mcp_config
+ assert (
+ mcp_config['tavily']['url']
+ == 'https://mcp.tavily.com/mcp/?tavilyApiKey=user_search_key'
+ )
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_tavily_with_empty_user_search_key(self):
+ """Test _configure_llm_and_mcp handles empty user search_api_key correctly."""
+ # Arrange
+ from pydantic import SecretStr
+
+ self.mock_user.search_api_key = SecretStr('') # Empty string
+ self.service.tavily_api_key = 'env_tavily_key'
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'tavily' in mcp_config
+ # Should fall back to env key since user key is empty
+ assert (
+ mcp_config['tavily']['url']
+ == 'https://mcp.tavily.com/mcp/?tavilyApiKey=env_tavily_key'
+ )
+
+ @pytest.mark.asyncio
+ async def test_configure_llm_and_mcp_tavily_with_whitespace_user_search_key(self):
+ """Test _configure_llm_and_mcp handles whitespace-only user search_api_key correctly."""
+ # Arrange
+ from pydantic import SecretStr
+
+ self.mock_user.search_api_key = SecretStr(' ') # Whitespace only
+ self.service.tavily_api_key = 'env_tavily_key'
+ self.mock_user_context.get_mcp_api_key.return_value = None
+
+ # Act
+ llm, mcp_config = await self.service._configure_llm_and_mcp(
+ self.mock_user, None
+ )
+
+ # Assert
+ assert isinstance(llm, LLM)
+ assert 'tavily' in mcp_config
+ # Should fall back to env key since user key is whitespace only
+ assert (
+ mcp_config['tavily']['url']
+ == 'https://mcp.tavily.com/mcp/?tavilyApiKey=env_tavily_key'
+ )
+
+ @patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.get_planning_tools'
+ )
+ @patch(
+ 'openhands.app_server.app_conversation.app_conversation_service_base.AppConversationServiceBase._create_condenser'
+ )
+ @patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.format_plan_structure'
+ )
+ def test_create_agent_with_context_planning_agent(
+ self, mock_format_plan, mock_create_condenser, mock_get_tools
+ ):
+ """Test _create_agent_with_context for planning agent type."""
+ # Arrange
+ mock_llm = Mock(spec=LLM)
+ mock_llm.model_copy.return_value = mock_llm
+ mock_get_tools.return_value = []
+ mock_condenser = Mock()
+ mock_create_condenser.return_value = mock_condenser
+ mock_format_plan.return_value = 'test_plan_structure'
+ mcp_config = {'default': {'url': 'test'}}
+ system_message_suffix = 'Test suffix'
+
+ # Act
+ with patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.Agent'
+ ) as mock_agent_class:
+ mock_agent_instance = Mock()
+ mock_agent_instance.model_copy.return_value = mock_agent_instance
+ mock_agent_class.return_value = mock_agent_instance
+
+ self.service._create_agent_with_context(
+ mock_llm,
+ AgentType.PLAN,
+ system_message_suffix,
+ mcp_config,
+ self.mock_user.condenser_max_size,
+ )
+
+ # Assert
+ mock_agent_class.assert_called_once()
+ call_kwargs = mock_agent_class.call_args[1]
+ assert call_kwargs['llm'] == mock_llm
+ assert call_kwargs['system_prompt_filename'] == 'system_prompt_planning.j2'
+ assert (
+ call_kwargs['system_prompt_kwargs']['plan_structure']
+ == 'test_plan_structure'
+ )
+ assert call_kwargs['mcp_config'] == mcp_config
+ assert call_kwargs['security_analyzer'] is None
+ assert call_kwargs['condenser'] == mock_condenser
+ mock_create_condenser.assert_called_once_with(
+ mock_llm, AgentType.PLAN, self.mock_user.condenser_max_size
+ )
+
+ @patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.get_default_tools'
+ )
+ @patch(
+ 'openhands.app_server.app_conversation.app_conversation_service_base.AppConversationServiceBase._create_condenser'
+ )
+ def test_create_agent_with_context_default_agent(
+ self, mock_create_condenser, mock_get_tools
+ ):
+ """Test _create_agent_with_context for default agent type."""
+ # Arrange
+ mock_llm = Mock(spec=LLM)
+ mock_llm.model_copy.return_value = mock_llm
+ mock_get_tools.return_value = []
+ mock_condenser = Mock()
+ mock_create_condenser.return_value = mock_condenser
+ mcp_config = {'default': {'url': 'test'}}
+
+ # Act
+ with patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.Agent'
+ ) as mock_agent_class:
+ mock_agent_instance = Mock()
+ mock_agent_instance.model_copy.return_value = mock_agent_instance
+ mock_agent_class.return_value = mock_agent_instance
+
+ self.service._create_agent_with_context(
+ mock_llm,
+ AgentType.DEFAULT,
+ None,
+ mcp_config,
+ self.mock_user.condenser_max_size,
+ )
+
+ # Assert
+ mock_agent_class.assert_called_once()
+ call_kwargs = mock_agent_class.call_args[1]
+ assert call_kwargs['llm'] == mock_llm
+ assert call_kwargs['system_prompt_kwargs']['cli_mode'] is False
+ assert call_kwargs['mcp_config'] == mcp_config
+ assert call_kwargs['condenser'] == mock_condenser
+ mock_get_tools.assert_called_once_with(enable_browser=True)
+ mock_create_condenser.assert_called_once_with(
+ mock_llm, AgentType.DEFAULT, self.mock_user.condenser_max_size
+ )
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
+ )
+ async def test_finalize_conversation_request_with_skills(
+ self, mock_experiment_manager
+ ):
+ """Test _finalize_conversation_request with skills loading."""
+ # Arrange
+ mock_agent = Mock(spec=Agent)
+ mock_updated_agent = Mock(spec=Agent)
+ mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
+ mock_updated_agent
+ )
+
+ conversation_id = uuid4()
+ workspace = LocalWorkspace(working_dir='/test')
+ initial_message = Mock(spec=SendMessageRequest)
+ secrets = {'test': StaticSecret(value='secret')}
+ remote_workspace = Mock(spec=AsyncRemoteWorkspace)
+
+ # Mock the skills loading method
+ self.service._load_skills_and_update_agent = AsyncMock(
+ return_value=mock_updated_agent
+ )
+
+ # Act
+ result = await self.service._finalize_conversation_request(
+ mock_agent,
+ conversation_id,
+ self.mock_user,
+ workspace,
+ initial_message,
+ secrets,
+ self.mock_sandbox,
+ remote_workspace,
+ 'test_repo',
+ '/test/dir',
+ )
+
+ # Assert
+ assert isinstance(result, StartConversationRequest)
+ assert result.conversation_id == conversation_id
+ assert result.agent == mock_updated_agent
+ assert result.workspace == workspace
+ assert result.initial_message == initial_message
+ assert result.secrets == secrets
+
+ mock_experiment_manager.run_agent_variant_tests__v1.assert_called_once_with(
+ self.mock_user.id, conversation_id, mock_agent
+ )
+ self.service._load_skills_and_update_agent.assert_called_once_with(
+ self.mock_sandbox,
+ mock_updated_agent,
+ remote_workspace,
+ 'test_repo',
+ '/test/dir',
+ )
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
+ )
+ async def test_finalize_conversation_request_without_skills(
+ self, mock_experiment_manager
+ ):
+ """Test _finalize_conversation_request without remote workspace (no skills)."""
+ # Arrange
+ mock_agent = Mock(spec=Agent)
+ mock_updated_agent = Mock(spec=Agent)
+ mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
+ mock_updated_agent
+ )
+
+ workspace = LocalWorkspace(working_dir='/test')
+ secrets = {'test': StaticSecret(value='secret')}
+
+ # Act
+ result = await self.service._finalize_conversation_request(
+ mock_agent,
+ None,
+ self.mock_user,
+ workspace,
+ None,
+ secrets,
+ self.mock_sandbox,
+ None,
+ None,
+ '/test/dir',
+ )
+
+ # Assert
+ assert isinstance(result, StartConversationRequest)
+ assert isinstance(result.conversation_id, UUID)
+ assert result.agent == mock_updated_agent
+ mock_experiment_manager.run_agent_variant_tests__v1.assert_called_once()
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
+ )
+ async def test_finalize_conversation_request_skills_loading_fails(
+ self, mock_experiment_manager
+ ):
+ """Test _finalize_conversation_request when skills loading fails."""
+ # Arrange
+ mock_agent = Mock(spec=Agent)
+ mock_updated_agent = Mock(spec=Agent)
+ mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
+ mock_updated_agent
+ )
+
+ workspace = LocalWorkspace(working_dir='/test')
+ secrets = {'test': StaticSecret(value='secret')}
+ remote_workspace = Mock(spec=AsyncRemoteWorkspace)
+
+ # Mock skills loading to raise an exception
+ self.service._load_skills_and_update_agent = AsyncMock(
+ side_effect=Exception('Skills loading failed')
+ )
+
+ # Act
+ with patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service._logger'
+ ) as mock_logger:
+ result = await self.service._finalize_conversation_request(
+ mock_agent,
+ None,
+ self.mock_user,
+ workspace,
+ None,
+ secrets,
+ self.mock_sandbox,
+ remote_workspace,
+ 'test_repo',
+ '/test/dir',
+ )
+
+ # Assert
+ assert isinstance(result, StartConversationRequest)
+ assert (
+ result.agent == mock_updated_agent
+ ) # Should still use the experiment-modified agent
+ mock_logger.warning.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_build_start_conversation_request_for_user_integration(self):
+ """Test the main _build_start_conversation_request_for_user method integration."""
+ # Arrange
+ self.mock_user_context.get_user_info.return_value = self.mock_user
+
+ # Mock all the helper methods
+ mock_secrets = {'GITHUB_TOKEN': Mock()}
+ mock_llm = Mock(spec=LLM)
+ mock_mcp_config = {'default': {'url': 'test'}}
+ mock_agent = Mock(spec=Agent)
+ mock_final_request = Mock(spec=StartConversationRequest)
+
+ self.service._setup_secrets_for_git_providers = AsyncMock(
+ return_value=mock_secrets
+ )
+ self.service._configure_llm_and_mcp = AsyncMock(
+ return_value=(mock_llm, mock_mcp_config)
+ )
+ self.service._create_agent_with_context = Mock(return_value=mock_agent)
+ self.service._finalize_conversation_request = AsyncMock(
+ return_value=mock_final_request
+ )
+
+ # Act
+ result = await self.service._build_start_conversation_request_for_user(
+ sandbox=self.mock_sandbox,
+ initial_message=None,
+ system_message_suffix='Test suffix',
+ git_provider=ProviderType.GITHUB,
+ working_dir='/test/dir',
+ agent_type=AgentType.DEFAULT,
+ llm_model='gpt-4',
+ conversation_id=None,
+ remote_workspace=None,
+ selected_repository='test/repo',
+ )
+
+ # Assert
+ assert result == mock_final_request
+
+ self.service._setup_secrets_for_git_providers.assert_called_once_with(
+ self.mock_user
+ )
+ self.service._configure_llm_and_mcp.assert_called_once_with(
+ self.mock_user, 'gpt-4'
+ )
+ self.service._create_agent_with_context.assert_called_once_with(
+ mock_llm,
+ AgentType.DEFAULT,
+ 'Test suffix',
+ mock_mcp_config,
+ self.mock_user.condenser_max_size,
+ )
+ self.service._finalize_conversation_request.assert_called_once()
diff --git a/tests/unit/app_server/test_remote_sandbox_service.py b/tests/unit/app_server/test_remote_sandbox_service.py
index 1d917cc76021..567ecad2e30a 100644
--- a/tests/unit/app_server/test_remote_sandbox_service.py
+++ b/tests/unit/app_server/test_remote_sandbox_service.py
@@ -435,7 +435,7 @@ async def test_start_sandbox_success(
9
) # max_num_sandboxes - 1
remote_sandbox_service.db_session.add.assert_called_once()
- remote_sandbox_service.db_session.commit.assert_called_once()
+ remote_sandbox_service.db_session.commit.assert_not_called()
@pytest.mark.asyncio
async def test_start_sandbox_with_specific_spec(
@@ -627,7 +627,7 @@ async def test_delete_sandbox_success(self, remote_sandbox_service):
# Verify
assert result is True
remote_sandbox_service.db_session.delete.assert_called_once_with(stored_sandbox)
- remote_sandbox_service.db_session.commit.assert_called_once()
+ remote_sandbox_service.db_session.commit.assert_not_called()
remote_sandbox_service.httpx_client.request.assert_called_once_with(
'POST',
'https://api.example.com/stop',
diff --git a/tests/unit/app_server/test_skill_loader.py b/tests/unit/app_server/test_skill_loader.py
new file mode 100644
index 000000000000..c9e54ba5a1a5
--- /dev/null
+++ b/tests/unit/app_server/test_skill_loader.py
@@ -0,0 +1,756 @@
+"""Tests for skill_loader module.
+
+This module tests the loading of skills from various sources
+(global, user, and repository-level) into SDK Skill objects for V1 conversations.
+"""
+
+import tempfile
+from pathlib import Path
+from unittest.mock import AsyncMock, MagicMock, Mock, patch
+
+import pytest
+
+from openhands.app_server.app_conversation.skill_loader import (
+ _determine_repo_root,
+ _find_and_load_global_skill_files,
+ _find_and_load_skill_md_files,
+ _load_special_files,
+ _read_file_from_workspace,
+ load_global_skills,
+ load_repo_skills,
+ merge_skills,
+)
+
+# ===== Test Fixtures =====
+
+
+@pytest.fixture
+def mock_skill():
+ """Create a mock Skill object."""
+ skill = Mock()
+ skill.name = 'test_skill'
+ skill.content = 'Test content'
+ return skill
+
+
+@pytest.fixture
+def mock_skills_list():
+ """Create a list of mock Skill objects."""
+ skills = []
+ for i in range(3):
+ skill = Mock()
+ skill.name = f'skill_{i}'
+ skill.content = f'Content {i}'
+ skills.append(skill)
+ return skills
+
+
+@pytest.fixture
+def mock_async_remote_workspace():
+ """Create a mock AsyncRemoteWorkspace."""
+ workspace = AsyncMock()
+ return workspace
+
+
+@pytest.fixture
+def temp_skills_dir():
+ """Create a temporary directory with test skill files."""
+ with tempfile.TemporaryDirectory() as temp_dir:
+ root = Path(temp_dir)
+
+ # Create test skill files
+ test_skill = """---
+name: test_skill
+triggers:
+ - test
+ - testing
+---
+
+# Test Skill
+
+This is a test skill for testing purposes.
+"""
+ (root / 'test_skill.md').write_text(test_skill)
+
+ another_skill = """---
+name: another_skill
+---
+
+# Another Skill
+
+Another test skill.
+"""
+ (root / 'another_skill.md').write_text(another_skill)
+
+ # Create README.md which should be ignored
+ (root / 'README.md').write_text('# README\n\nThis should be ignored.')
+
+ yield root
+
+
+@pytest.fixture
+def command_result_success():
+ """Create a successful command result."""
+ result = Mock()
+ result.exit_code = 0
+ result.stdout = 'test output'
+ return result
+
+
+@pytest.fixture
+def command_result_failure():
+ """Create a failed command result."""
+ result = Mock()
+ result.exit_code = 1
+ result.stdout = ''
+ return result
+
+
+# ===== Tests for Helper Functions =====
+
+
+class TestDetermineRepoRoot:
+ """Test _determine_repo_root helper function."""
+
+ def test_with_selected_repository(self):
+ """Test determining repo root with selected repository."""
+ result = _determine_repo_root('/workspace/project', 'owner/repo-name')
+ assert result == '/workspace/project/repo-name'
+
+ def test_without_selected_repository(self):
+ """Test determining repo root without selected repository."""
+ result = _determine_repo_root('/workspace/project', None)
+ assert result == '/workspace/project'
+
+ def test_with_complex_repository_name(self):
+ """Test with complex repository name."""
+ result = _determine_repo_root('/workspace', 'org-name/complex-repo-123')
+ assert result == '/workspace/complex-repo-123'
+
+
+class TestReadFileFromWorkspace:
+ """Test _read_file_from_workspace helper function."""
+
+ @pytest.mark.asyncio
+ async def test_successful_read(
+ self, mock_async_remote_workspace, command_result_success
+ ):
+ """Test successfully reading a file from workspace."""
+ command_result_success.stdout = 'file content\n'
+ mock_async_remote_workspace.execute_command.return_value = (
+ command_result_success
+ )
+
+ result = await _read_file_from_workspace(
+ mock_async_remote_workspace, '/path/to/file.md', '/workspace'
+ )
+
+ assert result == 'file content\n'
+ mock_async_remote_workspace.execute_command.assert_called_once_with(
+ 'cat /path/to/file.md', cwd='/workspace', timeout=10.0
+ )
+
+ @pytest.mark.asyncio
+ async def test_file_not_found(
+ self, mock_async_remote_workspace, command_result_failure
+ ):
+ """Test reading a non-existent file."""
+ mock_async_remote_workspace.execute_command.return_value = (
+ command_result_failure
+ )
+
+ result = await _read_file_from_workspace(
+ mock_async_remote_workspace, '/nonexistent/file.md', '/workspace'
+ )
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_empty_file(self, mock_async_remote_workspace):
+ """Test reading an empty file."""
+ result_obj = Mock()
+ result_obj.exit_code = 0
+ result_obj.stdout = ' ' # Only whitespace
+ mock_async_remote_workspace.execute_command.return_value = result_obj
+
+ result = await _read_file_from_workspace(
+ mock_async_remote_workspace, '/empty/file.md', '/workspace'
+ )
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_command_exception(self, mock_async_remote_workspace):
+ """Test handling exception during file read."""
+ mock_async_remote_workspace.execute_command.side_effect = Exception(
+ 'Connection error'
+ )
+
+ result = await _read_file_from_workspace(
+ mock_async_remote_workspace, '/path/to/file.md', '/workspace'
+ )
+
+ assert result is None
+
+
+class TestLoadSpecialFiles:
+ """Test _load_special_files helper function."""
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._read_file_from_workspace'
+ )
+ @patch('openhands.app_server.app_conversation.skill_loader.Skill')
+ async def test_load_all_special_files(
+ self,
+ mock_skill_class,
+ mock_read_file,
+ mock_async_remote_workspace,
+ mock_skills_list,
+ ):
+ """Test loading all special files successfully."""
+ # Mock reading files - return content for each special file
+ mock_read_file.side_effect = [
+ 'cursorrules content',
+ 'agents.md content',
+ 'agent.md content',
+ ]
+
+ # Mock skill creation
+ mock_skill_class.load.side_effect = mock_skills_list
+
+ result = await _load_special_files(
+ mock_async_remote_workspace, '/repo', '/workspace'
+ )
+
+ assert len(result) == 3
+ assert result == mock_skills_list
+ assert mock_read_file.call_count == 3
+ assert mock_skill_class.load.call_count == 3
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._read_file_from_workspace'
+ )
+ @patch('openhands.app_server.app_conversation.skill_loader.Skill')
+ async def test_load_partial_special_files(
+ self, mock_skill_class, mock_read_file, mock_async_remote_workspace, mock_skill
+ ):
+ """Test loading when only some special files exist."""
+ # Only .cursorrules exists
+ mock_read_file.side_effect = ['cursorrules content', None, None]
+ mock_skill_class.load.return_value = mock_skill
+
+ result = await _load_special_files(
+ mock_async_remote_workspace, '/repo', '/workspace'
+ )
+
+ assert len(result) == 1
+ assert result[0] == mock_skill
+ assert mock_read_file.call_count == 3
+ assert mock_skill_class.load.call_count == 1
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._read_file_from_workspace'
+ )
+ async def test_load_no_special_files(
+ self, mock_read_file, mock_async_remote_workspace
+ ):
+ """Test when no special files exist."""
+ mock_read_file.return_value = None
+
+ result = await _load_special_files(
+ mock_async_remote_workspace, '/repo', '/workspace'
+ )
+
+ assert len(result) == 0
+
+
+class TestFindAndLoadSkillMdFiles:
+ """Test _find_and_load_skill_md_files helper function."""
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._read_file_from_workspace'
+ )
+ @patch('openhands.app_server.app_conversation.skill_loader.Skill')
+ async def test_find_and_load_files_success(
+ self,
+ mock_skill_class,
+ mock_read_file,
+ mock_async_remote_workspace,
+ mock_skills_list,
+ ):
+ """Test successfully finding and loading skill .md files."""
+ result_obj = Mock()
+ result_obj.exit_code = 0
+ result_obj.stdout = (
+ '/repo/.openhands/skills/test1.md\n/repo/.openhands/skills/test2.md\n'
+ )
+ mock_async_remote_workspace.execute_command.return_value = result_obj
+
+ mock_read_file.side_effect = ['content1', 'content2']
+ mock_skill_class.load.side_effect = mock_skills_list[:2]
+
+ result = await _find_and_load_skill_md_files(
+ mock_async_remote_workspace, '/repo/.openhands/skills', '/workspace'
+ )
+
+ assert len(result) == 2
+ assert result == mock_skills_list[:2]
+
+ # Verify relative paths are used
+ assert mock_skill_class.load.call_args_list[0][1]['path'] == 'test1.md'
+ assert mock_skill_class.load.call_args_list[1][1]['path'] == 'test2.md'
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._read_file_from_workspace'
+ )
+ @patch('openhands.app_server.app_conversation.skill_loader.Skill')
+ async def test_find_and_load_excludes_readme(
+ self, mock_skill_class, mock_read_file, mock_async_remote_workspace, mock_skill
+ ):
+ """Test that README.md files are excluded."""
+ result_obj = Mock()
+ result_obj.exit_code = 0
+ result_obj.stdout = (
+ '/repo/.openhands/skills/test.md\n/repo/.openhands/skills/README.md\n'
+ )
+ mock_async_remote_workspace.execute_command.return_value = result_obj
+
+ mock_read_file.return_value = 'content'
+ mock_skill_class.load.return_value = mock_skill
+
+ result = await _find_and_load_skill_md_files(
+ mock_async_remote_workspace, '/repo/.openhands/skills', '/workspace'
+ )
+
+ assert len(result) == 1
+ assert result[0] == mock_skill
+ # Verify README.md was not processed
+ assert mock_read_file.call_count == 1
+
+ @pytest.mark.asyncio
+ async def test_find_and_load_no_results(
+ self, mock_async_remote_workspace, command_result_failure
+ ):
+ """Test when no files are found."""
+ mock_async_remote_workspace.execute_command.return_value = (
+ command_result_failure
+ )
+
+ result = await _find_and_load_skill_md_files(
+ mock_async_remote_workspace, '/nonexistent', '/workspace'
+ )
+
+ assert len(result) == 0
+
+ @pytest.mark.asyncio
+ async def test_find_and_load_exception(self, mock_async_remote_workspace):
+ """Test handling exception during file search."""
+ mock_async_remote_workspace.execute_command.side_effect = Exception(
+ 'Command error'
+ )
+
+ result = await _find_and_load_skill_md_files(
+ mock_async_remote_workspace, '/repo/.openhands/skills', '/workspace'
+ )
+
+ assert len(result) == 0
+
+ @pytest.mark.asyncio
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._read_file_from_workspace'
+ )
+ async def test_find_and_load_some_missing(
+ self, mock_read_file, mock_async_remote_workspace
+ ):
+ """Test loading when some files fail to read."""
+ result_obj = Mock()
+ result_obj.exit_code = 0
+ result_obj.stdout = (
+ '/repo/.openhands/skills/test1.md\n/repo/.openhands/skills/missing.md\n'
+ )
+ mock_async_remote_workspace.execute_command.return_value = result_obj
+
+ mock_read_file.side_effect = ['content1', None]
+
+ with patch(
+ 'openhands.app_server.app_conversation.skill_loader.Skill'
+ ) as mock_skill_class:
+ mock_skill = Mock()
+ mock_skill_class.load.return_value = mock_skill
+
+ result = await _find_and_load_skill_md_files(
+ mock_async_remote_workspace,
+ '/repo/.openhands/skills',
+ '/workspace',
+ )
+
+ assert len(result) == 1
+ assert mock_skill_class.load.call_count == 1
+
+
+class TestFindAndLoadGlobalSkillFiles:
+ """Test _find_and_load_global_skill_files helper function."""
+
+ @patch('openhands.app_server.app_conversation.skill_loader.Skill')
+ def test_find_and_load_global_files_success(
+ self, mock_skill_class, temp_skills_dir, mock_skills_list
+ ):
+ """Test successfully finding and loading global skill files."""
+ file_paths = list(temp_skills_dir.glob('*.md'))
+ file_paths = [f for f in file_paths if f.name.lower() != 'readme.md']
+
+ mock_skill_class.load.side_effect = mock_skills_list[: len(file_paths)]
+
+ result = _find_and_load_global_skill_files(temp_skills_dir)
+
+ # Should find and load .md files but not README.md
+ assert len(result) == len(file_paths)
+ assert mock_skill_class.load.call_count == len(file_paths)
+ skill_names = [s.name for s in result]
+ assert len(skill_names) == len(file_paths)
+
+ @patch('openhands.app_server.app_conversation.skill_loader.Skill')
+ def test_find_and_load_global_files_with_errors(
+ self, mock_skill_class, temp_skills_dir, mock_skill
+ ):
+ """Test loading when some files fail to parse."""
+ file_paths = list(temp_skills_dir.glob('*.md'))
+ file_paths = [f for f in file_paths if f.name.lower() != 'readme.md']
+
+ # First file succeeds, second file fails
+ mock_skill_class.load.side_effect = [mock_skill, Exception('Parse error')]
+
+ result = _find_and_load_global_skill_files(temp_skills_dir)
+
+ assert len(result) == 1
+ assert result[0] == mock_skill
+
+ def test_find_and_load_global_files_empty_dir(self, tmp_path):
+ """Test finding and loading files in empty directory."""
+ result = _find_and_load_global_skill_files(tmp_path)
+ assert len(result) == 0
+
+ def test_find_and_load_global_files_nonexistent_dir(self):
+ """Test finding and loading files in non-existent directory."""
+ nonexistent = Path('/nonexistent/path')
+ result = _find_and_load_global_skill_files(nonexistent)
+ assert len(result) == 0
+
+
+# ===== Tests for Main Loader Functions =====
+
+
+class TestLoadGlobalSkills:
+ """Test load_global_skills main function."""
+
+ @patch('openhands.app_server.app_conversation.skill_loader.Path')
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._find_and_load_global_skill_files'
+ )
+ def test_load_global_skills_success(
+ self,
+ mock_find_and_load,
+ mock_path_class,
+ temp_skills_dir,
+ mock_skills_list,
+ ):
+ """Test successfully loading global skills."""
+ mock_path_obj = MagicMock()
+ mock_path_obj.exists.return_value = True
+ mock_path_class.return_value = mock_path_obj
+
+ mock_find_and_load.return_value = mock_skills_list
+
+ result = load_global_skills()
+
+ assert len(result) == len(mock_skills_list)
+ assert result == mock_skills_list
+
+ @patch('openhands.app_server.app_conversation.skill_loader.Path')
+ def test_load_global_skills_dir_not_exists(self, mock_path_class):
+ """Test when global skills directory doesn't exist."""
+ mock_path_obj = MagicMock()
+ mock_path_obj.exists.return_value = False
+ mock_path_class.return_value = mock_path_obj
+
+ result = load_global_skills()
+
+ assert len(result) == 0
+
+ @patch('openhands.app_server.app_conversation.skill_loader.Path')
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._find_and_load_global_skill_files'
+ )
+ def test_load_global_skills_exception(self, mock_find_and_load, mock_path_class):
+ """Test handling exception during global skill loading."""
+ mock_path_obj = MagicMock()
+ mock_path_obj.exists.return_value = True
+ mock_path_class.return_value = mock_path_obj
+
+ mock_find_and_load.side_effect = Exception('File system error')
+
+ result = load_global_skills()
+
+ assert len(result) == 0
+
+
+class TestLoadRepoSkills:
+ """Test load_repo_skills main function."""
+
+ @pytest.mark.asyncio
+ @patch('openhands.app_server.app_conversation.skill_loader._load_special_files')
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._find_and_load_skill_md_files'
+ )
+ async def test_load_repo_skills_success(
+ self,
+ mock_find_and_load,
+ mock_load_special,
+ mock_async_remote_workspace,
+ mock_skills_list,
+ ):
+ """Test successfully loading repo skills."""
+ special_skills = [mock_skills_list[0]]
+ skills_dir_skills = [mock_skills_list[1]]
+ microagents_dir_skills = [mock_skills_list[2]]
+
+ mock_load_special.return_value = special_skills
+ # Mock loading from both directories
+ mock_find_and_load.side_effect = [skills_dir_skills, microagents_dir_skills]
+
+ result = await load_repo_skills(
+ mock_async_remote_workspace, 'owner/repo', '/workspace/project'
+ )
+
+ assert len(result) == 3
+ # Verify all skills are present (merged with precedence)
+ assert special_skills[0] in result
+ assert skills_dir_skills[0] in result
+ assert microagents_dir_skills[0] in result
+
+ @pytest.mark.asyncio
+ @patch('openhands.app_server.app_conversation.skill_loader._load_special_files')
+ @patch(
+ 'openhands.app_server.app_conversation.skill_loader._find_and_load_skill_md_files'
+ )
+ async def test_load_repo_skills_no_selected_repository(
+ self,
+ mock_find_and_load,
+ mock_load_special,
+ mock_async_remote_workspace,
+ mock_skills_list,
+ ):
+ """Test loading repo skills without selected repository."""
+ mock_load_special.return_value = [mock_skills_list[0]]
+ mock_find_and_load.return_value = []
+
+ result = await load_repo_skills(
+ mock_async_remote_workspace, None, '/workspace/project'
+ )
+
+ assert len(result) == 1
+ # Verify repo root is working_dir when no repository selected
+ mock_load_special.assert_called_once_with(
+ mock_async_remote_workspace, '/workspace/project', '/workspace/project'
+ )
+ # Verify both directories were checked
+ assert mock_find_and_load.call_count == 2
+
+ @pytest.mark.asyncio
+ @patch('openhands.app_server.app_conversation.skill_loader._load_special_files')
+ async def test_load_repo_skills_exception(
+ self, mock_load_special, mock_async_remote_workspace
+ ):
+ """Test handling exception during repo skill loading."""
+ mock_load_special.side_effect = Exception('Workspace error')
+
+ result = await load_repo_skills(
+ mock_async_remote_workspace, 'owner/repo', '/workspace/project'
+ )
+
+ assert len(result) == 0
+
+
+class TestMergeSkills:
+ """Test merge_skills function."""
+
+ def test_merge_skills_no_duplicates(self):
+ """Test merging skills with no duplicates."""
+ skill1 = Mock()
+ skill1.name = 'skill1'
+ skill2 = Mock()
+ skill2.name = 'skill2'
+ skill3 = Mock()
+ skill3.name = 'skill3'
+
+ result = merge_skills([[skill1], [skill2], [skill3]])
+
+ assert len(result) == 3
+ names = {s.name for s in result}
+ assert names == {'skill1', 'skill2', 'skill3'}
+
+ def test_merge_skills_with_duplicates(self):
+ """Test merging skills with duplicates - later takes precedence."""
+ skill1_v1 = Mock()
+ skill1_v1.name = 'skill1'
+ skill1_v1.version = 'v1'
+
+ skill1_v2 = Mock()
+ skill1_v2.name = 'skill1'
+ skill1_v2.version = 'v2'
+
+ skill2 = Mock()
+ skill2.name = 'skill2'
+
+ result = merge_skills([[skill1_v1, skill2], [skill1_v2]])
+
+ assert len(result) == 2
+ names = {s.name for s in result}
+ assert names == {'skill1', 'skill2'}
+
+ # Verify later version takes precedence
+ skill1_result = next(s for s in result if s.name == 'skill1')
+ assert skill1_result.version == 'v2'
+
+ def test_merge_skills_empty_lists(self):
+ """Test merging empty skill lists."""
+ result = merge_skills([[], [], []])
+ assert len(result) == 0
+
+ def test_merge_skills_single_list(self):
+ """Test merging single skill list."""
+ skill1 = Mock()
+ skill1.name = 'skill1'
+ skill2 = Mock()
+ skill2.name = 'skill2'
+
+ result = merge_skills([[skill1, skill2]])
+
+ assert len(result) == 2
+
+ def test_merge_skills_precedence_order(self):
+ """Test that skill precedence follows list order."""
+ # Create three versions of the same skill
+ skill_v1 = Mock()
+ skill_v1.name = 'test_skill'
+ skill_v1.priority = 'low'
+
+ skill_v2 = Mock()
+ skill_v2.name = 'test_skill'
+ skill_v2.priority = 'medium'
+
+ skill_v3 = Mock()
+ skill_v3.name = 'test_skill'
+ skill_v3.priority = 'high'
+
+ # List order: low -> medium -> high
+ # Should result in high priority (last one)
+ result = merge_skills([[skill_v1], [skill_v2], [skill_v3]])
+
+ assert len(result) == 1
+ assert result[0].priority == 'high'
+
+ def test_merge_skills_mixed_empty_and_filled(self):
+ """Test merging with mix of empty and filled lists."""
+ skill1 = Mock()
+ skill1.name = 'skill1'
+ skill2 = Mock()
+ skill2.name = 'skill2'
+
+ result = merge_skills([[], [skill1], [], [skill2], []])
+
+ assert len(result) == 2
+
+
+# ===== Integration Tests =====
+
+
+class TestSkillLoaderIntegration:
+ """Integration tests for the skill loader."""
+
+ @pytest.mark.asyncio
+ @patch('openhands.app_server.app_conversation.skill_loader.load_global_skills')
+ @patch('openhands.sdk.context.skills.load_user_skills')
+ @patch('openhands.app_server.app_conversation.skill_loader.load_repo_skills')
+ async def test_full_loading_workflow(
+ self,
+ mock_load_repo,
+ mock_load_user,
+ mock_load_global,
+ mock_async_remote_workspace,
+ ):
+ """Test the full workflow of loading all skill types."""
+ # Create distinct mock skills for each source
+ global_skill = Mock()
+ global_skill.name = 'global_skill'
+
+ user_skill = Mock()
+ user_skill.name = 'user_skill'
+
+ repo_skill = Mock()
+ repo_skill.name = 'repo_skill'
+
+ mock_load_global.return_value = [global_skill]
+ mock_load_user.return_value = [user_skill]
+ mock_load_repo.return_value = [repo_skill]
+
+ # Simulate loading all sources
+ global_skills = mock_load_global()
+ user_skills = mock_load_user()
+ repo_skills = await mock_load_repo(
+ mock_async_remote_workspace, 'owner/repo', '/workspace'
+ )
+
+ # Merge all skills
+ all_skills = merge_skills([global_skills, user_skills, repo_skills])
+
+ assert len(all_skills) == 3
+ names = {s.name for s in all_skills}
+ assert names == {'global_skill', 'user_skill', 'repo_skill'}
+
+ @pytest.mark.asyncio
+ @patch('openhands.app_server.app_conversation.skill_loader.load_global_skills')
+ @patch('openhands.sdk.context.skills.load_user_skills')
+ @patch('openhands.app_server.app_conversation.skill_loader.load_repo_skills')
+ async def test_loading_with_override_precedence(
+ self,
+ mock_load_repo,
+ mock_load_user,
+ mock_load_global,
+ mock_async_remote_workspace,
+ ):
+ """Test that repo skills override user skills, and user skills override global."""
+ # Create skills with same name but different sources
+ global_skill = Mock()
+ global_skill.name = 'common_skill'
+ global_skill.source = 'global'
+
+ user_skill = Mock()
+ user_skill.name = 'common_skill'
+ user_skill.source = 'user'
+
+ repo_skill = Mock()
+ repo_skill.name = 'common_skill'
+ repo_skill.source = 'repo'
+
+ mock_load_global.return_value = [global_skill]
+ mock_load_user.return_value = [user_skill]
+ mock_load_repo.return_value = [repo_skill]
+
+ # Load and merge in correct precedence order
+ global_skills = mock_load_global()
+ user_skills = mock_load_user()
+ repo_skills = await mock_load_repo(
+ mock_async_remote_workspace, 'owner/repo', '/workspace'
+ )
+
+ all_skills = merge_skills([global_skills, user_skills, repo_skills])
+
+ # Should have only one skill with repo source (highest precedence)
+ assert len(all_skills) == 1
+ assert all_skills[0].source == 'repo'
diff --git a/tests/unit/app_server/test_sql_app_conversation_info_service.py b/tests/unit/app_server/test_sql_app_conversation_info_service.py
index 2ff5974f738e..393e2e654bce 100644
--- a/tests/unit/app_server/test_sql_app_conversation_info_service.py
+++ b/tests/unit/app_server/test_sql_app_conversation_info_service.py
@@ -623,3 +623,383 @@ async def test_complex_date_range_filters(
created_at__gte=start_time, created_at__lt=end_time
)
assert count == 2
+
+ @pytest.mark.asyncio
+ async def test_search_excludes_sub_conversations_by_default(
+ self,
+ service: SQLAppConversationInfoService,
+ ):
+ """Test that search excludes sub-conversations by default."""
+ # Create a parent conversation
+ parent_id = uuid4()
+ parent_info = AppConversationInfo(
+ id=parent_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent',
+ title='Parent Conversation',
+ created_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create sub-conversations
+ sub_info_1 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub1',
+ title='Sub Conversation 1',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 13, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 13, 30, 0, tzinfo=timezone.utc),
+ )
+
+ sub_info_2 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub2',
+ title='Sub Conversation 2',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 14, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 14, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Save all conversations
+ await service.save_app_conversation_info(parent_info)
+ await service.save_app_conversation_info(sub_info_1)
+ await service.save_app_conversation_info(sub_info_2)
+
+ # Search without include_sub_conversations (default False)
+ page = await service.search_app_conversation_info()
+
+ # Should only return the parent conversation
+ assert len(page.items) == 1
+ assert page.items[0].id == parent_id
+ assert page.items[0].title == 'Parent Conversation'
+ assert page.items[0].parent_conversation_id is None
+
+ @pytest.mark.asyncio
+ async def test_search_includes_sub_conversations_when_flag_true(
+ self,
+ service: SQLAppConversationInfoService,
+ ):
+ """Test that search includes sub-conversations when include_sub_conversations=True."""
+ # Create a parent conversation
+ parent_id = uuid4()
+ parent_info = AppConversationInfo(
+ id=parent_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent',
+ title='Parent Conversation',
+ created_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create sub-conversations
+ sub_info_1 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub1',
+ title='Sub Conversation 1',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 13, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 13, 30, 0, tzinfo=timezone.utc),
+ )
+
+ sub_info_2 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub2',
+ title='Sub Conversation 2',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 14, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 14, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Save all conversations
+ await service.save_app_conversation_info(parent_info)
+ await service.save_app_conversation_info(sub_info_1)
+ await service.save_app_conversation_info(sub_info_2)
+
+ # Search with include_sub_conversations=True
+ page = await service.search_app_conversation_info(
+ include_sub_conversations=True
+ )
+
+ # Should return all conversations (1 parent + 2 sub-conversations)
+ assert len(page.items) == 3
+
+ # Verify all conversations are present
+ conversation_ids = {item.id for item in page.items}
+ assert parent_id in conversation_ids
+ assert sub_info_1.id in conversation_ids
+ assert sub_info_2.id in conversation_ids
+
+ # Verify parent conversation has no parent_conversation_id
+ parent_item = next(item for item in page.items if item.id == parent_id)
+ assert parent_item.parent_conversation_id is None
+
+ # Verify sub-conversations have parent_conversation_id set
+ sub_item_1 = next(item for item in page.items if item.id == sub_info_1.id)
+ assert sub_item_1.parent_conversation_id == parent_id
+
+ sub_item_2 = next(item for item in page.items if item.id == sub_info_2.id)
+ assert sub_item_2.parent_conversation_id == parent_id
+
+ @pytest.mark.asyncio
+ async def test_search_sub_conversations_with_filters(
+ self,
+ service: SQLAppConversationInfoService,
+ ):
+ """Test that include_sub_conversations works correctly with other filters."""
+ # Create a parent conversation
+ parent_id = uuid4()
+ parent_info = AppConversationInfo(
+ id=parent_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent',
+ title='Parent Conversation',
+ created_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create sub-conversations with different titles
+ sub_info_1 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub1',
+ title='Sub Conversation Alpha',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 13, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 13, 30, 0, tzinfo=timezone.utc),
+ )
+
+ sub_info_2 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub2',
+ title='Sub Conversation Beta',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 14, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 14, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Save all conversations
+ await service.save_app_conversation_info(parent_info)
+ await service.save_app_conversation_info(sub_info_1)
+ await service.save_app_conversation_info(sub_info_2)
+
+ # Search with title filter and include_sub_conversations=False (default)
+ page = await service.search_app_conversation_info(title__contains='Alpha')
+        # Sub-conversations are excluded by default, and the parent's title
+        # does not contain "Alpha", so no results are expected here
+ assert len(page.items) == 0
+
+ # Search with title filter and include_sub_conversations=True
+ page = await service.search_app_conversation_info(
+ title__contains='Alpha', include_sub_conversations=True
+ )
+ # Should find the sub-conversation with "Alpha" in title
+ assert len(page.items) == 1
+ assert page.items[0].title == 'Sub Conversation Alpha'
+ assert page.items[0].parent_conversation_id == parent_id
+
+ # Search with title filter for "Parent" and include_sub_conversations=True
+ page = await service.search_app_conversation_info(
+ title__contains='Parent', include_sub_conversations=True
+ )
+ # Should find the parent conversation
+ assert len(page.items) == 1
+ assert page.items[0].title == 'Parent Conversation'
+ assert page.items[0].parent_conversation_id is None
+
+ @pytest.mark.asyncio
+ async def test_search_sub_conversations_with_date_filters(
+ self,
+ service: SQLAppConversationInfoService,
+ ):
+ """Test that include_sub_conversations works correctly with date filters."""
+ # Create a parent conversation
+ parent_id = uuid4()
+ parent_info = AppConversationInfo(
+ id=parent_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent',
+ title='Parent Conversation',
+ created_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create sub-conversations at different times
+ sub_info_1 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub1',
+ title='Sub Conversation 1',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 13, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 13, 30, 0, tzinfo=timezone.utc),
+ )
+
+ sub_info_2 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub2',
+ title='Sub Conversation 2',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 14, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 14, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Save all conversations
+ await service.save_app_conversation_info(parent_info)
+ await service.save_app_conversation_info(sub_info_1)
+ await service.save_app_conversation_info(sub_info_2)
+
+ # Search with date filter and include_sub_conversations=False (default)
+ cutoff_time = datetime(2024, 1, 1, 13, 30, 0, tzinfo=timezone.utc)
+ page = await service.search_app_conversation_info(created_at__gte=cutoff_time)
+ # Should only return parent if it matches the filter, but parent is at 12:00
+ assert len(page.items) == 0
+
+ # Search with date filter and include_sub_conversations=True
+ page = await service.search_app_conversation_info(
+ created_at__gte=cutoff_time, include_sub_conversations=True
+ )
+ # Should find sub-conversations created after cutoff (sub_info_2 at 14:00)
+ assert len(page.items) == 1
+ assert page.items[0].id == sub_info_2.id
+ assert page.items[0].parent_conversation_id == parent_id
+
+ @pytest.mark.asyncio
+ async def test_search_multiple_parents_with_sub_conversations(
+ self,
+ service: SQLAppConversationInfoService,
+ ):
+ """Test search with multiple parent conversations and their sub-conversations."""
+ # Create first parent conversation
+ parent1_id = uuid4()
+ parent1_info = AppConversationInfo(
+ id=parent1_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent1',
+ title='Parent 1',
+ created_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create second parent conversation
+ parent2_id = uuid4()
+ parent2_info = AppConversationInfo(
+ id=parent2_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent2',
+ title='Parent 2',
+ created_at=datetime(2024, 1, 1, 13, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 13, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create sub-conversations for parent1
+ sub1_1 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub1_1',
+ title='Sub 1-1',
+ parent_conversation_id=parent1_id,
+ created_at=datetime(2024, 1, 1, 14, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 14, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create sub-conversations for parent2
+ sub2_1 = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_sub2_1',
+ title='Sub 2-1',
+ parent_conversation_id=parent2_id,
+ created_at=datetime(2024, 1, 1, 15, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 15, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Save all conversations
+ await service.save_app_conversation_info(parent1_info)
+ await service.save_app_conversation_info(parent2_info)
+ await service.save_app_conversation_info(sub1_1)
+ await service.save_app_conversation_info(sub2_1)
+
+ # Search without include_sub_conversations (default False)
+ page = await service.search_app_conversation_info()
+ # Should only return the 2 parent conversations
+ assert len(page.items) == 2
+ conversation_ids = {item.id for item in page.items}
+ assert parent1_id in conversation_ids
+ assert parent2_id in conversation_ids
+ assert sub1_1.id not in conversation_ids
+ assert sub2_1.id not in conversation_ids
+
+ # Search with include_sub_conversations=True
+ page = await service.search_app_conversation_info(
+ include_sub_conversations=True
+ )
+ # Should return all 4 conversations (2 parents + 2 sub-conversations)
+ assert len(page.items) == 4
+ conversation_ids = {item.id for item in page.items}
+ assert parent1_id in conversation_ids
+ assert parent2_id in conversation_ids
+ assert sub1_1.id in conversation_ids
+ assert sub2_1.id in conversation_ids
+
+ @pytest.mark.asyncio
+ async def test_search_sub_conversations_with_pagination(
+ self,
+ service: SQLAppConversationInfoService,
+ ):
+ """Test that include_sub_conversations works correctly with pagination."""
+ # Create a parent conversation
+ parent_id = uuid4()
+ parent_info = AppConversationInfo(
+ id=parent_id,
+ created_by_user_id='test_user_123',
+ sandbox_id='sandbox_parent',
+ title='Parent Conversation',
+ created_at=datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 12, 30, 0, tzinfo=timezone.utc),
+ )
+
+ # Create multiple sub-conversations
+ sub_conversations = []
+ for i in range(5):
+ sub_info = AppConversationInfo(
+ id=uuid4(),
+ created_by_user_id='test_user_123',
+ sandbox_id=f'sandbox_sub{i}',
+ title=f'Sub Conversation {i}',
+ parent_conversation_id=parent_id,
+ created_at=datetime(2024, 1, 1, 13 + i, 0, 0, tzinfo=timezone.utc),
+ updated_at=datetime(2024, 1, 1, 13 + i, 30, 0, tzinfo=timezone.utc),
+ )
+ sub_conversations.append(sub_info)
+ await service.save_app_conversation_info(sub_info)
+
+ # Save parent
+ await service.save_app_conversation_info(parent_info)
+
+ # Search with include_sub_conversations=True and pagination
+ page1 = await service.search_app_conversation_info(
+ include_sub_conversations=True, limit=3
+ )
+ # Should return 3 items (1 parent + 2 sub-conversations)
+ assert len(page1.items) == 3
+ assert page1.next_page_id is not None
+
+ # Get next page
+ page2 = await service.search_app_conversation_info(
+ include_sub_conversations=True, limit=3, page_id=page1.next_page_id
+ )
+ # Should return remaining items
+ assert len(page2.items) == 3
+ assert page2.next_page_id is None
+
+ # Verify all conversations are present across pages
+ all_ids = {item.id for item in page1.items} | {item.id for item in page2.items}
+ assert parent_id in all_ids
+ for sub_info in sub_conversations:
+ assert sub_info.id in all_ids
diff --git a/tests/unit/app_server/test_sql_app_conversation_start_task_service.py b/tests/unit/app_server/test_sql_app_conversation_start_task_service.py
index 017f4f1fc847..943595e141f6 100644
--- a/tests/unit/app_server/test_sql_app_conversation_start_task_service.py
+++ b/tests/unit/app_server/test_sql_app_conversation_start_task_service.py
@@ -639,3 +639,145 @@ async def test_search_and_count_with_user_isolation(
user2_count = await user2_service.count_app_conversation_start_tasks()
assert user2_count == 1
+
+ async def test_search_app_conversation_start_tasks_with_created_at_gte_filter(
+ self,
+ service: SQLAppConversationStartTaskService,
+ sample_request: AppConversationStartRequest,
+ ):
+ """Test search with created_at__gte filter."""
+ from datetime import timedelta
+
+ from openhands.agent_server.models import utc_now
+
+ # Create tasks with different creation times
+ base_time = utc_now()
+
+ # Task 1: created 2 hours ago
+ task1 = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id='user1',
+ status=AppConversationStartTaskStatus.WORKING,
+ request=sample_request,
+ )
+ task1.created_at = base_time - timedelta(hours=2)
+ await service.save_app_conversation_start_task(task1)
+
+ # Task 2: created 1 hour ago
+ task2 = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id='user1',
+ status=AppConversationStartTaskStatus.READY,
+ request=sample_request,
+ )
+ task2.created_at = base_time - timedelta(hours=1)
+ await service.save_app_conversation_start_task(task2)
+
+ # Task 3: created 30 minutes ago
+ task3 = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id='user1',
+ status=AppConversationStartTaskStatus.WORKING,
+ request=sample_request,
+ )
+ task3.created_at = base_time - timedelta(minutes=30)
+ await service.save_app_conversation_start_task(task3)
+
+ # Search for tasks created in the last 90 minutes
+ filter_time = base_time - timedelta(minutes=90)
+ result = await service.search_app_conversation_start_tasks(
+ created_at__gte=filter_time
+ )
+
+ # Should return task2 and task3 (created within last 90 minutes)
+ assert len(result.items) == 2
+ task_ids = [task.id for task in result.items]
+ assert task2.id in task_ids
+ assert task3.id in task_ids
+ assert task1.id not in task_ids
+
+ # Test count with the same filter
+ count = await service.count_app_conversation_start_tasks(
+ created_at__gte=filter_time
+ )
+ assert count == 2
+
+ # Search for tasks created in the last 45 minutes
+ filter_time_recent = base_time - timedelta(minutes=45)
+ result_recent = await service.search_app_conversation_start_tasks(
+ created_at__gte=filter_time_recent
+ )
+
+ # Should return only task3
+ assert len(result_recent.items) == 1
+ assert result_recent.items[0].id == task3.id
+
+ # Test count with recent filter
+ count_recent = await service.count_app_conversation_start_tasks(
+ created_at__gte=filter_time_recent
+ )
+ assert count_recent == 1
+
+ async def test_search_app_conversation_start_tasks_combined_filters(
+ self,
+ service: SQLAppConversationStartTaskService,
+ sample_request: AppConversationStartRequest,
+ ):
+ """Test search with both conversation_id and created_at__gte filters."""
+ from datetime import timedelta
+
+ from openhands.agent_server.models import utc_now
+
+ conversation_id1 = uuid4()
+ conversation_id2 = uuid4()
+ base_time = utc_now()
+
+ # Task 1: conversation_id1, created 2 hours ago
+ task1 = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id='user1',
+ status=AppConversationStartTaskStatus.WORKING,
+ app_conversation_id=conversation_id1,
+ request=sample_request,
+ )
+ task1.created_at = base_time - timedelta(hours=2)
+ await service.save_app_conversation_start_task(task1)
+
+ # Task 2: conversation_id1, created 30 minutes ago
+ task2 = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id='user1',
+ status=AppConversationStartTaskStatus.READY,
+ app_conversation_id=conversation_id1,
+ request=sample_request,
+ )
+ task2.created_at = base_time - timedelta(minutes=30)
+ await service.save_app_conversation_start_task(task2)
+
+ # Task 3: conversation_id2, created 30 minutes ago
+ task3 = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id='user1',
+ status=AppConversationStartTaskStatus.WORKING,
+ app_conversation_id=conversation_id2,
+ request=sample_request,
+ )
+ task3.created_at = base_time - timedelta(minutes=30)
+ await service.save_app_conversation_start_task(task3)
+
+ # Search for tasks with conversation_id1 created in the last hour
+ filter_time = base_time - timedelta(hours=1)
+ result = await service.search_app_conversation_start_tasks(
+ conversation_id__eq=conversation_id1, created_at__gte=filter_time
+ )
+
+ # Should return only task2 (conversation_id1 and created within last hour)
+ assert len(result.items) == 1
+ assert result.items[0].id == task2.id
+ assert result.items[0].app_conversation_id == conversation_id1
+
+ # Test count with combined filters
+ count = await service.count_app_conversation_start_tasks(
+ conversation_id__eq=conversation_id1, created_at__gte=filter_time
+ )
+ assert count == 1
diff --git a/tests/unit/app_server/test_webhook_router_stats.py b/tests/unit/app_server/test_webhook_router_stats.py
new file mode 100644
index 000000000000..ba5664a196b7
--- /dev/null
+++ b/tests/unit/app_server/test_webhook_router_stats.py
@@ -0,0 +1,615 @@
+"""Tests for stats event processing in webhook_router.
+
+This module tests the stats event processing functionality introduced for
+updating conversation statistics from ConversationStateUpdateEvent events.
+"""
+
+from datetime import datetime, timezone
+from typing import AsyncGenerator
+from unittest.mock import AsyncMock, MagicMock, patch
+from uuid import uuid4
+
+import pytest
+from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
+from sqlalchemy.pool import StaticPool
+
+from openhands.app_server.app_conversation.app_conversation_models import (
+ AppConversationInfo,
+)
+from openhands.app_server.app_conversation.sql_app_conversation_info_service import (
+ SQLAppConversationInfoService,
+ StoredConversationMetadata,
+)
+from openhands.app_server.user.specifiy_user_context import SpecifyUserContext
+from openhands.app_server.utils.sql_utils import Base
+from openhands.sdk.conversation.conversation_stats import ConversationStats
+from openhands.sdk.event import ConversationStateUpdateEvent
+from openhands.sdk.llm.utils.metrics import Metrics, TokenUsage
+
+# ---------------------------------------------------------------------------
+# Fixtures
+# ---------------------------------------------------------------------------
+
+
+@pytest.fixture
+async def async_engine():
+ """Create an async SQLite engine for testing."""
+ engine = create_async_engine(
+ 'sqlite+aiosqlite:///:memory:',
+ poolclass=StaticPool,
+ connect_args={'check_same_thread': False},
+ echo=False,
+ )
+
+ # Create all tables
+ async with engine.begin() as conn:
+ await conn.run_sync(Base.metadata.create_all)
+
+ yield engine
+
+ await engine.dispose()
+
+
+@pytest.fixture
+async def async_session(async_engine) -> AsyncGenerator[AsyncSession, None]:
+ """Create an async session for testing."""
+ async_session_maker = async_sessionmaker(
+ async_engine, class_=AsyncSession, expire_on_commit=False
+ )
+
+ async with async_session_maker() as db_session:
+ yield db_session
+
+
+@pytest.fixture
+def service(async_session) -> SQLAppConversationInfoService:
+ """Create a SQLAppConversationInfoService instance for testing."""
+ return SQLAppConversationInfoService(
+ db_session=async_session, user_context=SpecifyUserContext(user_id=None)
+ )
+
+
+@pytest.fixture
+async def v1_conversation_metadata(async_session, service):
+ """Create a V1 conversation metadata record for testing."""
+ conversation_id = uuid4()
+ stored = StoredConversationMetadata(
+ conversation_id=str(conversation_id),
+ user_id='test_user_123',
+ sandbox_id='sandbox_123',
+ conversation_version='V1',
+ title='Test Conversation',
+ accumulated_cost=0.0,
+ prompt_tokens=0,
+ completion_tokens=0,
+ cache_read_tokens=0,
+ cache_write_tokens=0,
+ reasoning_tokens=0,
+ context_window=0,
+ per_turn_token=0,
+ created_at=datetime.now(timezone.utc),
+ last_updated_at=datetime.now(timezone.utc),
+ )
+ async_session.add(stored)
+ await async_session.commit()
+ return conversation_id, stored
+
+
+@pytest.fixture
+def stats_event_with_dict_value():
+ """Create a ConversationStateUpdateEvent with dict value."""
+ event_value = {
+ 'usage_to_metrics': {
+ 'agent': {
+ 'accumulated_cost': 0.03411525,
+ 'max_budget_per_task': None,
+ 'accumulated_token_usage': {
+ 'prompt_tokens': 8770,
+ 'completion_tokens': 82,
+ 'cache_read_tokens': 0,
+ 'cache_write_tokens': 8767,
+ 'reasoning_tokens': 0,
+ 'context_window': 0,
+ 'per_turn_token': 8852,
+ },
+ },
+ 'condenser': {
+ 'accumulated_cost': 0.0,
+ 'accumulated_token_usage': {
+ 'prompt_tokens': 0,
+ 'completion_tokens': 0,
+ },
+ },
+ }
+ }
+ return ConversationStateUpdateEvent(key='stats', value=event_value)
+
+
+@pytest.fixture
+def stats_event_with_object_value():
+ """Create a ConversationStateUpdateEvent with object value."""
+ event_value = MagicMock()
+ event_value.usage_to_metrics = {
+ 'agent': {
+ 'accumulated_cost': 0.05,
+ 'accumulated_token_usage': {
+ 'prompt_tokens': 1000,
+ 'completion_tokens': 100,
+ },
+ }
+ }
+ return ConversationStateUpdateEvent(key='stats', value=event_value)
+
+
+@pytest.fixture
+def stats_event_no_usage_to_metrics():
+ """Create a ConversationStateUpdateEvent without usage_to_metrics."""
+ event_value = {'some_other_key': 'value'}
+ return ConversationStateUpdateEvent(key='stats', value=event_value)
+
+
+# ---------------------------------------------------------------------------
+# Tests for update_conversation_statistics
+# ---------------------------------------------------------------------------
+
+
+class TestUpdateConversationStatistics:
+ """Test the update_conversation_statistics method."""
+
+ @pytest.mark.asyncio
+ async def test_update_statistics_success(
+ self, service, async_session, v1_conversation_metadata
+ ):
+ """Test successfully updating conversation statistics."""
+ conversation_id, stored = v1_conversation_metadata
+
+ agent_metrics = Metrics(
+ model_name='test-model',
+ accumulated_cost=0.03411525,
+ max_budget_per_task=10.0,
+ accumulated_token_usage=TokenUsage(
+ model='test-model',
+ prompt_tokens=8770,
+ completion_tokens=82,
+ cache_read_tokens=0,
+ cache_write_tokens=8767,
+ reasoning_tokens=0,
+ context_window=0,
+ per_turn_token=8852,
+ ),
+ )
+ stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
+
+ await service.update_conversation_statistics(conversation_id, stats)
+
+ # Verify the update
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == 0.03411525
+ assert stored.max_budget_per_task == 10.0
+ assert stored.prompt_tokens == 8770
+ assert stored.completion_tokens == 82
+ assert stored.cache_read_tokens == 0
+ assert stored.cache_write_tokens == 8767
+ assert stored.reasoning_tokens == 0
+ assert stored.context_window == 0
+ assert stored.per_turn_token == 8852
+ assert stored.last_updated_at is not None
+
+ @pytest.mark.asyncio
+ async def test_update_statistics_partial_update(
+ self, service, async_session, v1_conversation_metadata
+ ):
+ """Test updating only some statistics fields."""
+ conversation_id, stored = v1_conversation_metadata
+
+ # Set initial values
+ stored.accumulated_cost = 0.01
+ stored.prompt_tokens = 100
+ await async_session.commit()
+
+ agent_metrics = Metrics(
+ model_name='test-model',
+ accumulated_cost=0.05,
+ accumulated_token_usage=TokenUsage(
+ model='test-model',
+ prompt_tokens=200,
+ completion_tokens=0, # Default value
+ ),
+ )
+ stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
+
+ await service.update_conversation_statistics(conversation_id, stats)
+
+ # Verify updated fields
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == 0.05
+ assert stored.prompt_tokens == 200
+        # completion_tokens stays 0: the stats supplied 0, matching the stored value
+ assert stored.completion_tokens == 0
+
+ @pytest.mark.asyncio
+ async def test_update_statistics_no_agent_metrics(
+ self, service, v1_conversation_metadata
+ ):
+ """Test that update is skipped when no agent metrics are present."""
+ conversation_id, stored = v1_conversation_metadata
+ original_cost = stored.accumulated_cost
+
+ condenser_metrics = Metrics(
+ model_name='test-model',
+ accumulated_cost=0.1,
+ )
+ stats = ConversationStats(usage_to_metrics={'condenser': condenser_metrics})
+
+ await service.update_conversation_statistics(conversation_id, stats)
+
+ # Verify no update occurred
+ assert stored.accumulated_cost == original_cost
+
+ @pytest.mark.asyncio
+ async def test_update_statistics_conversation_not_found(self, service):
+ """Test that update is skipped when conversation doesn't exist."""
+ nonexistent_id = uuid4()
+ agent_metrics = Metrics(
+ model_name='test-model',
+ accumulated_cost=0.1,
+ )
+ stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
+
+ # Should not raise an exception
+ await service.update_conversation_statistics(nonexistent_id, stats)
+
+ @pytest.mark.asyncio
+ async def test_update_statistics_v0_conversation_skipped(
+ self, service, async_session
+ ):
+ """Test that V0 conversations are skipped."""
+ conversation_id = uuid4()
+ stored = StoredConversationMetadata(
+ conversation_id=str(conversation_id),
+ user_id='test_user_123',
+ sandbox_id='sandbox_123',
+ conversation_version='V0', # V0 conversation
+ title='V0 Conversation',
+ accumulated_cost=0.0,
+ created_at=datetime.now(timezone.utc),
+ last_updated_at=datetime.now(timezone.utc),
+ )
+ async_session.add(stored)
+ await async_session.commit()
+
+ original_cost = stored.accumulated_cost
+
+ agent_metrics = Metrics(
+ model_name='test-model',
+ accumulated_cost=0.1,
+ )
+ stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
+
+ await service.update_conversation_statistics(conversation_id, stats)
+
+ # Verify no update occurred
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == original_cost
+
+ @pytest.mark.asyncio
+ async def test_update_statistics_with_none_values(
+ self, service, async_session, v1_conversation_metadata
+ ):
+ """Test that None values in stats don't overwrite existing values."""
+ conversation_id, stored = v1_conversation_metadata
+
+ # Set initial values
+ stored.accumulated_cost = 0.01
+ stored.max_budget_per_task = 5.0
+ stored.prompt_tokens = 100
+ await async_session.commit()
+
+ agent_metrics = Metrics(
+ model_name='test-model',
+ accumulated_cost=0.05,
+ max_budget_per_task=None, # None value
+ accumulated_token_usage=TokenUsage(
+ model='test-model',
+ prompt_tokens=200,
+ completion_tokens=0, # Default value (None is not valid for int)
+ ),
+ )
+ stats = ConversationStats(usage_to_metrics={'agent': agent_metrics})
+
+ await service.update_conversation_statistics(conversation_id, stats)
+
+ # Verify updated fields and that None values didn't overwrite
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == 0.05
+ assert stored.max_budget_per_task == 5.0 # Should remain unchanged
+ assert stored.prompt_tokens == 200
+ assert (
+ stored.completion_tokens == 0
+        )  # stays 0: stats supplied 0 (not None), so the stored value is unchanged
+
+
+# ---------------------------------------------------------------------------
+# Tests for process_stats_event
+# ---------------------------------------------------------------------------
+
+
+class TestProcessStatsEvent:
+ """Test the process_stats_event method."""
+
+ @pytest.mark.asyncio
+ async def test_process_stats_event_with_dict_value(
+ self,
+ service,
+ async_session,
+ stats_event_with_dict_value,
+ v1_conversation_metadata,
+ ):
+ """Test processing stats event with dict value."""
+ conversation_id, stored = v1_conversation_metadata
+
+ await service.process_stats_event(stats_event_with_dict_value, conversation_id)
+
+ # Verify the update occurred
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == 0.03411525
+ assert stored.prompt_tokens == 8770
+ assert stored.completion_tokens == 82
+
+ @pytest.mark.asyncio
+ async def test_process_stats_event_with_object_value(
+ self,
+ service,
+ async_session,
+ stats_event_with_object_value,
+ v1_conversation_metadata,
+ ):
+ """Test processing stats event with object value."""
+ conversation_id, stored = v1_conversation_metadata
+
+ await service.process_stats_event(
+ stats_event_with_object_value, conversation_id
+ )
+
+ # Verify the update occurred
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == 0.05
+ assert stored.prompt_tokens == 1000
+ assert stored.completion_tokens == 100
+
+ @pytest.mark.asyncio
+ async def test_process_stats_event_no_usage_to_metrics(
+ self,
+ service,
+ async_session,
+ stats_event_no_usage_to_metrics,
+ v1_conversation_metadata,
+ ):
+ """Test processing stats event without usage_to_metrics."""
+ conversation_id, stored = v1_conversation_metadata
+ original_cost = stored.accumulated_cost
+
+ await service.process_stats_event(
+ stats_event_no_usage_to_metrics, conversation_id
+ )
+
+        # Verify no statistics update occurred (stored metrics are unchanged)
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == original_cost
+
+ @pytest.mark.asyncio
+ async def test_process_stats_event_service_error_handled(
+ self, service, stats_event_with_dict_value
+ ):
+ """Test that errors from service are caught and logged."""
+ conversation_id = uuid4()
+
+ # Should not raise an exception
+ with (
+ patch.object(
+ service,
+ 'update_conversation_statistics',
+ side_effect=Exception('Database error'),
+ ),
+ patch(
+ 'openhands.app_server.app_conversation.sql_app_conversation_info_service.logger'
+ ) as mock_logger,
+ ):
+ await service.process_stats_event(
+ stats_event_with_dict_value, conversation_id
+ )
+
+ # Verify error was logged
+ mock_logger.exception.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_process_stats_event_empty_usage_to_metrics(
+ self, service, async_session, v1_conversation_metadata
+ ):
+ """Test processing stats event with empty usage_to_metrics."""
+ conversation_id, stored = v1_conversation_metadata
+ original_cost = stored.accumulated_cost
+
+ # Create event with empty usage_to_metrics
+ event = ConversationStateUpdateEvent(
+ key='stats', value={'usage_to_metrics': {}}
+ )
+
+ await service.process_stats_event(event, conversation_id)
+
+ # Empty dict is falsy, so update_conversation_statistics should NOT be called
+ await async_session.refresh(stored)
+ assert stored.accumulated_cost == original_cost
+
+
+# ---------------------------------------------------------------------------
+# Integration tests for on_event endpoint
+# ---------------------------------------------------------------------------
+
+
+class TestOnEventStatsProcessing:
+ """Test stats event processing in the on_event endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_on_event_processes_stats_events(self):
+ """Test that on_event processes stats events."""
+ from openhands.app_server.event_callback.webhook_router import on_event
+ from openhands.app_server.sandbox.sandbox_models import (
+ SandboxInfo,
+ SandboxStatus,
+ )
+
+ conversation_id = uuid4()
+ sandbox_id = 'sandbox_123'
+
+ # Create stats event
+ stats_event = ConversationStateUpdateEvent(
+ key='stats',
+ value={
+ 'usage_to_metrics': {
+ 'agent': {
+ 'accumulated_cost': 0.1,
+ 'accumulated_token_usage': {
+ 'prompt_tokens': 1000,
+ },
+ }
+ }
+ },
+ )
+
+ # Create non-stats event
+ other_event = ConversationStateUpdateEvent(
+ key='execution_status', value='running'
+ )
+
+ events = [stats_event, other_event]
+
+ # Mock dependencies
+ mock_sandbox = SandboxInfo(
+ id=sandbox_id,
+ status=SandboxStatus.RUNNING,
+ session_api_key='test_key',
+ created_by_user_id='user_123',
+ sandbox_spec_id='spec_123',
+ )
+
+ mock_app_conversation_info = AppConversationInfo(
+ id=conversation_id,
+ sandbox_id=sandbox_id,
+ created_by_user_id='user_123',
+ )
+
+ mock_event_service = AsyncMock()
+ mock_app_conversation_info_service = AsyncMock()
+ mock_app_conversation_info_service.get_app_conversation_info.return_value = (
+ mock_app_conversation_info
+ )
+
+ # Set up process_stats_event to call update_conversation_statistics
+ async def process_stats_event_side_effect(event, conversation_id):
+ # Simulate what process_stats_event does - call update_conversation_statistics
+ from openhands.sdk.conversation.conversation_stats import ConversationStats
+
+ if isinstance(event.value, dict):
+ stats = ConversationStats.model_validate(event.value)
+ if stats and stats.usage_to_metrics:
+ await mock_app_conversation_info_service.update_conversation_statistics(
+ conversation_id, stats
+ )
+
+ mock_app_conversation_info_service.process_stats_event.side_effect = (
+ process_stats_event_side_effect
+ )
+
+ with (
+ patch(
+ 'openhands.app_server.event_callback.webhook_router.valid_sandbox',
+ return_value=mock_sandbox,
+ ),
+ patch(
+ 'openhands.app_server.event_callback.webhook_router.valid_conversation',
+ return_value=mock_app_conversation_info,
+ ),
+ patch(
+ 'openhands.app_server.event_callback.webhook_router._run_callbacks_in_bg_and_close'
+ ) as mock_callbacks,
+ ):
+ await on_event(
+ events=events,
+ conversation_id=conversation_id,
+ sandbox_info=mock_sandbox,
+ app_conversation_info_service=mock_app_conversation_info_service,
+ event_service=mock_event_service,
+ )
+
+ # Verify events were saved
+ assert mock_event_service.save_event.call_count == 2
+
+ # Verify stats event was processed
+ mock_app_conversation_info_service.update_conversation_statistics.assert_called_once()
+
+ # Verify callbacks were scheduled
+ mock_callbacks.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_on_event_skips_non_stats_events(self):
+ """Test that on_event skips non-stats events."""
+ from openhands.app_server.event_callback.webhook_router import on_event
+ from openhands.app_server.sandbox.sandbox_models import (
+ SandboxInfo,
+ SandboxStatus,
+ )
+ from openhands.events.action.message import MessageAction
+
+ conversation_id = uuid4()
+ sandbox_id = 'sandbox_123'
+
+ # Create non-stats events
+ events = [
+ ConversationStateUpdateEvent(key='execution_status', value='running'),
+ MessageAction(content='test'),
+ ]
+
+ mock_sandbox = SandboxInfo(
+ id=sandbox_id,
+ status=SandboxStatus.RUNNING,
+ session_api_key='test_key',
+ created_by_user_id='user_123',
+ sandbox_spec_id='spec_123',
+ )
+
+ mock_app_conversation_info = AppConversationInfo(
+ id=conversation_id,
+ sandbox_id=sandbox_id,
+ created_by_user_id='user_123',
+ )
+
+ mock_event_service = AsyncMock()
+ mock_app_conversation_info_service = AsyncMock()
+ mock_app_conversation_info_service.get_app_conversation_info.return_value = (
+ mock_app_conversation_info
+ )
+
+ with (
+ patch(
+ 'openhands.app_server.event_callback.webhook_router.valid_sandbox',
+ return_value=mock_sandbox,
+ ),
+ patch(
+ 'openhands.app_server.event_callback.webhook_router.valid_conversation',
+ return_value=mock_app_conversation_info,
+ ),
+ patch(
+ 'openhands.app_server.event_callback.webhook_router._run_callbacks_in_bg_and_close'
+ ),
+ ):
+ await on_event(
+ events=events,
+ conversation_id=conversation_id,
+ sandbox_info=mock_sandbox,
+ app_conversation_info_service=mock_app_conversation_info_service,
+ event_service=mock_event_service,
+ )
+
+ # Verify stats update was NOT called
+ mock_app_conversation_info_service.update_conversation_statistics.assert_not_called()
diff --git a/tests/unit/controller/test_agent_controller.py b/tests/unit/controller/test_agent_controller.py
index 2aa5192c7c34..da12ee8f9e4e 100644
--- a/tests/unit/controller/test_agent_controller.py
+++ b/tests/unit/controller/test_agent_controller.py
@@ -94,6 +94,7 @@ def mock_agent_with_stats():
)
agent_config.disabled_microagents = []
agent_config.enable_mcp = True
+ agent_config.enable_stuck_detection = True
llm_registry.service_to_llm.clear()
mock_llm = llm_registry.get_llm('agent_llm', llm_config)
agent.llm = mock_llm
diff --git a/tests/unit/controller/test_agent_controller_loop_recovery.py b/tests/unit/controller/test_agent_controller_loop_recovery.py
index 36c40f0c42a2..562825cc6b32 100644
--- a/tests/unit/controller/test_agent_controller_loop_recovery.py
+++ b/tests/unit/controller/test_agent_controller_loop_recovery.py
@@ -372,3 +372,39 @@ async def test_controller_truncates_history_during_loop_recovery(
assert mock_controller.state.end_id == 5, (
f'Expected end_id to be 5, got {mock_controller.state.end_id}'
)
+
+ def test_stuck_detection_config_option_exists(self):
+ """Test that the enable_stuck_detection config option exists and defaults to True."""
+ from openhands.core.config.agent_config import AgentConfig
+
+ # Create a default config
+ config = AgentConfig()
+
+ # Verify the attribute exists and defaults to True
+ assert hasattr(config, 'enable_stuck_detection')
+ assert config.enable_stuck_detection is True
+
+ # Verify we can create a config with it disabled
+ config_disabled = AgentConfig(enable_stuck_detection=False)
+ assert config_disabled.enable_stuck_detection is False
+
+ def test_stuck_detection_config_from_env(self):
+        """Test that enable_stuck_detection accepts an explicit value (env-var reload is out of scope here)."""
+ import os
+
+ from openhands.core.config.agent_config import AgentConfig
+
+ # Test with enabled (default)
+ os.environ.pop('AGENT_ENABLE_STUCK_DETECTION', None)
+ config = AgentConfig()
+ assert config.enable_stuck_detection is True
+
+ # Test with explicitly disabled
+ os.environ['AGENT_ENABLE_STUCK_DETECTION'] = 'false'
+ # Need to reload for env var to take effect in real usage
+ # For this test, we just verify the config accepts the parameter
+ config_disabled = AgentConfig(enable_stuck_detection=False)
+ assert config_disabled.enable_stuck_detection is False
+
+ # Cleanup
+ os.environ.pop('AGENT_ENABLE_STUCK_DETECTION', None)
diff --git a/tests/unit/experiments/test_experiment_manager.py b/tests/unit/experiments/test_experiment_manager.py
index 2103e11cb4cf..85faa078f562 100644
--- a/tests/unit/experiments/test_experiment_manager.py
+++ b/tests/unit/experiments/test_experiment_manager.py
@@ -9,6 +9,7 @@
from openhands.app_server.app_conversation.live_status_app_conversation_service import (
LiveStatusAppConversationService,
)
+from openhands.app_server.sandbox.sandbox_models import SandboxInfo, SandboxStatus
from openhands.experiments.experiment_manager import ExperimentManager
from openhands.sdk import Agent
from openhands.sdk.llm import LLM
@@ -125,11 +126,8 @@ async def test_experiment_manager_called_with_correct_parameters_in_context__noo
self,
):
"""
- Use the real LiveStatusAppConversationService to build a StartConversationRequest,
- and verify ExperimentManagerImpl.run_agent_variant_tests__v1:
- - is called exactly once with the (user_id, generated conversation_id, agent)
- - returns the *same* agent instance (no copy/mutation)
- - does not tweak agent fields (LLM, system prompt, etc.)
+ Test that ExperimentManagerImpl.run_agent_variant_tests__v1 is called with correct parameters
+ and returns the same agent instance (no copy/mutation) when building a StartConversationRequest.
"""
# --- Arrange: fixed UUID to assert call parameters deterministically
fixed_conversation_id = UUID('00000000-0000-0000-0000-000000000001')
@@ -142,6 +140,7 @@ async def test_experiment_manager_called_with_correct_parameters_in_context__noo
mock_agent = Mock(spec=Agent)
mock_agent.llm = mock_llm
mock_agent.system_prompt_filename = 'default_system_prompt.j2'
+ mock_agent.model_copy = Mock(return_value=mock_agent)
# Minimal, real-ish user context used by the service
class DummyUserContext:
@@ -153,6 +152,7 @@ async def get_user_info(self):
llm_base_url=None,
llm_api_key=None,
confirmation_mode=False,
+ condenser_max_size=None,
)
async def get_secrets(self):
@@ -188,28 +188,70 @@ async def get_user_id(self):
sandbox_startup_poll_frequency=1,
httpx_client=httpx_client,
web_url=None,
+ openhands_provider_base_url=None,
access_token_hard_timeout=None,
)
+ sandbox = SandboxInfo(
+ id='mock-sandbox-id',
+ created_by_user_id='mock-user-id',
+ sandbox_spec_id='mock-sandbox-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='mock-session-api-key',
+ )
+
# Patch the pieces invoked by the service
with (
- patch(
- 'openhands.app_server.app_conversation.live_status_app_conversation_service.get_default_agent',
+ patch.object(
+ service,
+ '_setup_secrets_for_git_providers',
+ return_value={},
+ ),
+ patch.object(
+ service,
+ '_configure_llm_and_mcp',
+ return_value=(mock_llm, {}),
+ ),
+ patch.object(
+ service,
+ '_create_agent_with_context',
+ return_value=mock_agent,
+ ),
+ patch.object(
+ service,
+ '_load_skills_and_update_agent',
return_value=mock_agent,
),
patch(
'openhands.app_server.app_conversation.live_status_app_conversation_service.uuid4',
return_value=fixed_conversation_id,
),
+ patch(
+ 'openhands.app_server.app_conversation.live_status_app_conversation_service.ExperimentManagerImpl'
+ ) as mock_experiment_manager,
):
+ # Configure the experiment manager mock to return the same agent
+ mock_experiment_manager.run_agent_variant_tests__v1.return_value = (
+ mock_agent
+ )
+
# --- Act: build the start request
start_req = await service._build_start_conversation_request_for_user(
+ sandbox=sandbox,
initial_message=None,
+ system_message_suffix=None, # No additional system message suffix
git_provider=None, # Keep secrets path simple
working_dir='/tmp/project', # Arbitrary path
)
- # The agent in the StartConversationRequest is the *same* object we provided
+ # --- Assert: verify experiment manager was called with correct parameters
+ mock_experiment_manager.run_agent_variant_tests__v1.assert_called_once_with(
+ 'test_user_123', # user_id
+ fixed_conversation_id, # conversation_id
+ mock_agent, # agent (after model_copy with agent_context)
+ )
+
+ # The agent in the StartConversationRequest is the *same* object returned by experiment manager
assert start_req.agent is mock_agent
# No tweaks to agent fields by the experiment manager (noop)
diff --git a/tests/unit/llm/test_llm.py b/tests/unit/llm/test_llm.py
index a6fc7c6726c1..b04425e631da 100644
--- a/tests/unit/llm/test_llm.py
+++ b/tests/unit/llm/test_llm.py
@@ -1255,6 +1255,44 @@ def test_opus_41_keeps_temperature_top_p(mock_completion):
assert 'top_p' not in call_kwargs
+@patch('openhands.llm.llm.litellm_completion')
+def test_opus_45_keeps_temperature_drops_top_p(mock_completion):
+ mock_completion.return_value = {
+ 'choices': [{'message': {'content': 'ok'}}],
+ }
+ config = LLMConfig(
+ model='anthropic/claude-opus-4-5-20251101',
+ api_key='k',
+ temperature=0.7,
+ top_p=0.9,
+ )
+ llm = LLM(config, service_id='svc')
+ llm.completion(messages=[{'role': 'user', 'content': 'hi'}])
+ call_kwargs = mock_completion.call_args[1]
+ assert call_kwargs.get('temperature') == 0.7
+ # Anthropic rejects both temperature and top_p together on Opus 4.5; we keep temperature and drop top_p
+ assert 'top_p' not in call_kwargs
+
+
+@patch('openhands.llm.llm.litellm_completion')
+def test_sonnet_4_keeps_temperature_drops_top_p(mock_completion):
+ mock_completion.return_value = {
+ 'choices': [{'message': {'content': 'ok'}}],
+ }
+ config = LLMConfig(
+ model='anthropic/claude-sonnet-4-20250514',
+ api_key='k',
+ temperature=0.7,
+ top_p=0.9,
+ )
+ llm = LLM(config, service_id='svc')
+ llm.completion(messages=[{'role': 'user', 'content': 'hi'}])
+ call_kwargs = mock_completion.call_args[1]
+ assert call_kwargs.get('temperature') == 0.7
+ # Anthropic rejects both temperature and top_p together on Sonnet 4; we keep temperature and drop top_p
+ assert 'top_p' not in call_kwargs
+
+
@patch('openhands.llm.llm.litellm_completion')
def test_opus_4_keeps_temperature_top_p(mock_completion):
mock_completion.return_value = {
diff --git a/tests/unit/llm/test_llm_fncall_converter.py b/tests/unit/llm/test_llm_fncall_converter.py
index ff4b7961efe6..b4270f89b023 100644
--- a/tests/unit/llm/test_llm_fncall_converter.py
+++ b/tests/unit/llm/test_llm_fncall_converter.py
@@ -701,6 +701,8 @@ def test_get_example_for_tools_all_tools():
""",
),
# Test case with indented code block to verify indentation is preserved
+ # Note: multiline parameter values should NOT have extra newlines before/after
+ # to prevent newline accumulation across multiple LLM response cycles
(
[
{
@@ -716,16 +718,12 @@ def test_get_example_for_tools_all_tools():
"""
str_replace
/test/file.py
-
-def example():
- pass
-
-
-def example():
+def example():
+ pass
+def example():
# This is indented
print("hello")
- return True
-
+ return True
""",
),
# Test case with list parameter value
diff --git a/tests/unit/llm/test_model_features.py b/tests/unit/llm/test_model_features.py
index 8424268fddf4..4a4bf8b3495d 100644
--- a/tests/unit/llm/test_model_features.py
+++ b/tests/unit/llm/test_model_features.py
@@ -23,6 +23,8 @@
'bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0',
'anthropic.claude-3-5-sonnet-20241022-v2',
),
+ ('global.anthropic.claude-sonnet-4', 'global.anthropic.claude-sonnet-4'),
+ ('us.anthropic.claude-sonnet-4', 'us.anthropic.claude-sonnet-4'),
('', ''),
(None, ''), # type: ignore[arg-type]
],
@@ -177,6 +179,8 @@ def test_get_features(model, expect):
'claude-3-5-haiku-20241022',
'claude-sonnet-4-latest',
'claude-opus-4-1-20250805',
+ 'global.anthropic.claude-sonnet-4',
+ 'us.anthropic.claude-sonnet-4',
# OpenAI families
'gpt-4o',
'gpt-4.1',
@@ -188,6 +192,7 @@ def test_get_features(model, expect):
'o4-mini',
# Google Gemini
'gemini-2.5-pro',
+ 'gemini-3-pro-preview',
# Others
'kimi-k2-0711-preview',
'kimi-k2-instruct',
@@ -240,6 +245,8 @@ def test_deepseek_reasoning_effort_models(model):
'claude-3-haiku-20240307',
'claude-3-opus-20240229',
'claude-sonnet-4-latest',
+ 'global.anthropic.claude-sonnet-4',
+ 'us.anthropic.claude-sonnet-4',
],
)
def test_prompt_cache_models(model):
diff --git a/tests/unit/resolver/test_issue_handler_factory.py b/tests/unit/resolver/test_issue_handler_factory.py
index 1e994bfa710c..12932a1282c1 100644
--- a/tests/unit/resolver/test_issue_handler_factory.py
+++ b/tests/unit/resolver/test_issue_handler_factory.py
@@ -3,6 +3,7 @@
from openhands.core.config import LLMConfig
from openhands.integrations.provider import ProviderType
+from openhands.resolver.interfaces.azure_devops import AzureDevOpsIssueHandler
from openhands.resolver.interfaces.github import GithubIssueHandler, GithubPRHandler
from openhands.resolver.interfaces.gitlab import GitlabIssueHandler, GitlabPRHandler
from openhands.resolver.interfaces.issue_definitions import (
@@ -32,28 +33,50 @@ def factory_params(llm_config):
}
+@pytest.fixture
+def azure_factory_params(llm_config):
+ return {
+ 'owner': 'test-org/test-project',
+ 'repo': 'test-repo',
+ 'token': 'test-token',
+ 'username': 'test-user',
+ 'base_domain': 'dev.azure.com',
+ 'llm_config': llm_config,
+ }
+
+
test_cases = [
- # platform, issue_type, expected_context_type, expected_handler_type
- (ProviderType.GITHUB, 'issue', ServiceContextIssue, GithubIssueHandler),
- (ProviderType.GITHUB, 'pr', ServiceContextPR, GithubPRHandler),
- (ProviderType.GITLAB, 'issue', ServiceContextIssue, GitlabIssueHandler),
- (ProviderType.GITLAB, 'pr', ServiceContextPR, GitlabPRHandler),
+ # platform, issue_type, expected_context_type, expected_handler_type, use_azure_params
+ (ProviderType.GITHUB, 'issue', ServiceContextIssue, GithubIssueHandler, False),
+ (ProviderType.GITHUB, 'pr', ServiceContextPR, GithubPRHandler, False),
+ (ProviderType.GITLAB, 'issue', ServiceContextIssue, GitlabIssueHandler, False),
+ (ProviderType.GITLAB, 'pr', ServiceContextPR, GitlabPRHandler, False),
+ (
+ ProviderType.AZURE_DEVOPS,
+ 'issue',
+ ServiceContextIssue,
+ AzureDevOpsIssueHandler,
+ True,
+ ),
+ (ProviderType.AZURE_DEVOPS, 'pr', ServiceContextPR, AzureDevOpsIssueHandler, True),
]
@pytest.mark.parametrize(
- 'platform,issue_type,expected_context_type,expected_handler_type', test_cases
+ 'platform,issue_type,expected_context_type,expected_handler_type,use_azure_params',
+ test_cases,
)
def test_handler_creation(
factory_params,
+ azure_factory_params,
platform: ProviderType,
issue_type: str,
expected_context_type: type,
expected_handler_type: type,
+ use_azure_params: bool,
):
- factory = IssueHandlerFactory(
- **factory_params, platform=platform, issue_type=issue_type
- )
+ params = azure_factory_params if use_azure_params else factory_params
+ factory = IssueHandlerFactory(**params, platform=platform, issue_type=issue_type)
handler = factory.create()
diff --git a/tests/unit/runtime/test_runtime_git_tokens.py b/tests/unit/runtime/test_runtime_git_tokens.py
index 4b4d27650b87..2d5f9ec40d53 100644
--- a/tests/unit/runtime/test_runtime_git_tokens.py
+++ b/tests/unit/runtime/test_runtime_git_tokens.py
@@ -147,9 +147,11 @@ def runtime(temp_dir):
return runtime
-def mock_repo_and_patch(monkeypatch, provider=ProviderType.GITHUB, is_public=True):
+def mock_repo_and_patch(
+ monkeypatch, provider=ProviderType.GITHUB, is_public=True, full_name='owner/repo'
+):
repo = Repository(
- id='123', full_name='owner/repo', git_provider=provider, is_public=is_public
+ id='123', full_name=full_name, git_provider=provider, is_public=is_public
)
async def mock_verify_repo_provider(*_args, **_kwargs):
@@ -216,11 +218,14 @@ async def test_export_latest_git_provider_tokens_success(runtime):
async def test_export_latest_git_provider_tokens_multiple_refs(temp_dir):
"""Test token export with multiple token references"""
config = OpenHandsConfig()
- # Initialize with both GitHub and GitLab tokens
+ # Initialize with GitHub, GitLab, and Azure DevOps tokens
git_provider_tokens = MappingProxyType(
{
ProviderType.GITHUB: ProviderToken(token=SecretStr('github_token')),
ProviderType.GITLAB: ProviderToken(token=SecretStr('gitlab_token')),
+ ProviderType.AZURE_DEVOPS: ProviderToken(
+ token=SecretStr('azure_devops_token')
+ ),
}
)
file_store = get_file_store('local', temp_dir)
@@ -234,15 +239,18 @@ async def test_export_latest_git_provider_tokens_multiple_refs(temp_dir):
)
# Create a command that references multiple tokens
- cmd = CmdRunAction(command='echo $GITHUB_TOKEN && echo $GITLAB_TOKEN')
+ cmd = CmdRunAction(
+ command='echo $GITHUB_TOKEN && echo $GITLAB_TOKEN && echo $AZURE_DEVOPS_TOKEN'
+ )
# Export the tokens
await runtime._export_latest_git_provider_tokens(cmd)
- # Verify that both tokens were exported
+ # Verify that all tokens were exported
assert event_stream.secrets == {
'github_token': 'github_token',
'gitlab_token': 'gitlab_token',
+ 'azure_devops_token': 'azure_devops_token',
}
@@ -478,6 +486,57 @@ async def test_clone_or_init_repo_gitlab_with_token(temp_dir, monkeypatch):
assert result == 'repo'
+@pytest.mark.asyncio
+async def test_clone_or_init_repo_azure_devops_with_token(temp_dir, monkeypatch):
+ """Test cloning Azure DevOps repository with token"""
+ config = OpenHandsConfig()
+
+ # Set up Azure DevOps token
+ azure_devops_token = 'azure_devops_test_token'
+ git_provider_tokens = MappingProxyType(
+ {ProviderType.AZURE_DEVOPS: ProviderToken(token=SecretStr(azure_devops_token))}
+ )
+
+ file_store = get_file_store('local', temp_dir)
+ event_stream = EventStream('abc', file_store)
+ runtime = MockRuntime(
+ config=config,
+ event_stream=event_stream,
+ user_id='test_user',
+ git_provider_tokens=git_provider_tokens,
+ )
+
+ # Mock the repository to be Azure DevOps with 3-part format: org/project/repo
+ azure_repo_name = 'testorg/testproject/testrepo'
+ mock_repo_and_patch(
+ monkeypatch, provider=ProviderType.AZURE_DEVOPS, full_name=azure_repo_name
+ )
+
+ # Call the method with Azure DevOps 3-part format: org/project/repo
+ result = await runtime.clone_or_init_repo(
+ git_provider_tokens=git_provider_tokens,
+ selected_repository=azure_repo_name,
+ selected_branch=None,
+ )
+
+ # Check that the first command is the git clone with the correct URL format with token
+ # Azure DevOps uses Basic auth format: https://org:token@dev.azure.com/org/project/_git/repo
+ clone_cmd = runtime.run_action_calls[0].command
+ expected_repo_path = str(runtime.workspace_root / 'testrepo')
+ assert (
+ f'https://testorg:{azure_devops_token}@dev.azure.com/testorg/testproject/_git/testrepo'
+ in clone_cmd
+ )
+ assert expected_repo_path in clone_cmd
+
+ # Check that the second command is the checkout
+ checkout_cmd = runtime.run_action_calls[1].command
+ assert f'cd {expected_repo_path}' in checkout_cmd
+ assert 'git checkout -b openhands-workspace-' in checkout_cmd
+
+ assert result == 'testrepo'
+
+
@pytest.mark.asyncio
async def test_clone_or_init_repo_with_branch(temp_dir, monkeypatch):
"""Test cloning a repository with a specified branch"""
diff --git a/tests/unit/runtime/test_runtime_gitlab_microagents.py b/tests/unit/runtime/test_runtime_gitlab_microagents.py
index c4a108386b97..8226fde19be0 100644
--- a/tests/unit/runtime/test_runtime_gitlab_microagents.py
+++ b/tests/unit/runtime/test_runtime_gitlab_microagents.py
@@ -238,16 +238,19 @@ def test_get_microagents_from_org_or_user_github(temp_workspace):
# Mock the provider detection to return GitHub
with patch.object(runtime, '_is_gitlab_repository', return_value=False):
- # Mock the _get_authenticated_git_url to simulate failure (no org repo)
- with patch('openhands.runtime.base.call_async_from_sync') as mock_async:
- mock_async.side_effect = Exception('Repository not found')
+ with patch.object(runtime, '_is_azure_devops_repository', return_value=False):
+ # Mock the _get_authenticated_git_url to simulate failure (no org repo)
+ with patch('openhands.runtime.base.call_async_from_sync') as mock_async:
+ mock_async.side_effect = Exception('Repository not found')
- result = runtime.get_microagents_from_org_or_user('github.com/owner/repo')
+ result = runtime.get_microagents_from_org_or_user(
+ 'github.com/owner/repo'
+ )
- # Should only try .openhands, not openhands-config
- assert len(result) == 0
- # Check that only one attempt was made (for .openhands)
- assert mock_async.call_count == 1
+ # Should only try .openhands, not openhands-config
+ assert len(result) == 0
+ # Check that only one attempt was made (for .openhands)
+ assert mock_async.call_count == 1
def test_get_microagents_from_org_or_user_gitlab_success_with_config(temp_workspace):
@@ -260,16 +263,21 @@ def test_get_microagents_from_org_or_user_gitlab_success_with_config(temp_worksp
# Mock the provider detection to return GitLab
with patch.object(runtime, '_is_gitlab_repository', return_value=True):
- # Mock successful cloning for openhands-config
- with patch('openhands.runtime.base.call_async_from_sync') as mock_async:
- mock_async.return_value = 'https://gitlab.com/owner/openhands-config.git'
+ with patch.object(runtime, '_is_azure_devops_repository', return_value=False):
+ # Mock successful cloning for openhands-config
+ with patch('openhands.runtime.base.call_async_from_sync') as mock_async:
+ mock_async.return_value = (
+ 'https://gitlab.com/owner/openhands-config.git'
+ )
- result = runtime.get_microagents_from_org_or_user('gitlab.com/owner/repo')
+ result = runtime.get_microagents_from_org_or_user(
+ 'gitlab.com/owner/repo'
+ )
- # Should succeed with openhands-config
- assert len(result) >= 0 # May be empty if no microagents found
- # Should only try once for openhands-config
- assert mock_async.call_count == 1
+ # Should succeed with openhands-config
+ assert len(result) >= 0 # May be empty if no microagents found
+ # Should only try once for openhands-config
+ assert mock_async.call_count == 1
def test_get_microagents_from_org_or_user_gitlab_failure(temp_workspace):
@@ -278,16 +286,19 @@ def test_get_microagents_from_org_or_user_gitlab_failure(temp_workspace):
# Mock the provider detection to return GitLab
with patch.object(runtime, '_is_gitlab_repository', return_value=True):
- # Mock the _get_authenticated_git_url to fail for openhands-config
- with patch('openhands.runtime.base.call_async_from_sync') as mock_async:
- mock_async.side_effect = Exception('openhands-config not found')
-
- result = runtime.get_microagents_from_org_or_user('gitlab.com/owner/repo')
-
- # Should return empty list when repository doesn't exist
- assert len(result) == 0
- # Should only try once for openhands-config
- assert mock_async.call_count == 1
+ with patch.object(runtime, '_is_azure_devops_repository', return_value=False):
+ # Mock the _get_authenticated_git_url to fail for openhands-config
+ with patch('openhands.runtime.base.call_async_from_sync') as mock_async:
+ mock_async.side_effect = Exception('openhands-config not found')
+
+ result = runtime.get_microagents_from_org_or_user(
+ 'gitlab.com/owner/repo'
+ )
+
+ # Should return empty list when repository doesn't exist
+ assert len(result) == 0
+ # Should only try once for openhands-config
+ assert mock_async.call_count == 1
def test_get_microagents_from_selected_repo_gitlab_uses_openhands(temp_workspace):
diff --git a/tests/unit/server/data_models/test_conversation.py b/tests/unit/server/data_models/test_conversation.py
index 0917dc1facfd..79ff91fa7fd8 100644
--- a/tests/unit/server/data_models/test_conversation.py
+++ b/tests/unit/server/data_models/test_conversation.py
@@ -1,8 +1,10 @@
import json
+import uuid
from contextlib import contextmanager
from datetime import datetime, timezone
from types import MappingProxyType
from unittest.mock import AsyncMock, MagicMock, patch
+from uuid import uuid4
import pytest
from fastapi import FastAPI
@@ -10,8 +12,26 @@
from fastapi.testclient import TestClient
from openhands.app_server.app_conversation.app_conversation_models import (
+ AppConversation,
AppConversationPage,
)
+from openhands.app_server.app_conversation.app_conversation_router import (
+ read_conversation_file,
+)
+from openhands.app_server.app_conversation.live_status_app_conversation_service import (
+ LiveStatusAppConversationService,
+)
+from openhands.app_server.app_conversation.sql_app_conversation_info_service import (
+ SQLAppConversationInfoService,
+)
+from openhands.app_server.sandbox.sandbox_models import (
+ AGENT_SERVER,
+ ExposedUrl,
+ SandboxInfo,
+ SandboxStatus,
+)
+from openhands.app_server.sandbox.sandbox_spec_models import SandboxSpecInfo
+from openhands.app_server.user.user_context import UserContext
from openhands.integrations.service_types import (
AuthenticationError,
CreateMicroagent,
@@ -20,6 +40,11 @@
TaskType,
)
from openhands.runtime.runtime_status import RuntimeStatus
+from openhands.sdk.conversation.state import ConversationExecutionStatus
+from openhands.sdk.workspace.models import FileOperationResult
+from openhands.sdk.workspace.remote.async_remote_workspace import (
+ AsyncRemoteWorkspace,
+)
from openhands.server.data_models.conversation_info import ConversationInfo
from openhands.server.data_models.conversation_info_result_set import (
ConversationInfoResultSet,
@@ -911,10 +936,16 @@ async def test_delete_conversation():
# Create a mock app conversation service
mock_app_conversation_service = MagicMock()
- mock_app_conversation_service.get_app_conversation = AsyncMock(
+
+ # Create a mock app conversation info service
+ mock_app_conversation_info_service = MagicMock()
+ mock_app_conversation_info_service.get_app_conversation_info = AsyncMock(
return_value=None
)
+ # Create a mock sandbox service
+ mock_sandbox_service = MagicMock()
+
# Mock the conversation manager
with patch(
'openhands.server.routes.manage_conversations.conversation_manager'
@@ -932,9 +963,12 @@ async def test_delete_conversation():
# Call delete_conversation
result = await delete_conversation(
+ request=MagicMock(),
conversation_id='some_conversation_id',
user_id='12345',
app_conversation_service=mock_app_conversation_service,
+ app_conversation_info_service=mock_app_conversation_info_service,
+ sandbox_service=mock_sandbox_service,
)
# Verify the result
@@ -954,14 +988,6 @@ async def test_delete_conversation():
@pytest.mark.asyncio
async def test_delete_v1_conversation_success():
"""Test successful deletion of a V1 conversation."""
- from uuid import uuid4
-
- from openhands.app_server.app_conversation.app_conversation_models import (
- AppConversation,
- )
- from openhands.app_server.sandbox.sandbox_models import SandboxStatus
- from openhands.sdk.conversation.state import ConversationExecutionStatus
-
conversation_uuid = uuid4()
conversation_id = str(conversation_uuid)
@@ -972,49 +998,68 @@ async def test_delete_v1_conversation_success():
mock_service = MagicMock()
mock_service_dep.return_value = mock_service
- # Mock the conversation exists
- mock_app_conversation = AppConversation(
- id=conversation_uuid,
- created_by_user_id='test_user',
- sandbox_id='test-sandbox-id',
- title='Test V1 Conversation',
- sandbox_status=SandboxStatus.RUNNING,
- execution_status=ConversationExecutionStatus.RUNNING,
- session_api_key='test-api-key',
- selected_repository='test/repo',
- selected_branch='main',
- git_provider=ProviderType.GITHUB,
- trigger=ConversationTrigger.GUI,
- created_at=datetime.now(timezone.utc),
- updated_at=datetime.now(timezone.utc),
- )
- mock_service.get_app_conversation = AsyncMock(
- return_value=mock_app_conversation
- )
- mock_service.delete_app_conversation = AsyncMock(return_value=True)
+ # Mock the app conversation info service
+ with patch(
+ 'openhands.server.routes.manage_conversations.app_conversation_info_service_dependency'
+ ) as mock_info_service_dep:
+ mock_info_service = MagicMock()
+ mock_info_service_dep.return_value = mock_info_service
- # Call delete_conversation with V1 conversation ID
- result = await delete_conversation(
- conversation_id=conversation_id,
- user_id='test_user',
- app_conversation_service=mock_service,
- )
+ # Mock the sandbox service
+ with patch(
+ 'openhands.server.routes.manage_conversations.sandbox_service_dependency'
+ ) as mock_sandbox_service_dep:
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service_dep.return_value = mock_sandbox_service
+
+ # Mock the conversation info exists
+ mock_app_conversation_info = AppConversation(
+ id=conversation_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test V1 Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+ mock_info_service.get_app_conversation_info = AsyncMock(
+ return_value=mock_app_conversation_info
+ )
+ mock_service.delete_app_conversation = AsyncMock(return_value=True)
- # Verify the result
- assert result is True
+ # Call delete_conversation with V1 conversation ID
+ result = await delete_conversation(
+ request=MagicMock(),
+ conversation_id=conversation_id,
+ user_id='test_user',
+ app_conversation_service=mock_service,
+ app_conversation_info_service=mock_info_service,
+ sandbox_service=mock_sandbox_service,
+ )
- # Verify that get_app_conversation was called
- mock_service.get_app_conversation.assert_called_once_with(conversation_uuid)
+ # Verify the result
+ assert result is True
- # Verify that delete_app_conversation was called with the conversation ID
- mock_service.delete_app_conversation.assert_called_once_with(conversation_uuid)
+ # Verify that get_app_conversation_info was called
+ mock_info_service.get_app_conversation_info.assert_called_once_with(
+ conversation_uuid
+ )
+
+ # Verify that delete_app_conversation was called with the conversation ID
+ mock_service.delete_app_conversation.assert_called_once_with(
+ conversation_uuid
+ )
@pytest.mark.asyncio
async def test_delete_v1_conversation_not_found():
"""Test deletion of a V1 conversation that doesn't exist."""
- from uuid import uuid4
-
conversation_uuid = uuid4()
conversation_id = str(conversation_uuid)
@@ -1025,25 +1070,46 @@ async def test_delete_v1_conversation_not_found():
mock_service = MagicMock()
mock_service_dep.return_value = mock_service
- # Mock the conversation doesn't exist
- mock_service.get_app_conversation = AsyncMock(return_value=None)
- mock_service.delete_app_conversation = AsyncMock(return_value=False)
+ # Mock the app conversation info service
+ with patch(
+ 'openhands.server.routes.manage_conversations.app_conversation_info_service_dependency'
+ ) as mock_info_service_dep:
+ mock_info_service = MagicMock()
+ mock_info_service_dep.return_value = mock_info_service
- # Call delete_conversation with V1 conversation ID
- result = await delete_conversation(
- conversation_id=conversation_id,
- user_id='test_user',
- app_conversation_service=mock_service,
- )
+ # Mock the sandbox service
+ with patch(
+ 'openhands.server.routes.manage_conversations.sandbox_service_dependency'
+ ) as mock_sandbox_service_dep:
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service_dep.return_value = mock_sandbox_service
+
+ # Mock the conversation doesn't exist
+ mock_info_service.get_app_conversation_info = AsyncMock(
+ return_value=None
+ )
+ mock_service.delete_app_conversation = AsyncMock(return_value=False)
- # Verify the result
- assert result is False
+ # Call delete_conversation with V1 conversation ID
+ result = await delete_conversation(
+ request=MagicMock(),
+ conversation_id=conversation_id,
+ user_id='test_user',
+ app_conversation_service=mock_service,
+ app_conversation_info_service=mock_info_service,
+ sandbox_service=mock_sandbox_service,
+ )
- # Verify that get_app_conversation was called
- mock_service.get_app_conversation.assert_called_once_with(conversation_uuid)
+ # Verify the result
+ assert result is False
- # Verify that delete_app_conversation was NOT called
- mock_service.delete_app_conversation.assert_not_called()
+ # Verify that get_app_conversation_info was called
+ mock_info_service.get_app_conversation_info.assert_called_once_with(
+ conversation_uuid
+ )
+
+ # Verify that delete_app_conversation was NOT called
+ mock_service.delete_app_conversation.assert_not_called()
@pytest.mark.asyncio
@@ -1091,26 +1157,45 @@ async def test_delete_v1_conversation_invalid_uuid():
mock_runtime_cls.delete = AsyncMock()
mock_get_runtime_cls.return_value = mock_runtime_cls
- # Call delete_conversation
- result = await delete_conversation(
- conversation_id=conversation_id,
- user_id='test_user',
- app_conversation_service=mock_service,
- )
+ # Mock the app conversation info service
+ with patch(
+ 'openhands.server.routes.manage_conversations.app_conversation_info_service_dependency'
+ ) as mock_info_service_dep:
+ mock_info_service = MagicMock()
+ mock_info_service_dep.return_value = mock_info_service
+
+ # Mock the sandbox service
+ with patch(
+ 'openhands.server.routes.manage_conversations.sandbox_service_dependency'
+ ) as mock_sandbox_service_dep:
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service_dep.return_value = mock_sandbox_service
+
+ # Call delete_conversation
+ result = await delete_conversation(
+ request=MagicMock(),
+ conversation_id=conversation_id,
+ user_id='test_user',
+ app_conversation_service=mock_service,
+ app_conversation_info_service=mock_info_service,
+ sandbox_service=mock_sandbox_service,
+ )
- # Verify the result
- assert result is True
+ # Verify the result
+ assert result is True
- # Verify V0 logic was used
- mock_store.delete_metadata.assert_called_once_with(conversation_id)
- mock_runtime_cls.delete.assert_called_once_with(conversation_id)
+ # Verify V0 logic was used
+ mock_store.delete_metadata.assert_called_once_with(
+ conversation_id
+ )
+ mock_runtime_cls.delete.assert_called_once_with(
+ conversation_id
+ )
@pytest.mark.asyncio
async def test_delete_v1_conversation_service_error():
"""Test deletion when app conversation service raises an error."""
- from uuid import uuid4
-
conversation_uuid = uuid4()
conversation_id = str(conversation_uuid)
@@ -1121,70 +1206,89 @@ async def test_delete_v1_conversation_service_error():
mock_service = MagicMock()
mock_service_dep.return_value = mock_service
- # Mock service error
- mock_service.get_app_conversation = AsyncMock(
- side_effect=Exception('Service error')
- )
-
- # Mock V0 conversation logic as fallback
+ # Mock the app conversation info service
with patch(
- 'openhands.server.routes.manage_conversations.ConversationStoreImpl.get_instance'
- ) as mock_get_instance:
- mock_store = MagicMock()
- mock_store.get_metadata = AsyncMock(
- return_value=ConversationMetadata(
- conversation_id=conversation_id,
- title='Test V0 Conversation',
- created_at=datetime.fromisoformat('2025-01-01T00:00:00+00:00'),
- last_updated_at=datetime.fromisoformat('2025-01-01T00:01:00+00:00'),
- selected_repository='test/repo',
- user_id='test_user',
- )
- )
- mock_store.delete_metadata = AsyncMock()
- mock_get_instance.return_value = mock_store
+ 'openhands.server.routes.manage_conversations.app_conversation_info_service_dependency'
+ ) as mock_info_service_dep:
+ mock_info_service = MagicMock()
+ mock_info_service_dep.return_value = mock_info_service
- # Mock conversation manager
+ # Mock the sandbox service
with patch(
- 'openhands.server.routes.manage_conversations.conversation_manager'
- ) as mock_manager:
- mock_manager.is_agent_loop_running = AsyncMock(return_value=False)
- mock_manager.get_connections = AsyncMock(return_value={})
+ 'openhands.server.routes.manage_conversations.sandbox_service_dependency'
+ ) as mock_sandbox_service_dep:
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service_dep.return_value = mock_sandbox_service
+
+ # Mock service error
+ mock_info_service.get_app_conversation_info = AsyncMock(
+ side_effect=Exception('Service error')
+ )
- # Mock runtime
+ # Mock V0 conversation logic as fallback
with patch(
- 'openhands.server.routes.manage_conversations.get_runtime_cls'
- ) as mock_get_runtime_cls:
- mock_runtime_cls = MagicMock()
- mock_runtime_cls.delete = AsyncMock()
- mock_get_runtime_cls.return_value = mock_runtime_cls
-
- # Call delete_conversation
- result = await delete_conversation(
- conversation_id=conversation_id,
- user_id='test_user',
- app_conversation_service=mock_service,
+ 'openhands.server.routes.manage_conversations.ConversationStoreImpl.get_instance'
+ ) as mock_get_instance:
+ mock_store = MagicMock()
+ mock_store.get_metadata = AsyncMock(
+ return_value=ConversationMetadata(
+ conversation_id=conversation_id,
+ title='Test V0 Conversation',
+ created_at=datetime.fromisoformat(
+ '2025-01-01T00:00:00+00:00'
+ ),
+ last_updated_at=datetime.fromisoformat(
+ '2025-01-01T00:01:00+00:00'
+ ),
+ selected_repository='test/repo',
+ user_id='test_user',
+ )
)
+ mock_store.delete_metadata = AsyncMock()
+ mock_get_instance.return_value = mock_store
+
+ # Mock conversation manager
+ with patch(
+ 'openhands.server.routes.manage_conversations.conversation_manager'
+ ) as mock_manager:
+ mock_manager.is_agent_loop_running = AsyncMock(
+ return_value=False
+ )
+ mock_manager.get_connections = AsyncMock(return_value={})
+
+ # Mock runtime
+ with patch(
+ 'openhands.server.routes.manage_conversations.get_runtime_cls'
+ ) as mock_get_runtime_cls:
+ mock_runtime_cls = MagicMock()
+ mock_runtime_cls.delete = AsyncMock()
+ mock_get_runtime_cls.return_value = mock_runtime_cls
+
+ # Call delete_conversation
+ result = await delete_conversation(
+ request=MagicMock(),
+ conversation_id=conversation_id,
+ user_id='test_user',
+ app_conversation_service=mock_service,
+ app_conversation_info_service=mock_info_service,
+ sandbox_service=mock_sandbox_service,
+ )
- # Verify the result (should fallback to V0)
- assert result is True
+ # Verify the result (should fallback to V0)
+ assert result is True
- # Verify V0 logic was used
- mock_store.delete_metadata.assert_called_once_with(conversation_id)
- mock_runtime_cls.delete.assert_called_once_with(conversation_id)
+ # Verify V0 logic was used
+ mock_store.delete_metadata.assert_called_once_with(
+ conversation_id
+ )
+ mock_runtime_cls.delete.assert_called_once_with(
+ conversation_id
+ )
@pytest.mark.asyncio
async def test_delete_v1_conversation_with_agent_server():
"""Test V1 conversation deletion with agent server integration."""
- from uuid import uuid4
-
- from openhands.app_server.app_conversation.app_conversation_models import (
- AppConversation,
- )
- from openhands.app_server.sandbox.sandbox_models import SandboxStatus
- from openhands.sdk.conversation.state import ConversationExecutionStatus
-
conversation_uuid = uuid4()
conversation_id = str(conversation_uuid)
@@ -1195,42 +1299,63 @@ async def test_delete_v1_conversation_with_agent_server():
mock_service = MagicMock()
mock_service_dep.return_value = mock_service
- # Mock the conversation exists with running sandbox
- mock_app_conversation = AppConversation(
- id=conversation_uuid,
- created_by_user_id='test_user',
- sandbox_id='test-sandbox-id',
- title='Test V1 Conversation',
- sandbox_status=SandboxStatus.RUNNING,
- execution_status=ConversationExecutionStatus.RUNNING,
- session_api_key='test-api-key',
- selected_repository='test/repo',
- selected_branch='main',
- git_provider=ProviderType.GITHUB,
- trigger=ConversationTrigger.GUI,
- created_at=datetime.now(timezone.utc),
- updated_at=datetime.now(timezone.utc),
- )
- mock_service.get_app_conversation = AsyncMock(
- return_value=mock_app_conversation
- )
- mock_service.delete_app_conversation = AsyncMock(return_value=True)
+ # Mock the app conversation info service
+ with patch(
+ 'openhands.server.routes.manage_conversations.app_conversation_info_service_dependency'
+ ) as mock_info_service_dep:
+ mock_info_service = MagicMock()
+ mock_info_service_dep.return_value = mock_info_service
- # Call delete_conversation with V1 conversation ID
- result = await delete_conversation(
- conversation_id=conversation_id,
- user_id='test_user',
- app_conversation_service=mock_service,
- )
+ # Mock the sandbox service
+ with patch(
+ 'openhands.server.routes.manage_conversations.sandbox_service_dependency'
+ ) as mock_sandbox_service_dep:
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service_dep.return_value = mock_sandbox_service
+
+ # Mock the conversation exists with running sandbox
+ mock_app_conversation_info = AppConversation(
+ id=conversation_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test V1 Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+ mock_info_service.get_app_conversation_info = AsyncMock(
+ return_value=mock_app_conversation_info
+ )
+ mock_service.delete_app_conversation = AsyncMock(return_value=True)
- # Verify the result
- assert result is True
+ # Call delete_conversation with V1 conversation ID
+ result = await delete_conversation(
+ request=MagicMock(),
+ conversation_id=conversation_id,
+ user_id='test_user',
+ app_conversation_service=mock_service,
+ app_conversation_info_service=mock_info_service,
+ sandbox_service=mock_sandbox_service,
+ )
- # Verify that get_app_conversation was called
- mock_service.get_app_conversation.assert_called_once_with(conversation_uuid)
+ # Verify the result
+ assert result is True
- # Verify that delete_app_conversation was called with the conversation ID
- mock_service.delete_app_conversation.assert_called_once_with(conversation_uuid)
+ # Verify that get_app_conversation_info was called
+ mock_info_service.get_app_conversation_info.assert_called_once_with(
+ conversation_uuid
+ )
+
+ # Verify that delete_app_conversation was called with the conversation ID
+ mock_service.delete_app_conversation.assert_called_once_with(
+ conversation_uuid
+ )
@pytest.mark.asyncio
@@ -1919,3 +2044,1341 @@ async def get_agent_loop_info(*args, **kwargs):
# Check third conversation
assert result_set.results[2].conversation_id == 'conversation_3'
assert result_set.results[2].pr_number == [300]
+
+
+@pytest.mark.asyncio
+async def test_delete_v1_conversation_with_sub_conversations():
+ """Test V1 conversation deletion cascades to delete all sub-conversations."""
+ parent_uuid = uuid4()
+    str(parent_uuid)  # NOTE(review): result is discarded — dead statement, safe to remove
+ sub1_uuid = uuid4()
+ sub2_uuid = uuid4()
+
+ # Create a real service instance to test the cascade deletion logic
+ mock_info_service = MagicMock(spec=SQLAppConversationInfoService)
+ mock_start_task_service = MagicMock()
+ mock_sandbox_service = MagicMock()
+ mock_httpx_client = MagicMock()
+
+ # Mock parent conversation
+ parent_conversation = AppConversation(
+ id=parent_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Parent Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sub-conversations
+ sub1_conversation = AppConversation(
+ id=sub1_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id', # Same sandbox as parent
+ title='Sub Conversation 1',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key-sub1',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ sub2_conversation = AppConversation(
+ id=sub2_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id', # Same sandbox as parent
+ title='Sub Conversation 2',
+ sandbox_status=SandboxStatus.PAUSED,
+ execution_status=None,
+ session_api_key=None,
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock get_app_conversation to return conversations
+ async def mock_get_app_conversation(conv_id):
+ if conv_id == parent_uuid:
+ return parent_conversation
+ elif conv_id == sub1_uuid:
+ return sub1_conversation
+ elif conv_id == sub2_uuid:
+ return sub2_conversation
+ return None
+
+ # Mock get_sub_conversation_ids to return sub-conversation IDs
+ mock_info_service.get_sub_conversation_ids = AsyncMock(
+ return_value=[sub1_uuid, sub2_uuid]
+ )
+
+ # Mock delete methods
+ mock_info_service.delete_app_conversation_info = AsyncMock(return_value=True)
+ mock_start_task_service.delete_app_conversation_start_tasks = AsyncMock(
+ return_value=True
+ )
+
+ # Mock sandbox service - use actual SandboxInfo model
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ # Mock httpx client for agent server calls
+ mock_response = MagicMock()
+ mock_response.raise_for_status = MagicMock()
+ mock_httpx_client.delete = AsyncMock(return_value=mock_response)
+
+ # Create service instance
+ mock_user_context = MagicMock(spec=UserContext)
+ mock_user_context.get_user_id = AsyncMock(return_value='test_user')
+
+ service = LiveStatusAppConversationService(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ app_conversation_info_service=mock_info_service,
+ app_conversation_start_task_service=mock_start_task_service,
+ event_callback_service=MagicMock(),
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=MagicMock(),
+ jwt_service=MagicMock(),
+ sandbox_startup_timeout=120,
+ sandbox_startup_poll_frequency=2,
+ httpx_client=mock_httpx_client,
+ web_url=None,
+ openhands_provider_base_url=None,
+ access_token_hard_timeout=None,
+ )
+
+ # Mock get_app_conversation method
+ service.get_app_conversation = mock_get_app_conversation
+
+ # Execute deletion
+ result = await service.delete_app_conversation(parent_uuid)
+
+ # Verify result
+ assert result is True
+
+ # Verify get_sub_conversation_ids was called with parent ID
+ mock_info_service.get_sub_conversation_ids.assert_called_once_with(parent_uuid)
+
+ # Verify sub-conversations were deleted (from database)
+ assert (
+ mock_info_service.delete_app_conversation_info.call_count == 3
+ ) # 2 subs + 1 parent
+ delete_calls = [
+ call_args[0][0]
+ for call_args in mock_info_service.delete_app_conversation_info.call_args_list
+ ]
+ assert sub1_uuid in delete_calls
+ assert sub2_uuid in delete_calls
+ assert parent_uuid in delete_calls
+
+ # Verify sub-conversation start tasks were deleted
+ assert mock_start_task_service.delete_app_conversation_start_tasks.call_count == 3
+ task_delete_calls = [
+ call_args[0][0]
+ for call_args in mock_start_task_service.delete_app_conversation_start_tasks.call_args_list
+ ]
+ assert sub1_uuid in task_delete_calls
+ assert sub2_uuid in task_delete_calls
+ assert parent_uuid in task_delete_calls
+
+ # Verify agent server was called for running sub-conversations
+ # sub1 has session_api_key and is running, so it should be deleted from agent server
+ # sub2 is paused (no session_api_key), so no agent server call
+ # parent is running, so it should be deleted from agent server
+ assert mock_httpx_client.delete.call_count == 2 # sub1 + parent
+ delete_urls = [
+ call_args[0][0] for call_args in mock_httpx_client.delete.call_args_list
+ ]
+ # The URL format is: http://agent:8000/api/conversations/{uuid}
+ # UUID is converted to string in the URL
+ assert any(f'/api/conversations/{sub1_uuid}' in url for url in delete_urls)
+ assert any(f'/api/conversations/{parent_uuid}' in url for url in delete_urls)
+
+
+@pytest.mark.asyncio
+async def test_delete_v1_conversation_with_no_sub_conversations():
+ """Test V1 conversation deletion when there are no sub-conversations."""
+ parent_uuid = uuid4()
+
+ # Create a real service instance
+ mock_info_service = MagicMock(spec=SQLAppConversationInfoService)
+ mock_start_task_service = MagicMock()
+ mock_sandbox_service = MagicMock()
+ mock_httpx_client = MagicMock()
+
+ # Mock parent conversation
+ parent_conversation = AppConversation(
+ id=parent_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Parent Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock no sub-conversations
+ mock_info_service.get_sub_conversation_ids = AsyncMock(return_value=[])
+ mock_info_service.delete_app_conversation_info = AsyncMock(return_value=True)
+ mock_start_task_service.delete_app_conversation_start_tasks = AsyncMock(
+ return_value=True
+ )
+
+ # Mock sandbox service - use actual SandboxInfo model
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ # Mock httpx client
+ mock_response = MagicMock()
+ mock_response.raise_for_status = MagicMock()
+ mock_httpx_client.delete = AsyncMock(return_value=mock_response)
+
+ # Create service instance
+ mock_user_context = MagicMock(spec=UserContext)
+ mock_user_context.get_user_id = AsyncMock(return_value='test_user')
+
+ service = LiveStatusAppConversationService(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ app_conversation_info_service=mock_info_service,
+ app_conversation_start_task_service=mock_start_task_service,
+ event_callback_service=MagicMock(),
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=MagicMock(),
+ jwt_service=MagicMock(),
+ sandbox_startup_timeout=120,
+ sandbox_startup_poll_frequency=2,
+ httpx_client=mock_httpx_client,
+ web_url=None,
+ openhands_provider_base_url=None,
+ access_token_hard_timeout=None,
+ )
+
+ # Mock get_app_conversation method
+ service.get_app_conversation = AsyncMock(return_value=parent_conversation)
+
+ # Execute deletion
+ result = await service.delete_app_conversation(parent_uuid)
+
+ # Verify result
+ assert result is True
+
+ # Verify get_sub_conversation_ids was called
+ mock_info_service.get_sub_conversation_ids.assert_called_once_with(parent_uuid)
+
+ # Verify only parent was deleted
+ mock_info_service.delete_app_conversation_info.assert_called_once_with(parent_uuid)
+ mock_start_task_service.delete_app_conversation_start_tasks.assert_called_once_with(
+ parent_uuid
+ )
+
+ # Verify agent server was called for parent
+ mock_httpx_client.delete.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_delete_v1_conversation_sub_conversation_deletion_error():
+ """Test that deletion continues even if one sub-conversation fails to delete."""
+ parent_uuid = uuid4()
+ sub1_uuid = uuid4()
+ sub2_uuid = uuid4()
+
+ # Create a real service instance
+ mock_info_service = MagicMock(spec=SQLAppConversationInfoService)
+ mock_start_task_service = MagicMock()
+ mock_sandbox_service = MagicMock()
+ mock_httpx_client = MagicMock()
+
+ # Mock parent conversation
+ parent_conversation = AppConversation(
+ id=parent_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Parent Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sub-conversations
+    AppConversation(  # NOTE(review): constructed but never used — sub1's get_app_conversation raises instead
+ id=sub1_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Sub Conversation 1',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key-sub1',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ sub2_conversation = AppConversation(
+ id=sub2_uuid,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Sub Conversation 2',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key-sub2',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock get_sub_conversation_ids
+ mock_info_service.get_sub_conversation_ids = AsyncMock(
+ return_value=[sub1_uuid, sub2_uuid]
+ )
+
+ # Mock get_app_conversation to raise error for sub1, but work for sub2
+ async def mock_get_app_conversation(conv_id):
+ if conv_id == parent_uuid:
+ return parent_conversation
+ elif conv_id == sub1_uuid:
+ raise Exception('Failed to get sub-conversation 1')
+ elif conv_id == sub2_uuid:
+ return sub2_conversation
+ return None
+
+ # Mock delete methods - sub1 will fail, sub2 and parent should succeed
+    def mock_delete_info(conv_id: uuid.UUID):  # NOTE(review): annotation needs 'import uuid' at module level — confirm; 'from uuid import uuid4' alone would NameError here
+ if conv_id == sub1_uuid:
+ raise Exception('Failed to delete sub-conversation 1')
+ return True
+
+ mock_info_service.delete_app_conversation_info = AsyncMock(
+ side_effect=mock_delete_info
+ )
+ mock_start_task_service.delete_app_conversation_start_tasks = AsyncMock(
+ return_value=True
+ )
+
+ # Mock sandbox service - use actual SandboxInfo model
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ # Mock httpx client
+ mock_response = MagicMock()
+ mock_response.raise_for_status = MagicMock()
+ mock_httpx_client.delete = AsyncMock(return_value=mock_response)
+
+ # Create service instance
+ mock_user_context = MagicMock(spec=UserContext)
+ mock_user_context.get_user_id = AsyncMock(return_value='test_user')
+
+ service = LiveStatusAppConversationService(
+ init_git_in_empty_workspace=True,
+ user_context=mock_user_context,
+ app_conversation_info_service=mock_info_service,
+ app_conversation_start_task_service=mock_start_task_service,
+ event_callback_service=MagicMock(),
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=MagicMock(),
+ jwt_service=MagicMock(),
+ sandbox_startup_timeout=120,
+ sandbox_startup_poll_frequency=2,
+ httpx_client=mock_httpx_client,
+ web_url=None,
+ openhands_provider_base_url=None,
+ access_token_hard_timeout=None,
+ )
+
+ # Mock get_app_conversation method
+ service.get_app_conversation = mock_get_app_conversation
+
+ # Execute deletion - should succeed despite sub1 failure
+ result = await service.delete_app_conversation(parent_uuid)
+
+ # Verify result - should still succeed
+ assert result is True
+
+ # Verify get_sub_conversation_ids was called
+ mock_info_service.get_sub_conversation_ids.assert_called_once_with(parent_uuid)
+
+ # Verify sub2 and parent were deleted (sub1 failed but didn't stop the process)
+ # The delete_app_conversation_info should be called for sub2 and parent
+ # (sub1 fails in get_app_conversation, so it never gets to delete)
+ delete_calls = [
+ call_args[0][0]
+ for call_args in mock_info_service.delete_app_conversation_info.call_args_list
+ ]
+ assert sub2_uuid in delete_calls
+ assert parent_uuid in delete_calls
+ assert sub1_uuid not in delete_calls # Failed before deletion
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_success():
+ """Test successfully retrieving file content from conversation workspace."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+ file_content = '# Project Plan\n\n## Phase 1\n- Task 1\n- Task 2\n'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Mock tempfile and file operations
+ temp_file_path = '/tmp/test_file_12345'
+ mock_file_result = FileOperationResult(
+ success=True,
+ source_path=file_path,
+ destination_path=temp_file_path,
+ file_size=len(file_content.encode('utf-8')),
+ )
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.AsyncRemoteWorkspace'
+ ) as mock_workspace_class:
+ mock_workspace = MagicMock(spec=AsyncRemoteWorkspace)
+ mock_workspace.file_download = AsyncMock(return_value=mock_file_result)
+ mock_workspace_class.return_value = mock_workspace
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.tempfile.NamedTemporaryFile'
+ ) as mock_tempfile:
+ mock_temp_file = MagicMock()
+ mock_temp_file.name = temp_file_path
+ mock_tempfile.return_value.__enter__ = MagicMock(
+ return_value=mock_temp_file
+ )
+ mock_tempfile.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch('builtins.open', create=True) as mock_open:
+ mock_file_handle = MagicMock()
+ mock_file_handle.read.return_value = file_content.encode('utf-8')
+ mock_open.return_value.__enter__ = MagicMock(
+ return_value=mock_file_handle
+ )
+ mock_open.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.os.unlink'
+ ) as mock_unlink:
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == file_content
+
+ # Verify services were called correctly
+ mock_app_conversation_service.get_app_conversation.assert_called_once_with(
+ conversation_id
+ )
+ mock_sandbox_service.get_sandbox.assert_called_once_with(
+ 'test-sandbox-id'
+ )
+ mock_sandbox_spec_service.get_sandbox_spec.assert_called_once_with(
+ 'test-spec-id'
+ )
+
+ # Verify workspace was created and file_download was called
+ mock_workspace_class.assert_called_once()
+ mock_workspace.file_download.assert_called_once_with(
+ source_path=file_path,
+ destination_path=temp_file_path,
+ )
+
+ # Verify file was read and cleaned up
+ mock_open.assert_called_once_with(temp_file_path, 'rb')
+ mock_unlink.assert_called_once_with(temp_file_path)
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_different_path():
+ """Test successfully retrieving file content from a different file path."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/src/main.py'
+ file_content = 'def main():\n print("Hello, World!")\n'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Mock tempfile and file operations
+ temp_file_path = '/tmp/test_file_67890'
+ mock_file_result = FileOperationResult(
+ success=True,
+ source_path=file_path,
+ destination_path=temp_file_path,
+ file_size=len(file_content.encode('utf-8')),
+ )
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.AsyncRemoteWorkspace'
+ ) as mock_workspace_class:
+ mock_workspace = MagicMock(spec=AsyncRemoteWorkspace)
+ mock_workspace.file_download = AsyncMock(return_value=mock_file_result)
+ mock_workspace_class.return_value = mock_workspace
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.tempfile.NamedTemporaryFile'
+ ) as mock_tempfile:
+ mock_temp_file = MagicMock()
+ mock_temp_file.name = temp_file_path
+ mock_tempfile.return_value.__enter__ = MagicMock(
+ return_value=mock_temp_file
+ )
+ mock_tempfile.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch('builtins.open', create=True) as mock_open:
+ mock_file_handle = MagicMock()
+ mock_file_handle.read.return_value = file_content.encode('utf-8')
+ mock_open.return_value.__enter__ = MagicMock(
+ return_value=mock_file_handle
+ )
+ mock_open.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.os.unlink'
+ ) as mock_unlink:
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == file_content
+
+ # Verify workspace was created and file_download was called
+ mock_workspace_class.assert_called_once()
+ mock_workspace.file_download.assert_called_once_with(
+ source_path=file_path,
+ destination_path=temp_file_path,
+ )
+
+ # Verify file was read and cleaned up
+ mock_open.assert_called_once_with(temp_file_path, 'rb')
+ mock_unlink.assert_called_once_with(temp_file_path)
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_conversation_not_found():
+ """Test when conversation doesn't exist."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(return_value=None)
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_spec_service = MagicMock()
+
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == ''
+
+ # Verify only conversation service was called
+ mock_app_conversation_service.get_app_conversation.assert_called_once_with(
+ conversation_id
+ )
+ mock_sandbox_service.get_sandbox.assert_not_called()
+ mock_sandbox_spec_service.get_sandbox_spec.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_sandbox_not_found():
+ """Test when sandbox doesn't exist."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=None)
+
+ mock_sandbox_spec_service = MagicMock()
+
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == ''
+
+ # Verify services were called
+ mock_app_conversation_service.get_app_conversation.assert_called_once_with(
+ conversation_id
+ )
+ mock_sandbox_service.get_sandbox.assert_called_once_with('test-sandbox-id')
+ mock_sandbox_spec_service.get_sandbox_spec.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_sandbox_not_running():
+ """Test when sandbox is not in RUNNING status."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.PAUSED,
+ execution_status=None,
+ session_api_key=None,
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.PAUSED,
+ session_api_key=None,
+ exposed_urls=None,
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == ''
+
+ # Verify services were called
+ mock_app_conversation_service.get_app_conversation.assert_called_once_with(
+ conversation_id
+ )
+ mock_sandbox_service.get_sandbox.assert_called_once_with('test-sandbox-id')
+ mock_sandbox_spec_service.get_sandbox_spec.assert_not_called()
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_sandbox_spec_not_found():
+ """Test when sandbox spec doesn't exist."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(return_value=None)
+
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == ''
+
+ # Verify services were called
+ mock_app_conversation_service.get_app_conversation.assert_called_once_with(
+ conversation_id
+ )
+ mock_sandbox_service.get_sandbox.assert_called_once_with('test-sandbox-id')
+ mock_sandbox_spec_service.get_sandbox_spec.assert_called_once_with('test-spec-id')
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_no_exposed_urls():
+ """Test when sandbox has no exposed URLs."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox with no exposed URLs
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=None,
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == ''
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_no_agent_server_url():
+ """Test when sandbox has exposed URLs but no AGENT_SERVER."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox with exposed URLs but no AGENT_SERVER
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name='OTHER_SERVICE', url='http://other:9000', port=9000)
+ ],
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result
+ assert result == ''
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_file_not_found():
+ """Test when file doesn't exist."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Mock tempfile and file operations for file not found
+ temp_file_path = '/tmp/test_file_not_found'
+ mock_file_result = FileOperationResult(
+ success=False,
+ source_path=file_path,
+ destination_path=temp_file_path,
+ error=f'File not found: {file_path}',
+ )
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.AsyncRemoteWorkspace'
+ ) as mock_workspace_class:
+ mock_workspace = MagicMock(spec=AsyncRemoteWorkspace)
+ mock_workspace.file_download = AsyncMock(return_value=mock_file_result)
+ mock_workspace_class.return_value = mock_workspace
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.tempfile.NamedTemporaryFile'
+ ) as mock_tempfile:
+ mock_temp_file = MagicMock()
+ mock_temp_file.name = temp_file_path
+ mock_tempfile.return_value.__enter__ = MagicMock(
+ return_value=mock_temp_file
+ )
+ mock_tempfile.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.os.unlink'
+ ) as mock_unlink:
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result (empty string when file_download fails)
+ assert result == ''
+
+ # Verify cleanup still happens
+ mock_unlink.assert_called_once_with(temp_file_path)
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_empty_file():
+ """Test when file exists but is empty."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Mock tempfile and file operations for empty file
+ temp_file_path = '/tmp/test_file_empty'
+ empty_content = ''
+ mock_file_result = FileOperationResult(
+ success=True,
+ source_path=file_path,
+ destination_path=temp_file_path,
+ file_size=0,
+ )
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.AsyncRemoteWorkspace'
+ ) as mock_workspace_class:
+ mock_workspace = MagicMock(spec=AsyncRemoteWorkspace)
+ mock_workspace.file_download = AsyncMock(return_value=mock_file_result)
+ mock_workspace_class.return_value = mock_workspace
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.tempfile.NamedTemporaryFile'
+ ) as mock_tempfile:
+ mock_temp_file = MagicMock()
+ mock_temp_file.name = temp_file_path
+ mock_tempfile.return_value.__enter__ = MagicMock(
+ return_value=mock_temp_file
+ )
+ mock_tempfile.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch('builtins.open', create=True) as mock_open:
+ mock_file_handle = MagicMock()
+ mock_file_handle.read.return_value = empty_content.encode('utf-8')
+ mock_open.return_value.__enter__ = MagicMock(
+ return_value=mock_file_handle
+ )
+ mock_open.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.os.unlink'
+ ) as mock_unlink:
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result (empty string when file is empty)
+ assert result == ''
+
+ # Verify cleanup happens
+ mock_unlink.assert_called_once_with(temp_file_path)
+
+
+@pytest.mark.asyncio
+async def test_read_conversation_file_command_exception():
+    """Test when the workspace file download raises an exception."""
+ conversation_id = uuid4()
+ file_path = '/workspace/project/PLAN.md'
+
+ # Mock conversation
+ mock_conversation = AppConversation(
+ id=conversation_id,
+ created_by_user_id='test_user',
+ sandbox_id='test-sandbox-id',
+ title='Test Conversation',
+ sandbox_status=SandboxStatus.RUNNING,
+ execution_status=ConversationExecutionStatus.RUNNING,
+ session_api_key='test-api-key',
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=ProviderType.GITHUB,
+ trigger=ConversationTrigger.GUI,
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Mock sandbox
+ mock_sandbox = SandboxInfo(
+ id='test-sandbox-id',
+ created_by_user_id='test_user',
+ sandbox_spec_id='test-spec-id',
+ status=SandboxStatus.RUNNING,
+ session_api_key='test-api-key',
+ exposed_urls=[
+ ExposedUrl(name=AGENT_SERVER, url='http://agent:8000', port=8000)
+ ],
+ )
+
+ # Mock sandbox spec
+ mock_sandbox_spec = SandboxSpecInfo(
+ id='test-spec-id',
+ command=None,
+ working_dir='/workspace',
+ created_at=datetime.now(timezone.utc),
+ )
+
+ # Mock services
+ mock_app_conversation_service = MagicMock()
+ mock_app_conversation_service.get_app_conversation = AsyncMock(
+ return_value=mock_conversation
+ )
+
+ mock_sandbox_service = MagicMock()
+ mock_sandbox_service.get_sandbox = AsyncMock(return_value=mock_sandbox)
+
+ mock_sandbox_spec_service = MagicMock()
+ mock_sandbox_spec_service.get_sandbox_spec = AsyncMock(
+ return_value=mock_sandbox_spec
+ )
+
+ # Mock tempfile and file operations for exception case
+ temp_file_path = '/tmp/test_file_exception'
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.AsyncRemoteWorkspace'
+ ) as mock_workspace_class:
+ mock_workspace = MagicMock(spec=AsyncRemoteWorkspace)
+ mock_workspace.file_download = AsyncMock(
+ side_effect=Exception('Connection timeout')
+ )
+ mock_workspace_class.return_value = mock_workspace
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.tempfile.NamedTemporaryFile'
+ ) as mock_tempfile:
+ mock_temp_file = MagicMock()
+ mock_temp_file.name = temp_file_path
+ mock_tempfile.return_value.__enter__ = MagicMock(
+ return_value=mock_temp_file
+ )
+ mock_tempfile.return_value.__exit__ = MagicMock(return_value=None)
+
+ with patch(
+ 'openhands.app_server.app_conversation.app_conversation_router.os.unlink'
+ ) as mock_unlink:
+ # Call the endpoint
+ result = await read_conversation_file(
+ conversation_id=conversation_id,
+ file_path=file_path,
+ app_conversation_service=mock_app_conversation_service,
+ sandbox_service=mock_sandbox_service,
+ sandbox_spec_service=mock_sandbox_spec_service,
+ )
+
+ # Verify result (empty string on exception)
+ assert result == ''
+
+ # Verify cleanup still happens even on exception
+ mock_unlink.assert_called_once_with(temp_file_path)
diff --git a/tests/unit/server/routes/test_conversation_routes.py b/tests/unit/server/routes/test_conversation_routes.py
index f909e44cc867..343894cefa87 100644
--- a/tests/unit/server/routes/test_conversation_routes.py
+++ b/tests/unit/server/routes/test_conversation_routes.py
@@ -11,10 +11,21 @@
AppConversationInfoService,
)
from openhands.app_server.app_conversation.app_conversation_models import (
+ AgentType,
AppConversationInfo,
+ AppConversationPage,
+ AppConversationStartRequest,
+ AppConversationStartTask,
+ AppConversationStartTaskStatus,
+)
+from openhands.app_server.app_conversation.app_conversation_service import (
+ AppConversationService,
)
from openhands.microagent.microagent import KnowledgeMicroagent, RepoMicroagent
from openhands.microagent.types import MicroagentMetadata, MicroagentType
+from openhands.server.data_models.conversation_info_result_set import (
+ ConversationInfoResultSet,
+)
from openhands.server.routes.conversation import (
AddMessageRequest,
add_message,
@@ -22,11 +33,15 @@
)
from openhands.server.routes.manage_conversations import (
UpdateConversationRequest,
+ search_conversations,
update_conversation,
)
from openhands.server.session.conversation import ServerConversation
from openhands.storage.conversation.conversation_store import ConversationStore
-from openhands.storage.data_models.conversation_metadata import ConversationMetadata
+from openhands.storage.data_models.conversation_metadata import (
+ ConversationMetadata,
+ ConversationTrigger,
+)
@pytest.mark.asyncio
@@ -1125,3 +1140,322 @@ async def test_add_message_empty_message():
call_args = mock_manager.send_event_to_conversation.call_args
message_data = call_args[0][1]
assert message_data['args']['content'] == ''
+
+
+@pytest.mark.sub_conversation
+@pytest.mark.asyncio
+async def test_create_sub_conversation_with_planning_agent():
+ """Test creating a sub-conversation from a parent conversation with planning agent."""
+ from uuid import uuid4
+
+ parent_conversation_id = uuid4()
+ user_id = 'test_user_456'
+ sandbox_id = 'test_sandbox_123'
+
+ # Create mock parent conversation info
+ parent_info = AppConversationInfo(
+ id=parent_conversation_id,
+ created_by_user_id=user_id,
+ sandbox_id=sandbox_id,
+ selected_repository='test/repo',
+ selected_branch='main',
+ git_provider=None,
+ title='Parent Conversation',
+ llm_model='anthropic/claude-3-5-sonnet-20241022',
+ created_at=datetime.now(timezone.utc),
+ updated_at=datetime.now(timezone.utc),
+ )
+
+ # Create sub-conversation request with planning agent
+ sub_conversation_request = AppConversationStartRequest(
+ parent_conversation_id=parent_conversation_id,
+ agent_type=AgentType.PLAN,
+ initial_message=None,
+ )
+
+ # Create mock app conversation service
+ mock_app_conversation_service = MagicMock(spec=AppConversationService)
+ mock_app_conversation_info_service = MagicMock(spec=AppConversationInfoService)
+
+ # Mock the service to return parent info
+ mock_app_conversation_info_service.get_app_conversation_info = AsyncMock(
+ return_value=parent_info
+ )
+
+ # Mock the start_app_conversation method to return a task
+ async def mock_start_generator(request):
+ task = AppConversationStartTask(
+ id=uuid4(),
+ created_by_user_id=user_id,
+ status=AppConversationStartTaskStatus.READY,
+ app_conversation_id=uuid4(),
+ sandbox_id=sandbox_id,
+ agent_server_url='http://agent-server:8000',
+ request=request,
+ )
+ yield task
+
+ mock_app_conversation_service.start_app_conversation = mock_start_generator
+
+ # Test the service method directly
+ async for task in mock_app_conversation_service.start_app_conversation(
+ sub_conversation_request
+ ):
+ # Verify the task was created with planning agent
+ assert task is not None
+ assert task.status == AppConversationStartTaskStatus.READY
+ assert task.request.agent_type == AgentType.PLAN
+ assert task.request.parent_conversation_id == parent_conversation_id
+ assert task.sandbox_id == sandbox_id
+ break
+
+
+@pytest.mark.asyncio
+async def test_search_conversations_include_sub_conversations_default_false():
+ """Test that include_sub_conversations defaults to False when not provided."""
+ with patch('openhands.server.routes.manage_conversations.config') as mock_config:
+ mock_config.conversation_max_age_seconds = 864000 # 10 days
+ with patch(
+ 'openhands.server.routes.manage_conversations.conversation_manager'
+ ) as mock_manager:
+
+ async def mock_get_running_agent_loops(*args, **kwargs):
+ return set()
+
+ async def mock_get_connections(*args, **kwargs):
+ return {}
+
+ async def get_agent_loop_info(*args, **kwargs):
+ return []
+
+ mock_manager.get_running_agent_loops = mock_get_running_agent_loops
+ mock_manager.get_connections = mock_get_connections
+ mock_manager.get_agent_loop_info = get_agent_loop_info
+ with patch(
+ 'openhands.server.routes.manage_conversations.datetime'
+ ) as mock_datetime:
+ mock_datetime.now.return_value = datetime.fromisoformat(
+ '2025-01-01T00:00:00+00:00'
+ )
+ mock_datetime.fromisoformat = datetime.fromisoformat
+ mock_datetime.timezone = timezone
+
+ # Mock the conversation store
+ mock_store = MagicMock()
+ mock_store.search = AsyncMock(
+ return_value=ConversationInfoResultSet(results=[])
+ )
+
+ # Create a mock app conversation service
+ mock_app_conversation_service = AsyncMock()
+ mock_app_conversation_service.search_app_conversations.return_value = (
+ AppConversationPage(items=[])
+ )
+
+ # Call search_conversations without include_sub_conversations parameter
+ await search_conversations(
+ page_id=None,
+ limit=20,
+ selected_repository=None,
+ conversation_trigger=None,
+ conversation_store=mock_store,
+ app_conversation_service=mock_app_conversation_service,
+ )
+
+ # Verify that search_app_conversations was called with include_sub_conversations=False (default)
+ mock_app_conversation_service.search_app_conversations.assert_called_once()
+ call_kwargs = (
+ mock_app_conversation_service.search_app_conversations.call_args[1]
+ )
+ assert call_kwargs.get('include_sub_conversations') is False
+
+
+@pytest.mark.asyncio
+async def test_search_conversations_include_sub_conversations_explicit_false():
+ """Test that include_sub_conversations=False is properly passed through."""
+ with patch('openhands.server.routes.manage_conversations.config') as mock_config:
+ mock_config.conversation_max_age_seconds = 864000 # 10 days
+ with patch(
+ 'openhands.server.routes.manage_conversations.conversation_manager'
+ ) as mock_manager:
+
+ async def mock_get_running_agent_loops(*args, **kwargs):
+ return set()
+
+ async def mock_get_connections(*args, **kwargs):
+ return {}
+
+ async def get_agent_loop_info(*args, **kwargs):
+ return []
+
+ mock_manager.get_running_agent_loops = mock_get_running_agent_loops
+ mock_manager.get_connections = mock_get_connections
+ mock_manager.get_agent_loop_info = get_agent_loop_info
+ with patch(
+ 'openhands.server.routes.manage_conversations.datetime'
+ ) as mock_datetime:
+ mock_datetime.now.return_value = datetime.fromisoformat(
+ '2025-01-01T00:00:00+00:00'
+ )
+ mock_datetime.fromisoformat = datetime.fromisoformat
+ mock_datetime.timezone = timezone
+
+ # Mock the conversation store
+ mock_store = MagicMock()
+ mock_store.search = AsyncMock(
+ return_value=ConversationInfoResultSet(results=[])
+ )
+
+ # Create a mock app conversation service
+ mock_app_conversation_service = AsyncMock()
+ mock_app_conversation_service.search_app_conversations.return_value = (
+ AppConversationPage(items=[])
+ )
+
+ # Call search_conversations with include_sub_conversations=False
+ await search_conversations(
+ page_id=None,
+ limit=20,
+ selected_repository=None,
+ conversation_trigger=None,
+ include_sub_conversations=False,
+ conversation_store=mock_store,
+ app_conversation_service=mock_app_conversation_service,
+ )
+
+ # Verify that search_app_conversations was called with include_sub_conversations=False
+ mock_app_conversation_service.search_app_conversations.assert_called_once()
+ call_kwargs = (
+ mock_app_conversation_service.search_app_conversations.call_args[1]
+ )
+ assert call_kwargs.get('include_sub_conversations') is False
+
+
+@pytest.mark.asyncio
+async def test_search_conversations_include_sub_conversations_explicit_true():
+ """Test that include_sub_conversations=True is properly passed through."""
+ with patch('openhands.server.routes.manage_conversations.config') as mock_config:
+ mock_config.conversation_max_age_seconds = 864000 # 10 days
+ with patch(
+ 'openhands.server.routes.manage_conversations.conversation_manager'
+ ) as mock_manager:
+
+ async def mock_get_running_agent_loops(*args, **kwargs):
+ return set()
+
+ async def mock_get_connections(*args, **kwargs):
+ return {}
+
+ async def get_agent_loop_info(*args, **kwargs):
+ return []
+
+ mock_manager.get_running_agent_loops = mock_get_running_agent_loops
+ mock_manager.get_connections = mock_get_connections
+ mock_manager.get_agent_loop_info = get_agent_loop_info
+ with patch(
+ 'openhands.server.routes.manage_conversations.datetime'
+ ) as mock_datetime:
+ mock_datetime.now.return_value = datetime.fromisoformat(
+ '2025-01-01T00:00:00+00:00'
+ )
+ mock_datetime.fromisoformat = datetime.fromisoformat
+ mock_datetime.timezone = timezone
+
+ # Mock the conversation store
+ mock_store = MagicMock()
+ mock_store.search = AsyncMock(
+ return_value=ConversationInfoResultSet(results=[])
+ )
+
+ # Create a mock app conversation service
+ mock_app_conversation_service = AsyncMock()
+ mock_app_conversation_service.search_app_conversations.return_value = (
+ AppConversationPage(items=[])
+ )
+
+ # Call search_conversations with include_sub_conversations=True
+ await search_conversations(
+ page_id=None,
+ limit=20,
+ selected_repository=None,
+ conversation_trigger=None,
+ include_sub_conversations=True,
+ conversation_store=mock_store,
+ app_conversation_service=mock_app_conversation_service,
+ )
+
+ # Verify that search_app_conversations was called with include_sub_conversations=True
+ mock_app_conversation_service.search_app_conversations.assert_called_once()
+ call_kwargs = (
+ mock_app_conversation_service.search_app_conversations.call_args[1]
+ )
+ assert call_kwargs.get('include_sub_conversations') is True
+
+
+@pytest.mark.asyncio
+async def test_search_conversations_include_sub_conversations_with_other_filters():
+ """Test that include_sub_conversations works correctly with other filters."""
+ with patch('openhands.server.routes.manage_conversations.config') as mock_config:
+ mock_config.conversation_max_age_seconds = 864000 # 10 days
+ with patch(
+ 'openhands.server.routes.manage_conversations.conversation_manager'
+ ) as mock_manager:
+
+ async def mock_get_running_agent_loops(*args, **kwargs):
+ return set()
+
+ async def mock_get_connections(*args, **kwargs):
+ return {}
+
+ async def get_agent_loop_info(*args, **kwargs):
+ return []
+
+ mock_manager.get_running_agent_loops = mock_get_running_agent_loops
+ mock_manager.get_connections = mock_get_connections
+ mock_manager.get_agent_loop_info = get_agent_loop_info
+ with patch(
+ 'openhands.server.routes.manage_conversations.datetime'
+ ) as mock_datetime:
+ mock_datetime.now.return_value = datetime.fromisoformat(
+ '2025-01-01T00:00:00+00:00'
+ )
+ mock_datetime.fromisoformat = datetime.fromisoformat
+ mock_datetime.timezone = timezone
+
+ # Mock the conversation store
+ mock_store = MagicMock()
+ mock_store.search = AsyncMock(
+ return_value=ConversationInfoResultSet(results=[])
+ )
+
+ # Create a mock app conversation service
+ mock_app_conversation_service = AsyncMock()
+ mock_app_conversation_service.search_app_conversations.return_value = (
+ AppConversationPage(items=[])
+ )
+
+ # Create a valid base64-encoded page_id for testing
+ import base64
+
+ page_id_data = json.dumps({'v0': None, 'v1': 'test_v1_page_id'})
+ encoded_page_id = base64.b64encode(page_id_data.encode()).decode()
+
+ # Call search_conversations with include_sub_conversations and other filters
+ await search_conversations(
+ page_id=encoded_page_id,
+ limit=50,
+ selected_repository='test/repo',
+ conversation_trigger=ConversationTrigger.GUI,
+ include_sub_conversations=True,
+ conversation_store=mock_store,
+ app_conversation_service=mock_app_conversation_service,
+ )
+
+ # Verify that search_app_conversations was called with all parameters including include_sub_conversations=True
+ mock_app_conversation_service.search_app_conversations.assert_called_once()
+ call_kwargs = (
+ mock_app_conversation_service.search_app_conversations.call_args[1]
+ )
+ assert call_kwargs.get('include_sub_conversations') is True
+ assert call_kwargs.get('page_id') == 'test_v1_page_id'
+ assert call_kwargs.get('limit') == 50
diff --git a/tests/unit/server/routes/test_settings_api.py b/tests/unit/server/routes/test_settings_api.py
index f01b1d77df3a..6ea408038810 100644
--- a/tests/unit/server/routes/test_settings_api.py
+++ b/tests/unit/server/routes/test_settings_api.py
@@ -46,6 +46,9 @@ async def get_secrets_store(self) -> SecretsStore | None:
async def get_secrets(self) -> Secrets | None:
return None
+ async def get_mcp_api_key(self) -> str | None:
+ return None
+
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
return MockUserAuth()
diff --git a/tests/unit/server/routes/test_settings_store_functions.py b/tests/unit/server/routes/test_settings_store_functions.py
index 6296a8e354cf..c6eb6f5628c8 100644
--- a/tests/unit/server/routes/test_settings_store_functions.py
+++ b/tests/unit/server/routes/test_settings_store_functions.py
@@ -2,13 +2,16 @@
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
+from fastapi import FastAPI
from fastapi.testclient import TestClient
from pydantic import SecretStr
from openhands.integrations.provider import ProviderToken
from openhands.integrations.service_types import ProviderType
from openhands.server.routes.secrets import (
- app,
+ app as secrets_router,
+)
+from openhands.server.routes.secrets import (
check_provider_tokens,
)
from openhands.server.routes.settings import store_llm_settings
@@ -27,7 +30,12 @@ async def get_settings_store(request):
@pytest.fixture
def test_client():
- # Create a test client
+ # Create a test client with a FastAPI app that includes the secrets router
+ # This is necessary because TestClient with APIRouter directly doesn't set up
+ # the full middleware stack in newer FastAPI versions (0.118.0+)
+ test_app = FastAPI()
+ test_app.include_router(secrets_router)
+
with (
patch.dict(os.environ, {'SESSION_API_KEY': ''}, clear=False),
patch('openhands.server.dependencies._SESSION_API_KEY', None),
@@ -36,7 +44,7 @@ def test_client():
AsyncMock(return_value=''),
),
):
- client = TestClient(app)
+ client = TestClient(test_app)
yield client
diff --git a/tests/unit/server/session/test_conversation_init_data.py b/tests/unit/server/session/test_conversation_init_data.py
new file mode 100644
index 000000000000..3c5d7d97f792
--- /dev/null
+++ b/tests/unit/server/session/test_conversation_init_data.py
@@ -0,0 +1,272 @@
+"""Unit tests for ConversationInitData - specifically testing the field validator.
+
+These tests verify that the immutable_validator correctly converts dict to MappingProxyType
+for git_provider_tokens and custom_secrets fields, ensuring type safety.
+"""
+
+from types import MappingProxyType
+
+import pytest
+from pydantic import SecretStr
+
+from openhands.integrations.provider import CustomSecret, ProviderToken, ProviderType
+from openhands.server.session.conversation_init_data import ConversationInitData
+from openhands.storage.data_models.settings import Settings
+
+
+@pytest.fixture
+def base_settings():
+ """Create a base Settings object with minimal required fields."""
+ return Settings(
+ language='en',
+ agent='CodeActAgent',
+ max_iterations=100,
+ llm_model='anthropic/claude-3-5-sonnet-20241022',
+ llm_api_key=SecretStr('test_api_key_12345'),
+ llm_base_url=None,
+ )
+
+
+class TestConversationInitDataValidator:
+ """Test suite for ConversationInitData field validator."""
+
+ def test_git_provider_tokens_dict_converted_to_mappingproxy(self, base_settings):
+ """Test that dict passed as git_provider_tokens is converted to MappingProxyType."""
+ # Create provider tokens as a regular dict
+ provider_tokens_dict = {
+ ProviderType.GITHUB: ProviderToken(
+ token=SecretStr('ghp_test_token_123'), user_id='test_user'
+ ),
+ ProviderType.GITLAB: ProviderToken(
+ token=SecretStr('glpat_test_token_456'), user_id='test_user_2'
+ ),
+ }
+
+ # Create ConversationInitData with dict
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens=provider_tokens_dict,
+ )
+
+ # Verify it's now a MappingProxyType
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+ assert ProviderType.GITHUB in init_data.git_provider_tokens
+ assert ProviderType.GITLAB in init_data.git_provider_tokens
+ assert (
+ init_data.git_provider_tokens[ProviderType.GITHUB].token.get_secret_value()
+ == 'ghp_test_token_123'
+ )
+
+ def test_git_provider_tokens_mappingproxy_preserved(self, base_settings):
+        """Test that a MappingProxyType passed as git_provider_tokens remains a MappingProxyType."""
+ # Create provider tokens as MappingProxyType
+ provider_token = ProviderToken(
+ token=SecretStr('ghp_test_token_789'), user_id='test_user_3'
+ )
+ provider_tokens_proxy = MappingProxyType({ProviderType.GITHUB: provider_token})
+
+ # Create ConversationInitData with MappingProxyType
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens=provider_tokens_proxy,
+ )
+
+ # Verify it's a MappingProxyType (Pydantic may create a new one, but type is preserved)
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+ assert (
+ init_data.git_provider_tokens[ProviderType.GITHUB].token.get_secret_value()
+ == 'ghp_test_token_789'
+ )
+ assert (
+ init_data.git_provider_tokens[ProviderType.GITHUB].user_id == 'test_user_3'
+ )
+
+ def test_git_provider_tokens_none_preserved(self, base_settings):
+ """Test that None passed as git_provider_tokens is preserved."""
+ # Create ConversationInitData with None
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens=None,
+ )
+
+ # Verify it's still None
+ assert init_data.git_provider_tokens is None
+
+ def test_custom_secrets_dict_converted_to_mappingproxy(self, base_settings):
+ """Test that dict passed as custom_secrets is converted to MappingProxyType."""
+ # Create custom secrets as a regular dict
+ custom_secrets_dict = {
+ 'API_KEY': CustomSecret(
+ secret=SecretStr('api_key_123'), description='API key for service'
+ ),
+ 'DATABASE_URL': CustomSecret(
+ secret=SecretStr('postgres://localhost'), description='Database URL'
+ ),
+ }
+
+ # Create ConversationInitData with dict
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ custom_secrets=custom_secrets_dict,
+ )
+
+ # Verify it's now a MappingProxyType
+ assert isinstance(init_data.custom_secrets, MappingProxyType)
+ assert 'API_KEY' in init_data.custom_secrets
+ assert 'DATABASE_URL' in init_data.custom_secrets
+ assert (
+ init_data.custom_secrets['API_KEY'].secret.get_secret_value()
+ == 'api_key_123'
+ )
+
+ def test_custom_secrets_mappingproxy_preserved(self, base_settings):
+        """Test that a MappingProxyType passed as custom_secrets remains a MappingProxyType."""
+ # Create custom secrets as MappingProxyType
+ custom_secret = CustomSecret(
+ secret=SecretStr('api_key_456'), description='API key'
+ )
+ custom_secrets_proxy = MappingProxyType({'API_KEY': custom_secret})
+
+ # Create ConversationInitData with MappingProxyType
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ custom_secrets=custom_secrets_proxy,
+ )
+
+ # Verify it's a MappingProxyType (Pydantic may create a new one, but type is preserved)
+ assert isinstance(init_data.custom_secrets, MappingProxyType)
+ assert (
+ init_data.custom_secrets['API_KEY'].secret.get_secret_value()
+ == 'api_key_456'
+ )
+ assert init_data.custom_secrets['API_KEY'].description == 'API key'
+
+ def test_custom_secrets_none_preserved(self, base_settings):
+ """Test that None passed as custom_secrets is preserved."""
+ # Create ConversationInitData with None
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ custom_secrets=None,
+ )
+
+ # Verify it's still None
+ assert init_data.custom_secrets is None
+
+ def test_both_fields_dict_converted(self, base_settings):
+ """Test that both fields are converted when passed as dicts."""
+ provider_tokens_dict = {
+ ProviderType.GITHUB: ProviderToken(
+ token=SecretStr('ghp_token'), user_id='user1'
+ )
+ }
+ custom_secrets_dict = {
+ 'SECRET': CustomSecret(
+ secret=SecretStr('secret_value'), description='A secret'
+ )
+ }
+
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens=provider_tokens_dict,
+ custom_secrets=custom_secrets_dict,
+ )
+
+ # Both should be MappingProxyType
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+ assert isinstance(init_data.custom_secrets, MappingProxyType)
+
+ def test_empty_dict_converted_to_mappingproxy(self, base_settings):
+ """Test that empty dict is converted to empty MappingProxyType."""
+ # Create ConversationInitData with empty dicts
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens={},
+ custom_secrets={},
+ )
+
+ # Both should be MappingProxyType (even if empty)
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+ assert isinstance(init_data.custom_secrets, MappingProxyType)
+ assert len(init_data.git_provider_tokens) == 0
+ assert len(init_data.custom_secrets) == 0
+
+ def test_validator_prevents_mutation(self, base_settings):
+ """Test that MappingProxyType prevents mutation of the underlying data."""
+ provider_tokens_dict = {
+ ProviderType.GITHUB: ProviderToken(
+ token=SecretStr('ghp_token'), user_id='user1'
+ )
+ }
+
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens=provider_tokens_dict,
+ )
+
+ # Verify it's a MappingProxyType (which is immutable)
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+
+ # Verify that attempting to modify would raise (MappingProxyType is read-only)
+ with pytest.raises(TypeError):
+ # MappingProxyType doesn't support item assignment
+ init_data.git_provider_tokens[ProviderType.GITLAB] = ProviderToken(
+ token=SecretStr('new_token')
+ )
+
+ def test_validator_with_settings_dict_unpacking(self, base_settings):
+ """Test validator works when creating from unpacked settings dict.
+
+ This simulates the real-world usage in conversation_service.py where
+ session_init_args is created from settings.__dict__.
+ """
+ # Simulate the pattern used in conversation_service.py
+ session_init_args = {**base_settings.__dict__}
+ session_init_args['git_provider_tokens'] = {
+ ProviderType.GITHUB: ProviderToken(
+ token=SecretStr('ghp_from_dict'), user_id='user_from_dict'
+ )
+ }
+
+ # Create ConversationInitData from unpacked dict
+ init_data = ConversationInitData(**session_init_args)
+
+ # Verify it's converted to MappingProxyType
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+ assert (
+ init_data.git_provider_tokens[ProviderType.GITHUB].token.get_secret_value()
+ == 'ghp_from_dict'
+ )
+
+ def test_validator_with_mixed_types(self, base_settings):
+ """Test validator with one field as dict and one as MappingProxyType."""
+ # git_provider_tokens as dict
+ provider_tokens_dict = {
+ ProviderType.GITHUB: ProviderToken(
+ token=SecretStr('ghp_dict_token'), user_id='user_dict'
+ )
+ }
+
+ # custom_secrets as MappingProxyType
+ custom_secret = CustomSecret(
+ secret=SecretStr('secret_proxy'), description='From proxy'
+ )
+ custom_secrets_proxy = MappingProxyType({'SECRET': custom_secret})
+
+ init_data = ConversationInitData(
+ **base_settings.__dict__,
+ git_provider_tokens=provider_tokens_dict,
+ custom_secrets=custom_secrets_proxy,
+ )
+
+ # Both should be MappingProxyType
+ assert isinstance(init_data.git_provider_tokens, MappingProxyType)
+ assert isinstance(init_data.custom_secrets, MappingProxyType)
+ # Verify the content is preserved (Pydantic may create new MappingProxyType instances)
+ assert (
+ init_data.git_provider_tokens[ProviderType.GITHUB].token.get_secret_value()
+ == 'ghp_dict_token'
+ )
+ assert (
+ init_data.custom_secrets['SECRET'].secret.get_secret_value()
+ == 'secret_proxy'
+ )
diff --git a/tests/unit/server/test_openapi_schema_generation.py b/tests/unit/server/test_openapi_schema_generation.py
index 2aa798e1e650..eb967e496c68 100644
--- a/tests/unit/server/test_openapi_schema_generation.py
+++ b/tests/unit/server/test_openapi_schema_generation.py
@@ -46,6 +46,9 @@ async def get_secrets_store(self) -> SecretsStore | None:
async def get_secrets(self) -> Secrets | None:
return None
+ async def get_mcp_api_key(self) -> str | None:
+ return None
+
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
return MockUserAuth()
diff --git a/tests/unit/test_azure_devops.py b/tests/unit/test_azure_devops.py
new file mode 100644
index 000000000000..32d275c91f74
--- /dev/null
+++ b/tests/unit/test_azure_devops.py
@@ -0,0 +1,127 @@
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from openhands.integrations.azure_devops.azure_devops_service import (
+ AzureDevOpsServiceImpl as AzureDevOpsService,
+)
+from openhands.integrations.service_types import ProviderType
+
+
+@pytest.mark.asyncio
+async def test_azure_devops_service_init():
+ """Test that the Azure DevOps service initializes correctly."""
+ service = AzureDevOpsService(
+ user_id='test_user',
+ token=None,
+ base_domain='myorg',
+ )
+
+ assert service.organization == 'myorg'
+ assert service.provider == ProviderType.AZURE_DEVOPS.value
+
+
+@pytest.mark.asyncio
+async def test_azure_devops_get_repositories():
+ """Test that the Azure DevOps service can get repositories."""
+ with patch('httpx.AsyncClient') as mock_client:
+ # Mock the response for projects
+ mock_projects_response = MagicMock()
+ mock_projects_response.json.return_value = {
+ 'value': [
+ {'name': 'Project1'},
+ ]
+ }
+ mock_projects_response.raise_for_status = AsyncMock()
+
+ # Mock the response for repositories
+ mock_repos_response = MagicMock()
+ mock_repos_response.json.return_value = {
+ 'value': [
+ {
+ 'id': 'repo1',
+ 'name': 'Repo1',
+ 'project': {'name': 'Project1'},
+ 'lastUpdateTime': '2023-01-01T00:00:00Z',
+ },
+ {
+ 'id': 'repo2',
+ 'name': 'Repo2',
+ 'project': {'name': 'Project1'},
+ 'lastUpdateTime': '2023-01-02T00:00:00Z',
+ },
+ ]
+ }
+ mock_repos_response.raise_for_status = AsyncMock()
+
+ # Set up the mock client to return our mock responses
+ # First call: get projects, Second call: get repos for Project1
+ mock_client_instance = MagicMock()
+ mock_client_instance.get = AsyncMock(
+ side_effect=[
+ mock_projects_response,
+ mock_repos_response,
+ ]
+ )
+ mock_client.return_value.__aenter__.return_value = mock_client_instance
+
+ # Create the service and call get_repositories
+ service = AzureDevOpsService(
+ user_id='test_user',
+ token=None,
+ base_domain='myorg',
+ )
+
+ # Mock the _get_azure_devops_headers method
+ service._get_azure_devops_headers = AsyncMock(return_value={})
+
+ # Call the method
+ repos = await service.get_repositories('updated', None)
+
+ # Verify the results (sorted by lastUpdateTime descending, so repo2 first)
+ assert len(repos) == 2
+ assert repos[0].id == 'repo2'
+ assert repos[0].full_name == 'myorg/Project1/Repo2'
+ assert repos[0].git_provider == ProviderType.AZURE_DEVOPS
+ assert repos[1].id == 'repo1'
+ assert repos[1].full_name == 'myorg/Project1/Repo1'
+ assert repos[1].git_provider == ProviderType.AZURE_DEVOPS
+
+
+@pytest.mark.asyncio
+async def test_azure_devops_get_repository_details():
+ """Test that the Azure DevOps service can get repository details."""
+ with patch('httpx.AsyncClient') as mock_client:
+ # Mock the response
+ mock_response = MagicMock()
+ mock_response.json.return_value = {
+ 'id': 'repo1',
+ 'name': 'Repo1',
+ 'project': {'name': 'Project1'},
+ }
+ mock_response.raise_for_status = AsyncMock()
+
+ # Set up the mock client to return our mock response
+ mock_client_instance = MagicMock()
+ mock_client_instance.get = AsyncMock(return_value=mock_response)
+ mock_client.return_value.__aenter__.return_value = mock_client_instance
+
+ # Create the service and call get_repository_details_from_repo_name
+ service = AzureDevOpsService(
+ user_id='test_user',
+ token=None,
+ base_domain='myorg',
+ )
+
+ # Mock the _get_azure_devops_headers method
+ service._get_azure_devops_headers = AsyncMock(return_value={})
+
+ # Call the method
+ repo = await service.get_repository_details_from_repo_name(
+ 'myorg/Project1/Repo1'
+ )
+
+ # Verify the results
+ assert repo.id == 'repo1'
+ assert repo.full_name == 'myorg/Project1/Repo1'
+ assert repo.git_provider == ProviderType.AZURE_DEVOPS