From 28011a13b3892a4851b4556d284c5f7cf4e5d9a3 Mon Sep 17 00:00:00 2001
From: Prathik Rao
Date: Wed, 25 Mar 2026 11:19:54 -0700
Subject: [PATCH 01/83] secure supply chain analysis fixes (#549)

Fixes the following errors I encountered when migrating our
packaging/publishing pipelines to onnxruntime-release-pipelines:

```
Starting: Secure Supply Chain Analysis (auto-injected by policy)
==============================================================================
Task         : Secure Supply Chain Analysis
Description  : A task to scan for vulnerabilities in your software supply chain. Formerly "NuGet Security Analysis".
Version      : 0.2.216
Author       : Microsoft Corporation
Help         : See https://aka.ms/sscatask for more information.
==============================================================================
Telemetry ID: 29518951-f4fb-4d5c-a56e-110cbb97c51b
For more information please visit: https://aka.ms/sscatask
Scanning repository contents at source path: E:\_work\1\s
> Starting Multifeed Nuget Security Analysis:
##[warning]samples/cs/GettingStarted/nuget.config - Multiple feeds declared. (https://aka.ms/cfs/nuget)
##[warning]sdk/cs/NuGet.config - Multiple feeds declared. (https://aka.ms/cfs/nuget)
> Starting Multifeed Corext Analysis:
> Starting Multifeed Python Security Analysis:
> Starting CFS NuGet Analysis:
##[warning]samples/cs/GettingStarted/nuget.config - CFS0013: Package source has value that is not an Azure Artifacts feed. (https://aka.ms/cfs/nuget)
##[warning]sdk/cs/NuGet.config - CFS0013: Package source has value that is not an Azure Artifacts feed. (https://aka.ms/cfs/nuget)
##[warning]sdk_legacy/cs/samples/TestApp/TestApp.csproj - CFS0011: Missing in scope NuGet.config file(s). (https://aka.ms/cfs/nuget)
##[warning]sdk_legacy/cs/src/Microsoft.AI.Foundry.Local.csproj - CFS0011: Missing in scope NuGet.config file(s). (https://aka.ms/cfs/nuget)
##[warning]sdk_legacy/cs/test/FoundryLocal.Tests/FoundryLocal.Tests.csproj - CFS0011: Missing in scope NuGet.config file(s). (https://aka.ms/cfs/nuget)
> Starting CFS NPM Analysis:
##[warning]www/.npmrc - CFS0002: Missing default registry. (https://aka.ms/cfs/npm)
##[warning]samples/js/chat-and-audio-foundry-local/package.json - CFS0001: Missing sibling .npmrc file. (https://aka.ms/cfs/npm)
##[warning]samples/js/copilot-sdk-foundry-local/package.json - CFS0001: Missing sibling .npmrc file. (https://aka.ms/cfs/npm)
##[warning]samples/js/electron-chat-application/package.json - CFS0001: Missing sibling .npmrc file. (https://aka.ms/cfs/npm)
##[warning]samples/js/tool-calling-foundry-local/package.json - CFS0001: Missing sibling .npmrc file. (https://aka.ms/cfs/npm)
##[warning]sdk/js/package.json - CFS0001: Missing sibling .npmrc file. (https://aka.ms/cfs/npm)
##[warning]sdk_legacy/js/package.json - CFS0001: Missing sibling .npmrc file. (https://aka.ms/cfs/npm)
> Starting CFS Maven Analysis:
> Starting CFS Cargo Analysis:
##[warning]samples/rust/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
##[warning]samples/rust/audio-transcription-example/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
##[warning]samples/rust/foundry-local-webserver/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
##[warning]samples/rust/native-chat-completions/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
##[warning]samples/rust/tool-calling-foundry-local/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
##[warning]sdk/rust/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
##[warning]sdk_legacy/rust/Cargo.toml - CFS0041: Missing associated .cargo/config.toml file. (https://aka.ms/cfs/cargo)
> Starting CFS CoreXT Analysis:
> Starting CFS CDPx Analysis:
> Starting DockerFile Analysis:
> Starting Kubernetes Deployment File Analysis:
> Starting Helm Charts Analysis:
> Starting Pipeline Configuration Security Analysis:
Azure Artifacts Configuration Analysis found 19 package configuration files in the repository which do not comply with Microsoft package feed security policies. The specific problems and links to their mitigations are listed above. If you need further assistance, please visit https://aka.ms/cfs/detectors .
##[error]NuGet Security Analysis found 2 NuGet package configuration files in the repository which do not comply with Microsoft package feed security policies. The specific problems are listed above. Please visit https://aka.ms/cfs/nuget for more details.
```

---------

Co-authored-by: Prathik Rao
---
 .github/workflows/build-cs-steps.yml           |   3 +++
 .github/workflows/build-js-steps.yml           |  12 +++++++-----
 .github/workflows/build-rust-steps.yml         |  12 ++++++++++++
 samples/cs/GettingStarted/nuget.config         |  11 +----------
 samples/js/chat-and-audio-foundry-local/.npmrc |   2 ++
 samples/js/copilot-sdk-foundry-local/.npmrc    |   2 ++
 samples/js/electron-chat-application/.npmrc    |   2 ++
 samples/js/tool-calling-foundry-local/.npmrc   |   2 ++
 samples/rust/.cargo/config.toml                |   7 +++++++
 sdk/cs/NuGet.config                            |   1 -
 .../Microsoft.AI.Foundry.Local.Tests.csproj    |   4 ++--
 sdk/js/.npmrc                                  |   2 ++
 sdk/js/package.json                            |   4 ++--
 sdk/js/script/install.cjs                      |   6 +++++-
 sdk/rust/.cargo/config.toml                    |   7 +++++++
 sdk_legacy/cs/NuGet.config                     |   7 +++++++
 sdk_legacy/js/.npmrc                           |   2 ++
 sdk_legacy/rust/.cargo/config.toml             |   7 +++++++
 www/.npmrc                                     |   2 ++
 19 files changed, 74 insertions(+), 21 deletions(-)
 create mode 100644 samples/js/chat-and-audio-foundry-local/.npmrc
 create mode 100644 samples/js/copilot-sdk-foundry-local/.npmrc
 create mode 100644 samples/js/electron-chat-application/.npmrc
 create mode 100644 samples/js/tool-calling-foundry-local/.npmrc
 create mode 100644 samples/rust/.cargo/config.toml
 create mode 100644 sdk/js/.npmrc
 create mode 100644 sdk/rust/.cargo/config.toml
 create mode 100644 sdk_legacy/cs/NuGet.config
 create mode 100644 sdk_legacy/js/.npmrc
 create mode 100644 sdk_legacy/rust/.cargo/config.toml

diff --git a/.github/workflows/build-cs-steps.yml b/.github/workflows/build-cs-steps.yml
index 9b089bc6..dcfed979 100644
--- a/.github/workflows/build-cs-steps.yml
+++ b/.github/workflows/build-cs-steps.yml
@@ -43,6 +43,9 @@ jobs:
       # TODO: once the nightly packaging is fixed, add back the commented out lines with /p:FoundryLocalCoreVersion="*-*"
       # /p:FoundryLocalCoreVersion="*-*" to always use nightly version of Foundry Local Core

+      - name: Authenticate to Azure Artifacts NuGet feed
+        run: dotnet nuget update source ORT-Nightly --username az --password ${{ secrets.AZURE_DEVOPS_PAT }} --store-password-in-clear-text --configfile sdk/cs/NuGet.config
+
       - name: Restore dependencies
         run: |
           # dotnet restore sdk/cs/src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=${{ inputs.useWinML }} /p:FoundryLocalCoreVersion="*-*" --configfile sdk/cs/NuGet.config
diff --git a/.github/workflows/build-js-steps.yml b/.github/workflows/build-js-steps.yml
index a806933c..d7a568a3 100644
--- a/.github/workflows/build-js-steps.yml
+++ b/.github/workflows/build-js-steps.yml
@@ -84,6 +84,13 @@ jobs:
           Write-Host "`nDirectory contents:"
           Get-ChildItem -Recurse -Depth 2 | ForEach-Object { Write-Host "  $($_.FullName)" }

+      # The .npmrc points to an Azure Artifacts feed for CFS compliance.
+      # Remove it in CI so npm uses the public registry directly.
+      - name: Remove .npmrc (use public registry)
+        shell: pwsh
+        working-directory: sdk/js
+        run: |
+          if (Test-Path .npmrc) { Remove-Item .npmrc -Force; Write-Host "Removed .npmrc" }

       - name: npm install (WinML)
         if: ${{ inputs.useWinML == true }}
@@ -95,11 +102,6 @@ jobs:
         working-directory: sdk/js
         run: npm install

-      # Verify that installing new packages doesn't strip custom native binary folders
-      - name: npm install openai (verify persistence)
-        working-directory: sdk/js
-        run: npm install openai
-
       - name: Set package version
         working-directory: sdk/js
         run: npm version ${{ env.ProjectVersion }} --no-git-tag-version --allow-same-version

diff --git a/.github/workflows/build-rust-steps.yml b/.github/workflows/build-rust-steps.yml
index 7649acaa..27c22da8 100644
--- a/.github/workflows/build-rust-steps.yml
+++ b/.github/workflows/build-rust-steps.yml
@@ -46,6 +46,18 @@ jobs:
         with:
           workspaces: sdk/rust -> target

+      # The .cargo/config.toml redirects crates-io to an Azure Artifacts feed
+      # for CFS compliance. Remove the redirect in CI so cargo can fetch from
+      # crates.io directly without Azure DevOps auth.
+      - name: Use crates.io directly
+        shell: pwsh
+        working-directory: sdk/rust
+        run: |
+          if (Test-Path .cargo/config.toml) {
+            Remove-Item .cargo/config.toml
+            Write-Host "Removed .cargo/config.toml crates-io redirect"
+          }
+
       - name: Checkout test-data-shared from Azure DevOps
         if: ${{ inputs.run-integration-tests }}
         shell: pwsh

diff --git a/samples/cs/GettingStarted/nuget.config b/samples/cs/GettingStarted/nuget.config
index 5cf1e78e..b5c4e511 100644
--- a/samples/cs/GettingStarted/nuget.config
+++ b/samples/cs/GettingStarted/nuget.config
@@ -2,15 +2,6 @@
-
-
+
-
-
-
-
-
-
-
-
\ No newline at end of file

diff --git a/samples/js/chat-and-audio-foundry-local/.npmrc b/samples/js/chat-and-audio-foundry-local/.npmrc
new file mode 100644
index 00000000..114ea2a4
--- /dev/null
+++ b/samples/js/chat-and-audio-foundry-local/.npmrc
@@ -0,0 +1,2 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true

diff --git a/samples/js/copilot-sdk-foundry-local/.npmrc b/samples/js/copilot-sdk-foundry-local/.npmrc
new file mode 100644
index 00000000..114ea2a4
--- /dev/null
+++ b/samples/js/copilot-sdk-foundry-local/.npmrc
@@ -0,0 +1,2 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true

diff --git a/samples/js/electron-chat-application/.npmrc b/samples/js/electron-chat-application/.npmrc
new file mode 100644
index 00000000..114ea2a4
--- /dev/null
+++ b/samples/js/electron-chat-application/.npmrc
@@ -0,0 +1,2 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true

diff --git a/samples/js/tool-calling-foundry-local/.npmrc b/samples/js/tool-calling-foundry-local/.npmrc
new file mode 100644
index 00000000..114ea2a4
--- /dev/null
+++ b/samples/js/tool-calling-foundry-local/.npmrc
@@ -0,0 +1,2 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true

diff --git a/samples/rust/.cargo/config.toml b/samples/rust/.cargo/config.toml
new file mode 100644
index 00000000..84c57445
--- /dev/null
+++ b/samples/rust/.cargo/config.toml
@@ -0,0 +1,7 @@
+[registries]
+
+[source.crates-io]
+replace-with = "ORT-Nightly"
+
+[source.ORT-Nightly]
+registry = "sparse+https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/Cargo/index/"

diff --git a/sdk/cs/NuGet.config b/sdk/cs/NuGet.config
index 294478a7..420497e9 100644
--- a/sdk/cs/NuGet.config
+++ b/sdk/cs/NuGet.config
@@ -2,7 +2,6 @@
-

diff --git a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj
index b0bd3cd0..5f0c7cf2 100644
--- a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj
+++ b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj
@@ -1,7 +1,7 @@

-    net9.0
+    net10.0
     enable
     enable
     false
@@ -19,7 +19,7 @@

-    net9.0-windows10.0.26100.0
+    net10.0-windows10.0.26100.0
     10.0.17763.0
     None
     true

diff --git a/sdk/js/.npmrc b/sdk/js/.npmrc
new file mode 100644
index 00000000..114ea2a4
--- /dev/null
+++ b/sdk/js/.npmrc
@@ -0,0 +1,2 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true

diff --git a/sdk/js/package.json b/sdk/js/package.json
index bdfadf5e..46ae6ce5 100644
--- a/sdk/js/package.json
+++ b/sdk/js/package.json
@@ -1,6 +1,6 @@
 {
-  "name": "@prathikrao/foundry-local-sdk",
-  "version": "0.0.3",
+  "name": "foundry-local-sdk",
+  "version": "0.9.0",
   "description": "Foundry Local JavaScript SDK",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",
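The install-script change below builds on npm's flag-to-config mapping: npm exposes any `--key=value` flag passed on the command line to lifecycle scripts as an `npm_config_key` environment variable. A minimal sketch of how the three flags read by `install.cjs` surface at install time (the invocation shown is illustrative; only `--nodeps` is new in this patch):

```js
// Sketch: npm translates --winml=true / --nightly=true / --nodeps=true into
// npm_config_winml / npm_config_nightly / npm_config_nodeps environment
// variables before it runs lifecycle scripts such as install.cjs.
// Example invocation: npm install foundry-local-sdk --winml=true --nodeps=true
const useWinML = process.env.npm_config_winml === 'true';
const useNightly = process.env.npm_config_nightly === 'true';
const noDeps = process.env.npm_config_nodeps === 'true';

console.log({ useWinML, useNightly, noDeps });
```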
diff --git a/sdk/js/script/install.cjs b/sdk/js/script/install.cjs
index 3db771b8..cdf5531d 100644
--- a/sdk/js/script/install.cjs
+++ b/sdk/js/script/install.cjs
@@ -40,6 +40,7 @@ const REQUIRED_FILES = [
 // Instead, it sets an environment variable named npm_config_winml to 'true'.
 const useWinML = process.env.npm_config_winml === 'true';
 const useNightly = process.env.npm_config_nightly === 'true';
+const noDeps = process.env.npm_config_nodeps === 'true';

 console.log(`[foundry-local] WinML enabled: ${useWinML}`);
 console.log(`[foundry-local] Nightly enabled: ${useNightly}`);
@@ -120,7 +121,10 @@ const LINUX_ARTIFACTS = [
 ];

 let ARTIFACTS = [];
-if (useWinML) {
+if (noDeps) {
+  console.log(`[foundry-local] Skipping dependencies install...`);
+  ARTIFACTS = [];
+} else if (useWinML) {
   console.log(`[foundry-local] Using WinML artifacts...`);
   ARTIFACTS = WINML_ARTIFACTS;
 } else if (os.platform() === 'linux') {

diff --git a/sdk/rust/.cargo/config.toml b/sdk/rust/.cargo/config.toml
new file mode 100644
index 00000000..84c57445
--- /dev/null
+++ b/sdk/rust/.cargo/config.toml
@@ -0,0 +1,7 @@
+[registries]
+
+[source.crates-io]
+replace-with = "ORT-Nightly"
+
+[source.ORT-Nightly]
+registry = "sparse+https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/Cargo/index/"

diff --git a/sdk_legacy/cs/NuGet.config b/sdk_legacy/cs/NuGet.config
new file mode 100644
index 00000000..420497e9
--- /dev/null
+++ b/sdk_legacy/cs/NuGet.config
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+

diff --git a/sdk_legacy/js/.npmrc b/sdk_legacy/js/.npmrc
new file mode 100644
index 00000000..114ea2a4
--- /dev/null
+++ b/sdk_legacy/js/.npmrc
@@ -0,0 +1,2 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true

diff --git a/sdk_legacy/rust/.cargo/config.toml b/sdk_legacy/rust/.cargo/config.toml
new file mode 100644
index 00000000..84c57445
--- /dev/null
+++ b/sdk_legacy/rust/.cargo/config.toml
@@ -0,0 +1,7 @@
+[registries]
+
+[source.crates-io]
+replace-with = "ORT-Nightly"
+
+[source.ORT-Nightly]
+registry = "sparse+https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/Cargo/index/"

diff --git a/www/.npmrc b/www/.npmrc
index b6f27f13..06fe7275 100644
--- a/www/.npmrc
+++ b/www/.npmrc
@@ -1 +1,3 @@
+registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+always-auth=true
 engine-strict=true

From 15cf28fdda9ef4f12651529abec3f7e76b29c4f1 Mon Sep 17 00:00:00 2001
From: Prathik Rao
Date: Wed, 25 Mar 2026 14:20:08 -0700
Subject: [PATCH 02/83] init dummy ADO packaging pipeline for FLC & SDK (#553)

Co-authored-by: Prathik Rao
---
 .pipelines/foundry-local-packaging.yml | 9 +++++++++
 1 file changed, 9 insertions(+)
 create mode 100644 .pipelines/foundry-local-packaging.yml

diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml
new file mode 100644
index 00000000..b87eb70e
--- /dev/null
+++ b/.pipelines/foundry-local-packaging.yml
@@ -0,0 +1,9 @@
+# Foundry Local SDK Packaging Pipeline (placeholder)
+trigger: none
+
+pool:
+  vmImage: 'windows-latest'
+
+steps:
+- script: echo "Foundry Local packaging pipeline - placeholder"
+  displayName: 'Placeholder'
\ No newline at end of file

From 9434df7c8c99dcf306deb51f71cd3b3f12a468e4 Mon Sep 17 00:00:00 2001
From: Copilot <198982749+Copilot@users.noreply.github.com>
Date: Thu, 26 Mar 2026 09:40:54 -0700
Subject: [PATCH 03/83] Convert JS SDK streaming APIs from callbacks to async
 iterables (#545)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- [x] Convert JS SDK streaming APIs from callbacks to async iterables
- [x] Add `return()` hook to async iterators to prevent unbounded buffering on early break
- [x] Add guards in streaming callbacks to skip work after error or cancellation
- [x] Fix test assertions to assert synchronous throws directly
- [x] Replace O(n) `chunks.shift()` with O(1) head-index dequeue with compaction
- [x] Guard against concurrent `next()` calls with `nextInFlight` flag
- [x] Add comment explaining native stream cancellation limitation in `return()`
- [x] Fix docs example for `completeStreamingChat(messages, tools)` overload to pass `tools`
- [x] Regenerate TypeDoc API docs
- [x] Type-check, code review, and security scan
- [x] Add comments explaining why local variable captures are needed (closures lose `this`)
- [x] Add comments clarifying promise-resolve wake-up pattern in `.then()` handler
- [x] Add structural comments explaining the AsyncIterable/AsyncIterator factory pattern
- [x] Apply same readability improvements to chatClient.ts

---
⚡ Quickly spin up Copilot coding agent tasks from anywhere on your macOS or Windows machine with [Raycast](https://gh.io/cca-raycast-docs).

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: baijumeswani <12852605+baijumeswani@users.noreply.github.com>
---
 README.md                                     |   4 +-
 samples/js/audio-transcription-example/app.js |   6 +-
 .../chat-and-audio-foundry-local/src/app.js   |  27 ++-
 samples/js/native-chat-completions/app.js     |  15 +-
 sdk/js/README.md                              |  34 ++-
 sdk/js/docs/README.md                         | 178 ++++++++--------
 sdk/js/docs/classes/AudioClient.md            |  19 +-
 sdk/js/docs/classes/AudioClientSettings.md    |   4 +-
 sdk/js/docs/classes/ChatClient.md             |  59 ++++--
 sdk/js/docs/classes/ChatClientSettings.md     |  20 +-
 sdk/js/docs/classes/Model.md                  |   2 +-
 .../docs/classes/ResponsesClientSettings.md   |  28 +--
 sdk/js/examples/audio-transcription.ts        |   4 +-
 sdk/js/examples/chat-completion.ts            |  15 +-
 sdk/js/examples/tool-calling.ts               |  40 ++--
 sdk/js/src/openai/audioClient.ts              | 179 +++++++++++-----
 sdk/js/src/openai/chatClient.ts               | 194 +++++++++++++-----
 sdk/js/test/openai/audioClient.test.ts        |  27 +--
 sdk/js/test/openai/chatClient.test.ts         |  36 +---
 19 files changed, 528 insertions(+), 363 deletions(-)

diff --git a/README.md b/README.md
index 14c53229..07bc9b4d 100644
--- a/README.md
+++ b/README.md
@@ -232,9 +232,9 @@ const result = await audioClient.transcribe('recording.wav');
 console.log('Transcription:', result.text);

 // Or stream in real-time
-await audioClient.transcribeStreaming('recording.wav', (chunk) => {
+for await (const chunk of audioClient.transcribeStreaming('recording.wav')) {
   process.stdout.write(chunk.text);
-});
+}

 await whisperModel.unload();
 ```

diff --git a/samples/js/audio-transcription-example/app.js b/samples/js/audio-transcription-example/app.js
index fe441d1b..78efc8af 100644
--- a/samples/js/audio-transcription-example/app.js
+++ b/samples/js/audio-transcription-example/app.js
@@ -39,12 +39,12 @@ console.log('\nAudio transcription result:');
 console.log(transcription.text);
 console.log('✓ Audio transcription completed');

-// Same example but with streaming transcription using callback
+// Same example but with streaming transcription using async iteration
 console.log('\nTesting streaming audio transcription...');
-await audioClient.transcribeStreaming('./Recording.mp3', (result) => {
+for await (const result of audioClient.transcribeStreaming('./Recording.mp3')) {
   // Output the intermediate transcription results as they are received without line ending
   process.stdout.write(result.text);
-});
+}
 console.log('\n✓ Streaming transcription completed');

 // Unload the model
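To make the new contract concrete before the remaining diffs, here is a hedged consumer-side sketch of the async-iterable API this patch introduces. Leaving the `for await` loop early invokes the iterator's `return()` hook, which clears the internal buffer and makes later native callbacks no-op, as described in the checklist above (the prompt text and the 200-character cutoff are illustrative, not from the patch):

```js
// Sketch: stream a chat completion and stop after roughly 200 characters.
// Breaking out of the loop calls the iterator's return() hook, so chunks the
// native callback delivers afterwards are discarded instead of buffered.
let printed = 0;
for await (const chunk of chatClient.completeStreamingChat([
  { role: 'user', content: 'Tell me a long story.' },
])) {
  const content = chunk.choices?.[0]?.message?.content;
  if (content) {
    process.stdout.write(content);
    printed += content.length;
  }
  if (printed > 200) break; // triggers return(): buffer cleared, callback no-ops
}
```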
diff --git a/samples/js/chat-and-audio-foundry-local/src/app.js b/samples/js/chat-and-audio-foundry-local/src/app.js
index b3084816..49ce199c 100644
--- a/samples/js/chat-and-audio-foundry-local/src/app.js
+++ b/samples/js/chat-and-audio-foundry-local/src/app.js
@@ -76,22 +76,19 @@ async function main() {
   // Summarize the transcription
   console.log("Generating summary...\n");

-  await chatClient.completeStreamingChat(
-    [
-      {
-        role: "system",
-        content:
-          "You are a helpful assistant. Summarize the following transcribed audio and extract key themes and action items.",
-      },
-      { role: "user", content: transcription.text },
-    ],
-    (chunk) => {
-      const content = chunk.choices?.[0]?.message?.content;
-      if (content) {
-        process.stdout.write(content);
-      }
+  for await (const chunk of chatClient.completeStreamingChat([
+    {
+      role: "system",
+      content:
+        "You are a helpful assistant. Summarize the following transcribed audio and extract key themes and action items.",
+    },
+    { role: "user", content: transcription.text },
+  ])) {
+    const content = chunk.choices?.[0]?.message?.content;
+    if (content) {
+      process.stdout.write(content);
     }
-  );
+  }
   console.log("\n");

   // --- Clean up ---

diff --git a/samples/js/native-chat-completions/app.js b/samples/js/native-chat-completions/app.js
index af566ef7..67348e8c 100644
--- a/samples/js/native-chat-completions/app.js
+++ b/samples/js/native-chat-completions/app.js
@@ -41,15 +41,14 @@ console.log(completion.choices[0]?.message?.content);

 // Example streaming completion
 console.log('\nTesting streaming completion...');
-await chatClient.completeStreamingChat(
-  [{ role: 'user', content: 'Write a short poem about programming.' }],
-  (chunk) => {
-    const content = chunk.choices?.[0]?.message?.content;
-    if (content) {
-      process.stdout.write(content);
-    }
+for await (const chunk of chatClient.completeStreamingChat(
+  [{ role: 'user', content: 'Write a short poem about programming.' }]
+)) {
+  const content = chunk.choices?.[0]?.message?.content;
+  if (content) {
+    process.stdout.write(content);
   }
-);
+}
 console.log('\n');

 // Unload the model

diff --git a/sdk/js/README.md b/sdk/js/README.md
index 3308c9d8..9b08f9ac 100644
--- a/sdk/js/README.md
+++ b/sdk/js/README.md
@@ -69,15 +69,14 @@ console.log(completion.choices[0]?.message?.content);

 // Example streaming completion
 console.log('\nTesting streaming completion...');
-await chatClient.completeStreamingChat(
-  [{ role: 'user', content: 'Write a short poem about programming.' }],
-  (chunk) => {
-    const content = chunk.choices?.[0]?.message?.content;
-    if (content) {
-      process.stdout.write(content);
-    }
+for await (const chunk of chatClient.completeStreamingChat(
+  [{ role: 'user', content: 'Write a short poem about programming.' }]
+)) {
+  const content = chunk.choices?.[0]?.message?.content;
+  if (content) {
+    process.stdout.write(content);
   }
-);
+}
 console.log('\n');

 // Unload the model
@@ -157,15 +156,14 @@ console.log(response.choices[0].message.content);
 For real-time output, use streaming:

 ```typescript
-await chatClient.completeStreamingChat(
-  [{ role: 'user', content: 'Write a short poem about programming.' }],
-  (chunk) => {
-    const content = chunk.choices?.[0]?.message?.content;
-    if (content) {
-      process.stdout.write(content);
-    }
+for await (const chunk of chatClient.completeStreamingChat(
+  [{ role: 'user', content: 'Write a short poem about programming.' }]
+)) {
+  const content = chunk.choices?.[0]?.message?.content;
+  if (content) {
+    process.stdout.write(content);
   }
-);
+}
 ```

 ### Audio Transcription
@@ -180,9 +178,9 @@ audioClient.settings.language = 'en';
 const result = await audioClient.transcribe('/path/to/audio.wav');

 // Streaming transcription
-await audioClient.transcribeStreaming('/path/to/audio.wav', (chunk) => {
+for await (const chunk of audioClient.transcribeStreaming('/path/to/audio.wav')) {
   console.log(chunk);
-});
+}
 ```

 ### Embedded Web Service

diff --git a/sdk/js/docs/README.md b/sdk/js/docs/README.md
index e79be84d..58218628 100644
--- a/sdk/js/docs/README.md
+++ b/sdk/js/docs/README.md
@@ -163,7 +163,7 @@ Use a plain object with these properties to configure the SDK.
 ##### additionalSettings?

 ```ts
-optional additionalSettings: {
+optional additionalSettings?: {
   [key: string]: string;
 };
 ```
@@ -180,7 +180,7 @@ Optional. Internal use only.
 ##### appDataDir?

 ```ts
-optional appDataDir: string;
+optional appDataDir?: string;
 ```

 The directory where application data should be stored.
@@ -198,7 +198,7 @@ Used for identifying the application in logs and telemetry.
 ##### libraryPath?

 ```ts
-optional libraryPath: string;
+optional libraryPath?: string;
 ```

 The path to the directory containing the native Foundry Local Core libraries.
@@ -208,7 +208,7 @@ If not provided, the SDK attempts to discover them in standard locations.
 ##### logLevel?

 ```ts
-optional logLevel: "trace" | "debug" | "info" | "warn" | "error" | "fatal";
+optional logLevel?: "trace" | "debug" | "info" | "warn" | "error" | "fatal";
 ```

 The logging level for the SDK.
@@ -218,7 +218,7 @@ Defaults to 'warn'.
 ##### logsDir?

 ```ts
-optional logsDir: string;
+optional logsDir?: string;
 ```

 The directory where log files are written.
@@ -227,7 +227,7 @@ Optional. Defaults to `{appDataDir}/logs`.
 ##### modelCacheDir?

 ```ts
-optional modelCacheDir: string;
+optional modelCacheDir?: string;
 ```

 The directory where models are downloaded and cached.
@@ -236,7 +236,7 @@ Optional. Defaults to `{appDataDir}/cache/models`.
 ##### serviceEndpoint?

 ```ts
-optional serviceEndpoint: string;
+optional serviceEndpoint?: string;
 ```

 The external URL if the web service is running in a separate process.
@@ -245,7 +245,7 @@ Optional. This is used to connect to an existing service instance.
 ##### webServiceUrls?

 ```ts
-optional webServiceUrls: string;
+optional webServiceUrls?: string;
 ```

 The URL(s) for the local web service to bind to.
@@ -351,7 +351,7 @@ call_id: string;
 ##### id?

 ```ts
-optional id: string;
+optional id?: string;
 ```

 ##### name
@@ -363,7 +363,7 @@ name: string;
 ##### status?

 ```ts
-optional status: ResponseItemStatus;
+optional status?: ResponseItemStatus;
 ```

 ##### type
@@ -387,7 +387,7 @@ call_id: string;
 ##### id?

 ```ts
-optional id: string;
+optional id?: string;
 ```

 ##### output
@@ -399,7 +399,7 @@ output: string | ContentPart[];
 ##### status?

 ```ts
-optional status: ResponseItemStatus;
+optional status?: ResponseItemStatus;
 ```

 ##### type
@@ -417,7 +417,7 @@ type: "function_call_output";
 ##### description?

 ```ts
-optional description: string;
+optional description?: string;
 ```

 ##### name
@@ -429,13 +429,13 @@ name: string;
 ##### parameters?

 ```ts
-optional parameters: Record;
+optional parameters?: Record;
 ```

 ##### strict?

 ```ts
-optional strict: boolean;
+optional strict?: boolean;
 ```

 ##### type
@@ -671,7 +671,7 @@ type: "item_reference";
 ##### bytes?
 ```ts
-optional bytes: number[];
+optional bytes?: number[];
 ```

 ##### logprob
@@ -701,7 +701,7 @@ content: string | ContentPart[];
 ##### id?

 ```ts
-optional id: string;
+optional id?: string;
 ```

 ##### role
@@ -713,7 +713,7 @@ role: MessageRole;
 ##### status?

 ```ts
-optional status: ResponseItemStatus;
+optional status?: ResponseItemStatus;
 ```

 ##### type
@@ -749,13 +749,13 @@ createdAtUnix: number;
 ##### displayName?

 ```ts
-optional displayName: string | null;
+optional displayName?: string | null;
 ```

 ##### fileSizeMb?

 ```ts
-optional fileSizeMb: number | null;
+optional fileSizeMb?: number | null;
 ```

 ##### id
@@ -767,31 +767,31 @@ id: string;
 ##### license?

 ```ts
-optional license: string | null;
+optional license?: string | null;
 ```

 ##### licenseDescription?

 ```ts
-optional licenseDescription: string | null;
+optional licenseDescription?: string | null;
 ```

 ##### maxOutputTokens?

 ```ts
-optional maxOutputTokens: number | null;
+optional maxOutputTokens?: number | null;
 ```

 ##### minFLVersion?

 ```ts
-optional minFLVersion: string | null;
+optional minFLVersion?: string | null;
 ```

 ##### modelSettings?

 ```ts
-optional modelSettings: ModelSettings | null;
+optional modelSettings?: ModelSettings | null;
 ```

 ##### modelType
@@ -809,7 +809,7 @@ name: string;
 ##### promptTemplate?

 ```ts
-optional promptTemplate: PromptTemplate | null;
+optional promptTemplate?: PromptTemplate | null;
 ```

 ##### providerType
@@ -821,25 +821,25 @@ providerType: string;
 ##### publisher?

 ```ts
-optional publisher: string | null;
+optional publisher?: string | null;
 ```

 ##### runtime?

 ```ts
-optional runtime: Runtime | null;
+optional runtime?: Runtime | null;
 ```

 ##### supportsToolCalling?

 ```ts
-optional supportsToolCalling: boolean | null;
+optional supportsToolCalling?: boolean | null;
 ```

 ##### task?

 ```ts
-optional task: string | null;
+optional task?: string | null;
 ```

 ##### uri
@@ -863,7 +863,7 @@ version: number;
 ##### parameters?

 ```ts
-optional parameters: Parameter[] | null;
+optional parameters?: Parameter[] | null;
 ```

 ***
@@ -947,13 +947,13 @@ type: "response.output_item.done";
 ##### annotations?

 ```ts
-optional annotations: Annotation[];
+optional annotations?: Annotation[];
 ```

 ##### logprobs?

 ```ts
-optional logprobs: LogProb[];
+optional logprobs?: LogProb[];
 ```

 ##### text
@@ -1067,7 +1067,7 @@ name: string;
 ##### value?

 ```ts
-optional value: string | null;
+optional value?: string | null;
 ```

 ***
@@ -1091,13 +1091,13 @@ prompt: string;
 ##### system?

 ```ts
-optional system: string | null;
+optional system?: string | null;
 ```

 ##### user?

 ```ts
-optional user: string | null;
+optional user?: string | null;
 ```

 ***
@@ -1109,13 +1109,13 @@ optional user: string | null;
 ##### effort?

 ```ts
-optional effort: string;
+optional effort?: string;
 ```

 ##### summary?

 ```ts
-optional summary: string;
+optional summary?: string;
 ```

 ***
@@ -1127,31 +1127,31 @@ optional summary: string;
 ##### content?

 ```ts
-optional content: ContentPart[];
+optional content?: ContentPart[];
 ```

 ##### encrypted\_content?

 ```ts
-optional encrypted_content: string;
+optional encrypted_content?: string;
 ```

 ##### id?

 ```ts
-optional id: string;
+optional id?: string;
 ```

 ##### status?

 ```ts
-optional status: ResponseItemStatus;
+optional status?: ResponseItemStatus;
 ```

 ##### summary?

 ```ts
-optional summary: string;
+optional summary?: string;
 ```

 ##### type

 ***
@@ -1259,121 +1259,121 @@ type: "response.refusal.done";
 ##### frequency\_penalty?

 ```ts
-optional frequency_penalty: number;
+optional frequency_penalty?: number;
 ```

 ##### input?

 ```ts
-optional input: string | ResponseInputItem[];
+optional input?: string | ResponseInputItem[];
 ```

 ##### instructions?

 ```ts
-optional instructions: string;
+optional instructions?: string;
 ```

 ##### max\_output\_tokens?

 ```ts
-optional max_output_tokens: number;
+optional max_output_tokens?: number;
 ```

 ##### metadata?

 ```ts
-optional metadata: Record;
+optional metadata?: Record;
 ```

 ##### model?

 ```ts
-optional model: string;
+optional model?: string;
 ```

 ##### parallel\_tool\_calls?

 ```ts
-optional parallel_tool_calls: boolean;
+optional parallel_tool_calls?: boolean;
 ```

 ##### presence\_penalty?

 ```ts
-optional presence_penalty: number;
+optional presence_penalty?: number;
 ```

 ##### previous\_response\_id?

 ```ts
-optional previous_response_id: string;
+optional previous_response_id?: string;
 ```

 ##### reasoning?

 ```ts
-optional reasoning: ReasoningConfig;
+optional reasoning?: ReasoningConfig;
 ```

 ##### seed?

 ```ts
-optional seed: number;
+optional seed?: number;
 ```

 ##### store?

 ```ts
-optional store: boolean;
+optional store?: boolean;
 ```

 ##### stream?

 ```ts
-optional stream: boolean;
+optional stream?: boolean;
 ```

 ##### temperature?

 ```ts
-optional temperature: number;
+optional temperature?: number;
 ```

 ##### text?

 ```ts
-optional text: TextConfig;
+optional text?: TextConfig;
 ```

 ##### tool\_choice?

 ```ts
-optional tool_choice: ResponseToolChoice;
+optional tool_choice?: ResponseToolChoice;
 ```

 ##### tools?

 ```ts
-optional tools: FunctionToolDefinition[];
+optional tools?: FunctionToolDefinition[];
 ```

 ##### top\_p?

 ```ts
-optional top_p: number;
+optional top_p?: number;
 ```

 ##### truncation?

 ```ts
-optional truncation: TruncationStrategy;
+optional truncation?: TruncationStrategy;
 ```

 ##### user?

 ```ts
-optional user: string;
+optional user?: string;
 ```

 ***
@@ -1403,13 +1403,13 @@ message: string;
 ##### jsonSchema?

 ```ts
-optional jsonSchema: string;
+optional jsonSchema?: string;
 ```

 ##### larkGrammar?

 ```ts
-optional larkGrammar: string;
+optional larkGrammar?: string;
 ```

 ##### type
@@ -1457,13 +1457,13 @@ type:
 ##### cancelled\_at?

 ```ts
-optional cancelled_at: number | null;
+optional cancelled_at?: number | null;
 ```

 ##### completed\_at?

 ```ts
-optional completed_at: number | null;
+optional completed_at?: number | null;
 ```

 ##### created\_at
@@ -1475,13 +1475,13 @@ created_at: number;
 ##### error?

 ```ts
-optional error: ResponseError | null;
+optional error?: ResponseError | null;
 ```

 ##### failed\_at?

 ```ts
-optional failed_at: number | null;
+optional failed_at?: number | null;
 ```

 ##### frequency\_penalty
@@ -1499,25 +1499,25 @@ id: string;
 ##### incomplete\_details?

 ```ts
-optional incomplete_details: IncompleteDetails | null;
+optional incomplete_details?: IncompleteDetails | null;
 ```

 ##### instructions?

 ```ts
-optional instructions: string | null;
+optional instructions?: string | null;
 ```

 ##### max\_output\_tokens?

 ```ts
-optional max_output_tokens: number | null;
+optional max_output_tokens?: number | null;
 ```

 ##### metadata?

 ```ts
-optional metadata: Record | null;
+optional metadata?: Record | null;
 ```

 ##### model
@@ -1553,13 +1553,13 @@ presence_penalty: number;
 ##### previous\_response\_id?

 ```ts
-optional previous_response_id: string | null;
+optional previous_response_id?: string | null;
 ```

 ##### reasoning?

 ```ts
-optional reasoning: ReasoningConfig | null;
+optional reasoning?: ReasoningConfig | null;
 ```

 ##### status
@@ -1613,13 +1613,13 @@ truncation: TruncationStrategy;
 ##### usage?

 ```ts
-optional usage: ResponseUsage | null;
+optional usage?: ResponseUsage | null;
 ```

 ##### user?

 ```ts
-optional user: string | null;
+optional user?: string | null;
 ```

 ***
@@ -1655,7 +1655,7 @@ input_tokens: number;
 ##### input\_tokens\_details?

 ```ts
-optional input_tokens_details: {
+optional input_tokens_details?: {
   cached_tokens: number;
 };
 ```
@@ -1675,7 +1675,7 @@ output_tokens: number;
 ##### output\_tokens\_details?

 ```ts
-optional output_tokens_details: {
+optional output_tokens_details?: {
   reasoning_tokens: number;
 };
 ```
@@ -1719,19 +1719,19 @@ executionProvider: string;
 ##### code?

 ```ts
-optional code: string;
+optional code?: string;
 ```

 ##### message?

 ```ts
-optional message: string;
+optional message?: string;
 ```

 ##### param?

 ```ts
-optional param: string;
+optional param?: string;
 ```

 ##### sequence\_number
@@ -1755,13 +1755,13 @@ type: "error";
 ##### format?

 ```ts
-optional format: TextFormat;
+optional format?: TextFormat;
 ```

 ##### verbosity?

 ```ts
-optional verbosity: string;
+optional verbosity?: string;
 ```

 ***
@@ -1773,25 +1773,25 @@ optional verbosity: string;
 ##### description?

 ```ts
-optional description: string;
+optional description?: string;
 ```

 ##### name?

 ```ts
-optional name: string;
+optional name?: string;
 ```

 ##### schema?

 ```ts
-optional schema: unknown;
+optional schema?: unknown;
 ```

 ##### strict?

 ```ts
-optional strict: boolean;
+optional strict?: boolean;
 ```

 ##### type
@@ -1809,7 +1809,7 @@ type: string;
 ##### name?

 ```ts
-optional name: string;
+optional name?: string;
 ```

 ##### type

diff --git a/sdk/js/docs/classes/AudioClient.md b/sdk/js/docs/classes/AudioClient.md
index 7fd13bd8..12e79de5 100644
--- a/sdk/js/docs/classes/AudioClient.md
+++ b/sdk/js/docs/classes/AudioClient.md
@@ -46,24 +46,31 @@ Error - If audioFilePath is invalid or transcription fails.
 ### transcribeStreaming()

 ```ts
-transcribeStreaming(audioFilePath, callback): Promise<void>;
+transcribeStreaming(audioFilePath): AsyncIterable<any>;
 ```

-Transcribes audio into the input language using streaming.
+Transcribes audio into the input language using streaming, returning an async iterable of chunks.

 #### Parameters

 | Parameter | Type | Description |
 | ------ | ------ | ------ |
 | `audioFilePath` | `string` | Path to the audio file to transcribe. |
-| `callback` | (`chunk`) => `void` | A callback function that receives each chunk of the streaming response. |

 #### Returns

-`Promise`\<`void`\>
+`AsyncIterable`\<`any`\>

-A promise that resolves when the stream is complete.
+An async iterable that yields parsed streaming transcription chunks.

 #### Throws

-Error - If audioFilePath or callback are invalid, or streaming fails.
+Error - If audioFilePath is invalid, or streaming fails.
+
+#### Example
+
+```typescript
+for await (const chunk of audioClient.transcribeStreaming('recording.wav')) {
+  process.stdout.write(chunk.text);
+}
+```

diff --git a/sdk/js/docs/classes/AudioClientSettings.md b/sdk/js/docs/classes/AudioClientSettings.md
index 619c526b..dae7cbbe 100644
--- a/sdk/js/docs/classes/AudioClientSettings.md
+++ b/sdk/js/docs/classes/AudioClientSettings.md
@@ -19,7 +19,7 @@ new AudioClientSettings(): AudioClientSettings;
 ### language?

 ```ts
-optional language: string;
+optional language?: string;
 ```

 ***

 ### temperature?
 ```ts
-optional temperature: number;
+optional temperature?: number;
 ```

diff --git a/sdk/js/docs/classes/ChatClient.md b/sdk/js/docs/classes/ChatClient.md
index 91e877aa..c3120f0b 100644
--- a/sdk/js/docs/classes/ChatClient.md
+++ b/sdk/js/docs/classes/ChatClient.md
@@ -75,53 +75,80 @@ Error - If messages or tools are invalid or completion fails.
 #### Call Signature

 ```ts
-completeStreamingChat(messages, callback): Promise<void>;
+completeStreamingChat(messages): AsyncIterable<any>;
 ```

-Performs a streaming chat completion.
+Performs a streaming chat completion, returning an async iterable of chunks.

 ##### Parameters

 | Parameter | Type | Description |
 | ------ | ------ | ------ |
 | `messages` | `any`[] | An array of message objects. |
-| `callback` | (`chunk`) => `void` | A callback function that receives each chunk of the streaming response. |

 ##### Returns

-`Promise`\<`void`\>
+`AsyncIterable`\<`any`\>

-A promise that resolves when the stream is complete.
+An async iterable that yields parsed streaming response chunks.

 ##### Throws

-Error - If messages, tools, or callback are invalid, or streaming fails.
+Error - If messages or tools are invalid, or streaming fails.
+
+##### Example
+
+```typescript
+// Without tools:
+for await (const chunk of chatClient.completeStreamingChat(messages)) {
+  const content = chunk.choices?.[0]?.delta?.content;
+  if (content) process.stdout.write(content);
+}
+
+// With tools:
+for await (const chunk of chatClient.completeStreamingChat(messages, tools)) {
+  const content = chunk.choices?.[0]?.delta?.content;
+  if (content) process.stdout.write(content);
+}
+```

 #### Call Signature

 ```ts
-completeStreamingChat(
-   messages,
-   tools,
-callback): Promise<void>;
+completeStreamingChat(messages, tools): AsyncIterable<any>;
 ```

-Performs a streaming chat completion.
+Performs a streaming chat completion, returning an async iterable of chunks.

 ##### Parameters

 | Parameter | Type | Description |
 | ------ | ------ | ------ |
 | `messages` | `any`[] | An array of message objects. |
-| `tools` | `any`[] | An array of tool objects. |
-| `callback` | (`chunk`) => `void` | A callback function that receives each chunk of the streaming response. |
+| `tools` | `any`[] | An optional array of tool objects. |

 ##### Returns

-`Promise`\<`void`\>
+`AsyncIterable`\<`any`\>

-A promise that resolves when the stream is complete.
+An async iterable that yields parsed streaming response chunks.

 ##### Throws

-Error - If messages, tools, or callback are invalid, or streaming fails.
+Error - If messages or tools are invalid, or streaming fails.
+
+##### Example
+
+```typescript
+// Without tools:
+for await (const chunk of chatClient.completeStreamingChat(messages)) {
+  const content = chunk.choices?.[0]?.delta?.content;
+  if (content) process.stdout.write(content);
+}
+
+// With tools:
+for await (const chunk of chatClient.completeStreamingChat(messages, tools)) {
+  const content = chunk.choices?.[0]?.delta?.content;
+  if (content) process.stdout.write(content);
+}
+```

diff --git a/sdk/js/docs/classes/ChatClientSettings.md b/sdk/js/docs/classes/ChatClientSettings.md
index 7fed8a46..7d48bcca 100644
--- a/sdk/js/docs/classes/ChatClientSettings.md
+++ b/sdk/js/docs/classes/ChatClientSettings.md
@@ -19,7 +19,7 @@ new ChatClientSettings(): ChatClientSettings;
 ### frequencyPenalty?

 ```ts
-optional frequencyPenalty: number;
+optional frequencyPenalty?: number;
 ```

 ***
@@ -27,7 +27,7 @@ optional frequencyPenalty: number;
 ### maxTokens?

 ```ts
-optional maxTokens: number;
+optional maxTokens?: number;
 ```

 ***
@@ -35,7 +35,7 @@ optional maxTokens: number;
 ### n?

 ```ts
-optional n: number;
+optional n?: number;
 ```

 ***
@@ -43,7 +43,7 @@ optional n: number;
 ### presencePenalty?

 ```ts
-optional presencePenalty: number;
+optional presencePenalty?: number;
 ```

 ***
@@ -51,7 +51,7 @@ optional presencePenalty: number;
 ### randomSeed?

 ```ts
-optional randomSeed: number;
+optional randomSeed?: number;
 ```

 ***
@@ -59,7 +59,7 @@ optional randomSeed: number;
 ### responseFormat?

 ```ts
-optional responseFormat: ResponseFormat;
+optional responseFormat?: ResponseFormat;
 ```

 ***
@@ -67,7 +67,7 @@ optional responseFormat: ResponseFormat;
 ### temperature?

 ```ts
-optional temperature: number;
+optional temperature?: number;
 ```

 ***
@@ -75,7 +75,7 @@ optional temperature: number;
 ### toolChoice?

 ```ts
-optional toolChoice: ToolChoice;
+optional toolChoice?: ToolChoice;
 ```

 ***
@@ -83,7 +83,7 @@ optional toolChoice: ToolChoice;
 ### topK?

 ```ts
-optional topK: number;
+optional topK?: number;
 ```

 ***
@@ -91,5 +91,5 @@ optional topK: number;
 ### topP?

 ```ts
-optional topP: number;
+optional topP?: number;
 ```

diff --git a/sdk/js/docs/classes/Model.md b/sdk/js/docs/classes/Model.md
index 48340dae..424d673b 100644
--- a/sdk/js/docs/classes/Model.md
+++ b/sdk/js/docs/classes/Model.md
@@ -156,7 +156,7 @@ Automatically selects the new variant if it is cached and the current one is not
 #### Throws

-Error - If the variant's alias does not match the model's alias.
+Error - If the argument is not a ModelVariant object, or if the variant's alias does not match the model's alias.

 ***

diff --git a/sdk/js/docs/classes/ResponsesClientSettings.md b/sdk/js/docs/classes/ResponsesClientSettings.md
index 08b9ea94..8401faf1 100644
--- a/sdk/js/docs/classes/ResponsesClientSettings.md
+++ b/sdk/js/docs/classes/ResponsesClientSettings.md
@@ -22,7 +22,7 @@ new ResponsesClientSettings(): ResponsesClientSettings;
 ### frequencyPenalty?

 ```ts
-optional frequencyPenalty: number;
+optional frequencyPenalty?: number;
 ```

 ***
@@ -30,7 +30,7 @@ optional frequencyPenalty: number;
 ### instructions?

 ```ts
-optional instructions: string;
+optional instructions?: string;
 ```

 System-level instructions to guide the model.
@@ -40,7 +40,7 @@ System-level instructions to guide the model.
 ### maxOutputTokens?

 ```ts
-optional maxOutputTokens: number;
+optional maxOutputTokens?: number;
 ```

 ***
@@ -48,7 +48,7 @@ optional maxOutputTokens: number;
 ### metadata?

 ```ts
-optional metadata: Record;
+optional metadata?: Record;
 ```

 ***
@@ -56,7 +56,7 @@ optional metadata: Record;
 ### parallelToolCalls?

 ```ts
-optional parallelToolCalls: boolean;
+optional parallelToolCalls?: boolean;
 ```

 ***
@@ -64,7 +64,7 @@ optional parallelToolCalls: boolean;
 ### presencePenalty?

 ```ts
-optional presencePenalty: number;
+optional presencePenalty?: number;
 ```

 ***
@@ -72,7 +72,7 @@ optional presencePenalty: number;
 ### reasoning?

 ```ts
-optional reasoning: ReasoningConfig;
+optional reasoning?: ReasoningConfig;
 ```

 ***
@@ -80,7 +80,7 @@ optional reasoning: ReasoningConfig;
 ### seed?

 ```ts
-optional seed: number;
+optional seed?: number;
 ```

 ***
@@ -88,7 +88,7 @@ optional seed: number;
 ### store?

 ```ts
-optional store: boolean;
+optional store?: boolean;
 ```

 ***
@@ -96,7 +96,7 @@ optional store: boolean;
 ### temperature?

 ```ts
-optional temperature: number;
+optional temperature?: number;
 ```

 ***
@@ -104,7 +104,7 @@ optional temperature: number;
 ### text?

 ```ts
-optional text: TextConfig;
+optional text?: TextConfig;
 ```

 ***
@@ -112,7 +112,7 @@ optional text: TextConfig;
 ### toolChoice?

 ```ts
-optional toolChoice: ResponseToolChoice;
+optional toolChoice?: ResponseToolChoice;
 ```

 ***
@@ -120,7 +120,7 @@ optional toolChoice: ResponseToolChoice;
 ### topP?

 ```ts
-optional topP: number;
+optional topP?: number;
 ```

 ***
@@ -128,5 +128,5 @@ optional topP: number;
 ### truncation?

 ```ts
-optional truncation: TruncationStrategy;
+optional truncation?: TruncationStrategy;
 ```

diff --git a/sdk/js/examples/audio-transcription.ts b/sdk/js/examples/audio-transcription.ts
index 7fddf2d8..4e4fc2d4 100644
--- a/sdk/js/examples/audio-transcription.ts
+++ b/sdk/js/examples/audio-transcription.ts
@@ -72,9 +72,9 @@ async function main() {
   // Example: Streaming transcription
   console.log('\nTesting streaming transcription...');
-  await audioClient.transcribeStreaming(audioFilePath, (chunk: any) => {
+  for await (const chunk of audioClient.transcribeStreaming(audioFilePath)) {
     process.stdout.write(chunk.text);
-  });
+  }
   console.log('\n');

   // Unload the model

diff --git a/sdk/js/examples/chat-completion.ts b/sdk/js/examples/chat-completion.ts
index 2c283e23..a9e2d59a 100644
--- a/sdk/js/examples/chat-completion.ts
+++ b/sdk/js/examples/chat-completion.ts
@@ -70,15 +70,14 @@ async function main() {
   // Example streaming completion
   console.log('\nTesting streaming completion...');
-  await chatClient.completeStreamingChat(
-    [{ role: 'user', content: 'Write a short poem about programming.' }],
-    (chunk) => {
-      const content = chunk.choices?.[0]?.message?.content;
-      if (content) {
-        process.stdout.write(content);
-      }
+  for await (const chunk of chatClient.completeStreamingChat(
+    [{ role: 'user', content: 'Write a short poem about programming.' }]
+  )) {
+    const content = chunk.choices?.[0]?.message?.content;
+    if (content) {
+      process.stdout.write(content);
     }
-  );
+  }
   console.log('\n');

   // Model management example

diff --git a/sdk/js/examples/tool-calling.ts b/sdk/js/examples/tool-calling.ts
index bb4ed541..c3640a8f 100644
--- a/sdk/js/examples/tool-calling.ts
+++ b/sdk/js/examples/tool-calling.ts
@@ -109,22 +109,18 @@ async function main() {
   let toolCallData: any = null;

   console.log('Chat completion response:');
-  await chatClient.completeStreamingChat(
-    messages,
-    tools,
-    (chunk: any) => {
-      const content = chunk.choices?.[0]?.message?.content;
-      if (content) {
-        process.stdout.write(content);
-      }
-
-      // Capture tool call data
-      const toolCalls = chunk.choices?.[0]?.message?.tool_calls;
-      if (toolCalls && toolCalls.length > 0) {
-        toolCallData = toolCalls[0];
-      }
+  for await (const chunk of chatClient.completeStreamingChat(messages, tools)) {
+    const content = chunk.choices?.[0]?.message?.content;
+    if (content) {
+      process.stdout.write(content);
+    }
+
+    // Capture tool call data
+    const toolCalls = chunk.choices?.[0]?.message?.tool_calls;
+    if (toolCalls && toolCalls.length > 0) {
+      toolCallData = toolCalls[0];
     }
-  );
+  }
   console.log('\n');

   // Handle tool invocation
@@ -159,16 +155,12 @@ async function main() {
   };

   console.log('Chat completion response:');
-  await chatClient.completeStreamingChat(
-    messages,
-    tools,
-    (chunk: any) => {
-      const content = chunk.choices?.[0]?.message?.content;
-      if (content) {
-        process.stdout.write(content);
-      }
+  for await (const chunk of chatClient.completeStreamingChat(messages, tools)) {
+    const content = chunk.choices?.[0]?.message?.content;
+    if (content) {
+      process.stdout.write(content);
     }
-  );
+  }
   console.log('\n');

   console.log('\n✓ Example completed successfully');

diff --git a/sdk/js/src/openai/audioClient.ts b/sdk/js/src/openai/audioClient.ts
index 59267015..7b174924 100644
--- a/sdk/js/src/openai/audioClient.ts
+++ b/sdk/js/src/openai/audioClient.ts
@@ -89,66 +89,153 @@
   }

   /**
-   * Transcribes audio into the input language using streaming.
+   * Transcribes audio into the input language using streaming, returning an async iterable of chunks.
    * @param audioFilePath - Path to the audio file to transcribe.
-   * @param callback - A callback function that receives each chunk of the streaming response.
-   * @returns A promise that resolves when the stream is complete.
-   * @throws Error - If audioFilePath or callback are invalid, or streaming fails.
+   * @returns An async iterable that yields parsed streaming transcription chunks.
+   * @throws Error - If audioFilePath is invalid, or streaming fails.
+   *
+   * @example
+   * ```typescript
+   * for await (const chunk of audioClient.transcribeStreaming('recording.wav')) {
+   *   process.stdout.write(chunk.text);
+   * }
+   * ```
    */
-  public async transcribeStreaming(audioFilePath: string, callback: (chunk: any) => void): Promise<void> {
+  public transcribeStreaming(audioFilePath: string): AsyncIterable<any> {
     this.validateAudioFilePath(audioFilePath);
-    if (!callback || typeof callback !== 'function') {
-      throw new Error('Callback must be a valid function.');
-    }
+
     const request = { Model: this.modelId, FileName: audioFilePath, ...this.settings._serialize() };
-
-    let error: Error | null = null;
-    try {
-      await this.coreInterop.executeCommandStreaming(
-        "audio_transcribe",
-        { Params: { OpenAICreateRequest: JSON.stringify(request) } },
-        (chunkStr: string) => {
-          // Skip processing if we already encountered an error
-          if (error) {
-            return;
-          }
-
-          if (chunkStr) {
-            let chunk: any;
-            try {
-              chunk = JSON.parse(chunkStr);
-            } catch (e) {
-              // Don't throw from callback - store first error and stop processing
-              error = new Error(`Failed to parse streaming chunk: ${e instanceof Error ? e.message : String(e)}`, { cause: e });
-              return;
+    // Capture instance properties to local variables because `this` is not
+    // accessible inside the [Symbol.asyncIterator]() method below — it's a
+    // regular method on the returned object literal, not on the AudioClient.
+    const coreInterop = this.coreInterop;
+    const modelId = this.modelId;
+
+    // Return an AsyncIterable object. The [Symbol.asyncIterator]() factory
+    // is called once when the consumer starts a `for await` loop, and it
+    // returns the AsyncIterator (with next() / return() methods).
+    return {
+      [Symbol.asyncIterator](): AsyncIterator<any> {
+        // Buffer for chunks received from the native callback.
+        // Uses a head index for O(1) dequeue instead of Array.shift() which is O(n).
+        // JavaScript's single-threaded event loop ensures no race conditions
+        // between the callback pushing chunks and next() consuming them.
+        const chunks: any[] = [];
+        let head = 0;
+        let done = false;
+        let cancelled = false;
+        let error: Error | null = null;
+        let resolve: (() => void) | null = null;
+        let nextInFlight = false;
+
+        const streamingPromise = coreInterop.executeCommandStreaming(
+          "audio_transcribe",
+          { Params: { OpenAICreateRequest: JSON.stringify(request) } },
+          (chunkStr: string) => {
+            if (cancelled || error) return;
+            if (chunkStr) {
+              try {
+                const chunk = JSON.parse(chunkStr);
+                chunks.push(chunk);
+              } catch (e) {
+                if (!error) {
+                  error = new Error(
+                    `Failed to parse streaming chunk: ${e instanceof Error ? e.message : String(e)}`,
+                    { cause: e }
+                  );
+                }
+              }
+            }
+            // Wake up any waiting next() call
+            if (resolve) {
+              const r = resolve;
+              resolve = null;
+              r();
             }
+          }
+        // When the native stream completes, mark done and wake up any
+        // pending next() call so it can see that iteration has ended.
+        ).then(() => {
+          done = true;
+          if (resolve) {
+            const r = resolve;
+            resolve = null;
+            r(); // resolve the pending next() promise
+          }
+        }).catch((err) => {
+          if (!error) {
+            const underlyingError = err instanceof Error ? err : new Error(String(err));
+            error = new Error(
+              `Streaming audio transcription failed for model '${modelId}': ${underlyingError.message}`,
+              { cause: underlyingError }
+            );
+          }
+          done = true;
+          if (resolve) {
+            const r = resolve;
+            resolve = null;
+            r();
+          }
+        });

+        // Return the AsyncIterator object consumed by `for await`.
+        // next() yields buffered chunks one at a time; return() is
+        // called automatically when the consumer breaks out early.
+        return {
+          async next(): Promise<IteratorResult<any>> {
+            if (nextInFlight) {
+              throw new Error('next() called concurrently on streaming iterator; await each call before invoking next().');
+            }
+            nextInFlight = true;
             try {
-              callback(chunk);
-            } catch (e) {
-              // Don't throw from callback - store first error and stop processing
-              error = new Error(`User callback threw an error: ${e instanceof Error ? e.message : String(e)}`, { cause: e });
-              return;
+              while (true) {
+                if (head < chunks.length) {
+                  const value = chunks[head];
+                  chunks[head] = undefined; // allow GC
+                  head++;
+                  // Compact the array when all buffered chunks have been consumed
+                  if (head === chunks.length) {
+                    chunks.length = 0;
+                    head = 0;
+                  }
+                  return { value, done: false };
+                }
+                if (error) {
+                  throw error;
+                }
+                if (done || cancelled) {
+                  return { value: undefined, done: true };
+                }
+                // Wait for the next chunk or completion
+                await new Promise((r) => { resolve = r; });
+              }
+            } finally {
+              nextInFlight = false;
             }
+          },
+          async return(): Promise<IteratorResult<any>> {
+            // Mark cancelled so the callback stops buffering.
+            // Note: the underlying native stream cannot be cancelled
+            // (CoreInterop.executeCommandStreaming has no abort support),
+            // so the koffi callback may still fire but will no-op due
+            // to the cancelled guard above.
+            cancelled = true;
+            chunks.length = 0;
+            head = 0;
+            if (resolve) {
+              const r = resolve;
+              resolve = null;
+              r();
+            }
+            return { value: undefined, done: true };
           }
-        }
-      );
-
-      // If we encountered an error during streaming, reject now
-      if (error) {
-        throw error;
+        };
       }
-    } catch (err) {
-      const underlyingError = err instanceof Error ? err : new Error(String(err));
-      throw new Error(
-        `Streaming audio transcription failed for model '${this.modelId}': ${underlyingError.message}`,
-        { cause: underlyingError }
-      );
-    }
+    };
   }
 }
+ * + * @example + * ```typescript + * // Without tools: + * for await (const chunk of chatClient.completeStreamingChat(messages)) { + * const content = chunk.choices?.[0]?.delta?.content; + * if (content) process.stdout.write(content); + * } + * + * // With tools: + * for await (const chunk of chatClient.completeStreamingChat(messages, tools)) { + * const content = chunk.choices?.[0]?.delta?.content; + * if (content) process.stdout.write(content); + * } + * ``` */ - public async completeStreamingChat(messages: any[], callback: (chunk: any) => void): Promise; - public async completeStreamingChat(messages: any[], tools: any[], callback: (chunk: any) => void): Promise; - public async completeStreamingChat(messages: any[], toolsOrCallback: any[] | ((chunk: any) => void), maybeCallback?: (chunk: any) => void): Promise { - const tools = Array.isArray(toolsOrCallback) ? toolsOrCallback : undefined; - const callback = (Array.isArray(toolsOrCallback) ? maybeCallback : toolsOrCallback) as ((chunk: any) => void) | undefined; - + public completeStreamingChat(messages: any[]): AsyncIterable; + public completeStreamingChat(messages: any[], tools: any[]): AsyncIterable; + public completeStreamingChat(messages: any[], tools?: any[]): AsyncIterable { this.validateMessages(messages); this.validateTools(tools); - if (!callback || typeof callback !== 'function') { - throw new Error('Callback must be a valid function.'); - } - const request = { model: this.modelId, messages, @@ -239,49 +246,132 @@ export class ChatClient { ...this.settings._serialize() }; - let error: Error | null = null; + // Capture instance properties to local variables because `this` is not + // accessible inside the [Symbol.asyncIterator]() method below — it's a + // regular method on the returned object literal, not on the ChatClient. + const coreInterop = this.coreInterop; + const modelId = this.modelId; - try { - await this.coreInterop.executeCommandStreaming( - 'chat_completions', - { Params: { OpenAICreateRequest: JSON.stringify(request) } }, - (chunkStr: string) => { - // Skip processing if we already encountered an error - if (error) return; + // Return an AsyncIterable object. The [Symbol.asyncIterator]() factory + // is called once when the consumer starts a `for await` loop, and it + // returns the AsyncIterator (with next() / return() methods). + return { + [Symbol.asyncIterator](): AsyncIterator { + // Buffer for chunks received from the native callback. + // Uses a head index for O(1) dequeue instead of Array.shift() which is O(n). + // JavaScript's single-threaded event loop ensures no race conditions + // between the callback pushing chunks and next() consuming them. + const chunks: any[] = []; + let head = 0; + let done = false; + let cancelled = false; + let error: Error | null = null; + let resolve: (() => void) | null = null; + let nextInFlight = false; - if (chunkStr) { - let chunk: any; - try { - chunk = JSON.parse(chunkStr); - } catch (e) { - // Don't throw from callback - store first error and stop processing - error = new Error( - `Failed to parse streaming chunk: ${e instanceof Error ? 
e.message : String(e)}`, - { cause: e } - ); - return; + const streamingPromise = coreInterop.executeCommandStreaming( + 'chat_completions', + { Params: { OpenAICreateRequest: JSON.stringify(request) } }, + (chunkStr: string) => { + if (cancelled || error) return; + if (chunkStr) { + try { + const chunk = JSON.parse(chunkStr); + chunks.push(chunk); + } catch (e) { + if (!error) { + error = new Error( + `Failed to parse streaming chunk: ${e instanceof Error ? e.message : String(e)}`, + { cause: e } + ); + } + } } + // Wake up any waiting next() call + if (resolve) { + const r = resolve; + resolve = null; + r(); + } + } + // When the native stream completes, mark done and wake up any + // pending next() call so it can see that iteration has ended. + ).then(() => { + done = true; + if (resolve) { + const r = resolve; + resolve = null; + r(); // resolve the pending next() promise + } + }).catch((err) => { + if (!error) { + const underlyingError = err instanceof Error ? err : new Error(String(err)); + error = new Error( + `Streaming chat completion failed for model '${modelId}': ${underlyingError.message}`, + { cause: underlyingError } + ); + } + done = true; + if (resolve) { + const r = resolve; + resolve = null; + r(); + } + }); + // Return the AsyncIterator object consumed by `for await`. + // next() yields buffered chunks one at a time; return() is + // called automatically when the consumer breaks out early. + return { + async next(): Promise<IteratorResult<any>> { + if (nextInFlight) { + throw new Error('next() called concurrently on streaming iterator; await each call before invoking next().'); + } + nextInFlight = true; try { + while (true) { + if (head < chunks.length) { + const value = chunks[head]; + chunks[head] = undefined; // allow GC + head++; + // Compact the array when all buffered chunks have been consumed + if (head === chunks.length) { + chunks.length = 0; + head = 0; + } + return { value, done: false }; + } + if (error) { + throw error; + } + if (done || cancelled) { + return { value: undefined, done: true }; + } + // Wait for the next chunk or completion + await new Promise((r) => { resolve = r; }); + } + } finally { + nextInFlight = false; + } + }, + async return(): Promise<IteratorResult<any>> { + // Mark cancelled so the callback stops buffering. + // Note: the underlying native stream cannot be cancelled + // (CoreInterop.executeCommandStreaming has no abort support), + // so the koffi callback may still fire but will no-op due + // to the cancelled guard above. + cancelled = true; + chunks.length = 0; + head = 0; + if (resolve) { + const r = resolve; + resolve = null; + r(); } + return { value: undefined, done: true }; } - } - ); - - // If we encountered an error during streaming, reject now - if (error) throw error; - } catch (err) { - const underlyingError = err instanceof Error ?
err : new Error(String(err)); - throw new Error(`Streaming chat completion failed for model '${this.modelId}': ${underlyingError.message}`, { - cause: underlyingError - }); - } + }; + } + }; } } diff --git a/sdk/js/test/openai/audioClient.test.ts b/sdk/js/test/openai/audioClient.test.ts index a57c02e5..10da05be 100644 --- a/sdk/js/test/openai/audioClient.test.ts +++ b/sdk/js/test/openai/audioClient.test.ts @@ -110,13 +110,13 @@ describe('Audio Client Tests', () => { audioClient.settings.temperature = 0.0; // for deterministic results let fullResponse = ''; - await audioClient.transcribeStreaming(AUDIO_FILE_PATH, (chunk) => { + for await (const chunk of audioClient.transcribeStreaming(AUDIO_FILE_PATH)) { expect(chunk).to.not.be.undefined; expect(chunk.text).to.not.be.undefined; expect(chunk.text).to.be.a('string'); expect(chunk.text.length).to.be.greaterThan(0); fullResponse += chunk.text; - }); + } console.log(`Full response: ${fullResponse}`); expect(fullResponse).to.equal(EXPECTED_TEXT); @@ -151,13 +151,13 @@ describe('Audio Client Tests', () => { audioClient.settings.temperature = 0.0; // for deterministic results let fullResponse = ''; - await audioClient.transcribeStreaming(AUDIO_FILE_PATH, (chunk) => { + for await (const chunk of audioClient.transcribeStreaming(AUDIO_FILE_PATH)) { expect(chunk).to.not.be.undefined; expect(chunk.text).to.not.be.undefined; expect(chunk.text).to.be.a('string'); expect(chunk.text.length).to.be.greaterThan(0); fullResponse += chunk.text; - }); + } console.log(`Full response: ${fullResponse}`); expect(fullResponse).to.equal(EXPECTED_TEXT); @@ -190,27 +190,12 @@ describe('Audio Client Tests', () => { const audioClient = model.createAudioClient(); try { - await audioClient.transcribeStreaming('', () => {}); + // transcribeStreaming validates synchronously before returning the AsyncIterable + audioClient.transcribeStreaming(''); expect.fail('Should have thrown an error for empty audio file path'); } catch (error) { expect(error).to.be.instanceOf(Error); expect((error as Error).message).to.include('Audio file path must be a non-empty string'); } }); - - it('should throw when transcribing streaming with invalid callback', async function() { - const manager = getTestManager(); - const catalog = manager.catalog; - const model = await catalog.getModel(WHISPER_MODEL_ALIAS); - const audioClient = model.createAudioClient(); - const invalidCallbacks: any[] = [null, undefined, 42, {}, 'not-a-function']; - for (const invalidCallback of invalidCallbacks) { - try { - await audioClient.transcribeStreaming(AUDIO_FILE_PATH, invalidCallback as any); - expect.fail('Should have thrown an error for invalid callback'); - } catch (error) { - expect(error).to.be.instanceOf(Error); - } - } - }); }); \ No newline at end of file diff --git a/sdk/js/test/openai/chatClient.test.ts b/sdk/js/test/openai/chatClient.test.ts index 5f612845..7be190ce 100644 --- a/sdk/js/test/openai/chatClient.test.ts +++ b/sdk/js/test/openai/chatClient.test.ts @@ -81,13 +81,13 @@ describe('Chat Client Tests', () => { let fullContent = ''; let chunkCount = 0; - await client.completeStreamingChat(messages, (chunk: any) => { + for await (const chunk of client.completeStreamingChat(messages)) { chunkCount++; const content = chunk.choices?.[0]?.delta?.content; if (content) { fullContent += content; } - }); + } expect(chunkCount).to.be.greaterThan(0); expect(fullContent).to.be.a('string'); @@ -102,13 +102,13 @@ describe('Chat Client Tests', () => { fullContent = ''; chunkCount = 0; - await 
client.completeStreamingChat(messages, (chunk: any) => { + for await (const chunk of client.completeStreamingChat(messages)) { chunkCount++; const content = chunk.choices?.[0]?.delta?.content; if (content) { fullContent += content; } - }); + } expect(chunkCount).to.be.greaterThan(0); expect(fullContent).to.be.a('string'); @@ -172,7 +172,8 @@ describe('Chat Client Tests', () => { const invalidMessages: any[] = [[], null, undefined]; for (const invalidMessage of invalidMessages) { try { - await client.completeStreamingChat(invalidMessage, () => {}); + // completeStreamingChat validates synchronously before returning the AsyncIterable + client.completeStreamingChat(invalidMessage); expect.fail(`Should have thrown an error for ${Array.isArray(invalidMessage) ? 'empty' : invalidMessage} messages`); } catch (error) { expect(error).to.be.instanceOf(Error); @@ -181,23 +182,6 @@ describe('Chat Client Tests', () => { } }); - it('should throw when completing streaming chat with invalid callback', async function() { - const manager = getTestManager(); - const catalog = manager.catalog; - const model = await catalog.getModel(TEST_MODEL_ALIAS); - const client = model.createChatClient(); - const messages = [{ role: 'user', content: 'Hello' }]; - const invalidCallbacks: any[] = [null, undefined, {} as any, 'not a function' as any]; - for (const invalidCallback of invalidCallbacks) { - try { - await client.completeStreamingChat(messages as any, invalidCallback as any); - expect.fail('Should have thrown an error for invalid callback'); - } catch (error) { - expect(error).to.be.instanceOf(Error); - } - } - }); - it('should perform tool calling chat completion (non-streaming)', async function() { this.timeout(20000); const manager = getTestManager(); @@ -305,7 +289,7 @@ describe('Chat Client Tests', () => { let lastToolCallChunk: any = null; // Check that each response chunk contains the expected information - await client.completeStreamingChat(messages, tools, (chunk: any) => { + for await (const chunk of client.completeStreamingChat(messages, tools)) { const content = chunk.choices?.[0]?.message?.content ?? chunk.choices?.[0]?.delta?.content; if (content) { fullResponse += content; @@ -314,7 +298,7 @@ describe('Chat Client Tests', () => { if (toolCalls && toolCalls.length > 0) { lastToolCallChunk = chunk; } - }); + } expect(fullResponse).to.be.a('string').and.not.equal(''); expect(lastToolCallChunk).to.not.be.null; @@ -341,12 +325,12 @@ describe('Chat Client Tests', () => { // Run the next turn of the conversation fullResponse = ''; - await client.completeStreamingChat(messages, tools, (chunk: any) => { + for await (const chunk of client.completeStreamingChat(messages, tools)) { const content = chunk.choices?.[0]?.message?.content ?? 
chunk.choices?.[0]?.delta?.content; if (content) { fullResponse += content; } - }); + } // Check that the conversation continued expect(fullResponse).to.be.a('string').and.not.equal(''); From 0d7bab5d064c57c4358d5eeb46c22a1cdc56ac7d Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Fri, 27 Mar 2026 01:24:44 -0400 Subject: [PATCH 04/83] separates js sdk into foundry-local-sdk and foundry-local-sdk-winml packages (#555) no longer need `npm install --winml` as `npm install` with the separate packages will fetch the appropriate binaries --------- Co-authored-by: Prathik Rao Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .github/workflows/build-js-steps.yml | 26 +- sdk/js/docs/README.md | 2 +- sdk/js/package.json | 12 +- sdk/js/script/install-standard.cjs | 26 ++ sdk/js/script/install-utils.cjs | 193 +++++++++++++++ sdk/js/script/install-winml.cjs | 25 ++ sdk/js/script/install.cjs | 357 --------------------------- sdk/js/script/pack.cjs | 32 +++ 8 files changed, 293 insertions(+), 380 deletions(-) create mode 100644 sdk/js/script/install-standard.cjs create mode 100644 sdk/js/script/install-utils.cjs create mode 100644 sdk/js/script/install-winml.cjs delete mode 100644 sdk/js/script/install.cjs create mode 100644 sdk/js/script/pack.cjs diff --git a/.github/workflows/build-js-steps.yml b/.github/workflows/build-js-steps.yml index d7a568a3..55f3ebf8 100644 --- a/.github/workflows/build-js-steps.yml +++ b/.github/workflows/build-js-steps.yml @@ -92,13 +92,7 @@ jobs: run: | if (Test-Path .npmrc) { Remove-Item .npmrc -Force; Write-Host "Removed .npmrc" } - - name: npm install (WinML) - if: ${{ inputs.useWinML == true }} - working-directory: sdk/js - run: npm install --winml - - - name: npm install (Standard) - if: ${{ inputs.useWinML == false }} + - name: npm install working-directory: sdk/js run: npm install @@ -114,21 +108,15 @@ jobs: working-directory: sdk/js run: npm run build - - name: Pack npm package + - name: Pack npm package (WinML) + if: ${{ inputs.useWinML == true }} working-directory: sdk/js - run: npm pack + run: npm run pack:winml - - name: Rename WinML artifact - if: ${{ inputs.useWinML == true }} - shell: pwsh + - name: Pack npm package (Standard) + if: ${{ inputs.useWinML == false }} working-directory: sdk/js - run: | - $tgz = Get-ChildItem *.tgz | Select-Object -First 1 - if ($tgz) { - $newName = $tgz.Name -replace '^foundry-local-sdk-', 'foundry-local-sdk-winml-' - Rename-Item -Path $tgz.FullName -NewName $newName - Write-Host "Renamed $($tgz.Name) to $newName" - } + run: npm run pack - name: Upload npm packages uses: actions/upload-artifact@v4 diff --git a/sdk/js/docs/README.md b/sdk/js/docs/README.md index 58218628..dd483aa4 100644 --- a/sdk/js/docs/README.md +++ b/sdk/js/docs/README.md @@ -1,4 +1,4 @@ -# @prathikrao/foundry-local-sdk +# foundry-local-sdk ## Enumerations diff --git a/sdk/js/package.json b/sdk/js/package.json index 46ae6ce5..5830e3fe 100644 --- a/sdk/js/package.json +++ b/sdk/js/package.json @@ -7,13 +7,19 @@ "type": "module", "files": [ "dist", - "script" + "script/install-standard.cjs", + "script/install-winml.cjs", + "script/install-utils.cjs", + "script/pack.cjs", + "script/preinstall.cjs" ], "scripts": { "build": "tsc -p tsconfig.build.json", "docs": "typedoc", "example": "tsx examples/chat-completion.ts", - "install": "node script/install.cjs", + "install": "node script/install-standard.cjs", + "pack": "node script/pack.cjs", + "pack:winml": "node script/pack.cjs winml", "preinstall": "node script/preinstall.cjs", "test": "mocha 
--import=tsx test/**/*.test.ts" }, @@ -45,4 +51,4 @@ }, "author": "", "license": "ISC" -} +} \ No newline at end of file diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs new file mode 100644 index 00000000..319a33d1 --- /dev/null +++ b/sdk/js/script/install-standard.cjs @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Install script for foundry-local-sdk (standard variant). + +'use strict'; + +const os = require('os'); +const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); + +const useNightly = process.env.npm_config_nightly === 'true'; + +const ARTIFACTS = [ + { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0.8-rc3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, + { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.24.3', feed: NUGET_FEED, nightly: false }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.12.2', feed: NUGET_FEED, nightly: false }, +]; + +(async () => { + try { + await runInstall(ARTIFACTS); + } catch (err) { + console.error('[foundry-local] Installation failed:', err instanceof Error ? err.message : err); + process.exit(1); + } +})(); diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs new file mode 100644 index 00000000..f9a5186c --- /dev/null +++ b/sdk/js/script/install-utils.cjs @@ -0,0 +1,193 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Shared NuGet download and extraction utilities for install scripts. + +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const os = require('os'); +const https = require('https'); +const AdmZip = require('adm-zip'); + +const PLATFORM_MAP = { + 'win32-x64': 'win-x64', + 'win32-arm64': 'win-arm64', + 'linux-x64': 'linux-x64', + 'darwin-arm64': 'osx-arm64', +}; +const platformKey = `${os.platform()}-${os.arch()}`; +const RID = PLATFORM_MAP[platformKey]; +const BIN_DIR = path.join(__dirname, '..', 'packages', '@foundry-local-core', platformKey); +const EXT = os.platform() === 'win32' ? '.dll' : os.platform() === 'darwin' ? '.dylib' : '.so'; + +const REQUIRED_FILES = [ + `Microsoft.AI.Foundry.Local.Core${EXT}`, + `${os.platform() === 'win32' ? '' : 'lib'}onnxruntime${EXT}`, + `${os.platform() === 'win32' ? 
'' : 'lib'}onnxruntime-genai${EXT}`, +]; + +const NUGET_FEED = 'https://api.nuget.org/v3/index.json'; +const ORT_NIGHTLY_FEED = 'https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json'; + +// --- Download helpers --- + +async function downloadWithRetryAndRedirects(url, destStream = null) { + const maxRedirects = 5; + let currentUrl = url; + let redirects = 0; + + while (redirects < maxRedirects) { + const response = await new Promise((resolve, reject) => { + https.get(currentUrl, (res) => resolve(res)) + .on('error', reject); + }); + + if (response.statusCode >= 300 && response.statusCode < 400 && response.headers.location) { + currentUrl = response.headers.location; + response.resume(); + redirects++; + console.log(` Following redirect to ${new URL(currentUrl).host}...`); + continue; + } + + if (response.statusCode !== 200) { + throw new Error(`Download failed with status ${response.statusCode}: ${currentUrl}`); + } + + if (destStream) { + response.pipe(destStream); + return new Promise((resolve, reject) => { + destStream.on('finish', resolve); + destStream.on('error', reject); + response.on('error', reject); + }); + } else { + let data = ''; + response.on('data', chunk => data += chunk); + return new Promise((resolve, reject) => { + response.on('end', () => resolve(data)); + response.on('error', reject); + }); + } + } + throw new Error('Too many redirects'); +} + +async function downloadJson(url) { + return JSON.parse(await downloadWithRetryAndRedirects(url)); +} + +async function downloadFile(url, dest) { + const file = fs.createWriteStream(dest); + try { + await downloadWithRetryAndRedirects(url, file); + file.close(); + } catch (e) { + file.close(); + if (fs.existsSync(dest)) fs.unlinkSync(dest); + throw e; + } +} + +const serviceIndexCache = new Map(); + +async function getBaseAddress(feedUrl) { + if (!serviceIndexCache.has(feedUrl)) { + serviceIndexCache.set(feedUrl, await downloadJson(feedUrl)); + } + const resources = serviceIndexCache.get(feedUrl).resources || []; + const res = resources.find(r => r['@type'] && r['@type'].startsWith('PackageBaseAddress/3.0.0')); + if (!res) throw new Error('Could not find PackageBaseAddress/3.0.0 in NuGet feed.'); + const baseAddress = res['@id']; + return baseAddress.endsWith('/') ? 
baseAddress : baseAddress + '/'; +} + +async function resolveLatestVersion(feedUrl, packageName) { + const baseAddress = await getBaseAddress(feedUrl); + const versionsUrl = `${baseAddress}${packageName.toLowerCase()}/index.json`; + const versionData = await downloadJson(versionsUrl); + const versions = versionData.versions || []; + if (versions.length === 0) throw new Error(`No versions found for ${packageName}`); + versions.sort((a, b) => b.localeCompare(a)); + console.log(`[foundry-local] Latest version of ${packageName}: ${versions[0]}`); + return versions[0]; +} + +async function installPackage(artifact, tempDir) { + const pkgName = artifact.name; + let pkgVer = artifact.version; + if (artifact.nightly) { + console.log(` Resolving latest version for ${pkgName}...`); + pkgVer = await resolveLatestVersion(artifact.feed, pkgName); + } + + const baseAddress = await getBaseAddress(artifact.feed); + const nameLower = pkgName.toLowerCase(); + const verLower = pkgVer.toLowerCase(); + const downloadUrl = `${baseAddress}${nameLower}/${verLower}/${nameLower}.${verLower}.nupkg`; + + const nupkgPath = path.join(tempDir, `${pkgName}.${pkgVer}.nupkg`); + console.log(` Downloading ${pkgName} ${pkgVer}...`); + await downloadFile(downloadUrl, nupkgPath); + + console.log(` Extracting...`); + const zip = new AdmZip(nupkgPath); + const targetPathPrefix = `runtimes/${RID}/native/`.toLowerCase(); + const entries = zip.getEntries().filter(e => { + const p = e.entryName.toLowerCase(); + return p.includes(targetPathPrefix) && p.endsWith(EXT); + }); + + if (entries.length > 0) { + entries.forEach(entry => { + zip.extractEntryTo(entry, BIN_DIR, false, true); + console.log(` Extracted ${entry.name}`); + }); + } else { + console.warn(` No files found for RID ${RID} in ${pkgName}.`); + } + + // Update platform package.json version for Core packages + if (pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { + const pkgJsonPath = path.join(BIN_DIR, 'package.json'); + if (fs.existsSync(pkgJsonPath)) { + const pkgJson = JSON.parse(fs.readFileSync(pkgJsonPath, 'utf8')); + pkgJson.version = pkgVer; + fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgJson, null, 2)); + } + } +} + +async function runInstall(artifacts) { + if (!RID) { + console.warn(`[foundry-local] Unsupported platform: ${platformKey}. Skipping.`); + return; + } + + if (fs.existsSync(BIN_DIR) && REQUIRED_FILES.every(f => fs.existsSync(path.join(BIN_DIR, f)))) { + if (process.env.npm_config_nightly === 'true') { + console.log(`[foundry-local] Nightly requested. Forcing reinstall...`); + fs.rmSync(BIN_DIR, { recursive: true, force: true }); + } else { + console.log(`[foundry-local] Native libraries already installed.`); + return; + } + } + + console.log(`[foundry-local] Installing native libraries for ${RID}...`); + fs.mkdirSync(BIN_DIR, { recursive: true }); + + const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'foundry-install-')); + try { + for (const artifact of artifacts) { + await installPackage(artifact, tempDir); + } + console.log('[foundry-local] Installation complete.'); + } finally { + try { fs.rmSync(tempDir, { recursive: true, force: true }); } catch {} + } +} + +module.exports = { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall }; diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs new file mode 100644 index 00000000..b46770ca --- /dev/null +++ b/sdk/js/script/install-winml.cjs @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +// Install script for foundry-local-sdk-winml variant. + +'use strict'; + +const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); + +const useNightly = process.env.npm_config_nightly === 'true'; + +const ARTIFACTS = [ + { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0.8-rc3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, + { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED, nightly: false }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.WinML', version: '0.12.2', feed: NUGET_FEED, nightly: false }, +]; + +(async () => { + try { + await runInstall(ARTIFACTS); + } catch (err) { + console.error('Failed to install WinML artifacts:', err); + process.exit(1); + } +})(); diff --git a/sdk/js/script/install.cjs b/sdk/js/script/install.cjs deleted file mode 100644 index cdf5531d..00000000 --- a/sdk/js/script/install.cjs +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -// Adapted from onnxruntime\js\node\script\install-utils.js -// The file in packages/ are the original source of truth that we are downloading and "installing" into our project's source tree. -// The file in node_modules/... is a symlink created by NPM to mark them as dependencies of the overall package. - -'use strict'; - -const fs = require('fs'); -const path = require('path'); -const os = require('os'); -const https = require('https'); -const AdmZip = require('adm-zip'); - -// Determine platform -const PLATFORM_MAP = { - 'win32-x64': 'win-x64', - 'win32-arm64': 'win-arm64', - 'linux-x64': 'linux-x64', - 'darwin-arm64': 'osx-arm64', -}; -const platformKey = `${os.platform()}-${os.arch()}`; -const RID = PLATFORM_MAP[platformKey]; - -if (!RID) { - console.warn(`[foundry-local] Unsupported platform: ${platformKey}. Skipping native library installation.`); - process.exit(0); -} - -// Write to the source 'packages' directory so binaries persist and link correctly via package.json -const BIN_DIR = path.join(__dirname, '..', 'packages', '@foundry-local-core', platformKey); -const REQUIRED_FILES = [ - 'Microsoft.AI.Foundry.Local.Core.dll', - 'onnxruntime.dll', - 'onnxruntime-genai.dll', -].map(f => f.replace('.dll', os.platform() === 'win32' ? '.dll' : os.platform() === 'darwin' ? '.dylib' : '.so')); - -// When you run npm install --winml, npm does not pass --winml as a command-line argument to your script. -// Instead, it sets an environment variable named npm_config_winml to 'true'. -const useWinML = process.env.npm_config_winml === 'true'; -const useNightly = process.env.npm_config_nightly === 'true'; -const noDeps = process.env.npm_config_nodeps === 'true'; - -console.log(`[foundry-local] WinML enabled: ${useWinML}`); -console.log(`[foundry-local] Nightly enabled: ${useNightly}`); - -const NUGET_FEED = 'https://api.nuget.org/v3/index.json'; -const ORT_FEED = 'https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT/nuget/v3/index.json'; -const ORT_NIGHTLY_FEED = 'https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json'; - -// If nightly is requested, pull Core/GenAI from the ORT-Nightly feed where nightly builds are published. -// Otherwise use the standard NuGet.org feed. -const CORE_FEED = useNightly ? 
ORT_NIGHTLY_FEED : NUGET_FEED; - -const FOUNDRY_LOCAL_CORE_ARTIFACT = { - name: 'Microsoft.AI.Foundry.Local.Core', - version: '0.9.0.8-rc3', - feed: ORT_NIGHTLY_FEED, - nightly: useNightly -} - -const FOUNDRY_LOCAL_CORE_WINML_ARTIFACT = { - name: 'Microsoft.AI.Foundry.Local.Core.WinML', - version: '0.9.0.8-rc3', - feed: ORT_NIGHTLY_FEED, - nightly: useNightly -} - -const ONNX_RUNTIME_FOUNDRY_ARTIFACT = { - name: 'Microsoft.ML.OnnxRuntime.Foundry', - version: '1.24.3', - feed: NUGET_FEED, - nightly: false -} - -const ONNX_RUNTIME_WINML_ARTIFACT = { - name: 'Microsoft.ML.OnnxRuntime.Foundry', - version: '1.23.2.3', - feed: NUGET_FEED, - nightly: false -} - -const ONNX_RUNTIME_LINUX_ARTIFACT = { - name: 'Microsoft.ML.OnnxRuntime.Gpu.Linux', - version: '1.24.3', - feed: NUGET_FEED, - nightly: false -} - -const ONNX_RUNTIME_GENAI_FOUNDRY_ARTIFACT = { - name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', - version: '0.12.2', - feed: NUGET_FEED, - nightly: false -} - -const ONNX_RUNTIME_GENAI_WINML_ARTIFACT = { - name: 'Microsoft.ML.OnnxRuntimeGenAI.WinML', - version: '0.12.2', - feed: NUGET_FEED, - nightly: false -} - -const WINML_ARTIFACTS = [ - FOUNDRY_LOCAL_CORE_WINML_ARTIFACT, - ONNX_RUNTIME_WINML_ARTIFACT, - ONNX_RUNTIME_GENAI_WINML_ARTIFACT -]; - -const NON_WINML_ARTIFACTS = [ - FOUNDRY_LOCAL_CORE_ARTIFACT, - ONNX_RUNTIME_FOUNDRY_ARTIFACT, - ONNX_RUNTIME_GENAI_FOUNDRY_ARTIFACT -]; - -const LINUX_ARTIFACTS = [ - FOUNDRY_LOCAL_CORE_ARTIFACT, - ONNX_RUNTIME_LINUX_ARTIFACT, - ONNX_RUNTIME_GENAI_FOUNDRY_ARTIFACT -]; - -let ARTIFACTS = []; -if (noDeps) { - console.log(`[foundry-local] Skipping dependencies install...`); - ARTIFACTS = []; -} else if (useWinML) { - console.log(`[foundry-local] Using WinML artifacts...`); - ARTIFACTS = WINML_ARTIFACTS; -} else if (os.platform() === 'linux') { - console.log(`[foundry-local] Using Linux GPU artifacts...`); - ARTIFACTS = LINUX_ARTIFACTS; -} else { - console.log(`[foundry-local] Using standard artifacts...`); - ARTIFACTS = NON_WINML_ARTIFACTS; -} - -// Check if already installed -if (fs.existsSync(BIN_DIR) && REQUIRED_FILES.every(f => fs.existsSync(path.join(BIN_DIR, f)))) { - if (useNightly) { - console.log(`[foundry-local] Nightly requested. Forcing reinstall...`); - fs.rmSync(BIN_DIR, { recursive: true, force: true }); - } else { - console.log(`[foundry-local] Native libraries already installed.`); - process.exit(0); - } -} - -console.log(`[foundry-local] Installing native libraries for ${RID}...`); -fs.mkdirSync(BIN_DIR, { recursive: true }); - -async function downloadWithRetryAndRedirects(url, destStream = null) { - const maxRedirects = 5; - let currentUrl = url; - let redirects = 0; - - while (redirects < maxRedirects) { - const response = await new Promise((resolve, reject) => { - https.get(currentUrl, (res) => resolve(res)) - .on('error', reject); - }); - - // When you request a file from api.nuget.org, it rarely serves the file directly. - // Instead, it usually responds with a 302 Found or 307 Temporary Redirect pointing to a Content Delivery Network (CDN) - // or a specific Storage Account where the actual file lives. Node.js treats a redirect as a completed request so we - // need to explicitly handle it here. 
- if (response.statusCode >= 300 && response.statusCode < 400 && response.headers.location) { - currentUrl = response.headers.location; - response.resume(); // Consume/discard response data to free up socket - redirects++; - console.log(` Following redirect to ${new URL(currentUrl).host}...`); - continue; - } - - if (response.statusCode !== 200) { - throw new Error(`Download failed with status ${response.statusCode}: ${currentUrl}`); - } - - // destStream is null when the function is used to download JSON data (like NuGet feed index or package metadata) rather than a file - if (destStream) { - response.pipe(destStream); - return new Promise((resolve, reject) => { - destStream.on('finish', resolve); - destStream.on('error', reject); - response.on('error', reject); - }); - } else { - let data = ''; - response.on('data', chunk => data += chunk); - return new Promise((resolve, reject) => { - response.on('end', () => resolve(data)); - response.on('error', reject); - }); - } - } - throw new Error('Too many redirects'); -} - -async function downloadJson(url) { - const data = await downloadWithRetryAndRedirects(url); - return JSON.parse(data); -} - -async function downloadFile(url, dest) { - const file = fs.createWriteStream(dest); - try { - await downloadWithRetryAndRedirects(url, file); - file.close(); - } catch (e) { - file.close(); - if (fs.existsSync(dest)) fs.unlinkSync(dest); - throw e; - } -} - - -// Map to cache service index resources -const serviceIndexCache = new Map(); - -async function getBaseAddress(feedUrl) { - // 1. Get Service Index - if (!serviceIndexCache.has(feedUrl)) { - const index = await downloadJson(feedUrl); - serviceIndexCache.set(feedUrl, index); - } - - const serviceIndex = serviceIndexCache.get(feedUrl); - - // 2. Find PackageBaseAddress/3.0.0 - const resources = serviceIndex.resources || []; - const baseAddressRes = resources.find(r => r['@type'] && r['@type'].startsWith('PackageBaseAddress/3.0.0')); - - if (!baseAddressRes) { - throw new Error('Could not find PackageBaseAddress/3.0.0 in NuGet feed.'); - } - - const baseAddress = baseAddressRes['@id']; - // Ensure trailing slash - return baseAddress.endsWith('/') ? baseAddress : baseAddress + '/'; -} - -async function resolveLatestVersion(feedUrl, packageName) { - const baseAddress = await getBaseAddress(feedUrl); - const nameLower = packageName.toLowerCase(); - - // Fetch version list: {baseAddress}/{lower_id}/index.json - const versionsUrl = `${baseAddress}${nameLower}/index.json`; - try { - const versionData = await downloadJson(versionsUrl); - const versions = versionData.versions || []; - - if (versions.length === 0) { - throw new Error('No versions found'); - } - - // Sort descending to prioritize latest date-based versions (e.g. 0.9.0-dev.YYYYMMDD...) - versions.sort((a, b) => b.localeCompare(a)); - - const latestVersion = versions[0]; - console.log(`[foundry-local] Installing latest version of Foundry Local Core: ${latestVersion}`); - return latestVersion; - } catch (e) { - throw new Error(`Failed to fetch versions for ${packageName} from ${versionsUrl}: ${e.message}`); - } -} - -async function resolvePackageRawUrl(feedUrl, packageName, version) { - const properBase = await getBaseAddress(feedUrl); - - // 3. 
Construct .nupkg URL (lowercase is standard for V3) - const nameLower = packageName.toLowerCase(); - const verLower = version.toLowerCase(); - - return `${properBase}${nameLower}/${verLower}/${nameLower}.${verLower}.nupkg`; -} - -async function installPackage(artifact, tempDir) { - const pkgName = artifact.name; - const feedUrl = artifact.feed; - - // Resolve version if not specified - let pkgVer = artifact.version; - let isNightly = artifact.nightly; - if (isNightly) { - console.log(` Resolving latest version for ${pkgName}...`); - pkgVer = await resolveLatestVersion(feedUrl, pkgName); - } - - console.log(` Resolving ${pkgName} ${pkgVer}...`); - const downloadUrl = await resolvePackageRawUrl(feedUrl, pkgName, pkgVer); - - const nupkgPath = path.join(tempDir, `${pkgName}.${pkgVer}.nupkg`); - - console.log(` Downloading ${downloadUrl}...`); - await downloadFile(downloadUrl, nupkgPath); - - console.log(` Extracting...`); - const zip = new AdmZip(nupkgPath); - const zipEntries = zip.getEntries(); - - // Pattern: runtimes/{RID}/native/{file}.{ext} - const ext = os.platform() === 'win32' ? '.dll' : os.platform() === 'darwin' ? '.dylib' : '.so'; - const targetPathPrefix = `runtimes/${RID}/native/`.toLowerCase(); - - let found = false; - - console.log(` Scanning for all ${ext} files in ${targetPathPrefix}...`); - const entries = zipEntries.filter(e => { - const entryPathLower = e.entryName.toLowerCase(); - return entryPathLower.includes(targetPathPrefix) && entryPathLower.endsWith(ext); - }); - - if (entries.length > 0) { - entries.forEach(entry => { - console.log(` Found ${entry.entryName}`); - zip.extractEntryTo(entry, BIN_DIR, false, true); - console.log(` Extracted ${entry.name}`); - }); - found = true; - } else { - console.warn(` ⚠ No files found for RID ${RID} in package.`); - } - - // After extracting, update the packages/@foundry-local-core/RID/package.json version to match the downloaded artifact - if (found && pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { - const pkgJsonPath = path.join(BIN_DIR, 'package.json'); - try { - if (fs.existsSync(pkgJsonPath)) { - const pkgJson = JSON.parse(fs.readFileSync(pkgJsonPath, 'utf8')); - pkgJson.version = pkgVer; - fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgJson, null, 2)); - console.log(` Updated package.json version to ${pkgVer}`); - } - } catch (e) { - console.warn(` Failed to update package.json version: ${e.message}`); - } - } -} - -async function main() { - const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'foundry-install-')); - try { - for (const artifact of ARTIFACTS) { - await installPackage(artifact, tempDir); - } - console.log('[foundry-local] ✓ Installation complete.'); - } catch (e) { - console.error(`[foundry-local] Installation failed: ${e.message}`); - process.exit(1); - } finally { - try { - fs.rmSync(tempDir, { recursive: true, force: true }); - } catch {} - } -} - -main(); diff --git a/sdk/js/script/pack.cjs b/sdk/js/script/pack.cjs new file mode 100644 index 00000000..32057c7e --- /dev/null +++ b/sdk/js/script/pack.cjs @@ -0,0 +1,32 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
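+//
+// Packs the SDK under two npm package names from one source tree: the
+// script temporarily rewrites package.json (name, install script, files
+// list) before running `npm pack`, then restores the original contents in
+// a finally block even if packing fails.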
+ +// Usage: +// node script/pack.cjs -> foundry-local-sdk-<version>.tgz +// node script/pack.cjs winml -> foundry-local-sdk-winml-<version>.tgz + +'use strict'; + +const fs = require('fs'); +const path = require('path'); +const { execSync } = require('child_process'); + +const pkgPath = path.join(__dirname, '..', 'package.json'); +const original = fs.readFileSync(pkgPath, 'utf8'); +const isWinML = process.argv[2] === 'winml'; + +try { + const pkg = JSON.parse(original); + if (isWinML) { + pkg.name = 'foundry-local-sdk-winml'; + pkg.scripts.install = 'node script/install-winml.cjs'; + pkg.files = ['dist', 'script/install-winml.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs']; + } else { + pkg.files = ['dist', 'script/install-standard.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs']; + } + fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2)); + execSync('npm pack', { cwd: path.join(__dirname, '..'), stdio: 'inherit' }); +} finally { + // Always restore original package.json + fs.writeFileSync(pkgPath, original); +} From 1c45c90173d5b0c44931d7def11355e702b162f1 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Fri, 27 Mar 2026 11:47:54 -0400 Subject: [PATCH 05/83] implements python sdk (#533) mvp --------- Co-authored-by: Prathik Rao Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> --- .github/workflows/build-python-steps.yml | 110 +++++++ .github/workflows/foundry-local-sdk-build.yml | 19 ++ sdk/python/.gitignore | 20 ++ sdk/python/LICENSE.txt | 21 ++ sdk/python/README.md | 243 ++++++++++++++ sdk/python/build_backend.py | 157 +++++++++ sdk/python/examples/chat_completion.py | 83 +++++ sdk/python/pyproject.toml | 55 ++++ sdk/python/requirements-dev.txt | 5 + sdk/python/requirements-winml.txt | 7 + sdk/python/requirements.txt | 7 + sdk/python/src/__init__.py | 23 ++ sdk/python/src/catalog.py | 144 +++++++++ sdk/python/src/configuration.py | 163 ++++++++++ sdk/python/src/detail/__init__.py | 25 ++ sdk/python/src/detail/core_interop.py | 306 ++++++++++++++++++ sdk/python/src/detail/model_data_types.py | 76 +++++ sdk/python/src/detail/model_load_manager.py | 166 ++++++++++ sdk/python/src/detail/utils.py | 294 +++++++++++++++++ sdk/python/src/exception.py | 7 + sdk/python/src/foundry_local_manager.py | 118 +++++++ sdk/python/src/imodel.py | 91 ++++++ sdk/python/src/logging_helper.py | 30 ++ sdk/python/src/model.py | 133 ++++++++ sdk/python/src/model_variant.py | 130 ++++++++ sdk/python/src/openai/__init__.py | 10 + sdk/python/src/openai/audio_client.py | 153 +++++++++ sdk/python/src/openai/chat_client.py | 290 +++++++++++++++++ sdk/python/src/version.py | 6 + sdk/python/test/README.md | 79 +++++ sdk/python/test/__init__.py | 0 sdk/python/test/conftest.py | 145 +++++++++ sdk/python/test/detail/__init__.py | 0 .../test/detail/test_model_load_manager.py | 144 +++++++++ sdk/python/test/openai/__init__.py | 0 sdk/python/test/openai/test_audio_client.py | 156 +++++++++ sdk/python/test/openai/test_chat_client.py | 243 ++++++++++++++ sdk/python/test/test_catalog.py | 74 +++++ sdk/python/test/test_foundry_local_manager.py | 22 ++ sdk/python/test/test_model.py | 58 ++++ 40 files changed, 3813 insertions(+) create mode 100644 .github/workflows/build-python-steps.yml create mode 100644 sdk/python/.gitignore create mode 100644 sdk/python/LICENSE.txt create mode 100644 sdk/python/README.md create mode 100644 sdk/python/build_backend.py create mode 100644 sdk/python/examples/chat_completion.py create mode 100644 sdk/python/pyproject.toml create mode 100644 
sdk/python/requirements-dev.txt create mode 100644 sdk/python/requirements-winml.txt create mode 100644 sdk/python/requirements.txt create mode 100644 sdk/python/src/__init__.py create mode 100644 sdk/python/src/catalog.py create mode 100644 sdk/python/src/configuration.py create mode 100644 sdk/python/src/detail/__init__.py create mode 100644 sdk/python/src/detail/core_interop.py create mode 100644 sdk/python/src/detail/model_data_types.py create mode 100644 sdk/python/src/detail/model_load_manager.py create mode 100644 sdk/python/src/detail/utils.py create mode 100644 sdk/python/src/exception.py create mode 100644 sdk/python/src/foundry_local_manager.py create mode 100644 sdk/python/src/imodel.py create mode 100644 sdk/python/src/logging_helper.py create mode 100644 sdk/python/src/model.py create mode 100644 sdk/python/src/model_variant.py create mode 100644 sdk/python/src/openai/__init__.py create mode 100644 sdk/python/src/openai/audio_client.py create mode 100644 sdk/python/src/openai/chat_client.py create mode 100644 sdk/python/src/version.py create mode 100644 sdk/python/test/README.md create mode 100644 sdk/python/test/__init__.py create mode 100644 sdk/python/test/conftest.py create mode 100644 sdk/python/test/detail/__init__.py create mode 100644 sdk/python/test/detail/test_model_load_manager.py create mode 100644 sdk/python/test/openai/__init__.py create mode 100644 sdk/python/test/openai/test_audio_client.py create mode 100644 sdk/python/test/openai/test_chat_client.py create mode 100644 sdk/python/test/test_catalog.py create mode 100644 sdk/python/test/test_foundry_local_manager.py create mode 100644 sdk/python/test/test_model.py diff --git a/.github/workflows/build-python-steps.yml b/.github/workflows/build-python-steps.yml new file mode 100644 index 00000000..dc180bb4 --- /dev/null +++ b/.github/workflows/build-python-steps.yml @@ -0,0 +1,110 @@ +name: Build Python SDK + +on: + workflow_call: + inputs: + version: + required: true + type: string + useWinML: + required: false + type: boolean + default: false + platform: + required: false + type: string + default: 'windows' + +permissions: + contents: read + +jobs: + build: + runs-on: ${{ inputs.platform }}-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + clean: true + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + # Clone test-data-shared from Azure DevOps (models for integration tests) + - name: Checkout test-data-shared from Azure DevOps + shell: pwsh + working-directory: ${{ github.workspace }}/.. + run: | + $pat = "${{ secrets.AZURE_DEVOPS_PAT }}" + $encodedPat = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes(":$pat")) + + git config --global http.https://dev.azure.com.extraheader "AUTHORIZATION: Basic $encodedPat" + + git lfs install + git clone --depth 1 https://dev.azure.com/microsoft/windows.ai.toolkit/_git/test-data-shared test-data-shared + + Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" + + - name: Checkout specific commit in test-data-shared + shell: pwsh + working-directory: ${{ github.workspace }}/../test-data-shared + run: | + git checkout 231f820fe285145b7ea4a449b112c1228ce66a41 + if ($LASTEXITCODE -ne 0) { + Write-Error "Git checkout failed." 
+ exit 1 + } + + - name: Install build tool + run: | + python -m pip install build + + - name: Configure pip for Azure Artifacts + run: | + pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.extra-index-url https://pypi.org/simple/ + pip config set global.pre true + + - name: Set package version + working-directory: sdk/python + run: echo '__version__ = "${{ inputs.version }}"' > src/version.py + + - name: Build wheel (Cross-Platform) + if: ${{ inputs.useWinML == false }} + working-directory: sdk/python + run: python -m build --wheel --outdir dist/ + + - name: Build wheel (WinML) + if: ${{ inputs.useWinML == true }} + working-directory: sdk/python + run: python -m build --wheel -C winml=true --outdir dist/ + + - name: Install built wheel + working-directory: sdk/python + shell: pwsh + run: | + $wheel = (Get-ChildItem dist/*.whl | Select-Object -First 1).FullName + pip install $wheel + + - name: Install test dependencies + run: pip install coverage "pytest>=7.0.0" "pytest-timeout>=2.1.0" + + - name: Run tests + working-directory: sdk/python + run: python -m pytest test/ -v + + - name: Upload Python packages + uses: actions/upload-artifact@v4 + with: + name: python-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }} + path: sdk/python/dist/* + + - name: Upload flcore logs + uses: actions/upload-artifact@v4 + if: always() + with: + name: python-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }}-logs + path: sdk/python/logs/** diff --git a/.github/workflows/foundry-local-sdk-build.yml b/.github/workflows/foundry-local-sdk-build.yml index 9ac5fe04..13eddf6d 100644 --- a/.github/workflows/foundry-local-sdk-build.yml +++ b/.github/workflows/foundry-local-sdk-build.yml @@ -29,6 +29,12 @@ jobs: version: '0.9.0.${{ github.run_number }}' platform: 'windows' secrets: inherit + build-python-windows: + uses: ./.github/workflows/build-python-steps.yml + with: + version: '0.9.0.${{ github.run_number }}' + platform: 'windows' + secrets: inherit build-rust-windows: uses: ./.github/workflows/build-rust-steps.yml with: @@ -50,6 +56,13 @@ jobs: platform: 'windows' useWinML: true secrets: inherit + build-python-windows-WinML: + uses: ./.github/workflows/build-python-steps.yml + with: + version: '0.9.0.${{ github.run_number }}' + platform: 'windows' + useWinML: true + secrets: inherit build-rust-windows-WinML: + uses: ./.github/workflows/build-rust-steps.yml with: @@ -70,6 +83,12 @@ jobs: version: '0.9.0.${{ github.run_number }}' platform: 'macos' secrets: inherit + build-python-macos: + uses: ./.github/workflows/build-python-steps.yml + with: + version: '0.9.0.${{ github.run_number }}' + platform: 'macos' + secrets: inherit build-rust-macos: uses: ./.github/workflows/build-rust-steps.yml with: diff --git a/sdk/python/.gitignore b/sdk/python/.gitignore new file mode 100644 index 00000000..543c109e --- /dev/null +++ b/sdk/python/.gitignore @@ -0,0 +1,20 @@ +# Native binaries downloaded from NuGet (per-platform) +packages/ + +# Build / egg info +*.egg-info/ +dist/ +build/ +*.whl +*.tar.gz +__pycache__/ + +# Logs +logs/ + +# IDE +.vscode/ +.idea/ + +# pytest +.pytest_cache/ diff --git a/sdk/python/LICENSE.txt b/sdk/python/LICENSE.txt new file mode 100644 index 00000000..48bc6bb4 --- /dev/null +++ b/sdk/python/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software 
and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/sdk/python/README.md b/sdk/python/README.md new file mode 100644 index 00000000..7cc8b44c --- /dev/null +++ b/sdk/python/README.md @@ -0,0 +1,243 @@ +# Foundry Local Python SDK + +The Foundry Local Python SDK provides a Python interface for interacting with local AI models via the Foundry Local Core native library. It allows you to discover, download, load, and run inference on models directly on your local machine — no cloud required. + +## Features + +- **Model Discovery** – browse and search the model catalog +- **Model Management** – download, cache, load, and unload models +- **Chat Completions** – OpenAI-compatible chat API (non-streaming and streaming) +- **Tool Calling** – function-calling support with chat completions +- **Audio Transcription** – Whisper-based speech-to-text (non-streaming and streaming) +- **Built-in Web Service** – optional HTTP endpoint for multi-process scenarios +- **Native Performance** – ctypes FFI to AOT-compiled Foundry Local Core + +## Installation + +Two package variants are published — choose the one that matches your target hardware: + +| Variant | Package | Native backends | +|---|---|---| +| Standard (cross-platform) | `foundry-local-sdk` | CPU / DirectML / CUDA | +| WinML (Windows only) | `foundry-local-sdk-winml` | Windows ML + all standard backends | + +```bash +# Standard (cross-platform — Linux, macOS, Windows) +pip install foundry-local-sdk + +# WinML (Windows only) +pip install foundry-local-sdk-winml +``` + +Each package installs the correct native binaries (`foundry-local-core`, `onnxruntime-core`, `onnxruntime-genai-core`) as wheel dependencies. They are mutually exclusive — install only one per environment. WinML is auto-detected at runtime: if the WinML package is installed, the SDK automatically enables the Windows App Runtime Bootstrap. + +### Building from source + +```bash +cd sdk/python + +# Standard wheel +python -m build --wheel + +# WinML wheel (uses the build_backend.py shim) +python -m build --wheel -C winml=true +``` + +For editable installs during development (native packages installed separately via `foundry-local-install`): + +```bash +pip install -e . +``` + +### Installing native binaries for development / CI + +When working from source the native packages are not pulled in automatically. 
Use the `foundry-local-install` CLI to install them: + +```bash +# Standard +foundry-local-install + +# WinML (Windows only) +foundry-local-install --winml +``` + +Add `--verbose` to print the resolved binary paths after installation: + +```bash +foundry-local-install --verbose +foundry-local-install --winml --verbose +``` + +> **Note:** The standard and WinML native packages use different PyPI package names (`foundry-local-core` vs `foundry-local-core-winml`) so they can coexist in the same pip index, but they should not be installed in the same Python environment simultaneously. + +## Quick Start + +```python +from foundry_local_sdk import Configuration, FoundryLocalManager + +# 1. Initialize +config = Configuration(app_name="MyApp") +FoundryLocalManager.initialize(config) +manager = FoundryLocalManager.instance + +# 2. Discover models +catalog = manager.catalog +models = catalog.list_models() +for m in models: + print(f" {m.alias}") + +# 3. Load a model +model = catalog.get_model("phi-3.5-mini") +model.load() + +# 4. Chat +client = model.get_chat_client() +response = client.complete_chat([ + {"role": "user", "content": "Why is the sky blue?"} +]) +print(response.choices[0].message.content) + +# 5. Cleanup +model.unload() +``` + +## Usage + +### Initialization + +Create a `Configuration` and initialize the singleton `FoundryLocalManager`. + +```python +from foundry_local_sdk import Configuration, FoundryLocalManager +from foundry_local_sdk.configuration import LogLevel + +config = Configuration( + app_name="MyApp", + model_cache_dir="/path/to/cache", # optional + log_level=LogLevel.INFORMATION, # optional (default: Warning) + additional_settings={"Bootstrap": "false"}, # optional +) +FoundryLocalManager.initialize(config) +manager = FoundryLocalManager.instance +``` + +### Discovering Models + +```python +catalog = manager.catalog + +# List all models in the catalog +models = catalog.list_models() + +# Get a specific model by alias +model = catalog.get_model("qwen2.5-0.5b") + +# Get a specific variant by ID +variant = catalog.get_model_variant("qwen2.5-0.5b-instruct-generic-cpu:4") + +# List locally cached models +cached = catalog.get_cached_models() + +# List currently loaded models +loaded = catalog.get_loaded_models() +``` + +### Loading and Running a Model + +```python +model = catalog.get_model("qwen2.5-0.5b") + +# Select a specific variant (optional – defaults to highest-priority cached variant) +cached = catalog.get_cached_models() +variant = next(v for v in cached if v.alias == "qwen2.5-0.5b") +model.select_variant(variant) + +# Load into memory +model.load() + +# Non-streaming chat +client = model.get_chat_client() +client.settings.temperature = 0.0 +client.settings.max_tokens = 500 + +result = client.complete_chat([ + {"role": "user", "content": "What is 7 multiplied by 6?"} +]) +print(result.choices[0].message.content) # "42" + +# Streaming chat +messages = [{"role": "user", "content": "Tell me a joke"}] + +def on_chunk(chunk): + delta = chunk.choices[0].delta + if delta and delta.content: + print(delta.content, end="", flush=True) + +client.complete_streaming_chat(messages, on_chunk) + +# Unload when done +model.unload() +``` + +### Web Service (Optional) + +Start a built-in HTTP server for multi-process access. + +```python +manager.start_web_service() +print(f"Listening on: {manager.urls}") + +# ... use the service ... 
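+# For example, a second process could point the openai package at the
+# printed URL (illustrative sketch; the exact route and model id here are
+# assumptions, not part of this SDK's documented API):
+#
+#   from openai import OpenAI
+#   client = OpenAI(base_url=f"{manager.urls[0]}/v1", api_key="unused")
+#   client.chat.completions.create(
+#       model="qwen2.5-0.5b",
+#       messages=[{"role": "user", "content": "Hello"}],
+#   )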
+ +manager.stop_web_service() +``` + +## API Reference + +### Core Classes + +| Class | Description | +|---|---| +| `Configuration` | SDK configuration (app name, cache dir, log level, web service settings) | +| `FoundryLocalManager` | Singleton entry point – initialization, catalog access, web service | +| `Catalog` | Model discovery – listing, lookup by alias/ID, cached/loaded queries | +| `Model` | Groups variants under one alias – select, load, unload, create clients | +| `ModelVariant` | Specific model variant – download, cache, load/unload, create clients | + +### OpenAI Clients + +| Class | Description | +|---|---| +| `ChatClient` | Chat completions (non-streaming and streaming) with tool calling | +| `AudioClient` | Audio transcription (non-streaming and streaming) | + +### Internal / Detail + +| Class | Description | +|---|---| +| `CoreInterop` | ctypes FFI layer to the native Foundry Local Core library | +| `ModelLoadManager` | Load/unload via core interop or external web service | +| `ModelInfo` | Pydantic model for catalog entries | + +### CLI entry point + +| Function | CLI name | Description | +|---|---|---| +| `foundry_local_sdk.detail.utils.foundry_local_install` | `foundry-local-install` | Install and verify native binaries (`--winml` for WinML variant) | + +> **Migration note:** The function was previously named `verify_native_install`. The public CLI name (`foundry-local-install`) and its behaviour are unchanged; only the Python function name in `foundry_local_sdk.detail.utils` was updated to `foundry_local_install` for consistency. + +## Running Tests + +```bash +pip install -r requirements-dev.txt +python -m pytest test/ -v +``` + +See [test/README.md](test/README.md) for detailed test setup and structure. + +## Running Examples + +```bash +python examples/chat_completion.py +``` \ No newline at end of file diff --git a/sdk/python/build_backend.py b/sdk/python/build_backend.py new file mode 100644 index 00000000..b4b91a1b --- /dev/null +++ b/sdk/python/build_backend.py @@ -0,0 +1,157 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""PEP 517 build backend shim for foundry-local-sdk. + +Delegates all hooks to ``setuptools.build_meta`` after optionally +patching ``pyproject.toml`` and ``requirements.txt`` in-place for the +WinML variant build. + +Usage +----- +Standard (default):: + + python -m build --wheel + +WinML variant:: + + python -m build --wheel -C winml=true + +Environment variable fallback (useful in CI pipelines):: + + FOUNDRY_VARIANT=winml python -m build --wheel +""" + +from __future__ import annotations + +import contextlib +import os +import shutil +from collections.abc import Generator +from pathlib import Path + +import setuptools.build_meta as _sb + +# --------------------------------------------------------------------------- +# Paths +# --------------------------------------------------------------------------- + +_PROJECT_ROOT = Path(__file__).parent +_PYPROJECT = _PROJECT_ROOT / "pyproject.toml" +_REQUIREMENTS = _PROJECT_ROOT / "requirements.txt" +_REQUIREMENTS_WINML = _PROJECT_ROOT / "requirements-winml.txt" + +# The exact string in pyproject.toml to patch for the WinML variant. 
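+# A plain string replace is used here (rather than a TOML parser) so the
+# backend needs no dependencies beyond setuptools; _patch_for_winml below
+# raises a RuntimeError if this exact string is ever renamed in
+# pyproject.toml.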
+_STANDARD_NAME = 'name = "foundry-local-sdk"' +_WINML_NAME = 'name = "foundry-local-sdk-winml"' + + +# --------------------------------------------------------------------------- +# Variant detection +# --------------------------------------------------------------------------- + + +def _is_winml(config_settings: dict | None) -> bool: + """Return True when the WinML variant should be built. + + Checks ``config_settings["winml"]`` first (set via ``-C winml=true``), + then falls back to the ``FOUNDRY_VARIANT`` environment variable. + """ + if config_settings and str(config_settings.get("winml", "")).lower() == "true": + return True + return os.environ.get("FOUNDRY_VARIANT", "").lower() == "winml" + + +# --------------------------------------------------------------------------- +# In-place patching context manager +# --------------------------------------------------------------------------- + + +@contextlib.contextmanager +def _patch_for_winml() -> Generator[None, None, None]: + """Temporarily patch ``pyproject.toml`` and ``requirements.txt`` for WinML. + + Both files are restored to their original content in the ``finally`` + block, even if the build raises an exception. + """ + pyproject_original = _PYPROJECT.read_text(encoding="utf-8") + requirements_original = _REQUIREMENTS.read_text(encoding="utf-8") + try: + # Patch package name (simple string replacement — no TOML writer needed) + patched_pyproject = pyproject_original.replace(_STANDARD_NAME, _WINML_NAME, 1) + if patched_pyproject == pyproject_original: + raise RuntimeError( + f"Could not find {_STANDARD_NAME!r} in pyproject.toml — " + "WinML name patch failed." + ) + _PYPROJECT.write_text(patched_pyproject, encoding="utf-8") + + # Swap requirements.txt with the WinML variant + shutil.copy2(_REQUIREMENTS_WINML, _REQUIREMENTS) + + yield + finally: + _PYPROJECT.write_text(pyproject_original, encoding="utf-8") + _REQUIREMENTS.write_text(requirements_original, encoding="utf-8") + + +# --------------------------------------------------------------------------- +# PEP 517 hook delegation +# --------------------------------------------------------------------------- + + +def get_requires_for_build_wheel(config_settings=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.get_requires_for_build_wheel(config_settings) + return _sb.get_requires_for_build_wheel(config_settings) + + +def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.prepare_metadata_for_build_wheel(metadata_directory, config_settings) + return _sb.prepare_metadata_for_build_wheel(metadata_directory, config_settings) + + +def build_wheel(wheel_directory, config_settings=None, metadata_directory=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.build_wheel(wheel_directory, config_settings, metadata_directory) + return _sb.build_wheel(wheel_directory, config_settings, metadata_directory) + + +def get_requires_for_build_editable(config_settings=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.get_requires_for_build_editable(config_settings) + return _sb.get_requires_for_build_editable(config_settings) + + +def prepare_metadata_for_build_editable(metadata_directory, config_settings=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.prepare_metadata_for_build_editable(metadata_directory, config_settings) + return 
_sb.prepare_metadata_for_build_editable(metadata_directory, config_settings) + + +def build_editable(wheel_directory, config_settings=None, metadata_directory=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.build_editable(wheel_directory, config_settings, metadata_directory) + return _sb.build_editable(wheel_directory, config_settings, metadata_directory) + + +def get_requires_for_build_sdist(config_settings=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.get_requires_for_build_sdist(config_settings) + return _sb.get_requires_for_build_sdist(config_settings) + + +def build_sdist(sdist_directory, config_settings=None): + if _is_winml(config_settings): + with _patch_for_winml(): + return _sb.build_sdist(sdist_directory, config_settings) + return _sb.build_sdist(sdist_directory, config_settings) diff --git a/sdk/python/examples/chat_completion.py b/sdk/python/examples/chat_completion.py new file mode 100644 index 00000000..60eefd5e --- /dev/null +++ b/sdk/python/examples/chat_completion.py @@ -0,0 +1,83 @@ +#!/usr/bin/env python3 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +"""Example: Chat completion using Foundry Local Python SDK. + +Demonstrates basic chat completion with the Foundry Local runtime, +including model discovery, loading, and inference. +""" + +from foundry_local_sdk import Configuration, FoundryLocalManager + +def main(): + # 1. Initialize the SDK + config = Configuration(app_name="ChatCompletionExample") + print("Initializing Foundry Local Manager") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # 2. Print available models in the catalog and cache + models = manager.catalog.list_models() + print("Available models in catalog:") + for m in models: + print(f" - {m.alias} ({m.id})") + + cached_models = manager.catalog.get_cached_models() + print("\nCached models:") + for m in cached_models: + print(f" - {m.alias} ({m.id})") + + CACHED_MODEL_ALIAS = "qwen2.5-0.5b" + + # 3. Find a model from the cache (+ download if not cached) + model = manager.catalog.get_model(CACHED_MODEL_ALIAS) + if model is None: + print(f"Model '{CACHED_MODEL_ALIAS}' not found in catalog.") + print("Available models:") + for m in manager.catalog.list_models(): + print(f" - {m.alias} ({m.id})") + return + + if not model.is_cached: + print(f"Downloading {model.alias}...") + model.download(progress_callback=lambda pct: print(f" {pct:.1f}%", end="\r")) + print() + + # 4. Load the model + print(f"Loading {model.alias}...", end="") + model.load() + print("loaded!") + + try: + # 5. Create a chat client and send a message + client = model.get_chat_client() + + print("\n--- Non-streaming ---") + response = client.complete_chat( + messages=[{"role": "user", "content": "What is the capital of France? Reply briefly."}] + ) + print(f"Response: {response.choices[0].message.content}") + + # 6. Streaming + print("\n--- Streaming ---") + for chunk in client.complete_streaming_chat( + [{"role": "user", "content": "Tell me a short joke."}] + ): + if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="", flush=True) + print() # newline after streaming + + except Exception as e: + print(f"Error during inference: {e}") + + finally: + # 7. 
Cleanup + model.unload() + print("\nModel unloaded.") + + +if __name__ == "__main__": + main() diff --git a/sdk/python/pyproject.toml b/sdk/python/pyproject.toml new file mode 100644 index 00000000..ef93b6f7 --- /dev/null +++ b/sdk/python/pyproject.toml @@ -0,0 +1,55 @@ +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "build_backend" +backend-path = ["."] + +[project] +name = "foundry-local-sdk" +dynamic = ["version", "dependencies"] +description = "Foundry Local Manager Python SDK: Control-plane SDK for Foundry Local." +readme = "README.md" +requires-python = ">=3.11" +license = "MIT" +license-files = ["LICENSE.txt"] +authors = [ + {name = "Microsoft Corporation", email = "foundrylocaldevs@microsoft.com"}, +] +classifiers = [ + "Development Status :: 3 - Alpha", + "Intended Audience :: Developers", + "Topic :: Scientific/Engineering", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: Software Development", + "Topic :: Software Development :: Libraries", + "Topic :: Software Development :: Libraries :: Python Modules", + "Programming Language :: Python", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] + +[project.urls] +Homepage = "https://github.com/microsoft/Foundry-Local" + +[project.scripts] +foundry-local-install = "foundry_local_sdk.detail.utils:foundry_local_install" + +[tool.setuptools.package-dir] +foundry_local_sdk = "src" +"foundry_local_sdk.detail" = "src/detail" +"foundry_local_sdk.openai" = "src/openai" + +[tool.setuptools] +packages = ["foundry_local_sdk", "foundry_local_sdk.detail", "foundry_local_sdk.openai"] + +[tool.setuptools.dynamic] +version = {attr = "foundry_local_sdk.version.__version__"} +dependencies = {file = ["requirements.txt"]} + +[tool.pytest.ini_options] +testpaths = ["test"] +python_files = ["test_*.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +timeout = 60 diff --git a/sdk/python/requirements-dev.txt b/sdk/python/requirements-dev.txt new file mode 100644 index 00000000..aea40875 --- /dev/null +++ b/sdk/python/requirements-dev.txt @@ -0,0 +1,5 @@ +-r requirements.txt +build +coverage +pytest +pytest-timeout diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt new file mode 100644 index 00000000..0fb9f9c2 --- /dev/null +++ b/sdk/python/requirements-winml.txt @@ -0,0 +1,7 @@ +pydantic>=2.0.0 +requests>=2.32.4 +openai>=2.24.0 +# WinML native binary packages from the ORT-Nightly PyPI feed. +foundry-local-core-winml +onnxruntime-core==1.24.3 +onnxruntime-genai-core==0.12.1 \ No newline at end of file diff --git a/sdk/python/requirements.txt b/sdk/python/requirements.txt new file mode 100644 index 00000000..801f577d --- /dev/null +++ b/sdk/python/requirements.txt @@ -0,0 +1,7 @@ +pydantic>=2.0.0 +requests>=2.32.4 +openai>=2.24.0 +# Standard native binary packages from the ORT-Nightly PyPI feed. +foundry-local-core==0.9.0.dev20260327060216 +onnxruntime-core==1.24.3 +onnxruntime-genai-core==0.12.1 \ No newline at end of file diff --git a/sdk/python/src/__init__.py b/sdk/python/src/__init__.py new file mode 100644 index 00000000..14534d19 --- /dev/null +++ b/sdk/python/src/__init__.py @@ -0,0 +1,23 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +import logging +import sys + +from .configuration import Configuration +from .foundry_local_manager import FoundryLocalManager +from .version import __version__ + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.WARNING) + +_sc = logging.StreamHandler(stream=sys.stdout) +_formatter = logging.Formatter( + "[foundry-local] | %(asctime)s | %(levelname)-8s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S" +) +_sc.setFormatter(_formatter) +_logger.addHandler(_sc) +_logger.propagate = False + +__all__ = ["Configuration", "FoundryLocalManager", "__version__"] diff --git a/sdk/python/src/catalog.py b/sdk/python/src/catalog.py new file mode 100644 index 00000000..767a9f08 --- /dev/null +++ b/sdk/python/src/catalog.py @@ -0,0 +1,144 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from __future__ import annotations + +import datetime +import logging +import threading +from typing import List, Optional +from pydantic import TypeAdapter + +from .model import Model +from .model_variant import ModelVariant + +from .detail.core_interop import CoreInterop, get_cached_model_ids +from .detail.model_data_types import ModelInfo +from .detail.model_load_manager import ModelLoadManager +from .exception import FoundryLocalException + +logger = logging.getLogger(__name__) + +class Catalog(): + """Model catalog for discovering and querying available models. + + Provides methods to list models, look up by alias or ID, and query + cached or loaded models. The model list is refreshed every 6 hours. + """ + + def __init__(self, model_load_manager: ModelLoadManager, core_interop: CoreInterop): + """Initialize the Catalog. + + Args: + model_load_manager: Manager for loading/unloading models. + core_interop: Native interop layer for Foundry Local Core. 
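+
+        Raises:
+            FoundryLocalException: If the catalog name cannot be retrieved
+                from Foundry Local Core.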
+ """ + self._core_interop = core_interop + self._model_load_manager = model_load_manager + self._lock = threading.Lock() + + self._models: List[ModelInfo] = [] + self._model_alias_to_model = {} + self._model_id_to_model_variant = {} + self._last_fetch = datetime.datetime.min + + response = core_interop.execute_command("get_catalog_name") + if response.error is not None: + raise FoundryLocalException(f"Failed to get catalog name: {response.error}") + + self.name = response.data + + def _update_models(self): + with self._lock: + # refresh every 6 hours + if (datetime.datetime.now() - self._last_fetch) < datetime.timedelta(hours=6): + return + + response = self._core_interop.execute_command("get_model_list") + if response.error is not None: + raise FoundryLocalException(f"Failed to get model list: {response.error}") + + model_list_json = response.data + + adapter = TypeAdapter(list[ModelInfo]) + models: List[ModelInfo] = adapter.validate_json(model_list_json) + + self._model_alias_to_model.clear() + self._model_id_to_model_variant.clear() + + for model_info in models: + variant = ModelVariant(model_info, self._model_load_manager, self._core_interop) + + value = self._model_alias_to_model.get(model_info.alias) + if value is None: + value = Model(variant, self._core_interop) + self._model_alias_to_model[model_info.alias] = value + else: + value._add_variant(variant) + + self._model_id_to_model_variant[variant.id] = variant + + self._last_fetch = datetime.datetime.now() + self._models = models + + def list_models(self) -> List[Model]: + """ + List the available models in the catalog. + :return: List of Model instances. + """ + self._update_models() + return list(self._model_alias_to_model.values()) + + def get_model(self, model_alias: str) -> Optional[Model]: + """ + Lookup a model by its alias. + :param model_alias: Model alias. + :return: Model if found. + """ + self._update_models() + return self._model_alias_to_model.get(model_alias) + + def get_model_variant(self, model_id: str) -> Optional[ModelVariant]: + """ + Lookup a model variant by its unique model id. + :param model_id: Model id. + :return: Model variant if found. + """ + self._update_models() + return self._model_id_to_model_variant.get(model_id) + + def get_cached_models(self) -> List[ModelVariant]: + """ + Get a list of currently downloaded models from the model cache. + :return: List of ModelVariant instances. + """ + self._update_models() + + cached_model_ids = get_cached_model_ids(self._core_interop) + + cached_models = [] + for model_id in cached_model_ids: + model_variant = self._model_id_to_model_variant.get(model_id) + if model_variant is not None: + cached_models.append(model_variant) + + return cached_models + + def get_loaded_models(self) -> List[ModelVariant]: + """ + Get a list of the currently loaded models. + :return: List of ModelVariant instances. + """ + self._update_models() + + loaded_model_ids = self._model_load_manager.list_loaded() + loaded_models = [] + + for model_id in loaded_model_ids: + model_variant = self._model_id_to_model_variant.get(model_id) + if model_variant is not None: + loaded_models.append(model_variant) + + return loaded_models \ No newline at end of file diff --git a/sdk/python/src/configuration.py b/sdk/python/src/configuration.py new file mode 100644 index 00000000..23967efb --- /dev/null +++ b/sdk/python/src/configuration.py @@ -0,0 +1,163 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +import logging +import re + +from typing import Optional, Dict +from urllib.parse import urlparse + +from .exception import FoundryLocalException + +from .logging_helper import LogLevel + +logger = logging.getLogger(__name__) + + +class Configuration: + """Configuration for Foundry Local SDK. + + Configuration values: + app_name: Your application name. MUST be set to a valid name. + foundry_local_core_path: Path to the Foundry Local Core native library. + app_data_dir: Application data directory. + Default: {home}/.{appname}, where {home} is the user's home directory + and {appname} is the app_name value. + model_cache_dir: Model cache directory. + Default: {appdata}/cache/models, where {appdata} is the app_data_dir value. + logs_dir: Log directory. + Default: {appdata}/logs + log_level: Logging level. + Valid values are: Verbose, Debug, Information, Warning, Error, Fatal. + Default: LogLevel.WARNING + web: Optional configuration for the built-in web service. + NOTE: This is not included in all builds. + additional_settings: Additional settings that Foundry Local Core can consume. + Keys and values are strings. + """ + + class WebService: + """Configuration settings if the optional web service is used.""" + + def __init__( + self, + urls: Optional[str] = None, + external_url: Optional[str] = None + ): + """Initialize WebService configuration. + + Args: + urls: Url/s to bind to the web service when + FoundryLocalManager.start_web_service() is called. + After startup, FoundryLocalManager.urls will contain the actual URL/s + the service is listening on. + Default: 127.0.0.1:0, which binds to a random ephemeral port. + Multiple URLs can be specified as a semi-colon separated list. + external_url: If the web service is running in a separate process, + it will be accessed using this URI. + Both processes should be using the same version of the SDK. + If a random port is assigned when creating the web service in the + external process the actual port must be provided here. + """ + self.urls = urls + self.external_url = external_url + + def __init__( + self, + app_name: str, + foundry_local_core_path: Optional[str] = None, + app_data_dir: Optional[str] = None, + model_cache_dir: Optional[str] = None, + logs_dir: Optional[str] = None, + log_level: Optional[LogLevel] = LogLevel.WARNING, + web: Optional['Configuration.WebService'] = None, + additional_settings: Optional[Dict[str, str]] = None + ): + """Initialize Configuration. + + Args: + app_name: Your application name. MUST be set to a valid name. + app_data_dir: Application data directory. Optional. + model_cache_dir: Model cache directory. Optional. + logs_dir: Log directory. Optional. + log_level: Logging level. Default: LogLevel.WARNING + web: Optional configuration for the built-in web service. + additional_settings: Additional settings dictionary. Optional. + """ + self.app_name = app_name + self.foundry_local_core_path = foundry_local_core_path + self.app_data_dir = app_data_dir + self.model_cache_dir = model_cache_dir + self.logs_dir = logs_dir + self.log_level = log_level + self.web = web + self.additional_settings = additional_settings + + # make sure app name only has safe characters as it's used as a directory name + self._safe_app_name_chars = re.compile(r'^[A-Za-z0-9._-]+$') + + def validate(self) -> None: + """Validate the configuration. + + Raises: + FoundryLocalException: If configuration is invalid. 
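+                Specifically, ``app_name`` is empty or contains characters
+                outside ``[A-Za-z0-9._-]``, or ``web.external_url`` is set
+                without a valid, non-zero port.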
+ """ + if not self.app_name: + raise FoundryLocalException( + "Configuration AppName must be set to a valid application name." + ) + + # Check for invalid filename characters + if not bool(self._safe_app_name_chars.match(self.app_name)): + raise FoundryLocalException("Configuration AppName value contains invalid characters.") + + if self.web is not None and self.web.external_url is not None: + parsed = urlparse(self.web.external_url) + if not parsed.port or parsed.port == 0: + raise FoundryLocalException("Configuration Web.ExternalUrl has invalid port.") + + def as_dictionary(self) -> Dict[str, str]: + """Convert configuration to a dictionary of string key-value pairs. + + Returns: + Dictionary containing configuration values as strings. + + Raises: + FoundryLocalException: If AppName is not set to a valid value. + """ + if not self.app_name: + raise FoundryLocalException( + "Configuration AppName must be set to a valid application name." + ) + + config_values = { + "AppName": self.app_name, + "LogLevel": str(self.log_level) + } + + if self.app_data_dir: + config_values["AppDataDir"] = self.app_data_dir + + if self.model_cache_dir: + config_values["ModelCacheDir"] = self.model_cache_dir + + if self.logs_dir: + config_values["LogsDir"] = self.logs_dir + + if self.foundry_local_core_path: + config_values["FoundryLocalCorePath"] = self.foundry_local_core_path + + if self.web is not None: + if self.web.urls is not None: + config_values["WebServiceUrls"] = self.web.urls + + # Emit any additional settings. + if self.additional_settings is not None: + for key, value in self.additional_settings.items(): + if not key: + continue # skip empty keys + config_values[key] = value if value is not None else "" + + return config_values diff --git a/sdk/python/src/detail/__init__.py b/sdk/python/src/detail/__init__.py new file mode 100644 index 00000000..d9a7cbc0 --- /dev/null +++ b/sdk/python/src/detail/__init__.py @@ -0,0 +1,25 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""This file is required for Python to treat this directory as a package, +enabling dotted imports such as ``foundry_local_sdk.detail.core_interop``. + +The re-exports below are optional convenience aliases so callers can write +``from foundry_local_sdk.detail import CoreInterop`` instead of importing +from the individual submodule directly. +""" + +from .core_interop import CoreInterop, InteropRequest, Response +from .model_data_types import ModelInfo, DeviceType, Runtime +from .model_load_manager import ModelLoadManager + +__all__ = [ + "CoreInterop", + "DeviceType", + "InteropRequest", + "ModelInfo", + "ModelLoadManager", + "Response", + "Runtime", +] diff --git a/sdk/python/src/detail/core_interop.py b/sdk/python/src/detail/core_interop.py new file mode 100644 index 00000000..7a6bb08c --- /dev/null +++ b/sdk/python/src/detail/core_interop.py @@ -0,0 +1,306 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- + +from __future__ import annotations + +import ctypes +import json +import logging +import os +import sys + +from dataclasses import dataclass +from pathlib import Path +from typing import Callable, Dict, Optional +from ..configuration import Configuration +from ..exception import FoundryLocalException +from .utils import get_native_binary_paths, NativeBinaryPaths, create_ort_symlinks, _get_ext + +logger = logging.getLogger(__name__) + +class InteropRequest: + """Request payload for a Foundry Local Core command. + + Args: + params: Dictionary of key-value string parameters. + """ + + def __init__(self, params: Dict[str, str] = None): + self.params = params or {} + + def to_json(self) -> str: + """Serialize the request to a JSON string.""" + return json.dumps({"Params": self.params}, ensure_ascii=False) # FLC expects UTF-8 encoded JSON (not ascii) + + +class RequestBuffer(ctypes.Structure): + """ctypes Structure matching the native ``RequestBuffer`` C struct.""" + + _fields_ = [ + ("Command", ctypes.c_void_p), + ("CommandLength", ctypes.c_int), + ("Data", ctypes.c_void_p), + ("DataLength", ctypes.c_int), + ] + + +class ResponseBuffer(ctypes.Structure): + """ctypes Structure matching the native ``ResponseBuffer`` C struct.""" + + _fields_ = [ + ("Data", ctypes.c_void_p), + ("DataLength", ctypes.c_int), + ("Error", ctypes.c_void_p), + ("ErrorLength", ctypes.c_int), + ] + + +@dataclass +class Response: + """Result from a Foundry Local Core command. + Either ``data`` or ``error`` will be set, never both. + """ + + data: Optional[str] = None + error: Optional[str] = None + + +class CallbackHelper: + """Internal helper class to convert the callback from ctypes to a str and call the python callback.""" + @staticmethod + def callback(data_ptr, length, self_ptr): + self = None + try: + self = ctypes.cast(self_ptr, ctypes.POINTER(ctypes.py_object)).contents.value + + # convert to a string and pass to the python callback + data_bytes = ctypes.string_at(data_ptr, length) + data_str = data_bytes.decode('utf-8') + self._py_callback(data_str) + except Exception as e: + if self is not None and self.exception is None: + self.exception = e # keep the first only as they are likely all the same + + def __init__(self, py_callback: Callable[[str], None]): + self._py_callback = py_callback + self.exception = None + + +class CoreInterop: + """ctypes FFI layer for the Foundry Local Core native library. + + Provides ``execute_command`` and ``execute_command_with_callback`` to + invoke native commands exposed by ``Microsoft.AI.Foundry.Local.Core``. + """ + + _initialized = False + _flcore_library = None + _genai_library = None + _ort_library = None + + instance = None + + # Callback function for native interop. + # This returns a string and its length, and an optional user provided object. + CALLBACK_TYPE = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p) + + @staticmethod + def _initialize_native_libraries() -> 'NativeBinaryPaths': + """Load the native Foundry Local Core library and its dependencies. + + Locates the binaries from the installed Python packages + ``foundry-local-core``, ``onnxruntime-core``, and + ``onnxruntime-genai-core`` using :func:`get_native_binary_paths`. + + Returns: + NativeBinaryPaths with resolved paths to all native binaries. 
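+
+        Raises:
+            RuntimeError: If any of the three native binaries cannot be
+                located in the installed packages.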
+ """ + paths = get_native_binary_paths() + if paths is None: + raise RuntimeError( + "Could not locate native libraries.\n" + " Standard variant : pip install foundry-local-sdk\n" + " WinML variant : pip install foundry-local-sdk-winml\n" + " Dev/CI install : foundry-local-install (or --winml)" + ) + + logger.info("Native libraries found — Core: %s ORT: %s GenAI: %s", + paths.core, paths.ort, paths.genai) + + # Create the onnxruntime.dll symlink on Linux/macOS if needed. + # create_ort_symlinks(paths) + os.environ["ORT_LIB_PATH"] = str(paths.ort) # For ORT-GENAI to find ORT dependency + + if sys.platform.startswith("win"): + # Register every binary directory so the .NET AOT Core library + # can resolve sibling DLLs via P/Invoke. + for native_dir in paths.all_dirs(): + os.add_dll_directory(str(native_dir)) + + # Explicitly pre-load ORT and GenAI so their symbols are globally + # available when Core does P/Invoke lookups at runtime. + # On Windows the PATH manipulation above is sufficient; on + # Linux/macOS we need RTLD_GLOBAL so that dlopen() within the + # Core native code can resolve ORT/GenAI symbols. + # ORT must be loaded before GenAI (GenAI depends on ORT). + if sys.platform.startswith("win"): + CoreInterop._ort_library = ctypes.CDLL(str(paths.ort)) + CoreInterop._genai_library = ctypes.CDLL(str(paths.genai)) + else: + CoreInterop._ort_library = ctypes.CDLL(str(paths.ort), mode=os.RTLD_GLOBAL) + CoreInterop._genai_library = ctypes.CDLL(str(paths.genai), mode=os.RTLD_GLOBAL) + + CoreInterop._flcore_library = ctypes.CDLL(str(paths.core)) + + # Set the function signatures + lib = CoreInterop._flcore_library + lib.execute_command.argtypes = [ctypes.POINTER(RequestBuffer), + ctypes.POINTER(ResponseBuffer)] + lib.execute_command.restype = None + + lib.free_response.argtypes = [ctypes.POINTER(ResponseBuffer)] + lib.free_response.restype = None + + # Set the callback function signature and delegate info + lib.execute_command_with_callback.argtypes = [ctypes.POINTER(RequestBuffer), + ctypes.POINTER(ResponseBuffer), + ctypes.c_void_p, # callback_fn + ctypes.c_void_p] # user_data + lib.execute_command_with_callback.restype = None + + return paths + + @staticmethod + def _to_c_buffer(s: str): + # Helper: encodes strings into unmanaged memory + if s is None: + return ctypes.c_void_p(0), 0, None + + buf = s.encode("utf-8") + ptr = ctypes.create_string_buffer(buf) # keeps memory alive in Python + return ctypes.cast(ptr, ctypes.c_void_p), len(buf), ptr + + def __init__(self, config: Configuration): + if not CoreInterop._initialized: + paths = CoreInterop._initialize_native_libraries() + CoreInterop._initialized = True + + # Pass the full path to the Core DLL so the native layer can + # discover sibling DLLs via Path.GetDirectoryName(FoundryLocalCorePath). + flcore_lib_name = f"Microsoft.AI.Foundry.Local.Core{_get_ext()}" + config.foundry_local_core_path = str(paths.core_dir / flcore_lib_name) + + # Pass ORT and GenAI library paths so the C# native library resolver + # can search their directories (they may be in separate pip packages). + if config.additional_settings is None: + config.additional_settings = {} + config.additional_settings["OrtLibraryPath"] = str(paths.ort) + config.additional_settings["OrtGenAILibraryPath"] = str(paths.genai) + + # Auto-detect WinML Bootstrap: if the Bootstrap DLL is present + # in the native binaries directory and the user hasn't explicitly + # set the Bootstrap config, enable it automatically. 
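+            # Presetting any "Bootstrap" value (e.g. "false") in
+            # additional_settings before initialization opts out of this
+            # auto-detection.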
+ if sys.platform.startswith("win"): + bootstrap_dll = paths.core_dir / "Microsoft.WindowsAppRuntime.Bootstrap.dll" + if bootstrap_dll.exists(): + if config.additional_settings is None: + config.additional_settings = {} + if "Bootstrap" not in config.additional_settings: + logger.info("WinML Bootstrap DLL detected — enabling Bootstrap") + config.additional_settings["Bootstrap"] = "true" + + request = InteropRequest(params=config.as_dictionary()) + response = self.execute_command("initialize", request) + if response.error is not None: + raise FoundryLocalException(f"Failed to initialize Foundry.Local.Core: {response.error}") + + logger.info("Foundry.Local.Core initialized successfully: %s", response.data) + + def _execute_command(self, command: str, interop_request: InteropRequest = None, + callback: CoreInterop.CALLBACK_TYPE = None): + cmd_ptr, cmd_len, cmd_buf = CoreInterop._to_c_buffer(command) + data_ptr, data_len, data_buf = CoreInterop._to_c_buffer(interop_request.to_json() if interop_request else None) + + req = RequestBuffer(Command=cmd_ptr, CommandLength=cmd_len, Data=data_ptr, DataLength=data_len) + resp = ResponseBuffer() + lib = CoreInterop._flcore_library + + if (callback is not None): + # If a callback is provided, use the execute_command_with_callback method + # We need a helper to do the initial conversion from ctypes to Python and pass it through to the + # provided callback function + callback_helper = CallbackHelper(callback) + callback_py_obj = ctypes.py_object(callback_helper) + callback_helper_ptr = ctypes.cast(ctypes.pointer(callback_py_obj), ctypes.c_void_p) + callback_fn = CoreInterop.CALLBACK_TYPE(CallbackHelper.callback) + + lib.execute_command_with_callback(ctypes.byref(req), ctypes.byref(resp), callback_fn, callback_helper_ptr) + + if callback_helper.exception is not None: + raise callback_helper.exception + else: + lib.execute_command(ctypes.byref(req), ctypes.byref(resp)) + + req = None # Free Python reference to request + + response_str = ctypes.string_at(resp.Data, resp.DataLength).decode("utf-8") if resp.Data else None + error_str = ctypes.string_at(resp.Error, resp.ErrorLength).decode("utf-8") if resp.Error else None + + # C# owns the memory in the response so we need to free it explicitly + lib.free_response(resp) + + return Response(data=response_str, error=error_str) + + def execute_command(self, command_name: str, command_input: Optional[InteropRequest] = None) -> Response: + """Execute a command synchronously. + + Args: + command_name: The native command name (e.g. ``"get_model_list"``). + command_input: Optional request parameters. + + Returns: + A ``Response`` with ``data`` on success or ``error`` on failure. + """ + logger.debug("Executing command: %s Input: %s", command_name, + command_input.params if command_input else None) + + response = self._execute_command(command_name, command_input) + return response + + def execute_command_with_callback(self, command_name: str, command_input: Optional[InteropRequest], + callback: Callable[[str], None]) -> Response: + """Execute a command with a streaming callback. + + The ``callback`` receives incremental string data from the native layer + (e.g. streaming chat tokens or download progress). + + Args: + command_name: The native command name. + command_input: Optional request parameters. + callback: Called with each incremental string response. + + Returns: + A ``Response`` with ``data`` on success or ``error`` on failure. 
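+
+        Example (hypothetical command name, for illustration only)::
+
+            interop.execute_command_with_callback(
+                "chat_completion_stream",
+                InteropRequest({"Model": "some-model-id"}),
+                lambda chunk: print(chunk, end="", flush=True),
+            )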
+ """ + logger.debug("Executing command with callback: %s Input: %s", command_name, + command_input.params if command_input else None) + response = self._execute_command(command_name, command_input, callback) + return response + + +def get_cached_model_ids(core_interop: CoreInterop) -> list[str]: + """Get the list of models that have been downloaded and are cached.""" + + response = core_interop.execute_command("get_cached_models") + if response.error is not None: + raise FoundryLocalException(f"Failed to get cached models: {response.error}") + + try: + model_ids = json.loads(response.data) + except json.JSONDecodeError as e: + raise FoundryLocalException(f"Failed to decode JSON response: Response was: {response.data}") from e + + return model_ids + diff --git a/sdk/python/src/detail/model_data_types.py b/sdk/python/src/detail/model_data_types.py new file mode 100644 index 00000000..b8b9e8d6 --- /dev/null +++ b/sdk/python/src/detail/model_data_types.py @@ -0,0 +1,76 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from typing import Optional, List +from pydantic import BaseModel, Field + +from enum import StrEnum + +# ---------- ENUMS ---------- +class DeviceType(StrEnum): + """Device types supported by model variants.""" + + CPU = "CPU" + GPU = "GPU" + NPU = "NPU" + +# ---------- DATA MODELS ---------- + +class PromptTemplate(BaseModel): + """Prompt template strings for system, user, assistant, and raw prompt roles.""" + + system: Optional[str] = Field(default=None, alias="system") + user: Optional[str] = Field(default=None, alias="user") + assistant: Optional[str] = Field(default=None, alias="assistant") + prompt: Optional[str] = Field(default=None, alias="prompt") + + +class Runtime(BaseModel): + """Runtime configuration specifying the device type and execution provider.""" + + device_type: DeviceType = Field(alias="deviceType") + execution_provider: str = Field(alias="executionProvider") + + +class Parameter(BaseModel): + """A named parameter with an optional string value.""" + + name: str + value: Optional[str] = None + + +class ModelSettings(BaseModel): + """Model-specific settings containing a list of parameters.""" + + parameters: Optional[List[Parameter]] = Field(default=None, alias="parameters") + + +class ModelInfo(BaseModel): + """Catalog metadata for a single model variant. + + Fields are populated from the JSON response of the ``get_model_list`` command. + """ + + id: str = Field(alias="id", description="Unique identifier of the model. 
Generally :") + name: str = Field(alias="name", description="Model variant name") + version: int = Field(alias="version") + alias: str = Field(..., description="Alias of the model") + display_name: Optional[str] = Field(alias="displayName") + provider_type: str = Field(alias="providerType") + uri: str = Field(alias="uri") + model_type: str = Field(alias="modelType") + prompt_template: Optional[PromptTemplate] = Field(default=None, alias="promptTemplate") + publisher: Optional[str] = Field(alias="publisher") + model_settings: Optional[ModelSettings] = Field(default=None, alias="modelSettings") + license: Optional[str] = Field(alias="license") + license_description: Optional[str] = Field(alias="licenseDescription") + cached: bool = Field(alias="cached") + task: Optional[str] = Field(alias="task") + runtime: Optional[Runtime] = Field(alias="runtime") + file_size_mb: Optional[int] = Field(alias="fileSizeMb") + supports_tool_calling: Optional[bool] = Field(alias="supportsToolCalling") + max_output_tokens: Optional[int] = Field(alias="maxOutputTokens") + min_fl_version: Optional[str] = Field(alias="minFLVersion") + created_at_unix: int = Field(alias="createdAt") diff --git a/sdk/python/src/detail/model_load_manager.py b/sdk/python/src/detail/model_load_manager.py new file mode 100644 index 00000000..8ffd087a --- /dev/null +++ b/sdk/python/src/detail/model_load_manager.py @@ -0,0 +1,166 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +from __future__ import annotations + +import json +import logging +import requests + +from typing import List +from urllib.parse import quote + +from ..exception import FoundryLocalException +from ..version import __version__ as sdk_version +from .core_interop import CoreInterop, InteropRequest + +logger = logging.getLogger(__name__) + + +class ModelLoadManager: + """Manages loading and unloading of models in Foundry Local. + + Can operate in two modes: direct interop with Foundry Local Core, or via + an external web service if the configuration provides a + ``WebServiceExternalUrl`` value. + """ + + _headers = {"user-agent": f"foundry-local-python-sdk/{sdk_version}"} + + def __init__(self, core_interop: CoreInterop, external_service_url: str = None): + self._core_interop = core_interop + self._external_service_url = external_service_url + + def load(self, model_id: str) -> None: + """ + Load a model by its ID. + + This method loads a model either via direct interop with Foundry Local Core + or, if an external service URL is configured, by calling the external web + service. + + :param model_id: The ID of the model to load. + :raises FoundryLocalException: If the model cannot be loaded successfully, + for example due to an error returned from Foundry Local Core or from + the external service, including underlying HTTP or network errors when + communicating with the external service. + """ + if self._external_service_url: + self._web_load_model(model_id) + return + + request = InteropRequest({"Model": model_id}) + response = self._core_interop.execute_command("load_model", request) + if response.error is not None: + raise FoundryLocalException(f"Failed to load model {model_id}: {response.error}") + + def unload(self, model_id: str) -> None: + """ + Unload a model by its ID. + :param model_id: The ID of the model to unload. 
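+        :raises FoundryLocalException: If the model cannot be unloaded, either
+            via direct interop with Foundry Local Core or via the external
+            web service.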
+ """ + if self._external_service_url: + self._web_unload_model(model_id) + return + + request = InteropRequest({"Model": model_id}) + response = self._core_interop.execute_command("unload_model", request) + if response.error is not None: + raise FoundryLocalException(f"Failed to unload model {model_id}: {response.error}") + + def list_loaded(self) -> list[str]: + """ + List loaded models. + :return: List of loaded model IDs + """ + if self._external_service_url: + return self._web_list_loaded_models() + + response = self._core_interop.execute_command("list_loaded_models") + if response.error is not None: + raise FoundryLocalException(f"Failed to list loaded models: {response.error}") + + try: + model_ids = json.loads(response.data) + except json.JSONDecodeError as e: + raise FoundryLocalException(f"Failed to decode JSON response: Response was: {response.data}") from e + + return model_ids + + def _web_list_loaded_models(self) -> List[str]: + try: + response = requests.get(f"{self._external_service_url}/models/loaded", headers=self._headers, timeout=10) + + if not response.ok: + raise FoundryLocalException( + f"Error listing loaded models from {self._external_service_url}: {response.reason}" + ) + + content = response.text + logger.debug("Loaded models json from %s: %s", self._external_service_url, content) + + model_list = json.loads(content) + return model_list if model_list is not None else [] + except requests.RequestException as e: + raise FoundryLocalException( + f"HTTP request failed when listing loaded models from {self._external_service_url}" + ) from e + except json.JSONDecodeError as e: + raise FoundryLocalException(f"Failed to decode JSON response: Response was: {content}") from e + + def _web_load_model(self, model_id: str) -> None: + """ + Load a model via the external web service. 
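+        The model is loaded with a ``GET`` request to
+        ``{external_service_url}/models/load/{model_id}``; the model id is
+        URL-encoded with ``urllib.parse.quote`` before being placed in the path.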
+ + :param model_id: The ID of the model to load + :raises FoundryLocalException: If the HTTP request fails or response is invalid + """ + try: + encoded_model_id = quote(model_id) + url = f"{self._external_service_url}/models/load/{encoded_model_id}" + + # Future: add query params like load timeout + # query_params = { + # # "timeout": "30" + # } + # response = requests.get(url, params=query_params) + + response = requests.get(url, headers=self._headers, timeout=10) + + if not response.ok: + raise FoundryLocalException( + f"Error loading model {model_id} from {self._external_service_url}: " + f"{response.reason}" + ) + + content = response.text + logger.info("Model %s loaded successfully from %s: %s", + model_id, self._external_service_url, content) + + except requests.RequestException as e: + raise FoundryLocalException( + f"HTTP request failed when loading model {model_id} from {self._external_service_url}: {e}" + ) from e + + def _web_unload_model(self, model_id: str) -> None: + try: + encoded_model_id = quote(model_id) + url = f"{self._external_service_url}/models/unload/{encoded_model_id}" + + response = requests.get(url, headers=self._headers, timeout=10) + + if not response.ok: + raise FoundryLocalException( + f"Error unloading model {model_id} from {self._external_service_url}: " + f"{response.reason}" + ) + + content = response.text + logger.info("Model %s unloaded successfully from %s: %s", + model_id, self._external_service_url, content) + + except requests.RequestException as e: + raise FoundryLocalException( + f"HTTP request failed when unloading model {model_id} from {self._external_service_url}: {e}" + ) from e diff --git a/sdk/python/src/detail/utils.py b/sdk/python/src/detail/utils.py new file mode 100644 index 00000000..5a054610 --- /dev/null +++ b/sdk/python/src/detail/utils.py @@ -0,0 +1,294 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Utility functions for the Foundry Local SDK. + +Includes native library locator logic and helper functions used by +other SDK modules. 
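+
+Also defines :func:`foundry_local_install`, the implementation behind the
+``foundry-local-install`` console script registered in ``pyproject.toml``.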
+""" + +from __future__ import annotations + +import argparse +import importlib.util +import json +import logging +import os +import sys + +from dataclasses import dataclass +from pathlib import Path + +from enum import StrEnum +from ..exception import FoundryLocalException + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Platform helpers +# --------------------------------------------------------------------------- + +# Maps Python sys.platform to native shared library extension +EXT_MAP: dict[str, str] = { + "win32": ".dll", + "linux": ".so", + "darwin": ".dylib", +} + + +def _get_ext() -> str: + """Get the native library file extension for the current platform.""" + for plat_prefix, ext in EXT_MAP.items(): + if sys.platform.startswith(plat_prefix): + return ext + raise RuntimeError(f"Unsupported platform: {sys.platform}") + + +# --------------------------------------------------------------------------- +# Package-based binary discovery +# --------------------------------------------------------------------------- + +# On Linux/macOS the ORT shared libraries carry the "lib" prefix while the +# Core library refers to them without it — a symlink "onnxruntime.dll" → +# "libonnxruntime.so/.dylib" is created to bridge the gap (see below). +_ORT_PREFIX = "" if sys.platform == "win32" else "lib" + + +def _native_binary_names() -> tuple[str, str, str]: + """Return the expected native binary filenames for the current platform.""" + ext = _get_ext() + return ( + f"Microsoft.AI.Foundry.Local.Core{ext}", + f"{_ORT_PREFIX}onnxruntime{ext}", + f"{_ORT_PREFIX}onnxruntime-genai{ext}", + ) + + +def _find_file_in_package(package_name: str, filename: str) -> Path | None: + """Locate a native binary *filename* inside an installed Python package. + + Searches the package root and common sub-directories (``capi/``, + ``native/``, ``lib/``). Falls back to a recursive ``rglob`` scan of + the entire package tree when none of the quick paths match. + + Args: + package_name: The PyPI package name (hyphens or underscores accepted; + e.g. ``"onnxruntime-genai-core"`` or ``"onnxruntime_genai_core"``). + filename: The filename to look for (e.g. ``"onnxruntime-genai.dll"``). + + Returns: + Absolute ``Path`` to the file, or ``None`` if not found. 
+ """ + import_name = package_name.replace("-", "_") + spec = importlib.util.find_spec(import_name) + if spec is None or spec.origin is None: + return None + + pkg_root = Path(spec.origin).parent + + # Quick checks for well-known sub-directories first + for candidate_dir in (pkg_root, pkg_root / "capi", pkg_root / "native", pkg_root / "lib", pkg_root / "bin"): + candidate = candidate_dir / filename + if candidate.exists(): + return candidate + + # Recursive fallback + for match in pkg_root.rglob(filename): + return match + + return None + + +@dataclass +class NativeBinaryPaths: + """Resolved paths to the three native binaries required by the SDK.""" + + core: Path + ort: Path + genai: Path + + @property + def core_dir(self) -> Path: + """Directory that contains the Core binary.""" + return self.core.parent + + @property + def ort_dir(self) -> Path: + """Directory that contains the OnnxRuntime binary.""" + return self.ort.parent + + @property + def genai_dir(self) -> Path: + """Directory that contains the OnnxRuntimeGenAI binary.""" + return self.genai.parent + + def all_dirs(self) -> list[Path]: + """Return a deduplicated list of directories that contain the binaries.""" + seen: list[Path] = [] + for d in (self.core_dir, self.ort_dir, self.genai_dir): + if d not in seen: + seen.append(d) + return seen + + +def get_native_binary_paths() -> NativeBinaryPaths | None: + """Locate native binaries from installed Python packages. + + Returns: + A :class:`NativeBinaryPaths` instance if all three binaries were + found, or ``None`` if any is missing. + """ + core_name, ort_name, genai_name = _native_binary_names() + + # Probe WinML packages first; fall back to standard if not installed. + core_path = _find_file_in_package("foundry-local-core-winml", core_name) or _find_file_in_package("foundry-local-core", core_name) + ort_path = _find_file_in_package("onnxruntime-core", ort_name) + genai_path = _find_file_in_package("onnxruntime-genai-core", genai_name) + + if core_path and ort_path and genai_path: + return NativeBinaryPaths(core=core_path, ort=ort_path, genai=genai_path) + + return None + +def create_ort_symlinks(paths: NativeBinaryPaths) -> None: + """Create compatibility symlinks for ORT in the Core library directory on Linux/macOS. + + Workaround for ORT issue https://github.com/microsoft/onnxruntime/issues/27263. + + On Linux/macOS the native packages ship ORT binaries with a ``lib`` prefix + (e.g. ``libonnxruntime.dylib``) in their own package directories, while the + .NET AOT Core library P/Invokes ``onnxruntime.dylib`` / ``onnxruntime-genai.dylib`` + and searches its *own* directory first (matching the JS SDK behaviour where all + binaries live in a single ``coreDir``). + + This function creates ``onnxruntime{ext}`` and ``onnxruntime-genai{ext}`` symlinks + in ``paths.core_dir`` pointing at the absolute paths of the respective binaries so + the Core DLL can resolve them via ``dlopen`` without needing ``DYLD_LIBRARY_PATH``. 
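+
+    Resulting links (macOS shown; directory names are illustrative)::
+
+        <core_dir>/onnxruntime.dylib        -> <ort_dir>/libonnxruntime.dylib
+        <core_dir>/onnxruntime-genai.dylib  -> <genai_dir>/libonnxruntime-genai.dylib
+        <genai_dir>/libonnxruntime.dylib    -> <ort_dir>/libonnxruntime.dylib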
+ """ + if sys.platform == "win32": + return + + ext = ".dylib" if sys.platform == "darwin" else ".so" + + # Pairs of (actual binary path, link stem to create in core_dir) + links: list[tuple[Path, str]] = [ + (paths.ort, "onnxruntime"), + (paths.genai, "onnxruntime-genai"), + ] + + for src_path, link_stem in links: + link_path = paths.core_dir / f"{link_stem}{ext}" + if not link_path.exists(): + if src_path.exists(): + os.symlink(str(src_path), link_path) + logger.info("Created symlink: %s -> %s", link_path, src_path) + else: + logger.warning("Cannot create symlink %s: source %s not found", link_path, src_path) + + # Create a libonnxruntime symlink in genai_dir pointing to the real ORT + # binary so the dynamic linker can resolve GenAI's dependency. + if paths.genai_dir != paths.ort_dir: + ort_link_in_genai = paths.genai_dir / paths.ort.name + if not ort_link_in_genai.exists(): + if paths.ort.exists(): + os.symlink(str(paths.ort), ort_link_in_genai) + logger.info("Created symlink: %s -> %s", ort_link_in_genai, paths.ort) + else: + logger.warning("Cannot create symlink %s: source %s not found", + ort_link_in_genai, paths.ort) + + +# --------------------------------------------------------------------------- +# CLI entry point for verifying native binary installation +# --------------------------------------------------------------------------- + + +def foundry_local_install(args: list[str] | None = None) -> None: + """CLI entry point for installing and verifying native binaries. + + Usage:: + + foundry-local-install [--winml] [--verbose] + + Installs the platform-specific native libraries required by the SDK via + pip, then verifies they can be located. Use ``--winml`` to install the + WinML variants of the native packages (Windows only). + + Standard variant (default):: + + foundry-local-install + # installs: foundry-local-core, onnxruntime-core, onnxruntime-genai-core + + WinML variant:: + + foundry-local-install --winml + # installs: foundry-local-core-winml, onnxruntime-core, onnxruntime-genai-core + """ + import subprocess + + parser = argparse.ArgumentParser( + description=( + "Install and verify the platform-specific native libraries required by " + "the Foundry Local SDK via pip. Use --winml to install the WinML variants " + "(Windows only). Without --winml the standard cross-platform packages are installed." + ), + prog="foundry-local-install", + ) + parser.add_argument( + "--winml", + action="store_true", + help=( + "Install WinML native package (foundry-local-core-winml) " + "instead of the standard cross-platform package." 
+ ), + ) + parser.add_argument( + "--verbose", + action="store_true", + help="Print the resolved path for each binary after installation.", + ) + parsed = parser.parse_args(args) + + if parsed.winml: + variant = "WinML" + packages = ["foundry-local-core-winml", "onnxruntime-core", "onnxruntime-genai-core"] + else: + variant = "standard" + packages = ["foundry-local-core", "onnxruntime-core", "onnxruntime-genai-core"] + + print(f"[foundry-local] Installing {variant} native packages: {', '.join(packages)}") + subprocess.check_call([sys.executable, "-m", "pip", "install", *packages]) + + paths = get_native_binary_paths() + if paths is None: + core_name, ort_name, genai_name = _native_binary_names() + missing: list[str] = [] + if parsed.winml: + if _find_file_in_package("foundry-local-core-winml", core_name) is None: + missing.append("foundry-local-core-winml") + else: + if _find_file_in_package("foundry-local-core", core_name) is None: + missing.append("foundry-local-core") + if _find_file_in_package("onnxruntime-core", ort_name) is None: + missing.append("onnxruntime-core") + if _find_file_in_package("onnxruntime-genai-core", genai_name) is None: + missing.append("onnxruntime-genai-core") + print( + "[foundry-local] ERROR: Could not locate native binaries after installation. " + f"Missing: {', '.join(missing)}", + file=sys.stderr, + ) + hint = "pip install foundry-local-sdk-winml" if parsed.winml else "pip install foundry-local-sdk" + print(f" Try: {hint}", file=sys.stderr) + sys.exit(1) + + print(f"[foundry-local] {variant.capitalize()} native libraries installed and verified.") + if parsed.verbose: + print(f" Core : {paths.core}") + print(f" ORT : {paths.ort}") + print(f" GenAI : {paths.genai}") + + + diff --git a/sdk/python/src/exception.py b/sdk/python/src/exception.py new file mode 100644 index 00000000..0cff6a90 --- /dev/null +++ b/sdk/python/src/exception.py @@ -0,0 +1,7 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +class FoundryLocalException(Exception): + """Base exception for Foundry Local SDK errors.""" diff --git a/sdk/python/src/foundry_local_manager.py b/sdk/python/src/foundry_local_manager.py new file mode 100644 index 00000000..4486eaf1 --- /dev/null +++ b/sdk/python/src/foundry_local_manager.py @@ -0,0 +1,118 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from __future__ import annotations + +import json +import logging +import threading + +from .catalog import Catalog +from .configuration import Configuration +from .logging_helper import set_default_logger_severity +from .detail.core_interop import CoreInterop +from .detail.model_load_manager import ModelLoadManager +from .exception import FoundryLocalException + +logger = logging.getLogger(__name__) + + +class FoundryLocalManager: + """Singleton manager for Foundry Local SDK operations. + + Call ``FoundryLocalManager.initialize(config)`` once at startup, then access + the singleton via ``FoundryLocalManager.instance``. + + Attributes: + instance: The singleton ``FoundryLocalManager`` instance (set after ``initialize``). + catalog: The model ``Catalog`` for discovering and managing models. 
+ urls: Bound URL(s) after ``start_web_service()`` is called, or ``None``. + """ + + _lock = threading.Lock() + instance: FoundryLocalManager = None + + @staticmethod + def initialize(config: Configuration): + """Initialize the Foundry Local SDK with the given configuration. + + This method must be called before using any other part of the SDK. + + Args: + config: Configuration object for the SDK. + """ + # Delegate singleton creation to the constructor, which enforces + # the singleton invariant under a lock and sets `instance`. + FoundryLocalManager(config) + + def __init__(self, config: Configuration): + # Enforce singleton creation under a class-level lock and ensure + # that `FoundryLocalManager.instance` is set exactly once. + with FoundryLocalManager._lock: + if FoundryLocalManager.instance is not None: + raise FoundryLocalException( + "FoundryLocalManager is a singleton and has already been initialized." + ) + config.validate() + self.config = config + self._initialize() + FoundryLocalManager.instance = self + + self.urls = None + + def _initialize(self): + set_default_logger_severity(self.config.log_level) + + external_service_url = self.config.web.external_url if self.config.web else None + + self._core_interop = CoreInterop(self.config) + self._model_load_manager = ModelLoadManager(self._core_interop, external_service_url) + self.catalog = Catalog(self._model_load_manager, self._core_interop) + + def ensure_eps_downloaded(self) -> None: + """Ensure execution providers are downloaded and registered (synchronous). + Only relevant when using WinML. + + Raises: + FoundryLocalException: If execution provider download fails. + """ + result = self._core_interop.execute_command("ensure_eps_downloaded") + + if result.error is not None: + raise FoundryLocalException(f"Error ensuring execution providers downloaded: {result.error}") + + def start_web_service(self): + """Start the optional web service. + + If provided, the service will be bound to the value of Configuration.web.urls. + The default of http://127.0.0.1:0 will be used otherwise, which binds to a random ephemeral port. + + FoundryLocalManager.urls will be updated with the actual URL/s the service is listening on. + """ + with FoundryLocalManager._lock: + response = self._core_interop.execute_command("start_service") + + if response.error is not None: + raise FoundryLocalException(f"Error starting web service: {response.error}") + + bound_urls = json.loads(response.data) + if bound_urls is None or len(bound_urls) == 0: + raise FoundryLocalException("Failed to get bound URLs from web service start response.") + + self.urls = bound_urls + + def stop_web_service(self): + """Stop the optional web service.""" + + with FoundryLocalManager._lock: + if self.urls is None: + raise FoundryLocalException("Web service is not running.") + + response = self._core_interop.execute_command("stop_service") + + if response.error is not None: + raise FoundryLocalException(f"Error stopping web service: {response.error}") + + self.urls = None diff --git a/sdk/python/src/imodel.py b/sdk/python/src/imodel.py new file mode 100644 index 00000000..a092b98e --- /dev/null +++ b/sdk/python/src/imodel.py @@ -0,0 +1,91 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import Callable, Optional + +from .openai.chat_client import ChatClient +from .openai.audio_client import AudioClient + +class IModel(ABC): + """Abstract interface for a model that can be downloaded, loaded, and used for inference.""" + + @property + @abstractmethod + def id(self) -> str: + """Unique model id.""" + pass + + @property + @abstractmethod + def alias(self) -> str: + """Model alias.""" + pass + + @property + @abstractmethod + def is_cached(self) -> bool: + """True if the model is present in the local cache.""" + pass + + @property + @abstractmethod + def is_loaded(self) -> bool: + """True if the model is loaded into memory.""" + pass + + @abstractmethod + def download(self, progress_callback: Callable[[float], None] = None) -> None: + """ + Download the model to local cache if not already present. + :param progress_callback: Optional callback function for download progress as a percentage (0.0 to 100.0). + """ + pass + + @abstractmethod + def get_path(self) -> str: + """ + Gets the model path if cached. + :return: Path of model directory. + """ + pass + + @abstractmethod + def load(self) -> None: + """ + Load the model into memory if not already loaded. + """ + pass + + @abstractmethod + def remove_from_cache(self) -> None: + """ + Remove the model from the local cache. + """ + pass + + @abstractmethod + def unload(self) -> None: + """ + Unload the model if loaded. + """ + pass + + @abstractmethod + def get_chat_client(self) -> ChatClient: + """ + Get an OpenAI API based ChatClient. + :return: ChatClient instance. + """ + pass + + @abstractmethod + def get_audio_client(self) -> AudioClient: + """ + Get an OpenAI API based AudioClient. + :return: AudioClient instance. + """ + pass diff --git a/sdk/python/src/logging_helper.py b/sdk/python/src/logging_helper.py new file mode 100644 index 00000000..e476f62b --- /dev/null +++ b/sdk/python/src/logging_helper.py @@ -0,0 +1,30 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +import logging + +from enum import StrEnum + +# Map the python logging levels to the Foundry Local Core names +class LogLevel(StrEnum): + VERBOSE = "Verbose" + DEBUG = "Debug" + INFORMATION = "Information" + WARNING = "Warning" + ERROR = "Error" + FATAL = "Fatal" + +LOG_LEVEL_MAP = { + LogLevel.VERBOSE: logging.DEBUG, # No direct equivalent for Trace in Python logging + LogLevel.DEBUG: logging.DEBUG, + LogLevel.INFORMATION: logging.INFO, + LogLevel.WARNING: logging.WARNING, + LogLevel.ERROR: logging.ERROR, + LogLevel.FATAL: logging.CRITICAL, +} + +def set_default_logger_severity(config_level: LogLevel): + py_level = LOG_LEVEL_MAP.get(config_level, logging.INFO) + logger = logging.getLogger(__name__.split(".", maxsplit=1)[0]) + logger.setLevel(py_level) diff --git a/sdk/python/src/model.py b/sdk/python/src/model.py new file mode 100644 index 00000000..4c8750ca --- /dev/null +++ b/sdk/python/src/model.py @@ -0,0 +1,133 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +from __future__ import annotations + +import logging +from typing import Callable, List, Optional + +from .imodel import IModel +from .openai.chat_client import ChatClient +from .openai.audio_client import AudioClient +from .model_variant import ModelVariant +from .exception import FoundryLocalException +from .detail.core_interop import CoreInterop + +logger = logging.getLogger(__name__) + + +class Model(IModel): + """A model identified by an alias that groups one or more ``ModelVariant`` instances. + + Operations are delegated to the currently selected variant. + """ + + def __init__(self, model_variant: ModelVariant, core_interop: CoreInterop): + self._alias = model_variant.alias + self._variants: List[ModelVariant] = [model_variant] + # Variants are sorted by Core, so the first one added is the default + self._selected_variant = model_variant + self._core_interop = core_interop + + def _add_variant(self, variant: ModelVariant) -> None: + if variant.alias != self._alias: + raise FoundryLocalException( + f"Variant alias {variant.alias} does not match model alias {self._alias}" + ) + + self._variants.append(variant) + + # Prefer the highest priority locally cached variant + if variant.info.cached and not self._selected_variant.info.cached: + self._selected_variant = variant + + def select_variant(self, variant: ModelVariant) -> None: + """ + Select a specific model variant by its ModelVariant object. + The selected variant will be used for IModel operations. + + :param variant: ModelVariant to select + :raises FoundryLocalException: If variant is not valid for this model + """ + if variant not in self._variants: + raise FoundryLocalException( + f"Model {self._alias} does not have a {variant.id} variant." + ) + + self._selected_variant = variant + + def get_latest_version(self, variant: ModelVariant) -> ModelVariant: + """ + Get the latest version of the specified model variant. + + :param variant: Model variant + :return: ModelVariant for latest version. Same as variant if that is the latest version + :raises FoundryLocalException: If variant is not valid for this model + """ + # Variants are sorted by version, so the first one matching the name is the latest version + for v in self._variants: + if v.info.name == variant.info.name: + return v + + raise FoundryLocalException( + f"Model {self._alias} does not have a {variant.id} variant." 
+ ) + + @property + def variants(self) -> List[ModelVariant]: + """List of all variants for this model.""" + return self._variants.copy() # Return a copy to prevent external modification + + @property + def selected_variant(self) -> ModelVariant: + """Currently selected variant.""" + return self._selected_variant + + @property + def id(self) -> str: + """Model Id of the currently selected variant.""" + return self._selected_variant.id + + @property + def alias(self) -> str: + """Alias of this model.""" + return self._alias + + @property + def is_cached(self) -> bool: + """Is the currently selected variant cached locally?""" + return self._selected_variant.is_cached + + @property + def is_loaded(self) -> bool: + """Is the currently selected variant loaded in memory?""" + return self._selected_variant.is_loaded + + def download(self, progress_callback: Optional[Callable[[float], None]] = None) -> None: + """Download the currently selected variant.""" + self._selected_variant.download(progress_callback) + + def get_path(self) -> str: + """Get the path to the currently selected variant.""" + return self._selected_variant.get_path() + + def load(self) -> None: + """Load the currently selected variant into memory.""" + self._selected_variant.load() + + def unload(self) -> None: + """Unload the currently selected variant from memory.""" + self._selected_variant.unload() + + def remove_from_cache(self) -> None: + """Remove the currently selected variant from the local cache.""" + self._selected_variant.remove_from_cache() + + def get_chat_client(self) -> ChatClient: + """Get a chat client for the currently selected variant.""" + return self._selected_variant.get_chat_client() + + def get_audio_client(self) -> AudioClient: + """Get an audio client for the currently selected variant.""" + return self._selected_variant.get_audio_client() diff --git a/sdk/python/src/model_variant.py b/sdk/python/src/model_variant.py new file mode 100644 index 00000000..f0d40109 --- /dev/null +++ b/sdk/python/src/model_variant.py @@ -0,0 +1,130 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +from __future__ import annotations + +import logging +from typing import Callable, Optional + +from .imodel import IModel +from .exception import FoundryLocalException + +from .detail.core_interop import CoreInterop, InteropRequest +from .detail.model_data_types import ModelInfo +from .detail.core_interop import get_cached_model_ids +from .detail.model_load_manager import ModelLoadManager +from .openai.audio_client import AudioClient +from .openai.chat_client import ChatClient + +logger = logging.getLogger(__name__) + + +class ModelVariant(IModel): + """A specific variant of a model (e.g. a particular device type, version, or quantization). + + Implements ``IModel`` and provides download, cache, load/unload, and + client-creation operations for a single model variant. + """ + + def __init__(self, model_info: ModelInfo, model_load_manager: ModelLoadManager, core_interop: CoreInterop): + """Initialize a ModelVariant. + + Args: + model_info: Catalog metadata for this variant. + model_load_manager: Manager for loading/unloading models. + core_interop: Native interop layer for Foundry Local Core. 
+ """ + self._model_info = model_info + self._model_load_manager = model_load_manager + self._core_interop = core_interop + + self._id = model_info.id + self._alias = model_info.alias + + @property + def id(self) -> str: + """Unique model variant ID (e.g. ``name:version``).""" + return self._id + + @property + def alias(self) -> str: + """Model alias shared across variants.""" + return self._alias + + @property + def info(self) -> ModelInfo: + """Full catalog metadata for this variant.""" + return self._model_info + + @property + def is_cached(self) -> bool: + """``True`` if this variant is present in the local model cache.""" + cached_model_ids = get_cached_model_ids(self._core_interop) + return self.id in cached_model_ids + + @property + def is_loaded(self) -> bool: + """``True`` if this variant is currently loaded into memory.""" + loaded_model_ids = self._model_load_manager.list_loaded() + return self.id in loaded_model_ids + + def download(self, progress_callback: Callable[[float], None] = None): + """Download this variant to the local cache. + + Args: + progress_callback: Optional callback receiving download progress as a + percentage (0.0 to 100.0). + """ + request = InteropRequest(params={"Model": self.id}) + if progress_callback is None: + response = self._core_interop.execute_command("download_model", request) + else: + response = self._core_interop.execute_command_with_callback( + "download_model", request, + lambda pct_str: progress_callback(float(pct_str)) + ) + + logger.info("Download response: %s", response) + if response.error is not None: + raise FoundryLocalException(f"Failed to download model: {response.error}") + + def get_path(self) -> str: + """Get the local file-system path to this variant if cached. + + Returns: + Path to the model directory. + + Raises: + FoundryLocalException: If the model path cannot be retrieved. + """ + request = InteropRequest(params={"Model": self.id}) + response = self._core_interop.execute_command("get_model_path", request) + if response.error is not None: + raise FoundryLocalException(f"Failed to get model path: {response.error}") + + return response.data + + def load(self) -> None: + """Load this variant into memory for inference.""" + self._model_load_manager.load(self.id) + + def remove_from_cache(self) -> None: + """Remove this variant from the local model cache.""" + request = InteropRequest(params={"Model": self.id}) + response = self._core_interop.execute_command("remove_cached_model", request) + if response.error is not None: + raise FoundryLocalException(f"Failed to remove model from cache: {response.error}") + + + def unload(self) -> None: + """Unload this variant from memory.""" + self._model_load_manager.unload(self.id) + + def get_chat_client(self) -> ChatClient: + """Create an OpenAI-compatible ``ChatClient`` for this variant.""" + return ChatClient(self.id, self._core_interop) + + def get_audio_client(self) -> AudioClient: + """Create an OpenAI-compatible ``AudioClient`` for this variant.""" + return AudioClient(self.id, self._core_interop) \ No newline at end of file diff --git a/sdk/python/src/openai/__init__.py b/sdk/python/src/openai/__init__.py new file mode 100644 index 00000000..e445ba1d --- /dev/null +++ b/sdk/python/src/openai/__init__.py @@ -0,0 +1,10 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +"""OpenAI-compatible clients for chat completions and audio transcription.""" + +from .chat_client import ChatClient, ChatClientSettings +from .audio_client import AudioClient + +__all__ = ["AudioClient", "ChatClient", "ChatClientSettings"] diff --git a/sdk/python/src/openai/audio_client.py b/sdk/python/src/openai/audio_client.py new file mode 100644 index 00000000..8d3ffa29 --- /dev/null +++ b/sdk/python/src/openai/audio_client.py @@ -0,0 +1,153 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from __future__ import annotations + +import json +import logging +from dataclasses import dataclass +from typing import Callable, Optional + +from ..detail.core_interop import CoreInterop, InteropRequest +from ..exception import FoundryLocalException + +logger = logging.getLogger(__name__) + + +class AudioSettings: + """Settings supported by Foundry Local for audio transcription. + + Attributes: + language: Language of the audio (e.g. ``"en"``). + temperature: Sampling temperature (0.0 for deterministic results). + """ + + def __init__( + self, + language: Optional[str] = None, + temperature: Optional[float] = None, + ): + self.language = language + self.temperature = temperature + + +@dataclass +class AudioTranscriptionResponse: + """Response from an audio transcription request. + + Attributes: + text: The transcribed text. + """ + + text: str + + +class AudioClient: + """OpenAI-compatible audio transcription client backed by Foundry Local Core. + + Supports non-streaming and streaming transcription of audio files. + + Attributes: + model_id: The ID of the loaded Whisper model variant. + settings: Tunable ``AudioSettings`` (language, temperature). + """ + + def __init__(self, model_id: str, core_interop: CoreInterop): + self.model_id = model_id + self.settings = AudioSettings() + self._core_interop = core_interop + + @staticmethod + def _validate_audio_file_path(audio_file_path: str) -> None: + """Validate that the audio file path is a non-empty string.""" + if not isinstance(audio_file_path, str) or audio_file_path.strip() == "": + raise ValueError("Audio file path must be a non-empty string.") + + def _create_request_json(self, audio_file_path: str) -> str: + """Build the JSON payload for the ``audio_transcribe`` native command.""" + request: dict = { + "Model": self.model_id, + "FileName": audio_file_path, + } + + metadata: dict[str, str] = {} + + if self.settings.language is not None: + request["Language"] = self.settings.language + metadata["language"] = self.settings.language + + if self.settings.temperature is not None: + request["Temperature"] = self.settings.temperature + metadata["temperature"] = str(self.settings.temperature) + + if metadata: + request["metadata"] = metadata + + return json.dumps(request) + + def transcribe(self, audio_file_path: str) -> AudioTranscriptionResponse: + """Transcribe an audio file (non-streaming). + + Args: + audio_file_path: Path to the audio file to transcribe. + + Returns: + An ``AudioTranscriptionResponse`` containing the transcribed text. + + Raises: + ValueError: If *audio_file_path* is not a non-empty string. + FoundryLocalException: If the underlying native transcription command fails. 
+ """ + self._validate_audio_file_path(audio_file_path) + + request_json = self._create_request_json(audio_file_path) + request = InteropRequest(params={"OpenAICreateRequest": request_json}) + + response = self._core_interop.execute_command("audio_transcribe", request) + if response.error is not None: + raise FoundryLocalException( + f"Audio transcription failed for model '{self.model_id}': {response.error}" + ) + + data = json.loads(response.data) + return AudioTranscriptionResponse(text=data.get("text", "")) + + def transcribe_streaming( + self, + audio_file_path: str, + callback: Callable[[AudioTranscriptionResponse], None], + ) -> None: + """Transcribe an audio file with streaming chunks. + + Each chunk is passed to *callback* as an ``AudioTranscriptionResponse``. + + Args: + audio_file_path: Path to the audio file to transcribe. + callback: Called with each incremental transcription chunk. + + Raises: + ValueError: If *audio_file_path* is not a non-empty string. + FoundryLocalException: If the underlying native transcription command fails. + """ + self._validate_audio_file_path(audio_file_path) + + if not callable(callback): + raise TypeError("Callback must be a valid function.") + + request_json = self._create_request_json(audio_file_path) + request = InteropRequest(params={"OpenAICreateRequest": request_json}) + + def callback_handler(chunk_str: str): + chunk_data = json.loads(chunk_str) + chunk = AudioTranscriptionResponse(text=chunk_data.get("text", "")) + callback(chunk) + + response = self._core_interop.execute_command_with_callback( + "audio_transcribe", request, callback_handler + ) + if response.error is not None: + raise FoundryLocalException( + f"Streaming audio transcription failed for model '{self.model_id}': {response.error}" + ) \ No newline at end of file diff --git a/sdk/python/src/openai/chat_client.py b/sdk/python/src/openai/chat_client.py new file mode 100644 index 00000000..0b0d58bc --- /dev/null +++ b/sdk/python/src/openai/chat_client.py @@ -0,0 +1,290 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from __future__ import annotations + +import logging +import json +import queue +import threading + +from ..detail.core_interop import CoreInterop, InteropRequest +from ..exception import FoundryLocalException +from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam +from openai.types.chat.completion_create_params import CompletionCreateParamsBase, \ + CompletionCreateParamsNonStreaming, \ + CompletionCreateParamsStreaming +from openai.types.chat import ChatCompletion +from openai.types.chat.chat_completion_chunk import ChatCompletionChunk +from typing import Any, Dict, Generator, List, Optional + +logger = logging.getLogger(__name__) + + +class ChatClientSettings: + """Settings for chat completion requests. + + Attributes match the OpenAI chat completion API parameters. + Foundry-specific settings (``top_k``, ``random_seed``) are sent via metadata. 
+ """ + + def __init__( + self, + frequency_penalty: Optional[float] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + temperature: Optional[float] = None, + presence_penalty: Optional[float] = None, + random_seed: Optional[int] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + response_format: Optional[Dict[str, Any]] = None, + tool_choice: Optional[Dict[str, Any]] = None, + ): + self.frequency_penalty = frequency_penalty + self.max_tokens = max_tokens + self.n = n + self.temperature = temperature + self.presence_penalty = presence_penalty + self.random_seed = random_seed + self.top_k = top_k + self.top_p = top_p + self.response_format = response_format + self.tool_choice = tool_choice + + def _serialize(self) -> Dict[str, Any]: + """Serialize settings into an OpenAI-compatible request dict.""" + self._validate_response_format(self.response_format) + self._validate_tool_choice(self.tool_choice) + + result: Dict[str, Any] = { + k: v for k, v in { + "frequency_penalty": self.frequency_penalty, + "max_tokens": self.max_tokens, + "n": self.n, + "presence_penalty": self.presence_penalty, + "temperature": self.temperature, + "top_p": self.top_p, + "response_format": self.response_format, + "tool_choice": self.tool_choice, + }.items() if v is not None + } + + metadata: Dict[str, str] = {} + if self.top_k is not None: + metadata["top_k"] = str(self.top_k) + if self.random_seed is not None: + metadata["random_seed"] = str(self.random_seed) + + if metadata: + result["metadata"] = metadata + + return result + + def _validate_response_format(self, response_format: Optional[Dict[str, Any]]) -> None: + if response_format is None: + return + valid_types = ["text", "json_object", "json_schema", "lark_grammar"] + fmt_type = response_format.get("type") + if fmt_type not in valid_types: + raise ValueError(f"ResponseFormat type must be one of: {', '.join(valid_types)}") + grammar_types = ["json_schema", "lark_grammar"] + if fmt_type in grammar_types: + if fmt_type == "json_schema" and ( + not isinstance(response_format.get("json_schema"), str) + or not response_format["json_schema"].strip() + ): + raise ValueError('ResponseFormat with type "json_schema" must have a valid json_schema string.') + if fmt_type == "lark_grammar" and ( + not isinstance(response_format.get("lark_grammar"), str) + or not response_format["lark_grammar"].strip() + ): + raise ValueError('ResponseFormat with type "lark_grammar" must have a valid lark_grammar string.') + elif response_format.get("json_schema") or response_format.get("lark_grammar"): + raise ValueError( + f'ResponseFormat with type "{fmt_type}" should not have json_schema or lark_grammar properties.' + ) + + def _validate_tool_choice(self, tool_choice: Optional[Dict[str, Any]]) -> None: + if tool_choice is None: + return + valid_types = ["none", "auto", "required", "function"] + choice_type = tool_choice.get("type") + if choice_type not in valid_types: + raise ValueError(f"ToolChoice type must be one of: {', '.join(valid_types)}") + if choice_type == "function" and ( + not isinstance(tool_choice.get("name"), str) or not tool_choice.get("name", "").strip() + ): + raise ValueError('ToolChoice with type "function" must have a valid name string.') + elif choice_type != "function" and tool_choice.get("name"): + raise ValueError(f'ToolChoice with type "{choice_type}" should not have a name property.') + +class ChatClient: + """OpenAI-compatible chat completions client backed by Foundry Local Core. 
+ + Supports non-streaming and streaming completions with optional tool calling. + + Attributes: + model_id: The ID of the loaded model variant. + settings: Tunable ``ChatClientSettings`` (temperature, max tokens, etc.). + """ + + def __init__(self, model_id: str, core_interop: CoreInterop): + self.model_id = model_id + self.settings = ChatClientSettings() + self._core_interop = core_interop + + def _validate_messages(self, messages: List[ChatCompletionMessageParam]) -> None: + """Validate the messages list before sending to the native layer.""" + if not messages: + raise ValueError("messages must be a non-empty list.") + for i, msg in enumerate(messages): + if not isinstance(msg, dict): + raise ValueError(f"messages[{i}] must be a dict, got {type(msg).__name__}.") + if "role" not in msg: + raise ValueError(f"messages[{i}] is missing required key 'role'.") + if "content" not in msg: + raise ValueError(f"messages[{i}] is missing required key 'content'.") + + def _validate_tools(self, tools: Optional[List[Dict[str, Any]]]) -> None: + """Validate the tools list before sending to the native layer.""" + if not tools: + return + if not isinstance(tools, list): + raise ValueError("tools must be a list if provided.") + for i, tool in enumerate(tools): + if not isinstance(tool, dict) or not tool: + raise ValueError( + f"tools[{i}] must be a non-null object with a valid 'type' and 'function' definition." + ) + if not isinstance(tool.get("type"), str) or not tool["type"].strip(): + raise ValueError(f"tools[{i}] must have a 'type' property that is a non-empty string.") + fn = tool.get("function") + if not isinstance(fn, dict): + raise ValueError(f"tools[{i}] must have a 'function' property that is a non-empty object.") + if not isinstance(fn.get("name"), str) or not fn["name"].strip(): + raise ValueError( + f"tools[{i}]'s function must have a 'name' property that is a non-empty string." + ) + + def _create_request( + self, + messages: List[ChatCompletionMessageParam], + streaming: bool, + tools: Optional[List[Dict[str, Any]]] = None, + ) -> str: + request: Dict[str, Any] = { + "model": self.model_id, + "messages": messages, + **({ + "tools": tools} if tools else {}), + **({ + "stream": True} if streaming else {}), + **self.settings._serialize(), + } + + if streaming: + chat_request = CompletionCreateParamsStreaming(request) + else: + chat_request = CompletionCreateParamsNonStreaming(request) + + return json.dumps(chat_request) + + def complete_chat(self, messages: List[ChatCompletionMessageParam], tools: Optional[List[Dict[str, Any]]] = None): + """Perform a non-streaming chat completion. + + Args: + messages: Conversation history as a list of OpenAI message dicts. + tools: Optional list of tool definitions for function calling. + + Returns: + A ``ChatCompletion`` response. + + Raises: + ValueError: If messages is None, empty, or contains malformed entries. + FoundryLocalException: If the native command returns an error. 
+ """ + self._validate_messages(messages) + self._validate_tools(tools) + chat_request_json = self._create_request(messages, streaming=False, tools=tools) + + # Send the request to the chat API + request = InteropRequest(params={"OpenAICreateRequest": chat_request_json}) + response = self._core_interop.execute_command("chat_completions", request) + if response.error is not None: + raise FoundryLocalException(f"Error during chat completion: {response.error}") + + completion = ChatCompletion.model_validate_json(response.data) + + return completion + + def _stream_chunks(self, chat_request_json: str) -> Generator[ChatCompletionChunk, None, None]: + """Background-thread generator that yields parsed chunks from the native streaming call.""" + _SENTINEL = object() + chunk_queue: queue.Queue = queue.Queue() + errors: List[Exception] = [] + + def _on_chunk(response_str: str) -> None: + raw = json.loads(response_str) + # Foundry Local returns tool call chunks with "message.tool_calls" instead + # of the standard streaming "delta.tool_calls". Normalize to delta format + # so ChatCompletionChunk parses correctly. + for choice in raw.get("choices", []): + if "message" in choice and "delta" not in choice: + msg = choice.pop("message") + # ChoiceDeltaToolCall requires "index"; add if missing + for i, tc in enumerate(msg.get("tool_calls", [])): + tc.setdefault("index", i) + choice["delta"] = msg + chunk_queue.put(ChatCompletionChunk.model_validate(raw)) + + def _run() -> None: + try: + resp = self._core_interop.execute_command_with_callback( + "chat_completions", + InteropRequest(params={"OpenAICreateRequest": chat_request_json}), + _on_chunk, + ) + if resp.error is not None: + errors.append(FoundryLocalException(f"Error during streaming chat completion: {resp.error}")) + except Exception as exc: + errors.append(exc) + finally: + chunk_queue.put(_SENTINEL) + + threading.Thread(target=_run, daemon=True).start() + while (item := chunk_queue.get()) is not _SENTINEL: + yield item + if errors: + raise errors[0] + + def complete_streaming_chat( + self, + messages: List[ChatCompletionMessageParam], + tools: Optional[List[Dict[str, Any]]] = None, + ) -> Generator[ChatCompletionChunk, None, None]: + """Perform a streaming chat completion, yielding chunks as they arrive. + + Consume with a standard ``for`` loop:: + + for chunk in client.complete_streaming_chat(messages): + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="", flush=True) + + Args: + messages: Conversation history as a list of OpenAI message dicts. + tools: Optional list of tool definitions for function calling. + + Returns: + A generator of ``ChatCompletionChunk`` objects. + + Raises: + ValueError: If messages or tools are malformed. + FoundryLocalException: If the native layer returns an error. + """ + self._validate_messages(messages) + self._validate_tools(tools) + chat_request_json = self._create_request(messages, streaming=True, tools=tools) + return self._stream_chunks(chat_request_json) diff --git a/sdk/python/src/version.py b/sdk/python/src/version.py new file mode 100644 index 00000000..f198d448 --- /dev/null +++ b/sdk/python/src/version.py @@ -0,0 +1,6 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- + +__version__ = "0.9.0.dev0" diff --git a/sdk/python/test/README.md b/sdk/python/test/README.md new file mode 100644 index 00000000..92f389a8 --- /dev/null +++ b/sdk/python/test/README.md @@ -0,0 +1,79 @@ +# Foundry Local Python SDK – Test Suite + +This test suite mirrors the structure of the JS (`sdk_v2/js/test/`) and C# (`sdk_v2/cs/test/`) SDK test suites. + +## Prerequisites + +1. **Python 3.10+** (tested with 3.12/3.13) +2. **SDK installed in editable mode** from the `sdk/python` directory: + ```bash + pip install -e . + ``` +3. **Test dependencies**: + ```bash + pip install -r requirements-dev.txt + ``` +4. **Test model data** – the `test-data-shared` folder must exist as a sibling of the git repo root + (e.g. `../test-data-shared` relative to the repo). It should contain cached models for + `qwen2.5-0.5b` and `whisper-tiny`. + +## Running the tests + +From the `sdk/python` directory: + +```bash +# Run all tests +python -m pytest test/ + +# Run with verbose output +python -m pytest test/ -v + +# Run a specific test file +python -m pytest test/test_catalog.py + +# Run a specific test class or function +python -m pytest test/test_catalog.py::TestCatalog::test_should_list_models + +# List all collected tests without running them +python -m pytest test/ --collect-only +``` + +## Test structure + +``` +test/ +├── conftest.py # Shared fixtures & config (equivalent to testUtils.ts) +├── test_foundry_local_manager.py # FoundryLocalManager initialization (2 tests) +├── test_catalog.py # Catalog listing, lookup, error cases (9 tests) +├── test_model.py # Model caching & load/unload lifecycle (2 tests) +├── detail/ +│ └── test_model_load_manager.py # ModelLoadManager core interop & web service (5 tests) +└── openai/ + ├── test_chat_client.py # Chat completions, streaming, error validation (7 tests) + └── test_audio_client.py # Audio transcription (7 tests) +``` + +**Total: 32 tests** + +## Key conventions + +| Concept | Python (pytest) | JS (Mocha) | C# (TUnit) | +|---|---|---|---| +| Shared setup | `conftest.py` (auto-discovered) | `testUtils.ts` (explicit import) | `Utils.cs` (`[Before(Assembly)]`) | +| Session fixture | `@pytest.fixture(scope="session")` | manual singleton | `[Before(Assembly)]` static | +| Teardown | `yield` + cleanup in fixture | `after()` hook | `[After(Assembly)]` | +| Skip in CI | `@skip_in_ci` marker | `IS_RUNNING_IN_CI` + `this.skip()` | `[SkipInCI]` attribute | +| Expected failure | `@pytest.mark.xfail` | N/A | N/A | +| Timeout | `@pytest.mark.timeout(30)` | `this.timeout(30000)` | `[Timeout(30000)]` | + +## CI environment detection + +Tests that require the web service are skipped when either `TF_BUILD=true` (Azure DevOps) or +`GITHUB_ACTIONS=true` is set. + +## Test models + +| Alias | Use | Variant | +|---|---|---| +| `qwen2.5-0.5b` | Chat completions | `qwen2.5-0.5b-instruct-generic-cpu:4` | +| `whisper-tiny` | Audio transcription | `openai-whisper-tiny-generic-cpu:2` | diff --git a/sdk/python/test/__init__.py b/sdk/python/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sdk/python/test/conftest.py b/sdk/python/test/conftest.py new file mode 100644 index 00000000..b7e22c97 --- /dev/null +++ b/sdk/python/test/conftest.py @@ -0,0 +1,145 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
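
A sketch of what a new test file would look like under the conventions described in this README, using the session-scoped fixtures from the `conftest.py` that follows. The test class and its assertions are hypothetical.

```python
from .conftest import TEST_MODEL_ALIAS, skip_in_ci


class TestExample:
    def test_model_is_listed(self, catalog):
        # "catalog" resolves to the session-scoped fixture in conftest.py
        assert TEST_MODEL_ALIAS in {m.alias for m in catalog.list_models()}

    @skip_in_ci  # skipped when TF_BUILD or GITHUB_ACTIONS is "true"
    def test_web_service_round_trip(self, manager):
        manager.start_web_service()
        try:
            assert manager.urls
        finally:
            manager.stop_web_service()
```
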
+# -------------------------------------------------------------------------- +"""Shared test configuration and fixtures for Foundry Local Python SDK tests. + +NOTE: "conftest.py" is a special filename that pytest uses to auto-discover +fixtures and shared utilities. All fixtures defined here are automatically +available to every test file without needing an explicit import. +This serves the same role as testUtils.ts in the JS SDK. +""" + +from __future__ import annotations + +import os +import logging + +import pytest + +from pathlib import Path + +from foundry_local_sdk.configuration import Configuration, LogLevel +from foundry_local_sdk.foundry_local_manager import FoundryLocalManager + +logger = logging.getLogger(__name__) + +TEST_MODEL_ALIAS = "qwen2.5-0.5b" +AUDIO_MODEL_ALIAS = "whisper-tiny" + +def get_git_repo_root() -> Path: + """Walk upward from __file__ until we find a .git directory.""" + current = Path(__file__).resolve().parent + while True: + if (current / ".git").exists(): + return current + parent = current.parent + if parent == current: + raise RuntimeError("Could not find git repo root") + current = parent + + +def get_test_data_shared_path() -> str: + """Return absolute path to the test-data-shared folder (sibling of the repo root).""" + repo_root = get_git_repo_root() + return str(repo_root.parent / "test-data-shared") + + +def is_running_in_ci() -> bool: + """Check TF_BUILD (Azure DevOps) and GITHUB_ACTIONS env vars.""" + azure_devops = os.environ.get("TF_BUILD", "false").lower() == "true" + github_actions = os.environ.get("GITHUB_ACTIONS", "false").lower() == "true" + return azure_devops or github_actions + + +IS_RUNNING_IN_CI = is_running_in_ci() + +skip_in_ci = pytest.mark.skipif(IS_RUNNING_IN_CI, reason="Skipped in CI environments") + + +def get_test_config() -> Configuration: + """Build a Configuration suitable for integration tests.""" + repo_root = get_git_repo_root() + return Configuration( + app_name="FoundryLocalTest", + model_cache_dir=get_test_data_shared_path(), + log_level=LogLevel.WARNING, + logs_dir=str(repo_root / "sdk" / "python" / "logs"), + additional_settings={"Bootstrap": "false"}, + ) + + +def get_multiply_tool(): + """Tool definition for the multiply_numbers function-calling test.""" + return { + "type": "function", + "function": { + "name": "multiply_numbers", + "description": "A tool for multiplying two numbers.", + "parameters": { + "type": "object", + "properties": { + "first": { + "type": "integer", + "description": "The first number in the operation", + }, + "second": { + "type": "integer", + "description": "The second number in the operation", + }, + }, + "required": ["first", "second"], + }, + }, + } + + +# --------------------------------------------------------------------------- +# Session-scoped fixtures +# --------------------------------------------------------------------------- + +@pytest.fixture(scope="session") +def manager(): + """Initialize FoundryLocalManager once for the entire test session.""" + # Reset singleton in case a previous run left state + FoundryLocalManager.instance = None + + config = get_test_config() + FoundryLocalManager.initialize(config) + mgr = FoundryLocalManager.instance + assert mgr is not None, "FoundryLocalManager.initialize did not set instance" + + yield mgr + + # Teardown: unload all loaded models + try: + catalog = mgr.catalog + loaded = catalog.get_loaded_models() + for model_variant in loaded: + try: + model_variant.unload() + except Exception as e: + logger.warning("Failed to unload model %s 
during teardown: %s", model_variant.id, e) + except Exception as e: + logger.warning("Failed to get loaded models during teardown: %s", e) + + # Reset the singleton so that other test sessions start clean + FoundryLocalManager.instance = None + + +@pytest.fixture(scope="session") +def catalog(manager): + """Return the Catalog from the session-scoped manager.""" + return manager.catalog + + +@pytest.fixture(scope="session") +def core_interop(manager): + """Return the CoreInterop from the session-scoped manager (internal, for component tests).""" + return manager._core_interop + + +@pytest.fixture(scope="session") +def model_load_manager(manager): + """Return the ModelLoadManager from the session-scoped manager (internal, for component tests).""" + return manager._model_load_manager diff --git a/sdk/python/test/detail/__init__.py b/sdk/python/test/detail/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sdk/python/test/detail/test_model_load_manager.py b/sdk/python/test/detail/test_model_load_manager.py new file mode 100644 index 00000000..a5a231e3 --- /dev/null +++ b/sdk/python/test/detail/test_model_load_manager.py @@ -0,0 +1,144 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Tests for ModelLoadManager – mirrors modelLoadManager.test.ts.""" + +from __future__ import annotations + +import pytest + +from foundry_local_sdk.detail.model_load_manager import ModelLoadManager +from ..conftest import TEST_MODEL_ALIAS, IS_RUNNING_IN_CI, skip_in_ci + + +class TestModelLoadManagerCoreInterop: + """ModelLoadManager tests using Core Interop (no external URL).""" + + def _get_model_id(self, catalog) -> str: + """Resolve the variant ID for the test model alias.""" + cached = catalog.get_cached_models() + variant = next((m for m in cached if m.alias == TEST_MODEL_ALIAS), None) + assert variant is not None, f"{TEST_MODEL_ALIAS} should be cached" + return variant.id + + def test_should_load_model(self, catalog, core_interop): + """Load model via core interop and verify it appears in loaded list.""" + model_id = self._get_model_id(catalog) + mlm = ModelLoadManager(core_interop) + + mlm.load(model_id) + loaded = mlm.list_loaded() + assert model_id in loaded + + # Cleanup + mlm.unload(model_id) + + def test_should_unload_model(self, catalog, core_interop): + """Load then unload model via core interop.""" + model_id = self._get_model_id(catalog) + mlm = ModelLoadManager(core_interop) + + mlm.load(model_id) + loaded = mlm.list_loaded() + assert model_id in loaded + + mlm.unload(model_id) + loaded = mlm.list_loaded() + assert model_id not in loaded + + def test_should_list_loaded_models(self, catalog, core_interop): + """list_loaded() should return an array containing the loaded model.""" + model_id = self._get_model_id(catalog) + mlm = ModelLoadManager(core_interop) + + mlm.load(model_id) + loaded = mlm.list_loaded() + + assert isinstance(loaded, list) + assert model_id in loaded + + # Cleanup + mlm.unload(model_id) + + +class TestModelLoadManagerExternalService: + """ModelLoadManager tests using external web service URL (skipped in CI).""" + + @skip_in_ci + def test_should_load_and_unload_via_external_service(self, manager, catalog, core_interop): + """Load/unload model through the web service endpoint.""" + cached = catalog.get_cached_models() + variant = next((m for m in cached if 
m.alias == TEST_MODEL_ALIAS), None) + assert variant is not None + model_id = variant.id + + # Start web service + try: + manager.start_web_service() + except Exception as e: + pytest.skip(f"Failed to start web service: {e}") + + urls = manager.urls + if not urls or len(urls) == 0: + pytest.skip("Web service started but no URLs returned") + + service_url = urls[0] + + try: + # Setup: load via core interop + setup_mlm = ModelLoadManager(core_interop) + setup_mlm.load(model_id) + loaded = setup_mlm.list_loaded() + assert model_id in loaded + + # Unload via external service + ext_mlm = ModelLoadManager(core_interop, service_url) + ext_mlm.unload(model_id) + + # Verify via core interop + loaded = setup_mlm.list_loaded() + assert model_id not in loaded + finally: + try: + manager.stop_web_service() + except Exception: + pass + + @skip_in_ci + def test_should_list_loaded_via_external_service(self, manager, catalog, core_interop): + """list_loaded() through the web service endpoint should match core interop.""" + cached = catalog.get_cached_models() + variant = next((m for m in cached if m.alias == TEST_MODEL_ALIAS), None) + assert variant is not None + model_id = variant.id + + try: + manager.start_web_service() + except Exception as e: + pytest.skip(f"Failed to start web service: {e}") + + urls = manager.urls + if not urls or len(urls) == 0: + pytest.skip("Web service started but no URLs returned") + + service_url = urls[0] + + try: + # Setup: load via core + setup_mlm = ModelLoadManager(core_interop) + setup_mlm.load(model_id) + + # Verify via external service + ext_mlm = ModelLoadManager(core_interop, service_url) + loaded = ext_mlm.list_loaded() + assert isinstance(loaded, list) + assert model_id in loaded + + # Cleanup + setup_mlm.unload(model_id) + finally: + try: + manager.stop_web_service() + except Exception: + pass diff --git a/sdk/python/test/openai/__init__.py b/sdk/python/test/openai/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/sdk/python/test/openai/test_audio_client.py b/sdk/python/test/openai/test_audio_client.py new file mode 100644 index 00000000..f430d8d5 --- /dev/null +++ b/sdk/python/test/openai/test_audio_client.py @@ -0,0 +1,156 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Tests for AudioClient – mirrors audioClient.test.ts.""" + +from __future__ import annotations + +import pytest + +from ..conftest import AUDIO_MODEL_ALIAS, get_git_repo_root + +# Recording.mp3 lives at sdk/testdata/Recording.mp3 relative to the repo root +AUDIO_FILE_PATH = str(get_git_repo_root() / "sdk" / "testdata" / "Recording.mp3") +EXPECTED_TEXT = ( + " And lots of times you need to give people more than one link at a time." + " You a band could give their fans a couple new videos from the live concert" + " behind the scenes photo gallery and album to purchase like these next few links." 
+) + + +def _get_loaded_audio_model(catalog): + """Helper: ensure the whisper model is selected, loaded, and return Model.""" + cached = catalog.get_cached_models() + assert len(cached) > 0 + + cached_variant = next((m for m in cached if m.alias == AUDIO_MODEL_ALIAS), None) + assert cached_variant is not None, f"{AUDIO_MODEL_ALIAS} should be cached" + + model = catalog.get_model(AUDIO_MODEL_ALIAS) + assert model is not None + + model.select_variant(cached_variant) + model.load() + return model + + +class TestAudioClient: + """Audio Client Tests.""" + + def test_should_transcribe_audio(self, catalog): + """Non-streaming transcription of Recording.mp3.""" + model = _get_loaded_audio_model(catalog) + try: + audio_client = model.get_audio_client() + assert audio_client is not None + + audio_client.settings.language = "en" + audio_client.settings.temperature = 0.0 + + response = audio_client.transcribe(AUDIO_FILE_PATH) + + assert response is not None + assert hasattr(response, "text") + assert isinstance(response.text, str) + assert len(response.text) > 0 + assert response.text == EXPECTED_TEXT + finally: + model.unload() + + def test_should_transcribe_audio_with_temperature(self, catalog): + """Non-streaming transcription with explicit temperature.""" + model = _get_loaded_audio_model(catalog) + try: + audio_client = model.get_audio_client() + assert audio_client is not None + + audio_client.settings.language = "en" + audio_client.settings.temperature = 0.0 + + response = audio_client.transcribe(AUDIO_FILE_PATH) + + assert response is not None + assert isinstance(response.text, str) + assert len(response.text) > 0 + assert response.text == EXPECTED_TEXT + finally: + model.unload() + + def test_should_transcribe_audio_streaming(self, catalog): + """Streaming transcription of Recording.mp3.""" + model = _get_loaded_audio_model(catalog) + try: + audio_client = model.get_audio_client() + assert audio_client is not None + + audio_client.settings.language = "en" + audio_client.settings.temperature = 0.0 + + chunks = [] + + def on_chunk(chunk): + assert chunk is not None + assert hasattr(chunk, "text") + assert isinstance(chunk.text, str) + assert len(chunk.text) > 0 + chunks.append(chunk.text) + + audio_client.transcribe_streaming(AUDIO_FILE_PATH, on_chunk) + + full_text = "".join(chunks) + assert full_text == EXPECTED_TEXT + finally: + model.unload() + + def test_should_transcribe_audio_streaming_with_temperature(self, catalog): + """Streaming transcription with explicit temperature.""" + model = _get_loaded_audio_model(catalog) + try: + audio_client = model.get_audio_client() + assert audio_client is not None + + audio_client.settings.language = "en" + audio_client.settings.temperature = 0.0 + + chunks = [] + + def on_chunk(chunk): + assert chunk is not None + assert isinstance(chunk.text, str) + chunks.append(chunk.text) + + audio_client.transcribe_streaming(AUDIO_FILE_PATH, on_chunk) + + full_text = "".join(chunks) + assert full_text == EXPECTED_TEXT + finally: + model.unload() + + def test_should_raise_for_empty_audio_file_path(self, catalog): + """transcribe('') should raise.""" + model = catalog.get_model(AUDIO_MODEL_ALIAS) + assert model is not None + audio_client = model.get_audio_client() + + with pytest.raises(ValueError, match="Audio file path must be a non-empty string"): + audio_client.transcribe("") + + def test_should_raise_for_streaming_empty_audio_file_path(self, catalog): + """transcribe_streaming('') should raise.""" + model = catalog.get_model(AUDIO_MODEL_ALIAS) + assert model 
is not None + audio_client = model.get_audio_client() + + with pytest.raises(ValueError, match="Audio file path must be a non-empty string"): + audio_client.transcribe_streaming("", lambda chunk: None) + + def test_should_raise_for_streaming_invalid_callback(self, catalog): + """transcribe_streaming with invalid callback should raise.""" + model = catalog.get_model(AUDIO_MODEL_ALIAS) + assert model is not None + audio_client = model.get_audio_client() + + for invalid_callback in [None, 42, {}, "not a function"]: + with pytest.raises(TypeError, match="Callback must be a valid function"): + audio_client.transcribe_streaming(AUDIO_FILE_PATH, invalid_callback) diff --git a/sdk/python/test/openai/test_chat_client.py b/sdk/python/test/openai/test_chat_client.py new file mode 100644 index 00000000..d96891b9 --- /dev/null +++ b/sdk/python/test/openai/test_chat_client.py @@ -0,0 +1,243 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Tests for ChatClient – mirrors chatClient.test.ts.""" + +from __future__ import annotations + +import json + +import pytest + +from ..conftest import TEST_MODEL_ALIAS, get_multiply_tool + + +def _get_loaded_chat_model(catalog): + """Helper: ensure the test model is selected, loaded, and return Model + ChatClient.""" + cached = catalog.get_cached_models() + assert len(cached) > 0 + + cached_variant = next((m for m in cached if m.alias == TEST_MODEL_ALIAS), None) + assert cached_variant is not None, f"{TEST_MODEL_ALIAS} should be cached" + + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + + model.select_variant(cached_variant) + model.load() + return model + + +class TestChatClient: + """Chat Client Tests.""" + + def test_should_perform_chat_completion(self, catalog): + """Non-streaming chat: 7 * 6 should include '42' in the response.""" + model = _get_loaded_chat_model(catalog) + try: + client = model.get_chat_client() + client.settings.max_tokens = 500 + client.settings.temperature = 0.0 # deterministic + + result = client.complete_chat([ + {"role": "user", + "content": "You are a calculator. Be precise. What is the answer to 7 multiplied by 6?"} + ]) + + assert result is not None + assert result.choices is not None + assert len(result.choices) > 0 + assert result.choices[0].message is not None + content = result.choices[0].message.content + assert isinstance(content, str) + assert "42" in content + finally: + model.unload() + + def test_should_perform_streaming_chat_completion(self, catalog): + """Streaming chat: 7 * 6 = 42, then follow-up +25 = 67.""" + model = _get_loaded_chat_model(catalog) + try: + client = model.get_chat_client() + client.settings.max_tokens = 500 + client.settings.temperature = 0.0 + + messages = [ + {"role": "user", + "content": "You are a calculator. Be precise. What is the answer to 7 multiplied by 6?"} + ] + + # ---- First question ---- + chunks = list(client.complete_streaming_chat(messages)) + assert len(chunks) > 0 + first_response = "".join( + c.choices[0].delta.content + for c in chunks + if c.choices and c.choices[0].delta and c.choices[0].delta.content + ) + assert "42" in first_response + + # ---- Follow-up question ---- + messages.append({"role": "assistant", "content": first_response}) + messages.append({"role": "user", "content": "Add 25 to the previous answer. 
Think hard to be sure of the answer."}) + + chunks = list(client.complete_streaming_chat(messages)) + assert len(chunks) > 0 + second_response = "".join( + c.choices[0].delta.content + for c in chunks + if c.choices and c.choices[0].delta and c.choices[0].delta.content + ) + assert "67" in second_response + finally: + model.unload() + + def test_should_raise_for_empty_messages(self, catalog): + """complete_chat with empty list should raise.""" + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + client = model.get_chat_client() + + with pytest.raises(ValueError): + client.complete_chat([]) + + def test_should_raise_for_none_messages(self, catalog): + """complete_chat with None should raise.""" + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + client = model.get_chat_client() + + with pytest.raises(ValueError): + client.complete_chat(None) + + def test_should_raise_for_streaming_empty_messages(self, catalog): + """complete_streaming_chat with empty list should raise.""" + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + client = model.get_chat_client() + + with pytest.raises(ValueError): + client.complete_streaming_chat([]) + + def test_should_raise_for_streaming_none_messages(self, catalog): + """complete_streaming_chat with None should raise.""" + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + client = model.get_chat_client() + + with pytest.raises(ValueError): + client.complete_streaming_chat(None) + + def test_should_perform_tool_calling_chat_completion(self, catalog): + """Tool calling (non-streaming): model uses multiply_numbers tool to answer 7 * 6.""" + model = _get_loaded_chat_model(catalog) + try: + client = model.get_chat_client() + client.settings.max_tokens = 500 + client.settings.temperature = 0.0 + client.settings.tool_choice = {"type": "required"} + + messages = [ + {"role": "system", "content": "You are a helpful AI assistant. 
If necessary, you can use any provided tools to answer the question."}, + {"role": "user", "content": "What is the answer to 7 multiplied by 6?"}, + ] + tools = [get_multiply_tool()] + + # First turn: model should respond with a tool call + response = client.complete_chat(messages, tools) + + assert response is not None + assert response.choices is not None + assert len(response.choices) > 0 + assert response.choices[0].finish_reason == "tool_calls" + assert response.choices[0].message is not None + assert response.choices[0].message.tool_calls is not None + assert len(response.choices[0].message.tool_calls) > 0 + + tool_call = response.choices[0].message.tool_calls[0] + assert tool_call.type == "function" + assert tool_call.function.name == "multiply_numbers" + + args = json.loads(tool_call.function.arguments) + assert args["first"] == 7 + assert args["second"] == 6 + + # Second turn: provide tool result and ask model to continue + messages.append({"role": "tool", "content": "7 x 6 = 42."}) + messages.append({"role": "system", "content": "Respond only with the answer generated by the tool."}) + + client.settings.tool_choice = {"type": "auto"} + response = client.complete_chat(messages, tools) + + assert response.choices[0].message.content is not None + assert "42" in response.choices[0].message.content + finally: + model.unload() + + def test_should_perform_tool_calling_streaming_chat_completion(self, catalog): + """Tool calling (streaming): model uses multiply_numbers tool, then continue conversation.""" + model = _get_loaded_chat_model(catalog) + try: + client = model.get_chat_client() + client.settings.max_tokens = 500 + client.settings.temperature = 0.0 + client.settings.tool_choice = {"type": "required"} + + messages = [ + {"role": "system", "content": "You are a helpful AI assistant. 
If necessary, you can use any provided tools to answer the question."}, + {"role": "user", "content": "What is the answer to 7 multiplied by 6?"}, + ] + tools = [get_multiply_tool()] + + # First turn: collect chunks and find the tool call + chunks = list(client.complete_streaming_chat(messages, tools)) + last_tool_call_chunk = next( + (c for c in reversed(chunks) + if c.choices and c.choices[0].delta and c.choices[0].delta.tool_calls), + None, + ) + assert last_tool_call_chunk is not None + + tool_call_choice = last_tool_call_chunk.choices[0] + assert tool_call_choice.finish_reason == "tool_calls" + + tool_call = tool_call_choice.delta.tool_calls[0] + assert tool_call.type == "function" + assert tool_call.function.name == "multiply_numbers" + + args = json.loads(tool_call.function.arguments) + assert args["first"] == 7 + assert args["second"] == 6 + + # Second turn: provide tool result and continue + messages.append({"role": "tool", "content": "7 x 6 = 42."}) + messages.append({"role": "system", "content": "Respond only with the answer generated by the tool."}) + + client.settings.tool_choice = {"type": "auto"} + + chunks = list(client.complete_streaming_chat(messages, tools)) + second_response = "".join( + c.choices[0].delta.content + for c in chunks + if c.choices and c.choices[0].delta and c.choices[0].delta.content + ) + assert "42" in second_response + finally: + model.unload() + + def test_should_return_generator(self, catalog): + """complete_streaming_chat returns a generator that yields chunks.""" + model = _get_loaded_chat_model(catalog) + try: + client = model.get_chat_client() + client.settings.max_tokens = 50 + client.settings.temperature = 0.0 + + result = client.complete_streaming_chat([{"role": "user", "content": "Say hi."}]) + + assert result is not None + chunks = list(result) + assert len(chunks) > 0 + finally: + model.unload() \ No newline at end of file diff --git a/sdk/python/test/test_catalog.py b/sdk/python/test/test_catalog.py new file mode 100644 index 00000000..aeb39c20 --- /dev/null +++ b/sdk/python/test/test_catalog.py @@ -0,0 +1,74 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +"""Tests for Catalog – mirrors catalog.test.ts.""" + +from __future__ import annotations + +from .conftest import TEST_MODEL_ALIAS + + +class TestCatalog: + """Catalog Tests.""" + + def test_should_initialize_with_catalog_name(self, catalog): + """Catalog should expose a non-empty name string.""" + assert isinstance(catalog.name, str) + assert len(catalog.name) > 0 + + def test_should_list_models(self, catalog): + """list_models() should return a non-empty list containing the test model.""" + models = catalog.list_models() + assert isinstance(models, list) + assert len(models) > 0 + + # Verify test model is present + aliases = {m.alias for m in models} + assert TEST_MODEL_ALIAS in aliases + + def test_should_get_model_by_alias(self, catalog): + """get_model() should return a Model whose alias matches.""" + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + assert model.alias == TEST_MODEL_ALIAS + + def test_should_return_none_for_empty_alias(self, catalog): + """get_model('') should return None (unknown alias).""" + result = catalog.get_model("") + assert result is None + + def test_should_return_none_for_unknown_alias(self, catalog): + """get_model() with a random alias should return None.""" + result = catalog.get_model("definitely-not-a-real-model-alias-12345") + assert result is None + + def test_should_get_cached_models(self, catalog): + """get_cached_models() should return a list with at least the test model.""" + cached = catalog.get_cached_models() + assert isinstance(cached, list) + assert len(cached) > 0 + + # At least the test model should be cached + aliases = {m.alias for m in cached} + assert TEST_MODEL_ALIAS in aliases + + def test_should_get_model_variant_by_id(self, catalog): + """get_model_variant() with a valid ID should return the variant.""" + cached = catalog.get_cached_models() + assert len(cached) > 0 + variant = cached[0] + + result = catalog.get_model_variant(variant.id) + assert result is not None + assert result.id == variant.id + + def test_should_return_none_for_empty_variant_id(self, catalog): + """get_model_variant('') should return None.""" + result = catalog.get_model_variant("") + assert result is None + + def test_should_return_none_for_unknown_variant_id(self, catalog): + """get_model_variant() with a random ID should return None.""" + result = catalog.get_model_variant("definitely-not-a-real-model-id-12345") + assert result is None diff --git a/sdk/python/test/test_foundry_local_manager.py b/sdk/python/test/test_foundry_local_manager.py new file mode 100644 index 00000000..b0a9c4e2 --- /dev/null +++ b/sdk/python/test/test_foundry_local_manager.py @@ -0,0 +1,22 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +"""Tests for FoundryLocalManager – mirrors foundryLocalManager.test.ts.""" + +from __future__ import annotations + + +class TestFoundryLocalManager: + """Foundry Local Manager Tests.""" + + def test_should_initialize_successfully(self, manager): + """Manager singleton should be non-None after initialize().""" + assert manager is not None + + def test_should_return_catalog(self, manager): + """Manager should expose a Catalog with a non-empty name.""" + catalog = manager.catalog + assert catalog is not None + assert isinstance(catalog.name, str) + assert len(catalog.name) > 0 diff --git a/sdk/python/test/test_model.py b/sdk/python/test/test_model.py new file mode 100644 index 00000000..54a30ef4 --- /dev/null +++ b/sdk/python/test/test_model.py @@ -0,0 +1,58 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Tests for Model – mirrors model.test.ts.""" + +from __future__ import annotations + +from .conftest import TEST_MODEL_ALIAS, AUDIO_MODEL_ALIAS + + +class TestModel: + """Model Tests.""" + + def test_should_verify_cached_models(self, catalog): + """Cached models from test-data-shared should include qwen and whisper.""" + cached = catalog.get_cached_models() + assert isinstance(cached, list) + assert len(cached) > 0 + + # Check qwen model is cached + qwen = next((m for m in cached if m.alias == TEST_MODEL_ALIAS), None) + assert qwen is not None, f"{TEST_MODEL_ALIAS} should be cached" + assert qwen.is_cached is True + + # Check whisper model is cached + whisper = next((m for m in cached if m.alias == AUDIO_MODEL_ALIAS), None) + assert whisper is not None, f"{AUDIO_MODEL_ALIAS} should be cached" + assert whisper.is_cached is True + + def test_should_load_and_unload_model(self, catalog): + """Load/unload cycle should toggle is_loaded on the selected variant.""" + cached = catalog.get_cached_models() + assert len(cached) > 0 + + cached_variant = next((m for m in cached if m.alias == TEST_MODEL_ALIAS), None) + assert cached_variant is not None + + model = catalog.get_model(TEST_MODEL_ALIAS) + assert model is not None + + model.select_variant(cached_variant) + + # Ensure it's not loaded initially (or unload if it is) + if model.is_loaded: + model.unload() + assert model.is_loaded is False + + try: + model.load() + assert model.is_loaded is True + + model.unload() + assert model.is_loaded is False + finally: + # Safety cleanup + if model.is_loaded: + model.unload() From b76b3eaeda4caf50a4748ddece0dfa864985e7e9 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:39:53 -0500 Subject: [PATCH 06/83] Update privacy policy link in website footer (#557) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replaces the hardcoded privacy statement URL in the footer with the Microsoft short-link redirect. 
## Changes - **`www/src/lib/components/home/footer.svelte`**: Updated `href` from `https://www.microsoft.com/en-us/privacy/privacystatement` → `https://go.microsoft.com/fwlink/?LinkId=521839` Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: MaanavD <24942306+MaanavD@users.noreply.github.com> --- www/src/lib/components/home/footer.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/www/src/lib/components/home/footer.svelte b/www/src/lib/components/home/footer.svelte index 44bc8df2..03a4df2d 100644 --- a/www/src/lib/components/home/footer.svelte +++ b/www/src/lib/components/home/footer.svelte @@ -111,7 +111,7 @@ © {new Date().getFullYear()} Microsoft Corporation. All rights reserved.

Date: Fri, 27 Mar 2026 15:00:18 -0500 Subject: [PATCH 07/83] Add model context capabilities (#554) SDK: add contextLength, inputModalities, outputModalities, capabilities - C# ModelInfo: add ContextLength, InputModalities, OutputModalities, Capabilities - JS ModelInfo/IModel/Model/ModelVariant: add new fields and convenience getters - Rust ModelInfo: add new fields; Model: add accessor methods --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: maanavd --- .github/workflows/build-rust-steps.yml | 2 +- .../GettingStarted/Directory.Packages.props | 4 +- sdk/cs/src/FoundryModelInfo.cs | 12 +++ sdk/js/docs/README.md | 84 +++++++++++++++++ sdk/js/docs/classes/AudioClient.md | 2 +- sdk/js/docs/classes/AudioClientSettings.md | 2 +- sdk/js/docs/classes/Catalog.md | 2 +- sdk/js/docs/classes/ChatClient.md | 2 +- sdk/js/docs/classes/ChatClientSettings.md | 2 +- sdk/js/docs/classes/FoundryLocalManager.md | 2 +- sdk/js/docs/classes/Model.md | 92 ++++++++++++++++++- sdk/js/docs/classes/ModelLoadManager.md | 2 +- sdk/js/docs/classes/ModelVariant.md | 92 ++++++++++++++++++- sdk/js/docs/classes/ResponsesClient.md | 2 +- .../docs/classes/ResponsesClientSettings.md | 2 +- sdk/js/src/imodel.ts | 6 ++ sdk/js/src/model.ts | 20 ++++ sdk/js/src/modelVariant.ts | 20 ++++ sdk/js/src/types.ts | 4 + sdk/rust/build.rs | 45 ++------- sdk/rust/src/model.rs | 25 +++++ sdk/rust/src/types.rs | 8 ++ www/.npmrc | 3 - www/package.json | 2 +- 24 files changed, 382 insertions(+), 55 deletions(-) delete mode 100644 www/.npmrc diff --git a/.github/workflows/build-rust-steps.yml b/.github/workflows/build-rust-steps.yml index 27c22da8..f007b7ee 100644 --- a/.github/workflows/build-rust-steps.yml +++ b/.github/workflows/build-rust-steps.yml @@ -28,7 +28,7 @@ jobs: working-directory: sdk/rust env: - CARGO_FEATURES: ${{ inputs.useWinML && '--features winml' || '' }} + CARGO_FEATURES: ${{ inputs.useWinML && '--features winml,nightly' || '--features nightly' }} steps: - name: Checkout repository diff --git a/samples/cs/GettingStarted/Directory.Packages.props b/samples/cs/GettingStarted/Directory.Packages.props index 2d91a9fe..02984002 100644 --- a/samples/cs/GettingStarted/Directory.Packages.props +++ b/samples/cs/GettingStarted/Directory.Packages.props @@ -5,8 +5,8 @@ 1.23.2
- - + + diff --git a/sdk/cs/src/FoundryModelInfo.cs b/sdk/cs/src/FoundryModelInfo.cs index 1f795d22..2d1327cc 100644 --- a/sdk/cs/src/FoundryModelInfo.cs +++ b/sdk/cs/src/FoundryModelInfo.cs @@ -119,4 +119,16 @@ public record ModelInfo [JsonPropertyName("createdAt")] public long CreatedAtUnix { get; init; } + + [JsonPropertyName("contextLength")] + public long? ContextLength { get; init; } + + [JsonPropertyName("inputModalities")] + public string? InputModalities { get; init; } + + [JsonPropertyName("outputModalities")] + public string? OutputModalities { get; init; } + + [JsonPropertyName("capabilities")] + public string? Capabilities { get; init; } } diff --git a/sdk/js/docs/README.md b/sdk/js/docs/README.md index dd483aa4..5e50e636 100644 --- a/sdk/js/docs/README.md +++ b/sdk/js/docs/README.md @@ -462,6 +462,30 @@ get alias(): string; `string` +##### capabilities + +###### Get Signature + +```ts +get capabilities(): string | null; +``` + +###### Returns + +`string` \| `null` + +##### contextLength + +###### Get Signature + +```ts +get contextLength(): number | null; +``` + +###### Returns + +`number` \| `null` + ##### id ###### Get Signature @@ -474,6 +498,18 @@ get id(): string; `string` +##### inputModalities + +###### Get Signature + +```ts +get inputModalities(): string | null; +``` + +###### Returns + +`string` \| `null` + ##### isCached ###### Get Signature @@ -486,6 +522,18 @@ get isCached(): boolean; `boolean` +##### outputModalities + +###### Get Signature + +```ts +get outputModalities(): string | null; +``` + +###### Returns + +`string` \| `null` + ##### path ###### Get Signature @@ -498,6 +546,18 @@ get path(): string; `string` +##### supportsToolCalling + +###### Get Signature + +```ts +get supportsToolCalling(): boolean | null; +``` + +###### Returns + +`boolean` \| `null` + #### Methods ##### createAudioClient() @@ -740,6 +800,18 @@ alias: string; cached: boolean; ``` +##### capabilities? + +```ts +optional capabilities?: string | null; +``` + +##### contextLength? + +```ts +optional contextLength?: number | null; +``` + ##### createdAtUnix ```ts @@ -764,6 +836,12 @@ optional fileSizeMb?: number | null; id: string; ``` +##### inputModalities? + +```ts +optional inputModalities?: string | null; +``` + ##### license? ```ts @@ -806,6 +884,12 @@ modelType: string; name: string; ``` +##### outputModalities? + +```ts +optional outputModalities?: string | null; +``` + ##### promptTemplate? 
```ts diff --git a/sdk/js/docs/classes/AudioClient.md b/sdk/js/docs/classes/AudioClient.md index 12e79de5..e661bad0 100644 --- a/sdk/js/docs/classes/AudioClient.md +++ b/sdk/js/docs/classes/AudioClient.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / AudioClient +[foundry-local-sdk](../README.md) / AudioClient # Class: AudioClient diff --git a/sdk/js/docs/classes/AudioClientSettings.md b/sdk/js/docs/classes/AudioClientSettings.md index dae7cbbe..49e806dc 100644 --- a/sdk/js/docs/classes/AudioClientSettings.md +++ b/sdk/js/docs/classes/AudioClientSettings.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / AudioClientSettings +[foundry-local-sdk](../README.md) / AudioClientSettings # Class: AudioClientSettings diff --git a/sdk/js/docs/classes/Catalog.md b/sdk/js/docs/classes/Catalog.md index b77f254f..23f7cff3 100644 --- a/sdk/js/docs/classes/Catalog.md +++ b/sdk/js/docs/classes/Catalog.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / Catalog +[foundry-local-sdk](../README.md) / Catalog # Class: Catalog diff --git a/sdk/js/docs/classes/ChatClient.md b/sdk/js/docs/classes/ChatClient.md index c3120f0b..26cc6f0c 100644 --- a/sdk/js/docs/classes/ChatClient.md +++ b/sdk/js/docs/classes/ChatClient.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / ChatClient +[foundry-local-sdk](../README.md) / ChatClient # Class: ChatClient diff --git a/sdk/js/docs/classes/ChatClientSettings.md b/sdk/js/docs/classes/ChatClientSettings.md index 7d48bcca..323bd3ca 100644 --- a/sdk/js/docs/classes/ChatClientSettings.md +++ b/sdk/js/docs/classes/ChatClientSettings.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / ChatClientSettings +[foundry-local-sdk](../README.md) / ChatClientSettings # Class: ChatClientSettings diff --git a/sdk/js/docs/classes/FoundryLocalManager.md b/sdk/js/docs/classes/FoundryLocalManager.md index fb9a4783..63bb2dd1 100644 --- a/sdk/js/docs/classes/FoundryLocalManager.md +++ b/sdk/js/docs/classes/FoundryLocalManager.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / FoundryLocalManager +[foundry-local-sdk](../README.md) / FoundryLocalManager # Class: FoundryLocalManager diff --git a/sdk/js/docs/classes/Model.md b/sdk/js/docs/classes/Model.md index 424d673b..0b2dcfa6 100644 --- a/sdk/js/docs/classes/Model.md +++ b/sdk/js/docs/classes/Model.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / Model +[foundry-local-sdk](../README.md) / Model # Class: Model @@ -51,6 +51,42 @@ The model alias. *** +### capabilities + +#### Get Signature + +```ts +get capabilities(): string | null; +``` + +##### Returns + +`string` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`capabilities`](../README.md#capabilities) + +*** + +### contextLength + +#### Get Signature + +```ts +get contextLength(): number | null; +``` + +##### Returns + +`number` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`contextLength`](../README.md#contextlength) + +*** + ### id #### Get Signature @@ -73,6 +109,24 @@ The ID of the selected variant. *** +### inputModalities + +#### Get Signature + +```ts +get inputModalities(): string | null; +``` + +##### Returns + +`string` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`inputModalities`](../README.md#inputmodalities) + +*** + ### isCached #### Get Signature @@ -95,6 +149,24 @@ True if cached, false otherwise. 
*** +### outputModalities + +#### Get Signature + +```ts +get outputModalities(): string | null; +``` + +##### Returns + +`string` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`outputModalities`](../README.md#outputmodalities) + +*** + ### path #### Get Signature @@ -117,6 +189,24 @@ The local file path. *** +### supportsToolCalling + +#### Get Signature + +```ts +get supportsToolCalling(): boolean | null; +``` + +##### Returns + +`boolean` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`supportsToolCalling`](../README.md#supportstoolcalling) + +*** + ### variants #### Get Signature diff --git a/sdk/js/docs/classes/ModelLoadManager.md b/sdk/js/docs/classes/ModelLoadManager.md index f445659b..564d561f 100644 --- a/sdk/js/docs/classes/ModelLoadManager.md +++ b/sdk/js/docs/classes/ModelLoadManager.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / ModelLoadManager +[foundry-local-sdk](../README.md) / ModelLoadManager # Class: ModelLoadManager diff --git a/sdk/js/docs/classes/ModelVariant.md b/sdk/js/docs/classes/ModelVariant.md index 837ead70..6f4e5ee8 100644 --- a/sdk/js/docs/classes/ModelVariant.md +++ b/sdk/js/docs/classes/ModelVariant.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / ModelVariant +[foundry-local-sdk](../README.md) / ModelVariant # Class: ModelVariant @@ -56,6 +56,42 @@ The model alias. *** +### capabilities + +#### Get Signature + +```ts +get capabilities(): string | null; +``` + +##### Returns + +`string` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`capabilities`](../README.md#capabilities) + +*** + +### contextLength + +#### Get Signature + +```ts +get contextLength(): number | null; +``` + +##### Returns + +`number` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`contextLength`](../README.md#contextlength) + +*** + ### id #### Get Signature @@ -78,6 +114,24 @@ The model ID. *** +### inputModalities + +#### Get Signature + +```ts +get inputModalities(): string | null; +``` + +##### Returns + +`string` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`inputModalities`](../README.md#inputmodalities) + +*** + ### isCached #### Get Signature @@ -118,6 +172,24 @@ The ModelInfo object. *** +### outputModalities + +#### Get Signature + +```ts +get outputModalities(): string | null; +``` + +##### Returns + +`string` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`outputModalities`](../README.md#outputmodalities) + +*** + ### path #### Get Signature @@ -138,6 +210,24 @@ The local file path. 
[`IModel`](../README.md#imodel).[`path`](../README.md#path) +*** + +### supportsToolCalling + +#### Get Signature + +```ts +get supportsToolCalling(): boolean | null; +``` + +##### Returns + +`boolean` \| `null` + +#### Implementation of + +[`IModel`](../README.md#imodel).[`supportsToolCalling`](../README.md#supportstoolcalling) + ## Methods ### createAudioClient() diff --git a/sdk/js/docs/classes/ResponsesClient.md b/sdk/js/docs/classes/ResponsesClient.md index 5ee70c81..0ccd9a60 100644 --- a/sdk/js/docs/classes/ResponsesClient.md +++ b/sdk/js/docs/classes/ResponsesClient.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / ResponsesClient +[foundry-local-sdk](../README.md) / ResponsesClient # Class: ResponsesClient diff --git a/sdk/js/docs/classes/ResponsesClientSettings.md b/sdk/js/docs/classes/ResponsesClientSettings.md index 8401faf1..47dfc55e 100644 --- a/sdk/js/docs/classes/ResponsesClientSettings.md +++ b/sdk/js/docs/classes/ResponsesClientSettings.md @@ -1,4 +1,4 @@ -[@prathikrao/foundry-local-sdk](../README.md) / ResponsesClientSettings +[foundry-local-sdk](../README.md) / ResponsesClientSettings # Class: ResponsesClientSettings diff --git a/sdk/js/src/imodel.ts b/sdk/js/src/imodel.ts index be0913d6..f5b72622 100644 --- a/sdk/js/src/imodel.ts +++ b/sdk/js/src/imodel.ts @@ -8,6 +8,12 @@ export interface IModel { get isCached(): boolean; isLoaded(): Promise; + get contextLength(): number | null; + get inputModalities(): string | null; + get outputModalities(): string | null; + get capabilities(): string | null; + get supportsToolCalling(): boolean | null; + download(progressCallback?: (progress: number) => void): Promise; get path(): string; load(): Promise; diff --git a/sdk/js/src/model.ts b/sdk/js/src/model.ts index e2b37119..155d5dd1 100644 --- a/sdk/js/src/model.ts +++ b/sdk/js/src/model.ts @@ -104,6 +104,26 @@ export class Model implements IModel { return this._variants; } + public get contextLength(): number | null { + return this.selectedVariant.contextLength; + } + + public get inputModalities(): string | null { + return this.selectedVariant.inputModalities; + } + + public get outputModalities(): string | null { + return this.selectedVariant.outputModalities; + } + + public get capabilities(): string | null { + return this.selectedVariant.capabilities; + } + + public get supportsToolCalling(): boolean | null { + return this.selectedVariant.supportsToolCalling; + } + /** * Downloads the currently selected variant. * @param progressCallback - Optional callback to report download progress. diff --git a/sdk/js/src/modelVariant.ts b/sdk/js/src/modelVariant.ts index 4d3e2bee..db06033a 100644 --- a/sdk/js/src/modelVariant.ts +++ b/sdk/js/src/modelVariant.ts @@ -45,6 +45,26 @@ export class ModelVariant implements IModel { return this._modelInfo; } + public get contextLength(): number | null { + return this._modelInfo.contextLength ?? null; + } + + public get inputModalities(): string | null { + return this._modelInfo.inputModalities ?? null; + } + + public get outputModalities(): string | null { + return this._modelInfo.outputModalities ?? null; + } + + public get capabilities(): string | null { + return this._modelInfo.capabilities ?? null; + } + + public get supportsToolCalling(): boolean | null { + return this._modelInfo.supportsToolCalling ?? null; + } + /** * Checks if the model variant is cached locally. * @returns True if cached, false otherwise. 
diff --git a/sdk/js/src/types.ts b/sdk/js/src/types.ts index 639676de..40a9110b 100644 --- a/sdk/js/src/types.ts +++ b/sdk/js/src/types.ts @@ -50,6 +50,10 @@ export interface ModelInfo { maxOutputTokens?: number | null; minFLVersion?: string | null; createdAtUnix: number; + contextLength?: number | null; + inputModalities?: string | null; + outputModalities?: string | null; + capabilities?: string | null; } export interface ResponseFormat { diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs index 0f9726d5..996eaf2a 100644 --- a/sdk/rust/build.rs +++ b/sdk/rust/build.rs @@ -9,7 +9,7 @@ const ORT_NIGHTLY_FEED: &str = const CORE_VERSION: &str = "0.9.0.8-rc3"; const ORT_VERSION: &str = "1.24.3"; -const GENAI_VERSION: &str = "0.12.2"; +const GENAI_VERSION: &str = "0.13.0-dev-20260319-1131106-439ca0d5"; const WINML_ORT_VERSION: &str = "1.23.2.3"; @@ -42,29 +42,18 @@ fn native_lib_extension() -> &'static str { fn get_packages(rid: &str) -> Vec { let winml = env::var("CARGO_FEATURE_WINML").is_ok(); - let nightly = env::var("CARGO_FEATURE_NIGHTLY").is_ok(); let is_linux = rid.starts_with("linux"); - let core_version = if nightly { - resolve_latest_version("Microsoft.AI.Foundry.Local.Core", ORT_NIGHTLY_FEED) - .unwrap_or_else(|| CORE_VERSION.to_string()) - } else { - CORE_VERSION.to_string() - }; + // Use pinned versions directly — dynamic resolution via resolve_latest_version + // is unreliable (feed returns versions in unexpected order, and some old versions + // require authentication). let mut packages = Vec::new(); if winml { - let winml_core_version = if nightly { - resolve_latest_version("Microsoft.AI.Foundry.Local.Core.WinML", ORT_NIGHTLY_FEED) - .unwrap_or_else(|| CORE_VERSION.to_string()) - } else { - CORE_VERSION.to_string() - }; - packages.push(NuGetPackage { name: "Microsoft.AI.Foundry.Local.Core.WinML", - version: winml_core_version, + version: CORE_VERSION.to_string(), feed_url: ORT_NIGHTLY_FEED, }); packages.push(NuGetPackage { @@ -75,12 +64,12 @@ fn get_packages(rid: &str) -> Vec { packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntimeGenAI.WinML", version: GENAI_VERSION.to_string(), - feed_url: NUGET_FEED, + feed_url: ORT_NIGHTLY_FEED, }); } else { packages.push(NuGetPackage { name: "Microsoft.AI.Foundry.Local.Core", - version: core_version, + version: CORE_VERSION.to_string(), feed_url: ORT_NIGHTLY_FEED, }); @@ -101,7 +90,7 @@ fn get_packages(rid: &str) -> Vec { packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry", version: GENAI_VERSION.to_string(), - feed_url: NUGET_FEED, + feed_url: ORT_NIGHTLY_FEED, }); } @@ -143,24 +132,6 @@ fn resolve_base_address(feed_url: &str) -> Result { )) } -/// Resolve the latest version of a package from a NuGet feed. -fn resolve_latest_version(package_name: &str, feed_url: &str) -> Option { - let base_address = resolve_base_address(feed_url).ok()?; - let lower_name = package_name.to_lowercase(); - let index_url = format!("{base_address}{lower_name}/index.json"); - - let body: String = ureq::get(&index_url) - .call() - .ok()? - .body_mut() - .read_to_string() - .ok()?; - - let index: serde_json::Value = serde_json::from_str(&body).ok()?; - let versions = index["versions"].as_array()?; - versions.last()?.as_str().map(|s| s.to_string()) -} - /// Download a .nupkg and extract native libraries for the given RID into `out_dir`. 
fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result<(), String> {
    let base_address = resolve_base_address(pkg.feed_url)?;
diff --git a/sdk/rust/src/model.rs b/sdk/rust/src/model.rs
index 4a197e3f..50c1fe1a 100644
--- a/sdk/rust/src/model.rs
+++ b/sdk/rust/src/model.rs
@@ -113,6 +113,31 @@ impl Model {
         self.selected_variant().is_loaded().await
     }
 
+    /// Context length (maximum input tokens) of the selected variant.
+    pub fn context_length(&self) -> Option {
+        self.selected_variant().info().context_length
+    }
+
+    /// Input modalities of the selected variant (e.g. "text", "text,image").
+    pub fn input_modalities(&self) -> Option<&str> {
+        self.selected_variant().info().input_modalities.as_deref()
+    }
+
+    /// Output modalities of the selected variant (e.g. "text").
+    pub fn output_modalities(&self) -> Option<&str> {
+        self.selected_variant().info().output_modalities.as_deref()
+    }
+
+    /// Capabilities of the selected variant (e.g. "reasoning", "tool-calling").
+    pub fn capabilities(&self) -> Option<&str> {
+        self.selected_variant().info().capabilities.as_deref()
+    }
+
+    /// Whether the selected variant supports tool calling.
+    pub fn supports_tool_calling(&self) -> Option<bool> {
+        self.selected_variant().info().supports_tool_calling
+    }
+
     /// Download the selected variant. If `progress` is provided, it receives
     /// human-readable progress strings as they arrive from the native core.
     pub async fn download(&self, progress: Option) -> Result<()>
diff --git a/sdk/rust/src/types.rs b/sdk/rust/src/types.rs
index d1d1f002..bab2f9c8 100644
--- a/sdk/rust/src/types.rs
+++ b/sdk/rust/src/types.rs
@@ -87,6 +87,14 @@ pub struct ModelInfo {
     pub min_fl_version: Option<String>,
     #[serde(default)]
     pub created_at_unix: u64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub context_length: Option,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub input_modalities: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub output_modalities: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub capabilities: Option<String>,
 }
 
 /// Desired response format for chat completions.
diff --git a/www/.npmrc b/www/.npmrc
deleted file mode 100644
index 06fe7275..00000000
--- a/www/.npmrc
+++ /dev/null
@@ -1,3 +0,0 @@
-registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
-always-auth=true
-engine-strict=true
diff --git a/www/package.json b/www/package.json
index 8a311947..5454236d 100644
--- a/www/package.json
+++ b/www/package.json
@@ -12,7 +12,7 @@
   },
   "license": "MIT",
   "engines": {
-    "node": ">=22.0.0",
+    "node": ">=22.0.0 <23.0.0",
     "npm": ">=9.0.0"
   },
   "scripts": {

From d731c6d6f612ab1122f8dbee4a54c19bd1f3dff8 Mon Sep 17 00:00:00 2001
From: Nenad Banfic <46795300+nenad1002@users.noreply.github.com>
Date: Mon, 30 Mar 2026 04:05:54 -0700
Subject: [PATCH 08/83] Rust bug fixes & changes (#560)

Part 1 of the Rust changes (part 2 exists but has not been tested yet). This mostly improves performance by reducing cloning, fixes several bugs, and makes the code more readable (by avoiding early returns).
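To illustrate the `StreamingCallbackState::push` change, here is a minimal, standalone sketch (illustrative only, not SDK code) of the buffering behavior the new loop implements: a multi-byte UTF-8 character split across two chunks is held back until it completes, while definitively invalid bytes are skipped so the buffer cannot grow without bound.

```rust
// Standalone sketch of incremental UTF-8 decoding across chunk boundaries.
// "€" is 0xE2 0x82 0xAC; the first chunk ends mid-character.
fn main() {
    let chunks: [&[u8]; 2] = [b"ok\xE2\x82", b"\xACdone"];
    let mut buf: Vec<u8> = Vec::new();
    for chunk in chunks {
        buf.extend_from_slice(chunk);
        match std::str::from_utf8(&buf) {
            Ok(s) => {
                print!("{s}"); // complete text: forward to the callback
                buf.clear();
            }
            Err(e) => {
                let n = e.valid_up_to();
                if n > 0 {
                    // The prefix is known-valid UTF-8; emit it.
                    print!("{}", std::str::from_utf8(&buf[..n]).unwrap());
                }
                match e.error_len() {
                    // Definitely invalid bytes: drop them (the real code then
                    // loops to keep decoding the remainder of the buffer).
                    Some(bad) => {
                        buf.drain(..n + bad);
                    }
                    // Incomplete trailing sequence: keep it for the next chunk.
                    None => {
                        buf.drain(..n);
                    }
                }
            }
        }
    }
    println!(); // prints "ok€done": nothing lost or emitted lossily at the boundary
}
```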
--- sdk/rust/docs/api.md | 2 +- sdk/rust/src/catalog.rs | 15 ++++---- sdk/rust/src/configuration.rs | 39 ++++++++----------- sdk/rust/src/detail/core_interop.rs | 47 +++++++++++++++-------- sdk/rust/src/detail/model_load_manager.rs | 34 ++++++++-------- sdk/rust/src/model.rs | 34 ++++++++-------- sdk/rust/src/model_variant.rs | 4 +- sdk/rust/src/openai/audio_client.rs | 4 +- sdk/rust/src/openai/chat_client.rs | 4 +- 9 files changed, 98 insertions(+), 85 deletions(-) diff --git a/sdk/rust/docs/api.md b/sdk/rust/docs/api.md index bdc86974..278402fb 100644 --- a/sdk/rust/docs/api.md +++ b/sdk/rust/docs/api.md @@ -149,7 +149,7 @@ pub struct Model { /* private fields */ } |--------|-----------|-------------| | `alias` | `fn alias(&self) -> &str` | Alias shared by all variants. | | `id` | `fn id(&self) -> &str` | Unique identifier of the selected variant. | -| `variants` | `fn variants(&self) -> &[ModelVariant]` | All variants in this model. | +| `variants` | `fn variants(&self) -> &[Arc]` | All variants in this model. | | `selected_variant` | `fn selected_variant(&self) -> &ModelVariant` | Currently selected variant. | | `select_variant` | `fn select_variant(&self, id: &str) -> Result<(), FoundryLocalError>` | Select a variant by id. | | `is_cached` | `async fn is_cached(&self) -> Result` | Whether the selected variant is cached on disk. | diff --git a/sdk/rust/src/catalog.rs b/sdk/rust/src/catalog.rs index 78485bff..9e04c943 100644 --- a/sdk/rust/src/catalog.rs +++ b/sdk/rust/src/catalog.rs @@ -135,7 +135,7 @@ impl Catalog { self.update_models().await?; let s = self.lock_state()?; s.models_by_alias.get(alias).cloned().ok_or_else(|| { - let available: Vec<&String> = s.models_by_alias.keys().collect(); + let available: Vec<&str> = s.models_by_alias.keys().map(|k| k.as_str()).collect(); FoundryLocalError::ModelOperation { reason: format!("Unknown model alias '{alias}'. Available: {available:?}"), } @@ -152,7 +152,7 @@ impl Catalog { self.update_models().await?; let s = self.lock_state()?; s.variants_by_id.get(id).cloned().ok_or_else(|| { - let available: Vec<&String> = s.variants_by_id.keys().collect(); + let available: Vec<&str> = s.variants_by_id.keys().map(|k| k.as_str()).collect(); FoundryLocalError::ModelOperation { reason: format!("Unknown variant id '{id}'. 
Available: {available:?}"), } @@ -216,18 +216,17 @@ impl Catalog { for info in infos { let id = info.id.clone(); let alias = info.alias.clone(); - let variant = ModelVariant::new( + let variant = Arc::new(ModelVariant::new( info, Arc::clone(&self.core), Arc::clone(&self.model_load_manager), self.invalidator.clone(), - ); - let variant_arc = Arc::new(variant.clone()); - id_map.insert(id, variant_arc); + )); + id_map.insert(id, Arc::clone(&variant)); alias_map_build - .entry(alias.clone()) - .or_insert_with(|| Model::new(alias, Arc::clone(&self.core))) + .entry(alias) + .or_insert_with_key(|a| Model::new(a.clone(), Arc::clone(&self.core))) .add_variant(variant); } diff --git a/sdk/rust/src/configuration.rs b/sdk/rust/src/configuration.rs index d23d5986..c1ec2964 100644 --- a/sdk/rust/src/configuration.rs +++ b/sdk/rust/src/configuration.rs @@ -183,31 +183,24 @@ impl Configuration { let mut params = HashMap::new(); params.insert("AppName".into(), app_name); - if let Some(v) = config.app_data_dir { - params.insert("AppDataDir".into(), v); - } - if let Some(v) = config.model_cache_dir { - params.insert("ModelCacheDir".into(), v); - } - if let Some(v) = config.logs_dir { - params.insert("LogsDir".into(), v); - } - if let Some(level) = config.log_level { - params.insert("LogLevel".into(), level.as_core_str().into()); - } - if let Some(v) = config.web_service_urls { - params.insert("WebServiceUrls".into(), v); - } - if let Some(v) = config.service_endpoint { - params.insert("WebServiceExternalUrl".into(), v); - } - if let Some(v) = config.library_path { - params.insert("FoundryLocalCorePath".into(), v); + let optional_fields = [ + ("AppDataDir", config.app_data_dir), + ("ModelCacheDir", config.model_cache_dir), + ("LogsDir", config.logs_dir), + ("LogLevel", config.log_level.map(|l| l.as_core_str().into())), + ("WebServiceUrls", config.web_service_urls), + ("WebServiceExternalUrl", config.service_endpoint), + ("FoundryLocalCorePath", config.library_path), + ]; + + for (key, value) in optional_fields { + if let Some(v) = value { + params.insert(key.into(), v); + } } + if let Some(extra) = config.additional_settings { - for (k, v) in extra { - params.insert(k, v); - } + params.extend(extra); } Ok((Self { params }, config.logger)) diff --git a/sdk/rust/src/detail/core_interop.rs b/sdk/rust/src/detail/core_interop.rs index e69a6e98..75146164 100644 --- a/sdk/rust/src/detail/core_interop.rs +++ b/sdk/rust/src/detail/core_interop.rs @@ -137,25 +137,42 @@ impl<'a> StreamingCallbackState<'a> { /// Append raw bytes, decode as much valid UTF-8 as possible, and forward /// complete text to the callback. Any trailing incomplete multi-byte - /// sequence is kept in the buffer for the next call. + /// sequence is kept in the buffer for the next call. Invalid byte + /// sequences are skipped to prevent the buffer from growing unboundedly. fn push(&mut self, bytes: &[u8]) { self.buf.extend_from_slice(bytes); - let valid_up_to = match std::str::from_utf8(&self.buf) { - Ok(s) => { - (self.callback)(s); - s.len() - } - Err(e) => { - let n = e.valid_up_to(); - if n > 0 { - // SAFETY: `valid_up_to` guarantees this prefix is valid UTF-8. - let valid = unsafe { std::str::from_utf8_unchecked(&self.buf[..n]) }; - (self.callback)(valid); + loop { + match std::str::from_utf8(&self.buf) { + Ok(s) => { + if !s.is_empty() { + (self.callback)(s); + } + self.buf.clear(); + break; + } + Err(e) => { + let n = e.valid_up_to(); + if n > 0 { + // SAFETY: `valid_up_to` guarantees this prefix is valid UTF-8. 
+ let valid = unsafe { std::str::from_utf8_unchecked(&self.buf[..n]) }; + (self.callback)(valid); + } + match e.error_len() { + Some(err_len) => { + // Definite invalid sequence — skip past it and + // continue decoding the remainder. + self.buf.drain(..n + err_len); + } + None => { + // Incomplete multi-byte sequence at the end — + // keep it for the next push. + self.buf.drain(..n); + break; + } + } } - n } - }; - self.buf.drain(..valid_up_to); + } } /// Flush any remaining bytes as lossy UTF-8 (called once after the native diff --git a/sdk/rust/src/detail/model_load_manager.rs b/sdk/rust/src/detail/model_load_manager.rs index 41507cbd..57eb3cfb 100644 --- a/sdk/rust/src/detail/model_load_manager.rs +++ b/sdk/rust/src/detail/model_load_manager.rs @@ -34,12 +34,12 @@ impl ModelLoadManager { let encoded_id = urlencoding::encode(model_id); self.http_get(&format!("{base_url}/models/load/{encoded_id}")) .await?; - return Ok(()); + } else { + let params = json!({ "Params": { "Model": model_id } }); + self.core + .execute_command_async("load_model".into(), Some(params)) + .await?; } - let params = json!({ "Params": { "Model": model_id } }); - self.core - .execute_command_async("load_model".into(), Some(params)) - .await?; Ok(()) } @@ -47,14 +47,14 @@ impl ModelLoadManager { pub async fn unload(&self, model_id: &str) -> Result { if let Some(base_url) = &self.external_service_url { let encoded_id = urlencoding::encode(model_id); - return self - .http_get(&format!("{base_url}/models/unload/{encoded_id}")) - .await; + self.http_get(&format!("{base_url}/models/unload/{encoded_id}")) + .await + } else { + let params = json!({ "Params": { "Model": model_id } }); + self.core + .execute_command_async("unload_model".into(), Some(params)) + .await } - let params = json!({ "Params": { "Model": model_id } }); - self.core - .execute_command_async("unload_model".into(), Some(params)) - .await } /// Return the list of currently loaded model identifiers. @@ -67,11 +67,11 @@ impl ModelLoadManager { .await? }; - if raw.trim().is_empty() { - return Ok(Vec::new()); - } - - let ids: Vec = serde_json::from_str(&raw)?; + let ids: Vec = if raw.trim().is_empty() { + Vec::new() + } else { + serde_json::from_str(&raw)? + }; Ok(ids) } diff --git a/sdk/rust/src/model.rs b/sdk/rust/src/model.rs index 50c1fe1a..9d08f9a5 100644 --- a/sdk/rust/src/model.rs +++ b/sdk/rust/src/model.rs @@ -19,7 +19,7 @@ use crate::openai::ChatClient; pub struct Model { alias: String, core: Arc, - variants: Vec, + variants: Vec>, selected_index: AtomicUsize, } @@ -57,7 +57,7 @@ impl Model { /// Add a variant. If the new variant is cached and the current selection /// is not, the new variant becomes the selected one. - pub(crate) fn add_variant(&mut self, variant: ModelVariant) { + pub(crate) fn add_variant(&mut self, variant: Arc) { self.variants.push(variant); let new_idx = self.variants.len() - 1; let current = self.selected_index.load(Relaxed); @@ -70,17 +70,21 @@ impl Model { /// Select a variant by its unique id. pub fn select_variant(&self, id: &str) -> Result<()> { - if let Some(pos) = self.variants.iter().position(|v| v.id() == id) { - self.selected_index.store(pos, Relaxed); - return Ok(()); + match self.variants.iter().position(|v| v.id() == id) { + Some(pos) => { + self.selected_index.store(pos, Relaxed); + Ok(()) + } + None => { + let available: Vec<&str> = self.variants.iter().map(|v| v.id()).collect(); + Err(FoundryLocalError::ModelOperation { + reason: format!( + "Variant '{id}' not found for model '{}'. 
Available: {available:?}", + self.alias + ), + }) + } } - let available: Vec = self.variants.iter().map(|v| v.id().to_string()).collect(); - Err(FoundryLocalError::ModelOperation { - reason: format!( - "Variant '{id}' not found for model '{}'. Available: {available:?}", - self.alias - ), - }) } /// Returns a reference to the currently selected variant. @@ -89,7 +93,7 @@ impl Model { } /// Returns all variants that belong to this model. - pub fn variants(&self) -> &[ModelVariant] { + pub fn variants(&self) -> &[Arc] { &self.variants } @@ -169,11 +173,11 @@ impl Model { /// Create a [`ChatClient`] bound to the selected variant. pub fn create_chat_client(&self) -> ChatClient { - ChatClient::new(self.id().to_string(), Arc::clone(&self.core)) + ChatClient::new(self.id(), Arc::clone(&self.core)) } /// Create an [`AudioClient`] bound to the selected variant. pub fn create_audio_client(&self) -> AudioClient { - AudioClient::new(self.id().to_string(), Arc::clone(&self.core)) + AudioClient::new(self.id(), Arc::clone(&self.core)) } } diff --git a/sdk/rust/src/model_variant.rs b/sdk/rust/src/model_variant.rs index c4be6822..760306f6 100644 --- a/sdk/rust/src/model_variant.rs +++ b/sdk/rust/src/model_variant.rs @@ -143,11 +143,11 @@ impl ModelVariant { /// Create a [`ChatClient`] bound to this variant. pub fn create_chat_client(&self) -> ChatClient { - ChatClient::new(self.info.id.clone(), Arc::clone(&self.core)) + ChatClient::new(&self.info.id, Arc::clone(&self.core)) } /// Create an [`AudioClient`] bound to this variant. pub fn create_audio_client(&self) -> AudioClient { - AudioClient::new(self.info.id.clone(), Arc::clone(&self.core)) + AudioClient::new(&self.info.id, Arc::clone(&self.core)) } } diff --git a/sdk/rust/src/openai/audio_client.rs b/sdk/rust/src/openai/audio_client.rs index da0f9f5b..0319da38 100644 --- a/sdk/rust/src/openai/audio_client.rs +++ b/sdk/rust/src/openai/audio_client.rs @@ -116,9 +116,9 @@ pub struct AudioClient { } impl AudioClient { - pub(crate) fn new(model_id: String, core: Arc) -> Self { + pub(crate) fn new(model_id: &str, core: Arc) -> Self { Self { - model_id, + model_id: model_id.to_owned(), core, settings: AudioClientSettings::default(), } diff --git a/sdk/rust/src/openai/chat_client.rs b/sdk/rust/src/openai/chat_client.rs index 62d0be5b..6597de82 100644 --- a/sdk/rust/src/openai/chat_client.rs +++ b/sdk/rust/src/openai/chat_client.rs @@ -132,9 +132,9 @@ pub struct ChatClient { } impl ChatClient { - pub(crate) fn new(model_id: String, core: Arc) -> Self { + pub(crate) fn new(model_id: &str, core: Arc) -> Self { Self { - model_id, + model_id: model_id.to_owned(), core, settings: ChatClientSettings::default(), } From 9b576e3810cc7744ac2a57accd6e450e5e04bee8 Mon Sep 17 00:00:00 2001 From: bmehta001 Date: Mon, 30 Mar 2026 18:47:39 -0500 Subject: [PATCH 09/83] Add model context capabilities to Python (#564) Python SDK: add contextLength, inputModalities, outputModalities, capabilities; also added tests for these fields --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/python/README.md | 25 +++++++++++++++++++ sdk/python/src/detail/model_data_types.py | 4 +++ sdk/python/src/imodel.py | 30 +++++++++++++++++++++++ sdk/python/src/model.py | 25 +++++++++++++++++++ sdk/python/src/model_variant.py | 25 +++++++++++++++++++ sdk/python/test/test_model.py | 30 +++++++++++++++++++++++ 6 files changed, 139 insertions(+) diff --git a/sdk/python/README.md b/sdk/python/README.md index 7cc8b44c..ace19bac 100644 --- a/sdk/python/README.md +++ 
b/sdk/python/README.md @@ -142,6 +142,31 @@ cached = catalog.get_cached_models() loaded = catalog.get_loaded_models() ``` +### Inspecting Model Metadata + +`Model` exposes metadata properties from the catalog: + +```python +model = catalog.get_model("phi-3.5-mini") + +# Identity +print(model.id) # e.g. "phi-3.5-mini-instruct-generic-gpu:3" +print(model.alias) # e.g. "phi-3.5-mini" + +# Context and token limits +print(model.context_length) # e.g. 131072 (tokens), or None if unknown + +# Modalities and capabilities +print(model.input_modalities) # e.g. "text" or "text,image" +print(model.output_modalities) # e.g. "text" +print(model.capabilities) # e.g. "chat,completion" +print(model.supports_tool_calling) # True, False, or None + +# Cache / load state +print(model.is_cached) +print(model.is_loaded) +``` + ### Loading and Running a Model ```python diff --git a/sdk/python/src/detail/model_data_types.py b/sdk/python/src/detail/model_data_types.py index b8b9e8d6..df367b44 100644 --- a/sdk/python/src/detail/model_data_types.py +++ b/sdk/python/src/detail/model_data_types.py @@ -74,3 +74,7 @@ class ModelInfo(BaseModel): max_output_tokens: Optional[int] = Field(alias="maxOutputTokens") min_fl_version: Optional[str] = Field(alias="minFLVersion") created_at_unix: int = Field(alias="createdAt") + context_length: Optional[int] = Field(alias="contextLength") + input_modalities: Optional[str] = Field(alias="inputModalities") + output_modalities: Optional[str] = Field(alias="outputModalities") + capabilities: Optional[str] = Field(alias="capabilities") diff --git a/sdk/python/src/imodel.py b/sdk/python/src/imodel.py index a092b98e..7f83d1cc 100644 --- a/sdk/python/src/imodel.py +++ b/sdk/python/src/imodel.py @@ -37,6 +37,36 @@ def is_loaded(self) -> bool: """True if the model is loaded into memory.""" pass + @property + @abstractmethod + def context_length(self) -> Optional[int]: + """Maximum context length (in tokens) supported by the model, or ``None`` if unknown.""" + pass + + @property + @abstractmethod + def input_modalities(self) -> Optional[str]: + """Comma-separated input modalities (e.g. ``"text,image"``), or ``None`` if unknown.""" + pass + + @property + @abstractmethod + def output_modalities(self) -> Optional[str]: + """Comma-separated output modalities (e.g. ``"text"``), or ``None`` if unknown.""" + pass + + @property + @abstractmethod + def capabilities(self) -> Optional[str]: + """Comma-separated capability tags (e.g. 
``"chat,completion"``), or ``None`` if unknown.""" + pass + + @property + @abstractmethod + def supports_tool_calling(self) -> Optional[bool]: + """Whether the model supports tool/function calling, or ``None`` if unknown.""" + pass + @abstractmethod def download(self, progress_callback: Callable[[float], None] = None) -> None: """ diff --git a/sdk/python/src/model.py b/sdk/python/src/model.py index 4c8750ca..f964a820 100644 --- a/sdk/python/src/model.py +++ b/sdk/python/src/model.py @@ -94,6 +94,31 @@ def alias(self) -> str: """Alias of this model.""" return self._alias + @property + def context_length(self) -> Optional[int]: + """Maximum context length (in tokens) of the currently selected variant.""" + return self._selected_variant.context_length + + @property + def input_modalities(self) -> Optional[str]: + """Comma-separated input modalities of the currently selected variant.""" + return self._selected_variant.input_modalities + + @property + def output_modalities(self) -> Optional[str]: + """Comma-separated output modalities of the currently selected variant.""" + return self._selected_variant.output_modalities + + @property + def capabilities(self) -> Optional[str]: + """Comma-separated capability tags of the currently selected variant.""" + return self._selected_variant.capabilities + + @property + def supports_tool_calling(self) -> Optional[bool]: + """Whether the currently selected variant supports tool/function calling.""" + return self._selected_variant.supports_tool_calling + @property def is_cached(self) -> bool: """Is the currently selected variant cached locally?""" diff --git a/sdk/python/src/model_variant.py b/sdk/python/src/model_variant.py index f0d40109..1c7ad717 100644 --- a/sdk/python/src/model_variant.py +++ b/sdk/python/src/model_variant.py @@ -57,6 +57,31 @@ def info(self) -> ModelInfo: """Full catalog metadata for this variant.""" return self._model_info + @property + def context_length(self) -> Optional[int]: + """Maximum context length (in tokens) supported by this variant, or ``None`` if unknown.""" + return self._model_info.context_length + + @property + def input_modalities(self) -> Optional[str]: + """Comma-separated input modalities (e.g. ``"text,image"``), or ``None`` if unknown.""" + return self._model_info.input_modalities + + @property + def output_modalities(self) -> Optional[str]: + """Comma-separated output modalities (e.g. ``"text"``), or ``None`` if unknown.""" + return self._model_info.output_modalities + + @property + def capabilities(self) -> Optional[str]: + """Comma-separated capability tags (e.g. 
``"chat,completion"``), or ``None`` if unknown."""
+        return self._model_info.capabilities
+
+    @property
+    def supports_tool_calling(self) -> Optional[bool]:
+        """Whether this variant supports tool/function calling, or ``None`` if unknown."""
+        return self._model_info.supports_tool_calling
+
     @property
     def is_cached(self) -> bool:
         """``True`` if this variant is present in the local model cache."""
diff --git a/sdk/python/test/test_model.py b/sdk/python/test/test_model.py
index 54a30ef4..e2ea1509 100644
--- a/sdk/python/test/test_model.py
+++ b/sdk/python/test/test_model.py
@@ -56,3 +56,33 @@ def test_should_load_and_unload_model(self, catalog):
             # Safety cleanup
             if model.is_loaded:
                 model.unload()
+
+    def test_should_expose_context_length(self, catalog):
+        """Model should expose context_length from ModelInfo metadata."""
+        model = catalog.get_model(TEST_MODEL_ALIAS)
+        assert model is not None
+        # context_length should be None or a positive integer
+        ctx = model.context_length
+        assert ctx is None or (isinstance(ctx, int) and ctx > 0)
+
+    def test_should_expose_modalities(self, catalog):
+        """Model should expose input_modalities and output_modalities."""
+        model = catalog.get_model(TEST_MODEL_ALIAS)
+        assert model is not None
+        # Modalities should be None or non-empty strings
+        for val in (model.input_modalities, model.output_modalities):
+            assert val is None or (isinstance(val, str) and len(val) > 0)
+
+    def test_should_expose_capabilities(self, catalog):
+        """Model should expose capabilities metadata."""
+        model = catalog.get_model(TEST_MODEL_ALIAS)
+        assert model is not None
+        caps = model.capabilities
+        assert caps is None or (isinstance(caps, str) and len(caps) > 0)
+
+    def test_should_expose_supports_tool_calling(self, catalog):
+        """Model should expose supports_tool_calling metadata."""
+        model = catalog.get_model(TEST_MODEL_ALIAS)
+        assert model is not None
+        stc = model.supports_tool_calling
+        assert stc is None or isinstance(stc, bool)

From d1a9e3c409d76bf44c3548ca2bd6e6709fe841c3 Mon Sep 17 00:00:00 2001
From: Rui Ren
Date: Mon, 30 Mar 2026 22:29:24 -0700
Subject: [PATCH 10/83] Add live audio transcription streaming support to Foundry Local C# SDK (#485)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### Description:

Adds real-time audio streaming support to the Foundry Local C# SDK, enabling live microphone-to-text transcription via ONNX Runtime GenAI's StreamingProcessor API (Nemotron ASR).

The existing `OpenAIAudioClient` only supports file-based transcription. This PR introduces `LiveAudioTranscriptionSession` that accepts continuous PCM audio chunks (e.g., from a microphone) and returns partial/final transcription results as an async stream.
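For context (assuming the 16 kHz / 16-bit / mono PCM format used throughout the samples; the SDK itself does not enforce these values), chunk sizes follow directly from the PCM byte rate:

```csharp
// Raw PCM byte rate under the assumed sample format (16 kHz, 16-bit, mono):
int sampleRate = 16000;                                      // samples per second
int bytesPerSample = 2;                                      // 16-bit PCM
int channels = 1;                                            // mono
int bytesPerSecond = sampleRate * bytesPerSample * channels; // 32,000 bytes/s
int chunkBytes = bytesPerSecond / 10;                        // 3,200 bytes per 100 ms capture buffer
```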
### What's included

**New files**
- `src/OpenAI/LiveAudioTranscriptionClient.cs` — Streaming session with `StartAsync()`, `AppendAsync()`, `GetTranscriptionStream()`, `StopAsync()`
- `src/OpenAI/LiveAudioTranscriptionTypes.cs` — `LiveAudioTranscriptionResponse` (extends `ConversationItem`) and `CoreErrorResponse` types
- `test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs` — Unit tests for deserialization, settings, state guards

**Modified files**
- `src/OpenAI/AudioClient.cs` — Added `CreateLiveTranscriptionSession()` factory method
- `src/Detail/ICoreInterop.cs` — Added `StreamingRequestBuffer` struct, `StartAudioStream`, `PushAudioData`, `StopAudioStream` interface methods
- `src/Detail/CoreInterop.cs` — Routes audio commands through existing `execute_command` / `execute_command_with_binary` native entry points
- `src/Detail/JsonSerializationContext.cs` — Registered `LiveAudioTranscriptionResponse` for AOT compatibility
- README.md — Added live audio transcription documentation

### API surface

```csharp
var audioClient = await model.GetAudioClientAsync();
var session = audioClient.CreateLiveTranscriptionSession();

session.Settings.SampleRate = 16000;
session.Settings.Channels = 1;
session.Settings.Language = "en";

await session.StartAsync();

// Push audio from microphone callback
await session.AppendAsync(pcmBytes);

// Read results as async stream
await foreach (var result in session.GetTranscriptionStream())
{
    Console.Write(result.Content?[0]?.Text);
}

await session.StopAsync();
```

### Design highlights

- **Output type alignment** — `LiveAudioTranscriptionResponse` extends `ConversationItem` from the Betalgo OpenAI SDK's Realtime models, so streaming results follow the OpenAI Realtime API result shape
- **Internal push queue** — Bounded `Channel` serializes audio pushes from any thread (safe for mic callbacks) with backpressure
- **Fail-fast on errors** — Push loop terminates immediately on any native error (no retry logic)
- **Settings freeze** — Audio format settings are snapshot-copied at `StartAsync()` and immutable during the session
- **Cancellation-safe stop** — `StopAsync` always calls native stop even if cancelled, preventing native session leaks
- **Dedicated session CTS** — Push loop uses its own `CancellationTokenSource`, decoupled from the caller's token
- **Routes through existing exports** — `StartAudioStream` and `StopAudioStream` route through `execute_command`; `PushAudioData` routes through `execute_command_with_binary` — no new native entry points required

### Core integration (neutron-server)

The Core side (AudioStreamingSession.cs) uses `StreamingProcessor` + `Generator` + `Tokenizer` + `TokenizerStream` from onnxruntime-genai to perform real-time RNNT decoding. The native commands (`audio_stream_start`/`push`/`stop`) are handled as cases in `NativeInterop.ExecuteCommandManaged` / `ExecuteCommandWithBinaryManaged`.
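For reference, the session can also be driven from a pre-recorded file instead of a live microphone. A minimal sketch, assuming a canonical 44-byte-header 16 kHz/16-bit/mono WAV at a hypothetical `audio.wav` path, an `audioClient` obtained as above, and implicit usings (System, System.IO, System.Threading.Tasks):

```csharp
var session = audioClient.CreateLiveTranscriptionSession();
session.Settings.SampleRate = 16000;
session.Settings.Channels = 1;
await session.StartAsync();

// Consume results concurrently, as in the microphone sample.
var readTask = Task.Run(async () =>
{
    await foreach (var result in session.GetTranscriptionStream())
    {
        Console.Write(result.Content?[0]?.Text);
    }
});

// Skip the assumed 44-byte RIFF header (robust code should walk the chunk
// list) and push raw PCM in ~100 ms pieces (3,200 bytes at 16 kHz mono).
byte[] wav = File.ReadAllBytes("audio.wav");
for (int offset = 44; offset < wav.Length; offset += 3200)
{
    int len = Math.Min(3200, wav.Length - offset);
    await session.AppendAsync(new ReadOnlyMemory<byte>(wav, offset, len));
}

await session.StopAsync(); // end-of-audio: flushes and completes the stream
await readTask;
```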
### Verified working - ✅ SDK build succeeds (0 errors, 0 warnings) - ✅ Unit tests for JSON deserialization, type inheritance, settings, state guards - ✅ GenAI `StreamingProcessor` pipeline verified with WAV file (correct transcript) - ✅ Core `TranscribeChunk` byte[] PCM path matches reference float[] path exactly - ✅ Full E2E simulation: SDK Channel + JSON serialization + session management - ✅ Live microphone test: real-time transcription through SDK → Core → GenAI --------- Co-authored-by: ruiren_microsoft Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com> Co-authored-by: Kunal Vaishnavi --- .../GettingStarted/Directory.Packages.props | 2 +- .../LiveAudioTranscriptionExample.csproj | 32 ++ .../LiveAudioTranscriptionExample/Program.cs | 106 +++++ .../LiveAudioTranscriptionExample.csproj | 30 ++ sdk/cs/README.md | 59 +++ sdk/cs/src/Detail/CoreInterop.cs | 115 ++++++ sdk/cs/src/Detail/ICoreInterop.cs | 17 + sdk/cs/src/Detail/JsonSerializationContext.cs | 4 + sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 4 +- sdk/cs/src/OpenAI/AudioClient.cs | 11 +- .../OpenAI/LiveAudioTranscriptionClient.cs | 385 ++++++++++++++++++ .../src/OpenAI/LiveAudioTranscriptionTypes.cs | 105 +++++ .../LiveAudioTranscriptionTests.cs | 267 ++++++++++++ sdk/cs/test/FoundryLocal.Tests/ModelTests.cs | 2 +- 14 files changed, 1134 insertions(+), 5 deletions(-) create mode 100644 samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj create mode 100644 samples/cs/GettingStarted/src/LiveAudioTranscriptionExample/Program.cs create mode 100644 samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj create mode 100644 sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs create mode 100644 sdk/cs/src/OpenAI/LiveAudioTranscriptionTypes.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs diff --git a/samples/cs/GettingStarted/Directory.Packages.props b/samples/cs/GettingStarted/Directory.Packages.props index 02984002..efd388a6 100644 --- a/samples/cs/GettingStarted/Directory.Packages.props +++ b/samples/cs/GettingStarted/Directory.Packages.props @@ -1,7 +1,7 @@ true - 0.12.1 + 0.13.0-dev-20260319-1131106-439ca0d51 1.23.2 diff --git a/samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj b/samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj new file mode 100644 index 00000000..ad6086f5 --- /dev/null +++ b/samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj @@ -0,0 +1,32 @@ + + + + Exe + net9.0 + enable + enable + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/GettingStarted/src/LiveAudioTranscriptionExample/Program.cs b/samples/cs/GettingStarted/src/LiveAudioTranscriptionExample/Program.cs new file mode 100644 index 00000000..68bba83f --- /dev/null +++ b/samples/cs/GettingStarted/src/LiveAudioTranscriptionExample/Program.cs @@ -0,0 +1,106 @@ +// Live Audio Transcription — Foundry Local SDK Example +// +// Demonstrates real-time microphone-to-text using: +// SDK (FoundryLocalManager) → Core (NativeAOT DLL) → onnxruntime-genai (StreamingProcessor) + +using Microsoft.AI.Foundry.Local; +using NAudio.Wave; + +Console.WriteLine("==========================================================="); +Console.WriteLine(" Foundry Local -- Live Audio Transcription Demo"); 
+Console.WriteLine("==========================================================="); +Console.WriteLine(); + +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +await FoundryLocalManager.CreateAsync(config, Utils.GetAppLogger()); +var mgr = FoundryLocalManager.Instance; + +await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); + +var catalog = await mgr.GetCatalogAsync(); + +var model = await catalog.GetModelAsync("nemotron") ?? throw new Exception("Model \"nemotron\" not found in catalog"); + +await model.DownloadAsync(progress => +{ + Console.Write($"\rDownloading model: {progress:F2}%"); + if (progress >= 100f) + { + Console.WriteLine(); + } +}); + +Console.Write($"Loading model {model.Id}..."); +await model.LoadAsync(); +Console.WriteLine("done."); + +var audioClient = await model.GetAudioClientAsync(); +var session = audioClient.CreateLiveTranscriptionSession(); +session.Settings.SampleRate = 16000; // Default is 16000; shown here to match the NAudio WaveFormat below +session.Settings.Channels = 1; +session.Settings.Language = "en"; + +await session.StartAsync(); +Console.WriteLine(" Session started"); + +var readTask = Task.Run(async () => +{ + try + { + await foreach (var result in session.GetTranscriptionStream()) + { + var text = result.Content?[0]?.Text; + if (result.IsFinal) + { + Console.WriteLine(); + Console.WriteLine($" [FINAL] {text}"); + Console.Out.Flush(); + } + else if (!string.IsNullOrEmpty(text)) + { + Console.ForegroundColor = ConsoleColor.Cyan; + Console.Write(text); + Console.ResetColor(); + Console.Out.Flush(); + } + } + } + catch (OperationCanceledException) { } +}); + +using var waveIn = new WaveInEvent +{ + WaveFormat = new WaveFormat(rate: 16000, bits: 16, channels: 1), + BufferMilliseconds = 100 +}; + +waveIn.DataAvailable += (sender, e) => +{ + if (e.BytesRecorded > 0) + { + _ = session.AppendAsync(new ReadOnlyMemory(e.Buffer, 0, e.BytesRecorded)); + } +}; + +Console.WriteLine(); +Console.WriteLine("==========================================================="); +Console.WriteLine(" LIVE TRANSCRIPTION ACTIVE"); +Console.WriteLine(" Speak into your microphone."); +Console.WriteLine(" Transcription appears in real-time (cyan text)."); +Console.WriteLine(" Press ENTER to stop recording."); +Console.WriteLine("==========================================================="); +Console.WriteLine(); + +waveIn.StartRecording(); +Console.ReadLine(); +waveIn.StopRecording(); + +await session.StopAsync(); +await readTask; + +await model.UnloadAsync(); diff --git a/samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj b/samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj new file mode 100644 index 00000000..b4489af2 --- /dev/null +++ b/samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj @@ -0,0 +1,30 @@ + + + + Exe + enable + enable + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + diff --git a/sdk/cs/README.md b/sdk/cs/README.md index f58e41e0..92ad34b7 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -233,6 +233,63 @@ audioClient.Settings.Language = "en"; audioClient.Settings.Temperature = 0.0f; ``` +### Live Audio Transcription (Real-Time Streaming) + +For real-time microphone-to-text transcription, use 
`CreateLiveTranscriptionSession()`. Audio is pushed as raw PCM chunks and transcription results stream back as an `IAsyncEnumerable<LiveAudioTranscriptionResponse>`.
+
+The streaming result type (`LiveAudioTranscriptionResponse`) extends `ConversationItem` from the Betalgo OpenAI SDK's Realtime models, so it's compatible with the OpenAI Realtime API pattern. Access transcribed text via `result.Content[0].Text` or `result.Content[0].Transcript`.
+
+```csharp
+var audioClient = await model.GetAudioClientAsync();
+var session = audioClient.CreateLiveTranscriptionSession();
+
+// Configure audio format (must be set before StartAsync)
+session.Settings.SampleRate = 16000;
+session.Settings.Channels = 1;
+session.Settings.Language = "en";
+
+await session.StartAsync();
+
+// Push audio from a microphone callback (thread-safe)
+waveIn.DataAvailable += (sender, e) =>
+{
+    _ = session.AppendAsync(new ReadOnlyMemory<byte>(e.Buffer, 0, e.BytesRecorded));
+};
+
+// Read transcription results as they arrive
+await foreach (var result in session.GetTranscriptionStream())
+{
+    // result follows the OpenAI Realtime ConversationItem pattern:
+    // - result.Content[0].Text — incremental transcribed text (per chunk, not accumulated)
+    // - result.Content[0].Transcript — alias for Text (OpenAI Realtime compatibility)
+    // - result.IsFinal — true for final results, false for interim hypotheses
+    // - result.StartTime / EndTime — segment timing in seconds
+    Console.Write(result.Content?[0]?.Text);
+}
+
+await session.StopAsync();
+```
+
+#### Output Type
+
+| Field | Type | Description |
+|-------|------|-------------|
+| `Content` | `List` | Content parts. Access text via `Content[0].Text` or `Content[0].Transcript`. |
+| `IsFinal` | `bool` | Whether this is a final or interim result. Nemotron always returns `true`. |
+| `StartTime` | `double?` | Start time offset in the audio stream (seconds). |
+| `EndTime` | `double?` | End time offset in the audio stream (seconds). |
+| `Id` | `string?` | Unique identifier for this result (if available). |
+
+#### Session Lifecycle
+
+| Method | Description |
+|--------|-------------|
+| `StartAsync()` | Initialize the streaming session. Settings are frozen after this call. |
+| `AppendAsync(pcmData)` | Push a chunk of raw PCM audio. Thread-safe (bounded internal queue). |
+| `GetTranscriptionStream()` | Async enumerable of transcription results. |
+| `StopAsync()` | Signal end-of-audio, flush remaining audio, and clean up. |
+| `DisposeAsync()` | Calls `StopAsync` if needed. Use `await using` for automatic cleanup.
| + ### Web Service Start an OpenAI-compatible REST endpoint for use by external tools or processes: @@ -297,6 +354,8 @@ Key types: | [`ModelVariant`](./docs/api/microsoft.ai.foundry.local.modelvariant.md) | Specific model variant (hardware/quantization) | | [`OpenAIChatClient`](./docs/api/microsoft.ai.foundry.local.openaichatclient.md) | Chat completions (sync + streaming) | | [`OpenAIAudioClient`](./docs/api/microsoft.ai.foundry.local.openaiaudioclient.md) | Audio transcription (sync + streaming) | +| [`LiveAudioTranscriptionSession`](./docs/api/microsoft.ai.foundry.local.openai.liveaudiotranscriptionsession.md) | Real-time audio streaming session | +| [`LiveAudioTranscriptionResponse`](./docs/api/microsoft.ai.foundry.local.openai.liveaudiotranscriptionresponse.md) | Streaming transcription result (ConversationItem-shaped) | | [`ModelInfo`](./docs/api/microsoft.ai.foundry.local.modelinfo.md) | Full model metadata record | ## Tests diff --git a/sdk/cs/src/Detail/CoreInterop.cs b/sdk/cs/src/Detail/CoreInterop.cs index 8411473b..c5eba7ec 100644 --- a/sdk/cs/src/Detail/CoreInterop.cs +++ b/sdk/cs/src/Detail/CoreInterop.cs @@ -158,6 +158,31 @@ private static unsafe partial void CoreExecuteCommandWithCallback(RequestBuffer* nint callbackPtr, // NativeCallbackFn pointer nint userData); + [LibraryImport(LibraryName, EntryPoint = "execute_command_with_binary")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreExecuteCommandWithBinary(StreamingRequestBuffer* nativeRequest, + ResponseBuffer* nativeResponse); + + // --- Audio streaming P/Invoke imports (kept for future dedicated entry points) --- + + [LibraryImport(LibraryName, EntryPoint = "audio_stream_start")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreAudioStreamStart( + RequestBuffer* request, + ResponseBuffer* response); + + [LibraryImport(LibraryName, EntryPoint = "audio_stream_push")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreAudioStreamPush( + StreamingRequestBuffer* request, + ResponseBuffer* response); + + [LibraryImport(LibraryName, EntryPoint = "audio_stream_stop")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreAudioStreamStop( + RequestBuffer* request, + ResponseBuffer* response); + // helper to capture exceptions in callbacks internal class CallbackHelper { @@ -331,4 +356,94 @@ public Task ExecuteCommandWithCallbackAsync(string commandName, CoreIn return Task.Run(() => ExecuteCommandWithCallback(commandName, commandInput, callback), ct); } + /// + /// Marshal a ResponseBuffer from unmanaged memory into a managed Response and free the unmanaged memory. 
+ /// + private Response MarshalResponse(ResponseBuffer response) + { + Response result = new(); + + if (response.Data != IntPtr.Zero && response.DataLength > 0) + { + byte[] managedResponse = new byte[response.DataLength]; + Marshal.Copy(response.Data, managedResponse, 0, response.DataLength); + result.Data = System.Text.Encoding.UTF8.GetString(managedResponse); + } + + if (response.Error != IntPtr.Zero && response.ErrorLength > 0) + { + result.Error = Marshal.PtrToStringUTF8(response.Error, response.ErrorLength)!; + } + + Marshal.FreeHGlobal(response.Data); + Marshal.FreeHGlobal(response.Error); + + return result; + } + + // --- Audio streaming managed implementations --- + // Route through the existing execute_command / execute_command_with_binary entry points. + // The Core handles audio_stream_start / audio_stream_stop as command cases in ExecuteCommandManaged, + // and audio_stream_push as a command case in ExecuteCommandWithBinaryManaged. + + public Response StartAudioStream(CoreInteropRequest request) + { + return ExecuteCommand("audio_stream_start", request); + } + + public Response PushAudioData(CoreInteropRequest request, ReadOnlyMemory<byte> audioData) + { + try + { + var commandInputJson = request.ToJson(); + byte[] commandBytes = System.Text.Encoding.UTF8.GetBytes("audio_stream_push"); + byte[] inputBytes = System.Text.Encoding.UTF8.GetBytes(commandInputJson); + + IntPtr commandPtr = Marshal.AllocHGlobal(commandBytes.Length); + Marshal.Copy(commandBytes, 0, commandPtr, commandBytes.Length); + + IntPtr inputPtr = Marshal.AllocHGlobal(inputBytes.Length); + Marshal.Copy(inputBytes, 0, inputPtr, inputBytes.Length); + + // Pin the managed audio data so GC won't move it during the native call + using var audioHandle = audioData.Pin(); + + unsafe + { + var reqBuf = new StreamingRequestBuffer + { + Command = commandPtr, + CommandLength = commandBytes.Length, + Data = inputPtr, + DataLength = inputBytes.Length, + BinaryData = (nint)audioHandle.Pointer, + BinaryDataLength = audioData.Length + }; + + ResponseBuffer response = default; + + try + { + CoreExecuteCommandWithBinary(&reqBuf, &response); + } + finally + { + Marshal.FreeHGlobal(commandPtr); + Marshal.FreeHGlobal(inputPtr); + } + + return MarshalResponse(response); + } + } + catch (Exception ex) when (ex is not OperationCanceledException) + { + throw new FoundryLocalException("Error executing audio_stream_push", ex, _logger); + } + } + + public Response StopAudioStream(CoreInteropRequest request) + { + return ExecuteCommand("audio_stream_stop", request); + } + } diff --git a/sdk/cs/src/Detail/ICoreInterop.cs b/sdk/cs/src/Detail/ICoreInterop.cs index 1fff9dde..b493dfb7 100644 --- a/sdk/cs/src/Detail/ICoreInterop.cs +++ b/sdk/cs/src/Detail/ICoreInterop.cs @@ -51,4 +51,21 @@ Task ExecuteCommandAsync(string commandName, CoreInteropRequest? comma Task ExecuteCommandWithCallbackAsync(string commandName, CoreInteropRequest? commandInput, CallbackFn callback, CancellationToken?
ct = null); + + // --- Audio streaming session support --- + + [StructLayout(LayoutKind.Sequential)] + protected unsafe struct StreamingRequestBuffer + { + public nint Command; + public int CommandLength; + public nint Data; // JSON params + public int DataLength; + public nint BinaryData; // raw PCM audio bytes + public int BinaryDataLength; + } + + Response StartAudioStream(CoreInteropRequest request); + Response PushAudioData(CoreInteropRequest request, ReadOnlyMemory<byte> audioData); + Response StopAudioStream(CoreInteropRequest request); } diff --git a/sdk/cs/src/Detail/JsonSerializationContext.cs b/sdk/cs/src/Detail/JsonSerializationContext.cs index 894f9454..3fefd305 100644 --- a/sdk/cs/src/Detail/JsonSerializationContext.cs +++ b/sdk/cs/src/Detail/JsonSerializationContext.cs @@ -33,6 +33,10 @@ namespace Microsoft.AI.Foundry.Local.Detail; [JsonSerializable(typeof(IList))] [JsonSerializable(typeof(PropertyDefinition))] [JsonSerializable(typeof(IList))] +// --- Audio streaming types (LiveAudioTranscriptionResponse inherits ConversationItem +// which has AOT-incompatible JsonConverters, so we only register the raw deserialization type) --- +[JsonSerializable(typeof(LiveAudioTranscriptionRaw))] +[JsonSerializable(typeof(CoreErrorResponse))] [JsonSourceGenerationOptions(DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull, WriteIndented = false)] internal partial class JsonSerializationContext : JsonSerializerContext diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj index 905f9652..8f03be7d 100644 --- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj +++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj @@ -99,8 +99,8 @@ $(FoundryLocalCoreVersion) - 0.9.0.8-rc3 - 0.9.0.8-rc3 + 0.9.0-dev-20260325T055840-33ebe7c + 0.9.0-dev-20260325T055742-33ebe7c True diff --git a/sdk/cs/src/OpenAI/AudioClient.cs b/sdk/cs/src/OpenAI/AudioClient.cs index 564858f3..a8cbc1d7 100644 --- a/sdk/cs/src/OpenAI/AudioClient.cs +++ b/sdk/cs/src/OpenAI/AudioClient.cs @@ -8,7 +8,6 @@ namespace Microsoft.AI.Foundry.Local; using System.Runtime.CompilerServices; using System.Threading.Channels; - using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; using Betalgo.Ranul.OpenAI.ObjectModels.ResponseModels; @@ -85,6 +84,16 @@ public async IAsyncEnumerable TranscribeAudioS } } + /// + /// Create a real-time streaming transcription session. + /// Audio data is pushed in as PCM chunks and transcription results are returned as an async stream. + /// + /// A streaming session that must be disposed when done. + public LiveAudioTranscriptionSession CreateLiveTranscriptionSession() + { + return new LiveAudioTranscriptionSession(_modelId); + } + private async Task TranscribeAudioImplAsync(string audioFilePath, CancellationToken? ct) { diff --git a/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs b/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs new file mode 100644 index 00000000..6da4d076 --- /dev/null +++ b/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs @@ -0,0 +1,385 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved.
+// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.OpenAI; + +using System.Runtime.CompilerServices; +using System.Globalization; +using System.Threading.Channels; +using Microsoft.AI.Foundry.Local; +using Microsoft.AI.Foundry.Local.Detail; +using Microsoft.Extensions.Logging; + +/// +/// Session for real-time audio streaming ASR (Automatic Speech Recognition). +/// Audio data from a microphone (or other source) is pushed in as PCM chunks, +/// and transcription results are returned as an async stream. +/// +/// Created via `OpenAIAudioClient.CreateLiveTranscriptionSession`. +/// +/// Thread safety: AppendAsync can be called from any thread (including high-frequency +/// audio callbacks). Pushes are internally serialized via a bounded channel to prevent +/// unbounded memory growth and ensure ordering. +/// + +public sealed class LiveAudioTranscriptionSession : IAsyncDisposable +{ + private readonly string _modelId; + private readonly ICoreInterop _coreInterop = FoundryLocalManager.Instance.CoreInterop; + private readonly ILogger _logger = FoundryLocalManager.Instance.Logger; + + // Session state — protected by _lock + private readonly AsyncLock _lock = new(); + private string? _sessionHandle; + private bool _started; + private bool _stopped; + + // Output channel: native callback writes, user reads via GetTranscriptionStream + private Channel<LiveAudioTranscriptionResponse>? _outputChannel; + + // Internal push queue: user writes audio chunks, background loop drains to native core. + // Bounded to prevent unbounded memory growth if native core is slower than real-time. + private Channel<ReadOnlyMemory<byte>>? _pushChannel; + private Task? _pushLoopTask; + + // Dedicated CTS for the push loop — decoupled from StartAsync's caller token. + // Cancelled only during StopAsync/DisposeAsync to allow clean drain. + private CancellationTokenSource? _sessionCts; + + // Snapshot of settings captured at StartAsync — prevents mutation after session starts. + private LiveAudioTranscriptionOptions? _activeSettings; + + /// + /// Audio format settings for the streaming session. + /// Must be configured before calling `StartAsync`. + /// Settings are frozen once the session starts. + /// + public record LiveAudioTranscriptionOptions + { + /// PCM sample rate in Hz. Default: 16000. + public int SampleRate { get; set; } = 16000; + + /// Number of audio channels. Default: 1 (mono). + public int Channels { get; set; } = 1; + + /// Number of bits per audio sample. Default: 16. + public int BitsPerSample { get; set; } = 16; + + /// Optional BCP-47 language hint (e.g., "en", "zh"). + public string? Language { get; set; } + + /// + /// Maximum number of audio chunks buffered in the internal push queue. + /// If the queue is full, AppendAsync will asynchronously wait. + /// Default: 100 (~3 seconds of audio at typical chunk sizes). + /// + public int PushQueueCapacity { get; set; } = 100; + + internal LiveAudioTranscriptionOptions Snapshot() => this with { }; // record copy + } + + public LiveAudioTranscriptionOptions Settings { get; } = new(); + + internal LiveAudioTranscriptionSession(string modelId) + { + _modelId = modelId; + } + + /// + /// Start a real-time audio streaming session. + /// Must be called before `AppendAsync` or `GetTranscriptionStream`. + /// Settings are frozen after this call. + /// + /// Cancellation token.
+ public async Task StartAsync(CancellationToken ct = default) + { + using var disposable = await _lock.LockAsync().ConfigureAwait(false); + + if (_started) + { + throw new FoundryLocalException("Streaming session already started. Call StopAsync first."); + } + + // Freeze settings + _activeSettings = Settings.Snapshot(); + + _outputChannel = Channel.CreateUnbounded<LiveAudioTranscriptionResponse>( + new UnboundedChannelOptions + { + SingleWriter = true, // only the native callback writes + SingleReader = true, + AllowSynchronousContinuations = true + }); + + _pushChannel = Channel.CreateBounded<ReadOnlyMemory<byte>>( + new BoundedChannelOptions(_activeSettings.PushQueueCapacity) + { + SingleReader = true, // only the push loop reads + SingleWriter = false, // multiple threads may push audio data + FullMode = BoundedChannelFullMode.Wait + }); + + var request = new CoreInteropRequest + { + Params = new Dictionary<string, string> + { + { "Model", _modelId }, + { "SampleRate", _activeSettings.SampleRate.ToString(CultureInfo.InvariantCulture) }, + { "Channels", _activeSettings.Channels.ToString(CultureInfo.InvariantCulture) }, + { "BitsPerSample", _activeSettings.BitsPerSample.ToString(CultureInfo.InvariantCulture) }, + } + }; + + if (_activeSettings.Language != null) + { + request.Params["Language"] = _activeSettings.Language; + } + + // StartAudioStream uses existing execute_command entry point — synchronous P/Invoke + var response = await Task.Run( + () => _coreInterop.StartAudioStream(request), ct) + .ConfigureAwait(false); + + if (response.Error != null) + { + _outputChannel.Writer.TryComplete(); + throw new FoundryLocalException( + $"Error starting audio stream session: {response.Error}", _logger); + } + + _sessionHandle = response.Data + ?? throw new FoundryLocalException("Native core did not return a session handle.", _logger); + _started = true; + _stopped = false; + + _sessionCts?.Dispose(); + _sessionCts = new CancellationTokenSource(); +#pragma warning disable IDISP013 // Await in using — Task.Run is intentionally fire-and-forget here + _pushLoopTask = Task.Run(() => PushLoopAsync(_sessionCts.Token), CancellationToken.None); +#pragma warning restore IDISP013 + } + + /// + /// Push a chunk of raw PCM audio data to the streaming session. + /// Can be called from any thread (including audio device callbacks). + /// Chunks are internally queued and serialized to the native core. + /// + /// Raw PCM audio bytes matching the configured format. + /// Cancellation token. + public async ValueTask AppendAsync(ReadOnlyMemory<byte> pcmData, CancellationToken ct = default) + { + if (!_started || _stopped) + { + throw new FoundryLocalException("No active streaming session. Call StartAsync first."); + } + + // Copy the data to avoid issues if the caller reuses the buffer (e.g. NAudio reuses e.Buffer) + var copy = new byte[pcmData.Length]; + pcmData.CopyTo(copy); + + await _pushChannel!.Writer.WriteAsync(copy, ct).ConfigureAwait(false); + } + + /// + /// Internal loop that drains the push queue and sends chunks to native core one at a time. + /// Terminates the session on any native error. + /// + private async Task PushLoopAsync(CancellationToken ct) + { + try + { + await foreach (var audioData in _pushChannel!.Reader.ReadAllAsync(ct).ConfigureAwait(false)) + { + var request = new CoreInteropRequest + { + Params = new Dictionary<string, string> { { "SessionHandle", _sessionHandle!
} } + }; + + var response = _coreInterop.PushAudioData(request, audioData); + + if (response.Error != null) + { + var errorInfo = CoreErrorResponse.TryParse(response.Error); + var fatalEx = new FoundryLocalException( + $"Push failed (code={errorInfo?.Code ?? "UNKNOWN"}): {response.Error}", + _logger); + _logger.LogError("Terminating push loop due to push failure: {Error}", + response.Error); + _outputChannel?.Writer.TryComplete(fatalEx); + return; + } + + // Parse transcription result from push response and surface it + if (!string.IsNullOrEmpty(response.Data)) + { + try + { + var transcription = LiveAudioTranscriptionResponse.FromJson(response.Data); + if (!string.IsNullOrEmpty(transcription.Content?[0]?.Text)) + { + _outputChannel?.Writer.TryWrite(transcription); + } + } + catch (Exception parseEx) + { + // Non-fatal: log and continue if response isn't a transcription result + _logger.LogDebug(parseEx, "Could not parse push response as transcription result"); + } + } + } + } + catch (OperationCanceledException) + { + // Expected on cancellation — push loop exits cleanly + } + catch (Exception ex) + { + _logger.LogError(ex, "Push loop terminated with unexpected error"); + _outputChannel?.Writer.TryComplete( + new FoundryLocalException("Push loop terminated unexpectedly.", ex, _logger)); + } + } + + /// + /// Get the async stream of transcription results. + /// Results arrive as the native ASR engine processes audio data. + /// + /// Cancellation token. + /// Async enumerable of transcription results. + public async IAsyncEnumerable<LiveAudioTranscriptionResponse> GetTranscriptionStream( + [EnumeratorCancellation] CancellationToken ct = default) + { + if (_outputChannel == null) + { + throw new FoundryLocalException("No active streaming session. Call StartAsync first."); + } + + await foreach (var item in _outputChannel.Reader.ReadAllAsync(ct).ConfigureAwait(false)) + { + yield return item; + } + } + + /// + /// Signal end-of-audio and stop the streaming session. + /// Any remaining buffered audio in the push queue will be drained to native core first. + /// Final results are delivered through `GetTranscriptionStream` before it completes. + /// + /// Cancellation token. + public async Task StopAsync(CancellationToken ct = default) + { + using var disposable = await _lock.LockAsync().ConfigureAwait(false); + + if (!_started || _stopped) + { + return; // already stopped or never started + } + + _stopped = true; + + // 1. Complete the push channel so the push loop drains remaining items and exits + _pushChannel?.Writer.TryComplete(); + + // 2. Wait for the push loop to finish draining + if (_pushLoopTask != null) + { + await _pushLoopTask.ConfigureAwait(false); + } + + // 3. Cancel the session CTS (no-op if push loop already exited) + _sessionCts?.Cancel(); + + // 4. Tell native core to flush and finalize. + // This MUST happen even if ct is cancelled — otherwise native session leaks. + var request = new CoreInteropRequest + { + Params = new Dictionary<string, string> { { "SessionHandle", _sessionHandle! } } + }; + + ICoreInterop.Response? response = null; + try + { + response = await Task.Run( + () => _coreInterop.StopAudioStream(request), ct) + .ConfigureAwait(false); + } + catch (OperationCanceledException) when (ct.IsCancellationRequested) + { + // ct fired, but we MUST still stop the native session to avoid a leak.
+ _logger.LogWarning("StopAsync cancelled — performing best-effort native session stop."); + try + { + response = await Task.Run( + () => _coreInterop.StopAudioStream(request)) + .ConfigureAwait(false); + } + catch (Exception cleanupEx) + { + _logger.LogError(cleanupEx, "Best-effort native session stop failed."); + } + + throw; // Re-throw the cancellation after cleanup + } + finally + { + // Parse final transcription from stop response before completing the channel + if (response?.Data != null) + { + try + { + var finalResult = LiveAudioTranscriptionResponse.FromJson(response.Data); + if (!string.IsNullOrEmpty(finalResult.Content?[0]?.Text)) + { + _outputChannel?.Writer.TryWrite(finalResult); + } + } + catch (Exception parseEx) + { + _logger.LogDebug(parseEx, "Could not parse stop response as transcription result"); + } + } + + _sessionHandle = null; + _started = false; + _sessionCts?.Dispose(); + _sessionCts = null; + + // Complete the output channel AFTER writing final result + _outputChannel?.Writer.TryComplete(); + } + + if (response?.Error != null) + { + throw new FoundryLocalException( + $"Error stopping audio stream session: {response.Error}", _logger); + } + } + + /// + /// Dispose the streaming session. Calls `StopAsync` if the session is still active. + /// Safe to call multiple times. + /// + public async ValueTask DisposeAsync() + { + try + { + if (_started && !_stopped) + { + await StopAsync().ConfigureAwait(false); + } + } + catch (Exception ex) + { + // DisposeAsync must never throw — log and swallow + _logger.LogWarning(ex, "Error during DisposeAsync cleanup."); + } + finally + { + _sessionCts?.Dispose(); + _lock.Dispose(); + } + } +} \ No newline at end of file diff --git a/sdk/cs/src/OpenAI/LiveAudioTranscriptionTypes.cs b/sdk/cs/src/OpenAI/LiveAudioTranscriptionTypes.cs new file mode 100644 index 00000000..a0e98542 --- /dev/null +++ b/sdk/cs/src/OpenAI/LiveAudioTranscriptionTypes.cs @@ -0,0 +1,105 @@ +namespace Microsoft.AI.Foundry.Local.OpenAI; + +using System.Text.Json; +using System.Text.Json.Serialization; +using Betalgo.Ranul.OpenAI.ObjectModels.RealtimeModels; +using Microsoft.AI.Foundry.Local; +using Microsoft.AI.Foundry.Local.Detail; + +/// +/// Transcription result for real-time audio streaming sessions. +/// Extends the OpenAI Realtime API's `ConversationItem` so that +/// customers access text via result.Content[0].Text or +/// result.Content[0].Transcript, ensuring forward compatibility +/// when the transport layer moves to WebSocket. +/// +public class LiveAudioTranscriptionResponse : ConversationItem +{ + /// + /// Whether this is a final or partial (interim) result. + /// - Nemotron models always return true (every result is final). + /// - Other models (e.g., Azure Embedded) may return false for interim + /// hypotheses that will be replaced by a subsequent final result. + /// + [JsonPropertyName("is_final")] + public bool IsFinal { get; init; } + + /// Start time offset of this segment in the audio stream (seconds). + [JsonPropertyName("start_time")] + public double? StartTime { get; init; } + + /// End time offset of this segment in the audio stream (seconds). + [JsonPropertyName("end_time")] + public double? EndTime { get; init; } + + internal static LiveAudioTranscriptionResponse FromJson(string json) + { + var raw = JsonSerializer.Deserialize(json, + JsonSerializationContext.Default.LiveAudioTranscriptionRaw) + ??
throw new FoundryLocalException("Failed to deserialize live audio transcription result"); + + return new LiveAudioTranscriptionResponse + { + IsFinal = raw.IsFinal, + StartTime = raw.StartTime, + EndTime = raw.EndTime, + Content = + [ + new ContentPart + { + Text = raw.Text, + Transcript = raw.Text + } + ] + }; + } +} + +/// +/// Internal raw deserialization target matching the Core's JSON format. +/// Mapped to `LiveAudioTranscriptionResponse` in FromJson. +/// +internal record LiveAudioTranscriptionRaw +{ + [JsonPropertyName("is_final")] + public bool IsFinal { get; init; } + + [JsonPropertyName("text")] + public string Text { get; init; } = string.Empty; + + [JsonPropertyName("start_time")] + public double? StartTime { get; init; } + + [JsonPropertyName("end_time")] + public double? EndTime { get; init; } +} + +internal record CoreErrorResponse +{ + [JsonPropertyName("code")] + public string Code { get; init; } = ""; + + [JsonPropertyName("message")] + public string Message { get; init; } = ""; + + [JsonPropertyName("isTransient")] + public bool IsTransient { get; init; } + + /// + /// Attempt to parse a native error string as structured JSON. + /// Returns null if the error is not valid JSON or doesn't match the schema, + /// which should be treated as a permanent/unknown error. + /// + internal static CoreErrorResponse? TryParse(string errorString) + { + try + { + return JsonSerializer.Deserialize(errorString, + JsonSerializationContext.Default.CoreErrorResponse); + } + catch + { + return null; // unstructured error — treat as permanent + } + } +} \ No newline at end of file
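The `isTransient` flag is the piece calling code is expected to branch on. A minimal sketch of that decision, assuming only `TryParse` and `IsTransient` from the record above; the retry policy itself is hypothetical and not part of the SDK:

```csharp
// Hypothetical helper: decide whether a failed native call is worth retrying.
// Only CoreErrorResponse.TryParse and IsTransient come from the SDK type above.
static bool ShouldRetry(string nativeError)
{
    var parsed = CoreErrorResponse.TryParse(nativeError);
    // Unstructured errors parse to null and are treated as permanent.
    return parsed?.IsTransient == true;
}
```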
diff --git a/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs b/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs new file mode 100644 index 00000000..2bc39d68 --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs @@ -0,0 +1,267 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.Tests; + +using System.Text.Json; +using Microsoft.AI.Foundry.Local.Detail; +using Microsoft.AI.Foundry.Local.OpenAI; + +internal sealed class LiveAudioTranscriptionTests +{ + // --- LiveAudioTranscriptionResponse.FromJson tests --- + + [Test] + public async Task FromJson_ParsesTextAndIsFinal() + { + var json = """{"is_final":true,"text":"hello world","start_time":null,"end_time":null}"""; + + var result = LiveAudioTranscriptionResponse.FromJson(json); + + await Assert.That(result.Content).IsNotNull(); + await Assert.That(result.Content!.Count).IsEqualTo(1); + await Assert.That(result.Content[0].Text).IsEqualTo("hello world"); + await Assert.That(result.Content[0].Transcript).IsEqualTo("hello world"); + await Assert.That(result.IsFinal).IsTrue(); + } + + [Test] + public async Task FromJson_MapsTimingFields() + { + var json = """{"is_final":false,"text":"partial","start_time":1.5,"end_time":3.0}"""; + + var result = LiveAudioTranscriptionResponse.FromJson(json); + + await Assert.That(result.Content?[0]?.Text).IsEqualTo("partial"); + await Assert.That(result.IsFinal).IsFalse(); + await Assert.That(result.StartTime).IsEqualTo(1.5); + await Assert.That(result.EndTime).IsEqualTo(3.0); + } + + [Test] + public async Task FromJson_EmptyText_ParsesSuccessfully() + { + var json = """{"is_final":true,"text":"","start_time":null,"end_time":null}"""; + + var result = LiveAudioTranscriptionResponse.FromJson(json); + + await Assert.That(result.Content?[0]?.Text).IsEqualTo(""); + await Assert.That(result.IsFinal).IsTrue(); + } + + [Test] + public async Task FromJson_OnlyStartTime_SetsStartTime() + { + var json = """{"is_final":true,"text":"word","start_time":2.0,"end_time":null}"""; + + var result = LiveAudioTranscriptionResponse.FromJson(json); + + await Assert.That(result.StartTime).IsEqualTo(2.0); + await Assert.That(result.EndTime).IsNull(); + await Assert.That(result.Content?[0]?.Text).IsEqualTo("word"); + } + + [Test] + public async Task FromJson_InvalidJson_Throws() + { + var ex = Assert.Throws<FoundryLocalException>(() => + LiveAudioTranscriptionResponse.FromJson("not valid json")); + await Assert.That(ex).IsNotNull(); + } + + [Test] + public async Task FromJson_ContentHasTextAndTranscript() + { + var json = """{"is_final":true,"text":"test","start_time":null,"end_time":null}"""; + + var result = LiveAudioTranscriptionResponse.FromJson(json); + + // Both Text and Transcript should have the same value + await Assert.That(result.Content?[0]?.Text).IsEqualTo("test"); + await Assert.That(result.Content?[0]?.Transcript).IsEqualTo("test"); + } + + // --- LiveAudioTranscriptionOptions tests --- + + [Test] + public async Task Options_DefaultValues() + { + var options = new LiveAudioTranscriptionSession.LiveAudioTranscriptionOptions(); + + await Assert.That(options.SampleRate).IsEqualTo(16000); + await Assert.That(options.Channels).IsEqualTo(1); + await Assert.That(options.Language).IsNull(); + await Assert.That(options.PushQueueCapacity).IsEqualTo(100); + } + + // --- CoreErrorResponse tests --- + + [Test] + public async Task CoreErrorResponse_TryParse_ValidJson() + { + var json = """{"code":"ASR_SESSION_NOT_FOUND","message":"Session not found","isTransient":false}"""; + + var error = CoreErrorResponse.TryParse(json); + + await Assert.That(error).IsNotNull(); + await Assert.That(error!.Code).IsEqualTo("ASR_SESSION_NOT_FOUND"); + await
Assert.That(error.Message).IsEqualTo("Session not found"); + await Assert.That(error.IsTransient).IsFalse(); + } + + [Test] + public async Task CoreErrorResponse_TryParse_InvalidJson_ReturnsNull() + { + var result = CoreErrorResponse.TryParse("not json"); + await Assert.That(result).IsNull(); + } + + [Test] + public async Task CoreErrorResponse_TryParse_TransientError() + { + var json = """{"code":"BUSY","message":"Model busy","isTransient":true}"""; + + var error = CoreErrorResponse.TryParse(json); + + await Assert.That(error).IsNotNull(); + await Assert.That(error!.IsTransient).IsTrue(); + } + + // --- Session state guard tests --- + + [Test] + public async Task AppendAsync_BeforeStart_Throws() + { + await using var session = new LiveAudioTranscriptionSession("test-model"); + var data = new ReadOnlyMemory<byte>(new byte[100]); + + FoundryLocalException? caught = null; + try + { + await session.AppendAsync(data); + } + catch (FoundryLocalException ex) + { + caught = ex; + } + + await Assert.That(caught).IsNotNull(); + } + + [Test] + public async Task GetTranscriptionStream_BeforeStart_Throws() + { + await using var session = new LiveAudioTranscriptionSession("test-model"); + + FoundryLocalException? caught = null; + try + { + await foreach (var _ in session.GetTranscriptionStream()) + { + // should not reach here + } + } + catch (FoundryLocalException ex) + { + caught = ex; + } + + await Assert.That(caught).IsNotNull(); + } + + // --- E2E streaming test with synthetic PCM audio --- + + [Test] + public async Task LiveStreaming_E2E_WithSyntheticPCM_ReturnsValidResponse() + { + // Skip if FoundryLocalManager is not initialized (no Core DLL / no models) + if (!FoundryLocalManager.IsInitialized) + { + return; + } + + var manager = FoundryLocalManager.Instance; + var catalog = await manager.GetCatalogAsync(); + var model = await catalog.GetModelAsync("nemotron"); + + if (model == null) + { + // Skip gracefully if nemotron model not available + return; + } + + if (!await model.IsCachedAsync()) + { + return; + } + + await model.LoadAsync(); + + try + { + var audioClient = await model.GetAudioClientAsync(); + var session = audioClient.CreateLiveTranscriptionSession(); + session.Settings.SampleRate = 16000; + session.Settings.Channels = 1; + session.Settings.BitsPerSample = 16; + + await session.StartAsync(); + + // Start collecting results in background (must start before pushing audio) + var results = new List<LiveAudioTranscriptionResponse>(); + var readTask = Task.Run(async () => + { + await foreach (var result in session.GetTranscriptionStream()) + { + results.Add(result); + } + }); + + // Generate ~2 seconds of synthetic PCM audio (440Hz sine wave, 16kHz, 16-bit mono) + const int sampleRate = 16000; + const int durationSeconds = 2; + const double frequency = 440.0; + int totalSamples = sampleRate * durationSeconds; + var pcmBytes = new byte[totalSamples * 2]; // 16-bit = 2 bytes per sample + + for (int i = 0; i < totalSamples; i++) + { + double t = (double)i / sampleRate; + short sample = (short)(short.MaxValue * 0.5 * Math.Sin(2 * Math.PI * frequency * t)); + pcmBytes[i * 2] = (byte)(sample & 0xFF); + pcmBytes[i * 2 + 1] = (byte)((sample >> 8) & 0xFF); + } + + // Push audio in chunks (100ms each, matching typical mic callback size) + int chunkSize = sampleRate / 10 * 2; // 100ms of 16-bit audio + for (int offset = 0; offset < pcmBytes.Length; offset += chunkSize) + { + int len = Math.Min(chunkSize, pcmBytes.Length - offset); + await session.AppendAsync(new ReadOnlyMemory<byte>(pcmBytes, offset, len)); + } + + // Stop session to flush
remaining audio and complete the stream + await session.StopAsync(); + await readTask; + + // Verify response attributes — synthetic audio may or may not produce text, + // but the response objects should be properly structured + foreach (var result in results) + { + // Verify ConversationItem-shaped response + await Assert.That(result.Content).IsNotNull(); + await Assert.That(result.Content!.Count).IsGreaterThan(0); + await Assert.That(result.Content[0].Text).IsNotNull(); + // Text and Transcript should be the same + await Assert.That(result.Content[0].Transcript).IsEqualTo(result.Content[0].Text); + } + } + finally + { + await model.UnloadAsync(); + } + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs b/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs index b5a49657..1f49560d 100644 --- a/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs @@ -51,4 +51,4 @@ public async Task GetLastestVersion_Works() var latestB = model.GetLatestVersion(variants[2]); await Assert.That(latestB).IsEqualTo(variants[1]); } -} +} \ No newline at end of file From 9e0c70d0fcd8aa6dc2bdcd679dd432bf4ae7a59b Mon Sep 17 00:00:00 2001 From: Scott McKay Date: Tue, 31 Mar 2026 17:07:26 +1000 Subject: [PATCH 11/83] Use IModel in the public API. (#556) Use IModel in the public API. Changes allow ICatalog and IModel to be stubbed for testing as you no longer need a concrete Model or ModelVariant class. - Make Model and ModelVariant implementation details - Add variant info and selection to IModel so it works with either Model or ModelVariant - Move GetLatestVersion to Catalog and take IModel as input - ModelVariant has insufficient info to implement this and intuitively the catalog should know this information. - Update tests - fix usage of test config file for shared test data path --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: skottmckay <979079+skottmckay@users.noreply.github.com> Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../src/ModelManagementExample/Program.cs | 20 ++- sdk/cs/src/Catalog.cs | 69 ++++++++-- sdk/cs/src/{ => Detail}/Model.cs | 32 ++--- sdk/cs/src/{ => Detail}/ModelVariant.cs | 11 +- sdk/cs/src/ICatalog.cs | 30 +++-- sdk/cs/src/IModel.cs | 15 +++ .../FoundryLocal.Tests/AudioClientTests.cs | 2 +- .../test/FoundryLocal.Tests/CatalogTests.cs | 121 ++++++++++++++++++ .../ChatCompletionsTests.cs | 9 +- sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs | 5 +- .../FoundryLocalManagerTest.cs | 2 +- .../FoundryLocal.Tests/LOCAL_MODEL_TESTING.md | 23 +--- sdk/cs/test/FoundryLocal.Tests/ModelTests.cs | 54 -------- .../TestAssemblySetupCleanup.cs | 22 ++-- sdk/cs/test/FoundryLocal.Tests/Utils.cs | 4 +- .../FoundryLocal.Tests/appsettings.Test.json | 2 +- 16 files changed, 272 insertions(+), 149 deletions(-) rename sdk/cs/src/{ => Detail}/Model.cs (74%) rename sdk/cs/src/{ => Detail}/ModelVariant.cs (95%) create mode 100644 sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs delete mode 100644 sdk/cs/test/FoundryLocal.Tests/ModelTests.cs diff --git a/samples/cs/GettingStarted/src/ModelManagementExample/Program.cs b/samples/cs/GettingStarted/src/ModelManagementExample/Program.cs index 2b6fe2e8..38dec588 100644 --- a/samples/cs/GettingStarted/src/ModelManagementExample/Program.cs +++ b/samples/cs/GettingStarted/src/ModelManagementExample/Program.cs @@ -51,39 +51,35 @@ // Get a model using an alias from the catalog var model = await catalog.GetModelAsync("qwen2.5-0.5b") ?? 
throw new Exception("Model not found"); -// `model.SelectedVariant` indicates which variant will be used by default. -// // Models in Model.Variants are ordered by priority, with the highest priority first. // The first downloaded model is selected by default. // The highest priority is selected if no models have been downloaded. // If the selected variant is not the highest priority, it means that Foundry Local // has found a locally cached variant for you to improve performance (remove need to download). Console.WriteLine("\nThe default selected model variant is: " + model.Id); -if (model.SelectedVariant != model.Variants.First()) +if (model.Id != model.Variants.First().Id) { - Debug.Assert(await model.SelectedVariant.IsCachedAsync()); + Debug.Assert(await model.IsCachedAsync()); Console.WriteLine("The model variant was selected due to being locally cached."); } -// OPTIONAL: `model` can be used directly and `model.SelectedVariant` will be used as the default. -// You can explicitly select or use a specific ModelVariant if you want more control -// over the device and/or execution provider used. -// Model and ModelVariant can be used interchangeably in methods such as -// DownloadAsync, LoadAsync, UnloadAsync and GetChatClientAsync. +// OPTIONAL: `model` can be used directly with its currently selected variant. +// You can explicitly select (`model.SelectVariant`) or use a specific variant from `model.Variants` +// if you want more control over the device and/or execution provider used. // // Choices: -// - Use a ModelVariant directly from the catalog if you know the variant Id +// - Use a model variant directly from the catalog if you know the variant Id // - `var modelVariant = await catalog.GetModelVariantAsync("qwen2.5-0.5b-instruct-generic-gpu:3")` // -// - Get the ModelVariant from Model.Variants +// - Get the model variant from IModel.Variants // - `var modelVariant = model.Variants.First(v => v.Id == "qwen2.5-0.5b-instruct-generic-cpu:4")` // - `var modelVariant = model.Variants.First(v => v.Info.Runtime?.DeviceType == DeviceType.GPU)` // - optional: update selected variant in `model` using `model.SelectVariant(modelVariant);` if you wish to use // `model` in your code. // For this example we explicitly select the CPU variant, and call SelectVariant so all the following example code -// uses the `model` instance. +// uses the `model` instance. It would be equally valid to use `modelVariant` directly. Console.WriteLine("Selecting CPU variant of model"); var modelVariant = model.Variants.First(v => v.Info.Runtime?.DeviceType == DeviceType.CPU); model.SelectVariant(modelVariant); diff --git a/sdk/cs/src/Catalog.cs b/sdk/cs/src/Catalog.cs index eb9ba0d7..5cdb050f 100644 --- a/sdk/cs/src/Catalog.cs +++ b/sdk/cs/src/Catalog.cs @@ -52,51 +52,59 @@ internal static async Task CreateAsync(IModelLoadManager modelManager, return catalog; } - public async Task> ListModelsAsync(CancellationToken? ct = null) + public async Task> ListModelsAsync(CancellationToken? ct = null) { return await Utils.CallWithExceptionHandling(() => ListModelsImplAsync(ct), "Error listing models.", _logger).ConfigureAwait(false); } - public async Task> GetCachedModelsAsync(CancellationToken? ct = null) + public async Task> GetCachedModelsAsync(CancellationToken? ct = null) { return await Utils.CallWithExceptionHandling(() => GetCachedModelsImplAsync(ct), "Error getting cached models.", _logger).ConfigureAwait(false); } - public async Task> GetLoadedModelsAsync(CancellationToken? 
ct = null) + public async Task> GetLoadedModelsAsync(CancellationToken? ct = null) { return await Utils.CallWithExceptionHandling(() => GetLoadedModelsImplAsync(ct), "Error getting loaded models.", _logger).ConfigureAwait(false); } - public async Task GetModelAsync(string modelAlias, CancellationToken? ct = null) + public async Task GetModelAsync(string modelAlias, CancellationToken? ct = null) { return await Utils.CallWithExceptionHandling(() => GetModelImplAsync(modelAlias, ct), $"Error getting model with alias '{modelAlias}'.", _logger) .ConfigureAwait(false); } - public async Task GetModelVariantAsync(string modelId, CancellationToken? ct = null) + public async Task GetModelVariantAsync(string modelId, CancellationToken? ct = null) { return await Utils.CallWithExceptionHandling(() => GetModelVariantImplAsync(modelId, ct), $"Error getting model variant with ID '{modelId}'.", _logger) .ConfigureAwait(false); } - private async Task> ListModelsImplAsync(CancellationToken? ct = null) + public async Task GetLatestVersionAsync(IModel modelOrModelVariant, CancellationToken? ct = null) + { + return await Utils.CallWithExceptionHandling( + () => GetLatestVersionImplAsync(modelOrModelVariant, ct), + $"Error getting latest version for model with name '{modelOrModelVariant.Info.Name}'.", + _logger).ConfigureAwait(false); + } + + private async Task> ListModelsImplAsync(CancellationToken? ct = null) { await UpdateModels(ct).ConfigureAwait(false); using var disposable = await _lock.LockAsync().ConfigureAwait(false); - return _modelAliasToModel.Values.OrderBy(m => m.Alias).ToList(); + return _modelAliasToModel.Values.OrderBy(m => m.Alias).Cast().ToList(); } - private async Task> GetCachedModelsImplAsync(CancellationToken? ct = null) + private async Task> GetCachedModelsImplAsync(CancellationToken? ct = null) { var cachedModelIds = await Utils.GetCachedModelIdsAsync(_coreInterop, ct).ConfigureAwait(false); - List cachedModels = new(); + List cachedModels = []; foreach (var modelId in cachedModelIds) { if (_modelIdToModelVariant.TryGetValue(modelId, out ModelVariant? modelVariant)) @@ -108,10 +116,10 @@ private async Task> GetCachedModelsImplAsync(CancellationToke return cachedModels; } - private async Task> GetLoadedModelsImplAsync(CancellationToken? ct = null) + private async Task> GetLoadedModelsImplAsync(CancellationToken? ct = null) { var loadedModelIds = await _modelLoadManager.ListLoadedModelsAsync(ct).ConfigureAwait(false); - List loadedModels = new(); + List loadedModels = []; foreach (var modelId in loadedModelIds) { @@ -143,6 +151,45 @@ private async Task> GetLoadedModelsImplAsync(CancellationToke return modelVariant; } + private async Task GetLatestVersionImplAsync(IModel modelOrModelVariant, CancellationToken? ct) + { + Model? model; + + if (modelOrModelVariant is ModelVariant) + { + // For ModelVariant, resolve the owning Model via alias. + model = await GetModelImplAsync(modelOrModelVariant.Alias, ct); + } + else + { + // Try to use the concrete Model instance if this is our SDK type. + model = modelOrModelVariant as Model; + + // If this is a different IModel implementation (e.g., a test stub), + // fall back to resolving the Model via alias. 
+ if (model == null) + { + model = await GetModelImplAsync(modelOrModelVariant.Alias, ct); + } + } + + if (model == null) + { + throw new FoundryLocalException($"Model with alias '{modelOrModelVariant.Alias}' not found in catalog.", + _logger); + } + + // variants are sorted by version, so the first one matching the name is the latest version for that variant. + var latest = model!.Variants.FirstOrDefault(v => v.Info.Name == modelOrModelVariant.Info.Name) ?? + // should not be possible given we internally manage all the state involved + throw new FoundryLocalException($"Internal error. Mismatch between model (alias:{model.Alias}) and " + + $"model variant (alias:{modelOrModelVariant.Alias}).", _logger); + + // if input was the latest return the input (could be model or model variant) + // otherwise return the latest model variant + return latest.Id == modelOrModelVariant.Id ? modelOrModelVariant : latest; + } + private async Task UpdateModels(CancellationToken? ct) { // TODO: make this configurable diff --git a/sdk/cs/src/Model.cs b/sdk/cs/src/Detail/Model.cs similarity index 74% rename from sdk/cs/src/Model.cs rename to sdk/cs/src/Detail/Model.cs index bbbbcb5b..c4d96057 100644 --- a/sdk/cs/src/Model.cs +++ b/sdk/cs/src/Detail/Model.cs @@ -12,11 +12,13 @@ public class Model : IModel { private readonly ILogger _logger; - public List Variants { get; internal set; } - public ModelVariant SelectedVariant { get; internal set; } = default!; + private readonly List _variants; + public IReadOnlyList Variants => _variants; + internal IModel SelectedVariant { get; set; } = default!; public string Alias { get; init; } public string Id => SelectedVariant.Id; + public ModelInfo Info => SelectedVariant.Info; /// /// Is the currently selected variant cached locally? @@ -33,7 +35,7 @@ internal Model(ModelVariant modelVariant, ILogger logger) _logger = logger; Alias = modelVariant.Alias; - Variants = new() { modelVariant }; + _variants = [modelVariant]; // variants are sorted by Core, so the first one added is the default SelectedVariant = modelVariant; @@ -48,7 +50,7 @@ internal void AddVariant(ModelVariant variant) _logger); } - Variants.Add(variant); + _variants.Add(variant); // prefer the highest priority locally cached variant if (variant.Info.Cached && !SelectedVariant.Info.Cached) @@ -62,31 +64,15 @@ internal void AddVariant(ModelVariant variant) /// /// Model variant to select. Must be one of the variants in . /// If variant is not valid for this model. - public void SelectVariant(ModelVariant variant) + public void SelectVariant(IModel variant) { _ = Variants.FirstOrDefault(v => v == variant) ?? - // user error so don't log - throw new FoundryLocalException($"Model {Alias} does not have a {variant.Id} variant."); + // user error so don't log. + throw new FoundryLocalException($"Input variant was not found in Variants."); SelectedVariant = variant; } - /// - /// Get the latest version of the specified model variant. - /// - /// Model variant. - /// ModelVariant for latest version. Same as `variant` if that is the latest version. - /// If variant is not valid for this model. - public ModelVariant GetLatestVersion(ModelVariant variant) - { - // variants are sorted by version, so the first one matching the name is the latest version for that variant. - var latest = Variants.FirstOrDefault(v => v.Info.Name == variant.Info.Name) ?? 
- // user error so don't log - throw new FoundryLocalException($"Model {Alias} does not have a {variant.Id} variant."); - - return latest; - } - public async Task GetPathAsync(CancellationToken? ct = null) { return await SelectedVariant.GetPathAsync(ct).ConfigureAwait(false); diff --git a/sdk/cs/src/ModelVariant.cs b/sdk/cs/src/Detail/ModelVariant.cs similarity index 95% rename from sdk/cs/src/ModelVariant.cs rename to sdk/cs/src/Detail/ModelVariant.cs index 6ca7cda7..9f2deaba 100644 --- a/sdk/cs/src/ModelVariant.cs +++ b/sdk/cs/src/Detail/ModelVariant.cs @@ -9,7 +9,7 @@ namespace Microsoft.AI.Foundry.Local; using Microsoft.AI.Foundry.Local.Detail; using Microsoft.Extensions.Logging; -public class ModelVariant : IModel +internal class ModelVariant : IModel { private readonly IModelLoadManager _modelLoadManager; private readonly ICoreInterop _coreInterop; @@ -22,6 +22,8 @@ public class ModelVariant : IModel public string Alias => Info.Alias; public int Version { get; init; } // parsed from Info.Version if possible, else 0 + public IReadOnlyList Variants => [this]; + internal ModelVariant(ModelInfo modelInfo, IModelLoadManager modelLoadManager, ICoreInterop coreInterop, ILogger logger) { @@ -190,4 +192,11 @@ private async Task GetAudioClientImplAsync(CancellationToken? return new OpenAIAudioClient(Id); } + + public void SelectVariant(IModel variant) + { + throw new FoundryLocalException( + $"SelectVariant is not supported on a ModelVariant. " + + $"Call Catalog.GetModelAsync(\"{Alias}\") to get an IModel with all variants available."); + } } diff --git a/sdk/cs/src/ICatalog.cs b/sdk/cs/src/ICatalog.cs index 35285736..85851a9c 100644 --- a/sdk/cs/src/ICatalog.cs +++ b/sdk/cs/src/ICatalog.cs @@ -18,36 +18,46 @@ public interface ICatalog /// List the available models in the catalog. /// /// Optional CancellationToken. - /// List of Model instances. - Task> ListModelsAsync(CancellationToken? ct = null); + /// List of IModel instances. Task> ListModelsAsync(CancellationToken? ct = null); /// /// Lookup a model by its alias. /// /// Model alias. /// Optional CancellationToken. - /// The matching Model, or null if no model with the given alias exists. - Task GetModelAsync(string modelAlias, CancellationToken? ct = null); + /// The matching IModel, or null if no model with the given alias exists. + Task GetModelAsync(string modelAlias, CancellationToken? ct = null); /// /// Lookup a model variant by its unique model id. + /// NOTE: This will return an IModel with a single variant. Use GetModelAsync to get an IModel with all available + /// variants. /// /// Model id. /// Optional CancellationToken. - /// The matching ModelVariant, or null if no variant with the given id exists. - Task GetModelVariantAsync(string modelId, CancellationToken? ct = null); + /// The matching IModel, or null if no variant with the given id exists. + Task GetModelVariantAsync(string modelId, CancellationToken? ct = null); /// /// Get a list of currently downloaded models from the model cache. /// /// Optional CancellationToken. - /// List of ModelVariant instances. - Task> GetCachedModelsAsync(CancellationToken? ct = null); + /// List of IModel instances. + Task> GetCachedModelsAsync(CancellationToken? ct = null); /// /// Get a list of the currently loaded models. /// /// Optional CancellationToken. - /// List of ModelVariant instances. - Task> GetLoadedModelsAsync(CancellationToken? ct = null); + /// List of IModel instances. Task> GetLoadedModelsAsync(CancellationToken?
ct = null); + + /// + /// Get the latest version of a model. + /// This is used to check if a newer version of a model is available in the catalog for download. + /// + /// The model to check for the latest version. + /// The latest version of the model. Will match the input if it is the latest version. + Task GetLatestVersionAsync(IModel model, CancellationToken? ct = null); } diff --git a/sdk/cs/src/IModel.cs b/sdk/cs/src/IModel.cs index c3acba61..a27f3a3d 100644 --- a/sdk/cs/src/IModel.cs +++ b/sdk/cs/src/IModel.cs @@ -16,6 +16,8 @@ public interface IModel Justification = "Alias is a suitable name in this context.")] string Alias { get; } + ModelInfo Info { get; } + Task IsCachedAsync(CancellationToken? ct = null); Task IsLoadedAsync(CancellationToken? ct = null); @@ -67,4 +69,17 @@ Task DownloadAsync(Action? downloadProgress = null, /// Optional cancellation token. /// OpenAI.AudioClient Task GetAudioClientAsync(CancellationToken? ct = null); + + /// + /// Variants of the model that are available. Variants of the model are optimized for different devices. + /// + IReadOnlyList Variants { get; } + + /// + /// Select a model variant from to use for operations. + /// An IModel from `Variants` can also be used directly. + /// + /// Model variant to select. Must be one of the variants in . + /// If variant is not valid for this model. + void SelectVariant(IModel variant); } diff --git a/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs b/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs index ec4ab4c9..5c4cc8d6 100644 --- a/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs @@ -12,7 +12,7 @@ namespace Microsoft.AI.Foundry.Local.Tests; internal sealed class AudioClientTests { - private static Model? model; + private static IModel? model; [Before(Class)] public static async Task Setup() diff --git a/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs b/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs new file mode 100644 index 00000000..d270ac15 --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs @@ -0,0 +1,121 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. 
+// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.Tests; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json; +using System.Threading.Tasks; + +using Microsoft.AI.Foundry.Local.Detail; +using Microsoft.Extensions.Logging.Abstractions; + +using Moq; + +internal sealed class CatalogTests +{ + [Test] + public async Task GetLatestVersion_Works() + { + // Create test data with 3 entries for a model with different versions + // Sorted by version (descending), so version 3 is first (latest) + var testModelInfos = new List + { + new() + { + Id = "test-model:3", + Name = "test-model", + Version = 3, + Alias = "test-alias", + DisplayName = "Test Model", + ProviderType = "test", + Uri = "test://model/3", + ModelType = "ONNX", + Runtime = new Runtime { DeviceType = DeviceType.CPU, ExecutionProvider = "CPUExecutionProvider" }, + Cached = false + }, + new() + { + Id = "test-model:2", + Name = "test-model", + Version = 2, + Alias = "test-alias", + DisplayName = "Test Model", + ProviderType = "test", + Uri = "test://model/2", + ModelType = "ONNX", + Runtime = new Runtime { DeviceType = DeviceType.CPU, ExecutionProvider = "CPUExecutionProvider" }, + Cached = false + }, + new() + { + Id = "test-model:1", + Name = "test-model", + Version = 1, + Alias = "test-alias", + DisplayName = "Test Model", + ProviderType = "test", + Uri = "test://model/1", + ModelType = "ONNX", + Runtime = new Runtime { DeviceType = DeviceType.CPU, ExecutionProvider = "CPUExecutionProvider" }, + Cached = false + } + }; + + // Serialize the test data + var modelListJson = JsonSerializer.Serialize(testModelInfos, JsonSerializationContext.Default.ListModelInfo); + + // Create mock ICoreInterop + var mockCoreInterop = new Mock(); + + // Mock get_catalog_name + mockCoreInterop.Setup(x => x.ExecuteCommand("get_catalog_name", It.IsAny())) + .Returns(new ICoreInterop.Response { Data = "TestCatalog", Error = null }); + + // Mock get_model_list + mockCoreInterop.Setup(x => x.ExecuteCommandAsync("get_model_list", It.IsAny(), It.IsAny())) + .ReturnsAsync(new ICoreInterop.Response { Data = modelListJson, Error = null }); + + // Create mock IModelLoadManager + var mockLoadManager = new Mock(); + + // Create Catalog instance directly (internals are visible to test project) + var catalog = await Catalog.CreateAsync(mockLoadManager.Object, mockCoreInterop.Object, + NullLogger.Instance, null); + + // Get the model + var model = await catalog.GetModelAsync("test-alias"); + await Assert.That(model).IsNotNull(); + + // Verify we have 3 variants + await Assert.That(model!.Variants).HasCount().EqualTo(3); + + // Get the variants - they should be sorted by version (descending) + var variants = model.Variants.ToList(); + var latestVariant = variants[0]; // version 3 + var middleVariant = variants[1]; // version 2 + var oldestVariant = variants[2]; // version 1 + + await Assert.That(latestVariant.Id).IsEqualTo("test-model:3"); + await Assert.That(middleVariant.Id).IsEqualTo("test-model:2"); + await Assert.That(oldestVariant.Id).IsEqualTo("test-model:1"); + + // Test GetLatestVersionAsync with all 3 variants - should always return the first (version 3) + var result1 = await catalog.GetLatestVersionAsync(latestVariant); + await Assert.That(result1.Id).IsEqualTo("test-model:3"); + + var result2 = await catalog.GetLatestVersionAsync(middleVariant); + await Assert.That(result2.Id).IsEqualTo("test-model:3"); + + var result3 = 
await catalog.GetLatestVersionAsync(oldestVariant); + await Assert.That(result3.Id).IsEqualTo("test-model:3"); + + // Test with Model input - when latest is selected, should get Model not ModelVariant back + model.SelectVariant(latestVariant); + var result4 = await catalog.GetLatestVersionAsync(model); + await Assert.That(result4).IsEqualTo(model); + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs b/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs index b7a91190..2624f98a 100644 --- a/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs @@ -15,7 +15,7 @@ namespace Microsoft.AI.Foundry.Local.Tests; internal sealed class ChatCompletionsTests { - private static Model? model; + private static IModel? model; [Before(Class)] public static async Task Setup() @@ -24,11 +24,10 @@ public static async Task Setup() var catalog = await manager.GetCatalogAsync(); // Load the specific cached model variant directly - var modelVariant = await catalog.GetModelVariantAsync("qwen2.5-0.5b-instruct-generic-cpu:4").ConfigureAwait(false); - await Assert.That(modelVariant).IsNotNull(); + var model = await catalog.GetModelVariantAsync("qwen2.5-0.5b-instruct-generic-cpu:4").ConfigureAwait(false); + await Assert.That(model).IsNotNull(); - var model = new Model(modelVariant!, manager.Logger); - await model.LoadAsync().ConfigureAwait(false); + await model!.LoadAsync().ConfigureAwait(false); await Assert.That(await model.IsLoadedAsync()).IsTrue(); ChatCompletionsTests.model = model; diff --git a/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs b/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs index 80ab4c0a..56c70769 100644 --- a/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs +++ b/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs @@ -29,8 +29,9 @@ public async Task EndToEndTest_Succeeds() await Assert.That(modelVariant).IsNotNull(); await Assert.That(modelVariant!.Alias).IsEqualTo("qwen2.5-0.5b"); - // Create model from the specific variant - var model = new Model(modelVariant, manager.Logger); + // Get Model for variant and select the variant so `model` and `modelVariant` should be equivalent + var model = await catalog.GetModelAsync(modelVariant.Alias); + model!.SelectVariant(modelVariant); // uncomment this to remove the model first to test the download progress // only do this when manually testing as other tests expect the model to be cached diff --git a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs index 5227e062..cd7e7793 100644 --- a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs +++ b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs @@ -26,7 +26,7 @@ public async Task Manager_GetCatalog_Succeeds() foreach (var model in models) { Console.WriteLine($"Model Alias: {model.Alias}, Variants: {model.Variants.Count}"); - Console.WriteLine($"Selected Variant Id: {model.SelectedVariant?.Id ?? "none"}"); + Console.WriteLine($"Selected Variant Id: {model.Id ?? 
"none"}"); // variants should be in sorted order diff --git a/sdk/cs/test/FoundryLocal.Tests/LOCAL_MODEL_TESTING.md b/sdk/cs/test/FoundryLocal.Tests/LOCAL_MODEL_TESTING.md index 1145cd9d..1b4a71e7 100644 --- a/sdk/cs/test/FoundryLocal.Tests/LOCAL_MODEL_TESTING.md +++ b/sdk/cs/test/FoundryLocal.Tests/LOCAL_MODEL_TESTING.md @@ -6,10 +6,14 @@ The test model cache directory name is configured in `sdk/cs/test/FoundryLocal.T ```json { - "TestModelCacheDirName": "/path/to/model/cache" + "TestModelCacheDirName": "test-data-shared" } ``` +If the value is a directory name it will be resolved as /../{TestModelCacheDirName}. +Otherwise the value will be resolved using Path.GetFullPath, which allows for absolute paths or +relative paths based on the current working directory. + ## Run the tests The tests will automatically find the models in the configured test model cache directory. @@ -17,21 +21,4 @@ The tests will automatically find the models in the configured test model cache ```bash cd /path/to/parent-dir/foundry-local-sdk/sdk/cs/test/FoundryLocal.Tests dotnet test Microsoft.AI.Foundry.Local.Tests.csproj --configuration Release# Running Local Model Tests - -## Configuration - -The test model cache directory name is configured in `sdk/cs/test/FoundryLocal.Tests/appsettings.Test.json`: - -```json -{ - "TestModelCacheDirName": "/path/to/model/cache" -} ``` - -## Run the tests - -The tests will automatically find the models in the configured test model cache directory. - -```bash -cd /path/to/parent-dir/foundry-local-sdk/sdk/cs/test/FoundryLocal.Tests -dotnet test Microsoft.AI.Foundry.Local.Tests.csproj --configuration Release \ No newline at end of file diff --git a/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs b/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs deleted file mode 100644 index 1f49560d..00000000 --- a/sdk/cs/test/FoundryLocal.Tests/ModelTests.cs +++ /dev/null @@ -1,54 +0,0 @@ -// -------------------------------------------------------------------------------------------------------------------- -// -// Copyright (c) Microsoft. All rights reserved. 
-// -// -------------------------------------------------------------------------------------------------------------------- - -namespace Microsoft.AI.Foundry.Local.Tests; -using System.Collections.Generic; -using System.Threading.Tasks; - -using Microsoft.Extensions.Logging.Abstractions; - -using Moq; - -internal sealed class ModelTests -{ - [Test] - public async Task GetLastestVersion_Works() - { - var loadManager = new Mock(); - var coreInterop = new Mock(); - var logger = NullLogger.Instance; - - var createModelInfo = (string name, int version) => new ModelInfo - { - Id = $"{name}:{version}", - Alias = "model", - Name = name, - Version = version, - Uri = "local://model", - ProviderType = "local", - ModelType = "test" - }; - - var variants = new List - { - new(createModelInfo("model_a", 4), loadManager.Object, coreInterop.Object, logger), - new(createModelInfo("model_b", 3), loadManager.Object, coreInterop.Object, logger), - new(createModelInfo("model_b", 2), loadManager.Object, coreInterop.Object, logger), - }; - - var model = new Model(variants[0], NullLogger.Instance); - foreach (var variant in variants.Skip(1)) - { - model.AddVariant(variant); - } - - var latestA = model.GetLatestVersion(variants[0]); - await Assert.That(latestA).IsEqualTo(variants[0]); - - var latestB = model.GetLatestVersion(variants[2]); - await Assert.That(latestB).IsEqualTo(variants[1]); - } -} \ No newline at end of file diff --git a/sdk/cs/test/FoundryLocal.Tests/TestAssemblySetupCleanup.cs b/sdk/cs/test/FoundryLocal.Tests/TestAssemblySetupCleanup.cs index ac536d12..2136a8eb 100644 --- a/sdk/cs/test/FoundryLocal.Tests/TestAssemblySetupCleanup.cs +++ b/sdk/cs/test/FoundryLocal.Tests/TestAssemblySetupCleanup.cs @@ -15,16 +15,20 @@ public static async Task Cleanup(AssemblyHookContext _) { try { - // ensure any loaded models are unloaded - var manager = FoundryLocalManager.Instance; // initialized by Utils - var catalog = await manager.GetCatalogAsync(); - var models = await catalog.GetLoadedModelsAsync().ConfigureAwait(false); - - foreach (var model in models) + // if running individual test/s they may not have used the Utils class which creates FoundryLocalManager + if (FoundryLocalManager.IsInitialized) { - await Assert.That(await model.IsLoadedAsync()).IsTrue(); - await model.UnloadAsync().ConfigureAwait(false); - await Assert.That(await model.IsLoadedAsync()).IsFalse(); + // ensure any loaded models are unloaded + var manager = FoundryLocalManager.Instance; // initialized by Utils + var catalog = await manager.GetCatalogAsync(); + var models = await catalog.GetLoadedModelsAsync().ConfigureAwait(false); + + foreach (var model in models) + { + await Assert.That(await model.IsLoadedAsync()).IsTrue(); + await model.UnloadAsync().ConfigureAwait(false); + await Assert.That(await model.IsLoadedAsync()).IsFalse(); + } } } catch (Exception ex) diff --git a/sdk/cs/test/FoundryLocal.Tests/Utils.cs b/sdk/cs/test/FoundryLocal.Tests/Utils.cs index 6313b0d5..9611d0d4 100644 --- a/sdk/cs/test/FoundryLocal.Tests/Utils.cs +++ b/sdk/cs/test/FoundryLocal.Tests/Utils.cs @@ -55,7 +55,7 @@ public static void AssemblyInit(AssemblyHookContext _) .AddJsonFile("appsettings.Test.json", optional: true, reloadOnChange: false) .Build(); - var testModelCacheDirName = "test-data-shared"; + var testModelCacheDirName = configuration["TestModelCacheDirName"] ?? 
"test-data-shared"; string testDataSharedPath; if (Path.IsPathRooted(testModelCacheDirName) || testModelCacheDirName.Contains(Path.DirectorySeparatorChar) || @@ -74,6 +74,8 @@ public static void AssemblyInit(AssemblyHookContext _) if (!Directory.Exists(testDataSharedPath)) { + // need to ensure there's a user visible error when running in VS. + logger.LogCritical($"Test model cache directory does not exist: {testDataSharedPath}"); throw new DirectoryNotFoundException($"Test model cache directory does not exist: {testDataSharedPath}"); } diff --git a/sdk/cs/test/FoundryLocal.Tests/appsettings.Test.json b/sdk/cs/test/FoundryLocal.Tests/appsettings.Test.json index 87410c33..d42d8789 100644 --- a/sdk/cs/test/FoundryLocal.Tests/appsettings.Test.json +++ b/sdk/cs/test/FoundryLocal.Tests/appsettings.Test.json @@ -1,3 +1,3 @@ { - "TestModelCacheDirName": "/path/to/test/model/cache" + "TestModelCacheDirName": "test-data-shared" } From 2a1bf56520f65b57d7f4e430164fe825d9fe6ba0 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Tue, 31 Mar 2026 10:34:59 -0700 Subject: [PATCH 12/83] implement ADO packaging pipeline for FLC & SDK (#552) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Foundry Local Packaging Pipeline ### Summary This PR introduces the **Foundry Local Packaging Pipeline**, a unified ADO pipeline that builds, signs, and tests Foundry Local Core (FLC) for all platforms, packages it as NuGet and Python wheels, then builds, signs, and tests the C#, JS, Python, and Rust SDKs — for both standard and WinML variants. **Pipeline stages:** 1. **Build FLC** — Native AOT binaries for win-x64, win-arm64, linux-x64, osx-arm64 2. **Package FLC** — Multi-platform NuGet package + Python wheels from the built binaries 3. **Build SDKs** — C#, JS, Python, Rust using the packaged FLC 4. **Test SDKs** — Validate each SDK against the pipeline-built FLC **Produced artifacts:** `flc-nuget`, `flc-nuget-winml`, `flc-wheels`, `flc-wheels-winml`, `cs-sdk`, `cs-sdk-winml`, `js-sdk`, `js-sdk-winml`, `python-sdk`, `python-sdk-winml`, `rust-sdk`, `rust-sdk-winml` **SDK Changes:** 1. Adds ability for python sdk to skip installing native depenencies and use pre-installed binaries like `foundry-local-core`, `onnxruntime`, `onnxruntime-genai` 2. Adjusts APIs to leverage new download_and_register_eps native interop call for manually downloading and registering EPs 3. Adds temporary nuget.config to github actions c# pipeline to allow ORT-Nightly to auto-fetch missing dependencies from upstream nuget.org ### Test coverage All SDK tests currently run on **win-x64 only**. Additional platform test jobs are blocked on infrastructure: - **Windows ARM64** — waiting on a 1ES-hosted win-arm64 pool - **macOS ARM64** — waiting on a 1ES-hosted macOS ARM64 pool - **Linux x64** — waiting on the Linux onnxruntime dependency to be stabilized TODOs are tracked in the pipeline YAML for each. ### Build strategy All FLC builds (including win-arm64 and osx-arm64) run on **x64 machines** because .NET Native AOT supports cross-compilation. The win-arm64 build cross-compiles from x64 Windows — see [Cross-compilation docs](https://learn.microsoft.com/en-us/dotnet/core/deploying/native-aot/cross-compile#windows). Linux builds run on its own respective x64 hosted image. 
### Origin - **Foundry Local Core build steps** were lifted from `neutron-server/.pipelines/FoundryLocalCore/` - **SDK build/test steps** were lifted from `Foundry-Local/.github/` --------- Co-authored-by: Prathik Rao Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .github/workflows/build-cs-steps.yml | 35 +- .github/workflows/foundry-local-sdk-build.yml | 56 +- .pipelines/foundry-local-packaging.yml | 812 +++++++++++++++++- .pipelines/templates/build-core-steps.yml | 194 +++++ .pipelines/templates/build-cs-steps.yml | 191 ++++ .pipelines/templates/build-js-steps.yml | 156 ++++ .pipelines/templates/build-python-steps.yml | 146 ++++ .pipelines/templates/build-rust-steps.yml | 207 +++++ .pipelines/templates/package-core-steps.yml | 256 ++++++ .pipelines/templates/test-cs-steps.yml | 116 +++ .pipelines/templates/test-js-steps.yml | 121 +++ .pipelines/templates/test-python-steps.yml | 133 +++ .pipelines/templates/test-rust-steps.yml | 159 ++++ sdk/cs/README.md | 4 +- ...ft.ai.foundry.local.foundrylocalmanager.md | 8 +- sdk/cs/src/Detail/CoreInterop.cs | 9 + sdk/cs/src/FoundryLocalManager.cs | 16 +- sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 5 +- .../Microsoft.AI.Foundry.Local.Tests.csproj | 5 +- sdk/js/docs/classes/FoundryLocalManager.md | 23 + sdk/js/src/foundryLocalManager.ts | 18 + sdk/python/build_backend.py | 115 ++- sdk/python/src/detail/core_interop.py | 3 + sdk/python/src/foundry_local_manager.py | 10 +- sdk/rust/src/foundry_local_manager.rs | 14 + 25 files changed, 2693 insertions(+), 119 deletions(-) create mode 100644 .pipelines/templates/build-core-steps.yml create mode 100644 .pipelines/templates/build-cs-steps.yml create mode 100644 .pipelines/templates/build-js-steps.yml create mode 100644 .pipelines/templates/build-python-steps.yml create mode 100644 .pipelines/templates/build-rust-steps.yml create mode 100644 .pipelines/templates/package-core-steps.yml create mode 100644 .pipelines/templates/test-cs-steps.yml create mode 100644 .pipelines/templates/test-js-steps.yml create mode 100644 .pipelines/templates/test-python-steps.yml create mode 100644 .pipelines/templates/test-rust-steps.yml diff --git a/.github/workflows/build-cs-steps.yml b/.github/workflows/build-cs-steps.yml index dcfed979..cf680d49 100644 --- a/.github/workflows/build-cs-steps.yml +++ b/.github/workflows/build-cs-steps.yml @@ -41,19 +41,41 @@ jobs: env: NUGET_AUTH_TOKEN: ${{ secrets.AZURE_DEVOPS_PAT }} + - name: Generate temporary NuGet.config + run: | + # The repo-level NuGet.config cleared all sources and only included ORT-Nightly. + # We generate a temporary one with both nuget.org and ORT-Nightly. + # We provide credentials to allow the ORT-Nightly feed to pull from its upstreams. 
+          $xml = @"
+          <configuration>
+            <packageSources>
+              <clear />
+              <add key="nuget.org" value="https://api.nuget.org/v3/index.json" />
+              <add key="ORT-Nightly" value="https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json" />
+            </packageSources>
+            <packageSourceCredentials>
+              <ORT-Nightly>
+                <add key="Username" value="az" />
+                <add key="ClearTextPassword" value="${{ secrets.AZURE_DEVOPS_PAT }}" />
+              </ORT-Nightly>
+            </packageSourceCredentials>
+          </configuration>
+          "@
+          Set-Content -Path sdk/cs/NuGet.temp.config -Value $xml
+        shell: pwsh
+
       # TODO: once the nightly packaging is fixed, add back the commented out lines with /p:FoundryLocalCoreVersion="*-*"
       # /p:FoundryLocalCoreVersion="*-*" to always use nightly version of Foundry Local Core
 
-      - name: Authenticate to Azure Artifacts NuGet feed
-        run: dotnet nuget update source ORT-Nightly --username az --password ${{ secrets.AZURE_DEVOPS_PAT }} --store-password-in-clear-text --configfile sdk/cs/NuGet.config
-
       - name: Restore dependencies
         run: |
-          # dotnet restore sdk/cs/src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=${{ inputs.useWinML }} /p:FoundryLocalCoreVersion="*-*" --configfile sdk/cs/NuGet.config
-          dotnet restore sdk/cs/src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=${{ inputs.useWinML }} --configfile sdk/cs/NuGet.config
+          # Clear the local NuGet cache to avoid bad metadata or corrupted package states.
+          dotnet nuget locals all --clear
+          # Restore using the temporary config file with credentials.
+          dotnet restore sdk/cs/src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=${{ inputs.useWinML }} --configfile sdk/cs/NuGet.temp.config
 
       - name: Build solution
         run: |
-          # dotnet build sdk/cs/src/Microsoft.AI.Foundry.Local.csproj --no-restore --configuration ${{ inputs.buildConfiguration }} /p:UseWinML=${{ inputs.useWinML }} /p:FoundryLocalCoreVersion="*-*"
           dotnet build sdk/cs/src/Microsoft.AI.Foundry.Local.csproj --no-restore --configuration ${{ inputs.buildConfiguration }} /p:UseWinML=${{ inputs.useWinML }}
 
       # need to use direct git commands to clone from Azure DevOps instead of actions/checkout
@@ -89,6 +111,7 @@ jobs:
       - name: Run Foundry Local Core tests
         run: |
           # dotnet test sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj --verbosity normal /p:UseWinML=${{ inputs.useWinML }} /p:FoundryLocalCoreVersion="*-*"
+          # Use the temporary config file for test restore as well.
          dotnet test sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj --verbosity normal /p:UseWinML=${{ inputs.useWinML }}
 
       - name: Pack NuGet package
diff --git a/.github/workflows/foundry-local-sdk-build.yml b/.github/workflows/foundry-local-sdk-build.yml
index 13eddf6d..07ae4d68 100644
--- a/.github/workflows/foundry-local-sdk-build.yml
+++ b/.github/workflows/foundry-local-sdk-build.yml
@@ -17,60 +17,8 @@ permissions:
   contents: read
 
 jobs:
-  build-cs-windows:
-    uses: ./.github/workflows/build-cs-steps.yml
-    with:
-      version: '0.9.0.${{ github.run_number }}'
-      platform: 'windows'
-    secrets: inherit
-  build-js-windows:
-    uses: ./.github/workflows/build-js-steps.yml
-    with:
-      version: '0.9.0.${{ github.run_number }}'
-      platform: 'windows'
-    secrets: inherit
-  build-python-windows:
-    uses: ./.github/workflows/build-python-steps.yml
-    with:
-      version: '0.9.0.${{ github.run_number }}'
-      platform: 'windows'
-    secrets: inherit
-  build-rust-windows:
-    uses: ./.github/workflows/build-rust-steps.yml
-    with:
-      platform: 'windows'
-      run-integration-tests: true
-    secrets: inherit
-
-  build-cs-windows-WinML:
-    uses: ./.github/workflows/build-cs-steps.yml
-    with:
-      version: '0.9.0.${{ github.run_number }}'
-      platform: 'windows'
-      useWinML: true
-    secrets: inherit
-  build-js-windows-WinML:
-    uses: ./.github/workflows/build-js-steps.yml
-    with:
-      version: '0.9.0.${{ github.run_number }}'
-      platform: 'windows'
-      useWinML: true
-    secrets: inherit
-  build-python-windows-WinML:
-    uses: ./.github/workflows/build-python-steps.yml
-    with:
-      version: '0.9.0.${{ github.run_number }}'
-      platform: 'windows'
-      useWinML: true
-    secrets: inherit
-  build-rust-windows-WinML:
-    uses: ./.github/workflows/build-rust-steps.yml
-    with:
-      platform: 'windows'
-      useWinML: true
-      run-integration-tests: true
-    secrets: inherit
-
+  # Windows build/test moved to .pipelines/foundry-local-packaging.yml and runs in ADO
+  # macOS ARM64 is not supported in ADO, so it stays on GitHub Actions
   build-cs-macos:
     uses: ./.github/workflows/build-cs-steps.yml
     with:
diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml
index b87eb70e..2cb9ee2a 100644
--- a/.pipelines/foundry-local-packaging.yml
+++ b/.pipelines/foundry-local-packaging.yml
@@ -1,9 +1,807 @@
-# Foundry Local SDK Packaging Pipeline (placeholder)
-trigger: none
+# Foundry Local Packaging Pipeline
+#
+# Builds Foundry Local Core from neutron-server (windows.ai.toolkit project),
+# then packages the C#, JS, Python, and Rust SDKs from this repo using the
+# built Core.
+#
+# Produces artifacts: flc-nuget, flc-nuget-winml, flc-wheels, flc-wheels-winml,
+# cs-sdk, cs-sdk-winml, js-sdk, js-sdk-winml, python-sdk, python-sdk-winml,
+# rust-sdk, rust-sdk-winml
 
-pool:
-  vmImage: 'windows-latest'
+pr:
+- main
+- releases/*
+
+name: $(Date:yyyyMMdd).$(Rev:r)
+
+parameters:
+- name: version
+  displayName: 'Package version'
+  type: string
+  default: '0.9.0'
+- name: prereleaseId
+  displayName: 'Pre-release identifier (e.g. rc1, beta).'
+ type: string + default: 'none' +- name: isRelease + displayName: 'Release build' + type: boolean + default: false +- name: neutronServerBranch + displayName: 'Foundry Local Core branch (windows.ai.toolkit/neutron-server)' + type: string + default: 'dev/FoundryLocalCore/main' + +variables: +- group: FoundryLocal-ESRP-Signing + +resources: + repositories: + - repository: neutron-server + type: git + name: windows.ai.toolkit/neutron-server + endpoint: AIFoundryLocal-WindowsAIToolkit-SC + ref: refs/heads/${{ parameters.neutronServerBranch }} + - repository: test-data-shared + type: git + name: windows.ai.toolkit/test-data-shared + endpoint: AIFoundryLocal-WindowsAIToolkit-SC + lfs: true + ref: refs/heads/main + - repository: 1ESPipelineTemplates + type: git + name: 1ESPipelineTemplates/1ESPipelineTemplates + ref: refs/tags/release + +extends: + template: v1/1ES.Official.PipelineTemplate.yml@1ESPipelineTemplates + parameters: + settings: + networkIsolationPolicy: Permissive + pool: + # default all windows jobs, individual jobs override + name: onnxruntime-Win-CPU-2022 + os: windows + sdl: + binskim: + break: false + scanOutputDirectoryOnly: true + sourceRepositoriesToScan: + include: + - repository: neutron-server + - repository: test-data-shared + stages: + # ── Build & Test FLC ── + - stage: build_core + displayName: 'Build & Test FLC' + jobs: + - job: flc_win_x64 + displayName: 'FLC win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-win-x64' + targetPath: '$(Build.ArtifactStagingDirectory)/native' + steps: + - checkout: neutron-server + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-core-steps.yml@self + parameters: + flavor: win-x64 + platform: x64 + + - job: flc_win_arm64 + displayName: 'FLC win-arm64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-win-arm64' + targetPath: '$(Build.ArtifactStagingDirectory)/native' + steps: + - checkout: neutron-server + clean: true + - template: .pipelines/templates/build-core-steps.yml@self + parameters: + flavor: win-arm64 + platform: arm64 + + - job: flc_linux_x64 + displayName: 'FLC linux-x64' + pool: + name: onnxruntime-Ubuntu2404-AMD-CPU + os: linux + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-linux-x64' + targetPath: '$(Build.ArtifactStagingDirectory)/native' + steps: + - checkout: neutron-server + clean: true + - template: .pipelines/templates/build-core-steps.yml@self + parameters: + flavor: linux-x64 + platform: x64 + + - job: flc_osx_arm64 + displayName: 'FLC osx-arm64' + pool: + name: Azure Pipelines + vmImage: 'macOS-14' + os: macOS + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-osx-arm64' + targetPath: '$(Build.ArtifactStagingDirectory)/native' + steps: + - checkout: neutron-server + clean: true + - template: .pipelines/templates/build-core-steps.yml@self + parameters: + flavor: osx-arm64 + platform: arm64 + + # ── Package FLC ── + - stage: package_core + displayName: 'Package FLC' + dependsOn: build_core + jobs: + - job: package_flc + displayName: 'Package FLC' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Build.ArtifactStagingDirectory)/flc-nuget' + - output: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: 
'$(Build.ArtifactStagingDirectory)/flc-wheels' + steps: + - checkout: neutron-server + clean: true + - task: DownloadPipelineArtifact@2 + inputs: + buildType: current + artifactName: 'flc-win-x64' + targetPath: '$(Pipeline.Workspace)/flc-win-x64' + - task: DownloadPipelineArtifact@2 + inputs: + buildType: current + artifactName: 'flc-win-arm64' + targetPath: '$(Pipeline.Workspace)/flc-win-arm64' + - task: DownloadPipelineArtifact@2 + inputs: + buildType: current + artifactName: 'flc-linux-x64' + targetPath: '$(Pipeline.Workspace)/flc-linux-x64' + - task: DownloadPipelineArtifact@2 + inputs: + buildType: current + artifactName: 'flc-osx-arm64' + targetPath: '$(Pipeline.Workspace)/flc-osx-arm64' + - task: PowerShell@2 + displayName: 'List downloaded platform artifacts' + inputs: + targetType: inline + script: | + foreach ($name in @('flc-win-x64','flc-win-arm64','flc-linux-x64','flc-osx-arm64')) { + $dir = "$(Pipeline.Workspace)/$name" + Write-Host "Contents of ${dir}:" + if (Test-Path $dir) { Get-ChildItem $dir -Recurse | ForEach-Object { Write-Host $_.FullName } } + else { Write-Host " (directory not found)" } + } + - template: .pipelines/templates/package-core-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: false + platforms: + - name: win-x64 + artifactName: flc-win-x64 + - name: win-arm64 + artifactName: flc-win-arm64 + - name: linux-x64 + artifactName: flc-linux-x64 + - name: osx-arm64 + artifactName: flc-osx-arm64 + + # ── Build C# SDK ── + - stage: build_cs + displayName: 'Build C# SDK' + dependsOn: package_core + jobs: + - job: cs_sdk + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + outputs: + - output: pipelineArtifact + artifactName: 'cs-sdk' + targetPath: '$(Build.ArtifactStagingDirectory)/cs-sdk' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-cs-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + + # ── Build JS SDK ── + - stage: build_js + displayName: 'Build JS SDK' + dependsOn: package_core + jobs: + - job: js_sdk + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + outputs: + - output: pipelineArtifact + artifactName: 'js-sdk' + targetPath: '$(Build.ArtifactStagingDirectory)/js-sdk' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-js-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + + # ── Build Python SDK ── + - stage: build_python + displayName: 'Build Python SDK' + dependsOn: package_core + jobs: + - job: python_sdk + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: '$(Pipeline.Workspace)/flc-wheels' + outputs: + - output: 
pipelineArtifact + artifactName: 'python-sdk' + targetPath: '$(Build.ArtifactStagingDirectory)/python-sdk' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-python-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: false + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + + # ── Build Rust SDK ── + - stage: build_rust + displayName: 'Build Rust SDK' + dependsOn: package_core + jobs: + - job: rust_sdk + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + outputs: + - output: pipelineArtifact + artifactName: 'rust-sdk' + targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-rust-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + + # ── Test C# SDK (win-x64) ── + - stage: test_cs + displayName: 'Test C# SDK' + dependsOn: build_cs + jobs: + - job: test_cs_win_x64 + displayName: 'Test C# (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-cs-steps.yml@self + parameters: + version: ${{ parameters.version }} + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + + # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. + # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. + + # ── Test JS SDK (win-x64) ── + - stage: test_js + displayName: 'Test JS SDK' + dependsOn: build_js + jobs: + - job: test_js_win_x64 + displayName: 'Test JS (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-js-steps.yml@self + parameters: + version: ${{ parameters.version }} + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + + # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. + # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. 
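+      # Note: with the 1ES pipeline templates, each templateContext input above
+      # is downloaded to its targetPath before the job's steps run, so the test
+      # templates can assume the FLC artifact is already on disk.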
+ + # ── Test Python SDK (win-x64) ── + - stage: test_python + displayName: 'Test Python SDK' + dependsOn: build_python + jobs: + - job: test_python_win_x64 + displayName: 'Test Python (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: '$(Pipeline.Workspace)/flc-wheels' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + version: ${{ parameters.version }} + isWinML: false + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + + # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. + # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. + + # ── Test Rust SDK (win-x64) ── + - stage: test_rust + displayName: 'Test Rust SDK' + dependsOn: build_rust + jobs: + - job: test_rust_win_x64 + displayName: 'Test Rust (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-rust-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + + # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. + # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. + + # ── Build & Test FLC (WinML) ── + - stage: build_core_winml + displayName: 'Build & Test FLC WinML' + dependsOn: [] + jobs: + - job: flc_winml_win_x64 + displayName: 'FLC win-x64 (WinML)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-winml-win-x64' + targetPath: '$(Build.ArtifactStagingDirectory)/native' + steps: + - checkout: neutron-server + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-core-steps.yml@self + parameters: + flavor: win-x64 + platform: x64 + isWinML: true + + - job: flc_winml_win_arm64 + displayName: 'FLC win-arm64 (WinML)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-winml-win-arm64' + targetPath: '$(Build.ArtifactStagingDirectory)/native' + steps: + - checkout: neutron-server + clean: true + - template: .pipelines/templates/build-core-steps.yml@self + parameters: + flavor: win-arm64 + platform: arm64 + isWinML: true + + # ── Package FLC (WinML) ── + - stage: package_core_winml + displayName: 'Package FLC WinML' + dependsOn: build_core_winml + jobs: + - job: package_flc_winml + displayName: 'Package FLC (WinML)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/flc-nuget' + - output: pipelineArtifact + artifactName: 'flc-wheels-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/flc-wheels' + steps: + - checkout: neutron-server + clean: true + - task: DownloadPipelineArtifact@2 + inputs: + buildType: current + artifactName: 'flc-winml-win-x64' + targetPath: 
'$(Pipeline.Workspace)/flc-winml-win-x64' + - task: DownloadPipelineArtifact@2 + inputs: + buildType: current + artifactName: 'flc-winml-win-arm64' + targetPath: '$(Pipeline.Workspace)/flc-winml-win-arm64' + - task: PowerShell@2 + displayName: 'List downloaded WinML platform artifacts' + inputs: + targetType: inline + script: | + foreach ($name in @('flc-winml-win-x64','flc-winml-win-arm64')) { + $dir = "$(Pipeline.Workspace)/$name" + Write-Host "Contents of ${dir}:" + if (Test-Path $dir) { Get-ChildItem $dir -Recurse | ForEach-Object { Write-Host $_.FullName } } + else { Write-Host " (directory not found)" } + } + - template: .pipelines/templates/package-core-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: true + platforms: + - name: win-x64 + artifactName: flc-winml-win-x64 + - name: win-arm64 + artifactName: flc-winml-win-arm64 + + # ── Build C# SDK (WinML) ── + - stage: build_cs_winml + displayName: 'Build C# SDK WinML' + dependsOn: package_core_winml + jobs: + - job: cs_sdk_winml + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + outputs: + - output: pipelineArtifact + artifactName: 'cs-sdk-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/cs-sdk-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-cs-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + outputDir: '$(Build.ArtifactStagingDirectory)/cs-sdk-winml' + + # ── Build JS SDK (WinML) ── + - stage: build_js_winml + displayName: 'Build JS SDK WinML' + dependsOn: package_core_winml + jobs: + - job: js_sdk_winml + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + outputs: + - output: pipelineArtifact + artifactName: 'js-sdk-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/js-sdk' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-js-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + + # ── Build Python SDK (WinML) ── + - stage: build_python_winml + displayName: 'Build Python SDK WinML' + dependsOn: package_core_winml + jobs: + - job: python_sdk_winml + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels-winml' + targetPath: '$(Pipeline.Workspace)/flc-wheels-winml' + outputs: + - output: pipelineArtifact + artifactName: 'python-sdk-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/python-sdk-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-python-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ 
parameters.prereleaseId }} + isWinML: true + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels-winml' + outputDir: '$(Build.ArtifactStagingDirectory)/python-sdk-winml' + + # ── Build Rust SDK (WinML) ── + - stage: build_rust_winml + displayName: 'Build Rust SDK WinML' + dependsOn: package_core_winml + jobs: + - job: rust_sdk_winml + displayName: 'Build' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + outputs: + - output: pipelineArtifact + artifactName: 'rust-sdk-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/build-rust-steps.yml@self + parameters: + version: ${{ parameters.version }} + isRelease: ${{ parameters.isRelease }} + prereleaseId: ${{ parameters.prereleaseId }} + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' + + # ── Test C# SDK WinML (win-x64) ── + - stage: test_cs_winml + displayName: 'Test C# SDK WinML' + dependsOn: build_cs_winml + jobs: + - job: test_cs_winml_win_x64 + displayName: 'Test C# WinML (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-cs-steps.yml@self + parameters: + version: ${{ parameters.version }} + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. + + # ── Test JS SDK WinML (win-x64) ── + - stage: test_js_winml + displayName: 'Test JS SDK WinML' + dependsOn: build_js_winml + jobs: + - job: test_js_winml_win_x64 + displayName: 'Test JS WinML (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-js-steps.yml@self + parameters: + version: ${{ parameters.version }} + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. + + # ── Test Python SDK WinML (win-x64) ── + - stage: test_python_winml + displayName: 'Test Python SDK WinML' + dependsOn: build_python_winml + jobs: + - job: test_python_winml_win_x64 + displayName: 'Test Python WinML (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels-winml' + targetPath: '$(Pipeline.Workspace)/flc-wheels-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + version: ${{ parameters.version }} + isWinML: true + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels-winml' + + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. 
+ + # ── Test Rust SDK WinML (win-x64) ── + - stage: test_rust_winml + displayName: 'Test Rust SDK WinML' + dependsOn: build_rust_winml + jobs: + - job: test_rust_winml_win_x64 + displayName: 'Test Rust WinML (win-x64)' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-rust-steps.yml@self + parameters: + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + + # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. -steps: -- script: echo "Foundry Local packaging pipeline - placeholder" - displayName: 'Placeholder' \ No newline at end of file diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml new file mode 100644 index 00000000..9f024c42 --- /dev/null +++ b/.pipelines/templates/build-core-steps.yml @@ -0,0 +1,194 @@ +# Steps to build a single Foundry Local Core native AOT binary. +# Parameterized by flavor (RID) and platform (arch). +# The parent job must checkout 'neutron-server'. +parameters: +- name: flavor + type: string # e.g. win-x64, linux-x64, osx-arm64 +- name: platform + type: string # e.g. x64, arm64 +- name: isWinML + type: boolean + default: false + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + # Multi-checkout places repos in subdirectories; single checkout places contents at root + $multiCheckout = "$(Build.SourcesDirectory)/neutron-server" + if (Test-Path $multiCheckout) { + $nsRoot = $multiCheckout + } else { + $nsRoot = "$(Build.SourcesDirectory)" + } + Write-Host "##vso[task.setvariable variable=nsRoot]$nsRoot" + Write-Host "neutron-server root: $nsRoot" + +- task: UseDotNet@2 + displayName: 'Use .NET SDK from global.json' + inputs: + packageType: sdk + useGlobalJson: true + workingDirectory: '$(nsRoot)' + +- task: PowerShell@2 + displayName: 'Override nuget.config' + inputs: + targetType: inline + script: | + $nugetConfig = @" + + + + + + + + + + + + + + + + + + "@ + Set-Content -Path "$(nsRoot)/nuget.config" -Value $nugetConfig + Write-Host "Updated nuget.config to use nuget.org, ORT-Nightly, and Neutron with mappings" + +- ${{ if eq(parameters.isWinML, true) }}: + - task: DotNetCoreCLI@2 + displayName: 'Restore FLC Core ${{ parameters.flavor }} (WinML)' + inputs: + command: restore + projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' + restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + feedsToUse: config + nugetConfigPath: '$(nsRoot)/nuget.config' + + - task: DotNetCoreCLI@2 + displayName: 'Build FLC Core ${{ parameters.flavor }} (WinML)' + inputs: + command: build + projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' + arguments: '--no-restore -r ${{ parameters.flavor }} -f net9.0-windows10.0.26100.0 /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + + - task: DotNetCoreCLI@2 + displayName: 'Publish FLC AOT ${{ parameters.flavor }} (WinML)' + inputs: + command: publish + projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' + arguments: '--no-restore --no-build 
-r ${{ parameters.flavor }} -f net9.0-windows10.0.26100.0 /p:Platform=${{ parameters.platform }} /p:Configuration=Release /p:PublishAot=true /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + publishWebProjects: false + zipAfterPublish: false + + - ${{ if eq(parameters.flavor, 'win-x64') }}: + - task: DotNetCoreCLI@2 + displayName: 'Restore FLC Tests ${{ parameters.flavor }} (WinML)' + inputs: + command: restore + projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' + restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + feedsToUse: config + nugetConfigPath: '$(nsRoot)/nuget.config' + + - task: DotNetCoreCLI@2 + displayName: 'Build FLC Tests ${{ parameters.flavor }} (WinML)' + inputs: + command: build + projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' + arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + + - task: DotNetCoreCLI@2 + displayName: 'Test FLC ${{ parameters.flavor }} (WinML)' + inputs: + command: test + projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' + arguments: '--no-build --configuration Release -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }}' + +- ${{ if eq(parameters.isWinML, false) }}: + - task: DotNetCoreCLI@2 + displayName: 'Restore FLC Core ${{ parameters.flavor }}' + inputs: + command: restore + projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' + restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:TargetFramework=net9.0' + feedsToUse: config + nugetConfigPath: '$(nsRoot)/nuget.config' + + - task: DotNetCoreCLI@2 + displayName: 'Build FLC Core ${{ parameters.flavor }}' + inputs: + command: build + projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' + arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release' + + - ${{ if eq(parameters.flavor, 'win-x64') }}: + - task: DotNetCoreCLI@2 + displayName: 'Restore FLC Tests ${{ parameters.flavor }}' + inputs: + command: restore + projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' + restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:TargetFramework=net9.0' + feedsToUse: config + nugetConfigPath: '$(nsRoot)/nuget.config' + + - task: DotNetCoreCLI@2 + displayName: 'Build FLC Tests ${{ parameters.flavor }}' + inputs: + command: build + projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' + arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release' + + - task: DotNetCoreCLI@2 + displayName: 'Test FLC ${{ parameters.flavor }}' + inputs: + command: test + projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' + arguments: '--no-build --configuration Release -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }}' + + - task: DotNetCoreCLI@2 + displayName: 'Publish FLC AOT ${{ parameters.flavor }}' + inputs: + command: publish + projects: 
'$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' + arguments: '--no-restore --no-build -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:Configuration=Release /p:PublishAot=true /p:TargetFramework=net9.0' + publishWebProjects: false + zipAfterPublish: false + +# Cleanup non-binary files +- task: PowerShell@2 + displayName: 'Cleanup publish artifacts' + inputs: + targetType: inline + script: | + Get-ChildItem "$(nsRoot)/artifacts/publish" -Recurse -Include "*.json", "*.xml" | + Remove-Item -Force + +# Stage the native binary for the artifact +- task: PowerShell@2 + displayName: 'Stage ${{ parameters.flavor }} binary' + inputs: + targetType: inline + script: | + $destDir = "$(Build.ArtifactStagingDirectory)/native" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + # WinML publishes additional files (e.g. WindowsAppRuntime Bootstrapper DLLs) + # beyond Microsoft.AI.Foundry.Local.Core.*. + $isWinML = "${{ parameters.isWinML }}" -eq "True" + if ($isWinML) { + Get-ChildItem "$(nsRoot)/artifacts/publish" -Recurse -File | + Where-Object { $_.Name -like "Microsoft.AI.Foundry.Local.Core.*" -or $_.Name -eq "Microsoft.WindowsAppRuntime.Bootstrap.dll" } | + Copy-Item -Destination $destDir -Force + } else { + Get-ChildItem "$(nsRoot)/artifacts/publish" -Recurse -File | + Where-Object { $_.Name -like "Microsoft.AI.Foundry.Local.Core.*" } | + Copy-Item -Destination $destDir -Force + } + Write-Host "Staged binaries:" + Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } + diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml new file mode 100644 index 00000000..978c2fff --- /dev/null +++ b/.pipelines/templates/build-cs-steps.yml @@ -0,0 +1,191 @@ +# Steps to build, sign, and pack the C# SDK NuGet package. +# When test-data-shared is checked out alongside self, ADO places repos under +# $(Build.SourcesDirectory)/. The self repo is 'Foundry-Local'. 
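+# For example, on an agent this multi-repo checkout typically yields the
+# following layout (illustrative):
+#   $(Build.SourcesDirectory)/Foundry-Local/sdk/cs/...   (self)
+#   $(Build.SourcesDirectory)/test-data-shared/...       (shared model cache)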
+parameters: +- name: version + type: string +- name: isRelease + type: boolean + default: false +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + displayName: 'Path to directory containing the FLC .nupkg' +- name: outputDir + type: string + default: '$(Build.ArtifactStagingDirectory)/cs-sdk' + displayName: 'Path to directory for the packed SDK' +- name: prereleaseId + type: string + default: '' +steps: +# Set paths for multi-repo checkout +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- task: UseDotNet@2 + displayName: 'Use .NET 9 SDK' + inputs: + packageType: sdk + version: '9.0.x' + +# Compute package version +- task: PowerShell@2 + displayName: 'Set package version' + inputs: + targetType: inline + script: | + $v = "${{ parameters.version }}" + $preId = "${{ parameters.prereleaseId }}" + if ($preId -ne '' -and $preId -ne 'none') { + $v = "$v-$preId" + } elseif ("${{ parameters.isRelease }}" -ne "True") { + $ts = Get-Date -Format "yyyyMMddHHmm" + $v = "$v-dev.$ts" + } + Write-Host "##vso[task.setvariable variable=packageVersion]$v" + Write-Host "Package version: $v" + +# List downloaded artifact for debugging +- task: PowerShell@2 + displayName: 'List downloaded FLC artifact' + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcNugetDir }}:" + Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +# Create a temporary NuGet.config that includes the local FLC feed +- task: PowerShell@2 + displayName: 'Create NuGet.config with local FLC feed' + inputs: + targetType: inline + script: | + $nugetConfig = @" + + + + + + + + + + "@ + # Determine the FLC version from the .nupkg filename + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + $flcVer = $nupkg.BaseName -replace '^Microsoft\.AI\.Foundry\.Local\.Core(\.WinML)?\.', '' + Write-Host "##vso[task.setvariable variable=resolvedFlcVersion]$flcVer" + Write-Host "Resolved FLC version: $flcVer" + + # Point the local NuGet feed at the directory that actually contains the .nupkg + $flcFeedDir = $nupkg.DirectoryName + $nugetConfig = $nugetConfig -replace [regex]::Escape("${{ parameters.flcNugetDir }}"), $flcFeedDir + $configPath = "$(Build.ArtifactStagingDirectory)/NuGet.config" + Set-Content -Path $configPath -Value $nugetConfig + Write-Host "##vso[task.setvariable variable=customNugetConfig]$configPath" + Write-Host "Local FLC feed directory: $flcFeedDir" + +- task: NuGetAuthenticate@1 + displayName: 'Authenticate NuGet feeds' + +- task: PowerShell@2 + displayName: 'Restore SDK' + inputs: + targetType: inline + script: | + $proj = "$(repoRoot)/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj" + if (-not (Test-Path $proj)) { throw "Project not found: $proj" } + dotnet restore $proj ` + --configfile "$(customNugetConfig)" ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Build SDK' + inputs: + targetType: inline + script: | + dotnet build 
"$(repoRoot)/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj" ` + --no-restore --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +# Discover target framework directory +- task: PowerShell@2 + displayName: 'Find target framework' + inputs: + targetType: inline + script: | + $base = "$(repoRoot)/sdk/cs/src/bin/Release" + # The SDK targets net9.0 (standard) or net9.0-windows10.0.26100.0 (WinML). + # Find whichever TFM directory was produced by the build. + $tfmDir = Get-ChildItem $base -Directory | Select-Object -First 1 + if (-not $tfmDir) { throw "No target framework directory found under $base" } + Write-Host "##vso[task.setvariable variable=TargetFramework]$($tfmDir.Name)" + Write-Host "Target framework: $($tfmDir.Name)" + +# Sign DLLs +- task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@5 + displayName: 'Sign SDK DLLs' + inputs: + ConnectedServiceName: 'OnnxrunTimeCodeSign_20240611' + UseMSIAuthentication: true + AppRegistrationClientId: '$(esrpClientId)' + AppRegistrationTenantId: '$(esrpTenantId)' + EsrpClientId: '$(esrpClientId)' + AuthAKVName: '$(esrpAkvName)' + AuthSignCertName: '$(esrpSignCertName)' + FolderPath: '$(repoRoot)/sdk/cs/src/bin/Release/$(TargetFramework)' + Pattern: '*.dll' + SessionTimeout: 90 + ServiceEndpointUrl: 'https://api.esrp.microsoft.com/api/v2' + MaxConcurrency: 25 + signConfigType: inlineSignParams + inlineOperation: | + [{"keyCode":"CP-230012","operationSetCode":"SigntoolSign","parameters":[{"parameterName":"OpusName","parameterValue":"Microsoft"},{"parameterName":"OpusInfo","parameterValue":"http://www.microsoft.com"},{"parameterName":"PageHash","parameterValue":"/NPH"},{"parameterName":"FileDigest","parameterValue":"/fd sha256"},{"parameterName":"TimeStamp","parameterValue":"/tr \"http://rfc3161.gtm.corp.microsoft.com/TSS/HttpTspServer\" /td sha256"}],"toolName":"signtool.exe","toolVersion":"6.2.9304.0"}] + +# Pack NuGet +- task: PowerShell@2 + displayName: 'Pack NuGet' + inputs: + targetType: inline + script: | + dotnet pack "$(repoRoot)/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj" ` + --no-build --no-restore --configuration Release ` + --output "${{ parameters.outputDir }}" ` + /p:PackageVersion=$(packageVersion) ` + /p:UseWinML=${{ parameters.isWinML }} ` + /p:IncludeSymbols=true ` + /p:SymbolPackageFormat=snupkg + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +# Sign NuGet package +- task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@5 + displayName: 'Sign SDK NuGet package' + inputs: + ConnectedServiceName: 'OnnxrunTimeCodeSign_20240611' + UseMSIAuthentication: true + AppRegistrationClientId: '$(esrpClientId)' + AppRegistrationTenantId: '$(esrpTenantId)' + EsrpClientId: '$(esrpClientId)' + AuthAKVName: '$(esrpAkvName)' + AuthSignCertName: '$(esrpSignCertName)' + FolderPath: '${{ parameters.outputDir }}' + Pattern: '*.nupkg' + SessionTimeout: 90 + ServiceEndpointUrl: 'https://api.esrp.microsoft.com/api/v2' + MaxConcurrency: 25 + signConfigType: inlineSignParams + inlineOperation: | + [{"keyCode":"CP-401405","operationSetCode":"NuGetSign","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"},{"keyCode":"CP-401405","operationSetCode":"NuGetVerify","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"}] diff --git a/.pipelines/templates/build-js-steps.yml b/.pipelines/templates/build-js-steps.yml new file mode 100644 index 00000000..e288bbce --- /dev/null +++ b/.pipelines/templates/build-js-steps.yml @@ -0,0 +1,156 @@ +# Steps to build and pack the JS 
SDK. +# When test-data-shared is checked out alongside self, ADO places repos under +# $(Build.SourcesDirectory)/. The self repo is 'Foundry-Local'. +parameters: +- name: version + type: string +- name: isRelease + type: boolean + default: false +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + default: '' + displayName: 'Path to directory containing the FLC .nupkg (for tests)' +- name: prereleaseId + type: string + default: '' +steps: +# Set paths for multi-repo checkout +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + Write-Host "Repo root: $repoRoot" + Write-Host "Test data: $testDataDir" + +- task: PowerShell@2 + displayName: 'List downloaded FLC artifact' + condition: and(succeeded(), ne('${{ parameters.flcNugetDir }}', '')) + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcNugetDir }}:" + Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +- task: NodeTool@0 + displayName: 'Use Node.js 20' + inputs: + versionSpec: '20.x' + +# Compute version +- task: PowerShell@2 + displayName: 'Set package version' + inputs: + targetType: inline + script: | + $v = "${{ parameters.version }}" + $preId = "${{ parameters.prereleaseId }}" + if ($preId -ne '' -and $preId -ne 'none') { + $v = "$v-$preId" + } elseif ("${{ parameters.isRelease }}" -ne "True") { + $ts = Get-Date -Format "yyyyMMddHHmm" + $v = "$v-dev.$ts" + } + Write-Host "##vso[task.setvariable variable=packageVersion]$v" + +# Install dependencies including native binaries (FLC, ORT, GenAI) from NuGet feeds +- task: Npm@1 + displayName: 'npm install' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'install' + +# Overwrite the FLC native binary with the one we just built +- task: PowerShell@2 + displayName: 'Overwrite FLC with pipeline-built binary' + condition: and(succeeded(), ne('${{ parameters.flcNugetDir }}', '')) + inputs: + targetType: inline + script: | + $os = 'win32' + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + $platformKey = "$os-$arch" + $rid = if ($arch -eq 'arm64') { 'win-arm64' } else { 'win-x64' } + + # Detect macOS/Linux + if ($IsLinux) { + $os = 'linux' + $platformKey = "$os-$arch" + $rid = "linux-$arch" + } elseif ($IsMacOS) { + $os = 'darwin' + $platformKey = "$os-$arch" + $rid = "osx-$arch" + } + + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + + # Extract the NuGet package (it's a zip) + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + # Overwrite FLC binary in the npm-installed location + $destDir = "$(repoRoot)/sdk/js/packages/@foundry-local-core/$platformKey" + $nativeDir = "$extractDir/runtimes/$rid/native" + if (Test-Path $nativeDir) { + Get-ChildItem $nativeDir -File | ForEach-Object { + Copy-Item 
$_.FullName -Destination "$destDir/$($_.Name)" -Force + Write-Host "Overwrote $($_.Name) with pipeline-built version" + } + } else { + Write-Warning "No native binaries found at $nativeDir for RID $rid" + } + + Write-Host "Final binaries in $destDir`:" + Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } + +- task: Npm@1 + displayName: 'npm version' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'version $(packageVersion) --no-git-tag-version --allow-same-version' + +- task: Npm@1 + displayName: 'npm build' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'run build' + +- ${{ if eq(parameters.isWinML, true) }}: + - task: Npm@1 + displayName: 'npm run pack:winml' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'run pack:winml' + +- ${{ else }}: + - task: Npm@1 + displayName: 'npm run pack' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'run pack' + +- task: PowerShell@2 + displayName: 'Stage artifact' + inputs: + targetType: inline + script: | + $destDir = "$(Build.ArtifactStagingDirectory)/js-sdk" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "$(repoRoot)/sdk/js/*.tgz" "$destDir/" diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml new file mode 100644 index 00000000..6fd0cd34 --- /dev/null +++ b/.pipelines/templates/build-python-steps.yml @@ -0,0 +1,146 @@ +# Steps to build and pack the Python SDK wheel. +# When test-data-shared is checked out alongside self, ADO places repos under +# $(Build.SourcesDirectory)/. The self repo is 'Foundry-Local'. +parameters: +- name: version + type: string +- name: isRelease + type: boolean + default: false +- name: isWinML + type: boolean + default: false +- name: flcWheelsDir + type: string + default: '' + displayName: 'Path to directory containing the FLC wheels (for overriding foundry-local-core)' +- name: outputDir + type: string + default: '$(Build.ArtifactStagingDirectory)/python-sdk' + displayName: 'Path to directory for the built wheel' +- name: prereleaseId + type: string + default: '' +steps: +# Set paths for multi-repo checkout +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- task: UsePythonVersion@0 + displayName: 'Use Python 3.12' + inputs: + versionSpec: '3.12' + +# List downloaded FLC wheels for debugging +- task: PowerShell@2 + displayName: 'List downloaded FLC wheels' + condition: and(succeeded(), ne('${{ parameters.flcWheelsDir }}', '')) + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcWheelsDir }}:" + Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +# Compute package version +- task: PowerShell@2 + displayName: 'Set package version' + inputs: + targetType: inline + script: | + $v = "${{ parameters.version }}" + $preId = "${{ parameters.prereleaseId }}" + if ($preId -ne '' -and $preId -ne 'none') { + $v = "$v-$preId" + } elseif ("${{ parameters.isRelease }}" -ne "True") { + $ts = Get-Date -Format "yyyyMMddHHmm" + $v = "$v-dev.$ts" + } + Write-Host "##vso[task.setvariable variable=packageVersion]$v" + Write-Host "Package 
version: $v" + +# Configure pip to use ORT-Nightly feed (plus PyPI as fallback) +- task: PowerShell@2 + displayName: 'Configure pip for Azure Artifacts' + inputs: + targetType: inline + script: | + pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.extra-index-url https://pypi.org/simple/ + pip config set global.pre true + +# Install the build tool +- script: python -m pip install build + displayName: 'Install build tool' + +# Write version file +- task: PowerShell@2 + displayName: 'Set SDK version' + inputs: + targetType: inline + script: | + Set-Content -Path "$(repoRoot)/sdk/python/src/version.py" -Value '__version__ = "$(packageVersion)"' + +# Install the FLC wheels from the pipeline if provided, so the build +# backend picks up the freshly-built foundry-local-core instead of +# pulling a stale one from the feed. +- task: PowerShell@2 + displayName: 'Pre-install pipeline-built FLC wheel' + condition: and(succeeded(), ne('${{ parameters.flcWheelsDir }}', '')) + inputs: + targetType: inline + script: | + # Determine platform wheel tag for the current machine + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'amd64' } + if ($IsLinux) { $platTag = "manylinux*x86_64" } + elseif ($IsMacOS) { $platTag = "macosx*$arch" } + else { $platTag = "win_$arch" } + + $filter = if ("${{ parameters.isWinML }}" -eq "True") { "foundry_local_core_winml*$platTag.whl" } else { "foundry_local_core-*$platTag.whl" } + $wheel = Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse -Filter $filter | Select-Object -First 1 + if ($wheel) { + Write-Host "Installing pipeline-built FLC wheel: $($wheel.FullName)" + pip install $($wheel.FullName) + } else { + Write-Warning "No FLC wheel found matching $filter in ${{ parameters.flcWheelsDir }}" + } + +# Build wheel — standard or WinML variant +# skip-native-deps=true omits foundry-local-core/onnxruntime pinned versions +# from the wheel metadata, since the pipeline pre-installs its own builds. +- ${{ if eq(parameters.isWinML, true) }}: + - script: python -m build --wheel -C winml=true -C skip-native-deps=true --outdir dist/ + displayName: 'Build wheel (WinML)' + workingDirectory: $(repoRoot)/sdk/python + +- ${{ else }}: + - script: python -m build --wheel -C skip-native-deps=true --outdir dist/ + displayName: 'Build wheel' + workingDirectory: $(repoRoot)/sdk/python + +# Install the built wheel +- task: PowerShell@2 + displayName: 'Install built wheel' + inputs: + targetType: inline + script: | + $wheel = (Get-ChildItem "$(repoRoot)/sdk/python/dist/*.whl" | Select-Object -First 1).FullName + pip install $wheel + +# Stage output +- task: PowerShell@2 + displayName: 'Stage wheel artifact' + inputs: + targetType: inline + script: | + $destDir = "${{ parameters.outputDir }}" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "$(repoRoot)/sdk/python/dist/*" "$destDir/" + Write-Host "Staged wheels:" + Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } diff --git a/.pipelines/templates/build-rust-steps.yml b/.pipelines/templates/build-rust-steps.yml new file mode 100644 index 00000000..efccfaa4 --- /dev/null +++ b/.pipelines/templates/build-rust-steps.yml @@ -0,0 +1,207 @@ +# Steps to build and package the Rust SDK crate. +# When test-data-shared is checked out alongside self, ADO places repos under +# $(Build.SourcesDirectory)/. The self repo is 'Foundry-Local'. 
+parameters: +- name: version + type: string +- name: isRelease + type: boolean + default: false +- name: prereleaseId + type: string + default: '' +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + displayName: 'Path to directory containing the FLC .nupkg' +- name: outputDir + type: string + default: '$(Build.ArtifactStagingDirectory)/rust-sdk' + displayName: 'Path to directory for the packaged crate' +steps: +# Set paths for multi-repo checkout +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +# Compute package version and patch Cargo.toml +- task: PowerShell@2 + displayName: 'Set crate version' + inputs: + targetType: inline + script: | + $v = "${{ parameters.version }}" + $preId = "${{ parameters.prereleaseId }}" + if ($preId -ne '' -and $preId -ne 'none') { + $v = "$v-$preId" + } elseif ("${{ parameters.isRelease }}" -ne "True") { + $ts = Get-Date -Format "yyyyMMddHHmm" + $v = "$v-dev.$ts" + } + Write-Host "Crate version: $v" + + # Patch Cargo.toml version field + $cargoPath = "$(repoRoot)/sdk/rust/Cargo.toml" + $content = Get-Content $cargoPath -Raw + $content = $content -replace '(?m)^version\s*=\s*"[^"]+"', "version = `"$v`"" + Set-Content -Path $cargoPath -Value $content + Write-Host "Patched Cargo.toml with version $v" + +# List downloaded FLC artifact for debugging +- task: PowerShell@2 + displayName: 'List downloaded FLC artifact' + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcNugetDir }}:" + Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +# Extract FLC native binaries from the pipeline-built .nupkg so that +# build.rs finds them already present and skips downloading from the feed. 
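+# (A .nupkg is an ordinary zip archive; the step below copies it to a .zip,
+# expands it, stages runtimes/<rid>/native, and exports the flcNativeDir
+# variable that later steps read via $(flcNativeDir).)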
+- task: PowerShell@2 + displayName: 'Extract FLC native binaries for Rust build' + inputs: + targetType: inline + script: | + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + Write-Host "Found NuGet package: $($nupkg.FullName)" + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract-rust" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + # Determine RID for this agent + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + if ($IsLinux) { + $rid = "linux-$arch" + } elseif ($IsMacOS) { + $rid = "osx-$arch" + } else { + $rid = "win-$arch" + } + + $nativeDir = "$extractDir/runtimes/$rid/native" + if (-not (Test-Path $nativeDir)) { throw "No native binaries found at $nativeDir for RID $rid" } + + # Stage them where build.rs can discover them + $flcNativeDir = "$(Build.ArtifactStagingDirectory)/flc-native-rust" + New-Item -ItemType Directory -Path $flcNativeDir -Force | Out-Null + Get-ChildItem $nativeDir -File | Copy-Item -Destination $flcNativeDir -Force + Write-Host "##vso[task.setvariable variable=flcNativeDir]$flcNativeDir" + Write-Host "Extracted FLC native binaries to $flcNativeDir`:" + Get-ChildItem $flcNativeDir | ForEach-Object { Write-Host " $($_.Name)" } + +# Install Rust toolchain +- task: PowerShell@2 + displayName: 'Install Rust toolchain' + inputs: + targetType: inline + script: | + if ($IsWindows -or (-not $IsLinux -and -not $IsMacOS)) { + Invoke-WebRequest -Uri https://win.rustup.rs/x86_64 -OutFile rustup-init.exe + .\rustup-init.exe -y --default-toolchain stable --profile minimal -c clippy,rustfmt + Remove-Item rustup-init.exe + $cargoPath = "$env:USERPROFILE\.cargo\bin" + } else { + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal -c clippy,rustfmt + $cargoPath = "$env:HOME/.cargo/bin" + } + Write-Host "##vso[task.prependpath]$cargoPath" + +# The .cargo/config.toml redirects crates-io to an Azure Artifacts feed +# for CFS compliance. Remove the redirect in CI so cargo can fetch from +# crates.io directly without Azure DevOps auth. 
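+# (One common shape for such a redirect, assumed here for illustration;
+# the real file is sdk/rust/.cargo/config.toml:
+#   [source.crates-io]
+#   replace-with = "azure-feed"
+#   [registries.azure-feed]
+#   index = "sparse+https://pkgs.dev.azure.com/<org>/<project>/_packaging/<feed>/Cargo/index/"
+# Deleting the file restores the default crates.io source.)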
+- task: PowerShell@2 + displayName: 'Use crates.io directly' + inputs: + targetType: inline + script: | + $configPath = "$(repoRoot)/sdk/rust/.cargo/config.toml" + if (Test-Path $configPath) { + Remove-Item $configPath + Write-Host "Removed .cargo/config.toml crates-io redirect" + } + +- task: PowerShell@2 + displayName: 'Check formatting' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + cargo fmt --all -- --check + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run clippy' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo clippy --all-targets $features -- -D warnings" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Build' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo build $features" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +# Overwrite the FLC core binary in cargo's OUT_DIR with the pipeline-built +# version so that integration tests use the freshly-built FLC. build.rs +# sets FOUNDRY_NATIVE_DIR to OUT_DIR, which the SDK checks at runtime. +- task: PowerShell@2 + displayName: 'Overwrite FLC binary with pipeline-built version' + inputs: + targetType: inline + script: | + # Find cargo's OUT_DIR for the foundry-local-sdk build script + $outDir = Get-ChildItem "$(repoRoot)/sdk/rust/target/debug/build" -Directory -Filter "foundry-local-sdk-*" -Recurse | + Where-Object { Test-Path "$($_.FullName)/out" } | + ForEach-Object { "$($_.FullName)/out" } | + Select-Object -First 1 + if (-not $outDir) { throw "Could not find cargo OUT_DIR for foundry-local-sdk" } + Write-Host "Cargo OUT_DIR: $outDir" + + # Copy pipeline-built FLC native binaries over the downloaded ones + Get-ChildItem "$(flcNativeDir)" -File -Filter "Microsoft.AI.Foundry.Local.Core.*" | ForEach-Object { + Copy-Item $_.FullName -Destination "$outDir/$($_.Name)" -Force + Write-Host "Overwrote $($_.Name) with pipeline-built version" + } + +# --allow-dirty allows packaging with uncommitted changes (build.rs modifies generated files) +- task: PowerShell@2 + displayName: 'Package crate' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo package $features --allow-dirty" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +# Stage output +- task: PowerShell@2 + displayName: 'Stage crate artifact' + inputs: + targetType: inline + script: | + $destDir = "${{ parameters.outputDir }}" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "$(repoRoot)/sdk/rust/target/package/*.crate" "$destDir/" + Write-Host "Staged crates:" + Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } diff --git a/.pipelines/templates/package-core-steps.yml b/.pipelines/templates/package-core-steps.yml new file mode 100644 index 00000000..e5755a21 --- /dev/null +++ b/.pipelines/templates/package-core-steps.yml @@ -0,0 +1,256 @@ +# Steps to collect per-platform FLC native binaries, organize into NuGet layout, +# pack + sign the NuGet package, and build Python wheels (wheel package name and +# platforms depend on the isWinML parameter). 
The parent job must download all +# platform artifacts and checkout neutron-server. +parameters: +- name: version + type: string +- name: isRelease + type: boolean + default: false +- name: isWinML + type: boolean + default: false +- name: prereleaseId + type: string + default: '' +- name: platforms + type: object # list of { name, artifactName } + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $nsRoot = "$(Build.SourcesDirectory)" + Write-Host "##vso[task.setvariable variable=nsRoot]$nsRoot" + +- task: PowerShell@2 + displayName: 'Organize native binaries' + inputs: + targetType: inline + script: | + $unifiedPath = "$(Build.ArtifactStagingDirectory)/unified" + New-Item -ItemType Directory -Path $unifiedPath -Force | Out-Null + + $platformsJson = @' + ${{ convertToJson(parameters.platforms) }} + '@ + $platforms = $platformsJson | ConvertFrom-Json + + foreach ($p in $platforms) { + $srcDir = "$(Pipeline.Workspace)/$($p.artifactName)" + Write-Host "Looking for artifacts at: $srcDir" + if (-not (Test-Path $srcDir)) { + throw "Artifact directory $srcDir does not exist. All platform artifacts must be present to produce a complete NuGet package." + } + $destDir = "$unifiedPath/runtimes/$($p.name)/native" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + # WinML artifacts include WindowsAppRuntime Bootstrapper DLLs in addition + # to Microsoft.AI.Foundry.Local.Core.*. + $isWinML = "${{ parameters.isWinML }}" -eq "True" + if ($isWinML) { + Get-ChildItem $srcDir -File | + Where-Object { $_.Name -like "Microsoft.AI.Foundry.Local.Core.*" -or $_.Name -eq "Microsoft.WindowsAppRuntime.Bootstrap.dll" } | + Copy-Item -Destination $destDir -Force + } else { + Get-ChildItem $srcDir -File | Where-Object { $_.Name -like "Microsoft.AI.Foundry.Local.Core.*" } | + Copy-Item -Destination $destDir -Force + } + Write-Host "Copied $($p.name) binaries to $destDir" + } + + # Copy build integration files from neutron-server + $nsRoot = "$(nsRoot)" + foreach ($dir in @("build", "buildTransitive")) { + $src = "$nsRoot/src/FoundryLocalCore/Core/$dir" + if (Test-Path $src) { + Copy-Item -Path $src -Destination "$unifiedPath/$dir" -Recurse -Force + } + } + $license = "$nsRoot/src/FoundryLocalCore/Core/LICENSE.txt" + if (Test-Path $license) { + Copy-Item $license "$unifiedPath/LICENSE.txt" -Force + } + +# Compute version +- task: PowerShell@2 + displayName: 'Set FLC package version' + inputs: + targetType: inline + script: | + $v = "${{ parameters.version }}" + $preId = "${{ parameters.prereleaseId }}" + if ($preId -ne '' -and $preId -ne 'none') { + $v = "$v-$preId" + } elseif ("${{ parameters.isRelease }}" -ne "True") { + $ts = Get-Date -Format "yyyyMMddHHmm" + $commitId = "$(Build.SourceVersion)".Substring(0, 8) + $v = "$v-dev-$ts-$commitId" + } + Write-Host "##vso[task.setvariable variable=flcVersion]$v" + Write-Host "FLC version: $v" + +# Pack NuGet +- task: PowerShell@2 + displayName: 'Pack FLC NuGet' + inputs: + targetType: inline + script: | + $nsRoot = "$(nsRoot)" + [xml]$propsXml = Get-Content "$nsRoot/Directory.Packages.props" + $pg = $propsXml.Project.PropertyGroup + + $outDir = "$(Build.ArtifactStagingDirectory)/flc-nuget" + New-Item -ItemType Directory -Path $outDir -Force | Out-Null + + if ("${{ parameters.isWinML }}" -eq "True") { + $nuspec = "$nsRoot/src/FoundryLocalCore/Core/WinMLNuget.nuspec" + $id = "Microsoft.AI.Foundry.Local.Core.WinML" + $ortVer = $pg.OnnxRuntimeFoundryVersionForWinML + $genaiVer = $pg.OnnxRuntimeGenAIWinML + 
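+        # (The property names read from $pg must match elements defined in
+        # Directory.Packages.props; a missing element yields an empty
+        # version string in the pack properties below.)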
$winAppSdkVer = $pg.WinAppSdkVersion + $props = "id=$id;version=$(flcVersion);commitId=$(Build.SourceVersion);OnnxRuntimeFoundryVersion=$ortVer;OnnxRuntimeGenAIWinML=$genaiVer;WinAppSdkVersion=$winAppSdkVer" + } else { + $nuspec = "$nsRoot/src/FoundryLocalCore/Core/NativeNuget.nuspec" + $id = "Microsoft.AI.Foundry.Local.Core" + $ortVer = $pg.OnnxRuntimeFoundryVersion + $genaiVer = $pg.OnnxRuntimeGenAIFoundryVersion + $props = "id=$id;version=$(flcVersion);commitId=$(Build.SourceVersion);OnnxRuntimeFoundryVersion=$ortVer;OnnxRuntimeGenAIFoundryVersion=$genaiVer" + } + + $nugetArgs = @( + 'pack', $nuspec, + '-OutputDirectory', $outDir, + '-BasePath', "$(Build.ArtifactStagingDirectory)/unified", + '-Properties', $props, + '-Symbols', '-SymbolPackageFormat', 'snupkg' + ) + Write-Host "Running: nuget $($nugetArgs -join ' ')" + & nuget $nugetArgs + if ($LASTEXITCODE -ne 0) { throw "NuGet pack failed" } + +# Sign NuGet package +- task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@5 + displayName: 'Sign FLC NuGet package' + inputs: + ConnectedServiceName: 'OnnxrunTimeCodeSign_20240611' + UseMSIAuthentication: true + AppRegistrationClientId: '$(esrpClientId)' + AppRegistrationTenantId: '$(esrpTenantId)' + EsrpClientId: '$(esrpClientId)' + AuthAKVName: '$(esrpAkvName)' + AuthSignCertName: '$(esrpSignCertName)' + FolderPath: '$(Build.ArtifactStagingDirectory)/flc-nuget' + Pattern: '*.nupkg' + SessionTimeout: 90 + ServiceEndpointUrl: 'https://api.esrp.microsoft.com/api/v2' + MaxConcurrency: 25 + signConfigType: inlineSignParams + inlineOperation: | + [{"keyCode":"CP-401405","operationSetCode":"NuGetSign","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"},{"keyCode":"CP-401405","operationSetCode":"NuGetVerify","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"}] + +# Build Python wheels from the NuGet package +- task: PowerShell@2 + displayName: 'Build foundry_local_core Python Wheels' + inputs: + targetType: inline + script: | + $stagingDir = "$(Build.ArtifactStagingDirectory)/flc-wheels" + New-Item -ItemType Directory -Path $stagingDir -Force | Out-Null + + $isWinML = "${{ parameters.isWinML }}" -eq "True" + + # Find and extract the NuGet package (.nupkg is a zip archive) + $nupkgFilter = if ($isWinML) { "Microsoft.AI.Foundry.Local.Core.WinML*.nupkg" } else { "Microsoft.AI.Foundry.Local.Core*.nupkg" } + $nupkg = Get-ChildItem "$(Build.ArtifactStagingDirectory)/flc-nuget" -Filter $nupkgFilter | Where-Object { $_.Name -notlike "*.snupkg" } | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found matching $nupkgFilter" } + Write-Host "Found NuGet package: $($nupkg.Name)" + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extracted" + $nupkgZip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item -Path $nupkg.FullName -Destination $nupkgZip -Force + Expand-Archive -Path $nupkgZip -DestinationPath $extractDir -Force + + # Convert NuGet version to PEP 440 + # NuGet: 0.9.0-dev-202603271723-bb400310 → PEP 440: 0.9.0.dev202603271723 + # The commit hash is dropped because .devN requires N to be a pure integer. 
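+      # Further mappings implied by the branches below:
+      #   0.9.0       → 0.9.0      (no suffix)
+      #   0.9.0-rc1   → 0.9.0rc1   (two parts: suffix appended directly)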
+ $nupkgVersion = $nupkg.BaseName -replace '^Microsoft\.AI\.Foundry\.Local\.Core(\.WinML)?\.', '' + $parts = $nupkgVersion -split '-' + $pyVersion = if ($parts.Count -ge 3 -and $parts[1] -eq 'dev') { "$($parts[0]).dev$($parts[2])" } + elseif ($parts.Count -eq 2) { "$($parts[0])$($parts[1])" } + else { $parts[0] } + Write-Host "Python package version: $pyVersion" + + $packageName = if ($isWinML) { "foundry_local_core_winml" } else { "foundry_local_core" } + + if ($isWinML) { + $platforms = @( + @{rid="win-x64"; pyKey="bin"; tag="win_amd64"}, + @{rid="win-arm64"; pyKey="bin"; tag="win_arm64"} + ) + } else { + $platforms = @( + @{rid="win-x64"; pyKey="bin"; tag="win_amd64"}, + @{rid="win-arm64"; pyKey="bin"; tag="win_arm64"}, + @{rid="linux-x64"; pyKey="bin"; tag="manylinux_2_28_x86_64"}, + @{rid="osx-arm64"; pyKey="bin"; tag="macosx_11_0_arm64"} + ) + } + + foreach ($p in $platforms) { + $nativeSrc = "$extractDir/runtimes/$($p.rid)/native" + if (-not (Test-Path $nativeSrc)) { + Write-Warning "No native binaries found for $($p.rid) — skipping." + continue + } + + $wheelRoot = "$(Build.ArtifactStagingDirectory)/wheels-build/flc_wheel_$($p.tag)" + $pkgDir = "$wheelRoot/$packageName" + New-Item -ItemType Directory -Path "$pkgDir/$($p.pyKey)" -Force | Out-Null + "" | Set-Content -Encoding ascii "$pkgDir/__init__.py" + Get-ChildItem $nativeSrc -File | Copy-Item -Destination "$pkgDir/$($p.pyKey)" + + $normalizedName = $packageName.Replace('_', '-') + $wheelTag = "py3-none-$($p.tag)" + $distInfoName = "$packageName-$pyVersion" + $wheelName = "$distInfoName-$wheelTag.whl" + $distInfoDir = "$wheelRoot/$distInfoName.dist-info" + New-Item -ItemType Directory -Path $distInfoDir -Force | Out-Null + + $utf8NoBom = [System.Text.UTF8Encoding]::new($false) + + [System.IO.File]::WriteAllText("$distInfoDir/WHEEL", + "Wheel-Version: 1.0`nGenerator: custom`nRoot-Is-Purelib: false`nTag: $wheelTag`n", $utf8NoBom) + + [System.IO.File]::WriteAllText("$distInfoDir/METADATA", + "Metadata-Version: 2.1`nName: $normalizedName`nVersion: $pyVersion`n", $utf8NoBom) + + $recordLines = Get-ChildItem $wheelRoot -Recurse -File | ForEach-Object { + $rel = $_.FullName.Substring($wheelRoot.Length + 1).Replace('\', '/') + $raw = (Get-FileHash $_.FullName -Algorithm SHA256).Hash + $bytes = [byte[]]::new($raw.Length / 2) + for ($i = 0; $i -lt $raw.Length; $i += 2) { $bytes[$i/2] = [Convert]::ToByte($raw.Substring($i, 2), 16) } + $b64 = [Convert]::ToBase64String($bytes) -replace '\+','-' -replace '/','_' -replace '=','' + "$rel,sha256=$b64,$($_.Length)" + } + $recordContent = ($recordLines + "$distInfoName.dist-info/RECORD,,") -join "`n" + [System.IO.File]::WriteAllText("$distInfoDir/RECORD", $recordContent, $utf8NoBom) + + $wheelPath = "$stagingDir/$wheelName" + Add-Type -AssemblyName System.IO.Compression.FileSystem + $zip = [System.IO.Compression.ZipFile]::Open($wheelPath, 'Create') + try { + Get-ChildItem $wheelRoot -Recurse -File | ForEach-Object { + $rel = $_.FullName.Substring($wheelRoot.Length + 1).Replace('\', '/') + [System.IO.Compression.ZipFileExtensions]::CreateEntryFromFile($zip, $_.FullName, $rel) | Out-Null + } + } finally { + $zip.Dispose() + } + Write-Host "Created wheel: $wheelName" + } + + Write-Host "`nAll wheels:" + Get-ChildItem $stagingDir -Filter "*.whl" | ForEach-Object { Write-Host " $($_.Name)" } diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml new file mode 100644 index 00000000..f7dc1aff --- /dev/null +++ b/.pipelines/templates/test-cs-steps.yml @@ -0,0 +1,116 @@ 
+# Lightweight test-only steps for the C# SDK. +# Builds from source and runs tests — no signing or NuGet packing. +parameters: +- name: version + type: string +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + displayName: 'Path to directory containing the FLC .nupkg' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- task: UseDotNet@2 + displayName: 'Use .NET 9 SDK' + inputs: + packageType: sdk + version: '9.0.x' + +- task: PowerShell@2 + displayName: 'List downloaded FLC artifact' + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcNugetDir }}:" + Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + + Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + + Write-Host "Installing Windows App SDK Runtime..." + & $installerPath --quiet --force + + if ($LASTEXITCODE -ne 0) { + Write-Error "Installation failed with exit code $LASTEXITCODE" + exit 1 + } + + Write-Host "Windows App SDK Runtime installed successfully." + errorActionPreference: 'stop' + +- task: PowerShell@2 + displayName: 'Create NuGet.config with local FLC feed' + inputs: + targetType: inline + script: | + $nugetConfig = @" + + + + + + + + + "@ + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + $flcVer = $nupkg.BaseName -replace '^Microsoft\.AI\.Foundry\.Local\.Core(\.WinML)?\.', '' + Write-Host "##vso[task.setvariable variable=resolvedFlcVersion]$flcVer" + + $flcFeedDir = $nupkg.DirectoryName + $nugetConfig = $nugetConfig -replace [regex]::Escape("${{ parameters.flcNugetDir }}"), $flcFeedDir + $configPath = "$(Build.ArtifactStagingDirectory)/NuGet.config" + Set-Content -Path $configPath -Value $nugetConfig + Write-Host "##vso[task.setvariable variable=customNugetConfig]$configPath" + +- task: NuGetAuthenticate@1 + displayName: 'Authenticate NuGet feeds' + +- task: PowerShell@2 + displayName: 'Restore & build tests' + inputs: + targetType: inline + script: | + dotnet restore "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --configfile "$(customNugetConfig)" ` + /p:UseWinML=${{ parameters.isWinML }} ` + /p:FoundryLocalCoreVersion=$(resolvedFlcVersion) + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + + dotnet build "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --no-restore --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run SDK tests' + inputs: + targetType: inline + script: | + dotnet test 
"$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --no-build --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml new file mode 100644 index 00000000..41ef7f62 --- /dev/null +++ b/.pipelines/templates/test-js-steps.yml @@ -0,0 +1,121 @@ +# Lightweight test-only steps for the JS SDK. +# Builds from source and runs tests — no npm pack or artifact staging. +parameters: +- name: version + type: string +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + displayName: 'Path to directory containing the FLC .nupkg' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + + Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + + Write-Host "Installing Windows App SDK Runtime..." + & $installerPath --quiet --force + + if ($LASTEXITCODE -ne 0) { + Write-Error "Installation failed with exit code $LASTEXITCODE" + exit 1 + } + + Write-Host "Windows App SDK Runtime installed successfully." 
+ errorActionPreference: 'stop' + +- task: PowerShell@2 + displayName: 'List downloaded FLC artifact' + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcNugetDir }}:" + Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +- task: NodeTool@0 + displayName: 'Use Node.js 20' + inputs: + versionSpec: '20.x' + +- task: Npm@1 + displayName: 'npm install' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'install' + +# Overwrite the FLC native binary with the pipeline-built one +- task: PowerShell@2 + displayName: 'Overwrite FLC with pipeline-built binary' + inputs: + targetType: inline + script: | + $os = 'win32' + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + $platformKey = "$os-$arch" + $rid = if ($arch -eq 'arm64') { 'win-arm64' } else { 'win-x64' } + + if ($IsLinux) { + $os = 'linux' + $platformKey = "$os-$arch" + $rid = "linux-$arch" + } elseif ($IsMacOS) { + $os = 'darwin' + $platformKey = "$os-$arch" + $rid = "osx-$arch" + } + + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + $destDir = "$(repoRoot)/sdk/js/packages/@foundry-local-core/$platformKey" + $nativeDir = "$extractDir/runtimes/$rid/native" + if (Test-Path $nativeDir) { + Get-ChildItem $nativeDir -File | ForEach-Object { + Copy-Item $_.FullName -Destination "$destDir/$($_.Name)" -Force + Write-Host "Overwrote $($_.Name) with pipeline-built version" + } + } else { + Write-Warning "No native binaries found at $nativeDir for RID $rid" + } + +- task: Npm@1 + displayName: 'npm build' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'run build' + +- task: Npm@1 + displayName: 'npm test' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'test' + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml new file mode 100644 index 00000000..f54a9464 --- /dev/null +++ b/.pipelines/templates/test-python-steps.yml @@ -0,0 +1,133 @@ +# Lightweight test-only steps for the Python SDK. +# Builds from source and runs tests — no artifact staging. 
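+# flcWheelsDir may be left empty; the wheel-listing and pre-install steps
+# below are conditioned on it being non-empty and are skipped otherwise.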
+parameters: +- name: version + type: string +- name: isWinML + type: boolean + default: false +- name: flcWheelsDir + type: string + default: '' + displayName: 'Path to directory containing the FLC wheels' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + + Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + + Write-Host "Installing Windows App SDK Runtime..." + & $installerPath --quiet --force + + if ($LASTEXITCODE -ne 0) { + Write-Error "Installation failed with exit code $LASTEXITCODE" + exit 1 + } + + Write-Host "Windows App SDK Runtime installed successfully." + errorActionPreference: 'stop' + +- task: UsePythonVersion@0 + displayName: 'Use Python 3.12' + inputs: + versionSpec: '3.12' + +- task: PowerShell@2 + displayName: 'List downloaded FLC wheels' + condition: and(succeeded(), ne('${{ parameters.flcWheelsDir }}', '')) + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcWheelsDir }}:" + Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +- task: PowerShell@2 + displayName: 'Configure pip for Azure Artifacts' + inputs: + targetType: inline + script: | + pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.extra-index-url https://pypi.org/simple/ + pip config set global.pre true + +- script: python -m pip install build + displayName: 'Install build tool' + +- task: PowerShell@2 + displayName: 'Set SDK version' + inputs: + targetType: inline + script: | + Set-Content -Path "$(repoRoot)/sdk/python/src/version.py" -Value '__version__ = "${{ parameters.version }}"' + +- task: PowerShell@2 + displayName: 'Pre-install pipeline-built FLC wheel' + condition: and(succeeded(), ne('${{ parameters.flcWheelsDir }}', '')) + inputs: + targetType: inline + script: | + # Determine platform wheel tag for the current machine + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'amd64' } + if ($IsLinux) { $platTag = "manylinux*x86_64" } + elseif ($IsMacOS) { $platTag = "macosx*$arch" } + else { $platTag = "win_$arch" } + + $filter = if ("${{ parameters.isWinML }}" -eq "True") { "foundry_local_core_winml*$platTag.whl" } else { "foundry_local_core-*$platTag.whl" } + $wheel = Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse -Filter $filter | Select-Object -First 1 + if ($wheel) { + Write-Host "Installing pipeline-built FLC wheel: $($wheel.FullName)" + pip install $($wheel.FullName) + } else { + Write-Warning "No FLC wheel found matching $filter" + } + +# Install ORT native packages from the ORT-Nightly feed. +# skip-native-deps strips these from the SDK wheel metadata, so they +# must be installed explicitly for tests to locate the native binaries. 
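+# (The stripped prefixes are foundry-local-core, onnxruntime-core, and
+# onnxruntime-genai-core; see _NATIVE_DEP_PREFIXES in sdk/python/build_backend.py.)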
+- script: pip install onnxruntime-core onnxruntime-genai-core
+  displayName: 'Install ORT native packages'
+
+- ${{ if not(parameters.isWinML) }}:
+  - script: python -m build --wheel -C skip-native-deps=true --outdir dist/
+    displayName: 'Build wheel'
+    workingDirectory: $(repoRoot)/sdk/python
+
+- ${{ if parameters.isWinML }}:
+  - script: python -m build --wheel -C winml=true -C skip-native-deps=true --outdir dist/
+    displayName: 'Build wheel (WinML)'
+    workingDirectory: $(repoRoot)/sdk/python
+
+- task: PowerShell@2
+  displayName: 'Install built wheel'
+  inputs:
+    targetType: inline
+    script: |
+      $wheel = (Get-ChildItem "$(repoRoot)/sdk/python/dist/*.whl" | Select-Object -First 1).FullName
+      pip install $wheel
+
+# Quote the version constraints so the shell does not treat '>' as redirection.
+- script: pip install coverage "pytest>=7.0.0" "pytest-timeout>=2.1.0"
+  displayName: 'Install test dependencies'
+
+- script: python -m pytest test/ -v
+  displayName: 'Run tests'
+  workingDirectory: $(repoRoot)/sdk/python
+  env:
+    TF_BUILD: 'true'
diff --git a/.pipelines/templates/test-rust-steps.yml b/.pipelines/templates/test-rust-steps.yml
new file mode 100644
index 00000000..31bfd75e
--- /dev/null
+++ b/.pipelines/templates/test-rust-steps.yml
@@ -0,0 +1,159 @@
+# Lightweight test-only steps for the Rust SDK.
+# Builds from source and runs tests — no cargo package or artifact staging.
+parameters:
+- name: isWinML
+  type: boolean
+  default: false
+- name: flcNugetDir
+  type: string
+  displayName: 'Path to directory containing the FLC .nupkg'
+
+steps:
+- task: PowerShell@2
+  displayName: 'Set source paths'
+  inputs:
+    targetType: inline
+    script: |
+      $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local"
+      $testDataDir = "$(Build.SourcesDirectory)/test-data-shared"
+      Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot"
+      Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir"
+
+- ${{ if eq(parameters.isWinML, true) }}:
+  - task: PowerShell@2
+    displayName: 'Install Windows App SDK Runtime'
+    inputs:
+      targetType: 'inline'
+      script: |
+        $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe"
+        $installerPath = "$env:TEMP\windowsappruntimeinstall.exe"
+
+        Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..."
+        Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath
+
+        Write-Host "Installing Windows App SDK Runtime..."
+        & $installerPath --quiet --force
+
+        if ($LASTEXITCODE -ne 0) {
+          Write-Error "Installation failed with exit code $LASTEXITCODE"
+          exit 1
+        }
+
+        Write-Host "Windows App SDK Runtime installed successfully."
+ errorActionPreference: 'stop' + +- task: PowerShell@2 + displayName: 'List downloaded FLC artifact' + inputs: + targetType: inline + script: | + Write-Host "Contents of ${{ parameters.flcNugetDir }}:" + Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } + +# Extract FLC native binaries from the pipeline-built .nupkg +- task: PowerShell@2 + displayName: 'Extract FLC native binaries' + inputs: + targetType: inline + script: | + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract-rust" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + if ($IsLinux) { + $rid = "linux-$arch" + } elseif ($IsMacOS) { + $rid = "osx-$arch" + } else { + $rid = "win-$arch" + } + + $nativeDir = "$extractDir/runtimes/$rid/native" + if (-not (Test-Path $nativeDir)) { throw "No native binaries found at $nativeDir for RID $rid" } + + $flcNativeDir = "$(Build.ArtifactStagingDirectory)/flc-native-rust" + New-Item -ItemType Directory -Path $flcNativeDir -Force | Out-Null + Get-ChildItem $nativeDir -File | Copy-Item -Destination $flcNativeDir -Force + Write-Host "##vso[task.setvariable variable=flcNativeDir]$flcNativeDir" + Write-Host "Extracted FLC native binaries for $rid" + +- task: PowerShell@2 + displayName: 'Install Rust toolchain' + inputs: + targetType: inline + script: | + if ($IsWindows -or (-not $IsLinux -and -not $IsMacOS)) { + Invoke-WebRequest -Uri https://win.rustup.rs/x86_64 -OutFile rustup-init.exe + .\rustup-init.exe -y --default-toolchain stable --profile minimal -c clippy,rustfmt + Remove-Item rustup-init.exe + $cargoPath = "$env:USERPROFILE\.cargo\bin" + } else { + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal -c clippy,rustfmt + $cargoPath = "$env:HOME/.cargo/bin" + } + Write-Host "##vso[task.prependpath]$cargoPath" + +- task: PowerShell@2 + displayName: 'Use crates.io directly' + inputs: + targetType: inline + script: | + $configPath = "$(repoRoot)/sdk/rust/.cargo/config.toml" + if (Test-Path $configPath) { + Remove-Item $configPath + Write-Host "Removed .cargo/config.toml crates-io redirect" + } + +- task: PowerShell@2 + displayName: 'Build' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo build $features" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +# Overwrite FLC binary with pipeline-built version +- task: PowerShell@2 + displayName: 'Overwrite FLC binary with pipeline-built version' + inputs: + targetType: inline + script: | + $outDir = Get-ChildItem "$(repoRoot)/sdk/rust/target/debug/build" -Directory -Filter "foundry-local-sdk-*" -Recurse | + Where-Object { Test-Path "$($_.FullName)/out" } | + ForEach-Object { "$($_.FullName)/out" } | + Select-Object -First 1 + if (-not $outDir) { throw "Could not find cargo OUT_DIR for foundry-local-sdk" } + + Get-ChildItem "$(flcNativeDir)" -File -Filter "Microsoft.AI.Foundry.Local.Core.*" | 
ForEach-Object { + Copy-Item $_.FullName -Destination "$outDir/$($_.Name)" -Force + Write-Host "Overwrote $($_.Name) with pipeline-built version" + } + +- task: PowerShell@2 + displayName: 'Run unit tests' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo test --lib $features" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run integration tests' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo test --tests $features -- --include-ignored --test-threads=1 --nocapture" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + env: + TF_BUILD: 'true' diff --git a/sdk/cs/README.md b/sdk/cs/README.md index 92ad34b7..7037814b 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -48,7 +48,7 @@ dotnet build src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=true ### Triggering EP download -EP download can be time-consuming. Call `EnsureEpsDownloadedAsync` early (after initialization) to separate the download step from catalog access: +EP download can be time-consuming. Call `DownloadAndRegisterEpsAsync` early (after initialization) to separate the download step from catalog access: ```csharp // Initialize the manager first (see Quick Start) @@ -56,7 +56,7 @@ await FoundryLocalManager.CreateAsync( new Configuration { AppName = "my-app" }, NullLogger.Instance); -await FoundryLocalManager.Instance.EnsureEpsDownloadedAsync(); +await FoundryLocalManager.Instance.DownloadAndRegisterEpsAsync(); // Now catalog access won't trigger an EP download var catalog = await FoundryLocalManager.Instance.GetCatalogAsync(); diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md index 93f162b7..9e5be8aa 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md @@ -98,7 +98,7 @@ The model catalog. The catalog is populated on first use. If you are using a WinML build this will trigger a one-off execution provider download if not already done. - It is recommended to call [FoundryLocalManager.EnsureEpsDownloadedAsync(Nullable<CancellationToken>)](./microsoft.ai.foundry.local.foundrylocalmanager.md#ensureepsdownloadedasyncnullablecancellationtoken) first to separate out the two steps. + It is recommended to call [FoundryLocalManager.DownloadAndRegisterEpsAsync(Nullable<CancellationToken>)](./microsoft.ai.foundry.local.foundrylocalmanager.md#downloadandregisterepsasyncnullablecancellationtoken) first to separate out the two steps. ### **StartWebServiceAsync(Nullable<CancellationToken>)** @@ -141,9 +141,9 @@ Optional cancellation token. [Task](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task)
Task stopping the web service. -### **EnsureEpsDownloadedAsync(Nullable<CancellationToken>)** +### **DownloadAndRegisterEpsAsync(Nullable<CancellationToken>)** -Ensure execution providers are downloaded and registered. +Download and register execution providers. Only relevant when using WinML. Execution provider download can be time consuming due to the size of the packages. @@ -151,7 +151,7 @@ Ensure execution providers are downloaded and registered. on subsequent calls. ```csharp -public Task EnsureEpsDownloadedAsync(Nullable ct) +public Task DownloadAndRegisterEpsAsync(Nullable ct) ``` #### Parameters diff --git a/sdk/cs/src/Detail/CoreInterop.cs b/sdk/cs/src/Detail/CoreInterop.cs index c5eba7ec..a7a43447 100644 --- a/sdk/cs/src/Detail/CoreInterop.cs +++ b/sdk/cs/src/Detail/CoreInterop.cs @@ -124,6 +124,15 @@ internal CoreInterop(Configuration config, ILogger logger) _logger = logger ?? throw new ArgumentNullException(nameof(logger)); var request = new CoreInteropRequest { Params = config.AsDictionary() }; + +#if IS_WINML + // WinML builds require bootstrapping the Windows App Runtime + if (!request.Params.ContainsKey("Bootstrap")) + { + request.Params["Bootstrap"] = "true"; + } +#endif + var response = ExecuteCommand("initialize", request); if (response.Error != null) diff --git a/sdk/cs/src/FoundryLocalManager.cs b/sdk/cs/src/FoundryLocalManager.cs index 639be3a2..d3e4fb79 100644 --- a/sdk/cs/src/FoundryLocalManager.cs +++ b/sdk/cs/src/FoundryLocalManager.cs @@ -99,7 +99,7 @@ public static async Task CreateAsync(Configuration configuration, ILogger logger /// /// The catalog is populated on first use. /// If you are using a WinML build this will trigger a one-off execution provider download if not already done. - /// It is recommended to call first to separate out the two steps. + /// It is recommended to call first to separate out the two steps. /// public async Task GetCatalogAsync(CancellationToken? ct = null) { @@ -135,7 +135,7 @@ await Utils.CallWithExceptionHandling(() => StopWebServiceImplAsync(ct), } /// - /// Ensure execution providers are downloaded and registered. + /// Download and register execution providers. /// Only relevant when using WinML. /// /// Execution provider download can be time consuming due to the size of the packages. @@ -143,10 +143,10 @@ await Utils.CallWithExceptionHandling(() => StopWebServiceImplAsync(ct), /// on subsequent calls. /// /// Optional cancellation token. - public async Task EnsureEpsDownloadedAsync(CancellationToken? ct = null) + public async Task DownloadAndRegisterEpsAsync(CancellationToken? ct = null) { - await Utils.CallWithExceptionHandling(() => EnsureEpsDownloadedImplAsync(ct), - "Error ensuring execution providers downloaded.", _logger) + await Utils.CallWithExceptionHandling(() => DownloadAndRegisterEpsImplAsync(ct), + "Error downloading and registering execution providers.", _logger) .ConfigureAwait(false); } @@ -259,16 +259,16 @@ private async Task StopWebServiceImplAsync(CancellationToken? ct = null) Urls = null; } - private async Task EnsureEpsDownloadedImplAsync(CancellationToken? ct = null) + private async Task DownloadAndRegisterEpsImplAsync(CancellationToken? ct = null) { using var disposable = await asyncLock.LockAsync().ConfigureAwait(false); CoreInteropRequest? 
input = null; - var result = await _coreInterop!.ExecuteCommandAsync("ensure_eps_downloaded", input, ct); + var result = await _coreInterop!.ExecuteCommandAsync("download_and_register_eps", input, ct).ConfigureAwait(false); if (result.Error != null) { - throw new FoundryLocalException($"Error ensuring execution providers downloaded: {result.Error}", _logger); + throw new FoundryLocalException($"Error downloading and registering execution providers: {result.Error}", _logger); } } diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj index 8f03be7d..bec1cc22 100644 --- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj +++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj @@ -13,7 +13,7 @@ https://github.com/microsoft/Foundry-Local git - net8.0 + net9.0 win-x64;win-arm64;linux-x64;linux-arm64;osx-arm64 true @@ -87,7 +87,8 @@ Microsoft Foundry Local SDK for WinML Microsoft.AI.Foundry.Local.WinML Microsoft.AI.Foundry.Local.WinML - net8.0-windows10.0.26100.0 + $(DefineConstants);IS_WINML + net9.0-windows10.0.26100.0 win-x64;win-arm64 10.0.17763.0 diff --git a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj index 5f0c7cf2..fe0dfcd2 100644 --- a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj +++ b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj @@ -1,7 +1,7 @@  - net10.0 + net9.0 enable enable false @@ -19,10 +19,9 @@ - net10.0-windows10.0.26100.0 + net9.0-windows10.0.26100.0 10.0.17763.0 None - true diff --git a/sdk/js/docs/classes/FoundryLocalManager.md b/sdk/js/docs/classes/FoundryLocalManager.md index 63bb2dd1..dc4908a6 100644 --- a/sdk/js/docs/classes/FoundryLocalManager.md +++ b/sdk/js/docs/classes/FoundryLocalManager.md @@ -87,6 +87,29 @@ Error - If the web service is not running. *** +### downloadAndRegisterEps() + +```ts +downloadAndRegisterEps(): void; +``` + +Download and register execution providers. +Only relevant when using the WinML variant. On non-WinML builds this is a no-op. + +Call this after initialization to trigger EP download before accessing the catalog, +so that hardware-accelerated execution providers (e.g. QNN for NPU) are available +when listing and loading models. + +#### Returns + +`void` + +#### Throws + +Error - If execution provider download or registration fails. + +*** + ### startWebService() ```ts diff --git a/sdk/js/src/foundryLocalManager.ts b/sdk/js/src/foundryLocalManager.ts index bc408f78..6da0bcc7 100644 --- a/sdk/js/src/foundryLocalManager.ts +++ b/sdk/js/src/foundryLocalManager.ts @@ -61,6 +61,24 @@ export class FoundryLocalManager { return this._urls; } + /** + * Download and register execution providers. + * Only relevant when using the WinML variant. On non-WinML builds this is a no-op. + * + * Call this after initialization to trigger EP download before accessing the catalog, + * so that hardware-accelerated execution providers (e.g. QNN for NPU) are available + * when listing and loading models. + * + * @throws Error - If execution provider download or registration fails. + */ + public downloadAndRegisterEps(): void { + try { + this.coreInterop.executeCommand("download_and_register_eps"); + } catch (error) { + throw new Error(`Error downloading and registering execution providers: ${error}`); + } + } + /** * Starts the local web service. * Use the `urls` property to retrieve the bound addresses after the service has started. 
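A usage sketch of the new JS method, for reviewers (illustrative only: the import path and construction call are assumptions; only `downloadAndRegisterEps()` itself comes from the hunk above):

```ts
// Hypothetical consumer of the new API. The import path and constructor
// are assumed; downloadAndRegisterEps() is the method added in this patch.
import { FoundryLocalManager } from "foundry-local-sdk";

const manager = new FoundryLocalManager(); // assumed construction
// Trigger the potentially slow EP download once, up front, so that later
// catalog access does not stall on it. No-op on non-WinML builds.
manager.downloadAndRegisterEps();
```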
diff --git a/sdk/python/build_backend.py b/sdk/python/build_backend.py index b4b91a1b..3789501b 100644 --- a/sdk/python/build_backend.py +++ b/sdk/python/build_backend.py @@ -18,9 +18,14 @@ python -m build --wheel -C winml=true +Skip native deps (use pre-installed foundry-local-core / ORT / GenAI):: + + python -m build --wheel -C skip-native-deps=true + Environment variable fallback (useful in CI pipelines):: FOUNDRY_VARIANT=winml python -m build --wheel + FOUNDRY_SKIP_NATIVE_DEPS=1 python -m build --wheel """ from __future__ import annotations @@ -46,6 +51,13 @@ _STANDARD_NAME = 'name = "foundry-local-sdk"' _WINML_NAME = 'name = "foundry-local-sdk-winml"' +# Native binary package prefixes to strip when skip-native-deps is active. +_NATIVE_DEP_PREFIXES = ( + "foundry-local-core", + "onnxruntime-core", + "onnxruntime-genai-core", +) + # --------------------------------------------------------------------------- # Variant detection @@ -63,6 +75,23 @@ def _is_winml(config_settings: dict | None) -> bool: return os.environ.get("FOUNDRY_VARIANT", "").lower() == "winml" +def _is_skip_native_deps(config_settings: dict | None) -> bool: + """Return True when native binary dependencies should be omitted. + + When set, ``foundry-local-core``, ``onnxruntime-core``, and + ``onnxruntime-genai-core`` are stripped from requirements.txt so the + wheel is built against whatever versions are already installed. + Useful in CI pipelines that pre-install pipeline-built native wheels. + + Checks ``config_settings["skip-native-deps"]`` first + (set via ``-C skip-native-deps=true``), then falls back to the + ``FOUNDRY_SKIP_NATIVE_DEPS`` environment variable. + """ + if config_settings and str(config_settings.get("skip-native-deps", "")).lower() == "true": + return True + return os.environ.get("FOUNDRY_SKIP_NATIVE_DEPS", "").lower() in ("1", "true") + + # --------------------------------------------------------------------------- # In-place patching context manager # --------------------------------------------------------------------------- @@ -96,58 +125,88 @@ def _patch_for_winml() -> Generator[None, None, None]: _REQUIREMENTS.write_text(requirements_original, encoding="utf-8") +@contextlib.contextmanager +def _strip_native_deps() -> Generator[None, None, None]: + """Temporarily remove native binary deps from requirements.txt. + + Lines starting with any prefix in ``_NATIVE_DEP_PREFIXES`` (case- + insensitive) are removed. The file is restored in the ``finally`` + block. + """ + requirements_original = _REQUIREMENTS.read_text(encoding="utf-8") + try: + filtered = [ + line for line in requirements_original.splitlines(keepends=True) + if not any(line.lstrip().lower().startswith(p) for p in _NATIVE_DEP_PREFIXES) + ] + _REQUIREMENTS.write_text("".join(filtered), encoding="utf-8") + yield + finally: + _REQUIREMENTS.write_text(requirements_original, encoding="utf-8") + + +def _apply_patches(config_settings: dict | None): + """Return a context manager that applies the appropriate patches.""" + winml = _is_winml(config_settings) + skip_native = _is_skip_native_deps(config_settings) + + @contextlib.contextmanager + def _combined(): + # Stack contexts: WinML swaps requirements first, then strip_native + # removes native deps from whatever requirements are active. 
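+        # (Equivalently, contextlib.ExitStack could enter the two managers
+        # conditionally; the explicit branches below keep the pairing obvious.)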
+ if winml and skip_native: + with _patch_for_winml(), _strip_native_deps(): + yield + elif winml: + with _patch_for_winml(): + yield + elif skip_native: + with _strip_native_deps(): + yield + else: + yield + + return _combined() + + # --------------------------------------------------------------------------- # PEP 517 hook delegation # --------------------------------------------------------------------------- def get_requires_for_build_wheel(config_settings=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.get_requires_for_build_wheel(config_settings) - return _sb.get_requires_for_build_wheel(config_settings) + with _apply_patches(config_settings): + return _sb.get_requires_for_build_wheel(config_settings) def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.prepare_metadata_for_build_wheel(metadata_directory, config_settings) - return _sb.prepare_metadata_for_build_wheel(metadata_directory, config_settings) + with _apply_patches(config_settings): + return _sb.prepare_metadata_for_build_wheel(metadata_directory, config_settings) def build_wheel(wheel_directory, config_settings=None, metadata_directory=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.build_wheel(wheel_directory, config_settings, metadata_directory) - return _sb.build_wheel(wheel_directory, config_settings, metadata_directory) + with _apply_patches(config_settings): + return _sb.build_wheel(wheel_directory, config_settings, metadata_directory) def get_requires_for_build_editable(config_settings=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.get_requires_for_build_editable(config_settings) - return _sb.get_requires_for_build_editable(config_settings) + with _apply_patches(config_settings): + return _sb.get_requires_for_build_editable(config_settings) def prepare_metadata_for_build_editable(metadata_directory, config_settings=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.prepare_metadata_for_build_editable(metadata_directory, config_settings) - return _sb.prepare_metadata_for_build_editable(metadata_directory, config_settings) + with _apply_patches(config_settings): + return _sb.prepare_metadata_for_build_editable(metadata_directory, config_settings) def build_editable(wheel_directory, config_settings=None, metadata_directory=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.build_editable(wheel_directory, config_settings, metadata_directory) - return _sb.build_editable(wheel_directory, config_settings, metadata_directory) + with _apply_patches(config_settings): + return _sb.build_editable(wheel_directory, config_settings, metadata_directory) def get_requires_for_build_sdist(config_settings=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.get_requires_for_build_sdist(config_settings) - return _sb.get_requires_for_build_sdist(config_settings) + with _apply_patches(config_settings): + return _sb.get_requires_for_build_sdist(config_settings) def build_sdist(sdist_directory, config_settings=None): diff --git a/sdk/python/src/detail/core_interop.py b/sdk/python/src/detail/core_interop.py index 7a6bb08c..4f4ddb67 100644 --- a/sdk/python/src/detail/core_interop.py +++ b/sdk/python/src/detail/core_interop.py @@ -205,6 +205,9 @@ def __init__(self, config: Configuration): if sys.platform.startswith("win"): bootstrap_dll = paths.core_dir / 
"Microsoft.WindowsAppRuntime.Bootstrap.dll" if bootstrap_dll.exists(): + # Pre-load so the DLL is already in the process when + # C# P/Invoke resolves it during Bootstrap.Initialize(). + ctypes.CDLL(str(bootstrap_dll)) if config.additional_settings is None: config.additional_settings = {} if "Bootstrap" not in config.additional_settings: diff --git a/sdk/python/src/foundry_local_manager.py b/sdk/python/src/foundry_local_manager.py index 4486eaf1..4c02a127 100644 --- a/sdk/python/src/foundry_local_manager.py +++ b/sdk/python/src/foundry_local_manager.py @@ -71,17 +71,17 @@ def _initialize(self): self._model_load_manager = ModelLoadManager(self._core_interop, external_service_url) self.catalog = Catalog(self._model_load_manager, self._core_interop) - def ensure_eps_downloaded(self) -> None: - """Ensure execution providers are downloaded and registered (synchronous). + def download_and_register_eps(self) -> None: + """Download and register execution providers. Only relevant when using WinML. Raises: - FoundryLocalException: If execution provider download fails. + FoundryLocalException: If execution provider download or registration fails. """ - result = self._core_interop.execute_command("ensure_eps_downloaded") + result = self._core_interop.execute_command("download_and_register_eps") if result.error is not None: - raise FoundryLocalException(f"Error ensuring execution providers downloaded: {result.error}") + raise FoundryLocalException(f"Error downloading and registering execution providers: {result.error}") def start_web_service(self): """Start the optional web service. diff --git a/sdk/rust/src/foundry_local_manager.rs b/sdk/rust/src/foundry_local_manager.rs index f80a7176..9cf2477f 100644 --- a/sdk/rust/src/foundry_local_manager.rs +++ b/sdk/rust/src/foundry_local_manager.rs @@ -133,4 +133,18 @@ impl FoundryLocalManager { .clear(); Ok(()) } + + /// Download and register execution providers. + /// + /// Only relevant when using the WinML variant. On non-WinML builds this + /// is a no-op. Call this after initialisation to trigger EP download + /// before accessing the catalog, so that hardware-accelerated execution + /// providers (e.g. QNN for NPU) are available when listing and loading + /// models. + pub async fn download_and_register_eps(&self) -> Result<()> { + self.core + .execute_command_async("download_and_register_eps".into(), None) + .await?; + Ok(()) + } } From da088d0eb50bb4a9d3c2798689fec157d7f90ade Mon Sep 17 00:00:00 2001 From: Samuel Kemp Date: Tue, 31 Mar 2026 20:34:26 +0100 Subject: [PATCH 13/83] Add named regions and tutorial samples for docs externalization (#563) To ensure that our docs on MS Learn have accurate code samples, we will update the docs so they consume the code from this repo. In this repo, we will run a test to ensure that the samples work - if there is a break in the samples then this should be fix before a PR can be merged in. 
- Add named regions to 15 existing sample files (CS, JS, Python, Rust) - Create 3 missing Python samples (audio-transcription, web-server, langchain-integration) - Create 16 tutorial sample projects (4 tutorials x 4 languages) - Add samples-integration-test.yml CI workflow --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../workflows/samples-integration-test.yml | 260 ++++ .gitignore | 3 + .../Directory.Packages.props | 3 + samples/cs/GettingStarted/README.md | 61 - .../AudioTranscriptionExample.csproj | 39 - .../FoundryLocalWebServer.csproj | 33 - .../FoundrySamplesXPlatform.sln | 53 - .../HelloFoundryLocalSdk.csproj | 32 - .../LiveAudioTranscriptionExample.csproj | 32 - .../ModelManagementExample.csproj | 33 - .../ToolCallingFoundryLocalSdk.csproj | 31 - .../ToolCallingFoundryLocalWebServer.csproj | 32 - samples/cs/GettingStarted/nuget.config | 7 - .../AudioTranscriptionExample.csproj | 36 - .../FoundryLocalWebServer.csproj | 30 - .../windows/FoundrySamplesWinML.sln | 71 -- .../HelloFoundryLocalSdk.csproj | 30 - .../LiveAudioTranscriptionExample.csproj | 30 - .../ModelManagementExample.csproj | 30 - .../ToolCallingFoundryLocalSdk.csproj | 30 - .../ToolCallingFoundryLocalWebServer.csproj | 30 - samples/cs/README.md | 43 + .../{GettingStarted/src => }/Shared/Utils.cs | 0 .../AudioTranscriptionExample.csproj | 55 + .../AudioTranscriptionExample.sln | 34 + .../Program.cs | 23 +- .../Recording.mp3 | Bin .../FoundryLocalWebServer.csproj | 52 + .../FoundryLocalWebServer.sln | 34 + .../Program.cs | 14 +- .../LiveAudioTranscriptionExample.csproj | 55 + .../LiveAudioTranscriptionExample.sln | 34 + .../Program.cs | 0 .../ModelManagementExample.csproj | 48 + .../ModelManagementExample.sln | 34 + .../Program.cs | 0 .../NativeChatCompletions.csproj | 48 + .../NativeChatCompletions.sln | 34 + .../Program.cs | 16 +- samples/cs/nuget.config | 22 + .../Program.cs | 18 +- .../ToolCallingFoundryLocalSdk.csproj | 48 + .../ToolCallingFoundryLocalSdk.sln | 34 + .../Program.cs | 6 +- .../ToolCallingFoundryLocalWebServer.csproj | 52 + .../ToolCallingFoundryLocalWebServer.sln | 34 + samples/cs/tutorial-chat-assistant/Program.cs | 101 ++ .../TutorialChatAssistant.csproj | 50 + .../TutorialChatAssistant.sln | 34 + .../tutorial-document-summarizer/Program.cs | 109 ++ .../TutorialDocumentSummarizer.csproj | 50 + .../TutorialDocumentSummarizer.sln | 34 + samples/cs/tutorial-tool-calling/Program.cs | 228 ++++ .../TutorialToolCalling.csproj | 50 + .../TutorialToolCalling.sln | 34 + samples/cs/tutorial-voice-to-text/Program.cs | 104 ++ .../TutorialVoiceToText.csproj | 50 + .../TutorialVoiceToText.sln | 34 + samples/js/audio-transcription-example/app.js | 19 +- .../chat-and-audio-foundry-local/src/app.js | 2 +- .../js/langchain-integration-example/app.js | 12 +- samples/js/native-chat-completions/app.js | 14 + .../js/tool-calling-foundry-local/src/app.js | 16 +- samples/js/tutorial-chat-assistant/app.js | 84 ++ .../js/tutorial-chat-assistant/package.json | 9 + .../js/tutorial-document-summarizer/app.js | 84 ++ .../tutorial-document-summarizer/package.json | 9 + samples/js/tutorial-tool-calling/app.js | 186 +++ samples/js/tutorial-tool-calling/package.json | 9 + samples/js/tutorial-voice-to-text/app.js | 78 ++ .../js/tutorial-voice-to-text/package.json | 9 + samples/js/web-server-example/app.js | 10 + .../python/audio-transcription/Recording.mp3 | Bin 0 -> 329760 bytes .../audio-transcription/requirements.txt | 1 + samples/python/audio-transcription/src/app.py | 39 + 
samples/python/functioncalling/README.md | 53 - samples/python/functioncalling/fl_tools.ipynb | 362 ------ samples/python/hello-foundry-local/README.md | 18 - samples/python/hello-foundry-local/src/app.py | 33 - .../langchain-integration/requirements.txt | 4 + .../python/langchain-integration/src/app.py | 59 + .../native-chat-completions/requirements.txt | 1 + .../python/native-chat-completions/src/app.py | 54 + samples/python/summarize/.vscode/launch.json | 14 - samples/python/summarize/README.md | 38 - samples/python/summarize/requirements.txt | 3 - samples/python/summarize/summarize.py | 86 -- samples/python/tool-calling/requirements.txt | 1 + samples/python/tool-calling/src/app.py | 182 +++ .../tutorial-chat-assistant/requirements.txt | 1 + .../python/tutorial-chat-assistant/src/app.py | 71 ++ .../requirements.txt | 1 + .../tutorial-document-summarizer/src/app.py | 78 ++ .../tutorial-tool-calling/requirements.txt | 1 + .../python/tutorial-tool-calling/src/app.py | 187 +++ .../tutorial-voice-to-text/requirements.txt | 1 + .../python/tutorial-voice-to-text/src/app.py | 78 ++ samples/python/web-server/requirements.txt | 2 + samples/python/web-server/src/app.py | 59 + samples/rag/README.md | 206 ---- samples/rag/foundry-local-architecture.md | 116 -- samples/rag/rag_foundrylocal_demo.ipynb | 1042 ----------------- samples/rust/Cargo.toml | 4 + .../audio-transcription-example/Recording.mp3 | Bin 0 -> 329760 bytes .../audio-transcription-example/src/main.rs | 25 +- .../rust/foundry-local-webserver/src/main.rs | 12 +- .../rust/native-chat-completions/src/main.rs | 26 +- .../tool-calling-foundry-local/src/main.rs | 20 +- .../rust/tutorial-chat-assistant/Cargo.toml | 11 + .../rust/tutorial-chat-assistant/src/main.rs | 102 ++ .../tutorial-document-summarizer/Cargo.toml | 10 + .../tutorial-document-summarizer/src/main.rs | 157 +++ samples/rust/tutorial-tool-calling/Cargo.toml | 11 + .../rust/tutorial-tool-calling/src/main.rs | 330 ++++++ .../rust/tutorial-voice-to-text/Cargo.toml | 10 + .../rust/tutorial-voice-to-text/src/main.rs | 110 ++ 116 files changed, 4036 insertions(+), 2646 deletions(-) create mode 100644 .github/workflows/samples-integration-test.yml rename samples/cs/{GettingStarted => }/Directory.Packages.props (74%) delete mode 100644 samples/cs/GettingStarted/README.md delete mode 100644 samples/cs/GettingStarted/cross-platform/AudioTranscriptionExample/AudioTranscriptionExample.csproj delete mode 100644 samples/cs/GettingStarted/cross-platform/FoundryLocalWebServer/FoundryLocalWebServer.csproj delete mode 100644 samples/cs/GettingStarted/cross-platform/FoundrySamplesXPlatform.sln delete mode 100644 samples/cs/GettingStarted/cross-platform/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj delete mode 100644 samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj delete mode 100644 samples/cs/GettingStarted/cross-platform/ModelManagementExample/ModelManagementExample.csproj delete mode 100644 samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj delete mode 100644 samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj delete mode 100644 samples/cs/GettingStarted/nuget.config delete mode 100644 samples/cs/GettingStarted/windows/AudioTranscriptionExample/AudioTranscriptionExample.csproj delete mode 100644 samples/cs/GettingStarted/windows/FoundryLocalWebServer/FoundryLocalWebServer.csproj delete mode 100644 
samples/cs/GettingStarted/windows/FoundrySamplesWinML.sln delete mode 100644 samples/cs/GettingStarted/windows/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj delete mode 100644 samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj delete mode 100644 samples/cs/GettingStarted/windows/ModelManagementExample/ModelManagementExample.csproj delete mode 100644 samples/cs/GettingStarted/windows/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj delete mode 100644 samples/cs/GettingStarted/windows/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj create mode 100644 samples/cs/README.md rename samples/cs/{GettingStarted/src => }/Shared/Utils.cs (100%) create mode 100644 samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj create mode 100644 samples/cs/audio-transcription-example/AudioTranscriptionExample.sln rename samples/cs/{GettingStarted/src/AudioTranscriptionExample => audio-transcription-example}/Program.cs (78%) rename samples/cs/{GettingStarted/src/AudioTranscriptionExample => audio-transcription-example}/Recording.mp3 (100%) create mode 100644 samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj create mode 100644 samples/cs/foundry-local-web-server/FoundryLocalWebServer.sln rename samples/cs/{GettingStarted/src/FoundryLocalWebServer => foundry-local-web-server}/Program.cs (91%) create mode 100644 samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj create mode 100644 samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln rename samples/cs/{GettingStarted/src/LiveAudioTranscriptionExample => live-audio-transcription-example}/Program.cs (100%) create mode 100644 samples/cs/model-management-example/ModelManagementExample.csproj create mode 100644 samples/cs/model-management-example/ModelManagementExample.sln rename samples/cs/{GettingStarted/src/ModelManagementExample => model-management-example}/Program.cs (100%) create mode 100644 samples/cs/native-chat-completions/NativeChatCompletions.csproj create mode 100644 samples/cs/native-chat-completions/NativeChatCompletions.sln rename samples/cs/{GettingStarted/src/HelloFoundryLocalSdk => native-chat-completions}/Program.cs (88%) create mode 100644 samples/cs/nuget.config rename samples/cs/{GettingStarted/src/ToolCallingFoundryLocalSdk => tool-calling-foundry-local-sdk}/Program.cs (94%) create mode 100644 samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj create mode 100644 samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.sln rename samples/cs/{GettingStarted/src/ToolCallingFoundryLocalWebServer => tool-calling-foundry-local-web-server}/Program.cs (98%) create mode 100644 samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj create mode 100644 samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.sln create mode 100644 samples/cs/tutorial-chat-assistant/Program.cs create mode 100644 samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj create mode 100644 samples/cs/tutorial-chat-assistant/TutorialChatAssistant.sln create mode 100644 samples/cs/tutorial-document-summarizer/Program.cs create mode 100644 samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj create mode 100644 samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.sln create mode 100644 samples/cs/tutorial-tool-calling/Program.cs create mode 100644 
samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj create mode 100644 samples/cs/tutorial-tool-calling/TutorialToolCalling.sln create mode 100644 samples/cs/tutorial-voice-to-text/Program.cs create mode 100644 samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj create mode 100644 samples/cs/tutorial-voice-to-text/TutorialVoiceToText.sln create mode 100644 samples/js/tutorial-chat-assistant/app.js create mode 100644 samples/js/tutorial-chat-assistant/package.json create mode 100644 samples/js/tutorial-document-summarizer/app.js create mode 100644 samples/js/tutorial-document-summarizer/package.json create mode 100644 samples/js/tutorial-tool-calling/app.js create mode 100644 samples/js/tutorial-tool-calling/package.json create mode 100644 samples/js/tutorial-voice-to-text/app.js create mode 100644 samples/js/tutorial-voice-to-text/package.json create mode 100644 samples/python/audio-transcription/Recording.mp3 create mode 100644 samples/python/audio-transcription/requirements.txt create mode 100644 samples/python/audio-transcription/src/app.py delete mode 100644 samples/python/functioncalling/README.md delete mode 100644 samples/python/functioncalling/fl_tools.ipynb delete mode 100644 samples/python/hello-foundry-local/README.md delete mode 100644 samples/python/hello-foundry-local/src/app.py create mode 100644 samples/python/langchain-integration/requirements.txt create mode 100644 samples/python/langchain-integration/src/app.py create mode 100644 samples/python/native-chat-completions/requirements.txt create mode 100644 samples/python/native-chat-completions/src/app.py delete mode 100644 samples/python/summarize/.vscode/launch.json delete mode 100644 samples/python/summarize/README.md delete mode 100644 samples/python/summarize/requirements.txt delete mode 100644 samples/python/summarize/summarize.py create mode 100644 samples/python/tool-calling/requirements.txt create mode 100644 samples/python/tool-calling/src/app.py create mode 100644 samples/python/tutorial-chat-assistant/requirements.txt create mode 100644 samples/python/tutorial-chat-assistant/src/app.py create mode 100644 samples/python/tutorial-document-summarizer/requirements.txt create mode 100644 samples/python/tutorial-document-summarizer/src/app.py create mode 100644 samples/python/tutorial-tool-calling/requirements.txt create mode 100644 samples/python/tutorial-tool-calling/src/app.py create mode 100644 samples/python/tutorial-voice-to-text/requirements.txt create mode 100644 samples/python/tutorial-voice-to-text/src/app.py create mode 100644 samples/python/web-server/requirements.txt create mode 100644 samples/python/web-server/src/app.py delete mode 100644 samples/rag/README.md delete mode 100644 samples/rag/foundry-local-architecture.md delete mode 100644 samples/rag/rag_foundrylocal_demo.ipynb create mode 100644 samples/rust/audio-transcription-example/Recording.mp3 create mode 100644 samples/rust/tutorial-chat-assistant/Cargo.toml create mode 100644 samples/rust/tutorial-chat-assistant/src/main.rs create mode 100644 samples/rust/tutorial-document-summarizer/Cargo.toml create mode 100644 samples/rust/tutorial-document-summarizer/src/main.rs create mode 100644 samples/rust/tutorial-tool-calling/Cargo.toml create mode 100644 samples/rust/tutorial-tool-calling/src/main.rs create mode 100644 samples/rust/tutorial-voice-to-text/Cargo.toml create mode 100644 samples/rust/tutorial-voice-to-text/src/main.rs diff --git a/.github/workflows/samples-integration-test.yml 
b/.github/workflows/samples-integration-test.yml new file mode 100644 index 00000000..c844ca12 --- /dev/null +++ b/.github/workflows/samples-integration-test.yml @@ -0,0 +1,260 @@ +name: Samples Build Check + +on: + pull_request: + paths: + - 'samples/**' + - '.github/workflows/samples-integration-test.yml' + push: + paths: + - 'samples/**' + - '.github/workflows/samples-integration-test.yml' + branches: + - main + workflow_dispatch: + +permissions: + contents: read + +jobs: + # ── Python Samples ────────────────────────────────────────────────── + python-samples: + runs-on: ${{ matrix.platform }}-latest + strategy: + fail-fast: false + matrix: + platform: [windows, macos] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + clean: true + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Configure pip for Azure Artifacts + run: | + pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.extra-index-url https://pypi.org/simple/ + pip config set global.pre true + + - name: Build and install SDK from source + working-directory: sdk/python + shell: pwsh + run: | + python -m pip install build + echo '__version__ = "0.0.0-dev"' > src/version.py + python -m build --wheel --outdir dist/ + $wheel = (Get-ChildItem dist/*.whl | Select-Object -First 1).FullName + pip install $wheel + + - name: Install sample dependencies + shell: pwsh + run: | + Get-ChildItem samples/python/*/requirements.txt -ErrorAction SilentlyContinue | ForEach-Object { + Write-Host "Installing dependencies for $($_.Directory.Name)..." + pip install -r $_.FullName + } + + - name: Syntax check Python samples + shell: pwsh + run: | + $failed = @() + $samples = Get-ChildItem samples/python/*/src/app.py -ErrorAction SilentlyContinue + foreach ($sample in $samples) { + $name = $sample.Directory.Parent.Name + Write-Host "=== Checking: $name ===" + python -m py_compile $sample.FullName + if ($LASTEXITCODE -ne 0) { + Write-Host "FAILED: $name" + $failed += $name + } else { + Write-Host "OK: $name" + } + } + if ($failed.Count -gt 0) { + Write-Error "Failed syntax checks: $($failed -join ', ')" + exit 1 + } + + # ── JavaScript Samples ────────────────────────────────────────────── + js-samples: + runs-on: ${{ matrix.platform }}-latest + strategy: + fail-fast: false + matrix: + platform: [windows, macos] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + clean: true + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: '20.x' + + - name: Setup .NET SDK for NuGet authentication + uses: actions/setup-dotnet@v5 + with: + dotnet-version: '10.0.x' + + - name: Build SDK from source + working-directory: sdk/js + run: | + npm install + npm run build + npm link + + - name: Syntax check JS samples + shell: pwsh + run: | + $failed = @() + # Find all sample app.js files (either in root or src/) + $samples = @() + $samples += Get-ChildItem samples/js/*/app.js -ErrorAction SilentlyContinue + $samples += Get-ChildItem samples/js/*/src/app.js -ErrorAction SilentlyContinue + foreach ($sample in $samples) { + $dir = if ($sample.Directory.Name -eq 'src') { $sample.Directory.Parent } else { $sample.Directory } + $name = $dir.Name + Write-Host "=== Checking: $name ===" + # Link SDK and install dependencies + Push-Location $dir.FullName + npm link foundry-local-sdk 2>$null + if (Test-Path "package.json") { npm install 2>$null } + Pop-Location + # 
Syntax check + node --check $sample.FullName 2>&1 + if ($LASTEXITCODE -ne 0) { + Write-Host "FAILED: $name" + $failed += $name + } else { + Write-Host "OK: $name" + } + } + if ($failed.Count -gt 0) { + Write-Error "Failed syntax checks: $($failed -join ', ')" + exit 1 + } + + # ── C# Samples ───────────────────────────────────────────────────── + cs-samples: + runs-on: ${{ matrix.platform }}-latest + strategy: + fail-fast: false + matrix: + platform: [windows, macos] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + clean: true + + - name: Setup .NET SDK + uses: actions/setup-dotnet@v5 + with: + dotnet-version: | + 8.0.x + 10.0.x + + - name: Build SDK from source + shell: pwsh + run: | + # Build cross-platform SDK package + # Note: /p:TreatWarningsAsErrors=false avoids failing on SDK doc warnings + dotnet pack sdk/cs/src/Microsoft.AI.Foundry.Local.csproj ` + -o local-packages ` + /p:Version=0.9.0-dev ` + /p:IsPacking=true ` + /p:TreatWarningsAsErrors=false ` + --configuration Release + + # Build WinML SDK package (Windows only) + if ($IsWindows) { + dotnet pack sdk/cs/src/Microsoft.AI.Foundry.Local.csproj ` + -o local-packages ` + /p:Version=0.9.0-dev-20260324 ` + /p:UseWinML=true ` + /p:IsPacking=true ` + /p:TreatWarningsAsErrors=false ` + --configuration Release + } + + Write-Host "Local packages:" + Get-ChildItem local-packages/*.nupkg | ForEach-Object { Write-Host " $($_.Name)" } + + - name: Build C# samples + shell: pwsh + run: | + $failed = @() + $projects = Get-ChildItem samples/cs -Recurse -Filter "*.csproj" + foreach ($proj in $projects) { + $name = $proj.BaseName + Write-Host "`n=== Building: $name ===" + dotnet build $proj.FullName --configuration Debug 2>&1 + if ($LASTEXITCODE -ne 0) { + Write-Host "BUILD FAILED: $name" + $failed += $name + } else { + Write-Host "BUILD PASSED: $name" + } + } + if ($failed.Count -gt 0) { + Write-Error "Failed builds: $($failed -join ', ')" + exit 1 + } + + # ── Rust Samples ──────────────────────────────────────────────────── + rust-samples: + runs-on: ${{ matrix.platform }}-latest + strategy: + fail-fast: false + matrix: + platform: [windows, macos] + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + clean: true + + - name: Install Rust toolchain + uses: dtolnay/rust-toolchain@stable + with: + components: clippy + + - name: Cache cargo dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: samples/rust -> target + + - name: Use crates.io directly + shell: pwsh + run: | + # Remove crates-io redirect in SDK (points to Azure Artifacts) + $configPath = "sdk/rust/.cargo/config.toml" + if (Test-Path $configPath) { + Remove-Item $configPath + Write-Host "Removed sdk/rust/.cargo/config.toml" + } + # Remove crates-io redirect in samples + $configPath = "samples/rust/.cargo/config.toml" + if (Test-Path $configPath) { + Remove-Item $configPath + Write-Host "Removed samples/rust/.cargo/config.toml" + } + + - name: Build Rust samples workspace + working-directory: samples/rust + run: cargo build --workspace + + - name: Clippy check + working-directory: samples/rust + run: cargo clippy --workspace -- -D warnings diff --git a/.gitignore b/.gitignore index 8d088525..552012ec 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,6 @@ bin/ obj/ /src/cs/samples/ConsoleClient/test.http logs/ + +# Local NuGet packages built from source +local-packages/ diff --git a/samples/cs/GettingStarted/Directory.Packages.props b/samples/cs/Directory.Packages.props similarity index 74% rename from 
samples/cs/GettingStarted/Directory.Packages.props rename to samples/cs/Directory.Packages.props index efd388a6..e5ba306b 100644 --- a/samples/cs/GettingStarted/Directory.Packages.props +++ b/samples/cs/Directory.Packages.props @@ -7,7 +7,10 @@ + + + diff --git a/samples/cs/GettingStarted/README.md b/samples/cs/GettingStarted/README.md deleted file mode 100644 index afe6e88d..00000000 --- a/samples/cs/GettingStarted/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# 🚀 Getting started with the Foundry Local C# SDK - -There are two NuGet packages for the Foundry Local SDK - a WinML and a cross-platform package - that have *exactly* the same API surface but are optimised for different platforms: - -- **Windows**: Uses the `Microsoft.AI.Foundry.Local.WinML` package that is specific to Windows applications. The WinML package uses Windows Machine Learning to deliver optimal performance and user experience on Windows devices. -- **Cross-Platform**: Use the `Microsoft.AI.Foundry.Local` package that can be used for cross-platform applications (Windows, Linux, macOS). - -> [!TIP] -> Whilst you can use either package on Windows, we recommend using the WinML package for Windows applications to take advantage of the Windows ML framework for optimal performance and user experience. Your end users will benefit with: -> - a wider range of hardware acceleration options that are automatically managed by Windows ML. -> - a smaller application package size because downloading hardware-specific libraries occurs at application runtime rather than bundled with your application. - -Both the WinML and cross-platform packages provide the same APIs, so you can easily switch between the two packages if you need to target multiple platforms. The samples include the following projects: - -- **HelloFoundryLocalSdk**: A simple console application that initializes the Foundry Local SDK, downloads a model, loads it and does chat completions. -- **FoundryLocalWebServer**: A simple console application that shows how to set up a local OpenAI-compliant web server using the Foundry Local SDK. -- **AudioTranscriptionExample**: A simple console application that demonstrates how to use the Foundry Local SDK for audio transcription tasks. -- **ModelManagementExample**: A simple console application that demonstrates how to manage models - such as variant selection and updates - using the Foundry Local SDK. -- **ToolCallingFoundryLocalSdk**: A simple console application that initializes the Foundry Local SDK, downloads a model, loads it and does tool calling with chat completions. -- **ToolCallingFoundryLocalWebServer**: A simple console application that shows how to set up a local OpenAI-compliant web server with tool calling using the Foundry Local SDK. - -## Running the samples - -1. Clone the Foundry Local repository from GitHub. - ```bash - git clone https://github.com/microsoft/Foundry-Local.git - ``` -2. Open and run the samples. - - **Windows:** - 1. Open the `Foundry-Local/samples/cs/GettingStarted/windows/FoundrySamplesWinML.sln` solution in Visual Studio or your preferred IDE. - 1. If you're using Visual Studio, run any of the sample projects (e.g., `HelloFoundryLocalSdk`) by selecting the project in the Solution Explorer and selecting the **Start** button (or pressing **F5**). - - Alternatively, you can run the projects using the .NET CLI. 
For x64 (update the `` as needed): - ```bash - cd Foundry-Local/samples/cs/GettingStarted/windows - dotnet run --project /.csproj -r:win-x64 - ``` - or for ARM64: - ```bash - ```bash - cd Foundry-Local/samples/cs/GettingStarted/windows - dotnet run --project /.csproj -r:win-arm64 - ``` - - - **macOS or Linux:** - 1. Open the `Foundry-Local/samples/cs/GettingStarted/cross-platform/FoundrySamplesXPlatform.sln` solution in Visual Studio Code or your preferred IDE. - 1. Run the project using the .NET CLI (update the `` and `` as needed): - ```bash - cd Foundry-Local/samples/cs/GettingStarted/cross-platform - dotnet run --project /.csproj -r: - ``` - For example, to run the `HelloFoundryLocalSdk` project on macOS (Apple Silicon), use the following command: - - ```bash - cd Foundry-Local/samples/cs/GettingStarted/cross-platform - dotnet run --project HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj -r:osx-arm64 - ``` - - diff --git a/samples/cs/GettingStarted/cross-platform/AudioTranscriptionExample/AudioTranscriptionExample.csproj b/samples/cs/GettingStarted/cross-platform/AudioTranscriptionExample/AudioTranscriptionExample.csproj deleted file mode 100644 index 02eefb31..00000000 --- a/samples/cs/GettingStarted/cross-platform/AudioTranscriptionExample/AudioTranscriptionExample.csproj +++ /dev/null @@ -1,39 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - - - PreserveNewest - - - - - diff --git a/samples/cs/GettingStarted/cross-platform/FoundryLocalWebServer/FoundryLocalWebServer.csproj b/samples/cs/GettingStarted/cross-platform/FoundryLocalWebServer/FoundryLocalWebServer.csproj deleted file mode 100644 index 672e8726..00000000 --- a/samples/cs/GettingStarted/cross-platform/FoundryLocalWebServer/FoundryLocalWebServer.csproj +++ /dev/null @@ -1,33 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/cross-platform/FoundrySamplesXPlatform.sln b/samples/cs/GettingStarted/cross-platform/FoundrySamplesXPlatform.sln deleted file mode 100644 index a51c62d6..00000000 --- a/samples/cs/GettingStarted/cross-platform/FoundrySamplesXPlatform.sln +++ /dev/null @@ -1,53 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 -VisualStudioVersion = 17.14.36705.20 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "HelloFoundryLocalSdk", "HelloFoundryLocalSdk\HelloFoundryLocalSdk.csproj", "{785AAE8A-8CD6-4916-B858-29B8A7EF8FF2}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ToolCallingFoundryLocalSdk", "ToolCallingFoundryLocalSdk\ToolCallingFoundryLocalSdk.csproj", "{2F99B88E-BE58-4ED6-A71E-60B6EE955D1B}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "build", "build", "{8EC462FD-D22E-90A8-E5CE-7E832BA40C5D}" - ProjectSection(SolutionItems) = preProject - ..\Directory.Packages.props = ..\Directory.Packages.props - ..\nuget.config = ..\nuget.config - EndProjectSection -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FoundryLocalWebServer", "FoundryLocalWebServer\FoundryLocalWebServer.csproj", "{D1D6C453-3088-4D8D-B320-24D718601C26}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ToolCallingFoundryLocalWebServer", "ToolCallingFoundryLocalWebServer\ToolCallingFoundryLocalWebServer.csproj", "{B59762E0-B699-4F80-B2B6-8BC5751A4620}" -EndProject 
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AudioTranscriptionExample", "AudioTranscriptionExample\AudioTranscriptionExample.csproj", "{2FAD8210-8AEB-4063-9C61-57B7AD26772D}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ModelManagementExample", "ModelManagementExample\ModelManagementExample.csproj", "{AAD0233C-9FDD-46A7-9428-2F72BC76D38E}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Release|Any CPU = Release|Any CPU - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {785AAE8A-8CD6-4916-B858-29B8A7EF8FF2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {785AAE8A-8CD6-4916-B858-29B8A7EF8FF2}.Debug|Any CPU.Build.0 = Debug|Any CPU - {785AAE8A-8CD6-4916-B858-29B8A7EF8FF2}.Release|Any CPU.ActiveCfg = Release|Any CPU - {785AAE8A-8CD6-4916-B858-29B8A7EF8FF2}.Release|Any CPU.Build.0 = Release|Any CPU - {D1D6C453-3088-4D8D-B320-24D718601C26}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D1D6C453-3088-4D8D-B320-24D718601C26}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D1D6C453-3088-4D8D-B320-24D718601C26}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D1D6C453-3088-4D8D-B320-24D718601C26}.Release|Any CPU.Build.0 = Release|Any CPU - {2FAD8210-8AEB-4063-9C61-57B7AD26772D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {2FAD8210-8AEB-4063-9C61-57B7AD26772D}.Debug|Any CPU.Build.0 = Debug|Any CPU - {2FAD8210-8AEB-4063-9C61-57B7AD26772D}.Release|Any CPU.ActiveCfg = Release|Any CPU - {2FAD8210-8AEB-4063-9C61-57B7AD26772D}.Release|Any CPU.Build.0 = Release|Any CPU - {AAD0233C-9FDD-46A7-9428-2F72BC76D38E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {AAD0233C-9FDD-46A7-9428-2F72BC76D38E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {AAD0233C-9FDD-46A7-9428-2F72BC76D38E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {AAD0233C-9FDD-46A7-9428-2F72BC76D38E}.Release|Any CPU.Build.0 = Release|Any CPU - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {9FC1F302-B28C-4CAB-8ABA-24FA9EBBED6F} - EndGlobalSection -EndGlobal diff --git a/samples/cs/GettingStarted/cross-platform/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj b/samples/cs/GettingStarted/cross-platform/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj deleted file mode 100644 index bb8df514..00000000 --- a/samples/cs/GettingStarted/cross-platform/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj +++ /dev/null @@ -1,32 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj b/samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj deleted file mode 100644 index ad6086f5..00000000 --- a/samples/cs/GettingStarted/cross-platform/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj +++ /dev/null @@ -1,32 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/cross-platform/ModelManagementExample/ModelManagementExample.csproj b/samples/cs/GettingStarted/cross-platform/ModelManagementExample/ModelManagementExample.csproj deleted file mode 100644 index 70af7023..00000000 --- 
a/samples/cs/GettingStarted/cross-platform/ModelManagementExample/ModelManagementExample.csproj +++ /dev/null @@ -1,33 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj b/samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj deleted file mode 100644 index aa2b5400..00000000 --- a/samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj +++ /dev/null @@ -1,31 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj b/samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj deleted file mode 100644 index dcaeb80d..00000000 --- a/samples/cs/GettingStarted/cross-platform/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj +++ /dev/null @@ -1,32 +0,0 @@ - - - - Exe - net9.0 - enable - enable - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/nuget.config b/samples/cs/GettingStarted/nuget.config deleted file mode 100644 index b5c4e511..00000000 --- a/samples/cs/GettingStarted/nuget.config +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/samples/cs/GettingStarted/windows/AudioTranscriptionExample/AudioTranscriptionExample.csproj b/samples/cs/GettingStarted/windows/AudioTranscriptionExample/AudioTranscriptionExample.csproj deleted file mode 100644 index 98219697..00000000 --- a/samples/cs/GettingStarted/windows/AudioTranscriptionExample/AudioTranscriptionExample.csproj +++ /dev/null @@ -1,36 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - PreserveNewest - - - - \ No newline at end of file diff --git a/samples/cs/GettingStarted/windows/FoundryLocalWebServer/FoundryLocalWebServer.csproj b/samples/cs/GettingStarted/windows/FoundryLocalWebServer/FoundryLocalWebServer.csproj deleted file mode 100644 index f08a2b4a..00000000 --- a/samples/cs/GettingStarted/windows/FoundryLocalWebServer/FoundryLocalWebServer.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - \ No newline at end of file diff --git a/samples/cs/GettingStarted/windows/FoundrySamplesWinML.sln b/samples/cs/GettingStarted/windows/FoundrySamplesWinML.sln deleted file mode 100644 index 10a0d851..00000000 --- a/samples/cs/GettingStarted/windows/FoundrySamplesWinML.sln +++ /dev/null @@ -1,71 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 -VisualStudioVersion = 17.14.36705.20 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "HelloFoundryLocalSdk", "HelloFoundryLocalSdk\HelloFoundryLocalSdk.csproj", "{72ABF21E-2BFD-412A-9039-A594B392F00C}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ToolCallingFoundryLocalSdk", "ToolCallingFoundryLocalSdk\ToolCallingFoundryLocalSdk.csproj", 
"{93C21DF0-17D5-4927-9507-C10A79359E7D}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FoundryLocalWebServer", "FoundryLocalWebServer\FoundryLocalWebServer.csproj", "{77026F3A-25E0-40AB-B941-2A6252E13A35}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ToolCallingFoundryLocalWebServer", "ToolCallingFoundryLocalWebServer\ToolCallingFoundryLocalWebServer.csproj", "{5A8536E2-04B6-4F06-80B1-1018069DF73F}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AudioTranscriptionExample", "AudioTranscriptionExample\AudioTranscriptionExample.csproj", "{80F60523-40E1-4743-A256-974B21A9C6AB}" -EndProject -Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "build", "build", "{8EC462FD-D22E-90A8-E5CE-7E832BA40C5D}" - ProjectSection(SolutionItems) = preProject - ..\Directory.Packages.props = ..\Directory.Packages.props - ..\nuget.config = ..\nuget.config - EndProjectSection -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ModelManagementExample", "ModelManagementExample\ModelManagementExample.csproj", "{6BBA4217-6798-4629-AF27-6526FCC5FA5B}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|ARM64 = Debug|ARM64 - Debug|x64 = Debug|x64 - Release|ARM64 = Release|ARM64 - Release|x64 = Release|x64 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Debug|ARM64.ActiveCfg = Debug|ARM64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Debug|ARM64.Build.0 = Debug|ARM64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Debug|x64.ActiveCfg = Debug|x64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Debug|x64.Build.0 = Debug|x64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Release|ARM64.ActiveCfg = Release|ARM64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Release|ARM64.Build.0 = Release|ARM64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Release|x64.ActiveCfg = Release|x64 - {72ABF21E-2BFD-412A-9039-A594B392F00C}.Release|x64.Build.0 = Release|x64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Debug|ARM64.ActiveCfg = Debug|ARM64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Debug|ARM64.Build.0 = Debug|ARM64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Debug|x64.ActiveCfg = Debug|x64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Debug|x64.Build.0 = Debug|x64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Release|ARM64.ActiveCfg = Release|ARM64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Release|ARM64.Build.0 = Release|ARM64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Release|x64.ActiveCfg = Release|x64 - {77026F3A-25E0-40AB-B941-2A6252E13A35}.Release|x64.Build.0 = Release|x64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Debug|ARM64.ActiveCfg = Debug|ARM64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Debug|ARM64.Build.0 = Debug|ARM64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Debug|x64.ActiveCfg = Debug|x64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Debug|x64.Build.0 = Debug|x64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Release|ARM64.ActiveCfg = Release|ARM64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Release|ARM64.Build.0 = Release|ARM64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Release|x64.ActiveCfg = Release|x64 - {80F60523-40E1-4743-A256-974B21A9C6AB}.Release|x64.Build.0 = Release|x64 - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Debug|ARM64.ActiveCfg = Debug|Any CPU - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Debug|ARM64.Build.0 = Debug|Any CPU - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Debug|x64.ActiveCfg = Debug|x64 - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Debug|x64.Build.0 = Debug|x64 - 
{6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Release|ARM64.ActiveCfg = Release|Any CPU - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Release|ARM64.Build.0 = Release|Any CPU - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Release|x64.ActiveCfg = Release|x64 - {6BBA4217-6798-4629-AF27-6526FCC5FA5B}.Release|x64.Build.0 = Release|x64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection - GlobalSection(ExtensibilityGlobals) = postSolution - SolutionGuid = {17462B72-2BD9-446A-8E57-E313251686D9} - EndGlobalSection -EndGlobal diff --git a/samples/cs/GettingStarted/windows/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj b/samples/cs/GettingStarted/windows/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj deleted file mode 100644 index 23d2ee91..00000000 --- a/samples/cs/GettingStarted/windows/HelloFoundryLocalSdk/HelloFoundryLocalSdk.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - \ No newline at end of file diff --git a/samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj b/samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj deleted file mode 100644 index b4489af2..00000000 --- a/samples/cs/GettingStarted/windows/LiveAudioTranscriptionExample/LiveAudioTranscriptionExample.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - diff --git a/samples/cs/GettingStarted/windows/ModelManagementExample/ModelManagementExample.csproj b/samples/cs/GettingStarted/windows/ModelManagementExample/ModelManagementExample.csproj deleted file mode 100644 index bc4afe67..00000000 --- a/samples/cs/GettingStarted/windows/ModelManagementExample/ModelManagementExample.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - \ No newline at end of file diff --git a/samples/cs/GettingStarted/windows/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj b/samples/cs/GettingStarted/windows/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj deleted file mode 100644 index de209c13..00000000 --- a/samples/cs/GettingStarted/windows/ToolCallingFoundryLocalSdk/ToolCallingFoundryLocalSdk.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - \ No newline at end of file diff --git a/samples/cs/GettingStarted/windows/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj b/samples/cs/GettingStarted/windows/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj deleted file mode 100644 index 9101d778..00000000 --- a/samples/cs/GettingStarted/windows/ToolCallingFoundryLocalWebServer/ToolCallingFoundryLocalWebServer.csproj +++ /dev/null @@ -1,30 +0,0 @@ - - - - Exe - enable - enable - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - \ No newline at end of file diff --git a/samples/cs/README.md b/samples/cs/README.md new file mode 100644 index 00000000..1847bb8e --- /dev/null +++ 
b/samples/cs/README.md @@ -0,0 +1,43 @@ +# 🚀 Foundry Local C# Samples + +These samples demonstrate how to use the Foundry Local C# SDK. Each sample uses a **unified project file** that automatically detects your operating system and selects the optimal NuGet package: + +- **Windows**: Uses `Microsoft.AI.Foundry.Local.WinML` for hardware acceleration via Windows ML. +- **macOS / Linux**: Uses `Microsoft.AI.Foundry.Local` for cross-platform support. + +Both packages provide the same APIs, so the same source code works on all platforms. + +## Samples + +| Sample | Description | +|---|---| +| [native-chat-completions](native-chat-completions/) | Initialize the SDK, download a model, and run chat completions. | +| [audio-transcription-example](audio-transcription-example/) | Transcribe audio files using the Foundry Local SDK. | +| [foundry-local-web-server](foundry-local-web-server/) | Set up a local OpenAI-compliant web server. | +| [tool-calling-foundry-local-sdk](tool-calling-foundry-local-sdk/) | Use tool calling with native chat completions. | +| [tool-calling-foundry-local-web-server](tool-calling-foundry-local-web-server/) | Use tool calling with the local web server. | +| [model-management-example](model-management-example/) | Manage models, variant selection, and updates. | +| [tutorial-chat-assistant](tutorial-chat-assistant/) | Build an interactive chat assistant (tutorial). | +| [tutorial-document-summarizer](tutorial-document-summarizer/) | Summarize documents with AI (tutorial). | +| [tutorial-tool-calling](tutorial-tool-calling/) | Create a tool-calling assistant (tutorial). | +| [tutorial-voice-to-text](tutorial-voice-to-text/) | Transcribe and summarize audio (tutorial). | + +## Running a sample + +1. Clone the repository: + ```bash + git clone https://github.com/microsoft/Foundry-Local.git + cd Foundry-Local/samples/cs + ``` + +2. Open and run a sample: + ```bash + cd native-chat-completions + dotnet run + ``` + + The unified project file automatically selects the correct SDK package for your platform. + +> [!TIP] +> On Windows, we recommend using the WinML package (selected automatically) for optimal performance. Your users benefit from a wider range of hardware acceleration options and a smaller application package size. 
+ diff --git a/samples/cs/GettingStarted/src/Shared/Utils.cs b/samples/cs/Shared/Utils.cs similarity index 100% rename from samples/cs/GettingStarted/src/Shared/Utils.cs rename to samples/cs/Shared/Utils.cs diff --git a/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj b/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj new file mode 100644 index 00000000..bd42e38b --- /dev/null +++ b/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj @@ -0,0 +1,55 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + PreserveNewest + + + + + + + + + diff --git a/samples/cs/audio-transcription-example/AudioTranscriptionExample.sln b/samples/cs/audio-transcription-example/AudioTranscriptionExample.sln new file mode 100644 index 00000000..46fb73d9 --- /dev/null +++ b/samples/cs/audio-transcription-example/AudioTranscriptionExample.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AudioTranscriptionExample", "AudioTranscriptionExample.csproj", "{11616852-BB4F-4B60-9FAC-D94E2688BB30}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Debug|Any CPU.Build.0 = Debug|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Debug|x64.ActiveCfg = Debug|x64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Debug|x64.Build.0 = Debug|x64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Debug|x86.ActiveCfg = Debug|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Debug|x86.Build.0 = Debug|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Release|Any CPU.ActiveCfg = Release|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Release|Any CPU.Build.0 = Release|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Release|x64.ActiveCfg = Release|x64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Release|x64.Build.0 = Release|x64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Release|x86.ActiveCfg = Release|ARM64 + {11616852-BB4F-4B60-9FAC-D94E2688BB30}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/GettingStarted/src/AudioTranscriptionExample/Program.cs b/samples/cs/audio-transcription-example/Program.cs similarity index 78% rename from samples/cs/GettingStarted/src/AudioTranscriptionExample/Program.cs rename to samples/cs/audio-transcription-example/Program.cs index be1db5db..b78e13d2 100644 --- a/samples/cs/GettingStarted/src/AudioTranscriptionExample/Program.cs +++ b/samples/cs/audio-transcription-example/Program.cs @@ -1,5 +1,9 @@ -using Microsoft.AI.Foundry.Local; +// +// +using Microsoft.AI.Foundry.Local; +// +// var config = new Configuration { AppName = "foundry_local_samples", @@ -17,8 +21,10 @@ // Download is only required again if a new version of the EP is released. 
// For cross platform builds there is no dynamic EP download and this will return immediately. await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +// +// // Get the model catalog var catalog = await mgr.GetCatalogAsync(); @@ -44,15 +50,16 @@ await model.DownloadAsync(progress => Console.Write($"Loading model {model.Id}..."); await model.LoadAsync(); Console.WriteLine("done."); +// -// Get a chat client +// +// Get an audio client var audioClient = await model.GetAudioClientAsync(); - // Get a transcription with streaming outputs -Console.WriteLine("Transcribing audio with streaming output:"); -var audioFile = Path.Combine(AppContext.BaseDirectory, "Recording.mp3"); +var audioFile = args.Length > 0 ? args[0] : Path.Combine(AppContext.BaseDirectory, "Recording.mp3"); +Console.WriteLine($"Transcribing audio with streaming output: {Path.GetFileName(audioFile)}"); var response = audioClient.TranscribeAudioStreamingAsync(audioFile, CancellationToken.None); await foreach (var chunk in response) { @@ -61,7 +68,11 @@ await model.DownloadAsync(progress => } Console.WriteLine(); +// +// // Tidy up - unload the model -await model.UnloadAsync(); \ No newline at end of file +await model.UnloadAsync(); +// +// \ No newline at end of file diff --git a/samples/cs/GettingStarted/src/AudioTranscriptionExample/Recording.mp3 b/samples/cs/audio-transcription-example/Recording.mp3 similarity index 100% rename from samples/cs/GettingStarted/src/AudioTranscriptionExample/Recording.mp3 rename to samples/cs/audio-transcription-example/Recording.mp3 diff --git a/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj b/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj new file mode 100644 index 00000000..fe890be2 --- /dev/null +++ b/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj @@ -0,0 +1,52 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/foundry-local-web-server/FoundryLocalWebServer.sln b/samples/cs/foundry-local-web-server/FoundryLocalWebServer.sln new file mode 100644 index 00000000..91d7e953 --- /dev/null +++ b/samples/cs/foundry-local-web-server/FoundryLocalWebServer.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "FoundryLocalWebServer", "FoundryLocalWebServer.csproj", "{2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Debug|Any CPU.Build.0 = Debug|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Debug|x64.ActiveCfg = Debug|x64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Debug|x64.Build.0 = Debug|x64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Debug|x86.ActiveCfg = Debug|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Debug|x86.Build.0 = Debug|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Release|Any CPU.ActiveCfg = 
Release|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Release|Any CPU.Build.0 = Release|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Release|x64.ActiveCfg = Release|x64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Release|x64.Build.0 = Release|x64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Release|x86.ActiveCfg = Release|ARM64 + {2DEC84E5-8530-45AF-B26D-EC78A6A7D6E7}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/GettingStarted/src/FoundryLocalWebServer/Program.cs b/samples/cs/foundry-local-web-server/Program.cs similarity index 91% rename from samples/cs/GettingStarted/src/FoundryLocalWebServer/Program.cs rename to samples/cs/foundry-local-web-server/Program.cs index f50ac1b0..3ca68854 100644 --- a/samples/cs/GettingStarted/src/FoundryLocalWebServer/Program.cs +++ b/samples/cs/foundry-local-web-server/Program.cs @@ -1,7 +1,11 @@ -using Microsoft.AI.Foundry.Local; +// +// +using Microsoft.AI.Foundry.Local; using OpenAI; using System.ClientModel; +// +// var config = new Configuration { AppName = "foundry_local_samples", @@ -23,8 +27,10 @@ // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +// +// // Get the model catalog var catalog = await mgr.GetCatalogAsync(); @@ -46,8 +52,10 @@ await model.DownloadAsync(progress => Console.Write($"Loading model {model.Id}..."); await model.LoadAsync(); Console.WriteLine("done."); +// +// // Start the web service Console.Write($"Starting web service on {config.Web.Urls}..."); await mgr.StartWebServiceAsync(); @@ -79,4 +87,6 @@ await model.DownloadAsync(progress => // Tidy up // Stop the web service and unload model await mgr.StopWebServiceAsync(); -await model.UnloadAsync(); \ No newline at end of file +await model.UnloadAsync(); +// +// \ No newline at end of file diff --git a/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj b/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj new file mode 100644 index 00000000..3d91b677 --- /dev/null +++ b/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj @@ -0,0 +1,55 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln b/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln new file mode 100644 index 00000000..65ba7510 --- /dev/null +++ b/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LiveAudioTranscriptionExample", "LiveAudioTranscriptionExample.csproj", "{A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = 
Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|Any CPU.Build.0 = Debug|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x64.ActiveCfg = Debug|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x64.Build.0 = Debug|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x86.ActiveCfg = Debug|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x86.Build.0 = Debug|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|Any CPU.ActiveCfg = Release|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|Any CPU.Build.0 = Release|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x64.ActiveCfg = Release|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x64.Build.0 = Release|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x86.ActiveCfg = Release|ARM64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/GettingStarted/src/LiveAudioTranscriptionExample/Program.cs b/samples/cs/live-audio-transcription-example/Program.cs similarity index 100% rename from samples/cs/GettingStarted/src/LiveAudioTranscriptionExample/Program.cs rename to samples/cs/live-audio-transcription-example/Program.cs diff --git a/samples/cs/model-management-example/ModelManagementExample.csproj b/samples/cs/model-management-example/ModelManagementExample.csproj new file mode 100644 index 00000000..4d948c56 --- /dev/null +++ b/samples/cs/model-management-example/ModelManagementExample.csproj @@ -0,0 +1,48 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/model-management-example/ModelManagementExample.sln b/samples/cs/model-management-example/ModelManagementExample.sln new file mode 100644 index 00000000..f255391b --- /dev/null +++ b/samples/cs/model-management-example/ModelManagementExample.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ModelManagementExample", "ModelManagementExample.csproj", "{9316B939-946C-4956-A4E7-9410017FD319}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {9316B939-946C-4956-A4E7-9410017FD319}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {9316B939-946C-4956-A4E7-9410017FD319}.Debug|Any CPU.Build.0 = Debug|ARM64 + {9316B939-946C-4956-A4E7-9410017FD319}.Debug|x64.ActiveCfg = Debug|x64 + {9316B939-946C-4956-A4E7-9410017FD319}.Debug|x64.Build.0 = Debug|x64 + {9316B939-946C-4956-A4E7-9410017FD319}.Debug|x86.ActiveCfg = Debug|ARM64 + {9316B939-946C-4956-A4E7-9410017FD319}.Debug|x86.Build.0 = Debug|ARM64 + {9316B939-946C-4956-A4E7-9410017FD319}.Release|Any CPU.ActiveCfg = Release|ARM64 + {9316B939-946C-4956-A4E7-9410017FD319}.Release|Any CPU.Build.0 = Release|ARM64 + 
{9316B939-946C-4956-A4E7-9410017FD319}.Release|x64.ActiveCfg = Release|x64 + {9316B939-946C-4956-A4E7-9410017FD319}.Release|x64.Build.0 = Release|x64 + {9316B939-946C-4956-A4E7-9410017FD319}.Release|x86.ActiveCfg = Release|ARM64 + {9316B939-946C-4956-A4E7-9410017FD319}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/GettingStarted/src/ModelManagementExample/Program.cs b/samples/cs/model-management-example/Program.cs similarity index 100% rename from samples/cs/GettingStarted/src/ModelManagementExample/Program.cs rename to samples/cs/model-management-example/Program.cs diff --git a/samples/cs/native-chat-completions/NativeChatCompletions.csproj b/samples/cs/native-chat-completions/NativeChatCompletions.csproj new file mode 100644 index 00000000..4d948c56 --- /dev/null +++ b/samples/cs/native-chat-completions/NativeChatCompletions.csproj @@ -0,0 +1,48 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/native-chat-completions/NativeChatCompletions.sln b/samples/cs/native-chat-completions/NativeChatCompletions.sln new file mode 100644 index 00000000..a127bfba --- /dev/null +++ b/samples/cs/native-chat-completions/NativeChatCompletions.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "NativeChatCompletions", "NativeChatCompletions.csproj", "{A53372CE-F7E1-4F09-B186-77F76E388659}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A53372CE-F7E1-4F09-B186-77F76E388659}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Debug|Any CPU.Build.0 = Debug|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Debug|x64.ActiveCfg = Debug|x64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Debug|x64.Build.0 = Debug|x64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Debug|x86.ActiveCfg = Debug|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Debug|x86.Build.0 = Debug|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Release|Any CPU.ActiveCfg = Release|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Release|Any CPU.Build.0 = Release|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Release|x64.ActiveCfg = Release|x64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Release|x64.Build.0 = Release|x64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Release|x86.ActiveCfg = Release|ARM64 + {A53372CE-F7E1-4F09-B186-77F76E388659}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/GettingStarted/src/HelloFoundryLocalSdk/Program.cs b/samples/cs/native-chat-completions/Program.cs similarity index 88% rename from samples/cs/GettingStarted/src/HelloFoundryLocalSdk/Program.cs rename to samples/cs/native-chat-completions/Program.cs index 52efe410..082a19f5 100644 --- 
a/samples/cs/GettingStarted/src/HelloFoundryLocalSdk/Program.cs +++ b/samples/cs/native-chat-completions/Program.cs @@ -1,6 +1,10 @@ -using Microsoft.AI.Foundry.Local; +// +// +using Microsoft.AI.Foundry.Local; using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; +// +// CancellationToken ct = new CancellationToken(); var config = new Configuration @@ -20,8 +24,10 @@ // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +// +// // Get the model catalog var catalog = await mgr.GetCatalogAsync(); @@ -43,7 +49,9 @@ await model.DownloadAsync(progress => Console.Write($"Loading model {model.Id}..."); await model.LoadAsync(); Console.WriteLine("done."); +// +// // Get a chat client var chatClient = await model.GetChatClientAsync(); @@ -62,6 +70,10 @@ await model.DownloadAsync(progress => Console.Out.Flush(); } Console.WriteLine(); +// +// // Tidy up - unload the model -await model.UnloadAsync(); \ No newline at end of file +await model.UnloadAsync(); +// +// \ No newline at end of file diff --git a/samples/cs/nuget.config b/samples/cs/nuget.config new file mode 100644 index 00000000..9913c715 --- /dev/null +++ b/samples/cs/nuget.config @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/samples/cs/GettingStarted/src/ToolCallingFoundryLocalSdk/Program.cs b/samples/cs/tool-calling-foundry-local-sdk/Program.cs similarity index 94% rename from samples/cs/GettingStarted/src/ToolCallingFoundryLocalSdk/Program.cs rename to samples/cs/tool-calling-foundry-local-sdk/Program.cs index 3cdf3d38..bbb050c0 100644 --- a/samples/cs/GettingStarted/src/ToolCallingFoundryLocalSdk/Program.cs +++ b/samples/cs/tool-calling-foundry-local-sdk/Program.cs @@ -1,9 +1,13 @@ -using Microsoft.AI.Foundry.Local; +// +// +using Microsoft.AI.Foundry.Local; using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; using Betalgo.Ranul.OpenAI.ObjectModels.ResponseModels; using Betalgo.Ranul.OpenAI.ObjectModels.SharedModels; using System.Text.Json; +// +// CancellationToken ct = new CancellationToken(); var config = new Configuration @@ -23,8 +27,10 @@ // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. 
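// Note: Utils.RunWithSpinner, called on the next line, is a sample helper that is never
// defined anywhere in this patch. A minimal sketch of what such a helper could look like;
// the class name, signature, and spinner behavior are assumptions inferred from the call
// sites, not the actual helper. (Requires: using System; using System.Threading.Tasks;)
static class Utils
{
    public static async Task RunWithSpinner(string label, Task work)
    {
        var frames = new[] { '|', '/', '-', '\\' };
        var i = 0;
        Console.Write($"{label}... ");
        while (!work.IsCompleted)
        {
            Console.Write(frames[i++ % frames.Length]); // draw the next spinner frame
            await Task.Delay(100);
            Console.Write('\b');                        // erase it before redrawing
        }
        await work; // rethrow if the wrapped task faulted
        Console.WriteLine("done.");
    }
}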
await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +// +// // Get the model catalog var catalog = await mgr.GetCatalogAsync(); @@ -48,6 +54,7 @@ await model.DownloadAsync(progress => Console.Write($"Loading model {model.Id}..."); await model.LoadAsync(); Console.WriteLine("done."); +// // Get a chat client @@ -63,6 +70,7 @@ await model.DownloadAsync(progress => ]; +// // Prepare tools List tools = [ @@ -86,8 +94,10 @@ await model.DownloadAsync(progress => } } ]; +// +// // Get a streaming chat completion response var toolCallResponses = new List(); Console.WriteLine("Chat completion response:"); @@ -150,7 +160,11 @@ await model.DownloadAsync(progress => Console.Out.Flush(); } Console.WriteLine(); +// +// // Tidy up - unload the model -await model.UnloadAsync(); \ No newline at end of file +await model.UnloadAsync(); +// +// \ No newline at end of file diff --git a/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj b/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj new file mode 100644 index 00000000..4d948c56 --- /dev/null +++ b/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj @@ -0,0 +1,48 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.sln b/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.sln new file mode 100644 index 00000000..adbf5ea2 --- /dev/null +++ b/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ToolCallingFoundryLocalSdk", "ToolCallingFoundryLocalSdk.csproj", "{7B40637D-D7E3-4A95-9B57-8D0EF84C8532}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Debug|Any CPU.Build.0 = Debug|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Debug|x64.ActiveCfg = Debug|x64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Debug|x64.Build.0 = Debug|x64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Debug|x86.ActiveCfg = Debug|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Debug|x86.Build.0 = Debug|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Release|Any CPU.ActiveCfg = Release|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Release|Any CPU.Build.0 = Release|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Release|x64.ActiveCfg = Release|x64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Release|x64.Build.0 = Release|x64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Release|x86.ActiveCfg = Release|ARM64 + {7B40637D-D7E3-4A95-9B57-8D0EF84C8532}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/GettingStarted/src/ToolCallingFoundryLocalWebServer/Program.cs 
b/samples/cs/tool-calling-foundry-local-web-server/Program.cs similarity index 98% rename from samples/cs/GettingStarted/src/ToolCallingFoundryLocalWebServer/Program.cs rename to samples/cs/tool-calling-foundry-local-web-server/Program.cs index 6d6937fd..4c283cd4 100644 --- a/samples/cs/GettingStarted/src/ToolCallingFoundryLocalWebServer/Program.cs +++ b/samples/cs/tool-calling-foundry-local-web-server/Program.cs @@ -1,4 +1,5 @@ -using Microsoft.AI.Foundry.Local; +// +using Microsoft.AI.Foundry.Local; using OpenAI; using OpenAI.Chat; using System.ClientModel; @@ -178,4 +179,5 @@ await model.DownloadAsync(progress => // Tidy up // Stop the web service and unload model await mgr.StopWebServiceAsync(); -await model.UnloadAsync(); \ No newline at end of file +await model.UnloadAsync(); +// \ No newline at end of file diff --git a/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj b/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj new file mode 100644 index 00000000..fe890be2 --- /dev/null +++ b/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj @@ -0,0 +1,52 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.sln b/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.sln new file mode 100644 index 00000000..7d1568e1 --- /dev/null +++ b/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "ToolCallingFoundryLocalWebServer", "ToolCallingFoundryLocalWebServer.csproj", "{F9BD2479-A235-4BBF-A722-DF180A076143}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {F9BD2479-A235-4BBF-A722-DF180A076143}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Debug|Any CPU.Build.0 = Debug|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Debug|x64.ActiveCfg = Debug|x64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Debug|x64.Build.0 = Debug|x64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Debug|x86.ActiveCfg = Debug|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Debug|x86.Build.0 = Debug|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Release|Any CPU.ActiveCfg = Release|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Release|Any CPU.Build.0 = Release|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Release|x64.ActiveCfg = Release|x64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Release|x64.Build.0 = Release|x64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Release|x86.ActiveCfg = Release|ARM64 + {F9BD2479-A235-4BBF-A722-DF180A076143}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/tutorial-chat-assistant/Program.cs 
b/samples/cs/tutorial-chat-assistant/Program.cs new file mode 100644 index 00000000..10e9a63b --- /dev/null +++ b/samples/cs/tutorial-chat-assistant/Program.cs @@ -0,0 +1,101 @@ +// +// +using Microsoft.AI.Foundry.Local; +using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; +using Microsoft.Extensions.Logging; +// + +// +CancellationToken ct = CancellationToken.None; + +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +using var loggerFactory = LoggerFactory.Create(builder => +{ + builder.SetMinimumLevel(Microsoft.Extensions.Logging.LogLevel.Information); +}); +var logger = loggerFactory.CreateLogger<Program>(); + +// Initialize the singleton instance +await FoundryLocalManager.CreateAsync(config, logger); +var mgr = FoundryLocalManager.Instance; + +// Select and load a model from the catalog +var catalog = await mgr.GetCatalogAsync(); +var model = await catalog.GetModelAsync("qwen2.5-0.5b") + ?? throw new Exception("Model not found"); + +await model.DownloadAsync(progress => +{ + Console.Write($"\rDownloading model: {progress:F2}%"); + if (progress >= 100f) Console.WriteLine(); +}); + +await model.LoadAsync(); +Console.WriteLine("Model loaded and ready."); + +// Get a chat client +var chatClient = await model.GetChatClientAsync(); +// + +// +// Start the conversation with a system prompt +var messages = new List<ChatMessage> +{ + new ChatMessage + { + Role = "system", + Content = "You are a helpful, friendly assistant. Keep your responses " + + "concise and conversational. If you don't know something, say so." + } +}; +// + +Console.WriteLine("\nChat assistant ready! Type 'quit' to exit.\n"); + +// +while (true) +{ + Console.Write("You: "); + var userInput = Console.ReadLine(); + if (string.IsNullOrWhiteSpace(userInput) || + userInput.Equals("quit", StringComparison.OrdinalIgnoreCase) || + userInput.Equals("exit", StringComparison.OrdinalIgnoreCase)) + { + break; + } + + // Add the user's message to conversation history + messages.Add(new ChatMessage { Role = "user", Content = userInput }); + + // + // Stream the response token by token + Console.Write("Assistant: "); + var fullResponse = string.Empty; + var streamingResponse = chatClient.CompleteChatStreamingAsync(messages, ct); + await foreach (var chunk in streamingResponse) + { + var content = chunk.Choices[0].Message.Content; + if (!string.IsNullOrEmpty(content)) + { + Console.Write(content); + Console.Out.Flush(); + fullResponse += content; + } + } + Console.WriteLine("\n"); + // + + // Add the complete response to conversation history + messages.Add(new ChatMessage { Role = "assistant", Content = fullResponse }); +} +// + +// Clean up - unload the model +await model.UnloadAsync(); +Console.WriteLine("Model unloaded. 
Goodbye!"); +// diff --git a/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj b/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj new file mode 100644 index 00000000..a3533047 --- /dev/null +++ b/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj @@ -0,0 +1,50 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.sln b/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.sln new file mode 100644 index 00000000..a9c77e16 --- /dev/null +++ b/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TutorialChatAssistant", "TutorialChatAssistant.csproj", "{5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Debug|Any CPU.Build.0 = Debug|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Debug|x64.ActiveCfg = Debug|x64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Debug|x64.Build.0 = Debug|x64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Debug|x86.ActiveCfg = Debug|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Debug|x86.Build.0 = Debug|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Release|Any CPU.ActiveCfg = Release|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Release|Any CPU.Build.0 = Release|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Release|x64.ActiveCfg = Release|x64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Release|x64.Build.0 = Release|x64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Release|x86.ActiveCfg = Release|ARM64 + {5D5778BD-B40A-4D9E-BC2F-65AD50EE6F94}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/tutorial-document-summarizer/Program.cs b/samples/cs/tutorial-document-summarizer/Program.cs new file mode 100644 index 00000000..bc5546f6 --- /dev/null +++ b/samples/cs/tutorial-document-summarizer/Program.cs @@ -0,0 +1,109 @@ +// +// +using Microsoft.AI.Foundry.Local; +using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; +using Microsoft.Extensions.Logging; +// + +// +CancellationToken ct = CancellationToken.None; + +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +using var loggerFactory = LoggerFactory.Create(builder => +{ + builder.SetMinimumLevel(Microsoft.Extensions.Logging.LogLevel.Information); +}); +var logger = loggerFactory.CreateLogger(); + +// Initialize the singleton instance +await FoundryLocalManager.CreateAsync(config, logger); +var mgr = FoundryLocalManager.Instance; + +// Select and load a model from the catalog +var catalog = await mgr.GetCatalogAsync(); +var model = await 
catalog.GetModelAsync("qwen2.5-0.5b") + ?? throw new Exception("Model not found"); + +await model.DownloadAsync(progress => +{ + Console.Write($"\rDownloading model: {progress:F2}%"); + if (progress >= 100f) Console.WriteLine(); +}); + +await model.LoadAsync(); +Console.WriteLine("Model loaded and ready.\n"); + +// Get a chat client +var chatClient = await model.GetChatClientAsync(); +// + +// +var systemPrompt = + "Summarize the following document into concise bullet points. " + + "Focus on the key points and main ideas."; + +// +var target = args.Length > 0 ? args[0] : "document.txt"; +// + +if (Directory.Exists(target)) +{ + await SummarizeDirectoryAsync(chatClient, target, systemPrompt, ct); +} +else +{ + Console.WriteLine($"--- {Path.GetFileName(target)} ---"); + await SummarizeFileAsync(chatClient, target, systemPrompt, ct); +} +// + +// Clean up +await model.UnloadAsync(); +Console.WriteLine("\nModel unloaded. Done!"); + +async Task SummarizeFileAsync( + dynamic client, + string filePath, + string prompt, + CancellationToken token) +{ + var fileContent = await File.ReadAllTextAsync(filePath, token); + var messages = new List + { + new ChatMessage { Role = "system", Content = prompt }, + new ChatMessage { Role = "user", Content = fileContent } + }; + + var response = await client.CompleteChatAsync(messages, token); + Console.WriteLine(response.Choices[0].Message.Content); +} + +async Task SummarizeDirectoryAsync( + dynamic client, + string directory, + string prompt, + CancellationToken token) +{ + var txtFiles = Directory.GetFiles(directory, "*.txt") + .OrderBy(f => f) + .ToArray(); + + if (txtFiles.Length == 0) + { + Console.WriteLine($"No .txt files found in {directory}"); + return; + } + + foreach (var txtFile in txtFiles) + { + Console.WriteLine($"--- {Path.GetFileName(txtFile)} ---"); + await SummarizeFileAsync(client, txtFile, prompt, token); + Console.WriteLine(); + } +} +// diff --git a/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj b/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj new file mode 100644 index 00000000..a3533047 --- /dev/null +++ b/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj @@ -0,0 +1,50 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.sln b/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.sln new file mode 100644 index 00000000..7d7a0fc9 --- /dev/null +++ b/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TutorialDocumentSummarizer", "TutorialDocumentSummarizer.csproj", "{6868D03F-BD8E-46ED-9A5B-95346A3810A4}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + 
{6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Debug|Any CPU.Build.0 = Debug|ARM64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Debug|x64.ActiveCfg = Debug|x64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Debug|x64.Build.0 = Debug|x64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Debug|x86.ActiveCfg = Debug|ARM64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Debug|x86.Build.0 = Debug|ARM64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Release|Any CPU.ActiveCfg = Release|ARM64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Release|Any CPU.Build.0 = Release|ARM64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Release|x64.ActiveCfg = Release|x64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Release|x64.Build.0 = Release|x64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Release|x86.ActiveCfg = Release|ARM64 + {6868D03F-BD8E-46ED-9A5B-95346A3810A4}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/tutorial-tool-calling/Program.cs b/samples/cs/tutorial-tool-calling/Program.cs new file mode 100644 index 00000000..74f137db --- /dev/null +++ b/samples/cs/tutorial-tool-calling/Program.cs @@ -0,0 +1,228 @@ +// +// +using System.Text.Json; +using Microsoft.AI.Foundry.Local; +using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; +using Betalgo.Ranul.OpenAI.ObjectModels.ResponseModels; +using Betalgo.Ranul.OpenAI.ObjectModels.SharedModels; +using Microsoft.Extensions.Logging; +// + +CancellationToken ct = CancellationToken.None; + +// +// --- Tool definitions --- +List tools = +[ + new ToolDefinition + { + Type = "function", + Function = new FunctionDefinition() + { + Name = "get_weather", + Description = "Get the current weather for a location", + Parameters = new PropertyDefinition() + { + Type = "object", + Properties = new Dictionary() + { + { "location", new PropertyDefinition() { Type = "string", Description = "The city or location" } }, + { "unit", new PropertyDefinition() { Type = "string", Description = "Temperature unit (celsius or fahrenheit)" } } + }, + Required = ["location"] + } + } + }, + new ToolDefinition + { + Type = "function", + Function = new FunctionDefinition() + { + Name = "calculate", + Description = "Perform a math calculation", + Parameters = new PropertyDefinition() + { + Type = "object", + Properties = new Dictionary() + { + { "expression", new PropertyDefinition() { Type = "string", Description = "The math expression to evaluate" } } + }, + Required = ["expression"] + } + } + } +]; + +// --- Tool implementations --- +string ExecuteTool(string functionName, JsonElement arguments) +{ + switch (functionName) + { + case "get_weather": + var location = arguments.GetProperty("location") + .GetString() ?? "unknown"; + var unit = arguments.TryGetProperty("unit", out var u) + ? u.GetString() ?? "celsius" + : "celsius"; + var temp = unit == "celsius" ? 22 : 72; + return JsonSerializer.Serialize(new + { + location, + temperature = temp, + unit, + condition = "Sunny" + }); + + case "calculate": + var expression = arguments.GetProperty("expression") + .GetString() ?? 
""; + try + { + var result = new System.Data.DataTable() + .Compute(expression, null); + return JsonSerializer.Serialize(new + { + expression, + result = result?.ToString() + }); + } + catch (Exception ex) + { + return JsonSerializer.Serialize(new + { + error = ex.Message + }); + } + + default: + return JsonSerializer.Serialize(new + { + error = $"Unknown function: {functionName}" + }); + } +} +// + +// +// --- Main application --- +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +using var loggerFactory = LoggerFactory.Create(builder => +{ + builder.SetMinimumLevel( + Microsoft.Extensions.Logging.LogLevel.Information + ); +}); +var logger = loggerFactory.CreateLogger(); + +await FoundryLocalManager.CreateAsync(config, logger); +var mgr = FoundryLocalManager.Instance; + +var catalog = await mgr.GetCatalogAsync(); +var model = await catalog.GetModelAsync("qwen2.5-0.5b") + ?? throw new Exception("Model not found"); + +await model.DownloadAsync(progress => +{ + Console.Write($"\rDownloading model: {progress:F2}%"); + if (progress >= 100f) Console.WriteLine(); +}); + +await model.LoadAsync(); +Console.WriteLine("Model loaded and ready."); + +var chatClient = await model.GetChatClientAsync(); +chatClient.Settings.ToolChoice = ToolChoice.Auto; + +var messages = new List +{ + new ChatMessage + { + Role = "system", + Content = "You are a helpful assistant with access to tools. " + + "Use them when needed to answer questions accurately." + } +}; +// + +// +Console.WriteLine("\nTool-calling assistant ready! Type 'quit' to exit.\n"); + +while (true) +{ + Console.Write("You: "); + var userInput = Console.ReadLine(); + if (string.IsNullOrWhiteSpace(userInput) || + userInput.Equals("quit", StringComparison.OrdinalIgnoreCase) || + userInput.Equals("exit", StringComparison.OrdinalIgnoreCase)) + { + break; + } + + messages.Add(new ChatMessage + { + Role = "user", + Content = userInput + }); + + var response = await chatClient.CompleteChatAsync( + messages, tools, ct + ); + + var choice = response.Choices[0].Message; + + if (choice.ToolCalls is { Count: > 0 }) + { + messages.Add(choice); + + foreach (var toolCall in choice.ToolCalls) + { + var toolArgs = JsonDocument.Parse( + toolCall.FunctionCall.Arguments + ).RootElement; + Console.WriteLine( + $" Tool call: {toolCall.FunctionCall.Name}({toolArgs})" + ); + + var result = ExecuteTool( + toolCall.FunctionCall.Name, toolArgs + ); + messages.Add(new ChatMessage + { + Role = "tool", + ToolCallId = toolCall.Id, + Content = result + }); + } + + var finalResponse = await chatClient.CompleteChatAsync( + messages, tools, ct + ); + var answer = finalResponse.Choices[0].Message.Content ?? ""; + messages.Add(new ChatMessage + { + Role = "assistant", + Content = answer + }); + Console.WriteLine($"Assistant: {answer}\n"); + } + else + { + var answer = choice.Content ?? ""; + messages.Add(new ChatMessage + { + Role = "assistant", + Content = answer + }); + Console.WriteLine($"Assistant: {answer}\n"); + } +} + +await model.UnloadAsync(); +Console.WriteLine("Model unloaded. 
Goodbye!"); +// +// diff --git a/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj b/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj new file mode 100644 index 00000000..a3533047 --- /dev/null +++ b/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj @@ -0,0 +1,50 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/tutorial-tool-calling/TutorialToolCalling.sln b/samples/cs/tutorial-tool-calling/TutorialToolCalling.sln new file mode 100644 index 00000000..6a86331b --- /dev/null +++ b/samples/cs/tutorial-tool-calling/TutorialToolCalling.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TutorialToolCalling", "TutorialToolCalling.csproj", "{155923AB-A0C6-447D-A46A-7C8318D31596}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {155923AB-A0C6-447D-A46A-7C8318D31596}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Debug|Any CPU.Build.0 = Debug|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Debug|x64.ActiveCfg = Debug|x64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Debug|x64.Build.0 = Debug|x64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Debug|x86.ActiveCfg = Debug|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Debug|x86.Build.0 = Debug|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Release|Any CPU.ActiveCfg = Release|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Release|Any CPU.Build.0 = Release|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Release|x64.ActiveCfg = Release|x64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Release|x64.Build.0 = Release|x64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Release|x86.ActiveCfg = Release|ARM64 + {155923AB-A0C6-447D-A46A-7C8318D31596}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/tutorial-voice-to-text/Program.cs b/samples/cs/tutorial-voice-to-text/Program.cs new file mode 100644 index 00000000..976b44e4 --- /dev/null +++ b/samples/cs/tutorial-voice-to-text/Program.cs @@ -0,0 +1,104 @@ +// +// +using Microsoft.AI.Foundry.Local; +using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; +using Microsoft.Extensions.Logging; +using System.Text; +// + +// +CancellationToken ct = CancellationToken.None; + +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +using var loggerFactory = LoggerFactory.Create(builder => +{ + builder.SetMinimumLevel( + Microsoft.Extensions.Logging.LogLevel.Information + ); +}); +var logger = loggerFactory.CreateLogger(); + +// Initialize the singleton instance +await FoundryLocalManager.CreateAsync(config, logger); +var mgr = FoundryLocalManager.Instance; +var catalog = await mgr.GetCatalogAsync(); +// + +// +// Load the speech-to-text model +var speechModel = await catalog.GetModelAsync("whisper-tiny") + ?? 
throw new Exception("Speech model not found"); + +await speechModel.DownloadAsync(progress => +{ + Console.Write($"\rDownloading speech model: {progress:F2}%"); + if (progress >= 100f) Console.WriteLine(); +}); + +await speechModel.LoadAsync(); +Console.WriteLine("Speech model loaded."); + +// Transcribe the audio file +var audioClient = await speechModel.GetAudioClientAsync(); +var transcriptionText = new StringBuilder(); + +Console.WriteLine("\nTranscription:"); +var audioResponse = audioClient + .TranscribeAudioStreamingAsync("meeting-notes.wav", ct); +await foreach (var chunk in audioResponse) +{ + Console.Write(chunk.Text); + transcriptionText.Append(chunk.Text); +} +Console.WriteLine(); + +// Unload the speech model to free memory +await speechModel.UnloadAsync(); +// + +// +// Load the chat model for summarization +var chatModel = await catalog.GetModelAsync("qwen2.5-0.5b") + ?? throw new Exception("Chat model not found"); + +await chatModel.DownloadAsync(progress => +{ + Console.Write($"\rDownloading chat model: {progress:F2}%"); + if (progress >= 100f) Console.WriteLine(); +}); + +await chatModel.LoadAsync(); +Console.WriteLine("Chat model loaded."); + +// Summarize the transcription into organized notes +var chatClient = await chatModel.GetChatClientAsync(); +var messages = new List +{ + new ChatMessage + { + Role = "system", + Content = "You are a note-taking assistant. Summarize " + + "the following transcription into organized, " + + "concise notes with bullet points." + }, + new ChatMessage + { + Role = "user", + Content = transcriptionText.ToString() + } +}; + +var chatResponse = await chatClient.CompleteChatAsync(messages, ct); +var summary = chatResponse.Choices[0].Message.Content; +Console.WriteLine($"\nSummary:\n{summary}"); + +// Clean up +await chatModel.UnloadAsync(); +Console.WriteLine("\nDone. 
Models unloaded."); +// +// diff --git a/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj b/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj new file mode 100644 index 00000000..a3533047 --- /dev/null +++ b/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj @@ -0,0 +1,50 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.sln b/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.sln new file mode 100644 index 00000000..ae2a2b39 --- /dev/null +++ b/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TutorialVoiceToText", "TutorialVoiceToText.csproj", "{C12663C3-AB3F-4652-BC43-A92E43602ACC}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Debug|Any CPU.ActiveCfg = Debug|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Debug|Any CPU.Build.0 = Debug|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Debug|x64.ActiveCfg = Debug|x64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Debug|x64.Build.0 = Debug|x64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Debug|x86.ActiveCfg = Debug|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Debug|x86.Build.0 = Debug|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Release|Any CPU.ActiveCfg = Release|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Release|Any CPU.Build.0 = Release|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Release|x64.ActiveCfg = Release|x64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Release|x64.Build.0 = Release|x64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Release|x86.ActiveCfg = Release|ARM64 + {C12663C3-AB3F-4652-BC43-A92E43602ACC}.Release|x86.Build.0 = Release|ARM64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/js/audio-transcription-example/app.js b/samples/js/audio-transcription-example/app.js index 78efc8af..c2517ec7 100644 --- a/samples/js/audio-transcription-example/app.js +++ b/samples/js/audio-transcription-example/app.js @@ -1,14 +1,20 @@ +// +// import { FoundryLocalManager } from 'foundry-local-sdk'; +// // Initialize the Foundry Local SDK console.log('Initializing Foundry Local SDK...'); +// const manager = FoundryLocalManager.create({ appName: 'foundry_local_samples', logLevel: 'info' }); +// console.log('✓ SDK initialized successfully'); +// // Get the model object const modelAlias = 'whisper-tiny'; // Using an available model from the list above let model = await manager.catalog.getModel(modelAlias); @@ -25,15 +31,18 @@ console.log('\n✓ Model downloaded'); console.log(`\nLoading model ${modelAlias}...`); await model.load(); console.log('✓ Model loaded'); +// +// // Create audio client console.log('\nCreating audio client...'); const audioClient = model.createAudioClient(); console.log('✓ Audio client created'); // Example 
audio transcription -console.log('\nTesting audio transcription...'); -const transcription = await audioClient.transcribe('./Recording.mp3'); +const audioFile = process.argv[2] || './Recording.mp3'; +console.log(`\nTranscribing ${audioFile}...`); +const transcription = await audioClient.transcribe(audioFile); console.log('\nAudio transcription result:'); console.log(transcription.text); @@ -41,13 +50,17 @@ console.log('✓ Audio transcription completed'); // Same example but with streaming transcription using async iteration console.log('\nTesting streaming audio transcription...'); -for await (const result of audioClient.transcribeStreaming('./Recording.mp3')) { +for await (const result of audioClient.transcribeStreaming(audioFile)) { // Output the intermediate transcription results as they are received without line ending process.stdout.write(result.text); } console.log('\n✓ Streaming transcription completed'); +// +// // Unload the model console.log('Unloading model...'); await model.unload(); console.log(`✓ Model unloaded`); +// +// diff --git a/samples/js/chat-and-audio-foundry-local/src/app.js b/samples/js/chat-and-audio-foundry-local/src/app.js index 49ce199c..50bc195f 100644 --- a/samples/js/chat-and-audio-foundry-local/src/app.js +++ b/samples/js/chat-and-audio-foundry-local/src/app.js @@ -11,7 +11,7 @@ const WHISPER_MODEL = "whisper-tiny"; async function main() { console.log("Initializing Foundry Local SDK..."); const manager = FoundryLocalManager.create({ - appName: "ChatAndAudioSample", + appName: "foundry_local_samples", logLevel: "info", }); diff --git a/samples/js/langchain-integration-example/app.js b/samples/js/langchain-integration-example/app.js index 94e0afdc..9e4b7b60 100644 --- a/samples/js/langchain-integration-example/app.js +++ b/samples/js/langchain-integration-example/app.js @@ -1,17 +1,22 @@ +// +// import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { FoundryLocalManager } from 'foundry-local-sdk'; +// // Initialize the Foundry Local SDK console.log('Initializing Foundry Local SDK...'); const endpointUrl = 'http://localhost:5764'; +// const manager = FoundryLocalManager.create({ appName: 'foundry_local_samples', logLevel: 'info', webServiceUrls: endpointUrl }); +// console.log('✓ SDK initialized successfully'); // Get the model object @@ -35,6 +40,7 @@ console.log('\nStarting web service...'); manager.startWebService(); console.log('✓ Web service started'); +// // Configure ChatOpenAI to use your locally-running model const llm = new ChatOpenAI({ @@ -61,7 +67,9 @@ const prompt = ChatPromptTemplate.fromMessages([ // Build a simple chain by connecting the prompt to the language model const chain = prompt.pipe(llm); +// +// const input = "I love to code."; console.log(`Translating '${input}' to French...`); @@ -76,9 +84,11 @@ await chain.invoke({ }).catch(err => { console.error("Error:", err); }); +// // Tidy up console.log('Unloading model and stopping web service...'); await model.unload(); manager.stopWebService(); -console.log(`✓ Model unloaded and web service stopped`); \ No newline at end of file +console.log(`✓ Model unloaded and web service stopped`); +// \ No newline at end of file diff --git a/samples/js/native-chat-completions/app.js b/samples/js/native-chat-completions/app.js index 67348e8c..399fd634 100644 --- a/samples/js/native-chat-completions/app.js +++ b/samples/js/native-chat-completions/app.js @@ -1,14 +1,20 @@ +// +// import { FoundryLocalManager } from 'foundry-local-sdk'; +// // 
Initialize the Foundry Local SDK console.log('Initializing Foundry Local SDK...'); +// const manager = FoundryLocalManager.create({ appName: 'foundry_local_samples', logLevel: 'info' }); +// console.log('✓ SDK initialized successfully'); +// // Get the model object const modelAlias = 'qwen2.5-0.5b'; // Using an available model from the list above const model = await manager.catalog.getModel(modelAlias); @@ -24,7 +30,9 @@ console.log('\n✓ Model downloaded'); console.log(`\nLoading model ${modelAlias}...`); await model.load(); console.log('✓ Model loaded'); +// +// // Create chat client console.log('\nCreating chat client...'); const chatClient = model.createChatClient(); @@ -38,7 +46,9 @@ const completion = await chatClient.completeChat([ console.log('\nChat completion result:'); console.log(completion.choices[0]?.message?.content); +// +// // Example streaming completion console.log('\nTesting streaming completion...'); for await (const chunk of chatClient.completeStreamingChat( @@ -50,9 +60,13 @@ for await (const chunk of chatClient.completeStreamingChat( } } console.log('\n'); +// +// // Unload the model console.log('Unloading model...'); await model.unload(); console.log(`✓ Model unloaded`); +// +// \ No newline at end of file diff --git a/samples/js/tool-calling-foundry-local/src/app.js b/samples/js/tool-calling-foundry-local/src/app.js index f11eacdd..f92464ee 100644 --- a/samples/js/tool-calling-foundry-local/src/app.js +++ b/samples/js/tool-calling-foundry-local/src/app.js @@ -1,8 +1,11 @@ +// // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +// import { OpenAI } from "openai"; import { FoundryLocalManager } from "foundry-local-sdk"; +// // By using an alias, the most suitable model will be downloaded // to your end-user's device. @@ -10,22 +13,27 @@ import { FoundryLocalManager } from "foundry-local-sdk"; // following command in your terminal: `foundry model list`. 
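// Aside: a quick way to see what an alias resolves to on the current machine.
// A minimal, self-contained sketch reusing the FoundryLocalManager.create and
// catalog.getModel calls from this sample; reading `id` off the returned model
// object is an assumption, not something this patch confirms.
import { FoundryLocalManager } from "foundry-local-sdk";

const probeManager = FoundryLocalManager.create({
  appName: "foundry_local_samples",
  logLevel: "info"
});
const resolved = await probeManager.catalog.getModel("qwen2.5-0.5b");
console.log(`alias qwen2.5-0.5b resolved to: ${resolved?.id ?? "(not found)"}`);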
const alias = "qwen2.5-0.5b"; +// function multiplyNumbers(first, second) { return first * second; } +// async function runToolCallingExample() { let manager = null; let model = null; try { + // console.log("Initializing Foundry Local SDK..."); manager = FoundryLocalManager.create({ - appName: "FoundryLocalSample", + appName: "foundry_local_samples", serviceEndpoint: "http://localhost:5000", logLevel: "info" }); + // + // const catalog = manager.catalog; model = await catalog.getModel(alias); if (!model) { @@ -47,7 +55,9 @@ async function runToolCallingExample() { baseURL: `${endpoint.replace(/\/$/, "")}/v1`, apiKey: "local" }); + // + // // Prepare messages const messages = [ { @@ -154,7 +164,9 @@ async function runToolCallingExample() { } console.log(); + // } finally { + // if (model) { try { if (await model.isLoaded()) { @@ -172,6 +184,7 @@ async function runToolCallingExample() { console.warn("Cleanup warning while stopping service:", cleanupError); } } + // } } @@ -179,3 +192,4 @@ await runToolCallingExample().catch((error) => { console.error("Error running sample:", error); process.exitCode = 1; }); +// diff --git a/samples/js/tutorial-chat-assistant/app.js b/samples/js/tutorial-chat-assistant/app.js new file mode 100644 index 00000000..9a5a430c --- /dev/null +++ b/samples/js/tutorial-chat-assistant/app.js @@ -0,0 +1,84 @@ +// +// +import { FoundryLocalManager } from 'foundry-local-sdk'; +import * as readline from 'readline'; +// + +// +// Initialize the Foundry Local SDK +const manager = FoundryLocalManager.create({ + appName: 'foundry_local_samples', + logLevel: 'info' +}); + +// Select and load a model from the catalog +const model = await manager.catalog.getModel('qwen2.5-0.5b'); + +await model.download((progress) => { + process.stdout.write(`\rDownloading model: ${progress.toFixed(2)}%`); +}); +console.log('\nModel downloaded.'); + +await model.load(); +console.log('Model loaded and ready.'); + +// Create a chat client +const chatClient = model.createChatClient(); +// + +// +// Start the conversation with a system prompt +const messages = [ + { + role: 'system', + content: 'You are a helpful, friendly assistant. Keep your responses ' + + 'concise and conversational. If you don\'t know something, say so.' + } +]; +// + +// Set up readline for console input +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout +}); + +const askQuestion = (prompt) => new Promise((resolve) => rl.question(prompt, resolve)); + +console.log('\nChat assistant ready! Type \'quit\' to exit.\n'); + +// +while (true) { + const userInput = await askQuestion('You: '); + if (userInput.trim().toLowerCase() === 'quit' || + userInput.trim().toLowerCase() === 'exit') { + break; + } + + // Add the user's message to conversation history + messages.push({ role: 'user', content: userInput }); + + // + // Stream the response token by token + process.stdout.write('Assistant: '); + let fullResponse = ''; + await chatClient.completeStreamingChat(messages, (chunk) => { + const content = chunk.choices?.[0]?.message?.content; + if (content) { + process.stdout.write(content); + fullResponse += content; + } + }); + console.log('\n'); + // + + // Add the complete response to conversation history + messages.push({ role: 'assistant', content: fullResponse }); +} +// + +// Clean up - unload the model +await model.unload(); +console.log('Model unloaded. 
Goodbye!'); +rl.close(); +// diff --git a/samples/js/tutorial-chat-assistant/package.json b/samples/js/tutorial-chat-assistant/package.json new file mode 100644 index 00000000..3e2393ce --- /dev/null +++ b/samples/js/tutorial-chat-assistant/package.json @@ -0,0 +1,9 @@ +{ + "name": "tutorial-chat-assistant", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "dependencies": { + "foundry-local-sdk": "*" + } +} diff --git a/samples/js/tutorial-document-summarizer/app.js b/samples/js/tutorial-document-summarizer/app.js new file mode 100644 index 00000000..f43e204d --- /dev/null +++ b/samples/js/tutorial-document-summarizer/app.js @@ -0,0 +1,84 @@ +// +// +import { FoundryLocalManager } from 'foundry-local-sdk'; +import { readFileSync, readdirSync, statSync } from 'fs'; +import { join, basename } from 'path'; +// + +async function summarizeFile(chatClient, filePath, systemPrompt) { + const content = readFileSync(filePath, 'utf-8'); + const messages = [ + { role: 'system', content: systemPrompt }, + { role: 'user', content: content } + ]; + + const response = await chatClient.completeChat(messages); + console.log(response.choices[0]?.message?.content); +} + +async function summarizeDirectory(chatClient, directory, systemPrompt) { + const txtFiles = readdirSync(directory) + .filter(f => f.endsWith('.txt')) + .sort(); + + if (txtFiles.length === 0) { + console.log(`No .txt files found in ${directory}`); + return; + } + + for (const fileName of txtFiles) { + console.log(`--- ${fileName} ---`); + await summarizeFile(chatClient, join(directory, fileName), systemPrompt); + console.log(); + } +} + +// +// Initialize the Foundry Local SDK +const manager = FoundryLocalManager.create({ + appName: 'foundry_local_samples', + logLevel: 'info' +}); + +// Select and load a model from the catalog +const model = await manager.catalog.getModel('qwen2.5-0.5b'); + +await model.download((progress) => { + process.stdout.write(`\rDownloading model: ${progress.toFixed(2)}%`); +}); +console.log('\nModel downloaded.'); + +await model.load(); +console.log('Model loaded and ready.\n'); + +// Create a chat client +const chatClient = model.createChatClient(); +// + +// +const systemPrompt = + 'Summarize the following document into concise bullet points. ' + + 'Focus on the key points and main ideas.'; + +// +const target = process.argv[2] || 'document.txt'; +// + +try { + const stats = statSync(target); + if (stats.isDirectory()) { + await summarizeDirectory(chatClient, target, systemPrompt); + } else { + console.log(`--- ${basename(target)} ---`); + await summarizeFile(chatClient, target, systemPrompt); + } +} catch { + console.log(`--- ${basename(target)} ---`); + await summarizeFile(chatClient, target, systemPrompt); +} +// + +// Clean up +await model.unload(); +console.log('\nModel unloaded. 
Done!'); +// diff --git a/samples/js/tutorial-document-summarizer/package.json b/samples/js/tutorial-document-summarizer/package.json new file mode 100644 index 00000000..c3c62321 --- /dev/null +++ b/samples/js/tutorial-document-summarizer/package.json @@ -0,0 +1,9 @@ +{ + "name": "tutorial-document-summarizer", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "dependencies": { + "foundry-local-sdk": "*" + } +} diff --git a/samples/js/tutorial-tool-calling/app.js b/samples/js/tutorial-tool-calling/app.js new file mode 100644 index 00000000..efdd710c --- /dev/null +++ b/samples/js/tutorial-tool-calling/app.js @@ -0,0 +1,186 @@ +// +// +import { FoundryLocalManager } from 'foundry-local-sdk'; +import * as readline from 'readline'; +// + +// +// --- Tool definitions --- +const tools = [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get the current weather for a location', + parameters: { + type: 'object', + properties: { + location: { + type: 'string', + description: 'The city or location' + }, + unit: { + type: 'string', + enum: ['celsius', 'fahrenheit'], + description: 'Temperature unit' + } + }, + required: ['location'] + } + } + }, + { + type: 'function', + function: { + name: 'calculate', + description: 'Perform a math calculation', + parameters: { + type: 'object', + properties: { + expression: { + type: 'string', + description: + 'The math expression to evaluate' + } + }, + required: ['expression'] + } + } + } +]; + +// --- Tool implementations --- +function getWeather(location, unit = 'celsius') { + return { + location, + temperature: unit === 'celsius' ? 22 : 72, + unit, + condition: 'Sunny' + }; +} + +function calculate(expression) { + // Input is validated against a strict allowlist of numeric/math characters, + // making this safe from code injection in this tutorial context. + const allowed = /^[0-9+\-*/(). ]+$/; + if (!allowed.test(expression)) { + return { error: 'Invalid expression' }; + } + try { + const result = Function( + `"use strict"; return (${expression})` + )(); + return { expression, result }; + } catch (err) { + return { error: err.message }; + } +} + +const toolFunctions = { + get_weather: (args) => getWeather(args.location, args.unit), + calculate: (args) => calculate(args.expression) +}; +// + +// +async function processToolCalls(messages, response, chatClient) { + let choice = response.choices[0]?.message; + + while (choice?.tool_calls?.length > 0) { + messages.push(choice); + + for (const toolCall of choice.tool_calls) { + const functionName = toolCall.function.name; + const args = JSON.parse(toolCall.function.arguments); + console.log( + ` Tool call: ${functionName}` + + `(${JSON.stringify(args)})` + ); + + const result = toolFunctions[functionName](args); + messages.push({ + role: 'tool', + tool_call_id: toolCall.id, + content: JSON.stringify(result) + }); + } + + response = await chatClient.completeChat( + messages, { tools } + ); + choice = response.choices[0]?.message; + } + + return choice?.content ?? 
''; +} +// + +// +// --- Main application --- +const manager = FoundryLocalManager.create({ + appName: 'foundry_local_samples', + logLevel: 'info' +}); + +const model = await manager.catalog.getModel('qwen2.5-0.5b'); + +await model.download((progress) => { + process.stdout.write( + `\rDownloading model: ${progress.toFixed(2)}%` + ); +}); +console.log('\nModel downloaded.'); + +await model.load(); +console.log('Model loaded and ready.'); + +const chatClient = model.createChatClient(); + +const messages = [ + { + role: 'system', + content: + 'You are a helpful assistant with access to tools. ' + + 'Use them when needed to answer questions accurately.' + } +]; + +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout +}); + +const askQuestion = (prompt) => + new Promise((resolve) => rl.question(prompt, resolve)); + +console.log( + '\nTool-calling assistant ready! Type \'quit\' to exit.\n' +); + +while (true) { + const userInput = await askQuestion('You: '); + if ( + userInput.trim().toLowerCase() === 'quit' || + userInput.trim().toLowerCase() === 'exit' + ) { + break; + } + + messages.push({ role: 'user', content: userInput }); + + const response = await chatClient.completeChat( + messages, { tools } + ); + const answer = await processToolCalls( + messages, response, chatClient + ); + + messages.push({ role: 'assistant', content: answer }); + console.log(`Assistant: ${answer}\n`); +} + +await model.unload(); +console.log('Model unloaded. Goodbye!'); +rl.close(); +// +// diff --git a/samples/js/tutorial-tool-calling/package.json b/samples/js/tutorial-tool-calling/package.json new file mode 100644 index 00000000..07337434 --- /dev/null +++ b/samples/js/tutorial-tool-calling/package.json @@ -0,0 +1,9 @@ +{ + "name": "tutorial-tool-calling", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "dependencies": { + "foundry-local-sdk": "*" + } +} diff --git a/samples/js/tutorial-voice-to-text/app.js b/samples/js/tutorial-voice-to-text/app.js new file mode 100644 index 00000000..08074100 --- /dev/null +++ b/samples/js/tutorial-voice-to-text/app.js @@ -0,0 +1,78 @@ +// +// +import { FoundryLocalManager } from 'foundry-local-sdk'; +import { fileURLToPath } from 'url'; +import path from 'path'; +// + +const __dirname = path.dirname(fileURLToPath(import.meta.url)); + +// +// Initialize the Foundry Local SDK +const manager = FoundryLocalManager.create({ + appName: 'foundry_local_samples', + logLevel: 'info' +}); +// + +// +// Load the speech-to-text model +const speechModel = await manager.catalog.getModel('whisper-tiny'); +await speechModel.download((progress) => { + process.stdout.write( + `\rDownloading speech model: ${progress.toFixed(2)}%` + ); +}); +console.log('\nSpeech model downloaded.'); + +await speechModel.load(); +console.log('Speech model loaded.'); + +// Transcribe the audio file +const audioClient = speechModel.createAudioClient(); +const transcription = await audioClient.transcribe( + path.join(__dirname, 'meeting-notes.wav') +); +console.log(`\nTranscription:\n${transcription.text}`); + +// Unload the speech model to free memory +await speechModel.unload(); +// + +// +// Load the chat model for summarization +const chatModel = await manager.catalog.getModel('qwen2.5-0.5b'); +await chatModel.download((progress) => { + process.stdout.write( + `\rDownloading chat model: ${progress.toFixed(2)}%` + ); +}); +console.log('\nChat model downloaded.'); + +await chatModel.load(); +console.log('Chat model loaded.'); + +// Summarize the transcription into 
organized notes +const chatClient = chatModel.createChatClient(); +const messages = [ + { + role: 'system', + content: 'You are a note-taking assistant. Summarize ' + + 'the following transcription into organized, ' + + 'concise notes with bullet points.' + }, + { + role: 'user', + content: transcription.text + } +]; + +const response = await chatClient.completeChat(messages); +const summary = response.choices[0]?.message?.content; +console.log(`\nSummary:\n${summary}`); + +// Clean up +await chatModel.unload(); +console.log('\nDone. Models unloaded.'); +// +// diff --git a/samples/js/tutorial-voice-to-text/package.json b/samples/js/tutorial-voice-to-text/package.json new file mode 100644 index 00000000..55f2ea83 --- /dev/null +++ b/samples/js/tutorial-voice-to-text/package.json @@ -0,0 +1,9 @@ +{ + "name": "tutorial-voice-to-text", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "dependencies": { + "foundry-local-sdk": "*" + } +} diff --git a/samples/js/web-server-example/app.js b/samples/js/web-server-example/app.js index 5e97edfc..b03bf9df 100644 --- a/samples/js/web-server-example/app.js +++ b/samples/js/web-server-example/app.js @@ -1,18 +1,24 @@ +// +// import { FoundryLocalManager } from 'foundry-local-sdk'; import { OpenAI } from 'openai'; +// // Initialize the Foundry Local SDK console.log('Initializing Foundry Local SDK...'); const endpointUrl = 'http://localhost:5764'; +// const manager = FoundryLocalManager.create({ appName: 'foundry_local_samples', logLevel: 'info', webServiceUrls: endpointUrl }); +// console.log('✓ SDK initialized successfully'); +// // Get the model object const modelAlias = 'qwen2.5-0.5b'; // Using an available model from the list above const model = await manager.catalog.getModel(modelAlias); @@ -28,7 +34,9 @@ console.log('\n✓ Model downloaded'); console.log(`\nLoading model ${modelAlias}...`); await model.load(); console.log('✓ Model loaded'); +// +// // Start the web service console.log('\nStarting web service...'); manager.startWebService(); @@ -52,9 +60,11 @@ const response = await openai.chat.completions.create({ }); console.log(response.choices[0].message.content); +// // Tidy up console.log('Unloading model and stopping web service...'); await model.unload(); manager.stopWebService(); console.log(`✓ Model unloaded and web service stopped`); +// diff --git a/samples/python/audio-transcription/Recording.mp3 b/samples/python/audio-transcription/Recording.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..deb38418bf5fde82fe380add4a999d513baa9536 GIT binary patch literal 329760 zcmd4330xC*w>Lgn2_#_HLRiFrf{_Y;fg&QZ83&3w34(wqi?)?Mv?OdIi#st0 zDzb`=8W!7XttMen2`G!UQf)IcE&h-)}+YON^auTKI?*A%zo!2*N1LIYgGGnHpDbw&ft|fCY#z$?S?P1vb+h9wcZe=@oA zx2YqOkv+j%*4Ff8W7x-Eel(HN+0F^)2ey3o+Y{*I>Rs=ZSKj}0Zs{h2p5?$cmLwee zG$tp@%=YPbCXJ&%;L=dQn@ByqxG zg(B#F>ThGURTEhs8D{>)BJ~t;!G5pzCx;Uk^*v6uHyvb_84P}vk1&sapcx&fXgJGaJ%WHD?NO(7Lv-0ScfTwk;9o&y?=}6`>(M zwyTqc6vcbM8SZBpc=oz^=b@Fs0QipkfApA!+{#7}9A7DdEFal@J1u#swT!bfk~a9V z(`%KYKXsZ~!Q35U&%c=TAa(kqE9HZ|MR`}M;EM7RK3DDF+0Ux7UUz8Vt7yX^GMC-$ z2X1hR=m#nTxqMD`U$-XaU=caY*3{_$hV>UR>!p3&5-q2%J2|APf0!b>h~CBT53H|1 z?X9bHq}=xQs?rC4c4?Sk(b(~okzBrNi}qA-aNrhgkOlL=L)sBOHlA0yifV0gsCeo~ zjg{*n&L?Z!1*!+L${nb^;yx@`)bEmWE@IpD$rAN_Wu})jC?I=|XXVN%qsYjr=$Ufi zSW)m%IoYB^dM&z3$>VqNc$5-pmsT^RVpW%vh{Ywcbj*%R)gi;>*f8poQgQ|5d1*&I zJ-IxtaH@;gWzj_v2KiW72El?gQP)bso3r9M- zdAz9(ttMT*+*&^K?)hqYwcIvcESFpV`7aMsDZ6y^2;CY=i=Us!5497tH`?2A;|yFo zo~d<}VrZRCZVRQ_Ij#$)z%(T8f9VyJmCpkGZIYrjd-l2Y=$Qj#X 
[remainder of the base85 binary patch payload for samples/python/audio-transcription/Recording.mp3 truncated]
zyFhSh(OC=F@ox4ot!Rg{DRZw+1m|93ZdVX`|Exut>|#}EpWojd`mp32D8z$atDs8n zYIRj*roT_{qQ35(_NG-9ey_*h<+&t3hExsgfa|!g^5@#yIa#0~iZT4cE-g~|@!&`F`Gp*F!Y=~LOn(1i zLkuOY$liG^6B@I>Wu_g3-J-pIOWH4@_wireKqo=yE$5+BQEC=Z3Qb8f9zi67SHP$U1xRwU5?s*?ME+e9WJ7&A#|2I z;p)X$Bw5|^%{#x>HC(D{>kh~t5gcE^co+*hR9Xz`Q``eyBgP>znd9>^%Bw%|-JCJF z`I}BrM%4x=&$P1a*D$P@H}>kteKcCC^Anuu2cJcF9u~mBz5WQ=&jVechT1R|wf}bi z?W!oBz9W^HX-cTyE63~mUj31KM=YH%^jfCG`o^u9jE0R~G zlz7|OP1{uon!V$IBdFVwp8VqZ%E+{|+c>`e63|cgL9#wP_r`v{z0VhF$jWaIu3ZBO zM7VVnnt}5*K(gb7%yF~;v z57a1h+5y?~n*I=6SQr<}wexlAP=8$wzpgS3*qVLSUYAlc#~>@XRi8RLXy+YPL9HC_ z^h+D81+zf|l`mW$(Y9G7p61m6sbm9C4Aa?^IcP8FhJa~1p12F~)@v-Iyi4;Tn>YKh z1LT=uIT%D{ts!d5g%UT&^*uBF9+c!AJ9ZG5lNMlUO1vv${ewq&p8mlPyhgMg!w9i8 z<7v)Q->12a8QG0#%9|qszgKlv9B~`aFl`1kVkW?@#X_M=o7Thao4OA{oi}_2dA#7B z;}lB}gINd6F?fG*WU@fK%S4cq1vZ8O?5m@?H8`+yP+B?yR&c`B+BUI#dF?6jrOn5m zLwRWJsr1*Sg6NKOrISx#@#N!_lG4PWBH;W$G)j9j^{557RD7vG$1WFR27!}8NYFUfv62SMw8+>{1@iyn-6{)Yg z(ygIWu87_RTOIP{v?CgE0$j z$Jg!6xSPw)xusi;>W2h0=?OIw`xp1!`E zEr7b2PQUkW>Ms@;edHi1t?GZdeBN)K*G05tDCfGb?$BzxFJ^CS{`v4ZL*`(0=kGsf zR&+oT=&hH_bh-EUINit{|2{r9egYCib#V&+@z=8di)D-pTe$M{L5!$kFeAZE;=xFv zGA=YmU9f5?3e_FRJRWgJAy)X>zBu&5v)*zx2=`32>c-a@)c75<^CJF zyUun~x)F;Qwmm$X>RVI1&6)U*R)+CFmvq>4joHsKI8yxO?MCA&AqgOKTZpqEhIa}NGVw(Ui$ zA7!|f|C4|8Z1S5gAadxt?r9BJ?X`}OlrTEf-n{bQ;yKsZXx|U?vSpjT*F4*KNTJ)9 zG0SfK_`-qh{kd`_`+Lp#h8B!RsSSF5)cx&|w~znr`3}r_nCjHX&|!o!>Kp&>yx~2 z!2)l7qoaul#V!C15!W=!QLy5W}dRP*$WeBG;4f17)59=QMH>Tmw|^xWUS5E*9LU%q(u zp*{Wk64Ufm1+PvupDHlyJzWjYvh<2DVEF=}M`gWrSiBhN_clCT{B)5N$2ZyK5hWa3 zv25rKzVBJaECn9Saw5p3FtU(Nu0F8E0_EITq%7<(o1&9v4(EIqbYOLTnJ+W1xj24x zn8`3s`L1~tMmWte54kOd6NG5Wj#*aMo0WvEp^+M5ib-zQ1=bcXNN&d}-TL>s-7=iM zcKT4|OJ9nu&~5o5B3#tlA+oaSdUoz{szd850pq>$YmsrbyRHe2GJFMxZPyvvMkGK= zHTA_k-KFT2b(_UCT?0FJo%;ya?fwGlvK_1B-!F?U(432ac#7tsaQx-6{?If(v%lo` zzY4~G`|;DP+@5PMQV!7fc|P-q)jPPI35`r%Wa4`qlG~2^#icKVgoWq%A3fii`LGTG z;757!yS@=zWAQC&a%=7emlSGkepwc}W&JPVZQpdX{t|xY%UFY)GSwc@J9V)#VV^iM zhFD_Xi{qQ(>NFF8r0&1_2H$Eq)KNoqPh&*R(;S20q4~>@-R$TCK2N0D5?TqZm{vlf zSvfP=R%}ZsMFBTyT+scJ{qUjrc=9!pQBaYpY#e)rP4eHT2JB`(hn?hi$Qz4{NJh{? z^PTwu**-_}1=@w`Yi1;eK<5DG0CoV|is2?KOLDeywqkf-Whj5KK_eqNAzJdwcaac+ z4N~kWbvEJCq|Ws0Mx>0$r>&gz1TFY7hQcNXZP%7%KFh5-u+vnUYbxvV3mY1|8w#EcORH(CZ|1GdFi;T`{oAqVT(ntXgp&9xBm|XsWfN!?yjvuHmPLWY!8iV z{xJJ#K9Ai^HCh-`O#G0bX^IRlO5*e^ncl?dF@BbAZ2@5=%U^JOqZe*}9)M(6e)R@l z+qmx(Ct7d+ev)gZcmO6JP{nd`^*J7m%apFSG;)q9v5fJxG{h_-84OyEKvpj(j-Dzx zxavA9gtdz9G|w$>)pbYVw9(Ode&$G@c>tdpz$XcPNz5_^r-wo0uw`We+9@jrp^|MN z3CkG5A!itEY4ts9vQPj8Qd$HH=0LfW5%dm-GWkRX*%`rt>PX{4@uM_$Q$&%~UQ|-a z#(K>?UP?}JXCt0_Se)|d`AQ%$p?R1NSA2ennIvI^_ptwY{QWnyRT?_we_8__Z<^gvG|@V%R<(>6&I`56mwjoqD1>dG?WY$ zx@%ZK@4_nFErHj13=LzMU24hHs4HI3SC5v5Bsyz(HuOLB^Bs`i9aVqXSLW-qK3Z4l zpk47=idJ?a(46I5=;o}+RwMwWRYU-jsGaDWHO;GG>`uIgfLqD}oA(la@Gy7RNd_xEsovLdi*?+J}V?$!KU# z65B15_>de8J9;)bYq{oGv!TUFp&=q01~Ihkb2v3cOVO5SOHP|p)Lrz3mzP+I&q-aL z*xU3%L&c+)UsNc*C~4YcLmZ}~S8XIO6HlejCO!J;f290Mi3|8lqH4jkl@*u9v#~y! 
zhsB>|tK8E5*d%Fa{#%Nqxj5MhLZ&S9a&5krKC7iy-H(RwuLs0T-Mb_Wr5+xV3l!Np z?mN%Q4){LeNJ7Rn%rXR{g;33j48qHhdVYP#Xafd%C!vQGUCkxY;O|3+8fOX z;AQi6+1GQLf`ZC9S7%|lq!FqoII3P|yrgIs<*uI)9d`~05oc>Xf)B`sD-;Dz?@Te1 zirp>-er&@r%+PIkA3H64PPgID&Zk|Z1zQ!*mR!^SkHq(lYA(C1QBSE?&YB3O)apK` z0vJM{Ql%!JE+FxRR6y5@A@X|!+v|Y5$h&l&n`W*c-CEn z4Rr^VsJhc^43jpHR|oOpwZ(UZO=H3)69W+vNw?A0Oj;$zLir)Q0|)=K+^XO3(i+lz z@#4fpp>O@R!TdGhwUWJy{`ROg@<)qf+gCUj6&mHs-s#+u4$#8v&rCydPnMHmR@?d8+!nKqFLh-Z%2kXrm=a3ePbR{WKJ+wIcS*X=0#*I z!NhYcxNjbj_>IuJ52e=`(1zO z=Yxe#LM-0-9ZgXdSTG@B4D;fUAagdmGL%-P60j@T&PC+xvT}3#B4By%vdd&ueQHgY zv;Q+75ZV4k)MsUPo&8Dnrs`q00~o#<(jYch*>Sd$?5i|IyV>`%io+hf49TKZX&2_R z{ix88lnrDjwEQI6c@ZPx8Fm1ZVp88n!Op_3p;n)Ep0tU><~T3ZIul_Ec3wH>+K|Ck zt_?X&DWZByg#^ur z7jm;_uRGIqWb@B=8Yhs^?8+j}W#xNQDtO=7sE#No9(TgX0=BCct!PR^(HGK2d@6L7 z#-HybB!;B35R_9{7``9jTvi>lKJf7tTgn`@p0nE)k>*Lfy1fY+-;ZEVX9*8TOV& zYB0<)P){ly=`B3c)b_vdcBwEtlm3hhU0tBXT;P4dH)OL z8~F_>l+`y0(-yG>WNJ~6to}fDIbG`(=v+kQ)Tj9{1yp?R^#fSiBBq_O5%qOx8!{BV zmVl@Vp$;+)FKk-?V(CM+=pt&N8croegSf(Jc*z?Xf6vd*d=o9K<%?DOXtoaURJl`JD zQ3^*06-;*sn7$$!Ae{Im)IU5?;bny>O~U`6{%CrB@F@MA%{&^lplk)Yvj5JMIx*!X z3K9Rp=(=B{t;<|FpY%!ySP;DKZb~hcAVqpvxYC|SN)0OymDFH%g7CO0wGVc;1!Xek z+!#XU8VaGz^GU)idSZV7UZWUkV4C!hm}Fu*qQYwu6-|&(W8Hv z#oU{~HFd6Q-zy^_2@r;a01jc0DG|f0I4}r^5D)<|P?g0zNGqsSX>Ccu6d))liGWxc z1cgQopslu6!zcz&aH!JVDr#}6tqQgki{!l*Zr%Hwea`PY@ApoJXy#$TleM1VzOMf@ zZ@U{QpHNrlWrFY0nc8m*S{k9{eq**hR{y49hsAMp)}Qj&qlU&qc5Zt=Kf;zt!FQ?v z?o%j^INNZ?XDz7nC%SZFadn)j;qV%gt`-nZ%(}T$1BLzWcMCTCM{+Z&k3Kg9;mWXEN841~&XzI=E%zVFB z>`jh!AH7^ez&~flS>PBK2#)oi7+N6e3uMwXy;ZNAFJa)VL~MJ$)Q3_ga#opkig(jn zZR*??sPd8TD~>uefLdnY|?8u#6Z=s<6*-F-Yt%cJ#*clEy5ty)xdev!QNHhmE> zM4deGDxi(9=qp^A$mPW@tGN6Sb=PzM@A5;2_Db~?y2^@x>4Z{UTwDJ*` zQ#eU@0|K`HPA{I(v(f$F^!xD@hmi%Z^PwE`12Tg-!Ud7uiJx@_2HLm^U#D#gN`j(l%!_Yyk3E-m{hI;uceHpj-9{=3` zIy0n$8Loy^vOp{6xs^W`TCZha9!Og1DE;}Hm4?UQUfycGzy|1m2K}JgBpnP$@OEWO zm8-_lyR|08A`F@ zl`~~9(3b88Tg##h7R!!IcMYf_e68vRW zQavO7vLJPj6XgW3aTqpqA1PZt)`@h&pq>Cg93#3RMVjwV^zW4TS>Yr;J<|O3d8S0T zBi@idL7*V%NV~lmp3`M%gfn7Hc5CJdc=^l9CuiVNx~x9Q@@E6Pdz+2JUZUmiY0r=cwyWh9K7&4_KCOb=TBk*e}7-5cmcluPKXPR_f| zbXwh0trnh9QMis%5>hX^Pwg8RwHj&V3yDk#-H5`&@9sqMOyyKf^E$_>a~l%wwa9}D zKV8l~XOu8eFZZ=wWXiVLfR?Y8lg5)*nleL>eVtKPeD<~F$Z@t5qJrAoK&+}DpUsJV z2Ff``G^a=Ees9^$bzYJXJPl6|0tsE)0x_B68=rFmgmyJ|Z8@tHM3CPl?C#NZrVR#9 zd{f9-UCc-zDu_8!Bap}8M7r@^YBEBmkWg(EB9}xPXs3kUaBQA}m3%tZzP~S?r)v-X z&{OZ)7pq7R9zX>EUIK6_M9W2hC}oTobuQRxW;(a0+GcJK0I50`B(W$Ho%50yaN3k) z6ulbV>#~|>6zz*j+6AW6PCml-ffIu9s2Dg!xmLOQTyd8payaOCL~M}t3p zEURkSQ>EE{G(KB%F;aDA26T3Z1s^Zxr$Jx&#?{2!SrF;UbYJ4r%TFtFyYZ2$Nu+bA zn~R$Tmps_>f#yoLiL~3=w}UKhG%0W;_d+izBx^bwfX1hnt01|k?sYU8VDLzc7C8yU zP9TOxLaEatf@=X<_@y8pn^0#L%>|`c&ZrQxXqSP6TmwbcQfi7DR-(o9XCPNIj|2@j zJb=m1QvlPt5-6ZTP_lvWJ0g+`SFo!fRKq?B1{^s>?JR<9*|N*kA-G=Ar2bNnslf&q z7`&t{Tq0Dl+wl8S)x9n4B-4qj*wRL3Zu1p+5sRSJbeDjD?GDzyTCZ4L4szn}{8-Rm zX~#v_#sYgI8}*(BZ~9{JuY>74{_Q6`zxNiDd9QN>U8V#Ar%<13z`x(258DHyB#_8u zqszMUWZ0`N&wAd$178AyIlygpgC!gWNSJUAo{eof=8s?Ot%bX1qT%0LsAg*5yg8|< zDciRLnN#1j@dE#CMP^p3&xg;4Ur@_6fFT4A58uELkk*|S5<&V#1Q9MjB9R1w+(;6L zX3an`YbTdXp_oqv=R>CuK0D=wtKYqOm`${Og^M9@{M9%|dn|8h>cY5g5d~2oQ$l7K2f1;>p&Xf$_d{!wKK=%1kf=={MGbNUDM&^kfu$2#w|q%A`tEfk(Qbkw73G_a>D_|q4;KZwI208 z9vQ9xqZmgI&&rxwk@@RH>Y{^VQwJ7KIK{5E*?B&5ted>pHF&J#E1uDke6e2VO4+oL zddnZmme_CCM?I;Ia9r~OLB^IV91g~8UbN6wl*7meNW($ABx)vQ}* zk;i%E4v3y(F7(V_D|Tp4jNcwzpCHS9U3~0t9b-DM4xZ3E7l=DLuu%C(}P!;?ix{W@uq(SWzLb zFf#=6Lw~<^0Q%GbuoDCZ_9elUxbfLr$zZfFME7Do0_dUxl2QUO*Fk1ruYab0pg-1K zTXVrS0p^2AYb&nxtiJu3XCTf)_4M!iFX(f4FbJ4mZD>|eaS(Hg@%G!OZ%te!>cEr{c&Yx z^xOsR=?;(F%wPAv+2M3$!|x|R!bS1dzeyZD{7Ctc$P2vquT^w 
z8L}njk9Ykq)GW9gHXmRSYsNu2U%YAallg6AtNO%|*G38J&F{izU*GWYa)l}LBySNs z)oAlg7&Dv$h5JcRxaZ1?C)E3(_QB~n$%TP{{Djw+rL^guMee^0`rSWvu1A-LZM6HE ztCNP|H`l?|hek3uPbltPR44~Db3h8O#oliA%~&vwkQZ||fTq7<9{e)vt11?3o>r|c z_uaQT#@3hY3qycKEC%x));i6q&F`PW|h%fG(!r9>ekd? zAX^NU4GH$QCyNi0XMbG@e-5Ce)p#4xWD|Na)_+b!;n$|X9 zvU7J!Fs)fwOmu;!9G=kE!oD?H)&jz)GKZSVc(A(x!#neM(EY$tLZJKWX!w)>KzVy) zWF%(*cOA29VWf9?nY;xYT-fn>X2}Yi8?yNm5J11>dtWac%mjHC$i4btcn}Di2j;K# zo_G{gU-zx)AMx*#Wgqdc_sKHclxfN>uyK8jp+RZNOy!p8>!8+3aRb>Ed@zWxKo##? zk8Lf+fCntp)#|c9^K#f!%u!c?g7*w)@E%7pvYVSdvhz~hj$}8&U%~=55wPczn0u|g z4VdDXW_~obBDNYfAyc^-`-mo>IL5ShHhOHHZWE}|RB6gPfGo}nOuH$s;Wmw@l$uOq z=*EA4e15k~xzKq$frv6$CMi2c71m}CR+&5R7xBu&mmCe7Mc2+V(WjbkIJFn#u{L*f zh7Q?eNYC@eU!<-ndfgnQsfct>wg8d~ls_UXqXUwn1ESXbZoc+3cHe|rt98CF+t#st znG*D0oH&eB(1lqxt3eI!k*n#0of|Z2(H@D3$=FHU7%5wP!@4ZOeF4bioZEo=4!?2W z;Rg;a74QYzJ`~Mx}fVm`Y%! z{bbcSOH0Gr_n}7w%?aAK=Uw}H1rCt+9J2Nd8nT$^JPBwt_^ZL6DmrrrFl!}4Ip`=l zau6VEfXIAo){&An3SU+?p@+~TrKS2Ccq-7&Mq^I827&?~Q^~o(w|WY0Z0R&BS{yI0{cxfuPPc zaFByhK_V$8UCnNHF-UZzp!S*_pg};`UxQwN-VgX8U`+=3zkm~qwQnmIWBdZ#x1{I3 zasFh@zC^w5gl=W_+B2A0(0y_!a0k z>-p{kUjwc`yZEi_$&{``gOQhuE|>G0_4K84v+{@C{npbem_EQnqYA0gm;d>IVhghn zm>PgWN{uLuv%A#Ti~s~p_-LF&7sUhfmQ0zCttj0AvhQCCPu@rC&n70`N%Zub^z?L1 zOz7C!vCYGi>jAeWI?rzKjrqFz?Ae$LQ^I=TNd-x74%C&DSZ&E3Do+Vb3Fo81gI8#8 zAH7G&boPzF+V}P1R+#_pT>8YClDHLhUPDRppy=z*K<`?xNwyMn>4sM1cnnxTY%@PTqqyHeG&ja{#(B!*LP3 zh)1b2Lkh$u1o?h;u@`Qb-Vp2zLX;>%9ea+yC>hLzGW&hS$27&qF4ma^{?q*T-Jbhj zx}2m|3_7mTx&_+}ekcJw@f>S%zHgV6T6#QST$~*f*)^8bM-vY^kux6|`W!fLXMpJ2 ze?9Sf@|tFsM2DMDSNJMWGaB-nO;5X@HMvS$VWk)LaZpW0Q44>G?$J;5zhIwoV)L_w ziEFbKZU2eo>03$-{oCmk@z-l_c!S)!*(;%WNs)fES=a^b076wW#jZyMv_?E~xpu`x z`Y#gmclm_GN45z+ zf7iYUQw$H{&=v8xd{R72KCAqqjVaXELL5qp=ObP7xw_aD9Zi=IC#0%{1`2#MUd6@> zartCKfk$;k3q~3Bl*YCVcWQb*G|2+M@_lVo5{pH)gc|F@W@PC3IJBjQmQIm0qg3!F5 z$3kr9yf*Lg66#kcsiF}+<eM2uLj|HIA3v5J{hQ;j zna3C~3h&U$F;X=b$thadgaX=Fuw%|7a;Rhgr1N|BZ4&A$z{fjRzNZw|z4}N=ci|jF zzGnb2-u~#sB~(`o<~e9q@g$z0omL+Lu&5A0!{&-?5!^<}*PnkR0{dWI(`PcQeU*#w zBsXZ^zIW~WxT~`pPgD^BUQ1N!qHq$zv_lQWi{Fj3WW%)uY-}g#!fE>LD`N0nNrcz3 zV>PnZxTsM7@BH8RSEdeA8y)$L^Pku>%uYUq9g)H9(Ay9gId7hi#odz zI*zwpIS)0cf<=rOxH|+ITZw&1;&q?n5}CDwXkVfGn(94OPqwGDAEu{oEY;zHwH5pX zk$0dQn>o7QEbL9kU&)!jR=4N=mAtX6dglr4@3+4-P53RdOuqAzr~g_1`(N6N|L33N z(?}z!>$iXtc*JK7FS+DKlgOcXXaySXP~-E3kR~rUck1wX(|)%L`$b(@@(%4bq)wKRz zNn#*zqSHBciSVt_U4qX+;mPml^Xb-UVE$3#C7XNd;hJ7>d0h}K4n~F<~*%Bbl zR`oE$LJaa98Q+v0yV=kHQ0CeKYQUPcN20Wmhku743@x0UUJ?*qFxXHsKv^4R^6+|k znD=zAF*E}Fz=r|qJmGS*!)$iS`2>NG(Nn8(kUB^g$)!Su$`Xy3h1h8oM9c@*l%SOt zKd^5~VA(;@m;i|q&1&o^Ll) z>)O2im230IGnfDW9-sf`TlU|-V$xVlr(kYyb$+5TEWu0$^^|?$#nT_$;WeRts+{|P zh?k3(d%Uq;{<{8i^yb|+xkt`ld((Ovc)EF_O*eKcDMI|)G#8}V4$n5B0J(eVvOG-2 zEdxpaZQ*0pl~Bb`k3@ln3>*-}!E5o6VDGH} zt5|DOR2O4j3;5{;EppfBjN_1m0>+Q^HKRf7LdU7eIcoW$g{BiD%F z9M#>kEa4i^(App>1l*VDAwlBsM8(lssn;G!3IE1AuVx^V*FCgM_kF&(g8Wt!$)iFJ z0B~3Zt4r~@un#x~@Mz-+RSi`Q3cx)=wh)|@epdl1v+UI4cA%{QwhUAgaajB2*zeQj zBbyh!_b)(c^0HlRm%WJ5hHIf}+XP2%hzO;0OZs!s^bm0p(a4`TI^Z9^L2d5x&0 z=Qr8I?K)L@5!p9CBmHg2GD`c1g10FN>HN6Y0E)_xMYNEe0r|VS*kqJJrE4K*3mpX3s5lB z&Jd)KLXC#5v{?{_ikh#6C8-Mq0#sB;*^{00fJ({U54cGa%jU3nzbcRp2%$OBT4sS$GU?S!jaU!{G+orzw7!%2cZXk2wzljo=eR z%~+024GI^{d*botN&RpXKj;m++vB#G*ew$nPf}5-3%C)dSiftE{p#PIFBn)e-t>}+ zzoU5yEVO_o0HlB=OZyu9-5~U2^_!5+rc2iF2Vv1*!EJ!S(Z8v;|L$SBKbX@E`xHbgO zyqr)ni|Q^&fDeSRQR$MQC1@0&`%kn1UGfr8e1S|5dKmgwPE0yuA@F8#ri57HMi9<` ze9jLT1mNPQ1p(ty9I#n@9ALnbYq=0QSQl3pkqKw97@0C8m=uG@O!`e2Wi%#sCKh$S zEe<&PlJ@tWT&s<`4VG~CkAqtTl$G@v4Gi-OQxZ{DAVm)=fkYp>k^81~-9Pr<5#b|c z|K=NytBrJw|6o7r2M_Wdao;%|c{P2z0a%ke)su4x=Pnh4yR(X++@RcW>X=EaKNZi$ 
zl|o^2Mo8kpuof49aR;NQT-HKw1wr-{^h7GqHQvVxgp&qP?BeMF%^Ay>8&dfH95z5N zNDZ8`BmQBEW?uZoRaTnR)a1Ml08~7m+=21%ffTzqTL{Eh^)sG67lu6NFibvXdsNJo zf9u>08B;ne9U#jlPAjufhXO48%TfqP2Bl>|SURMFHP=Gm7ZzuN5n&FfDkx5N(VT;z z6xD@DgDN$f;doG^W{UL5EjN~~(`J-Zv{oTxA0*2?=*BwNC_s)f|Ixnd)^Tz4K%C&2 zZYkn`rnwK>@+f`Mm4yS$LThPXfOK5M=-GOzC(A?$1S5YAl5ZGwzNK44q-pV`iv5FN zx(C4NjW>88NE~&}-rAPsAZ35&A=uD!s#lzFRZ-iiE30OYbnH=8T`C^3SUfT^)HG-E z2SF|4(!I;PS~xoel#o5`_CMfYo};W%QOK3f+1p25jg8upMxEo6s00?Um(QI#AB~|5 zsc=>pwkK)qeA=UNv>-pR#14QUc0?C+B{7h@jLBkzbLygEgr_Z>S9M7SbR;wfweJ=P z>Uv}_xd(KhG!68Nq` zqq=qK@guqt-p0-C2H3E8*=+Ly$jGOVVFwhbmNE<)b=*09NW8VZH|falwYc%jMh(gX7dJz)u-zGk}f_Mo)RS*i2d%W zS*<7oe>4hGKXj1KBHW{owV1az0c@Nv-0eu?3wbIkwMZtL7vLj0TOs!KSzA;X=;~=M zWa+q82`l5bS?;@(!e7vQso=_kjueG4*_mdJ&LkSGl?B?fqWLZ{ue!QnGd5I`O>s2b z0amR_t!zsN@E{hhdc+nd5Wg?~F0tG|UQw8kw+a0rEeSo6 zP{IDtCF4VWbBc{qioF{;+?=`oZmgRo+10C*rUR@a8ggGpUvn{TCVC;rQG1U@>f}N0 zD374&>_zVZWWqzJ$If(%j)3ggBR$-}+UrNQTzuSS*lh8+#mggUJ?Zi6%$WKD8j6@j+M;Ygs5Nz7g*oDo*mM z$yAMknghtT`k09J0!aZZt3ml zl`aJ#lyfTSh_zeVIaMjvkOLHG73p!aM8i$q+*Q~*B-}kf(Trff=K!S)XbjSkhGl86 zmQFc$+zk6Y(@No*sT>|Zcib@6+kId#2ykvaQXZ71!D4qjr+CxHrtAt^s7@P!{f)?! zP8yx*adNiO%i8N~72Lyy-+T>Fn#KQU-+vt_|LZ+m*1R-GeFo%MZ|Nj!uHX^RRmmQE zxH4{TaBqP){##fN9{&gk`&sv?7<3Bync%+UkXc05F?~|^tR3grC>cXs8}X% zrqM#+q?Z8Xfv)ZkhM`_IT608rBxR_&BULa82}l=~e#kx5mO7Pi_1h4ZVj=gGD*Mk7 zh_@)Y0)bW8U%bup9lT$11>$}WJ;+VYI|ql3dmWm~ygAX?5Nv~m+vK2F2=KYtK#bY6 zD+tJ#nazj2>e@=11Nyr6?duJx#PX5^EU(5-T>zmNOc+r2KpK|1Kuqm%Cc)B)I0P0h z6(k-7QXF6&!#oN7fcH3n^R=_l7;rrQ4quAxS+F9xXbOeZ$qVBUhBMLTJ%DExkBh0j zToV|cgTfr*a_nF-1OTu%9^S)TU{*CXEnIB4^>ir>Vl`3+LT50}l?w?V767~si;Xl{ z1Islgy|A3KyT{}~?E{G0!9HT8$r^|i#zMPRnqVoz3JCFN5<9-%fB&^L|Kof3BZ9xi zT@xy0J76(Uw!>he%T4K)8BtMKlAH``293IzWL#w;h_>Tr{W5!~9zgD?7puJZSrEi> zPo1Xp9FK2TXR0E3I6Bpq3yY9~u)qnmsir$1zR{slquCT~&JBb*Y9^Fbp`jsSaOj$A zAkx@T8?vXM>e>u7*Tzj)3?ZajLEhlt2$rSQ0P!hiZ*pEj+kcT; z4dE(~rVVLCd78^`1&e0xg*=tRt5^ou{(ITgMVAfj`#xV1(p8pShA^YroeGe#c~4(i zItT&JW*4h3%D)hLW7nr*WjQH4P5{++YYdAchTl>NY#!^It5T~ zh~O5-%|H&jPXln9X9H1yC36dM&h}~-E>i4=6=cm#jP4ARAea=HZ0;(s|AFh!RSS{5 zR|;rrFK}}LD`Fs(JIrL9dHp`d{x?Yve1k=zEn7T4s^m~aYa9dLV*cg2j3%)(%4_Ha12IOLTz*&)C7YPD4{Bq)#V4ydiIoBVs`3U|1px!E#7wXm?aDp+;_z?)HhlCD= zP7VqYVmoH%E>$lcJcTu6h>C>3$cZ*MK0qoY#O(Ga%CcZCrNyRG+6RkwPVI$tXl$Z2 zBgAMV7v*~(dH15eXIR;_#+{WvVeR|W6Hl9m+Y+~HV7ZEqjJEeH;XXz%iNDfPUwwyq_9KG8mBnb3jx9kcAA ztYNQR*}V&Im{iT$3jqP<0paz5F=;1~A8b)DzNww63>PDR4q9|~^Vf0%TGl_*=Sqa48k*INz9+A1EL7Hz&WSxjM`0F3}DgW`_+S)qlbtpt<V5Q!e;k%568m0R1lg)y$d{4rfplhqV2;H(tDEEB~}u(Ff_$Nk*uMyx+~ zOKF*F4F`gQ&Cy%1OzmOD8Ae+={cIAIXUYX5U-E4I=(f8H$p-r0_v2^v8MCdK3YuKO zRg#J-8&B&U=-W;uutR40rjA7cTKP%?f5Z@O^N6f91_ zI2v`CXdiDT@Cdw@RIU>VO$I9K{r<&2c{z8N{KV3!y4$xHCh}D;W)gpb=ox+O+b^UX za-uIxW6GAP^hgNM0pjLH2u^u}R~HPB20|XU!GIZt%&>?a2RwHe+iRhJz=(txJNylV ztYFJwu!pq6q8Kf-=0DhfTYWl6g)8a)D!!UDe^9%GdU4YGw~qQ%W1N}9SG?FrO3*cf zuKN@p0=oe0Or_;K{Zzr-*3)ZvhyX!SN&UbN_cyx=Os)b#5v+R3-L6=j| zH5Puhppcy8au6aNJg1b0dqrnufwt`n%qnLRO*)l*wjD7FUSATMEp$rpTU$^ZXu5#i z? zEu

DmE$&K1k;AVqxaHJ!-8g?+SJS$xomaI;;)A_TOJTE)&3i^bfx1mbEJ9>~vV? zoQLJUrK=#~1QJgiNjQ?IVJyX%73@lEbpG6xsInB7E@0IvV=xpvbe)U9;Z=&i4b=x& zLnQN@;}b*p_$o!MK6;eABI(2aPMyLO6UHtRD!)yOky~Q9`GHlh!o%&qzc{jyc^yKx zW5S|73^R;UI)ttL;CiOL$x#w4C1lyRmC_)TB)v2!cKfYa41X?s+ur@0H#j){^p`vH zcfYZ&z8YAfi{C5uZQ<+-(12Fz{J&Y>z-CAj>KS>_PKAkVkdsRzyxMxK_u+^2KPJAG zG{@t96zbof->u8lpHT0t^36A`^3^pgKp^7-2j>bLQ3dOaag(I6IQ}NmJvA5_@i>%0 zL-NK&b#Gs@vuJ14&Z3;6U2iY4cCvJyUwpn)wzTMZ_qp5#k7mYr{5+G1E0Ol1+yb)Q)(%UyZq;`3a=(!4V&??UhTpB~J;`_tG* ze`Yz~Hr$E(_NQCUKN{|QUiyvUPVu*g?wanhxbTXO-LKBQxC?uV`lsJ!=fBT96Ld|G zPfiWKKR*B21vVvJB3)EZ2rtLXO9)-!Ln*g3(x-F$Y!HZk^#xxM-e$h`mGh6}XFG|x zqK~v=7H5B?&k*L`HQmjArhT4xp>xl3&u!=QZa<@ZD{&fguj$uI7L$3R?{_iW6=Zr$ z9xI+%Za$Puh*653b2$lvdP(gRiL3}qOIy;X5 z6&Wd9rLgm!C&0vQ#?UiA6z$%{|DK#VY}OGTuks4)EEEkLq|^}H(yX?SEwKG}9hmTR zzkGi8@HiOqa92VfSL!|fx?|+Z?#`JfFRsK~`djc+rQM54#~0FM?3sB!Bk->T9fjaX zl*gq7cXg@0)^uHQzP_DD2z1Dcd!Uz>EZRIk#JB5~2hB?&>0bEq;ilUMr)HAyxMNY| zdXAaiOa=*e;c%Zewe z!Tlr&4$T{v;L3wY5(5Rig2SRLNW!=4XsZ!P5?;65^L!EsK41apwnRxrI#cBoTOW?R z9NGp3`yu#l7A*vSRsA&tI9_p1`&v6F$o3DFl6m@Ag^ja+nKT*(54(nlSR;=)~0=Y^{3$%b0k1ivUa8qTbuF z$+PXdZk;pYvp74>eRrKRvCjRoO|H}RE~5G+PyJk9&46)*Yf_4gq7nSe2%cdxjl7@< zFX_`G1fU~%=!g*PoKt87kB}^0Ge9AWB++@}`Sam-N(bK~27v3hTmUJM!NbABdiZ>m zD7T+fV(jC~3uj@zS=i|%>;1MU(qM+^2A)x7w-Fsk`2-<PBB@XO^*QnFj&)>&KUGDj_K-U=F&i|$*gQ1U~BU-}P&Ka`= zpZ2q*+R7ML7#Lx$ix=STG>5yXzZ2SSf;3Fkp!VtE(6b<9awDSo{99__C=Da`_+r6a=2ORm~K%z!7`Qx1VU^ zZ)1Koh8YB|mLi0UpHtjgjgTZ{e98x&%tn)pih^kZaHw?phW6gbVb`+$VR)m zBbHkmEk7hFI#0iAZQ?tbx?*4EueB@wqxn&5D?vc7G?$=6Ec7vliMDIzgXNbZ4a#56qXtCwtH=3V9$8ItB0?gS%k@zhPVz0N znFo#F`{-l24HXW24w*ip0#UnYAS4J&ucL0zlMN_bo`wWP$9PZ&V5QY(pj1Evpk1=8 zE#WKD(a^&LCp>GR7p(XmfKWaN9lHw0$PLsO$Oz)JmJv-RS=?zy2WBb0OCq6yR_isl z;TuO5>DaCC*s_9v*JZ47DlEt}yNvHoI&~XN?GsncwmwjwRXCAj$yob7@%ZZBw(kb0 z;jzjoYM7zLX4+nXMZh;6Ajny8JC1ISLc18%jJ~;MNiy-i2Md|48^(|12k+EA9fwxG9A^jU zuZV!!jsv}uyQ}Y<&wQrs0k~oA4_}{-ZslNJs86GR7qn=9`0oAq`F(ML?4ye&xd{yT zk1DwOWh-K3kIm>yZS*qC1QlNT{y*s%d(oHr4?eb`FEyh-vJqIAl(}2eBgnFk=Z2CO z$u9WHw$Ft-2N39_?^HGGv2b(~6Qzj?VysY6LDFrk`g8aJg@&2*RQFVHbP|H)4qc|& z9)I_?5Rz@GZ|%fZho=Bpkd=K%2eM#qWfy@l4gMyh^&r< zi|+^-D&T5@4oS8U@suqM_;@r(I|#E``B-1h5R?I7bV8-0{dn9K^)h z_g7DI15WmDPv82>z<&hUV^}~kDBGZp0E@cVIt2tLe~yi#x55^~LbQTV0;|-^RKSp& zP)#m{&7LaKVHUA>0ZSi#870K$mdu_8z}Be6*1M;K*!r~=o(564MTZn-dX-Nvckg*o zemPGDD-3zq6Q3WOLKg-?(o$FBj0qK@y{UVrP=_#YtrkYs-fx)KGrJ^}KX(homUVJIJ`&^%Aha(5rG5@N z@_I zxc0&D_dI@ms7oTnL}fex31HfqwDI&(98me;Z;5>~cEa}n3eDifd<1d8s{-Qt-dZok zbcQ|E3EC58htQ`%(*m9b622hg3u-yGU2vKEnw0Z)|NTangY5iZ)4TC=x;ISXp%59n z2IPvD;A;iJ#0tV7G$-_)m`7Bf>xH>A?{%DXZT9QvFfeef!;0I`%2m7IyEq$TEX*z| zgNn)Uohbko3vWWxLMW1g44Dr?nY%MsP~6?Ex*)7!R?PsF1k8B?B~%j-c0xKE#_+;~ zECLf#>{|gU@AFuy8<*&kg$YQlA-cuEIo4SsVd1pn~ps=DwkC^5*U%?2-H|#I} zJZ*_tJTcA2uetP(3M*eNt zZMbEDo=cv10cZE74wUvvrebzOL`>(dzmX;v9Vi)i6>x|0?eo;x7v<49ji%XglmjZ4DWEryO*^Ib0Xlh%tb|#<*@Wi_T`KaaJo8=q z&KUZ3+1opl4SoHb9tGT{@66xWX}as(l`PjO!p?8&=RdOe?nlF!{EfqjFayhjY0)l= z?|vfeTv~6s%NaIEJ8ktWZ|<=7{c~(=yKsA+m!4t6Jl#ucfl7La=@RTWQ3Rz z#ek0BBw^F98AA@RBP@*&e+eB}4xA*@=jI{_xemLYfv4~R)-5g#Cl6L>`ho+ZCPtFXGSX)`c$k6uyk(n<)z|3a{`3imW ziB%#AE^5tnbdFEs{0Byxq8UPPYUJtaLb?-aSucs&3unQ-^mXjx?B0OjUO`k;@5bq$ z{usjOWg`WG@F>+86{m;d3|9WU4R+HY;{%f*7Br*PR{Nb7%A&(nQ74>Z{IF6^bwrb- zKxCVB7Za$XIH55}-8CS2m?tERqXO`}K=w>Dw*TICx20Jb?)=C46%mT$gmFmE5d!r$ z7&;KV8Y>d=Ot_9hdnh0EIVMwUw>F2jpe-8`9i#3CPIBjHnD8)nTmqjd9Z#ko*)nM# z4)Qn0qE9K3AwKmZoF^b_xscwub4eo9J*sJ>blhh#RL;8ih zIJ4nL37wFq6JIydNZlmNIXhki3%b`gceP@Mwe3+<2WHe=X4KEuPenyaPJfaK#q=xK z8sY#ET0q$(VeZ$wo_utF*~G%iFk-Iu4=fR(`lJ8r`H ztg?OW1uZ7GS`Q1f;r4oI<`Z3GyNuDd3ML3S;l$OgS5&F&uncyoL&DZJfiDdY!gkYX 
zC`(1O1@~wt8IdLtZPZ>~SORt1j%wSh?7?wMA}sI#3A#vDjxqOt&`b z_}WMojvN3vX8M6 zN9&4k-#~&P>`=j-v(MK3pc%2fUkGT7!(nVBSAoF0KIbD=2>+$oFu%}{ZZ*&92xt2X zkeci7^P}zaE^l~Pe#HK4knawsd(UJWHuf!IpZSI@d79A8EIwJhX+c67pbGPtc|1D5 zCXdcOX)H`Q=0BfOG;L)-K>RGlB1?uiANdTKk0&$o{m4k?H&!&7>Ri4Fo<^I?ZlD(I zp|z0;?X>rerH-6qk_PIYbjKy#UxetR%6&a;jv?8D3w9rv3EubQ{r>x(9iRW7-M^~p z5vwYel~d4IAM@FROxjF*hJNz%*!Z+3JC;DbXt#0`u#z%bWTKp+^cUi82eCT4#6P|S z7S|Fg^8Bhx5~^i&IVe<;bdu3e*YU%n?jcM|FeKWugmJJ;@W^Cy{CEM@zR9+8wN8wk z3uW*2U(xq9uf+Ek@=Z-f@lK$C8Bd);pG|a}yX-xnWA>0`;QD%U@gx)(vHM>_aBFBj z4W>8Z^)P2r->h3NW)WgzQLFJIPz{E3X!v;{G(O3+s%v5PrXAD0rR3s2W_EfZI8t~R zDSnggkI}@jDCWI0cBrvPv`(_s5g=Z0QwfdRLt`uye?&U0#s3IlK`Nd&jfM!-UrTo2 z_^rBXJI*1h=#XRw-Y^Q$VZJ)X?OdvT^~;N@?Q_YyKfYf#w5RddmG|xYf8ntFzw!~g zh@>20tx^z8!2`?tKNb-KK?J*dh{Or^BI&0Fl7jL3>EJYKo^@ZG~jQFg_-YgnE{cYWhLpJkt$M zs(7{0ZBwC>D&zS)y`X)SF};p_QZG(V=vV-cMAIo$AjpOX zXnVq)*iI$^#Hax63)Tx$?F**iS`%8nVb&R6mh^&NESLRd$r&YCskh0zoqz(RwnVL@ z_GjFUfp?7dNy`>m-1aOSyKT{!TQ>5oMfa|cTHm+t|Fiw{Uq2XkPRUW)4N#ksd%^MQ z#3LRwG>n;l*b;+WbkRr;7>48V^PemjyEk8l!E94d27uGCYCI{8=!`QV5d4FCOhWw+ zI(+t~{lo*!=s~bWT+9pC@o#i=5&50MJybAsd>;|l@)I0g8iKI)-E8{<%?rO}(TR8M ztMd}CJ$9weVwznti)CL9w!rJD!}<;Ab^RpjunvY3shit=;MvWD-~PDtD@2^U`}v#8 z(~U#bI~3GK-N_q&IdfrNT)?^T1PN?dKMjNf!3fhW5WXe}3nI zpZ3!HKmS;5x6dVp%h{KEX(n`L#&OjY{d$0wOAR_0v7^P-_=R_9&2r~!w#CiWp|-{; zlFct~KECie>8f#OTd)1FTd2*5-AdB~fB8+!{cBa+%GHJ$=L>&-rj2{k)bQW2eOqzo zF0K+FhzgGyr6693jm;`Alxz3J)6~SKy!O+1IK&y;yx^oXo`dt@FD?#Th6PgONbbJW zJ@Z&}SEMVsiy9LNL@H+#!UHkcne#goFAJee!*9 zVgeZviwJoLb{$X0lS#v7!{B4Zi4jA1CK)?J`1Ll^c?oydzq5AV0{PRcFkV(EPZ7A$Fi61?CDJOK)_ zs~@od0upUi>ZWa^R5vIWq-N*Y~&O_sPo#$Z3BXcRrq1 zt5lx)B)DncrI_^f^9oj8U9KsrUmMjb{D3FVe)hh7|C7!USJG9GcebMccIsB=*t)KI zztg^7U4B?sSo`KwY9cuN9+*(gZpo5{o%hoUL|$cOxN|5OMSxs|GH^no3UNIlOY zwc!9RqLbZ!H_mw^Nr^xa%@%@ zLJ}NF=#1pvQnpmWoKs#2j(ZL)| zip*!DXlv6Rgr9^5Q9ACb$`;X?f~l~P8n5zxdrXD)z2hEh?1-vjBmGV5Yh*|k+ z@`yr^m@KI5k{mfd<9)ucQ52DEm?Yc#^RMe>1PWzgz`#Do)vkZp?e~86vpj5F%(~As zTcgH>Q9q;nBt2nCH|Lg@9uRTdHLqOgTJ6@cXL+1kOa8yw_djX&f4+N?5lpa$gP}zD z#b7-nggPZ%per>V{kjPkp1p+X1gR7E;-@*HAD(2hZsN+lHfOmyMu0KSU5D2R_V zN5Qj&f>n(1+@7ae7+A568I=i!lbpR^Nhu}3pIh_2{P;vy_v7JHg)t(t3{rK%cr%ZM z#oAZB=uJF#(O{wtJSrFW0=VVVlg^hl*RW)0n(h$h3cx}>LBU=hx;TSP12ZgY(~pt5wo`+OM$=fxgkf!~0sIR;wNm z6~KN6(vB740MTI(Bgca+VFJcl!Cfpo^Qc>=z-d2xGUk?vdmD#ps(C#HAy@ER>}NLd z>f%b_sqa5_FAdG;7OIZN$KG)-UDMD*1Ja%*lM?}R^vpV;Sw<#~24xOb1z!6GDk}7I zr^X`sO(1P_`zYkSO?Pv^yo^Z^wy)H2w4}N{$137gdQH% z8$iPsLM{3_pUSgy*i3R-<2zshIz0aIAT%76P`L77%U zI6JVvKr?Oafx*389Id``wMulcwYIkQu*GO!wpQ(M9avpFL*SWVXcD-nsK}L9Xyp~` zx_UHHtLY8y4gRTYl-;}bTrX)i7G2(y@P&}VrEoD;mI^!V*QM}?uo0*YWV>6g5W+1M z-docfff=XUol&-@Trn@k(s@-5c9~ z*R7k}5=_|QZ?Gas3#A3$B0-&f=%)31=B$_1F~a8B)9V>D#^cHbAgj=c<0vS0aU5oO z@b!5@AFl7DkB^&)ah)&M7b71l?4fWXd-N1)B9AL)!Hvf&XVduHJb(!G7BiU`H=d6N zjxsQkIW>AZ3I@hlW&}S9D-^X&3+ucLCVk<(pdcTEfsqqi;6hOypN@V#t%PcR2px7Tc{GiEWs*Z;e6T^vn_BfAgoeeHiaRmAbeS_ z$bI|glPj?Hom#A*u|a?3_pW_sKO6m8^AmNzdoXP?WI;o*A2xo|TM*{SWx{TYNNxl- z(tUxoMWl0m`)N>VxH~sSm~$g|;pTW)8e!0YKs_Ih^5O>~&^s7RoSM%Im)FfTm>TBD z>*l}4C5Rp|LO>27ik>F~XeJ;n4ZJY-T~!v;Ot5_CQrxU{0)qdrOlSiGU~2-3QZ@0_ zN~mNcV|Oj(dcCDhlC&;aepsNpmP?vw-vR;5u=WZ|Y_LG=h_!DkKQq#8xYvar!K0=; z`yb4`2~<<(+AqEn2t$~HKn#NjI0X_gi~@2rAfO^DlQB@0Fv%dbpjM?vOBh8aQ6U1N zG6*V04TGmnN5iZP3ZhjzIYMn~s|DMNRrCM7^wgg3-v7Gm-nGtM-zAW2!p=^}llR@v z`#is4dV_z~djBI0m05nVi9h=61rX;H=pb&fQ5;ZIqRU+pj~^A;TEs>{PpoopkzTnK z(YDynvGdqZmuXzOm0Oq$AOqg!EHqX?1jOscEBlu!=A_IhY4n+pj!g1~{^Vrx&Ou33 zL>>Z?Ni)DM@N_JIgqMpr)bZTSrsnpFh>D9_H-2s2H0qUp@K_wWP49_?O|af`Py0i4 z0UH;4V|rvxWfx%3molb&Z0RM3KdEh$V-DI@Pl za{MeVrodEp`?%$E;_o>BjXW(1aP5OGq6Ec9Z<k@QDT>~an}rm=uuN(*Lz-S0 
zPA%(W!lg4Q0A`&>-Bz2$Ky?my7Yq*=iQbWE6yxc-@!0_v7z4^(PCzd`{uY+7%844a z=oR%68CQIe#%q;-e13q&g->Z4&FdO&joX&?PLjs2;AAN|LauV8D<|jY-`yW+TUu|v zsP$)j)l~rWDs7?3VkLm+ToymdJ1VH&Ov@N>a>gfM!Q0iI0c0V^r*uO<-+qNpMnFnbl(%NS3;Msu8}kR& zg0d$EcEzs5UHjzUS3$vpef&h_G1b|XNkt!gapm{0_xaVo)+7SO$F9Q5PH#Tu=`7bc z)JKat(3^4KVN}i@)9WKY=VaB4dne}POk96%dY~=pO!2ORk!G*H|J1prU`5V`$B_kk z`Q~mmf(}2Y)iE;MqIfZ2v6O zo|ug`58GdM>It>C=Y7n>@Ap-Fc12>8?1BlV?IP&EYM4ryd`)Oce6ZdNJcGh<*khgG;MIEczb! zy3F>D6Bm$$#+O|vSDa7WAFFUsXWKiL7g!Mh355OL!qr_T5XA8?CdN15bbOuJH!^fX zC0cy6^FabfA=KU}nO)kN2kin42ei7`2M6a4Vt(eSKQ?vTFSw}wNI9p%Ro0fdeQmjF zw`0!TYvrn=OLx1^DZeIb6RS9L3?}vm&Ed?+xwmH`nR6{OE$8mqvi<#4D_n0Kx$)+r z>yTH|Y!A<;cQ$QSoi03lfj6hZt4g$De62(8weA&bP4*Y4k2Ppc z#p84Kt9q}EWTx)Fvwr2Avi)za{m|T;DmffGwypBVt+8Wi`>*xv+b^3Mmzfrky7pc~ zYLwUTu0$}Q>j9J5dG>VCp28_9mWa)rxFT#a?z zi%199!-(v!`q%xBTE|zcaQVG%belH6O?3W%~kQV$gP9Z zi+zf$x!Nj$^au|r*krCKCp$)tt}z^Cu|#W8$^#T;I+mAgtP*uoG~XzxWhm<~Bn&BC zsVDGOH!^sLB?6PK3^L=fzLRhRcj-R4x`i9KFf+UV0Sh)yFS@ob?4O%)tNqn9rc!as zU_8A<&!+}H28wE+gs5<+GNl8}M0PSJlzt4wQP612T=QO0cPNRePnVnfL3X@ z6-d3N=?&c>h80&vGKcN4T2qygp&kAahT|Xkvq;Yv@qN!kiSH5k zMOjg0#(4E=RrTuXAeOjww*tBxQ2JQDM|cH`rw{;R z9chuyuY}{H0ctE?HLiUf*H?|@k+nMCi1k4S3{DiuSBC->dK_swCll7Buqk0=y;4}Q z)zxZ*F7SKNJj9@dJMM2ab#%lJbh}c~}l3qb{pvbC(%gCQR?nly2(+x;3tz%ytOMSaW;xeI9OZ zjI|p~M9?B=BoAeCnS;x;EMYza7ne|G)o?c8Kl?epqb43=A4RV5Tp68UfqecYh0>la z$N&;G05T1N3|+2|;&9X-@qMjh_V4_CNG*hF8TGbvf*M#*R}rz=bNCD-qbj}u&r-Ay z%WS>@U*EB#@b7h7+`rc68+c{q_Z*z$KtVJM6en=1T;GZ3sYVmPdIO&M3i0EHoGanC z_+|O6FW%eSMUtv}kQJQ=HBf186^J#7nUnf0L`EX}53!cZWTnm#)=Mu#Lx#KfTWCL8bZ}areV7g{G0#R@0;IIib>bF4;CCHy zR?LVBA5`$2%9DG&Yrzf-aY+o6yNGQk#0he6yekI7N9_qRT+E&*VB|!#=o$CBnejC) zbwt(aY4mnLlO;`p{mO7sHXqkBhQ-yp=pmfGbD}%g(^lwi;juYqKNZ=#o@Bl@yMFBU zM`bX-7Jq<=awMmIgICj?fscI7=AKQATqIU}4!>(yXNy5C1Y2!=QpG_~5FsdgaNI3f z4%Jp3`87d)2cy?VN#gtCJWFk}pnv1S5o8xNTu^Y==N7=EfabF55`v3s)}c>y2x z;{37p8)IiLi&}a=H~Xf3=Q17U=V_-t00r)YGh-cf7W4a49zkE1(Bmw`(@_TjLk^6| zU7VjsVhTSEX&U|8kR&bYp7=#(@4Tm{^XoH~9M3;~`k0+te!g4&_b$f9_zeihia-z!r8i)|^3zkU(dv@~K-VM`Fdf6q9lkUD8KQ}Vh0qV-4mWsZWT(rYnO zl;Li|42ff5>nK|%Bj-V|#4)j;fOsUE?%ti)FIJ*VEUdOV#{o)+F_lz%TOR~R-z--U z!-Rj*2}^2!6dv!v|eS4E-+BCqE42b&WJlkI=0aI^GtqC;(Nn6 zT*akrPJ7>9VC>R3(Mf5lL0!U7APmc8+E4*Q0*aMsMY$X00OvzZIj7GFG$?+ua$di@ z=+TXN?K$dvR&CMis!!~o{iMz{-f^F6y=cc-Gr!Os&CPqo^6EXymOk*4eh#dz!mNF` zS`4*8kR+?{PeD)vK+QR_1B3Q%R6$yz^bY!vVaa{Gc4I$bO@LdSx6iE&{k5fltv~eN z8+vQQP^9YChTh(nVN68xcwInB{;9|>F0M{GeA~pPKCCDl@DdDRh~}x1zAyVTzI?jw zuUZ1(IXNrUJ2lgLwnKhJ*c7o{)ze?&77e}28FiRix!YGXSKu2YZxsK{G-!=TOR&UB zZZxW`4o8&urd0T(tgemu62rG(D{`xU!m*&rmtb#v>YR zV%M2PYa9|d9rxhOABuK)*-6q|SzN+XC?v(WN`u&|cd#&brtK9ri_QF1>XaKW+t;fIO(RLrNa{ zY!sF6&*0C8?PHMMqFTWy3cuwGn*_wPAWSMG5PP=>Jy)47ie`IctDTC>h*49 z$m{AQT})wC?Aqh3wI|OuJN)w1z11)&IOi}v41{q)N@ z|M&>vXWQJ?_ufnYa(=98QZj$CzVUu)ZsaqVU;1xlPTULh%P=f2^{;a*FAmt~ZI~zD zH7mHh(05~~sW`85lTVYOq%+BSdU>ATi1dUw2yF+&2}xmGo5mDY6m7B&2%`XVbHgZz zGz7pPAg{?csdW9iH6j50XS3ENH9}$#Ld>-}(G7*%=;NdC`#e5cSSViX=0?00f~Of7 zHaVJ3wH-4&G|jMS(;*|%p&`?0hIgL3>S2r=x=`DLh^H^zQYdyr<=`dq8>cIQ$(*e1D#ot4I8gKK(Pk{@vo} zG1L@QuLPQJY}9Qu)#!9X_>QVnS%%7?8Cvi;g0&HR2=i?649o=Utz5+I+>E?-u5zeO zBLx1n^uQ|7Upr8JO8z~ykp4W*Q3QBf>)?)k+JH+NlMXz+x) zrQ_B4tYcFJ=Y2&EQJej)Iz+w7F3Egi&A93VsB^&Wu_kqeRn^P%iIb1e68P}ZCBU4M z-n`s=PZ+K36HQ5@wWM)WW;B%UkhvR-zpHU!s@1?3 z5BKt|D(Tu0n&d$fQAqt2nOpD(wQBs~5EEHq6nyW%O&YbFMsjEi5pD>^ItvokZlQJh zac+y6Th0JtRK6WG8v^?$KzVG+)Gh&x{je)a^)6!_L9=@@Xazbez_9@BzB*D?=C%y& zj~1Z#r$RjdEelzNGpq2K%tx8mA3X!!WZg5IIfb1YLxY#pE8#_LjPj=+=a^g|sc=20iZPL9&ExC^J0l62IHD)<0pd+03}?_}x=0UHq~? 
z75gUq%r-2m<@t(H-1iq1DhhkM`To40;+Q5qq=Tq~E?(89`pkx~6XA>Of)qfdN|s6{ zJ+90iGNQdsr9ukRaP$w?t283A(z%;wI5Ih1LWmUc{lz^K`Sa<7KllqmiZaQA1dEjd zQuCPmXDVj&c9y}9BQW4m6I4qOsj@m;xh&}&GILp61Zfu$#8V=i+9C8ZG=t|miHHSG z_OO^M3hFgc@yYi%cl8@+y3|NcblZt-N5g8uL%CtSCcI)Gi+Q8!-w}yv3b@=uRJ;&f z#}rz|7710(Vn`VP=yIq95+GyCGEi1tAQ8$apcHttQFLJqs*T?I2Wi!DHG=oGO@|@5 z&Tg&}-QhA!cObOnj3U-~slIVS!+;^jJV!Mw|+z+FamGN{fWBo||oz5?4@J^#T zm-VYt$|dSn%u;!<7}V%`Gp42b{^duDSQ!GVKrKeV#8fMMsAeW;osDF{?`u)DDP4oo zmfD;sN;O$wp%AIVXP~QFGyx1fV&1d>7iH^Ej2^;Tr z*pI6_DHB^M-HJjw##FK?yG1sX2S=)vqH*HK;1JNxHZ{|f0m{bBbfFMUdLLgOb$OnY zULRI3#ea4cD!_aytj|xYB*(G(m(x$EUADI@Vi)OIMO-U9w!!;q)~(-EdDJc{DgRKY z7G)=V;=`IvsjU`RnYDm|q4Y4ZG!XI0u%KiK{3OvwxN zaxSArbBZA2i%sbZ%^iJo6Q_(!?bqF5_rzkm#H=<(So2dh$HmaRUm9F1$>=8W9h=u+ z?mpQ6SA4x}X1i*8?_!v(@IA3SJ6w%E%eZb>Ctqj%cU~@o*AA{_z1Y%X=XUpL*bf#Q zef^m3qgS&%_Dm_BaiZIw+H){^_LCVIbG&t0qjvEX-a!Jt+h)R)HEFxd!k?Vdq%XPh z>FQXnu1D*t^)uHv{cbDaT4W9X^Hfuhp?+6X=sz9yoi__Kv>JFMypwh0U0iU*Ub@c@ zx9n(h^SDdBCBdst$mB=q6{E9{D=AOz#njWElxAhuBrn`4Ef*@4$>*;H-jTAny0%t^ zMXp9~=Rf%AkN#0(qcSh$T3tW)QeQUZqsH|=dMVA!lyB~ugxFpFDP9tap+1wQ zjHW5wUtG1~Se#!!b6We8tVK8F0#Baf+qt)H?25?`VhL|@oOc&(S#PrAVzm-V-*vB4N%MD-E-9fXU)oF!;oQ#k`VR3O<#eHs?>b7lYk9#!OPj!< zr9~!!qgEPQu2*mLKUll2(M2@VLh9NOvEKOdgN%m`Vl9_?hkXO7wqlO^)fi3*<(Dz5 znZ);%`zMB6){p;+uacr>OKSE`#C~`>Q+j3T(of8UySbe8f|*O3j(gH+*(E7$RB+09 zR-$0aP;_pXlUjQlzGm$etGk>R{BmkPokl;pD_KlQjDoKHJN*t2uzDO z{4Q5rq55Stdx}m~`uj%vZnByUm>2=_<_ml_DMFOC660$bI#kZC5aMb(L5lnp;I7Nc?wfu% zaX<3V_=yV_OcSFnfQK#9@shWa%hzl5ZYiFBt^aoL>$SQ~xB7H+TU!<3qu+}+*5^im z*L_$VvEdUF)H6f?JMj6g1p%K|cGZ)zhT^j~z=C|%r?ca8KJSCkro^a^D!UrKyZ2;k z%%Q`J{W3-}y1h^gfL!R)uZzbzYCl`=MGzsRvnN34 zRAe9wa}bI{#6Bl~Km|dU^*92IcJ56&^?VAK9&;?;(P9A6c89!4CW-4yOVkNOK>!us zN%0RYc1BzS$+K2BbU7|DcRlawBnTBGx;QxnW5by}Rgl~zNWAYd*y$uNbG=jq)dhd% zy6AGZ*aSDRG05peERW=GZ6he1Zi#9eZ9w}zCJFs-z8-tVGN!-F(e%2wnE2hCq($)s z`AKN_q3`DpAIacc7Z^^hcLNdLv3`bG2;a6UsiLT;YNKuUqr-OSCYyA#jRyh~Zk6Ug z4orrdl#bf^h6WLeoBl^#M~_arMy+pvGZGPNIyz}qm3*r9A6I(6gw=+({n(jVM*}cz&JxN%b)EqpY!Yf>By%EgOVD~Zl=lNZr1#>G0% zTD;4>KDs>9BtKe9ggYgnET1M2Ve;Rfgx@0H@x&c7-n&amkPG^P$90~QSO5qXIJdNTmbir*?fvI?3HcQ?%sNj% z9LCyUWe%$EpTFIDvnLwW48TUOAnNK+sk3+p(SFG_zp(ayijr&Aws0XIRdaLo+LTBr_cX@h;(AF6GNLr+cs2OaN@C z4MRJ9TnmL_VA*_>i_m3g5S%FFhB8qs4u#8C&X{shAcxXMIDw0xE8(9ackY88e-|yJ-|?(_$#fChJTT7^miBduiFR!u?AV zO`s#^lUbU~<#KIJAa{}86(si)%bDjIA01!i7u1-0#zg2v<78x8pUiydyT&i`As7ng zhJk+P^6{Jc8V8P_G@vsK+V8gJnNo9=%Dns=^EVrrxOQlq&mW5$lqB8KncCE%EI=O*CM%Jtr0blS@u72Q_uE01i z(KN@fG4-9>4t#rO)APA^evePCpGeM`5K0-#D_R^Ln%#-od2mKta>4nQzsIeue>ppI^@~)-t;lxwStsD!|3aW}mIs&E@01A)$%;eTQ6(b_#Chw$7b9 z*Sg?seNFnAGp*KLwc`Qf0l&GV9UImzt_<9AfBEc*+DexqXWzlVEj^1W(-YUJT2)^} z%wPF7U3I5&^=pgVGuC}i(`Wi5@7mqS`R7FJ@N0|9mxu3sksg`w^rIi{pV=4p($Qd% zcmC4dHm}Dla%)cU2JE#qnct}ldNTjz-wYP5dOtJ&4{z*$&#yBx98Fdnt7Dy0%;;aT zCorY03q~79G)j0jQs1u7YlWZ?NYj7$;$Feidk;p!vP>d$vr1RK_04z?oMRe1m+$S* zVoKwt2dMV+V*#p#%}W#y{+6UN{&O++DY^UhUN4 z#V;MNsH&@rx?|Egb%l-2e-j*^Xj{byEME7fe^()w-`~@xHfkKpa!B#_7S<|*_vDmS zl)nhi$&u09Yt|hD;Vt3uNR}Y$rHH4aWvs9<;-1V*gF^mx zIk#sh)0Eks-=4`B;Bc+N+J^?FB@f~7NTKTd*=!RX$A@EW`--x4)R~-D5mlTJ#~gdJ z!Qk=xRTn?umF>ZC!?+8do@VinS<`!6@{gZ<+9=jx=F~Rj7?g`A_UtKt5mF?hXBrHJ zDPoUl$xWE3ya=l6baecJsv~%1IVXCHD)tRk7))PKar?VN5%*5bGvDM?bhuO@Y^<_r zE;@csGO6FL@>ns|h8lSGM|`EIUoCvYH8x@L87JDSY$v_eCYN(&Wj*njd-kzc!u1x17Hf*uZvW*$9{jZFYzA}3le9(nms)Kw+{GF z3691@I0+Oj(-*rojep?m7w672-~@&5$*FXI&EpPwKg=bI`39rtdie*gA4OkF>{et&wr!Jfv(-{>6}#% zDE`>paZq9J^t+_*$9>I2rD5#1vIBjL=d6N$D=TO_bJ01RUH*I5RZZA@dMo2-U~Y*Mn>Om{q^d}3hUNnqzba3U-&_;}DevPe zx%8o|?Lgl*agS6J;DfxNmf_GQOki-K16VjFScV_=1_~+PSX~-hm)2LK8ns$rUQJIJ 
z(vs4nXcHg@v!=oXeSbgQ3P$U&!Fx!PGQKbAdZ@<2LK^md=|CFo{C{Vf&Y~A-Sq&{P z5Vb4(%}&lp|7T-!`lbiJtlo54Fa6!}+38PiMy4O#^Mv!nw$sdn6BeV!%-0po&4QP> z!TzQ1mX0OPc>T_MFxFJlyHVcQ@VPoCq4#tyU$){z?4HMORH|!Fu4s6qY%$eHR{@te z$q(H)$3feZdM5cJzDkPf%)i<=!tiyiKAu~k>0Pp|t@p(DDI<-wb~27!U+nF3Dr@J8 z1-h#~7I9cZRcFIENf_mJJ1pwFHAJlHJ~gf0D!ax;kCddw6YVEGR;;mj;$6JDO{H<3 z72U|`G+}wKIp=Ax$>P|o6;{8*Y_iy8-Mgv!mFu-t2@7BSZwavEAFTu5MljNzV{wCFf=dYX}+TNji;D;BR_kZ$B z?}pz_oOz?8F&ug9{HC`*G`OIg$!oELnd)`EC!Rn;Oicr^;V@ihP z?5bt&Zhvv*tVn)40u|yKrR!!I^;DO$!g#s5TD42CfC@V$_JnR)#Kl#=1m2D~DI$h; zjiSce3%>wv`5U;E=<9}U-L&Rd=Xt7b|A%h-0$Gj`e!cuT-&@06d-<_}nftuQfBpD* zf#(kRc=(36j>S8yYC0d!liL*;|xUJo5 z&Y32X_?~pQspk~+(aO_*##ctXr%()fYJ8B93CE%!r?Iw8%@2G=>V;Q?NAJDV`Sr~A z2fk=*(1!|hbA8f}p>IR%(vtu|Aj;b5Z(6|u1jvmTUp$=((eKdGa5rk-aGXiI4yVaG zIErDoYDL($L*Q6_ge6sQFqEzQOCJw~`)4w_HzDku!^mEsdAuud&z7P3VU3K2grD;r zms>w`9JbBKHIv@n`{w3yvlv!%L+H}|O9p2Q`|laejBj}Tp;P5|rM;427bES`Yey4a z4%H^k4%Pl^{rew@uZ ztQE(I$%Z7MI~QPiQV{c6H3RnE$8R=G`kOm8RmVubc`)yi&5M}okGA{0GC!m-U+X8` zL1(eEVd_ze-Nj3BC|pbRNbE8e=Xhx4p{m%qooJ(gm~ zi355yrhNX3N9Z0EGNjNlY%mKT;T^3!>+bil8IfE0%Rqw%QE`zwOq-xPzY$&quZ+)u z9mg(Jgu9^+&c|Wkq$aN=zEP~j-{egP=0f8+>@Gx{kJ!7jE+ngD z2j9}6;}r%;(Yqix!0>!8%!q)M1RwSE?VXKKV8ONBswEbTlW7uWeONv|gzv}ie{6mx z2O5usXBpo~rq(xQ7WBX%{WV#JA}&{t1IJ|Gy1g zuoU;k?2=8BX>y+BruGbVp;i}fvC~ovn}zVvw61tk_B)^7>8L)|6wpPCBNf)+q}GVa z;U3=8-@SfEX;D6gf+DJi`s41;$GyfNlcAD9(zreYmAV43%3x3sqC70J79R$~4DmhT za99oXjqtEP$1lo{T$ z($UXxwnsK(_CfozS}i+*8kRluK%qT`+hr@n2&gT+Pe*o$J{(0aAZmw@eFcbBrFzMP zXTuQ)6-w=t8-xY4saij;pw~mA-$vo{P84Bxq8Fg9BGv1nY_18{I;1TrW%4G#r7p?M z*mgxaG)2v>fObq~kjBVg0#2;5tB2_O?RjS-e?9X)zWbH z(%$eZP{^>`tTnZ_T?TK!@}Kd>6E^9U-T~9bxGjZWJGG!@FBu{Z4qe8b#^18KNN7Wi zNqBRJ*@d!xvj1CXH%97L;qUS7Zxrbhs6L~f9O;MBFFqac?Le^_wdT}F|J3en@k8Bd zqp9TcX4(VAC15Dh-(~nUz;+76Z55DnH#1I+4X2|LnGW0rDwfdg=nI4u2Te}c2hh2X zoGend6B?)$s+KC$E}^_>B+-kKpO1nYSox{wn&J$xBa&5ZKP_%kY%za<=W4s|J6+TEx7h}ptgd?Ms*F9n_#`r9}WgNNUz>YTosQhvZnbiNEp6-p&c}KIMM6jS{@X_)kH^DMnYgpLOX} znq=jKbQ0>ISsF#f2s9Ci-PE?Z7;Gg5y*u85R!>iX= zV5R%8gHFkTlK^%?K@*Ej=JEz7NM{7B#pLW#Xre$607}!$7(6DEn_1zyDi<{J-wRXZ@^2?>NKP zx3`sCNQU>|9>9YE5dOAgTUjw(_7XHdP;YiCL?862srt8rH;Bez>+6@UCznSLN3<$h z!TjzZ8khIs^~E(NtZa-nwDU-O!)G2s@HQBA{u$pMeW2H`->`bTGty)dXv*tk46j1 z)2Da!I&6Qp(4f;=Yhf(+Je09IL_0+35WvxkFl!Y>%SCFdHBOoMoK)p}uDejxq0dQ) zld>P&b)h-vQU+Ag&*EHY<`_~*uaX|zZddRY;6V&=($6BE^+`2RzT)sW_{iyv-^9<}z+dLQ5a==t|QaxjzGELN0@Ulb#1R+I*7oxXLjl_cXpBcu+) z)AkYq^a~=Nb=y?IZ^ zi+4VI5aiPAG2`?-PpsZrr*uV&X6;z6erL+LvRqr;kXga5P(7k7^whO2z4MTNZ zYmF2RwMYFHC(2HPvz!t?O41cXpVKK{zQ{zw)@V&YhY5AQ6XUpSk+JrgfX>siGm~Zo zyQp>tMHe~>%W}C!-gZ+IQ{5~7AD^H9Z4?ti2>j6>l72c`(Cqtzc^L+s9XSXm!e)BM&B~d1uO^5ZZbe#&Z596pE*cg?(y$YDIS+DW|ifAYH>dv+=f}I`t^=YV{|pP5gu_7~4aH{Fl2I_fNm| z-O3YR6rurC8s~d!iNc67s2Mj{MhT<}BLf{8pK4d5pt;STc}kv<)-F~6q3a32tk&fS2Yjw_Lw(ICrZ_Mjt zg?R_>=HxWnMevuYuW9{&&u(r@Xcq4czWl)$)NJ0LfBz3hET6H$Y_aQ7i>sY6VL7^X z$KCD*Jvu7_p|2o2aK~|9tmQ|Zbt}l_Cq~*W~cTzck)4aIW>_yTh%BI%oP{zd~E8I z6Bk&ss5tELR+3-!IEggjFB$%xzrcsqFD~TT#3?7BS5;ODoyv75|ocsuApJ^b*Jh4J+}pHcGCh zK1HFc8KC5;V{IDiZ99cwawW%5-Zc5e@&RN^Uj9YcxY0&y&Bo_kq*< z`2Lq62eZJjp&47wEZ@G@WsmMnjZi1wKAqtu6}r!!$)6JO`Rl^xum!me3Hn0!rwR)| zZY(|q0v}K;NapMFr>-kiS znUT~pZom?dNCJ#)%`Lst<>T)JE@(@r5m3;|rTxa1Vl`dKZ6KJRfd_*PSo$p5Ay$US?!m zRsr4O5ixoPluCy_AvFjKyWwP?!bszp;`4oA0n z`(zrw$YJ)9GTJX5u)ss%?B(iT@Dp0PI{2*n%T<1>{?fntFY*1Ke^FykpKDuQU@1A@ zzc8b`_a1e>;azET_iTwQy8S_P$5cmpfvNPIaX6&iGUAe{K9K+7>V+*1ZNMMJ@YnRQ z=`uBYBr3}o!~>Jxh0?LIP?$5w)e^%iV~wImSwD!{u@)|U2ZSyB$lklO0~MRP8kS3) z#%0R8O*~m#xyb>KxPm~21Wp_fsN!RlLgUDEGGvuMu#fbu*x;bis+VB0>Fk?RDXvPt 
z-p}a{j5W-rMCs=s0MZ#h20x#@7nM>8F(r39Rk@oV&++bGz}(aw#^yB4J@vrIhl3}%d|53{#d=l zL@#DD@da2GsJ`Y!7yQleg0Ut;^%}__e0$rX(qS!kSO=U&oj`_IFD+_|j{O`|CyPFw zEMTC-v3D{DBpye1$Ij+|F06K798(iVNbI^A_zvo8K40SwXPDd0VXW%gW+z3zo<3oA z>{T`c%?s$N7(I7VeD0fSQYCLcjI@pzE@1;-2?X4G6zj~ zg;xhB7G=aZtbx$hNban9odEjsiauDMB3BB(dFc7e;?6wwoO~5$w*^k+^~Ex@fU}*ocho2n%Z~ z(=RWZcs?iOYk1UHV}`oNj^wd`p-IM z&oxa|AVPQfQ~7L&4Kn-K6)TH$%x7RAg(Aido>3$#lSQ57_}}A(B`{_|;VP5BwD45slxZU5y@@iS4I=~Z0;>BtH%c}P`l zt=(%L`js|~MK-lXcJJf+*N_!hRSg9jx$G@0R(JH^bdF}WaXPQI7-<(6WOL(;qXU7x zv`v(r%$RC#8x^w_FHfD%jLzGf7o8`vE^v&FRyOdvqD`)gjZeoIFRF|=98>PozIlal z`(lvN0~0gLvR&Fhp@+Vlu1yRR07Qu`xadBdu&%^@tCf?c!R_T?P%w(}5Qp5%&@qvz zIpA|o6hlf|FT>nV#8Q;EPeSUZ_R#{ZB3&LXBZeEOD0B-jr8ZEaN_iSIMFpUC{|$^UbH2D6(98M?9@Vg>>dzoADC z=}KXC0hJ%N#^Zk+Az<=3#oExWQ%rr+zF3gK942J@dTB0eE`XiaFh?l+U`iW8-YT9DanxejmdpQN+~w#ht0+7rWZ;BNdr&p%V`` zmC$`?Kl#POn9$ZFqWNf2mxS1`H&&d~5<;o&c4b{y*p-LMOFoRozk^Z(htbtZ@B4#} zcb|Rk(cXOR5Ki;jyV$cW^ktgsJ^5yzX#ar6czs8G;x~ZHcnGi1k4CW1<7~7La2&8nUGL<~d zH*E~+oz%}%qgoo{%`gH}xy&fIlx}5oOLA(u(8|?VjeDbdn*YLEalW^COQtt}9#b<^ zT-Xq+$$8oSP=$Qexlm9j6x44Y%ABT!9p&Ib8 z`)@-ZTygxVaQh?WbH|_h)GqbR=NmX)FWmlkt5=V{S6zWV1P7q}b)`?gKp)QD+aFsz z>$x~@V}aTLRNbP{@9Bc zD)^(S{|-scyZ9;~#V7NgpDw|P85YhJJxF%2%lg!B@B62|i~pEZ|0}+oP8v{$CKI-* z_`~s4;%9P|^B!Q-f9RVAA@)5azA6t9{OLA_ulRHPqG5y;?~VC=nrJhic|~%#r`Mkm z$h+A6Op7~?umH~C*Re_lIKBUIfo8>9wgJ!U#V>hGx8B<0JX|hl-IY1q8nKI2ySelsd=?zS+t_+UcTfBeGOqA|x z%6Q*ez(fQz^jR4i*6C$=WJRHJ?_xeK>xP|3AzAR4l)4WUdxW(4w5=qp&_$p{mO14F zo(Scx?gP0KJkjQ3oSJZ-wvcn(VKqHiBw1|iievVsNw7>3h4qp(3Yq~SZK1)7Rz4$ z;+a=xUOU&6w$EF#-)djm?B(4{HI~j^O5*#wd$$f-vhA<@YF1{zRj0YUOV7{A_jSFC zEfLS?-xrw4yy~=sH{;FKIeqG;Gfhgwmzq*155! z85qekAD*`Izt$%~R`C#qI0hCumKxw&@N7QcFjvp8LC-LE8m(JXGY$(ibDnXbnQ=cu zyKu2~Z)Z%1IWNSqrp}ybwb^p+!lp^#hW2|25hm1EIilAd!Xw&@W+B)Pg&G}czK1}k z5duV=3t_^9?$TaxUkcRxVu;ud`uE~{5f#w_IT%lnehb7^QDA~% z-bjuOlz8rTlbCm{q9RGV7*G}FtfRu3IcJ2WK0aBsQNdwoH%^F?Q@Y~sP~z+&bR zR!iTIa@3hRFAzmOtR?Xsn|Hy$_}}>{|G1>M_krJsX&=Pp=6~t-rE2`cE4*aI5PmIc zD9~e@^S6rU_g)`sDSWJg;rqhg>na|`x(Z$B>&|;HiCiW_+T`WxdsW*izJ;Yb8Ry@K z7m|V)M(?3_Vl3b=Cylk_6RwMDtYtXLP53!D*{Fi1criB6R)hF}I_k1O2$w-1k*FVq zFl42zaCU}cnGW)v7dh?qHjQ@2@1MBp188Zl+cCR&??~3AC9ezE1Zy*_nfC@mvEzMy zh5T6%Wb(J2dOU`%-D2nFqwPmyX6!8r+2-8zRr{{ZxqI`k_IyrtOq_nGh}f3aerG_6IvYB zTk2H5!UNpyUVkFiY`UZ?Zkb^r8QLIwb46i~F=BNLuX}CYbNBIYPWAd8iweVj?_My1 zngoxd040Z^;p!l%J=%QVZDn8#=>6k+X>Yig3}G)aIYLkd8UlFXC(77 zUZoEllS=JGL2|qVy!|%82OTJZfNwED{+kT*Az?XFvUN{RcB8+aW|UVnPSoiyY}&}=vDfo9+%Qu?@#;s-f{vh^2W058C>dOrS)3~`ggIQu<9 z$pSTpBa2CVN8Fg2# z6fxN9d>wKQ<>4Nzw=>&Ejx8_M0k8uOu))N!JC;dUOz4TAzb|R+I*LWz*ziPR{vBcM zSs+7CUu-iSY_~UmmJ?4mxA9TJDtDpKe z*IBstpqpCw_gdxSEnZ*4Xx{Phz;KJj)1C|S7NI4peALl`U5b&Gbqw595!}-wXm$6J z!^BLs6$T$Qb$fv28Pi<0XWqu5yN@jw4h+{S$39#Clt@9Li;Ib`3qT9g9=5*o4ZejK zlinmr5ciRLNEU>e(QCr`0~3#M@W}7ior|4Ge4n|!)-(?JxAX4}g3Un?5)Z(Re%eL=8)wjz(Nh&%h9_>j-dpECRdOxeQ++UntkuSybDkW z8~|Q2UourFi6H1DTujtt&!{a-wRIc#5a0+IMmYqA&pKstSaS`0ygvpGitvjcj{cwG zt2-;}XvYcGHM+2)`DJ&|Q{}@Fe0S88bX2>`=ks+NG<9>N32oKM3R_NtURi#eG(m3N zqm?*Z84F%*@wRsxdyo-}p2;EFyOAW@yW-N8nJ&{Oqq6&1YShr4t!Zmzng>5y?+(nF z=vfb1^)aB*jv|EU*pEJiJ{WG?Qzu!rNW#$;9K(mli;VBDExmF4gH1$K%u?jlV}L9c zDfV?JoRuLzM&)o@MGr$XD98sLCg5opqsO>(PGbRW@vOHzD?8bIUW!Gt2Tt){;n)^y<*r3`ccj#2|A@R;qYLyj4PN*s^b36^g|Tf}!Q@NGbOWYaKe zA;F1WfbF4%LLAjr@uulsKGc61ChNmGKEpbTLzH5eNQrd<`8v$@j%oA`jkemRB^ayi zg9^F5pp`H1^5i4eqK#^5`dfkm3`=ToJuNLJH3*3S_PALn&z=ap4%)<@AgQ;=V6{BwOYIofYv2CzUw^k$Y9CzYI;D2!P4L>J4k2!>Ls`H*}|w`2%xBu?nrimj2*nbQ7#~I$53g>B7x#u;;cOjz|Y|uM}NWl!e}p-@odU+Pgb@zWmCJ z%Z!cT4ViF9=2Tz$0JXtKcNUOvna?Cm2?2Z5+7HF1V> z_;XN%+FfY3Ik4&!gtb#M&34W=N|IhD;nR~#y1$`k|{MrGRz`DH>O~EQG{$>1N4XR 
zMG{yw3YV9Y!98qIU}=KxExES}u@l=Uadksg$9 zsV7lzg$6Jmn!vjqSoj=whHkQmJ|Em701tN4AjxZEL=p|EmlhDf+^D}|59=H+ z3y~2Z7K1*S*dm8iM9~0nNPw9x@AdC^*1L65@{d3~s*_8;zr>rYUU9xY6B_J9#SvTf zC9)S6SB%=M35lw8u_>qO&Rs!evu^%Y!1fzYgD_SU zrbhUM@QO@p^i-l|c>JsS^FK!VKdxCH-A?7}eZ<)mY+7XRL}fUc1zQ@{O)0lG)bEIs z=TFfeoFcE42s#k^4&f_jKkKd$r+uyH!@uV5v?LcDSh7-#xjX|gJ&9+4IBOcg z1<<7mTGB80Z}5siAs6CFLYgYHMQAppWYa_-c1g20+Eef;sA9TwL_AHb;ZZkEl741| zEJlN#aH;-*Ne2w=rSVqi2}hT6{9KD2wo`Y^500Bp>6|K!9~V9Ka_p(L*B?C3jwqWb z_*1Y$Q|FrOsP-K@^k?K-30z;D>!u&MA$p$EXQBMeJN8sZ_-7v;Dq{(D{1gd8*th$P zDg~j}H4lqpPkV>Q!E$5$znH&*((_)-^Qdp|^@3eDrRm8A^{5?-^nxeiI?iPs6E~%b zGiA;FvgTfLuF4caf~~_QONi}wa1t!jhm=D=uS0O*kqQzGo!5$A__XGBwsyZDzUt^0 zL*TgB=SBZqbdSCLTCWwho`)4Lkz!UzVBwO!4_Hib7!k9 zc_Yx{&yywP9XKL-nzS@g$Y$%ss1Sh}o9Psr>J-lbGU`a){*@z%eF?Xer%oP8Y)w4% z?QzK5JexQrC^t3h+O`9AD?6cilLyTkBk18;x7U4rQ+{UwFyrmDMWHWv%7!I-Dm3M%guPH;>8T^* zJLutR!leFF{ET31dFS^YcZXWj`f}9yqj#5npK-jgHRw}PIj+h~ZbTx4d^`6%@+0g4 z%FI3eRVbK~yDnkG8JAK35L*z&9aZPLYPKvR~NE$NCx7K{{F zax**e3iSF9Lh*x8uXg>*@ui*8ZF{ul+zMm;>8`8SEY*eVg^Od-!RAExLY$b@aqC*S zUAtX#Q^Kupvv`R~0V5p|=%f9fnr-rZP3wV4J}n;&8!B4tc?f|JAXz}VI*DezDsN*4 zTOgWiNCL9WUDWxWG0MEmE#Aa{bG{4o?OkMHkg7(kMcQ%cW!Z`N&Bpd5W2eam2dQHCL+xi@a`lk+`2=j+LOW^=IWzwXhhY%nkqINx*Q~` zYq?BdGAgEF zlMSP`DCdc$HcyMSEb45Y2K8R1cw1&s-e8<1=R8wf8o4KWnv5>l(B>=v$_Z_Twv)bL z#i0X-x%KlzxlG{U+#iLuamv*)`>FFzGMBem|2j<)Mhkte*p)wZUcorwneR^Ww4Uk! z(MiHXq=LBv<#+R;i6Lz zS&5E9_V#)Av&HhpFgy{ssNAVr@!}yvjmF!3NGm~HhCB3mc=*qFpOI33vAo$A6~%em z=WX}L=beVnSYf~)KRAyr57VtAZ!wQIz4MCeJY14)!oxGvGY`=%rL3T=@I=QEmtqPW z{`Am($W=>oV0t@38j4aw6`v+XXH`5oJSVm@JEG5AaOx(yb80w4YJi3eDTzg30tg*LuT{ZWk={(GGg@faP@d~1V!*TRFKr27Mfd`2^TV7_9 z=v7u`<$_;W34ALIx+r<#{0Kv_G#@Wz?<$6237=LjXHyNE!>bfmW#LuKdW&9}F>_xx zli8dYdbP*OrrBR>QzK8N>%!H^4_ay>M|90DipF>Qhqeu)9$3e%bI9kSPLR*7L#@Cx z(>R*gpmXHeh0uZ$;XdSyoEnrI%2?MHcy*(Xrp3pOdD-i+fxmh`z61#sj%-s=Q`JJU z(;-zp_mIyR*|Z@smG6LzX@LO!y1hcg&!ndZF{*$D(@gRVaw1JXwEOhe7!C3V@#sT7 zYDqbQYE&vni4yr5`hO^-Mh-I<^Qx4#1@LW`6fBsnkh2BzrS|rX58}H*7|K)XT3UT# z)*Zh~Jm8T+-YvhdS%?NPHa3;kH6QF(r8^{IS>U5CWuv+wTYzO5oBaHJsBR4PY2Ies z7I5x*$Y;Am1q<%171(@)A8i}3!?2(OL9y&kvp=Uz9NTR{4-cjrv*;VXJK^3E+E!u7 z!7L6B+1~OS{nDgb^;@4 z(4X`V(eImA`q7BC2Wana?~hlXT4BIL^Kjy0yvq-GI;L)dJN+;z#U_E+J2++sqV^RFUpx*?_GWfwdyDm)Lo*b-H~#Tdx)S_$VP` zhwyif84qFefH$JxUkD`>UQ?p%JrhSZ?WvLy1CYkb{{BsyU;!e2(VB}uTI*8vu;cSA zgvokVcd}tM1P~@v&s5paF%Rb`-gJqK@2FS4 z0jXcTpTFqG|3O-cOC}Dk4MGJHWlDrjqywo9o6(>bk?;xXbIP?9rrI|mCCo_(zK~Rx z|Avq&r+kkZ?^a>P`=t{VaZ(@!|By&wsB-~zT}x!@*k+_+jTmdK25B@ZSm-q%vKLv7 zg6N19K}AYepgE$fIijssZXiWTbTw=qk=k{k(2n=EN|%4&%R`2<3ZI@pU+|Ej$03Kq z2U1h*&Xs^tdYA~qRJ|>Ue;;I_N7%sd>W_9gl{GuyCNw0^P1v85kaX_;uJ-(fq~p(p zE4yz`i5TDG8PnVdp~U{(2St-e=Lc90HXEK7&!H8`E`KE$08ry|l%U0#TVOW9+U1?r z#pu56=8@3@q9<~W1G6%e8SoS7-|ZoqwVa((e%$?-Z;#|YOfy8HgK0e@@Wg=5qFY)K zqqB$A=ayF7f52louI}c!kEQign3V74c|ff3Ku}t7?@4e+5qyJHl2@Bo*N|Q>TF$m^ z-*h{@K5@CQ{Iz?LiR8WpMQ*@N?h%4EVMAy}FKaDO$oT#;I}!dkYuD5M%72T|^%#N| z-r5m-ej9k0#Aa_af+vd@P%&a_myKE+%uEQ9s~$HYYNznaXW|+h@T4$9VcO;a2TB4h zqM=5@t1uD;!m0x>Ev=0^@kIdwO}3#Wj|X5^V!%xhJ>ep;o}7{N37El38rnlfbQHPt zNo$A%XLba<$rV{;rx%3^kr=JK^#p|v&8?NUmgB!V7&(%~!-x+e;y%S?@HPnEefm#B z`;Q7Gu2CE)gRv%)syu|SIqB>Sp5VyV37!yKzN2_nEYHL$WEO;Ca+}-51CDS)!96UQ zp(;;`Ql?r8p&X$pU#AOV?vbR-SPpj?(EV@>lyBj~O@{ByH|0%Qv5Bq6g6T*%h$QXk zHjQjS%RofRUZZ)k4zGT#x1Z_}xF+w}^e=|YW@nr64!pbPyb>d&A&sl{WPpIcM!D2^ zxdN|AJZySyXl6b1skhw-^c#?L=BSpI9Bk?EygT1z?iNK~p3CNLE{USa_(Gr1#JcgH z^u5hJLc4%z1Xn)hCYXWjZM32YIzKT7!Ee+IeCsqc>N=Rt8trhx5mw<>i#I`V4(lDj z@-`x>R$fv|wE~Fsc1{R)?P{v%K}W8?b$VN6cj;{vk#>1oRRfi#BO;Lg0~Y|O9F&48 z<&4?j1;>!=7Rf-eDZ|1(-)WJ_EWJ}uZjwB4QuSP`2 
zvu-`E9}y>r8XNrSG()T3()iJ0QM@85hF3w?WeYx%Pqv~kbgA1YHq!6*o6;ned2~)) zNIg>o3yy1r&)px=TP8c)iLMkK6voQ&8b!1o#ep;jE)lPCvij+{$02&gh2ZX&2Nysa zK`B>D3a?j~u?htrbw{0^Fue85^(MC$#c!I}-Tix^=BQ@v_%tGnRz%wtA6%rjLme!a z%Z;@9W*V;(mR!oUrCNL}K!cQyGSrCm^|IBmPf_E#UeXl+r%O5kYeyC)^ zl;^XbuX%d(kpA)Ks|#x-h3z>Qx-4yrRW6sU6Ib><(}L`jXSz6 zmcumM4S00i^KgUw&W~1pU7-_{>Soel!kNHxtT0TQU}6=H$?I|_qyC_9f~z8fh+GwC zc@H&5c2R4T{rQD3E#fS7M^jLZwQqMr$J1=l5b+zji8WUUgCWdT%s1mTFo7C6hqf<4 z;k`3~lEHFQNxFN`!Oc!M$#WlEv2EAvF|5qbDopk`1*LUrm)`E)nr57!)vH~tv9h}b zclVwoy15gbQVUbz69qcI5-mo)_weUFWIMouI$1lBSzF-*NxF=Wum)%Z*G7q;FKY{- z4dcD>?$E@@7Oy4q_Yco+VX67^QYu-WsVWKu{Y`W%qw0{kho{2~3(F5Oc-F>Q&x+n< z=qv$oqk=JMo)M<05Hgr8_I&$cV1V;xJ)G|$dxTl?8ql~9DcS9HI;VbFiLOdTscFkM?47_E zjVGUG!KyhMpxjxVG3^lG$mKh=u=7%fdeF8NP`*)~k_K}Fu2>`n8qzG0`pYU%D90{D z2dwx`5nbAKUb)ctv{t~P_LU*u5;jJ*Ugr}mGl&9^`;n%iLg|==1~H92!>g<7fsmWp zUaP}@>H3P$Um@#Jz7wO0U89uOCnm6IQn32;lcVlA%R2WQ%{d(u`P0XD55zAUg9%by z-0@G2{sC>-JxBj*eNxAWvg3xJK-o0EjAI-=Gt4zgrC;fyY^$G{D0W>4Hzt=eqI!N} z3dyoaNtLc@udSV5S{*H(5nk$Ca-R$B+af}mcuu~UtBj62c=9xunB%O>Vtd^OTkjGs|>}GmJTtW9!ldV&NPV5 ziWk<4z(ttLhO?%4q46SyvyNc*A0rCisxHUwewbrT;NLGco1#;pGIKaCwb?IjP=ZVE z`qFWgEih?lswC2LzznGLFq${EdlD(|M7fgr`*QYb1Mth%zwiITc&Z!+qv66+PiAbE zL|4pYh!`D-P)JI>k3B6CWv5q0Gdx)ZR!K^+ELe0ly|b&Ls*BAJ)k)Toc+h(`R@)}; zSa|2L&n4CN&LR8ekD0u2ebk+-3fJ~hM;auYK**^-yAw`;%_|_>eh*cQGhtqQDN-P$^!N9qE_jZO~zVw5TBEv`0)TZ|vA zXVx>D{c}c}{f( z_EC#2c!Y(@zLYIi!^aIv1m;OYI%Fde+x4X$JN@|)jOS0$NBGcKW$)wX_gz8Ggrt$%!MpzUu%l!5qwF82TMuOx?A(>Dzxvmm%uSIT z%H{%ng;XjHZ1YQ0uA+zD_MaOUR8;_}5q&=dNDV=f9ghPjo>Z|>9C!^h*m&rVY@^wj z#B#+ATP zzapEZ8*iIHd1~*!DmVQEW$E;Ss7-K$3NO^z_d)EYFWes(>+4W9-sif1AMR@Rgt95$ zEIi9DS9op@EL@}pG8&hbLeYjRN5qDQ_!QIp6iRt@+{N38M{DEq#KvTNf11r;IWhmq zKZQD;LXB~X{OyF08teXf{cU3d9j-3SjpF%K)8L7e z8|H9>(KVLRlu&^wrQFVyx~H8Gqs(BafNu)ryBD+ne&N_A8q7}`WZc`g1v1L*E~8x8 z>13L&GUzB6Cc%e~#7|ie*Asp8XcGP=T~F(SuS0Aoo@Pgbk7$1Se2u0{!6)u9Z6vA zZ8hu}^8QWl{e0yKfu>Wyao7SBz#Kq|P%iw!U+SFXEJ3dUa-IW}e?l^b2aH@}8OB zI{SCVRBGeYz{kNme>r&eQ<=8@Jm7xUYNqs`#@AoBJjrFwPK)aOd%Y!gF8Qf`Vu@XN znA%{2sTog(?yg-}R><1Host7TBaXsDU>cJbwy`y$6DYb=lQRnij^P~vO_$~vFY^=putP;F zqi*ck&JcKB>88AldfdMM6+@w*3H2(ZGW9t-CdbEa3Z)^5sUM}=cmIJyCw2SUKUXc# z)!TXD_BivE;}_P?zx$D-OZ`mcZ9Hq7fsu>YU)<;5Bq$v}&R$^Nu$^jqQ{_i7wX!pD z_48YLhC^v>Z`KXiSeM9LnpSsii_YSA`XBN0)We}m+fu4&od>WP7>u*h zw>O$f`&KjK_NpLifLS=#ILo3S$gEqR<|OvZqYUas#pw$kmzj@uITcm9#| zLohR2RyIwfAEGO8Vn@4F+lqA&9ZPQOQU?aXbmIKHS@;O0Hvo|KRl$lIeFIku4gG*qL~ZlKsw~zzt`iodAd$B zFOQA&nXxX=3C#8q#f?=Cwwq)HbM;GyW-K^d{(gP_F99>(=*hIA!J|ck&PA8+oyGa! 
z(Rpc4iqyF&$1e=~c8)g+Xlb|s%v~ETw~Iyra?A*)25XH4Hl&tpJj6ynH z!!zMz!26YzU)Z`>E$Q-r0G~V%Ok5Vq;TNcp= zoWGjixXAU_bzMDWM=h@%?e{*A^1aXS(fH7=!8;kdQ!YZ>cW0McjpjKXX5w_`)D&h*V>R;jk^qQz}5QcQlA^lckictQ|%etpViV|c)*ST-T>&s zs_adBta|(RxT1jj6gY}*PtW>)@Vo!Z`2M>$H|;726bab6OHx*|b%Se$tcnBEW;=?C zl~Z`p`8uuCtA$B1zU6s=a`oGc+CM;j+o;1ekF2xX$J4PFh-~|G4y4u$`*7Ex@(Os-Qc#Mqs@;FpXzhfP2l!`w6UI zM5$lAx%iHv^U8TRNfvY$6x44e_Uhfm#Y(tQijs*1J8N4G8Q+u6F$PnaYqH+!t3Y~5 z38$1$b@UUd`Ma5tnVU~H)o0>D`*?)r;oxYeLTby#B?#>TJUY7sq`u+ zVHC(vpkz+rQ`jIBW!3Aw8<8&E9CgZopB^wihZ+#lPErFDHcYMmC9eouan+Mku>sN93l9%NC zlIy);@$}30E!&5ed^s{zwtAoC_5QvGZ{7_vsWV^Kny9e(iGDMrFLcfrNN&oIe$Kcw z3YZSW3-SAw3}n;x*NH{|tXSZdtcg|@ZbU`!Aq%>*7Eql<^?9j7 zw53=#;lv7yR(YAiQBWpVIM|AuM?|*b@qe-`(lH6i`kXxNuF*`&3en(S^Ecvy;0wh3 zW?M8XX5N&=x_3A9fB)dxWV0nJ%eu!d5?QOs5|v!N^NyVP@p<~;o!8v=^b?|AIR z%$UINhiq%}{g;2+rZ;M(=X78j!~e#Tvo344jUBqAx%HDE?(LrSdmQgd0A979Ugy0p zJlr*bqP7X51zW3ym7ny^z4*Zd0PkG{Ap{aHr*8f z%}Gn{S>HF=`yZQe6FAucS9GqocCrH|iBtSMY{%#GsQLL0#zOlBUcS@VH2?7i2j>Ug zfa2u1{k(B!b{SMl8t83oqe+ZsI;j-H=0=s_!{h@@|8NMVWeMotF{3`o+|0SO_06d7 z%9cI;~Te=8xmT*e*282s4u*7 ztZ;uE&qqj45zZAB?iX60iif%2OL3Ig52uB8mOA=OV+TLahk+vFsA`LumZo+se7hDW zyKOA`pSMCW$&I7I2oSpuI<}rXG9LX#I8m z$-)jEt*P*m#^%bC?QQ!Vr(N~$nMS{H@FSm?1N*kWem}ndeZkTH^()QbPT(4g=mNpo z$Ne7RH@Ihua%2i4GxedMK%pUh>LxjLpKi&9oAWv`g}i8s#|{~;Dm-j~E)Pekv?@^Y zg*!xHGi*g?IH(n)^_>`J1RbmSrFQLkTUk!u=#HbUOtF-3URA>d~s#2_mar12jP5j zF>)vQS#et;RHs~j{H4~~0$?)~`wPYA1wawn|wt)OqM zKFBv+o#mgIEWN{HyGq5X#~tBfWrN6D!g2B5Q=WT7J?FP8-x&QYo7xfE`L|oo1Uue{ zv&Edgh+LiGgElU+oM{|uj!5#LbDfKOWInq**HP^wVi=nm6)_lAH$dO_x=?ga!%$qI#=`5M8ObG*td!@MOa%9Po>9YZM|UM6S^qC+a%ITLbp z==PS@H>8*H4L>2>)mX9d*L_O}L_f8CvtI^X{vd;rI{1my;%t)CYAqgIg(VctT=W>* zYtc59Fc#4(5~Fw)chI5*o3AKzw*y=DC6uSoqSb(UMcTB@NGj0-%F(ji$@o_CE2vW` zD|a#f8sE+?`zxevTu$3D&|-ABUmQ~fK+TaB9xOgjYBbR&gMz^E+Yt|k4G{h`n%F#u zk{q&)UglAVAoqFZb2uXqHH^&Xvo!!(7lstHoD=tNE=#lb>u_^54x<;JueoWJ9Rvxx5*IKD2mt{=0HF9q%rjQ2wb%s3sk*j? 
z#}#@`86q|m2(t@puX};K7t;oQUV$uKDN8*F-2x+{U0u`#kPuu=SeJg^aubpyr=`cJ zmNT4j8vrUvUxg+5mcTBt%e9ouki=E)`_LLkn?K8tKa-5_ac8z3*L+RF`~E(l$ACSh z)?yV$!xK3#&SjRs6Jvd4&WMOhhdZlzEL7vnMGi3~u&+av;d0GkNC3511m(=|eVd^{ zc|wCDN$Z(#7{bJWXv9GmB|SEYruueQmrRKEB9S>mHjP2_?Mc1C*FT$H>ge$W@%lNU zB;EfQfbQsT>C_W6qK)RW3P1)>O?^jCU2W1lYk1Z$2D#YUTHeA_dv{NM)}TRada>Un z@cnTh-+5_=*Z~J~kL%OOS-X|96z2q=WenuAj2r z-F}oaQg-$7^iS9Z9g)*V#QW58H@hiAjn1xicFpDqR^sBmvYrn-`}&Fu+SA)N{xTdH zG3&907cu8pP5XCyZe|}kl%uH_U-E%ZU*DFgca#~zmFnCz9<%uq*mg#Y!HgAYgX-Mm z*&h4}Rt;ZQo^5E`oh`o8Q{$`Y{^3qfN%5T@>VHsXqzw+P$Plg&c?>nJZd$3~;n5=r z4G9fod^KLd2F9pA&-`otitp6_fM@>j{irfSG<&GIVWEG**@Omh!SMoRU`pVZqA9#7 zu69$VOySw_Q3&lW+1jNW92h8?qTx*;cNAV3y`s>J*54r~u)4i3x+PYVprh)YdbB*Hs6^Ubk=kBEyj zomYwrP-nf=MY|}n;2SCqA1W^#DJ-m@`*tCgP|IQ(=CUk?Em(rUHNhY8SbRfmf8S;O z%bya~=p8h%^LBH1ag#C}l3Q$(>gKqkyKb)QL1Pt!d&8qR#*gg%>}51K<_zC_Uc0)kgRfpes;+SzBhq z>7>iFc-=1qSt**RUeO7W7QiNx`XTEuw0l2ZYTW>UTc!kB<@w_`d{a<}iJ!_-_+$f) zOw_fn4_ywzB~%lJpAuyaVv_?+jcY`$$m+@Dk08-d1xYiam_VeF?tL+la0{~&?vcB- zzw#yH`-kT}t-jg&{sIX5*M>a@Jcm7p|6T*8aQR*$x&%I;qlf^KK@kL50#u1S=x7-G znLlr>%olLLfH!l<>7R5P#ShmKE;% zGv;)B2$z1$JqVSvrnqHj_P?!zH5#eimc+-N79Jz(Gj>kCVcIV(9TJ@&S!ul)(O=j^ zh@h^P)ukMj*ru7wQJQc(9NBbN)hvl(mJs4qiW^X$s$gj4tCE*}DVhLlc1Cf=(vFVN z`V8ekp@-0es9K%mL1O_*RUMAIO7R3d?`~yMh;o+5dZ^(f-$Zevx1@w~vy!}2A$+ci zmqG$d1Gg)@#;l|#!IQFtSxYkViSlsABpcx*oCx0C43yBX&VQeAH{*MpjyDp;s-&j$ z12{E)1-Ek$M2BCHF>PN`;mw-^sT|^cZgsix3EPawV{{LkS7M}JWs~uJhsPAjw#)8O0f51|F3zu;V9PvkeihB1T+@UTSyG3@}zYg#S1XmUVv zv8m850R~$gf5Va3CykzZ)(9C*qMw7kC!o*noYQ(j2|seVrlyMU&Z<~DpU)~RI4X>B zA_qo`)4!}wGV_CVKH4z3R@_hAi8wzb9XVpH=#{v1954x4dMh7>M2>r$mW1~w5O8B^ z?xM6FD*|?h)zCrVQst@R0G59ttN_M5FRlAWXSd+O9;*Yv8NCTj?ja4~JvY6Bi~F1E zfL5<2k(bE&w)rcJZy<($4qv0gky8Z&n-9W9=5#1%Q0{E&w%qMt0Sz z0n8jp008q6c&1IMY3H^2SAX~D)Eg&W`w#x$$}R@r`2ClLXK6pw5CL(jfRGnW9Q4eo z&;u@X4#9B?S_|PwMlk%72Yqls)+>PhMMJQ4C!(o_BBMctqbL9bNn?k(z{2CYDppHe zj8O%s@Lx&b@Ib#C6t-lGodJ@!m`=oLLlOg;Q|`=|J%UIKh`OSAW3ojuacXTH!Z5Al z(OZJSicMwhc*yzy8fL*eo&emAbPfpM4}xzE+`R>)j|WZ0lIWA z23`#N9+=YzuE>X5hJ6MgYSoQLfM?r$Z%Gyw<97l#xCdf#kgUwSN7U*-yuE&)cz_s^ zu}$$Fct$sFb(8t~-K<19;UB%p&fdVsQzW;lp5HyJ`4$_BJ%VUAR-M3Id>oY zsQ_A60LlJN8yW!@)Q-`|AezTJfn-l1F&WUI1_a{%3c}H7KS6w&3@-_+p{U}lxs^_^ z=HPaN(H%E{(R~6gLwdv)5$5r%9gp6}&riL*RHZIY&3LBWb9B1rSQ(d2HJ;-geqn0( z1?O`-NLr5LS>{s&T6(9p{Nwy}gYS zZHhNHUa2Z_ys}4;-75xZzo&f1wbtSnFQEGVQShWmOOnv&5fRqDnVDN1>F=B!_f>3Z zDbLg+w#ke%eTFZ&)kQ+@@C8OLn<&*3{^54c?GI49b0B`Mx9=keg%Yis9EbsefSo}2 z-Z(_A^L(*V6H%n{y&7SA11S&c(ss(czOw}8(z(Dmq7 z#umOGU+tu2Y&02+_w%Ou=eCaDY8+uct8HV$2CoBeu02T}pC6tz-gCxe-K>-eMX{lYeD#qBqj2%_&#({OqVVOly|vy&fM;#`o9RTY#Tj zF(G}+#}ps=P1#OJtf%gU9Jx9bh|yX^41t8-9>u@2(}P|VvS5aXaTCRM%?#qR`P71u z08$P85CM5t(jZ7c_^{A{r^GyKgen07lQOYVbSm!BC^DC+8(A*kRp+882&?mr0w z$E}E1;0TCxm8hG)W7pX=lJULf7GPvYIgaub!CwI%TZKRc_+H2B8$WP+AwMLa%sKFI z?$yS`*qI`IWSg=tcTa}__6%{7; zu)Kl}WWiWSmoO4_#jpT=}`fY`mQpu;;v;ggjZ#r-6>n{6bx zZ+w&T+A}tnPAgtWVH$PYne(Zks(kw?<+3nJjM7%7JE%)jFpHP9`u{BXP&Co1{6<<- zri_v&S;?cZWX5-yZepqDsD%3K){Tsmkjm1}MUYp5WjmVZ_E+;-#&$ED^{pD>me>4R z*JeL+vXv(VTGY8a>q;l$Vd4il*m^2TPvOf_8tBkuN9_z5igj_xVu39Go=A`5Gf^$g z6i*yOS%L&13s77LulmrGS-GxC@_@3+xE>9uIl^Ju<|0b$czp0WIzCj6*1rt-FUx^) zZPmN+l>+}vRWVK+38S!2+>W0jTvn10+gvn1{cJEphhf~MpfT-zIET_puue*|b7dI6 z5x4x+BNxLT{<>kZ$EuNwX7Yj$BW1a?d}|#_@lxHg6W?y{o0?aeaYZCv)nC#kp6Z_$ zP%F*c(w!&G7R^Cs3x9h-?~djNzidF)V&sprRU-ya$JjN;c z6@6KqBSUe=;eg@%wWl`L=j^+E&xezf_>o3Iu{*NsXf2;&eRvi1nEPk2nNIw=fZ|kn zOq>5|@f-g+sN!38Uy2)c-r3nTxHzMdB40Mnpj4l+$hrKh@bim#8RcIYKCfGRbYj%` zPq&YVGCKWhR2fQel~#6pOyDz$sEy6KN`J95{fC~*cycE%c0O^LOy_M<)~6u;Ax?f#*T;V-)a?8 
[GIT binary patch: base85-encoded literal data]
z*!N{pmdjk%w$@yx+NADJCq-+6V|6;Y9u8KZK(j&KLclC#QAB1K5?GXI3$~jgI_rK` zhl$BD6SGQg_iWP{%mV^zXv_R;a;|%Uqx4+_qWD2P@NM>msp2p1TnH|=+?k7yWbT7y5ec>tX}Jy~Xu~BkG9y!#DLF(Up(sT3 z(58?se{ZQZ;;fE0%1t=3{bxRID-cs7LX$6^>_Ci;r5AQZAg0Oh;^B_O?FSnck5KG$ z`$z(Os-8t%l&3Sf&jLZ5jbOU9eU#1PBjK*FS7`0uw(pzJC{(v#L##rjWVp`niwiDg zthPV(ckK+yMAMPh=eu0zx2+nSIk<7*k>^>?%vMskSUsj&1o4(~#ina(KfNf{*5&-E zzQz!DZOw}`!2_5OocQDYepiO|uK4zaY36SI_n03(JA%$&FrbPB^cWz3p}l>Q#@74i zsv5$qXsrDByhUqts+zv|qGbC;8TpRmPXnzboqpyYY+N4MJ()`D5=iL|e$*`{4%Fga z+Q{L<@;3bzy-#K_OQZYhY%#Y%dA8X3WkVm=`R7)Ruk$ZuXSvGaqI>$ma~!c$-o-d; z!r>h6%rBAX2lKCa8u>@h=YL@C5C5{a{-J%Yr3WPJ^-OO+Ticu*nx?$**qnGx@zI!Q z$C437QK!DMOD*5>@HL$S6@`*!1a=Y!{|1Z}*72Yv(K&pMw~(u|kf(ZhsON4>Mf^t> z;%yl>lXU{Iq>?TABZDzy95)(9P7zBpV+Hdbytfy^8MV? z=~_Gcr-9Y~wf`DNXDM&+o4UT~!>t#_^?e1miKE6?^?=jy7`fq{Xp zM~gjjemJ-~`3vd7=K;MpK3qHyePdUuU&Oo2umju64bQi$fB5IC+m5#Da(-xxpV!iu zW^7zTk6!w?Jhu-i#nyH+b_fbXgDAbMYtv~$Wlu<9hORfEu%_XMW|g~)b3_?qx*p%%xdNFZTofvTC~ zjadqYf|>Q?M6h^k|2Ol~AY^YnODZy(s<(wWqypoT!hi1po;ZSXj>%s&B{X?`?g5kO zxwVdTSSu+axNGulO?k}PH3u8b7;pP;eZ4PzwfO8*rVX=<;{*Y*siFHcxuJ|4BwTrg9Iq8T- zx6Cga=4XfK#*X#=oZUTox~kEqs<4hcIEUZ51Wz%J0ZdE8t_F9t&62h`;-ZPF!qbUl z{{4CW7{imYdD{Bl+P7p|@SJVFbJFYQti#SrBa-D`aR-)jbwz5M)dqvVIFcX1GOLr> z3EWPmS9uNR#gS*0mJMLf?GbC1w0U7HE^<4Y7kkYvVx_(D1K||-kKNr0iri|7s?Mg@ z;pE6U0c}%tQW6nz-flbsZ-m!+N+O~LVk2x~n%DIV#I(&xzA6-pIJ_kngRYi`=tSj* zrf>TAbgX>)A6Ll!{TbuzqK#vwxK#s zYSQSr88+66fYhxoJ#!b%T`~OZ==*-nB`a%05C8bEfALWJNMiH;T^pENUpjcf_$BM5 z-xt@O@7myzCE)fSTt1$*yWiDr|FX6pnl7wqiHKP6{9FQ*#^>ZOX*-?%S<&eEr(5kT zU`HOYc0mMGyK+dYp^3koH}<7lhoju={{5o+{TA%|pax9duWUi_HH_B9h)Z@X-)H3VHvNMro2k-#DDJr(Eto%%biaqGY)`dCJ!yK7qYBS$f zx;U(Y9tKS5fpAA@t}k09u#Ql%jPdXD`8!y`=q=y89iMyYjt7!$&LrDNRsJQaabBTY zD?QnfDGuY9xi#XbYO}=#{<2HjT%32S=}4G$B+e#`ymz;+nJx@@6cyee*ooF`5_(z? zU0&$~WeXx(af9Qzl1jOO8~UiH#f@D$$u&GlxnYezN7F57dFLrQ zl_Yem*=nrK@@@P6mYT#pT^H9A$@i3E7}3}ke3dCdmVLjA_QH|BeDrx-SL~A#qTkVG zxVYT?)oZUGP~(U_i^An1jWQRQc^?+F?SZN@m&wTWJXqp+z0yUu^w-FL$t;q5uxQyq zz0xiJpBWE*-apXjX_GPhiVO*xel)gob4q%#Sq9W61Qv>83%k1~y9;~Sk&%%S zcY}d^LVOFjim(UyoH?(Plh-GQr-sc^v5WguY}E>S1s#`D>#X9wgDg77oX@-J-b>Fu zu>2sTE)HcLTteCx{9pRJ0-Cbea>BDOqU61opLI6~wfLj>FrTk;`g9$ivZ=l-HXxY=r{n|%U}x|TAl{g$bS5lm*zVwib%swdx1#W? z;$lsMa^oSX&P-ZaV`e5^IkHNWe80e=BwJr#u`2y1&vWt$&mrl{xD8d6VOi_eTy#Qu z*Qnap#u$ecC_spO3y2^>ArX!MMD*Q%pl?pnKEQa4Vu?$fwiak+)I!sc3$=I1XxS{t zw-9THD4tQd#AfkPIEm7nwqAxj7LhJYNMcp$j^DiU4jG@p|23Hld`|ao7Z zU=2}^7j!_^9a$>ENkzbo=(`W8>Hh5&1doG+3AE&keI#f17IltKmFx4jBMqa{RB5V? 
zKLpN)kXnp@9W652;(6GvLV^n!FztqMyVX7n5c@+fm?RLRhblr^fy^vI(@7X(p?pn* zRz4){yLK&;GR`|-(G5WPo$l#dUQG~exy{QX!}${z`U_&cj@g|NJNr)L^j%u-R|ygh z^poI${B&C_H9CGTzE3bR9N^phOn~4P#oeOTPNhs54~hbq3WOFcZPta2fnGL~)<`|) zI=p9sz}8K8o-EK4JVUgKWCnoj%+Tl)*^}bR0M^T{uDhGlaL1@q%?4ry?xu- z1zCI?nGBbUdv37Iri+!Y#mW&??r{37JS&60)6`~K8}&Vm#A-E%74?@DjG6=qUY~zf zbrYC8{EVSaqtpH2wZwa@bxQ?c_P`I?s{tgGHudz1n!o7jx+Vhu3dA%Jx88ir)Ea+@ zGrz>=G#uHa8MnFs4Ap_$nzR(SVR684(uQF|vy!H3w-{OvntDc3d)OKmU5SaBecYY5 z?cegx;EkOr9cvZcRRS@74H)*J!{2m)G`qZbIBz3x=zyc&15{jkKNA#W7x9$P7DVwJ zP1^%lmmIg?cn8@D60(MX>Wk_m1U6}nnT3QtPNxfK8qaxZz$GJ;P0mjI*r0)GCxTBV zH%)*%pT6J&)Uz|+Ip}VCX-Uv+_t+Spi*EQZ={ka{P69|;c>&nyG5ye!_k<~r(Io;Sm8W*2?BoJe#fKfSmz%-fWn> z=Ij><3jx5TK@atd>@NeWB?AIpknn13Z2^S7X~Qt)Ezpg@7zGcaM&d7qV3#9UDFG1u zBOb8%M7J*wUdF`kS6BxXPWZvbt~!3+2jhpC*rCr8darEW1_7YdWJKZuO-Lg~$^@Wj z6ByOYIRx2Cczdljj6(W>cmequ$I}fpo&RnA{e3DPyS%|S*!>50-?>ZOs(t5R+7j5D z;@e0|y3duA6!*o&bxtdZi`LF;qlqr@bT-2ZTMC7!y;Bu{m_1P_I1~|T`Jrq&T?gQX zfTFoi1@MfbOO#PP2v%U040lE>K&zv)@NRcW2`0Oq?xOC=P6ms18ETedPx-k{c}>ZW z=?@D$_sOWUP%btT(19r6&D74GY@>zfti`ru>PC?+dAG!UjLgWFU>;kD<4xYZTT)n} zdv^rWW%6!u#A`9`Q<5W8n70j>@)}6{J_y)rfPVY${;*}IW;iyKskx=4g`;I(x zcDT5bc|5XnScM64%+GDS5;zkQ&+KD0yHT+?TNo+JsWZ9GswaE)ItSv9Dt<+l{#nm;Vm5OUHM%MMpz9Dpn1CA&ssBJDA&uav?-@Xfc*AF~RNIxV z6rEqD^B2#5EqTL%JzkC`3oy1b^b}4 zFdK+#(A8so&iXchGvRaAt)a1@1xs#c5Wn&80=a3_-@zMWYycet6Ny29^B4d_qm&@; z@sopld*e@eX*N#s?hu9puv+ovfZ=zwOp-xL9Ecx(i4)it2EK7JD(W&A2t*&8H1bRg z8E#5^&icVRGr+B_q7DbT1E5=3;5#HK4qF_%1Q5#_iR{kz3G$n$2@@((3a$MA-hY4Z z$^ZUHFeT)(zMY2o+3cNTdT8B4mVRm9&WHM6qHh1G0jm!hfYqxKwk!|Ke0FHznr4W{ zuZ4Pmw{HePh_xNhXQ&x&Rma_`_~-5cFt&j#%NXDayeX<%jKK(N4<+u}d$D^cn;u$w z$LZ(`XtkTbv@lEp{>^&*hR;F3&>ADR?r%`&hb0NoL&*F(OZ?aCp5rc~e+hnL#&Yb7 z)R4>*zAnXH@k4|dN)%sE!0HPMA`FER(!RaE4=v$8nEm{>@fjS|_8vlF0EL0tmDQc0 z?8?f5tP6@Aow?U9UvKKU4*f%vhx&v17veFL5aH1@oMzMthA?8O2O;AT?B#hc6M9)EWBc&D^KPOq7@<5oXEdzeQNF?>1ZA^ZzJk0vf_8mWbIO6$o z(@p}st6dQ@C2h<8mTQ5leptXyu$*vzIh4HrS#zJJNo0>AOHB1d;Fh&D0@~^*tDm$K41gZ*q`QH{7`iJlbA&rKfTyBbnmj@r=^?&Uu~%WIA1tx-^0b_@ecnit?~aY3TOLLEcnj?Xot;2jbSfc* z#HJJgiplJOYki@eWGF`8Im(eg9Nx}+jS?xXnCijSCwAsvD?i*>!V*bxU=(6w?f-mg zEA@a`1x2ZFus>o-wO0g0YI!_giuJ>(qV%cE%tIOI?GG4g?3SiaE>=!S#8Sn;U$^63Nz=!54QM#z@=<|QKUv#mRnvS znSjjPUi(~)&=>AHM?5=PulZm5`DVt{7bQV6Gn0DNTjB(=OTv2jm6@`VgYTb7`r%k; znbM@GdU}&+TSK3$)VuTt-2PBHquAt3;kRAFt_8J)kN#Ze)}B&-P%XORa;xx4$+WNP zCjRDgu3E&Ll6o|?e#&1AIh2*=!kMzorc6`ro>^|Mm;e9v-%SPF@&YT-N3(i!)!LMo zx!?Abv=;5Qs@i?Lu+MqvyWCIG>xTV5`e5?-xkPdg$%GCaOR;_`MAM+2SZj zUnQ&)oL4cZy%pIh`@1@oy@Hh9=`mgJsWNJ=s^uxg)FVnY0yFgiv-fC&TpSY|nHwb{GzTAnV@jjYyB}vLS%9uad-m#9<_Z!m^M-X6t`?0xORgPrYX}RA^n4k; zF7KU)rJQ*KYa@V*+ zgjTgC$t7lq+OIPlQz;n{XJKQAw6vqVE3rX0Hv!M}9$DNcYQ8`_0+}q|z7DUb7+4c|YuFy> zfm_3?KCvH&9RP&j2W`u6GT-Y!!IJXBx|mjWz5q(-_lCC!7WaI=B~v!?c;c^V=Q;k8 zq4hogc>Hbl`l7!}2A)Nfo@^EeZi-3`tP!7q1cN_OWq@rnBA}F{-Qs}Pn3zttfZ?w; znQYJPX}_cmY0qr|4nRBFpW8PI{yve>0ze>s02SjMvevFZ1WY-28If;%mL6(eziz5tmxt{koqW0M(C`R_s*9$9=U(Hr))B(!ugTYhwpK=VTaeXQ9Cpz zt9ApsIg13@5AG zad+gskTaWy)D+)+tkJiTETfU0LSo6d?C&cq(Uxcn?`rz?6?PY1BR74e#Xa5o_F~&L z1P0{EyO-{Emm>1mTY_DRN-i9@dk&9v;TfW|h@On(cf-Ra)%+^KHmu;w%8ohPG}s)o zv57Y>!)+QGNV>}=yP>SY*%2Yk-D?O9uyPmJ*f(&@-lW*VBS?}>;?7SpX7?Ir{pb8k zR#D^6szc!7(Ab)yp!4cKvTV(%xPCdkzSbhJIWg}_?*HTmFPT-NDT4U0fAywqwG7q3 zd-nK!#~zYug|I)|3{Qbmqa0;Ahgf8vp_2x!F@xF)J_+zU z*pQNDe==>_YjNgDh&{&RRakqc0V)Mi6{8{~)Rq@cUS9~f)N$-$bh{)zl#w~_4v_W* zJ_r3bZQE17aLv5dp|Z4{~@B|jYj5CD>VQy1Y#vaTIiq@~G*(;}y5iNTCD%8xt>#@J)r zU+0pR774Y!2XDmuh#X&g&yZ*|sJ|Vmd^p;V7_hML>n|C_!mm^v+CIcGO>@hF#$VPB zF%B(RC>)7RF(M>zuG~M)fQrpKZ`+rywI0|z*|w+XO3ALi)-aK^rZr*p)XD)9_Fnx+ 
zHTz3?Z?}jZ8br~Di#$EsR4QeZzf!tpyMu%Gf@00FB|a)!Ctsgx+ia;_+m(UDD_7dA zLgp-zSUmTI7z9YOKoYG&jT4f^m|!90nEKw{D_}jhT?u+IshyFqt!=x-@tYt=64m3j z8W>w4bcf76k-w*p6D57&X+-_HHxm99GFyAFDUaW*6(D~}Tqi@JlyA%&((G$sO$qN8 zJ1ucYL2gs$fU*;q)5r--!1Hp3gpd|%gi7f4OE!@9{d>?7#@y&F@uz-UUlGSl>_m)R z^5M=>UMoBuqMbdFkTa$`Nt`uxwtb$fjpw6}-DgKm4Ql@-)@@zKsL1e16^H=c0_vy`Aiy#G9$^h=k5L`u zxwq}RcICkafdxysHmm$9`*my%Fef~j`a#H$ornxP9 zD|D<_W{$V+>sV^4{3eyTG}6SxRME8Hp!=+jthGXcpHMK}#DV)&A*$Y#KIU9OLXwZq z&n&)!3CP3f&GpASLPeqUP&cU?&DxeX*T-&ru1#QgbxwuJ?qlvRz|&%sdV5-WN;p1A zH2qic%CjtGafw8t^B$J#Q>}frTI>7bL+c_6mZk{(Ol%z-=8xMA2~f=kqFUaAvU+#- zmZ1WGs@pnqZ5=o(RjoNRj)?%_1Q8@vv#1SJ^()f8zXVNX34xC-`mKGD`^l)ePC!=b za@x1FUJ+~_p4r-~^4Ro-hAspWH0UT<6k4o*4nq+dSv)DvY2-3{Z)4F+pDIRpNu`A>e&i|(A7nemqic8 zm)*Gr$LHSQU9PjQo;dTzhZC3C&+b|6G8pa!yqS5|0=wR(e=3vyVI;WndoibC*EKGr zPwY*?bce!J?JU-#oqJ-L%4%lGQg_B;0A_{Ff^iV9b>0E5CbgrGhkQ962|YA4$ ziNIj%Eh0bJ+?U z6lgnNq-`l4=|3DXw(N0hxa;iBaae#qHat71EV5SF*zxqaX64Tdz0LN7?mcE=My0c( zizo|gJVQIuoy_*@r7>nIz3}NYN_(ZigsKl>PczZ7IeV6c2P;*233D1Xog3qU~o3;g|6=#p$iQ4U<|TZImqwh2GAAWzf@kX68`| z)gy)YG!&6;&2$~RXk`>l!c8a-b+uW^R*)Fb<__n-+5Z5)GZ~6Ww`M}2NlKKEh-}jL zNApx-Xd`jV%s9lFX+49*mB^VsR3@XA#-=Efhxl9#WQPIk*3KI~yHcgQV<<1UMA{er zR;HemKUBP(f3;Y$Tb4ryX1%P^sNGUtP!b49x4tt(&$1`Qt+6Z~f=vcKF9+8F&TzUo zZ2knjd!`8wf=yxLJkO}^>C(^{@@*ZDg$07)?O--Qs)4xe0+9)YSm3JJz&aCnCzKq1 zQ?vDwZ)?~EJ%`zvstKZ03l@a=2u5qHOBi^Yv4$MtQmARN?`Giw) z{a9F0YZJ3%Nuy9}CHygk4@h*T^8QC^$|GafMR$WKFy}b;sf6P=uzPDJ^sQ==r`cDm zpge?m1Pm&VhNI(hN3+CMMD=XC^nUifE2&n~IE;LGJEu6R^D@zSD9Fln9As~PAiHpX z`^@3?tdI-0g-W5P?VByH^kM6=MzrxXa#AE8t$D`wPu$eAc+6nJwqx=~56rdg7< z#@NyQswN?QNVL%wzNT+JB^~RltZF*F(Bpgj#*U>kkCC~w+(DiDlj~}%XFxf{PCFaw0w^@9X*tkF zK|Y#6N(fjsoQK5_aaqvW-ta>9(VG7&?N^%NhBxh7LLMa^?=g+W8Um_2l^ci?^v#s| z`Up|oZTGmi9*vGTnQ3p^&&H7^8n#a@sdi@b^asRg)1lB8G zhX((;t2^1e2BLtZqLksIv2hY% zfF{sT6a)dWRRi=X&2$@r28be>(l_%!AeKEAbi$DR-}fkcp4a#P{WXIfKO4J0_jOw^|8BdQy z)6F11KgZ1g)TST;Ww34t_A!}zS}(XEz=m;7FE~g`I&@QFAQa$A`cpU2a7P6)7q-b- z?%VkKEx7YXQ~MwW#;WAaAr7L`m+?!sv#}i_uDKK(MyDvG-sWW9nQNzJ9Yb!glH#d`;$ zv8YFSkaiG?%Fqu5IVzEOXJ{MNoeGO=BP3u5F`U_Ss;%pQ$0kMFw4cM;%n#$@&%70L zEAOk%gqgi$2|gtWfuUSr4)xPLxu4V4gTqu#>WTe7#v547v#a@X=cC{P_1NViJn z$h_J}b+^R*;H6P2!P=@fT7lzo{*Iq7XE0Px`pbqk6!hoJS^D6BdZa`3 zjpFHUzI*QtUKL|UgiF+KK zL;>CnlNQUpXK5MLS84}`MY%~g%H>nFMMdskP&|#MS93EXCUM)%M>j{1Et%3Ica)AR znlgP^ra!MWvi*5&SYF)6ncvla^})K#9{@`4yMtvX@xXD zy|0>tiN|hTytNPlNC!V4HQ<$Booo5E?K_NjSoBQW-Vc^$zQ~{dgtX6*OPujfTRR=g z)@Nugq|hWHOM&+Jz;3t5OL{A-A9fTfoohp!a|;k@4cUPFJ#615SAsNh{9=idD3*xK z>BVj{xi=Q%eC(GQtd0i~bqgp>Q7{%9n|D}JFbY|^h!q=|^!0@b$v>|?zFp-Sl;`kV zI~EjcM=Ka5dUeq=4P3G$|MTvHi#MTPI4?J3BUbA~X676&k;>c1FqB`1l2$o>9zHM# zu~syHN2O@zv_tm_&6dTBy|#xgW=<%R`LW4dCKp|3Ha$>H7uZ-&tAkC{d1M4Z!6f4g zeQ#3_`mQ6%FY@Q2G-&^9Z7#~c(Qyw6S`FJW%IKv+#yQfzH=J5OM?1CNu*TAyZg|z8 zkU*URO!SDhEWlFh4XZn<_sMrINVz-jqk{LkJ!Yfmq-2Mn*9?bMSEisGh!Ic z?iy~J#^YICp6<-hPN7l)@)7(syg%dRY(BQRZtvTBoi_h61_B$LG$7NDHJj3UKU?vP zw~O?a8sfw|Zm?@1K8x{y0K*LQnWYifoR79byg9jHJ>jFE<7XNT9Z4nFqeqy!LQnuv z%lQX}etpWBvKTmPILBpNO;dfqt~1Zb08l#CO=&7jyhkQ&ii$~jY$KnKkdZnQ!E*S2 zk1yBIJ_vbIb%pG$8t#4)xvTb{qr(8SZuJe}IDdE0<1IQ zghrI4#l%Ro*jtGz4bQOlI>=}HuE|y1+Y{=>86i8u%28n2{-H3-v>(@$@=@ZJ}@d} zyQYrLJ`d!>{WFA2IBBV)`K4&ly#}~y3jT{azTW^CR^)8z$tC4B^a}H)@CMxleTH_I z!9*G$J!(5RF+5bGEanFI6{;B!T3S%HY)Vv&+n`nn0n7rk>LU8%s7i1_G9*Xe>lO*05abWi3TXWn!sX9fS!sc{C z%D1x}7E>^(>(s$fv?8jAIwnHxq@7CA2@@;a;R=ZyRY26M!u{094dJ+6{^t67}at8oP>3@J(k zVdF93;i%)MkopF1!M3Cq`=gF5Hi+O18Ja0PTX;eOxJE0g;OqVX{&5^eJ5>mm%lc7q z32S*-|Z;<2Ib*ax09u%q$XTiKlA&^ z$_rUP9R1@yvDJ9xwXp#^pqBrYW5hXY%t$Xg$3%jQ82$36d04c*MfQW-sFQjvcDSa!65EJi4Mb5W0=V&g9@F%?1q 
z)v%9kLKTjtcW)dkw|UdtKmhQT7Zu7}?6YHsdZpaiop?}JSO ze=9vNYFqRRd@P3Uu~NFaa~w>}7C@OGt_(#%i0>MRpt)?&la(e-V=04_zI^u_aQsno zMcYl=klb<&c5wtpit?8_BS1bxUM>NovSyUtWv}zrQE2cEl}w@ufJGxXu9ux9-Rl^ z#cS|s$61_ArY9$;<(NNZsWjRMBrN&bjVd;NJ2q(ZrgMuxmL2UXz@U!Bm{8>y@r~d1 z+=xjZ?};%Ih5KyWsUPmOyUTSkKO_}DfX?i@;dS?rSX^%S${`; zS%3J@cD@$y*ZK<-Z#`MW5bi|!*bM4pGZ+GrB00mznJSkn@zzCDi{M6fM0eqij-5>A zpJ!SPJ&(L@njnFj@uBeYWPGj(C1@9_C0LLx6u&8zum7+>eCwd!9?b0s#ukkoB#VGR{n4Dto)xuS@I>N zZO!r}to)z${g}szg^@hXnUe>NB$~6_Ie*TS=(RC+MN2?Y5tX|ybOB|-)$6w){~(^q zwUs6N1}ZizjlJXfPV%x9QA=Z?$;az{u%~-uRO zn29Yga^7hBqkW;o%DKR3!Ccy^d^2awqWQnCDlo8QETZjaSS|T|)#uZkWUci5zTfTM zxv00qm)7@#+#$8jETzCCDE-C(PSy!C^I&O%mpt9fCqd7>O0z)8zmsL=6R+o9tm+as zQ0W{8Bi%J}w{N^rv0D1+DjzFzJ-uMdJF9#R?9J6uyd<4tuV-GI?|h%XeTA7}0bOmm zeU+oZ9K#^BYMNtVpr>(u0!F?{eSdU9Jy)#UHY&L@M7!6Y$|>zx!oTyi*CJp3-P=B@ z{ro!+!sz9H?aMRh@zf6!TT!W8qiKaNUL|zkSnA6+*s`QfDJSQOOl2j@%$AwUmbpap zT?D=^(FJc^W8iVfC9qnh!2VY9fH3nn8tB$-)rDmTkSMKw4U>o30$JOmeR+Rd|j&1&Lcy}K3KyLR9*B;g9{U^P0sG9WL3(%ckIeyj?^JIvxa zfyER7&svSAO;KfFM~GZ>DoVufTeJTpIJmw0pY4Ap+r8iI{b&30$dyxkeLDY%h_4?o zp}Hh8n$~z%DO`9)JqsC-Too+jl^fA5~ON@#T?!ZWup)JpW~sCa&jMZB`IKLrE~ zD|wGqZryRK!eFbaH{I)w+ZCqT-5hkMQu!qdy{l6BG8ewAiR)HIw<`>@6Ri>%$x@9< zHQq)&Udpom6wb6}f$Xc;k{&M^{%M>`G|sYtSo|#Mb(U%P_*bIgSN7SY5spUQXjWRj zcWMghwJ-5W{v_req}mxWCWxnQ3xTu55n7eZZJyj;Ax>`I&Dr~FGxYY3i~U~ijK zilp)FAtXNk=oe4wle=H=7urK@SQDx?(Z{+o03(4xs6LpB;o_8D!GM-kDN3%$D-WQb zKQ2daUwH|DSdj~mxnxTOI4*`@gD3^BCNe03pvuX)P{(5J_jJjdR=hezfyJvO#9&A{ zEh6;f;F1knZdc!gk`hZ07OGR^oJ%ynUc%$6OCYEOI>Yc4`X%p)DXu|(fS}DNSJge4}3keJRZQ6?szWTQGhU6$_q_Eu>utfKv>G52`+ncO+P+LacNJV$-Ak9 zv(OH3{Kt6uaRA(biy;71C{E)C;IE$Mg7lk!vqS)5dP3wdi)I}b0ZfbkXNL~vkwkw| zqX&MOVC6|jBWRzA`{<-KvY+wx+!YvC#OSF(2prAE?d|>bk)&8K+ zxvKUu(-sxyVjtsv%C|a`$vSu+8ey~Ck@Bz`-N~Wd=pX_VVPrc4&FG+y<35HL`&)i- zC)S0nN{lNT2tZA-6nSO?- zg}URj$>nP@kV-SzLP0zRvC2$l(?=VY7cZ`%H!8RFcP|tq7?c?wm!sd};Qey~Ki%G& zt@f2C;3(c~win;QGosuxB4N^erCEzT&qfCHG-Sh1r18<8N#hR_dPJwW!L*`z%;DYI zh!(MO`0A$O1oIk`a2b*QPltlHk61alp05O`ihzn(Z5TwcfT(%{tQ?wFq9I@-z@Y0= z>JL|mMZ-W(MfGP94n*^4>SnuA;fYL9b9OOl+YvE%2Xaz0*^}}8dD#zI5}&L78ecMQ zs@pIq()~w(P8&#@kLRu>CyskvCZy=8bZx46bi_z%bVQcYC%bfm2xNzgTzegU+5W&i z9ncwkl44m_*3EWPkT;s_<58v}MN!CAoGxMO6jQK&osUf{K&_}>9c7dWq*QP!8^+yj z=5-XL&D-2LUy*midl0_LELQr@rA7DYW}vzQu5i~;`CX<)3=3pV zbG%P>_IB5gO5x(ZIkZir9B54_0H77@mYbv}Wp|x4d|+t^cS7P6ep>ncD4Idyq;2r4 zO3I*4>-?&~3gSTvT%3`#nHX6)9M7OW=|A0h_11NDTzLazJEHW*j7-N+XVE7G%IbWM z^8)Hfzol&-?U)eU&>7)tWPG3bwSb?p>qyLBs`8qUvWVLKP7uvB20;+GO_1}1C^$u)WuOrh%%WB z>V#_Xv+~^@I^bhbi0mDnbK+&qc{dxOjim?6BrNEpD5$aArNhJJRJq5C{jY9cH0h@C zQe6^S)iAs|tFW4`ieobta=wahF9CBpqJz!w_TLbq>Ji(mdeb><%*wIYsxVD#R~S0K z#s$ym^*b;oE>T)d6$a0j_v~=^^myrV^220my=Un{5F(R6`(SfqvdU>?*l7O3g;+WX zxiW6vJS%a@lz;So*_{%QHL`RZNJ7I5ck@Ws6s3^GG8y1F6=)gc!aA5sRZic5QFlo> zk`JKI(~7-Q7xFyu9_i^l1zgLj{VDipHb!X?s+7r4vP)63rols$4lL!CaM~O(|lC z7)>jHS!~Q))JT16@C)BZdeC{JQNe-S1J0o48@d3;c(NWT*4h{2`=8~F+3U^*2(w=N zcz!w{oF_Jq`-)7>E-dMzi%X;Z>rZ4*4uMzV@vx@=;aJAcy5wzBK1t#<_-R>JuBWS& zS=}#vs#cOKAAaK*VNV*(1>xp;X;Og#M7Z9bX%by~dU^-ICYYrP!2A`}O5iH;2rn5&eCqr%{mO>V$`kNA<}NGorVk{%+2y}cnCcKvjr8X5~c{PEx3eMPTbg? 
z1!{+O`t*JnVM3@~i{@AS4fc`etg1Iz2j5_Yu#7~#Zr}VRDX-b?Yi*)gM?OOVI&EQRC0ILXLxjRi;`))G1+Awyqs7QMA7t7PvsnZ$_aOt== zprKow0sq$`>E_z&Nnw#;XjGrT3Ca5er+Z%l+ti}rA~(6l0*!m1cP<806X?y--#_X9 z?k)J?r2D2h->xq1?Fy3&ieQn$vzhKn!vfI9fPZaZcNs7Tg7rCYhfuzS9|L8R0rR5w zDyD{ z#;&m9@|NEJ(N6rUyyXz}Va^?R=R)^tZQmNS4$wDfywP$^`XTS-8EHoQXfw>rP_=+G zCiP(op%9_U6=4hV$-C0k?K5~f@i{{$7#6W;3PS`7XvNP_AtbD$oMFA!h3)IRPrU4B zg?Di$mceBZkaj1M(s|L<0%OX8@3)a>_^xP4X`j}|&JfCjyu|1uKyUb+QiP6Ft{8P}COPw`E~ zIH3*$aE0sL`9NWGAK0NfO@<4`!_CrdH5NtqHUI*Hj2%C_~lgtO~7>&Ii}){hpMNZ|CVi^ z=xY4rUDC$ad1zz|@Bz6rn{|*N94$5@x5#u9}ZP2M$viv zR8u-i&f8l}d|I7aO=i=onN|9UVm_mmQB+d933o+N#`yG9@k_+EzdG zN$-W~=i$Ij`|TPpz0;oCcqzS|)#M{8pX+iuyqq{fy!vQ8>-IqPM7PChUhMZF-7!_% zkB@qYj;A!2PTja5YI`HFJvrbynZHBcJ%(=LouU(eTp!N)I~E+z031I9JK0p#d8l z9&0{s5LZ2y_v(OU(8`mwDJJ|T57#CSz9cWMUW;w5_7_Nj1_<&JPIo6XubgSEdis2g z`_qKxf53#{?bgqU2gCns{r%r7GSiB7zS}cGVLze%9lIuxLH{(iyThfi?FH0eK}zjs z_pu{RIe>j|8KV|=Q4Tzl4I+bH%{49mbdIMWe$qzTX&~*i>0W4XrOTk(yGl4|0Ekz~ zkd3rMU9!p9hZ-);IC&yP?jv-!d**J(NwF(&uRd73^&qi}@+G`zf|brLcuY1R>T9?uC4T5$V%a_>Fz>2W@`vs6gN z_lEZcQ{JXsNiW71Pq1U}kiLbl_ymHGmfb83IRbtd{kR zkDwE|J`d*0kmt>N#@wyzGW5dh1kG;*W%O-{h(%n+USI3f>s%suc>Z+J35Ei4hrbgy zN&k$iG*em$J+u7v5WU17@2_tc(N#utuD+V4V#Y5Op1MWAHsxzS@YwVLdc9ihA8Rvb zZv7*{1CQMDUYB|5fPxhPN&ZveAN=sfsZ*OyikPR)gn#rRzxsR92ANSmX4e{ST5gzI zd=K_kHCDx!+~rw5yLUZKicmi1KCRK))NL-eF-j5pQ$`|ldp_{6T-bP#9V)i7i?(6g zY7YlFvSJ>w;%xhJLDg9C^LHIAPo!~ks}8of_qx7&u*F>7a`<4ec|o{)qyc`K>xFH} z7M(~rKz87p%nRNXx>px$%DPu$K1B^5Svo_k!4$V^+;RJ`FF5o~@8*=x^A67%ya6ix zrg9iEPr$ShHbY0q?W?&-7l{j?%!Rn+6u}cm0N~S_2^IPt#AXOx$qD5J;jDhU@2_-^ zxjWCj_t*RdBEFBlvfnvGwMK`Z*DNN_Cc%>&C<>r^An&7e7Zc#{h#L`rae(2Wd5OzH z63L^mG_Ypz`hkjFaP#)dMkK#CDjk(VV50YObp;lGo`vT~>0Q=mo>K%k=19y~+^mn^Vru6;lnxKxZIbNgMKEe8JdOWfaL!Hu9ov~7@F>&cAcWG_xsC!o4 zc!~?1BDZ_b%H1`@v#71N-0kj*@%`Vk_0M}IItmqV(>6Q4vcJJH%J8T9hjWgej?JxN z&_8LUQ^8)62CiP29vr-uOti1j_7OX6x(J}2ylLY4J{AjK@K1^grhE0K|7Uo-*LwQa z(>wpu^W&|Dh1VtuFEI*x>9k99+O?UD-v&fiJpp0tN#M73X5-`S&hUD5bP^Vf7S7>0 z!$x_kp&?gH`w+MYV!QlOq26(LO&&Hke(g9#Hpn6&gf+ZwXgV}%@oUFx*hh{--9mh` z&lKsaPT})mXDRr#rPsKi?Y6@b7;Iea44J=om$HqqKgxYke->t!>U?}f^)u`I(aI;1 zuOi#tOPH?23N{RrTgwIL3Je(t4HIkzGU^KN&sLmUF@Vgv5;DTs$9C1fS`z?3LH!Jv z!po->5Fj1Py*FBb51|HG7-F-f*H%<|e}<1k$m{qxxsUj*e$6Rx6(&K#7Ur`cH6+;J z!cI}?Xbmt4Mi9aN2r;F7Wv@E3^xy|oS+|zp#3~6o9kj=armjJFNEP%f{ z_hNkip1=QfjYl7gpm$TbQdO?>O6@*exu3iKUFsdDt#&R#X!GWu7AOrlIbJ7+rPP$j zMxP0PaEUr4{GWa-djEX=vKXB;G15o0m~adV2#x*#JHdfPf&e?S$7vPY0|XX&jpw|=Pq7?SAW*u8ErR^O^bzau?ff6f_}*I@ zPVnu2kMAVD(|AH3fiE+0WfomTtG;Z^8H3!%0Hneda_P>{S7g7uKyfw$z?oGLVJ8%v zq0P@37kjT^W@2uFwuHr@eh|>KYfTsXC(n}>Kg672Vg+KFmY+8n(f2V91;#34>r~J5Uy)%tOK1>t8KgXtnOLeKLfOT zwD%TwAMM=t#ZM8xseSL+l>gRs58V2F7gqd!>&5u~pY!+62MnB2w!2I9zu74EOgf>= z@bc#<^#dB}6eglD)(3m8I)?!A+)Mqqo5{KK$`SX@AL2gnNU+r3qclmV3`<OYH1WLbQj$$hPjloQ5B{x{Pijj;LH03t~}$b#};#~<(4s-ss&P`o-%8W zrHME}9@{M0XXjZc$T)&Eawp7 z5_m&O+kPITj^=ufgS2E%!R8FMZ$B_e&oyV?MQW&F6o`XCeJmNkp}j zOS^Rm&s+NTDG}q=`g0z;%eBwAjU8N#-xHV(>0 zWlTvk{K`aN1Ou0Il%Z;ttfeP*zN~+T8Q*_~SGMHR^$zQl+cibC8KxDDc zpVqcq^f}vDJGQ^^u+Jga{U_2VLtD;mX&G|=bN|Z^TGXu>(Pu=_3iOxD<{I&X&JW`% zBgW(3#CjufDaTo9?A725)`DKK=^;ne{j?RWnQzO#C}|D1MK_9Gm3n>7n2~-B-N0CX zO61(Fs^qUu?3Em{g{R4+2O~OcA3x&L?$Lxsm~9-oVnlqV8pH%|$me*0y7?m3j(-r9Da<>0`?+x6x5biM*Xl5q*B#weJcdRS^}h^uS{7;qqzF zy5|hY^pr)4;PDG5U;gzi({L4q2e3~(sci%|!nxPpC)4Ln$qiN39}hastIFt|?gr(h z33$sEw2wkmgFS7WJHtIa)#Z*U2WP_NH#5q^Nk!M){s1bvZSFwCLYM?CBygv76!37u z^+_x8c91RgM5~7f$ofO#TDrcr2SqQ`tCYT*VHE#RmAwzN&Qio`na&#&93pVV&v%je z`@=E=@PBvazVJWlLVBeg1)dDJ3xI1;?{ya~uFlV;tb5gaAV_Vg#Eun7I~pCcA1EC% zD5#`?mQFeVgre>vf-H-7d;-t>aNRaomDUe}miqAW=6mmMcGLm#@~Z1L*N;M^LtA^9 
zJ!jHy#P8vS{>k0}?*T37tbvH!sHK!FCSbl&_kQ=Q{@46*8&dDEUcqUy5?hBQI0?_@ z8K^&ceV&2(qJdhOGvDCZtsP%~y7KE!J=gfJG*Dj{^>|hOCWnaM^tWy!Zpnhsa6+?L z@yCzEcloNBkv;%AR{+Zye!q{P`E;Z@TgEG3(~$RGAZl^e0mxygNR)7h_)D%hq6d|D zS$fvZCIM=Joa|Gtav()RDEi?`?cb5N_=3v5>BO?JrJPhzYpGB6!Ilo9!W6 z)6$X|nWlp>qlt6)0O_5yWRkBeGp?n@Co?iE69vlbCe$f?PGn|oT2}f_!Da`07!PwG zY_-(0ZLfgR27c6wfQROhW@~TCq|$dT_~t9)J%2to0WfdsyyV zF8nJ#V{paY;u)dSY_*jC<0O&kLL-p;fFuv;S(0<9x*QE9G#7I55&u-cA;MZs@$pNS zE|j6=35Bml)jcB37A8d_ld$!5ngZaBwoOqw7u$3t`C>xf73$#IXgAJTP9UM@l}?-S z8O8vbx8P5L#g+^6gXI`hXcd5UjO865>OAeUH~%^E^vJ_0@0z!smW@6=GB$8;S@%So zO~qz_?_7r-JS}=v4hGM9HXLv=MBkx=1;>n6LyzWbl*-Gorxh}OIw?OR$?`HQHk6Jr z0F-p6m|S|)>Tw%=4p3wV+=v*SyON4sBDXXVF{-5|Dx@PQJoZB;Zk}MAH$m|4Xg+PO z9o!=dsJ*!!m8|4CN73elwvQ`Xcm;>2zuWRt#>ekJie=Yc9~ZvGTXMd;5e2S+MsSoM zlQ$zom9Z1`-OvC~idsY3(W+|UvE^spC1j*b*9!rMe_yXFfE8_RPAgtNzO}jKJDq6U zjgGFDU)E0Yc=$e`y|XM{qvlu)imOXms;dzYiT1L>r&wI5^lZuOn@)FTRiw`THpSQOPRPXo*_~ z*|EpEgW?p=l3+KzfR@=tZ1zwG-_4xW*SGDrv{agGR5fqxrU0=cfVEKl#a<~px>kR= z8Mw$MYdmi*?~8-|bvEDA1xFpfP@6kielb|8-g5~wlY#76XAD3~1;jU1xmX@sd=#Ka zpp5s1Qxqv0J3?`tSj8i_z9N5X~76lU7S zV>ZOao(GbyuKxVvvV3En<+cFzU-Op-0Rg}RrKWT<7~M7(;>$GAu#g@FD%@L*;KrK( z&spqn2@9O~b3+`H(7s9CGTQe&Wh-Yv5t!wKTV#s8@(aaE!cAX*4-O6zaGYQvnT}a5 zXw{{CMJ1!Uy|sx3N}87L8NGeLU^c`vr8PBGW~adwKD1d?1kc0yu-9P2sSpoopAm_x zJniuRjB1}A3Ha^mqWSZGyEgnk{=aTNs2HtVu>_*OpIn^~@C@ri-8&5B+jGTL0~NiX z#==wkd;m5ax|r}(m482YTOyRTZ>hf~W z5gF98CY=I592Si@`6>Mr*{^lJ9f*UR<2r4<%bv%1FcygNfNFByfy))B?e6SQO*$^` z-+7VLc_(w8T($t03(eOYBv!V zj!@MeA`f9Kip(BqCd4VYS=W8F3%QV8S{sknYnq@fzT-lgNo`0=Y%6)#!e@s%L6-P& zdr~M`yHykLv?`AGtg0X{pQ^&qvU_XekC9vG$m(pT;5~diDFl679EMdjS&`7+7E1Cf z^!Lr3X*=z0{+hoP34Q1oUcHYN*SwweHFRZTTQyjCcWhS+_FpA zc&ph<^<}T-$X>e;xEGO0ZS zG~$C?L#sZ@g}o-)#Z(jxwwO~=l&S+&;AZljiMl(UAmP#c}r-a`=@9{0}H|p!p$) zA#{@qLb=Wye2CS`#cmGm6rTssw?qU)HI6U~Kq`PfFHzs65QW6xM#9V+j@$xW6k{St zBCsKDN?PicR3pBjWQdpBLgyEISABk4pK%%8Y|)N(K~hC-8KcIPi)c(ry|0tqf6;!F z2QH0D1;H?l8p9*dvmj{OeOc3n1$nH9y5=i|+Y z(>5U-;gf+%dV#!2W^lQQ0U;7kIA}$A_@ULcK)y zFgaWedcob~kBM(bx6cWspqH1SN{x2g6`S?&waq81+>vP*=@4Jp9fU}6op<`1BW@ox zrJq7$sr1y2(q3PX8cKV8EKilYU5~v8N%fpW$fw7hUW=az0M>z<^Z0Y_a{RKe;Qu($ zk?P9y+MWQ03YZ5lcM+^lyaRBL`@VgkzKL*w4K5^b(9iV8rd}LHHwAA5T;&)sEN3~f z#g+?1T2e|?&?7(cZnae>;LXSplGm*godBtGbd)!sHV|^)0iD zvXK3=vAsLB%%V@PA8k5>>&|JEC`STE0<|ly5lRptf%Q+q6N`-RbAK08=l}F)H?+IU zs)M}cLn24m)Mp49Q3WcEb?XM}pyC6Go^MFpg}7kPuJG)!-0li+%m!9eAnf8bouvar z1`_c)< zD})tD!86gIopAtOLt?PIO(Q{ZO&QYGR4LD*YzD`lJ*USB_6{8iP=lS5Yu*P79M2|f zB~(t(_qg-(-};370|mSP&3|uXu_e!lRl*5cUHCpHB3LqyRZ)BWAzc>qY9F*)^HssH zd}Wy8>U$l5%GK1NMfsnY*_tApOi_9{b5%@RhpI!b!}n8u(BM2=JUz+y{_3~F5aS#0qQB2ra|gDi zC_fkx3qN`F*m)E&_HRG|1HOK_>nBwCfTu>WOo@*TvANj~-`T709D*&55qASAX{^{% z$c9m=rSMbbr^N7fsst}4-n3Jq{C3#Y<7ltrzy}QgkA-&+tAwm=!bJ+h8k*d439OD9m8qzI7@;!jrxodt;W4;T)pby4t!=sYt=Y!9vr_@^4dmM zv_AJd{|atBgZ1~y|EO>9N1B#D=KmTxhbS`HnYp=LzAU@4hmbHwCHLLVG3jCba_fFe zU8V?ebtBt8<38kvewWXAj`ZpEp~-MwSx0C+6yh%uo_|-mO|*|8SJ9;)cwVJvS1(BS zxQoih1rt0K`C0C39qlui%D-(QEfy6NOM8(pEsG)3S2zDFFwsHP-7_0LQzbAKglqC=W@cPcchicg033t_R zwtGY_Q;_9)sD>NiAAWy~4?Ov}Gmy{+`$tHvVz%3$JJw3Xi^<9hy#u6Y$p!Qd zemU+@-`{ebXVd|rlleRL_m>uJ0WK%#OL(Mxk%9VS{LmC+b!OEW9(Rtm1YP`bX$hOK z&7>yomrveqJm=x|73mh8yiv|J4clI1^Q|%|DKq=K?X4a=UX1U5brnuc9z5@@90>^E z&68&jl08@d_!AQ#gIbZqA~c>`77BnuM|U%r$k;<@^7zt74GZIU9*OBh`)I0cJCas1 zLV-%ZyNI6as+8l3T%@pw(Ikppm>vlH0~aO0Ox(b0PZT3h$X^UKV3J$<4LliG+5c~X zfvSI%)OUD}1n3k9HZ3m)k#Q7XEi%#MSMO~Ql0wiiXtom9K|IvGx%N^0-uZjyzgkUH zTq`O&`$vGDy4HjU{cI(PwOpW~(8`(FOu{A^-*MmFy77JGul#Cqd8Z$$O{gbTBC4*a zARsIqyejt?h7#(rBmgIGLLV6xgi}yan|$)H^ZZ9U+F^L8Ao3}#fjw@s5>z^GJgndl 
zy~0D532>6UKoVhjNV{3EAa3O}UPaPntlK6jq+ZmVf;_sBg|Zc{vf*q=JPRqJ$U?j) zu|u@&?Vhwr)+Q9$$DK$FOHL>hhOJr~cVcZql`u|O6t_0UDk=Qt1Gr7bB)%DbF#hlH z^;CB$+xaxhY?@DyW5^cTOR2>SMykY>@ph_&DS0a(p>wfi+)5sS_Bor~pNnd&UI5r_ zN}9r0;*WBksiBFg_PPOF(P<*RQ3F)mP|cYr2Y!h`wPIYRz)z8u|+9gmRC1XCH3BH$I;tHu45E*O8*Fto*Gs zp>aMu=f&5^_<|p*+aJA{zmsou(HLFLRE;Ay=PpeJ8+mFjZnEjz?QoQTAbB8piq%Me zuF94662+NE+~+CD)^ik$>3@CjDgCwLkrtXO4WfG@=2kh4^%>0Q>HODu4C+@r@lzPp z<8n8LnBI$+6bBhGk47;ly>Y`hFO3n?ps~rwkQ>@(K>vB9X1{%1d)vH?&qgjv2Ci#j9ShMzFCbp*Qd5#L+gLKl`ag zDq!g!on{-$-i+wE*`J=SkbfZ+!1Hf4DNYDPv$+mVqOervHP*JTTKvyY$n&Hop?+1 z;ndt&k`f~Xc8!WZD~pTlMy*qhQ$Cp|fOsJqpqIKKDP)mBnxbrd{}vs47L{2%v_qFh zBU^^;1d$b|Le5r*b(pwSo{Mq*6B02JK>_sbnP}>O(uhvISm3hL$Qn#ujPEnQ4pW!o z+XCMGYyLXsd;`=JgH?zYL<`HmFox*aD9{FivJLJ(yd84-MLLrWYz8cskaj*w9Wix7 zOfKpu3dpHIB?rBJ@=hpeU}r%p8AZnU5N;qulfD*Fuf`#vrUzEBrU(jsM5hPhIIEBo z1vM;48G^dPb9|x42n>D&a_bFnTO_WDKSO*!>sd=C&{lehnGaO!8(UGG*e_y6w5@Mi z`xA{7(YDr;GCY$E1?>;c?EZWHo;tlvABem^+XF~N9dGTGW#FeD#P|0=Ny004LjMTd|^Ed?U@Y&m`N2@EBB}ET4 zIgPhhT|UP{5d)Yt6(Pd^nSg?5Pg2la^u|f!+`9t(bzHcCf55U6F;n($EP69Cz+Gkk zMvhy_h153UHC8^N4xgL6DVL1zep-paN=Xj`gPsyn&rCB zw^A44^Kk1KlraIPeW<=SBix-;{n7f3cl46SZG=kudq0~%L`U}nh@XaaE!R-eV%r8? zL|#m^i^Ia~A)200AGQ`06?XAwyX`*$-<&#Y+^ArnEL(gV@NDsOBoC z3SScQ_w}mw(vt;ruJO;VNVF-5&=_%vP2Bw9X@&?Sg%E~e0_2n6Nv=i)c+1GUT-sfI#oSK>SbPHU;jz5TTlNN@#@&9Gd=F4=+Ww>l&_<-~9y3D6p zKh$+ZAAI|jr&&e6XEp!v#?0`M{~!I>|97KbbxD(2;v$P#%y+I+N#a%V(~|g}*OfMB zYf_}i5Qgjv>VrZPsL2a51fqug2XVo1>8FIR8(l)nn*DxXq)?wnY3rX zsPR)Ln#Wj2cM+8S+S(x*lX`jn()Sr3jy4B1xQsRfwia>&CiL$O+4o6>dOho7T~uLx z(f8L%K4?)n*_cGXcLR5C&`%=cd(AuE)FpOjlG9)DnV3Yr0erE+r{?cC>6_vxVBjBc zLxM}cADw?j^(Fg&QpuuRT|4e?iQW?xBRmVaeCXwhAhh`c|5vzuDF>7zD%8uSaHS=TAdJXM_UZhb=e(o_SG;_8x&=&;I_%wYLeTYVjqTf)_?5%#W0}4RR z7}3vDe&7)D`=5jLrPpr76wf=n=`02KhXP1{UH21r2Em zQ;vq2oA@gNCb z-@rT`jY}Vuz=)C}xhI2)VM)kAs34AYk;IioryfgnmC0NY;ioXI<6g4+nc-}y483mB z82Qq71QLssE3Q6h&;KF;|^gDiIrWcqfarHl=&5tZAr!_OD^^n8O-&z7v+J0Hl zddT*iN4t8U}z2L^LbuS|1;TmsXP6CaH=gos1fP}3}_ta#L1N{ z`fNz3L4GNQ&`D7BQV*iEl}lQhz3BZCo+L5-IJ3~<68fPIXqa}KSqNdO;~kx01)I`` z4=)=n2;X{;x;=IH#HOpYooNMWo6Zxgt77o5PwHi|u8&Hwp1;=kNthHnU!V?A+OKp3 zE|MBP4Bw&NtJ>OA;aRtN_ z%F~Xe6IY^-y*FL#GXR>_cT4{;nZLjKd007GZcF;;ukrmjAiRID@f`W2Kx1wyKkB_H z>-JPq$GI@Ta?5a$4R=7{4oA(U?&;8+Q@{cf;3kZxa$l{+?on4^BTWplwXZQmevv~q z@uT=tzCqujQplQc8%O8F4)$`C>adW~6!wj27uG^b?nO6Tr2QGlor& z=BqqEuZEdW?H{(yC|Nrg7nAp#W%D40v==gBaAhycW&{Fi88LBjEK+|4@%I#0pC5Tc z?MaDFpJbEwAuNniD7IBY%knm1#K5uxgb8X(lmP81b`^po?Wf#~C{tFy2zthkEHBmJ zB8MRJV!qVED-U*Zn(9`rOl(a$bx5j!kh1NgM+fXcVT2@FZ89{;;-UHson~DQNh;7* z17160Y(DfaggB0-(Cx#84y58iFD|&^-y&5)HMx;V7hI18xm9^Bl7osjN#8(rAx0Itg4}=)}s|(;_}BZM9?r1<72tnAw>8k0LcJ7M1;(w$v-3|EKU9;1^}@=AoCn$l)1YYpIx3!D?Hj!P5jtEAoyX5Tl_a_&lb%dvT77Ua;-9)ZgET{_<|QpJCR zPv-?})f=U9^No>SJrHYDD@41lUKbb&45L+uwhi<5&TcHXGDJCdbs<(wLng&gn%N3y zk!pSrS9kLh^XmedDb@*!wl$x^2S8Fo6B-_AI3nR7V7*hpOUF&icN+Gk34)cf&_q{+;#x+0K6n`(}n67XNG0!0_`9?MI{k zL&kTl4;A|FYh0$DXp6$&^d*~*Qf*Lbu?)2&AW=bFOtxTk=_Q^i5o)4+i4{IqoH}=B zUBWT+dV&Krmq|N`oO#Wdry-}i3wz`XTF@%8&l%0=qmwBJn72qEG7{;u&@mgZNg~>< zn3)h-7u1n$Q{<3bG=}c?nlWPj2(UOH=ss#wx|#DD(w`@r&SvqRAtnK+1FxRZk0Z<~ zv|IP-=7l_{xe)tH-Zp+)IX}S!&Hofn@iO@;C}B~MYUQ_;4(Y5QDHv3u6IRGu?DNHW z#R4+EWdBR%uf<>SSp--+532EX8f=)&bs=ZANpD?U==WI0OEq>JS*(TEBtGL8 z_QGZmA&IophKhin#~&Te#T14<7y2y(1yIP}--m`7cDv96)1Nd!4;OzV*#QF`59B=$ zSg#_9B)&Vp8zqE1K%r{hpFTZP|{`v>DtT7t*gHQybKhftDO zW#7?iu?l*1YctgFFH`cD@zw_i_Y9pjPrQcBZcge?{gckgF z{2=T5WA1)ck<=e#I!B@}=$Mgz7y)syJn_waimrtGh7L1oIXar*Dy|KhPB5)1IR6YKOcbV{66du)!F6dBz1MU~ zhQQUG2c%thp(g#nB_#_W**^?Xr?0RvrVL0_^InmrHN&ypIhn9sz;{W?uykh}cp2JA 
zj9UxWC0;-sBj4nN1wFomo;WI5e^#_Wwa?&|i8`~z$b2mU?A-dtDUaFp*GNsD^Qj!r zKxL6B6YLv#w~f)KR@(z3o0m$5T zHDD*47WC0qb`CY>5c2~pk*?fm%xRR17?6q++LC3M928&~9m08kKa`wNFCvO};t<3P zNl{_9I0kV6Wg)WcVpYRO!=k8L=!mS@?2N&rf|(;3-zmRh?2onunEy3@Vg3-WLdIZJ zM;7V{h~FI}6|5m05+`G*x&<`Eq3S|dsfn%St7h;aNdby1>0+b^#{j_b*a@0p3yD0U z7_6tFZpp>5I@GmJBSzt`(&+-5ppu5_7`_MGgz*GXnzlww!sZc56K?{35e|;;!F3So zznKyKJmw#)=P_~XZf?W1@aN%!aSQGG9{zq|TiNo}dKU8*4ZT^n!tC+U6>G}AJ+iI- zM4A6)r-9+2`&0i5-yWni+53Yl1OgyN*1Zt|aDWIIVZv@4&CoRVUSl(*Q8>>IQKv_N zaeJ9>0|ICS#u_?)RbAaJ46z_!G zROiqBbeWKh@8A1hqN+~b2KCX4lln>`jh89zJc$hSu_?B6q9G_dlXLH1K!eszk!{mH{9jWWAJIz}{qd*(nyJ)8vg~fxD_dBQUEPL$()tUd=qj)#VGm z?M(%9ikC*jwL5`}LPwB5mvn7^@Tj7!Cf(z!9K@(W>{p=KTk5fGbZTGpP%%0QCpx;yl6& z0%>sG6tT-GxYkM9KwQLcG=oS1^D|T`5k4u@8fVvEjp2XQCxyAx$T*I$x&Br59KsK3 zAO00>_{(V|tfH-dd2nX0)81p7kBOd=7FX?EyUVO&q}-VeZX%Eaq)Ro4P7$;vor`c% z>T2*Y|Ee_ZyC^MCP2f#kFrOmznTKBA;f#Oz=phSs4M=^iCl&^4409*31i(bcPh9 z|2u#F?+PWdL5aiXm9^Qqh5DctG{L(RVj8SMjkQq}onNT3%nE*B5CoblCxkMAYgGv| z?$CV`4LU(gF<+t{GTQSyApf7QK#k#+Ps4GP;T+Yd(p*3xEJAX6C5$6=>^7_K@Gmqmh!L{(;;!2X#*R zr^e?V=RbI@`2Gs?Vgk0Jqa#AqPu!I=hetzx3g`hva%SOE{BH~qK6?IR(JX&qWa$la z^cU=Sf5qoqJZLO3pfR_iyrPf#B(3@!m5+i5dG{z9>r?97)^mID4njH-f7km4ddH}< zWB9AgEKSr%rSWM7k?y!%BRkekAed_kwjdj*KZlA;^MFQdb=7WTKqQbRr70CJENe}ETq^I~>xo3W?W?1%0jch&Z}8)HK&vs|-rp0tE@%%Gn^aII zW|-`WcFHrIa~_JF1TmRXy3=GQ&Os7qO5H^uy6Mu2P)nrIgx+%*Yo$`ISaNu;tK7X6 z`&Wf~VFkfs!FH&+mFG$}yuh+zUy6+%CxQTbqUeFd+X%_5&lml!1*Q$K9$15XmuxGs zHPo%B*7C$ru{l5fbdRNb3phSl~r38zTbPk_xsM-fUdTmEgxDxx_ne7+1}ao~W29FJc8ri7qhIMeV!{9m zSS?40j?>i0O+p#FjiH7_w(80m(hGdz%%b1wYQGM{#<@*0{=|}SbQN->$WMM(_dWt@ zWdB#r$slAp$KcCgLrg=GtxVidnKz-WIG%F>t4V1qw3;juCXwZjA<#|HCD;csBhuX< zKHn-9F+sKPn1RP4*EhR803irMzgHN_VC@hQ+v}4J#@1->A{yCqL)?Mz1Q8LuCio;N zfY^d~60CH6zS|kz6C5YWbVNGQEE|Lu6=4Z9%-!$=& z$H(zaszlijN5p;(kE=1`K!0YNA90PSoCHGm8_GG(?joYoRQ)#b1q+ zTNdO&fEYm@$Z3(2B^dE*rd~gW`;S^`M0Lh&kmNLo=_IYG~PU!|A1x>|#%HYaA7m1JOY!;19-LtjTY@nzzn?mk{{@W~U6)jpgu(ny)=2<-?pUd`bNUg`BN_5I()ve6g zy)_@_gFZH0_BcrWB5cU4?M+bRhDZa6NF0h7@}Zk}o;|i^MC&<{JN+Fh3c$lUct&p! 
zT|!-fR2i0{EZD*z9jWkZJ__@c98lk(q?~sz=-}1Y^S@UezBuFP#Tkd5Tz|w(+Pi1J zXX-w`*P%M9d67SQrry8Ye|@sXzYG;40e$z>y-xRDDon0>ELILpE_h%hh;zEWXG#SW z;!sd+?BUI8$Gk`)oGtyPme4#RKTN8(&_Mz#@__f|VZ_S^hJsl$=jPG7zaqP$Ju|Uy zLJYinW^Csm5kR7Qy+_SK zmM3vFjt8AQlE5fWqdBYnll$LVh&PGx807pV`AG{(N)+Ye`CEvnJ@)~GVjf#@JH+k^ zd_X4;t>!Pc5Qe{kxwXJhBNZc&JLK)(%YRPm|K$Jma)_${ z3|{}-UoEkpIB8jjc%r-HD?DRrWm^(F#XQmxmR zfA1m5kciox59E)>wl0_d?*EePpC|Y2z~=f&5q9#LMu;sC+g+K3`aj+=MHAET$M?K1 z+4vc9Ayu$6rL|}-> zZ#~HP{_0_+gYliE`*D1)%p&a&L<)%a!ob$gn$WR;2f@JXaT-~Ocjck#1c(|4PXU75 zE;@$W>2mIsk!U<4q8f}KQo^VCNlzfnMY_hq;{k34lkv-!>?h)#P2MMH$9RG+i@sPO zco23>1gi&c<_O}pu81U^zy)O2+yRK8LQ8-zhZt23Li1$gu`YEj1m@$k=#uF4+a+lG zCPL?UPrpU@QT;XoR_V}Ug`y#RB?|rc{&9Q-rh5YO?G4g^s*2m_nUmS>Z(<(k4h|$( zQ-8S1MeG&OFDSr`EYkOjZt{52bOWLU?kvO`?-7?wh7LF*ZzAMtkjK3vu?Qj&h#NC< z-veB*ho~M9s*IBK&X*|btl5h?5Q<2C38U90xcFZsD@oU2r8O-(lu0v zToQ6M1t!0Q>%|qWfTkRB7o$qVIRv80{R-FB&&SV7xW2b(=6uE4?pv@eidc6oY*u)l zcK7|$IPSFx_8L5gc~d-UKb`O{;u;h;kvb55h0+rl-`_oAnBy_Nt3QtKc@(Bb2*x7< z^(<7J%2D4+SYM2q)U_f5t}Q8d`6YDXDd5=0$*r!H5WGtx_#dPYmWxNhV`Ri{$|bkK zy-1Y{w{?x!aD8vNs=Nu*kV7;3NlogKJzh&1j%r|y>{T>PnuW2=3ykl=qzrmA7n3rq zt06-7?SU5}zAq*=E5KAPP{{m_V>}B%6kQT}I_yAr=<$eOCxjhUV|;)03_b0)WP%!m zGcvxA*m~p~S4?+y;wj7K(*B5gOO<6|Oh&$=8PZfJH_Jl2Uj&l=@M@D|shn#%7YTOF{`tMf zLjykQ`zt<AbBg^@pRMOSBD(&C-fz z7U?=esxG9ehmvUDq$)|NT?<9|$^FbKT?PqvQSU)<=5aa|(J0D52s26b<|oSO3JU!X z(WFv)KpXX%qy4(pf6b<6K~ms_O42w5QN|d2Z*UU@f5>zc&(9hiDkHiW32psZ5&=i( zokvtm3RGtyRK(qks)Lk7e91C{HbBXc8ho$uUwGy=X24f$F#mJ^tWg-Kp`OH3Ard07 z^(+6sW=ZilN<7`O5!AEKQCHGtR_?$9Yi@CtDlsCWeuUZ@S*DPR>nWr`wH$0fQMRj%JwVC%lHWpB8`Gz#tjbRn-XRL(@LlBWR37+XFN zN4jY35=nwpZ_E&vOXpJkt_fQCqiDTOT>WG3wa+c;yRYQ$vQgHHoBJId4^69d+PLWt zAqsYj0@j6Q2yf;J#LZP#Y}K73ri3f9PxsC_ zwmOO{vClbMvT^2Uqae@sPO~7-xw}cA7PQ_B@_w>DN>kte&-rz_*5k%Sduopx7q7Br zv&D`AlOipzmVkWz)RutmkdRw#tk9pj-sgW8-m-+1q;=&}CdHcO5iw4z=iGZ#m_Ddn zkZCp|;VZ^PGx}@lwwhc2x^Gjo_Ofx&)B2aOBiL(>X|vhUjJ{}X_Of`DHrqr!M(zvB zh8gLPF)PgEEdXHSpw^*#KDU0KaDQW6-GqBSnPw#~E}~IUrhOKT9+FQ-z^l0M{Z7*0 z(TY|Tw&N+-bf!4b(z<|NM@vh?q(q-%-hHcGr=Daa3Z|Z%mbtZZ+G=~B=s9Aa z%4wOL>T5Po6K0I6S}w{(5pY;;63-sRWa0B9em zeRWMq4m5Y$$d@v6%0oF+&N#P!aXNt0_U)Ch(}D4y{E@;!N(;>^f&WfF39}OaL^y^p zrfa-u>_GsMh*dr;cBx7)-xTBt)q-os*?+a#HSxH)<=GKMAccIbp~@>2OR8Hv30BQ_&IPPI_q=2{hwk+$> z7~1{b>Vx(PT3ywZst%sGVtyR~N4oV%SHd{RszGf3!T-Qx|L=l*rHV2e8|G_i`7XDW z;dO;!XCmrq7b;PurjdsG@Lgghf1E<+efXe~-bLZSHS*htUju-l+jd+XURQj=ZpnNL z`Wm8V_OH2%UVRvZfu}?m`1Q2ryXQ8(Sfk3iiq%6;0->Bwh_?+rVl7`~Fs)flBlS!B`FI+!8-8g%) zQSsUQ2z}wOfWNK6hboaa?6}-IVTet=X|)6OcP-7+dX$YHQVlaK;a}Ywz(ReOITuTpya#eLqLR zm{uZjjE=RHIL4aT9&PDQn|4UDfyy02+&R08&?_iUwV*8GTxJXWSs{QN_Gv z+CtPSFQ8QEV41J2ojO@A<0U9%M@8Cm*z&V0Bovv{*HSsIL6*Nj6l^|21n z82O`dbf)N7lBR!f*kikykn#`XD{a1b$ZjuSa9!~0lCY_nG;~f@{W@Pvsl72A!p-eW z&dqhN);%#$N1OyF_eD#DO*b7NtLUQAbu;HtAf24YHxlYnPw0xML=m}oC&ijd(G?ji z_s^r8fgT(kiBo6ne$@G?j;F4_<;sd zH!Qbw&(m=Z5nszrnZgb{>%uT$iKcz^V|oB@=loc~@-JOO&fU1QLq5C|hrdtEW@Kmy_4NpkyBW zFtD@RH{fW>r_bQnwb4NzjV02 zb+PK1Z;a77cGin`&iA#RCgi)_er8F{5;=yM#=QK{wwU#}=c~oLP75ks)0Yqb^3dO6 zX~~4|3OnXl9Q5AsQ=a-mW&39=gX--Yet7$dZmprujQZK1s?{$d4OKI&*FT#y^zC40 z!_PkL6*2XHTU7Gr`2KJ4(I8t#Rko<-3Hx%-H6EQUXw%!tYslt%D4ve5X%_8I;Ej~H zZu*gG-H_q2%-@y&)ugy8e$*U!NQNJ4p@UuM7Yu#o1l?m5Q>E$Ybe}S7ny#d`RjE=^ zrW7cgo#h48&yR*m_mz#G$l|lVWxTbcnb69w`XuRg=ucfkZI9RDhq|gN{GG>@I$aj( z*6_}@1!PcI>!>MutZ}>K?5k^)?SA!woa8X&Ia{x&B+6AOr8-)ys@Tt*TWLv=N=E9m#g73>!wu95FTur;Rt@69%yv?6#EqE ziv6J2fl8d8MT0~<_<#(ko?CR#9Z3(y*!f#m;V9rQ?`nRtHp(SNLnVDP;YwCsi zvt5OO7tUf^PWA4Kwq~#FXR{;5#Toadv(f{!ip>gt`f|n6jmz8OI0bNW*cZ2N>Ba|J zA8d`B_NBR2VL^nC1ij?^^v1OAeO_dIU$|e?nM&EeKec}U)QqXP 
zke)V*>#P@eqqY31fGwU%I@{0g!>u^`;vV!bm=p17v6i_OV*LfU)1E=qoQMSl3nUq5 zGcN46exhQ;748#joxDuF%xL+BMbTHHC@huG`=nk9DrwCjEjOf?j2r+1NnV;lAMrfo zqT`hORTRn#rRxjJ7Or(_3XHl$Z#uOG!n_`qf@#Uvwina(1owRLQ%{Fd!Odrhm zVazjY=eOluSf1Cg*|8x{zziI?XwT&}M**jv2O|1qE}^3XjGh-L-bC=}U%1rXY0#OK zLx5DX@&WTp*r13WA6>3o*XeT!$YJpY z@c7eS zm?YNWc@RHh@)JynA4V9eg-NraOc5q|)dPYhO(Wy0_Pmbz(W#_A&R-FQBKMk%U&(P- z2`pbU0z(OY);Pb+QlkOSYhu44bS`a>&*>Cq5Ty$}4?XrTRZq+D1zaA*Mm&e!rxWS9 zD$<(g0gWP#XAz;fp<1{FkB%eYIBN0`xakuZo6GQ^BjQhJ$_zR>u8yTJ86U77he;sl zh)N^01+XenSb7WR7nojhvi74MvFh#=?6mu&^ziVLOMke867Y+|i}7_=<=qMH~M zwXoXPPl3~FwIsN!9j{xOpOn14o4o?Pf})!nvFkp z%c0?RO#Sq|OAsJ&_O4WSPaU^_vY2wYH-#>fiJtA!%FIu581sEH!QJAjj79@&Ehu7-guf6$g)Agg=O(XB2s6&Z=V0h14_FC8SuYcP2>&5(s z1{qITocG&T&cCAGN%0=L`=PE*(mTE~90;Ae|1SRh-xlHj_A8KQhQKT6sKZQ@(e>F& z@2pdR1F_3%gMEI42L6C5?-Al@fM3HR7!3rOkpF=SR%(l$f&Js-P{qvBPwXj40=*jo zjFo_uIe5P#?|?DLNkke#w;q(55Fns0fsF5l>5J*ElwVL@pwQ=2vHs$>!5mH=Ej|4} zB4=-CuxTF$rFQ)64aWsiz&D)%E%g!4YDgE*k@N%t-51XN(ypTkNn( zcjIlJ;_YXiKe|3|EO+Kx}LI0Kl%^y&;RXX`)?mGC_}Z0x#pPCEq=bGmdCX6 zzH{-bAZEK#{Bz-NtBxYMcQpK=1uxT>W&_n3Oh)Ktysfu74o|SR+@Uih7K|>(jjIXi$4U4@=pr3cAtKWzJhU~)+-@H5i zYcSZtXvdBWQxoNRnEHo`h~JQ(Xj-BMPVlZv#F}50_Wo{V5cRiki@uXHGRhuh6g68s zOP%;S`RMO<$XUF~kPLVxI>dW5Z#T%`ze=_^n$bHjTaAJ3y)o>f?!7mM^{e;amYMs# zsk8ZBee0(Gn47t}qD-A1(q1!Q_ec2aVjRXT|8;boYi$kNww&)d$TNPa4 z`Hp%?^v0dM<$cG87^l;X`zJf_Qn`|{kwpC#rKoXT(ShbFGo~!x--#y|OQPm7$8`(1 zv?_zH1ed|?tN!!uiz=AKtkE<&lNlH-QSeswRnr(<^To_!UIN=o*M;HZCym@8^S>P+ z%feqDvIc;hoQve`)Yxf^JSv(=efz4R4?pDA!ALHW_h>0>ZQWT|P@3=A^M=_1F_V+k zL(bn3j}qg5Ge5_UHa`N$q!PWFL7DeJp@NJ;mFU$h4N)NG<`$JUXGzbIDI5(31mWCx z+xlvv7t4WIL6^(Jor z(Pi{oQ?H9{(jP1)U!4{1`m8SGw}RWxFd1`P!q?Ol*1Zq6$MHWbcH6M#TGW5IfBv^8 z(*MY%!E<>u)F;BCIi_n{xPEaGW>+^QWn%v?#==P5glf5}nzNqC7Jot8I zEVLtAP&kK(jmWehU@Oc51?v7EUGEw_7}52*z5x@wWrOfoDjDBZ)(aW#l$Qg4&d;P^ z5Kh`~(I&zu+7G*CA|(psYh*!L=w}K$%6n)=LvAuq15}|t1mJabR?JE^X$y{>8#67mG#fg&3T47LOnWBt&=*w zIsC1;&edmoUR|9!xM*Z*(SqswuRePcvIV_bt$zzyEoC;eTXk|NfwZ7eBbO zZJMtK6jT=qGpQ6?1q6KoL~ndWhlbu7b^X;_WM4&^)>w$-*>k{Znm$U@(%Yh<2ST1=YaSe{W z^*nQ7Rfe1VZLpy+oi;6gZlJY6zg)-eroN#u4xC}f6V*hpL#0W(=G~DfjGa8@wLb09 z&B?c73$#M?#gpWvfzc+RIqCY=5yrSx7Nv@9!h5?dKGd@$UEA11UTS4%&7bHWKEKB* zc9NEljFPld0Yjm(2{&~${y4vSj5l$$y!Q9_NJ^m?i)?dR?4}wTtKUIiEb$UrROc#R ze1V#4_)BH)7qcNs)?E5}QSI%R+V89Ejs$LMXX#XVEHf0-fn3jK-Wh0lys6!uY5VJ} zFfi^feL%G~Mw_0lN;=CE=m>?AKLj86y@&$EGCYf%4h$Y;QV+bAI4!dMA;$JuTcF>p zAv*QIFU!mB>z|N^9`#r^Al7hrQTTtyW|A zc6ZNDXXh9u_YOF9W}wnwvBUD?{7fU&?HPK`Pll{whNi$4y11remzTm{JjLbmWiMla z{`gs*G5G^KuP3bP-mDWCHqkJD*$jIv9)Dbfv5qdaJXtwaMcJo3+nKd;P`OiKrFP=w zx5hZP6|}B#hPhQ{f|ap3uCO!~?-_0NpW>u@Y4l2Zimn!`&dcqK*EiP;`DmqeX$>vE zntK&npy27|9!DY;Ia5kiB89F5dct|;PIUoYl>J(?R26@plCRL!F_FnQFnHpLEHiy+ zZud(LXz!aXFMS#J#xwmgW4~zMF9I#o1(cmSB0K7x1^#T^^7#yVK!c}z=bG;izWZeyA;jpf0j>Pj<3+P35B7oyHhH_f&ums|LjCdW*q zn?*7yh1^Q}C51=U-*whwI#_>i{<;3L`OukIvMup_@)(Owr%-rWEM4|aeB>x|l+G%p zSjgN#l|9a-S!UJ;Y1J%zl9(edNzJnKyEgghZ{9|OrrReUpStYn@VRW8^rxmXjB_0; z*voZo+xh2r+Bs%&+2KB`zh!nT*LCETPm2k+PG4;;FVPDr%sK9~^2wTMsZQdU2D@>` zT=B_@V6lu1Sc`e}xKd`9ZD$hFW|=!@+LjW%rDI~NzMQyRRzcZ3iF1t1>Uc<=!R?!6 z*@1EX!VMj@T&vcQGk=cn|B-&^p(2oP#u2Xd;L4?w8n-nF3Rg2HnV-Rz;y5Y zXFtww>9fgrHZsQu=>35GulCPMT{%Pj`;`xGQq$DxNA8=i-~aLHtI;EY>IHF)EB9Zz zFu9@^;qRWl|N+1B#(^la- zWt>7+3p1~2&>}E3y(;vlkCm@+>5CRjrvAg7)MnYYSHCM?L9`}T&bf#GWF{R@u1TYW32?$7c4pWG_{{juo+cUIuJWJP5n zdX<{{i=C>16DxS(KU<*v7pW=9PA)1oz~`z{SDIaWomzYuVaH(Vr4R0uEM;YP6Fpyk z(h0pDi|j#)`x?S<80G@|mJHW=Xq-B`%s2BFx2t4D0(Pi>XIrMJKtD`M#y4g9Z3Ab@ z$-n2XL=;N;K=w@XR2#zf=bUvG3~K$R>HF8k_0gPP{mL0x6DPLhFjz)>^Ux ze(hzMo&QN9!cgOs#-4h~iFehIX)JalspHcZ7w!ABsgj)5_5Mry*8G>o*WSq~ 
z#YAXE=gRVwZP_u7iT>IG$aLstg>p_FCyyE#9BtF~sC!u($9TDa=G@QS>{(m3)Z5sY zf=0zH)pdj}jW`rD_Q&bRBpCG<6=~0~C-N5;v!`4$8jqTHq3uz=N^pOivRf1iB2))Y zao?|7Wz#<{kEV=yY9@4wHE|LOcts4T?^@T@iWUtuaot0cZ*N<-ZQa($T)?oQg~KgB zjS?7T(a=jvV;F~W0tJFA=?9+PTB%l+w>t{J={l}{-8+S1?DB++?`@YBGY`tOzvr*8 ze>O2@v!!*(3P$i<{2k#3*|lIS3iF``b<)YOz8tO&-?b!67deVawRbJ0QafJ-l|tD_ zs}4V=+=w-tXEom5A2~tnSzGs4?z=YSu3< z0?Wc{Nlboavex3t4-UTjmv@X?m)CqT-9IaSN_*8j!D` ztQk0wTXW*+@Tm88Z=+i&c_oga@7ua^Z*ca!zT2=gSGlHlJZ*e4j}kFIU=y$tq zTJlEBi9RuRsrG4(EW)lW&PwFArOseE$LB;=oSURx`*d5(Kl%6`qnw%(*L$fiYleVU zn9ilLK%h1?>t=(^CD~;itS^yP#*M*;O66TdC0%a1ampi-vWw#B&Pj_^o?_em6{Yw> zD{=CyNSE6>i7h3j3bdK7gmD$#`~n&|eWsFYMn!x#(!jSCH5rR1VSdgKrTGCQ1^jq6Zo3Rs|4KV}I-h2_iC3+319Q;pX2-Axi$VTJ(g6VK}8I3WBP3s;@N|W z!KTx~-d6yT8yX7yN}OiOlQxQBZp1ZIcfW*-J!|A%3*Snadn4{8}EjJZ=qf z9B*dyq^T`7w_yoBIkIrN$gW}OPDP%2)UQ?SXyG`{N|hu_dLp2mvY{xSNGG&NC#0wE zLh5ln7K4xf-l1A4eF?D|92)c*33gF_N@b>RV{;sRM;`W(wF}+Fi;4aZ1c&i=aAFz5y;~$S9NXT|eC!`RA0d{Ey?SK!YjThe^LM;QV?qREL+M*pBCHbr>S`mDDgN~}lcxjl#y&TUv-`sU< z2lJFcM*3gp8jbNbFi&UmguRBZcV!rwEn81d$_|;3FSJR@9)8q5{FA9x$i{CcoP9W< z;lfP+-ZKAZ8w|*RXG}8QuKly`U$re9&$_c%!sxPl5aDG=Zm#3;Bw^YAjxWlM zq%7E^#{PMZ7$jbF=IJ}7u${KBfgPQXl16ULscQXX@;P~oC|)nA)z8uJRsEU}51Nq^ z*Tq)fYyf2nIO8UoNc6!vI&RM$>_JLgJ-kMp+qy(wxkeRfu)-dzwYdI?!tRp#jcMz$ zDbW*{*~UKSDRZU9ZAmoU8oS(b#px#(BAy&y@}%CREVyk$?$O-6ONUQztZGhj$`xd0 zr&#K@G>3DBYwT#7?9-F=y2Ckbq01t)o-O5Y{z=Do$Pir1A*0#$vjjSa_+J*82T=Q@ zgMSH|HQ;j2#3+8wkqjd^3A=|i85()g8yl|d1)OsI1NraqTZ8UBo#frUlF}^79IdoVNmvJ zq^qRhFb{%u=x<1Cr>y^zTVitl?nXE1gF`Y>Cp;YLNFY}l`H#GHMyby75?O(4nUWoXtKMEFq&Mmi_8i{!*FX_u+?$t&{ zLtZ z)Y$2l*GF)CW^tTCmTLXYPXE?Y&c^VioVN9Lep{yV59bcoQ+zn($5-W*4_h(!CM`2# zs8SthFCBbVn^_H(C>He?Rn}w(cApOCiCp1&3#$%S(SdFpG>NZ zab03(H>u+G(n)RN-|X%Fsp^BmB#+yap^0<|);#1MS3sJx%^>E6od{hL33>*f?AAk%Jfbh_`}Zl^Za%29*OEO!&Ke?!N)*-1#GkJU(9^r zTCpan+LF0#SFN(-jk`lY2#1p95D;wLSG+n>uXc5mUQNj^nZn)U#|f#Q8Avi&QG8*;NXzg2xA2Y>w2&=&3 zbuPL`FuLVsex^UGVf;i+SV42&apDvK&Nws{5P4aV@!e!Y)3Kv$FgWsYenyu(7=V{L zUxGFTSpFehvoX~BHO2?q81ER|{ZQWF>S@OvW+F>n7Dc>ihW*fK1Om!|rvx4n=1_lh+$U!=T98LL?6UTueWj9+F_KJ~n@bY?VfifE=U z--{J-Rn@GDYVd^KNZ%U^g?$95!xo@O`=5>{w#kz(sCBGxJzt-s81q zSsiC*?M`=f(fujGynW-*-4(81?B09xi-}W?C;WGhFD&<*(z_XutNG5PJIU zQv1j#b;}X^sM7Lt(KxpueIyJ&wtY`5dSTFwf9V4)FZhrRhrFw9rxg|0R8H_qs_Xoh z3coC^P=d$VyM(ZDK0tK`T+Rm&2#uEqhxaO)!*$Ey{0?Fr+>Pq+>d@y@-dJjT<6UW5 z?UmXa>Y7L8XZ;}5g!s>R`P4_Q`%@ojC!y9bJJtNk#TCO%-ynT^>Cxm>8~#6uk0?{O z76tcPz$hm0rNUKCxm@eAd3U!W(k)Y7zd5=^RW!F!`glihU%KbsyVjrTb1BoLdQuSs z^j)?DGVq$1q5I!&WoXDFGC;Q%)Yxwoj)P=$5aU zY6RjKf+sE2Z6(XnUT;Sg$Va zTf}-2?>>nz6D0`Jy}4B!NXGYD;ccpoBE@|7$McsW&?4Ru5=OLuekb1FG^A#yYWC&OqX)D^v#8-glovu$59V*$UcOrE*KzoPGUQWJqRS~a%<}&P zehS*<(iEL?g}!nKqAOT50PiNieomo+jd-)hR?p(H1N#|^UZke=1wN&!3WpI|JMEO*G=ye)?GXF zc*KKR5~H%Cbd7V{RdyUn1(13|SU`nt)OB`l3(vNSKmj{pEQq;Rw$!cYEz|8FwmdXP zgeTbUX2p!a+8k(fa1|~B`8cCUYw-3+V%|bTL1cVYPUTE%@HhV4Uq$(zL5uk4$(Y;k zNM!=2psuNGg>3sD4mHS!0%6m957}t4#WIO@PaCbDu7;gvCb(E2V%0;S3kbQWG7;?H z-KB)d1SMDxz-Y!!(YFXlCw2#n7Y7hB!=D*sl{ER|hq1u6jO_Wc4@?-3J2f6N*PlbX>|p*kfXe)`3IT;)b!9 zLyYvkDg=h9_20;t#WDSDvMxvM724t`iDfL>^>Z4YBm8VUG_4AG`NkfsahNJgp?X6Q z>D<>N(O=g55Wo>mk-&+zkRB6MU>nmT3*a)XZ;T`&w@|CVxI$IOgz^!4sRB&zvPAg0 z4cZX#($1iad;59OtZ|~C`L}kvs zmPz(nWTTETCjaoFQGih;-n4&YMo1ewn*Ne>{gKX9X!3fVT#vkQaiw}}Sbgz7t{;g4 zEsR|3M2cB1_Q#(DT%LbAc>0=|^IzWfOxaXyz>jmy6)Av*9=NiAKJmbyA$*#D1gD7D zF%P+}u76Zy%L6#H3sK}?L;FiY4B8Ys2=8Np6k1w9u(^O?A6wW{64!p%8^|G$h=8Vo zNg=TsC1%XI#8=c~BPdB7KoG?ZZ5jY}r|)a75&Q#T71R#1qx>Oq78B>qjYqyLG9) zobs>!urQnh!&HMVRkf$V0&fJyM^E5eI2*2dupu2JZt+Nmy4)DNn0uW#DMj|fNfaK3 
zuvLOF=$FMm!xuCXo=6hh7@SfL(pWG(gt2q|E6`1ExO|Hz`SNrH@xk*L8X%5@RAar6r2GtzZR^t2+{7blyw6M2`Z^2_E1+WdZ zjMN_N0DO3N_7>LLBkoc$=f8|ZwVbKUs?5r6SXHR&8B!&Br!b#*2 z;Q)e6Um}5DLm}uwYKVOaMVqkhw*t%vCLm0iv;Nf|_8%r>VXK*Qx7^9b$!R!VsO)ws za1t&L5-Q`I4m5hs<2uLCox8_$eF7&<=>AM6KW(y7wK4rNKS?W^Lb24MOZmcfvEY&_EVuFBaQCFJrO>iE&kL|yJBqU+$dmIx!nxu-8JL);6K+L59Gx;@; z-KH=Y>jP?XZz#v~bI1kKZbkZEYGKy4?6U2&L&d=A(mVU|@?G*{3zIlpF_&lzR-YzG zQ*X!*^xfg^T!9iYY@UA!yE_Axb)AQPP$X%iZ^HYBOLYk+e~ETfSkES1# zyV{OlT_SglDa=T)ot$m!+W*bo>=NJpSCgy!jk9g{W=Yqz@z*H~cR+q`RnS})vvyj-Mt#$+Oscp z;a;w>bJNDe69fk1<+q{(-#7)$U-#61SKw-ed+b~s%N5@eT;0ZEW8-62M7T$|^~sth zOc2=0Z$26k-8pac3N}J%2S0+(nPyO{Qc*+=p`h6DL>9zLSwXSydWACft6QIgeqIK3#q>4~S<0OrBxRtkHE1 zHsucyIP^pKnI6H_u!df>HIlS4RNIRI7oB$i1UP9Mg{Azs zfZ$>ACa?x}N0pezl$}INo&NXsk?0ZpfE65@5rk~-82IJ>II`5oA17e4r9}mbMjV%f z^dxCM4Q)Flo7O^(4HVIRGQLX$dN-1Dg|=lK&tm;@cA4g3!`t!lsfo)quC>rk@I-t6 z6ur^bXR%ZCI$PT$qFwZYDV98AMLZpgsj#*i$WH>`C%%a}yF9{RO99qCLElyt6O(V< zjV*;$NCW}rrP!SHF#nGbMJaJA6Q)geqFDU^3LMsKR9R1 zegJ!k$0M=Q(cpGL_HPBupOF1Q9{$6&LSfp|PB5nWEo$QbPB6BFV%qMi3UZBUi6wMV z;!48-ytcE&0?^>p9!>u~`Uja7l;l~9KaMXUxXD8*!5-#-+)JD`fIfy{hF%}sYr=|5 zz+21GOozlbB(Vi0CC$bePfX*l2n;T8xdh9mG4g34Ln2`*2xAEiEOH>7+K$r>icSO8 zwAz$xqDUqbWxwXON@A5Jum1+!gG({i+(P$&V8U0jLya&5#tU&Upp0V%9VKszS){uH zCLHQJKdTWuAAseN`U7T|;P9sitkrk=H0q%x7+>M7`Tct?`)hYGtQ7YTT;49WH4k)+ zR@}!sScI zf8a7Rpc@WS8@lfk8sDR0EJyLVet;+HjrM5SFM=6?J!U0UkgGE(iQ^J~Jji(jvoBO;-g9U^s zRY_ICbMw0GWGOr3zH~kg0z4@`;4#H}+(!tX*wu|HJWhU3&figw7vK|~`d9sx{4wD} z64#HH#Irc|3Sm~_8&?NtP3!|ldx1!jMT8N~1zluB4JU>woj2&5=JKj9qs86AsAo|&$_wFJw^<6w^0IZ@W0 zspXA7JLE{PC^3r;=!?6Y+{fjg!~4cIF~1>J5D9^Ze^;=~FFv=e5DnpMS@^E-dRi%$ z!O3;{G`Fj&V*+~u1xT9OPJ64exvdc{EqlfFV)KoknMB*@+2}^nMp@q|yLu#DT-rhB zmw^o_MXk+03(j>p#&;DS^i00K41HXPLqY+kuRoVJC#FryG%USopk`H@MU|c+D#ZD< zZE|D(il}vnD1cs^&t<@$-gH&g1WSLL*Z#hLN$B`p-t^eVfB8;}h81%imz~V%6vXFo zndY0P#lsFN4gpfs2AKI}-t(Kw6|}kNUN<#0E^YI7|1_VBuiA6BnH}ZY-^UmFD=`S2 z6?3|~8U$wUf}lJT$JRb4ch7}`rB#&pop|ty)pc0)s^{c(BG;IV?_;@Rmi&oH9gZ_w zS$eQHzPXnzT_fvY>9PNst+g}B#W&M~ZF(*;x$$aaL~ECsyYuzWOb%e$j#pM*%Sp0L zvP^MoH%z`Wv{xr}vd%enoJF7Nb(*Rmb)vR<_!dPX-PrmSCF9ewB;O%R>e}}7W9Dn= z#rd=uzQ>PkSC7`8QwR`mfj8^SC{FGJ`cZ!w>%u+m;9vOhIgN+xguW@ObA`S!FrYIj z+|QNkI-*@mz~zbZDh! zUMDd02;2+1Oah1o>|KhlQozHlQUW#H=pve?zVN~Gg`PF#7szw|-v0{|pJ*JM*cuUT zIcU^yS^+E<&-Cv&?nPjFL*@gNb(Y4{g#!ryX=x)aD>yh6845<7wi8>+^iO zSS&qQp91+|fyDm^F9Rz9tb+bH%qY3cMuZ*jPU*t=b${?Sf?_6VUOXs>@8U02zhmHY zo5r?15MzO6e)O~f#iJ|G?AyXW$5~8ro;a=HzJl`-pP!+-*uTz&o!=1e#@qQ*x>akz zv)yy281FW}?dD_A_9pCKehqrKWVYjhXQ#rPwWQX!_g z6@Uxj{CmYVJz+tH<2E2R9Ch8Tr%ajRY(2#)*4b)GU&SnUmx|aqE5iD$>|+GRBa-}% zo!zyIRbe*aorTU>fycsZ!a6(CN?hYtUr^*L@~(|@ZjR6N&oO>FZ}qM5Q~h(y%iG2C z#QqUGZ%wiH7yGZ?t1iU-ShXt!ruH^syNHnbW0Km)MT4!eY(k&2?`$nR?e>JVaA?Ja zR$YOf6OAR>&%SW-OSz1Jk2w(`od*P_&ROLA9roC*2Y-!i3qP*EUO3hDySDUKLV{=dcpb$0mMZ zv2Rv7`N&@E8M$&$wMgyUklR_LKXL%iwe!GW1ej8DaohL=bNdH z`}E|?Oljt{X>lP&V-=Gs^rxv`OdqTGzc299(%X6Y+0NS5JZiKh-vaA|zwB%f3ZcF=i9yKC?>uqi$SoOML zUypVCH1pB11pnyb$!+UJ&LKT|Qd>dG)%}shfnBS-?zLWxrBBp@@xn1g!M6WKc=+YkqNc{5BO{@ipCS-iSbDH>z z4dP#~kK-#L_meumB_*R-q&St@d<6Njt!M!r;0zEUbndaxg((P9EE#*DdL!XdD91Py zsvj(4^TDn>4gF_=eFI;fS$)kilZ2S&Z7|S z2JLHnD8u`tulj#6_aC{0&N137DH70S zR<4Ho<1PEG9D?F4Z%47C;}^Hbzhg3{LyIzO-Q}QnScf5MoTG}U=saZ6y2An&Q~}mX zS34tDyYP@wYb&Yb06nME3g<_M-(TqVGj$-x)XlZf&D*Bd+kW^9>4BW1ogsH>=`L>G zC81|-u&oQX z|FytTugAeZEJ`n*)-RXz3mrYVkz;x-RmzkW{I5)DskZLmg(*#?d7-AdNv1}jp+=!) 
zcl2(S957w{xODmfQ{Ch_Bz-;*`eo3ANpwfkJn&aNC5!k34}tXqF1T0+1ob_xZy2ews=9@XNRJcSM;M*D@40>^yDQ zIW6u%M;zuxEqf}$&6^N(0LR0yTlEON>JN;C@aJ#w_i~8oxN&o;=ssRdzqDrwHi8y3RohWk+_x?lW zLs<5TsSo})^Y?$wXxuW|$Ze_ItZ?vp9>q;@;Lshy}iJo(2T zw(Z=U5PwNj@+ygS^4)Q}Ehq94wr-}Z3kvlL3S5En6uK;uWGs?Y&X82PQwG90VM3OH zz)D8L_H_iD=hEeZpvLs_V}ik5h0i#C%4clx#m>i$M<=QN_~d%cvClrdr#aS44Q$H$ zmFnMAU{Rr{Ame-8E95uKU&rP*^B01u&{CDRSYG-%_nMQuMJLPxXGSe1)>H8r8X$JA zUb$=X)SNSrkp#Q0dbt9Tblz1GR3ntGIYmNsBMpj7CrQNzMXk!%afq|~>{Vb$$e3#X zX@Q(^SSEMo>%InWwkF%vqWOIHRWzTsOeV*nZ9T3l)EOhj9cxsHj~=weSNu|2TVC%K zus?_5I^w`dQ@t7GsUe-8;sq0JHG#dPyT;)(I|k$d^~MY ztq~QLI*7QkVn!__YRdK8R7yv~<9?#+bNY!qf=xCQl_7ayOg6irOyCACDVQpKoYq=| zv1N+@fM!;tbvZ2R3L};|o!0LA;pOSD<5)RscWoDGT`E*GleY1 zYkoWUJ*Vj3n>{jKEt!=cp79aP51`a6`=lT46$ndl1|#JlPF~A;cXfqoVGthz4;8&Znm7A3ERhVz=$-OCBMr=8l&1smvF9Vy6b( zn9r_7gk;MMd1z>=o#ll>gWH3ruUl56*>}G8|Eux+ z*Dsb4ZLCkOn6tyuK!;{M@Z4Pl11OBtmhSMFpq8|O7qlfT=BI&YSN83!IEh*;ntQ5L zHM_*Q3yDLMDEBe9meY}25Iv!s%gopSC=Uz{i+RxZRnCSPjWmBqWOo<>{$zZU+STL*%*kV6x7Mxc zGpe5JDcMP6vPDzN9xz3agI)e{gWJp%lvZGyC}|t1#a(k#)#JibB&j&A8J;^=FjthB zEy~VV8oJwefBbG8o{Fy2)rsa>-e?(+NXCT>Rlq8^E7OH6HyXs|!e}NmBKD_04r94l z5*S2F#8r8@<2@2z+E%wbx4gNYn^{$xBx$;p5NcQvd`XEy52zFdWPH0ldr5z&xbe;W zg>9WQYuLC(D2FMt)&?5pcnYi3wq4afRX=I{+!)4qj%Vd8ISXCrux!qxXm|BtF-IF! z&60#!fSjQV`+=`T$)5?QJoGH6D~{}D(v-AD`o_!M4ZD4!+%qRWcsly^CqFk%?Wfab zIr3>IX038_okLrDB*JhPr-{?}XJtU6!IYGm-%`4-+!j6cXnmIQv$XNUgm((1O&ZN- zA$JBge3+baC}q0d#U0xxoE}}=u~iv(=>3ZpY4-YolS3EZ-DvlSty`iWsI3qFLtkZV z%Jz_iby3Ap``1BeIqG1`i{#a7&wNnvL19%Be=i)A#?w)6oO4gc04d`5Bz3%J?Or$| z!WL6i3pZSwJlMyRi`f|_BYdTEa`H!%xsM>S&HdoCih|sOo}~i=hv{}R79DPuvu0Fx zICw`lA0~WPi?1_Qeywf^Bk_?j4e=qmebDHWjZ=*zmKzYH3~Joj2>gX&L9$pSx^Y8( z;foaV10H19#*GH^sj2hkShqv){ud(zK@sjgkh@AerHzP;_4GI*2A5+jzW_`5jwedy zR8sa|8?!dMw2Zr!jPEZCM-9Dc$>K|I#y30L?)SCX<__Fw>J{5AVs!4Fb)2q)d99h% z9Z7M4gT~9al}+2dcn`*mjF25Xf05n2-`J*Q8Z}1k$rX(aL)eaB`hdwyl0iL|0cPvuDqq)~+LuSnElC)mCM0dgh=WWGLw;;p33C z&del;mbQAst+_SpCU3=Dfmd^#!xtyFR$0VcAzkCEXejAdafY7%`u?;0;a=vl`G0+F z7`yzpma*TijV5pTtoPpBxAXVkMpOT9-}rpipIh9&clh1y=KWK${mzg6`1sGx%PS|p zj(x)ZW7d|)Sj}EE?QhQ-?u~Ba@7*`L{I{>4@7?!8OIL;;5K>M0CyOF`qiC+WoQ>UF zT@RE}bms%Em2sH!CNqSIKg6s-l#7~O2Pv;;0<``Msk8G8eQELk^k2A98DalEG95K> zOCUDdsJx@n?!M&^B_>~tVmB>4^Z#IcIl4W&eL1=XB50WFyx&x1Bdi z_|M|E`)~3%%6>hA1%vt1N*_ZmuOb=lufk^}H2q~@o(FQ>_H|)_VS%gYFt^LJoy~*oFS+P7EfaX#RF4>lYd9_3=O-am)NW(4W3+o~>sa@nt^p@w&I>$UZtjtimL ze#_+yCRGhJ=sr6-W0AN1mA#pB#CSj*@QYH`i7#?>v%k6k6*esEuEmh*3MoYp7E73pan$ zsqR#NQ;?FPJQ=?=+0kx~{NNussU@|i9JW%~TSIHTx3103_GdftPGF=X9q@}h3dPxq#P1#&^Ug3H;|> z;*Z{}&ry*`MSpNFtRoLJP1n3Cbg3$wa|0Wd%jt;b^=P!-zhSMiZn>g*C0Ot1LZZqi z;Bf>NpfSKPx%8m@ zz(CxgM`lGKhm=HO3^SKZU#DrqPOoLhkp4M4Q)b-fW9GivGxlQcVD*#5J}a!gI)s+c z#6w=y)#i&A(X>;WWPU#ga6IW%GGm*eZIn$eo0X8ugXU)3;icw@=u%4X^;uf&YtDT> zNcx({b2gcqdsR1~6Yx>O{RKOjn7<#q9bYrA%0Y8qyinp|pXe!VGot&6o>@BT^DUUU z8X_Fpy_}?GC+7h&BE)(qOegk484}#V8`d_6Vv{_=8r??|8g~}a>UalOL)iEEQ|Fo{)7LToU9*h zj-yiA;+j4>kk^*fVQQD+9oM|&NLy%!`!RWKqD#q<2P4x!^&2Sd#OJMPaTZ3TsXRBJ;u=7*`RwHgx+D3iMLl5*it zdAp{ZTpKun4D8c>7-Jt>8q4T!l5D%A|F1d68a99dL06F)*NhQwSA58l%UWMps|nP$ zP!iLgp{R=j7vUYuW`}_;huvN(1E?x1hZx^Hh|12R|LFsp$dRcSUr3gon#U71><0xx zW`!}v*oAG$vgNCwX8NfO-I*+1aP_ zANBs_`ET8SLP>aB)AZSU&$4_;Y;v6Wc76VD!~4Ixk|E?jiVg!tSfqYr!hZ^ok6(YJ z_gr|u$RpiC>7lmX0zZClZ@$h@R+TbR8O3C91J>O^Mv28KyN0Yo`v+EMnPFo^U-rRi z@?Gu+J?+E;M;kG8jfv46Wx}VYSkB4IlAQ3U zen~p3jtc0SzcSWeo~rK%BiI3dj#sZ8+yB)1i&kuV`9O) zm^X>#oG^)qG)i z&koS^jKE#V&grsq$MadwD29us(qEOGA07j97o(6Jl{I`05cFeUD;Q`=6R`Z07L7TN zz>;}3il8i*E)$X;3dkq+!kO7!NL;rg;cslFc|A1U`iVQ;oOY+27&Zz^P#rKZ5I`6q zTX=j@SXpZXf1~3NUVo@WKfaK!Slq++8wl?W{beB9^bo(Ttx)#Mv+$m$eu}3!;hP6A 
zudT;Vi#d-dM%uCpM)>->>#f>P>7M5y9LbMwsiftz_~uX?Au`I;%OknuO!}EoW3npTnFV)pcFmk^D+j zJG%~ccWN;$M{n&ctF``X{lnHoo%f$S^$p@p$d>C>EDLuGd%UI>FXm-Z*BN= zO2e6Er&Du3)lZl1m(l$cETsiyFlJyts>dE&>C$H&3zdi0t@rAP%39}{RGCUIu#-r* z!R^7T7(=32BDXUxW*>UBUFS_nqswy_oG~pm&&+mGvQ8UXLO&CjPFlAz2@HNKYBZVI~H?nJ=G>W zVN0HPjoTs$ocr#B#xnNRfsJJIk)zNoz;ENUKi`H9jnQ5o%2WdNh)eP9*LE$R;1 zDcNfJM+2XTetxIpmw+6D3k6h8L@rmB;nFd#*`;+e`(KJ_oI2% z_qC@%)BK!79OIB(#o~$*P(}#h{xuR>*$QosLIWyhBtAkr_ZSI(Mq9A(N*l3C_ZIL+ zdS9)U9%&mec{(xxU|ro-@YJujsPGWKH>>9_N7~wYp7K!xLKO&732>sWYUuQXAF}{v z%0i%^k-Sa<4xz1=dJ%VN9Sc2OXtjjBt_k=#iR@7 z%qw=?^;dIGgRXjINMy?#vyQx&H_Wc+u;^Xf4UXy$6q<)*W?n^J*D>xKi&cy%jfTRE zp-jWLpsj@N?sg*G)>^NURsOqVtHbyq>sedr=XDM(*Q<<>9$Fo6g5%%Kuu8vOkQp#W znDqgKSwC}1t4@qz9Ic+w7qQBE)_!`cVH~5L*0`HckuQk0{Hy}{M zi5SP@`eYWX4BT)8jv3l-RT+pk`NfoGI^G|Pykt6=7Sc+*Y#c)=5o8Q)cJrwvIXyzm z2Aq)W-;);F)Y*MIqt4x?(C)iZ(1TMtkMxlI5D36F4T4F60|*xWr}-P;etluxcSjF6 zU75~rjjx@1Jf*O-v88YC^wH4pukZivW0vJqvwrOrgyBm}v(^gCvNj%^{paqpFTWeu zxBD|e&w!&>a!UQtrui61E{&_hp;J%4XDA;C=_>z8fw>0IEeq{(@1UQ-v(caJqTyId z8_F&18ycje<&KuZoROU3l7g#ut`#4-Dp%jhyP9{WD5pRfDhN$UTNWIek`j`Vc)-iW z#nZ*J#lFPS(cZG5WIWn3+Ook?W>8R2pbQ!A>`X{V3Jpz32n`OUKoauz^z?GY18_Db zav@h-eSFCHKK6;x2R`}1cW=hmvk}kChK4Spbc1|GXnSx zb9G^pw~RlyW~F+3V|-DBqAohJ@tBqg6j_sb%Bc<%q3lGHIzlFjXqc|)sS5(JX#Ia0 z-!Utl^l$!Vv+?}d=E|$z|1C7(Z9;dyngPnnv}moA&pm_s6|H+xffK z$pXyYNC@K@GVxt$fkI2~{A3FJ*_16gnEExNR-f@+% z%t?%^Hr4An8Rc2#tt?oay8K>7Q>d%aH`gu0zBy8`&AL-#vL%dA)l+ubRe(bIf9=#b~Bx(z^8Jmwc;mgbPdc8LTZ&?|#=YY4**0r_X z-eBT`x_=T=%4b8^lX8yZ*vZW>8+RP@Ml;HoPYRl_Ol79xavws`6&=@J5tWo{4ZvaM z?Z}B@`=k}xl(A2yT@yz);pvGnxpz__q4*MaaD*=K1F!lbY4bm9&hua$J|5lJas>ok zzb>;eXTCy9#;qSH0#{f5v?e}ZH($5gjHrhAb-~jf#_#De{kdVIjsCTs$HD|5vlHg6 zExB@N+hM(%0qJ_5I!g6@LcSlko!;{IWi3S;9R~i(D=}rnBIR1??#~jxA29y;r;ANK zN$5Fk)Vz91))Ozg0;MaPXG^b%Tf?nfr(h4o#d4U&I6PB(yn0HkjO=q z(-Z1>amLr{LS5EMpO}V<*zvw@m`?bDVq*EYl3C{JBJ%WfX|o^YIa*?FmReetkeQiN zjBkEudrELpXE3H`aBxC!VnQgUYFKD{C}vr3C!!%sdrQn|X#*bAz$}Y~Q50OgiaDgf42Qffn~d+|qPeIq?D}B%&G@>EVp4(tn9oQK?Mw&`4Gm352@UNO zbPXNwaut#H=E~uf+hDHUk>@j#f%MN>J?nj@9Mp|(AHzL)w9(#xLDmU-sF!P95CteSp^F~Gwe1{_scBHW_~2d zQRXOC1h5_BC<3 z`gXT5jv(XvU*6vVzuNZY+x_?a!`AVqDP^NC&Ux6JvpHuwRek%)ME!^0Ze041J$(7A z;jfeet&U5_wn=JWY*Q@9!@2dvxpPt+EIn5?*>VmSWK%CGFH@T|91l&|=x^h3c-eeM z=WX4Zp>25G*59O0ZO%EodJ!{z4i|9n?N6?WaZKKA_3cZ_D?{5r)!Kx29Wx&H;e-;J zF+RWyFeckBKi4owGSaVrGQSf-S9TztK zyYU^1cYV3o#GI|~(aOmGRVaKns3CCVwBjSlnv`pYF6Up|eYW#zY{-JGs& zh4dGUnkhrQ!*-IKMGI*6AI_v#PVX}^wIm(y#cQ*1hUPb!X}4ZS(s0V(h6>w!7LGQh zZcg#0^(HN7i_bgyZVi{PsaB{NMkuB@Is?+Hp*@2UXz{DYq%FSkWN-{#v1CLh(IL|J8E`SGx6v zlE~=?L5kqrq|T14T|Pc+ANE3a?r2LxdAZCqU+=L+N~+uN-!7!`^xpm3q~Kl3hn+jN za3R|VpAKmml$P6H{G5Drcn)fzSNzVVbhZx%YuS}0s)B4F8_t97LAO7)2@~^$I zqN}VG?|&Bbr_at;GMbELOB>Ii2^b0kZ|Eo(|2VU1tD7WJm8mKwt2f7H6lCX!v%HNv zyLMjaNL+2}(j1yQoe-Y$`lVGGzs=nS-3FJGuA`~~MX-oxDQ^#rx3tR%rq((XE#LXh zjEn1%sx0NX>ZBR=IR#1aJX=Ph$b$#X{~%XO15LM{>)J(O!7e-tcmuF4#lm2LD}GVh zYc@IR_QZ^-(ws}}c!*ujH)M-w7vXVQ{^XB7wTAO)+seLqvp&;ZE$yxa$MY--lrG+z zuTSv`Exu;!8k(=^Hn^g>=HL~aQ*XJ+v(=yDGTMs2qHro&Wb*Cfr)yNbY6_Q^9*c^-e0B zswwl11!}TCeG-S-S1%jdAF}_W%_UNam&D}06+ca3D4{3Q#wjaWS})D(;OahTVhfE_ zrK(xU#-KScR5~2!oC*WdWwd{XZvY&2U8Nh!;l#w-+Hxa;l#wl-g|_sP2x8=It+4S4 zsOqQ%HJERA5mirAL&1!>V6l;Sm7t=}(YHA$JvfenUK@bQf$%)wjYDxmahjp{#Q0cD z0}=`~Ugy|qO1Er@-Xc_XuQKX3X;vzI5HAr7dCU=DGsy)St}NgoU>ZYm6X4Cz4GiEQ zr9qNXMn?wJ7h$R=>GSV4&}KxkdyXfxPqngp74tw$jDSkaK0r}etFmijex%;>Y`xKV zqXkDO|2nzZz+aoU1hDahWd1(&`8)a-wu{fc8DEm3*jl7dAT^OtI7nvUe~n4&i!e5r zj&JHDxzXFfoWm-kn@cODl@3UfbmC}WZGKa@B24MMb?Ha(@u5Bl!9wCcBIHe(UO~!c zC3u{O{ZJ}nYyY@L6wGdeFiG9R-RM>?eTqOXsSZ@~)5ZHEdv# 
zxUnK#wix~U73kD|2s9Jb<$Wmih7&H>DK5bkr8!Xlm~ov))npjcl<7$6Af3$N6q1-|+jbK(uHXwYgGbGl z&vDPB+p$2)QCMpi1ftg)q+G)>#ZccHN7V;JUzC@wg=tyLDEz29yLB-g&_|4^U+oaJ zfQLGgV(6{3dWtJ{+qJ-o559f2Wo4E(JQ<)sTQSROc~X{YZhvfH!4k` z3mm8d-g(dSF0G}7reu5{`-B^_fM3C&{WX75LlP03nl=eYrsH0E<|E2SkaDfeE2$e@ zi&v1}IHIfmiB0*E;w8lpTNnXCt>H`?N`|rnUxQxY!D5U&5se^C#DH`d8Xl7u%(m#T zP!h1&fWm+dTPtT!$3!`lynU*=o!r}Qfnkot{3EMkZNL?}2bwYxNt3XK9jlpWzG^cB zttMksGBiM&3E5z4MvH+CQB;8d{AutF=Z2oSMdakq{TvHgj|O5Z|1G{^vz%W}07w#! zz${is#i3m^FUi5wCf|hO@HRI?(iN?ui`7?|nL7E*Y*8z?o|!k4?zvz~Ev09K8Mu*= z1sGY{C#KS6#>QsHAreTINbPbNBlzo3=8eXioHRCvS3^b!W&IQ2W3U}Z&wFrK=|F?D z3opoMfOgWnnR)c)Db3o2dAzbRU1njHO;cuJfnB17g~;+s(4a~OCYDE~WV6AnJlz05 zMds{)QGynEGYBHochpp;FG7kH*Qk$}Y&4V8x17<|HtO>UGQOh=&zjo(YyDS=f8V}b zkiS}VeZi+Izjf$1a2|%IikPH2X_;?YL4VW9?Y$*PT1l2`k5*Oe{`t^cz_$XqYbe@N zeW-=Qn+I-qzy5mQpz6?+0W`ff`J^bfPbm7MuW%eV4jQO7Y*$~iC7H6YUwDr+pK9Aq zp^j-j+R%~F$xoNRn53Kq{u97lr(th^*T+!CFh7-o2qv2oBO(GmU-=k^qg(R0ss`Y1 zN9{Zh)TzJT;!nO0*8ih_;dNns1WiWBZbmLgb=gfvY_iy{OH;NHo<&dqCCiBp(` zRX0d@+J)apU_de1%+W@XlDryoHrvhTh=|bfR5K1|$_yv4)DWinKi(W!FrYKr;-Uss z9WMyRIvL*|ea;%P8n^RD-^^d!7l<|pr8^c7cvxU>RQAm||SLU;^L z7*)}3p;&5-n_#CWuaf2u&pC$j&r)j~(m@CuQbIZmYFI_LtFp-v%*IA0`TIP7^m5lV z60tPs(gvKLFsYC*x3q_r?{6+JBAI9e71 zG8>o~dNTuaEkwcNF{8NRKPd{fE14xSYPZSd7_tFAj8h*s%mP)|n7&szQ#rdE5B*s6 zal`bXZ3Qbw8f&n08659nEiICRnu0b4hqO-8(i=w~r=r5qPeu+lnY*)j_$cpGSRB19P0Ly_v_k`K zZV#j#(4K3#Xn#0L<)_e9R%F$~3Thsdk#XW&ln3Tft3E}2d zXw@%YBG^b&P@$t(Oo&<kqI-P0Lhe>M;hcGJ!2DDKyTpq!l8oYu+UsDsQ_l+e&rP-^g5fb^n zAX$txlE_zxx;r^(85SUU44N(GKF96R142^zLtU_~hyS#-oV|ay!MEt}V%*NE%P%#m(D{jh1Ah@J=NiPJidO1$X&ikAwVv5B_+>03a z6<;wpGRS|}G;!Up7YbR&P^#DZAEAHW82j%(`I8;{jxR%MLIhRdhV@wH>qUa-#XTtc z+jnV7yTp*(GG9_Owz#ona>GFMu($(9*&H9rg2D=d?jN9s)hV7NDk}|P(fL+lZMO7H=2+KBZXX?fd)k5s5h45S^NtY{Ha9SW}5mxU1M2VeN zgFd!gG@K&5y7L^O{MPP7eUj+!Fi+oLL#5K?4AgO}svyKUcTRsaR88mZ%=>ZYsehLN zzr`wfv_P*{PHYRm*8aY(z`oX&zODeJVjkDejLY}sj^MvJ*Uy}H1itc#l9ZMc%b}y= zP5#SkO>OCI#zEJ)&GWcQ4tH}DTd%;;I5 z_Y%rgR1Ec_ph^8oX6tb4@-|zCj@t8a_0dDwwGCT-iP7I zj5}6^>5Voy#NePTf<{%~_p~<)soB6vq-S}(=|gV!$?j1^a7;@y=VgR~r>*Ypft>IL&gNtBGp_`c#d zYUl5}Z^oA#1i+!ozU%n1@d`502S*-45(!hcv`gkoWazLCAp7aYe71j#xmzKCh#qc! 
[GIT binary patch: base85-encoded literal data omitted (binary file contents, not human-readable)]

literal 0
HcmV?d00001

diff --git a/samples/python/audio-transcription/requirements.txt b/samples/python/audio-transcription/requirements.txt new file mode 100644 index 00000000..c79aa6dd
--- /dev/null +++ b/samples/python/audio-transcription/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/audio-transcription/src/app.py b/samples/python/audio-transcription/src/app.py new file mode 100644 index 00000000..20f9be04 --- /dev/null +++ b/samples/python/audio-transcription/src/app.py @@ -0,0 +1,39 @@ +# +# +import sys +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +# +# Initialize the Foundry Local SDK +config = Configuration(app_name="foundry_local_samples") +FoundryLocalManager.initialize(config) +manager = FoundryLocalManager.instance + +# Load the whisper model for speech-to-text +model = manager.catalog.get_model("whisper-tiny") +model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True, + ) +) +print() +model.load() +print("Model loaded.") +# + +# +# Get the audio client and transcribe +audio_client = model.get_audio_client() +audio_file = sys.argv[1] if len(sys.argv) > 1 else "Recording.mp3" +result = audio_client.transcribe(audio_file) +print("Transcription:") +print(result.text) +# + +# Clean up +model.unload() +# diff --git a/samples/python/functioncalling/README.md b/samples/python/functioncalling/README.md deleted file mode 100644 index 71048eae..00000000 --- a/samples/python/functioncalling/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# Foundry Local Function Calling Configuration Guide - -This guide walks you through enabling function calling support in Foundry Local with Phi-4-mini. - -## Prerequisites - -- Foundry Local version 0.5.100 or higher -- Access to modify model configuration files - -## Setup Instructions - -### Step 1: Install Foundry Local - -Ensure you have Foundry Local version 0.5.100 or higher installed on your system. - -### Step 2: Configure Phi-4-mini Chat Template - -Replace the existing **inference_model.json** file for Phi-4-mini with the following configuration: - -```json -{ - "Name": "Phi-4-mini-instruct-generic-cpu", - "PromptTemplate": { - "system": "<|system|>{Content}<|tool|>{Tool}<|/tool|><|end|>", - "user": "<|user|>{Content}<|end|>", - "assistant": "<|assistant|>{Content}<|end|>", - "tool": "<|tool|>{Tool}<|/tool|>", - "prompt": "<|system|> You are a helpful assistant with these tools. If you decide to call functions:\n* prefix function calls with functools marker (no closing marker required)\n* all function calls should be generated in a single JSON list formatted as functools[{\"name\": [function name], \"arguments\": [function arguments as JSON]}, ...]\n * follow the provided JSON schema. Do not hallucinate arguments or values. Do not blindly copy values from the provided samples\n * respect the argument type formatting. E.g., if the type is number and format is float, write value 7 as 7.0\n * make sure you pick the right functions that match the user intent<|end|><|user|>{Content}<|end|><|assistant|>" - } -} -``` - -### Step 3: Restart Foundry Service - -Execute the following command in your terminal to restart the Foundry service: - -```bash -foundry service restart -``` - -### Step 4: Test the Configuration - -Run the provided [Notebook](./fl_tools..ipynb) to test and validate the function calling functionality. 
- -## Related Resources - -- **Test Notebook**: [fl_tools.ipynb](./fl_tools..ipynb) - -## Notes - -- The configuration enables proper function calling syntax with the `functools` marker -- Ensure all JSON formatting rules are followed when the model generates function calls -- The system prompt includes specific instructions for proper function argument handling \ No newline at end of file diff --git a/samples/python/functioncalling/fl_tools.ipynb b/samples/python/functioncalling/fl_tools.ipynb deleted file mode 100644 index 0f9c76ed..00000000 --- a/samples/python/functioncalling/fl_tools.ipynb +++ /dev/null @@ -1,362 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "1fd99963", - "metadata": {}, - "source": [ - "# Function Calling Examples with Foundry Local\n", - "\n", - "This notebook demonstrates how to use function calling capabilities with Foundry Local SDK and OpenAI API.\n", - "\n", - "## Package Installation\n", - "\n", - "Install the OpenAI package for API communication:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "537d41f7", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: openai in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (1.93.0)\n", - "Requirement already satisfied: anyio<5,>=3.5.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (4.9.0)\n", - "Requirement already satisfied: distro<2,>=1.7.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (1.9.0)\n", - "Requirement already satisfied: httpx<1,>=0.23.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (0.28.1)\n", - "Requirement already satisfied: jiter<1,>=0.4.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (0.10.0)\n", - "Requirement already satisfied: pydantic<3,>=1.9.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (2.11.7)\n", - "Requirement already satisfied: sniffio in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (1.3.1)\n", - "Requirement already satisfied: tqdm>4 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (4.67.1)\n", - "Requirement already satisfied: typing-extensions<5,>=4.11 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from openai) (4.14.0)\n", - "Requirement already satisfied: idna>=2.8 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from anyio<5,>=3.5.0->openai) (3.10)\n", - "Requirement already satisfied: certifi in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpx<1,>=0.23.0->openai) (2025.6.15)\n", - "Requirement already satisfied: httpcore==1.* in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpx<1,>=0.23.0->openai) (1.0.9)\n", - "Requirement already satisfied: h11>=0.16 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.16.0)\n", - "Requirement already satisfied: annotated-types>=0.6.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic<3,>=1.9.0->openai) (0.7.0)\n", - "Requirement already satisfied: 
pydantic-core==2.33.2 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic<3,>=1.9.0->openai) (2.33.2)\n", - "Requirement already satisfied: typing-inspection>=0.4.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic<3,>=1.9.0->openai) (0.4.1)\n", - "Requirement already satisfied: colorama in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from tqdm>4->openai) (0.4.6)\n" - ] - } - ], - "source": [ - "! pip install openai" - ] - }, - { - "cell_type": "markdown", - "id": "6ee98d72", - "metadata": {}, - "source": [ - "Install the Foundry Local SDK for local model management:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "810ac3f4", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Requirement already satisfied: foundry-local-sdk in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (0.3.1)\n", - "Requirement already satisfied: httpx in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from foundry-local-sdk) (0.28.1)\n", - "Requirement already satisfied: pydantic>=2.0.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from foundry-local-sdk) (2.11.7)\n", - "Requirement already satisfied: tqdm in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from foundry-local-sdk) (4.67.1)\n", - "Requirement already satisfied: annotated-types>=0.6.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic>=2.0.0->foundry-local-sdk) (0.7.0)\n", - "Requirement already satisfied: pydantic-core==2.33.2 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic>=2.0.0->foundry-local-sdk) (2.33.2)\n", - "Requirement already satisfied: typing-extensions>=4.12.2 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic>=2.0.0->foundry-local-sdk) (4.14.0)\n", - "Requirement already satisfied: typing-inspection>=0.4.0 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from pydantic>=2.0.0->foundry-local-sdk) (0.4.1)\n", - "Requirement already satisfied: anyio in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpx->foundry-local-sdk) (4.9.0)\n", - "Requirement already satisfied: certifi in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpx->foundry-local-sdk) (2025.6.15)\n", - "Requirement already satisfied: httpcore==1.* in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpx->foundry-local-sdk) (1.0.9)\n", - "Requirement already satisfied: idna in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpx->foundry-local-sdk) (3.10)\n", - "Requirement already satisfied: h11>=0.16 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from httpcore==1.*->httpx->foundry-local-sdk) (0.16.0)\n", - "Requirement already satisfied: sniffio>=1.1 in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from anyio->httpx->foundry-local-sdk) (1.3.1)\n", - "Requirement already satisfied: colorama in c:\\users\\kinfeylo\\appdata\\local\\miniforge3\\envs\\pydev\\lib\\site-packages (from tqdm->foundry-local-sdk) (0.4.6)\n" - ] - } - ], - 
"source": [ - "! pip install foundry-local-sdk" - ] - }, - { - "cell_type": "markdown", - "id": "abe07aeb", - "metadata": {}, - "source": [ - "## Setup and Configuration\n", - "\n", - "Import the FoundryLocalManager for managing local models:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "b21785a2", - "metadata": {}, - "outputs": [], - "source": [ - "from foundry_local import FoundryLocalManager" - ] - }, - { - "cell_type": "markdown", - "id": "9335da67", - "metadata": {}, - "source": [ - "Define the model alias that will be used throughout this example:" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "503f23fa", - "metadata": {}, - "outputs": [], - "source": [ - "alias = \"phi-4-mini\"" - ] - }, - { - "cell_type": "markdown", - "id": "5a9b1ecf", - "metadata": {}, - "source": [ - "Create a FoundryLocalManager instance using the specified model alias:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "804611d5", - "metadata": {}, - "outputs": [], - "source": [ - "manager = FoundryLocalManager(alias)" - ] - }, - { - "cell_type": "markdown", - "id": "c81e6f38", - "metadata": {}, - "source": [ - "Import the OpenAI library for API interactions:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "1ab277b9", - "metadata": {}, - "outputs": [], - "source": [ - "import openai" - ] - }, - { - "cell_type": "markdown", - "id": "e8a95a1b", - "metadata": {}, - "source": [ - "Create an OpenAI client using the local endpoint and API key from the manager:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "e837fa0a", - "metadata": {}, - "outputs": [], - "source": [ - "client = openai.OpenAI(\n", - " base_url=manager.endpoint,\n", - " api_key=manager.api_key # API key is not required for local usage\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "ac745238", - "metadata": {}, - "source": [ - "## Function Definitions\n", - "\n", - "Define the available tools/functions for the AI model. 
This includes flight booking and hotel booking functions:" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "bea7d21e", - "metadata": {}, - "outputs": [], - "source": [ - "tool_list = '[{\"name\": \"booking_flight_tickets\", \"description\": \"booking flights\", \"parameters\": {\"origin_airport_code\": {\"description\": \"The name of Departure airport code\", \"type\": \"string\"}, \"destination_airport_code\": {\"description\": \"The name of Destination airport code\", \"type\": \"string\"}, \"departure_date\": {\"description\": \"The date of outbound flight\", \"type\": \"string\"}, \"return_date\": {\"description\": \"The date of return flight\", \"type\": \"string\"}}}, {\"name\": \"booking_hotels\", \"description\": \"booking hotel\", \"parameters\": {\"destination\": {\"description\": \"The name of the city\", \"type\": \"string\"}, \"check_in_date\": {\"description\": \"The date of check in\", \"type\": \"string\"}, \"checkout_date\": {\"description\": \"The date of check out\", \"type\": \"string\"}}}]'" - ] - }, - { - "cell_type": "markdown", - "id": "3d255cf1", - "metadata": {}, - "source": [ - "## Parallel Function Support" - ] - }, - { - "cell_type": "markdown", - "id": "c98886fc", - "metadata": {}, - "source": [ - "Create a chat completion request that will trigger multiple function calls (flight booking and hotel booking) in parallel:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "8e616290", - "metadata": {}, - "outputs": [], - "source": [ - "stream = client.chat.completions.create(\n", - " model=manager.get_model_info(alias).id,\n", - " messages=[{\"role\": \"user\", \"content\": \"book flight ticket from Beijing to Paris(using airport code) in 2025-12-04 to 2025-12-10 , then book hotel from 2025-12-04 to 2025-12-10 in Paris\"}],\n", - " tools=[{\"name\": \"booking_flight_tickets\", \"description\": \"booking flights\", \"parameters\": {\"origin_airport_code\": {\"description\": \"The name of Departure airport code\", \"type\": \"string\"}, \"destination_airport_code\": {\"description\": \"The name of Destination airport code\", \"type\": \"string\"}, \"departure_date\": {\"description\": \"The date of outbound flight\", \"type\": \"string\"}, \"return_date\": {\"description\": \"The date of return flight\", \"type\": \"string\"}}}, {\"name\": \"booking_hotels\", \"description\": \"booking hotel\", \"parameters\": {\"destination\": {\"description\": \"The name of the city\", \"type\": \"string\"}, \"check_in_date\": {\"description\": \"The date of check in\", \"type\": \"string\"}, \"checkout_date\": {\"description\": \"The date of check out\", \"type\": \"string\"}}}],\n", - " temperature=0.00001,\n", - " max_tokens=4096,\n", - " top_p = 1.0,\n", - " stream=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "3b8840e8", - "metadata": {}, - "source": [ - "Process and display the streaming response from the model:" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "7f96f7c2", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "functools[{\"name\": \"booking_flight_tickets\", \"arguments\": {\"origin_airport_code\": \"PEK\", \"destination_airport_code\": \"CDG\", \"departure_date\": \"2025-12-04\", \"return_date\": \"2025-12-10\"}}, {\"name\": \"booking_hotels\", \"arguments\": {\"destination\": \"Paris\", \"check_in_date\": \"2025-12-04\", \"checkout_date\": \"2025-12-10\"}}]" - ] - } - ], - "source": [ - "for chunk in stream:\n", - " if 
chunk.choices[0].delta.content is not None:\n", - " print(chunk.choices[0].delta.content, end=\"\", flush=True)" - ] - }, - { - "cell_type": "markdown", - "id": "3589be65", - "metadata": {}, - "source": [ - "## Single Function Support" - ] - }, - { - "cell_type": "markdown", - "id": "def59c2b", - "metadata": {}, - "source": [ - "Create a chat completion request for a single function call (weather inquiry). Note: This example shows a different format for defining tools:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "id": "2d5ed823", - "metadata": {}, - "outputs": [], - "source": [ - "stream = client.chat.completions.create(\n", - " model=manager.get_model_info(alias).id,\n", - " messages=[{\"role\": \"user\", \"content\": \"What is the weather today in Paris?\"}],\n", - " tools=[\n", - " {\n", - " \"function\": {\n", - " \"name\": \"get_current_weather\",\n", - " \"arguments\": {\n", - " \"format\": \"celsius\",\n", - " \"location\": \"Paris\"\n", - " }\n", - " }\n", - " }\n", - " ],\n", - " temperature=0.00001,\n", - " max_tokens=4096,\n", - " top_p = 1.0,\n", - " stream=True\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "2907e462", - "metadata": {}, - "source": [ - "Process and display the streaming response for the single function call:" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "e8ef8b66", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "functools[{\"name\": \"get_current_weather\", \"arguments\": {\"format\": \"celsius\", \"location\": \"Paris\"}}]" - ] - } - ], - "source": [ - "for chunk in stream:\n", - " if chunk.choices[0].delta.content is not None:\n", - " print(chunk.choices[0].delta.content, end=\"\", flush=True)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "pydev", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.10" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/samples/python/hello-foundry-local/README.md b/samples/python/hello-foundry-local/README.md deleted file mode 100644 index c7753a88..00000000 --- a/samples/python/hello-foundry-local/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# Sample: Hello Foundry Local! - -This is a simple example of how to use the Foundry Local SDK to run a model locally and make requests to it. The example demonstrates how to set up the SDK, initialize a model, and make a request to the model. - -Install the Foundry Local SDK and OpenAI packages using pip: - -```bash -pip install foundry-local-sdk openai -``` - -> [!TIP] -> We recommend using a virtual environment to manage your Python packages using `venv` or `conda` to avoid conflicts with other packages. - -Run the application using Python: - -```bash -python src/app.py -``` diff --git a/samples/python/hello-foundry-local/src/app.py b/samples/python/hello-foundry-local/src/app.py deleted file mode 100644 index 8bd21c62..00000000 --- a/samples/python/hello-foundry-local/src/app.py +++ /dev/null @@ -1,33 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. - -import openai -from foundry_local import FoundryLocalManager - -# By using an alias, the most suitable model will be downloaded -# to your end-user's device. 
-alias = "qwen2.5-coder-0.5b" - -# Create a FoundryLocalManager instance. This will start the Foundry -# Local service if it is not already running and load the specified model. -manager = FoundryLocalManager(alias) - -# The remaining code uses the OpenAI Python SDK to interact with the local model. - -# Configure the client to use the local Foundry service -client = openai.OpenAI( - base_url=manager.endpoint, - api_key=manager.api_key, # API key is not required for local usage -) - -# Set the model to use and generate a streaming response -stream = client.chat.completions.create( - model=manager.get_model_info(alias).id, - messages=[{"role": "user", "content": "What is the golden ratio?"}], - stream=True, -) - -# Print the streaming response -for chunk in stream: - if chunk.choices[0].delta.content is not None: - print(chunk.choices[0].delta.content, end="", flush=True) diff --git a/samples/python/langchain-integration/requirements.txt b/samples/python/langchain-integration/requirements.txt new file mode 100644 index 00000000..0ded700a --- /dev/null +++ b/samples/python/langchain-integration/requirements.txt @@ -0,0 +1,4 @@ +foundry-local-sdk +openai +langchain-openai +langchain-core diff --git a/samples/python/langchain-integration/src/app.py b/samples/python/langchain-integration/src/app.py new file mode 100644 index 00000000..1dd00224 --- /dev/null +++ b/samples/python/langchain-integration/src/app.py @@ -0,0 +1,59 @@ +# +# +from foundry_local_sdk import Configuration, FoundryLocalManager +from langchain_openai import ChatOpenAI +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.output_parsers import StrOutputParser +# + +# +# Initialize the Foundry Local SDK +config = Configuration(app_name="foundry_local_samples") +FoundryLocalManager.initialize(config) +manager = FoundryLocalManager.instance + +# Load a model +model = manager.catalog.get_model("qwen2.5-0.5b") +model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True, + ) +) +print() +model.load() +print("Model loaded.") + +# Start the web service to expose an OpenAI-compatible endpoint +manager.start_web_service() +base_url = f"{manager.urls[0]}/v1" +# + +# +# Create a LangChain ChatOpenAI instance pointing to the local endpoint +llm = ChatOpenAI( + base_url=base_url, + api_key="none", + model=model.id, +) +# + +# +# Create a translation chain +prompt = ChatPromptTemplate.from_messages([ + ("system", "You are a translator. Translate the following text to {language}. 
Only output the translation, nothing else."), + ("user", "{text}") +]) + +chain = prompt | llm | StrOutputParser() + +# Run the chain +result = chain.invoke({"language": "Spanish", "text": "Hello, how are you today?"}) +print(f"Translation: {result}") +# + +# Clean up +model.unload() +manager.stop_web_service() +# diff --git a/samples/python/native-chat-completions/requirements.txt b/samples/python/native-chat-completions/requirements.txt new file mode 100644 index 00000000..c79aa6dd --- /dev/null +++ b/samples/python/native-chat-completions/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/native-chat-completions/src/app.py b/samples/python/native-chat-completions/src/app.py new file mode 100644 index 00000000..ca087b77 --- /dev/null +++ b/samples/python/native-chat-completions/src/app.py @@ -0,0 +1,54 @@ +# +# +import asyncio +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +async def main(): + # + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load a model from the catalog + model = manager.catalog.get_model("qwen2.5-0.5b") + model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True, + ) + ) + print() + model.load() + print("Model loaded and ready.") + + # Get a chat client + client = model.get_chat_client() + # + + # + # Create the conversation messages + messages = [ + {"role": "user", "content": "What is the golden ratio?"} + ] + + # Stream the response token by token + print("Assistant: ", end="", flush=True) + for chunk in client.complete_streaming_chat(messages): + content = chunk.choices[0].delta.content + if content: + print(content, end="", flush=True) + print() + # + + # Clean up + model.unload() + print("Model unloaded.") + + +if __name__ == "__main__": + asyncio.run(main()) +# diff --git a/samples/python/summarize/.vscode/launch.json b/samples/python/summarize/.vscode/launch.json deleted file mode 100644 index 62c83dcf..00000000 --- a/samples/python/summarize/.vscode/launch.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "version": "0.2.0", - "configurations": [ - - { - "name": "Python Debugger: Current File with Arguments", - "type": "debugpy", - "request": "launch", - "program": "${file}", - "console": "integratedTerminal", - "args": "\"The quick brown fox jumps over the lazy dog, packing my box with five dozen liquor jugs, and then the dog chased the fox around the corner of the house.\" --text" - } - ] -} diff --git a/samples/python/summarize/README.md b/samples/python/summarize/README.md deleted file mode 100644 index 9fa753d1..00000000 --- a/samples/python/summarize/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Text Summarizer - -A simple command-line utility that uses Foundry Local to generate summaries of text files or direct text input. - -## Setup - -1. Install the required dependencies: - ```bash - pip install -r requirements.txt - ``` - -## Usage - -The utility can be used in two ways: - -1. Summarize a text file: - ```bash - python summarize.py path/to/your/file.txt - ``` - -2. Summarize direct text input: - ```bash - python summarize.py "Your text to summarize here" --text - ``` - -You can also specify which model to use with the `--model` parameter: - ```bash - python summarize.py path/to/your/file.txt --model "your-model-alias" - ``` - -If the specified model is not found, the script will use the first available model. 
- -## Requirements - -- Python 3.6 or higher -- Foundry Local Service -- Required Python packages (see requirements.txt) - diff --git a/samples/python/summarize/requirements.txt b/samples/python/summarize/requirements.txt deleted file mode 100644 index 7b37f256..00000000 --- a/samples/python/summarize/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -openai>=1.0.0 -python-dotenv>=0.19.0 -foundry-local-sdk>=0.3.1 diff --git a/samples/python/summarize/summarize.py b/samples/python/summarize/summarize.py deleted file mode 100644 index c2b00ba7..00000000 --- a/samples/python/summarize/summarize.py +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env python3 - -import sys -import argparse -from openai import OpenAI -from foundry_local import FoundryLocalManager - - -def read_file_content(file_path): - """Read content from a file.""" - try: - with open(file_path, "r", encoding="utf-8") as file: - return file.read() - except Exception as e: - print(f"Error reading file: {e}") - sys.exit(1) - - -def get_summary(text, client, model_name): - """Get summary from OpenAI API.""" - try: - response = client.chat.completions.create( - model=model_name, - messages=[ - { - "role": "system", - "content": "You are a helpful assistant that summarizes text. Provide a concise summary.", - }, - {"role": "user", "content": f"Please summarize the following text:\n\n{text}"}, - ], - ) - return response.choices[0].message.content - except Exception as e: - print(f"Error getting summary from OpenAI: {e}") - sys.exit(1) - - -def main(): - parser = argparse.ArgumentParser(description="Summarize text from a file or string using OpenAI.") - parser.add_argument("input", help="File path or text string to summarize") - parser.add_argument("--text", action="store_true", help="Treat input as direct text instead of a file path") - parser.add_argument("--model", help="Model alias to use for summarization") - args = parser.parse_args() - - fl_manager = FoundryLocalManager() - - fl_manager.start_service() - - model_list = fl_manager.list_cached_models() - - if not model_list: - print("No downloaded models available") - sys.exit(1) - - # Select model based on alias or use first one - if args.model: - selected_model = next((model for model in model_list if model.alias == args.model), None) - if selected_model: - model_name = selected_model.id - else: - model_name = model_list[0].id - print(f"Model alias '{args.model}' not found, using default model: {model_name}") - else: - model_name = model_list[0].id - - print(f"Using model: {model_name}") - - # Initialize OpenAI client - client = OpenAI(base_url=fl_manager.endpoint, api_key=fl_manager.api_key) - - # Get input text - if args.text: - text = args.input - else: - text = read_file_content(args.input) - - # Get and print summary - summary = get_summary(text, client, model_name) - print("\nSummary:") - print("-" * 50) - print(summary) - print("-" * 50) - - -if __name__ == "__main__": - main() diff --git a/samples/python/tool-calling/requirements.txt b/samples/python/tool-calling/requirements.txt new file mode 100644 index 00000000..c79aa6dd --- /dev/null +++ b/samples/python/tool-calling/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/tool-calling/src/app.py b/samples/python/tool-calling/src/app.py new file mode 100644 index 00000000..ac00b023 --- /dev/null +++ b/samples/python/tool-calling/src/app.py @@ -0,0 +1,182 @@ +# +# +import asyncio +import json +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +# +# --- Tool definitions --- +tools = 
[ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city or location" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "Temperature unit" + } + }, + "required": ["location"] + } + } + }, + { + "type": "function", + "function": { + "name": "calculate", + "description": "Perform a math calculation", + "parameters": { + "type": "object", + "properties": { + "expression": { + "type": "string", + "description": ( + "The math expression to evaluate" + ) + } + }, + "required": ["expression"] + } + } + } +] + + +# --- Tool implementations --- +def get_weather(location, unit="celsius"): + """Simulate a weather lookup.""" + return { + "location": location, + "temperature": 22 if unit == "celsius" else 72, + "unit": unit, + "condition": "Sunny" + } + + +def calculate(expression): + """Evaluate a math expression safely.""" + allowed = set("0123456789+-*/(). ") + if not all(c in allowed for c in expression): + return {"error": "Invalid expression"} + try: + result = eval(expression) + return {"expression": expression, "result": result} + except Exception as e: + return {"error": str(e)} + + +tool_functions = { + "get_weather": get_weather, + "calculate": calculate +} +# + + +# +def process_tool_calls(messages, response, client): + """Handle tool calls in a loop until the model produces a final answer.""" + choice = response.choices[0].message + + while choice.tool_calls: + # Convert the assistant message to a dict for the SDK + assistant_msg = { + "role": "assistant", + "content": choice.content, + "tool_calls": [ + { + "id": tc.id, + "type": tc.type, + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in choice.tool_calls + ], + } + messages.append(assistant_msg) + + for tool_call in choice.tool_calls: + function_name = tool_call.function.name + arguments = json.loads(tool_call.function.arguments) + print(f" Tool call: {function_name}({arguments})") + + # Execute the function and add the result + func = tool_functions[function_name] + result = func(**arguments) + messages.append({ + "role": "tool", + "tool_call_id": tool_call.id, + "content": json.dumps(result) + }) + + # Send the updated conversation back + response = client.complete_chat(messages, tools=tools) + choice = response.choices[0].message + + return choice.content +# + + +# +async def main(): + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load a model + model = manager.catalog.get_model("qwen2.5-0.5b") + model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True + ) + ) + print() + model.load() + print("Model loaded and ready.") + + # Get a chat client + client = model.get_chat_client() + + # Conversation with a system prompt + messages = [ + { + "role": "system", + "content": "You are a helpful assistant with access to tools. " + "Use them when needed to answer questions accurately." + }, + { + "role": "user", + "content": "What is the weather in Seattle and what is 42 * 17?" 
+ } + ] + + print("Sending request with tools...") + response = client.complete_chat(messages, tools=tools) + answer = process_tool_calls(messages, response, client) + + print(f"\nAssistant: {answer}") + + # Clean up + model.unload() + print("Model unloaded.") +# + + +if __name__ == "__main__": + asyncio.run(main()) +# diff --git a/samples/python/tutorial-chat-assistant/requirements.txt b/samples/python/tutorial-chat-assistant/requirements.txt new file mode 100644 index 00000000..c79aa6dd --- /dev/null +++ b/samples/python/tutorial-chat-assistant/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/tutorial-chat-assistant/src/app.py b/samples/python/tutorial-chat-assistant/src/app.py new file mode 100644 index 00000000..05fa0bcc --- /dev/null +++ b/samples/python/tutorial-chat-assistant/src/app.py @@ -0,0 +1,71 @@ +# +# +import asyncio +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +async def main(): + # + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load a model from the catalog + model = manager.catalog.get_model("qwen2.5-0.5b") + model.download(lambda progress: print(f"\rDownloading model: {progress:.2f}%", end="", flush=True)) + print() + model.load() + print("Model loaded and ready.") + + # Get a chat client + client = model.get_chat_client() + # + + # + # Start the conversation with a system prompt + messages = [ + { + "role": "system", + "content": "You are a helpful, friendly assistant. Keep your responses " + "concise and conversational. If you don't know something, say so." + } + ] + # + + print("\nChat assistant ready! Type 'quit' to exit.\n") + + # + while True: + user_input = input("You: ") + if user_input.strip().lower() in ("quit", "exit"): + break + + # Add the user's message to conversation history + messages.append({"role": "user", "content": user_input}) + + # + # Stream the response token by token + print("Assistant: ", end="", flush=True) + full_response = "" + for chunk in client.complete_streaming_chat(messages): + content = chunk.choices[0].message.content + if content: + print(content, end="", flush=True) + full_response += content + print("\n") + # + + # Add the complete response to conversation history + messages.append({"role": "assistant", "content": full_response}) + # + + # Clean up - unload the model + model.unload() + print("Model unloaded. 
Goodbye!") + + +if __name__ == "__main__": + asyncio.run(main()) +# diff --git a/samples/python/tutorial-document-summarizer/requirements.txt b/samples/python/tutorial-document-summarizer/requirements.txt new file mode 100644 index 00000000..c79aa6dd --- /dev/null +++ b/samples/python/tutorial-document-summarizer/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/tutorial-document-summarizer/src/app.py b/samples/python/tutorial-document-summarizer/src/app.py new file mode 100644 index 00000000..3a62fe24 --- /dev/null +++ b/samples/python/tutorial-document-summarizer/src/app.py @@ -0,0 +1,78 @@ +# +# +import asyncio +import sys +from pathlib import Path +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +async def summarize_file(client, file_path, system_prompt): + """Summarize a single file and print the result.""" + content = Path(file_path).read_text(encoding="utf-8") + messages = [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": content} + ] + response = client.complete_chat(messages) + print(response.choices[0].message.content) + + +async def summarize_directory(client, directory, system_prompt): + """Summarize all .txt files in a directory.""" + txt_files = sorted(Path(directory).glob("*.txt")) + + if not txt_files: + print(f"No .txt files found in {directory}") + return + + for txt_file in txt_files: + print(f"--- {txt_file.name} ---") + await summarize_file(client, txt_file, system_prompt) + print() + + +async def main(): + # + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load a model from the catalog + model = manager.catalog.get_model("qwen2.5-0.5b") + model.download(lambda p: print(f"\rDownloading model: {p:.2f}%", end="", flush=True)) + print() + model.load() + print("Model loaded and ready.\n") + + # Get a chat client + client = model.get_chat_client() + # + + # + system_prompt = ( + "Summarize the following document into concise bullet points. " + "Focus on the key points and main ideas." + ) + + # + target = sys.argv[1] if len(sys.argv) > 1 else "document.txt" + target_path = Path(target) + # + + if target_path.is_dir(): + await summarize_directory(client, target_path, system_prompt) + else: + print(f"--- {target_path.name} ---") + await summarize_file(client, target_path, system_prompt) + # + + # Clean up + model.unload() + print("\nModel unloaded. 
Done!") + + +if __name__ == "__main__": + asyncio.run(main()) +# diff --git a/samples/python/tutorial-tool-calling/requirements.txt b/samples/python/tutorial-tool-calling/requirements.txt new file mode 100644 index 00000000..c79aa6dd --- /dev/null +++ b/samples/python/tutorial-tool-calling/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/tutorial-tool-calling/src/app.py b/samples/python/tutorial-tool-calling/src/app.py new file mode 100644 index 00000000..b26085f6 --- /dev/null +++ b/samples/python/tutorial-tool-calling/src/app.py @@ -0,0 +1,187 @@ +# +# +import asyncio +import json +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +# +# --- Tool definitions --- +tools = [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city or location" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "Temperature unit" + } + }, + "required": ["location"] + } + } + }, + { + "type": "function", + "function": { + "name": "calculate", + "description": "Perform a math calculation", + "parameters": { + "type": "object", + "properties": { + "expression": { + "type": "string", + "description": ( + "The math expression to evaluate" + ) + } + }, + "required": ["expression"] + } + } + } +] + + +# --- Tool implementations --- +def get_weather(location, unit="celsius"): + """Simulate a weather lookup.""" + return { + "location": location, + "temperature": 22 if unit == "celsius" else 72, + "unit": unit, + "condition": "Sunny" + } + + +def calculate(expression): + """Evaluate a math expression safely.""" + allowed = set("0123456789+-*/(). 
") + if not all(c in allowed for c in expression): + return {"error": "Invalid expression"} + try: + result = eval(expression) + return {"expression": expression, "result": result} + except Exception as e: + return {"error": str(e)} + + +tool_functions = { + "get_weather": get_weather, + "calculate": calculate +} +# + + +# +def process_tool_calls(messages, response, client): + """Handle tool calls in a loop until the model produces a final answer.""" + choice = response.choices[0].message + + while choice.tool_calls: + # Convert the assistant message to a dict for the SDK + assistant_msg = { + "role": "assistant", + "content": choice.content, + "tool_calls": [ + { + "id": tc.id, + "type": tc.type, + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments, + }, + } + for tc in choice.tool_calls + ], + } + messages.append(assistant_msg) + + for tool_call in choice.tool_calls: + function_name = tool_call.function.name + arguments = json.loads(tool_call.function.arguments) + print(f" Tool call: {function_name}({arguments})") + + # Execute the function and add the result + func = tool_functions[function_name] + result = func(**arguments) + messages.append({ + "role": "tool", + "tool_call_id": tool_call.id, + "content": json.dumps(result) + }) + + # Send the updated conversation back + response = client.complete_chat(messages, tools=tools) + choice = response.choices[0].message + + return choice.content +# + + +# +async def main(): + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load a model + model = manager.catalog.get_model("qwen2.5-0.5b") + model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True + ) + ) + print() + model.load() + print("Model loaded and ready.") + + # Get a chat client + client = model.get_chat_client() + + # Conversation with a system prompt + messages = [ + { + "role": "system", + "content": "You are a helpful assistant with access to tools. " + "Use them when needed to answer questions accurately." + } + ] + + print("\nTool-calling assistant ready! Type 'quit' to exit.\n") + + while True: + user_input = input("You: ") + if user_input.strip().lower() in ("quit", "exit"): + break + + messages.append({"role": "user", "content": user_input}) + + response = client.complete_chat(messages, tools=tools) + answer = process_tool_calls(messages, response, client) + + messages.append({"role": "assistant", "content": answer}) + print(f"Assistant: {answer}\n") + + # Clean up + model.unload() + print("Model unloaded. 
Goodbye!") +# + + +if __name__ == "__main__": + asyncio.run(main()) +# diff --git a/samples/python/tutorial-voice-to-text/requirements.txt b/samples/python/tutorial-voice-to-text/requirements.txt new file mode 100644 index 00000000..c79aa6dd --- /dev/null +++ b/samples/python/tutorial-voice-to-text/requirements.txt @@ -0,0 +1 @@ +foundry-local-sdk diff --git a/samples/python/tutorial-voice-to-text/src/app.py b/samples/python/tutorial-voice-to-text/src/app.py new file mode 100644 index 00000000..4174e5ac --- /dev/null +++ b/samples/python/tutorial-voice-to-text/src/app.py @@ -0,0 +1,78 @@ +# +# +import asyncio +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +async def main(): + # + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + # + + # + # Load the speech-to-text model + speech_model = manager.catalog.get_model("whisper-tiny") + speech_model.download( + lambda progress: print( + f"\rDownloading speech model: {progress:.2f}%", + end="", + flush=True, + ) + ) + print() + speech_model.load() + print("Speech model loaded.") + + # Transcribe the audio file + audio_client = speech_model.get_audio_client() + transcription = audio_client.transcribe("meeting-notes.wav") + print(f"\nTranscription:\n{transcription.text}") + + # Unload the speech model to free memory + speech_model.unload() + # + + # + # Load the chat model for summarization + chat_model = manager.catalog.get_model("qwen2.5-0.5b") + chat_model.download( + lambda progress: print( + f"\rDownloading chat model: {progress:.2f}%", + end="", + flush=True, + ) + ) + print() + chat_model.load() + print("Chat model loaded.") + + # Summarize the transcription into organized notes + client = chat_model.get_chat_client() + messages = [ + { + "role": "system", + "content": "You are a note-taking assistant. " + "Summarize the following transcription " + "into organized, concise notes with " + "bullet points.", + }, + {"role": "user", "content": transcription.text}, + ] + + response = client.complete_chat(messages) + summary = response.choices[0].message.content + print(f"\nSummary:\n{summary}") + + # Clean up + chat_model.unload() + print("\nDone. 
Models unloaded.") + # + + +if __name__ == "__main__": + asyncio.run(main()) +# diff --git a/samples/python/web-server/requirements.txt b/samples/python/web-server/requirements.txt new file mode 100644 index 00000000..5a0f14ae --- /dev/null +++ b/samples/python/web-server/requirements.txt @@ -0,0 +1,2 @@ +foundry-local-sdk +openai diff --git a/samples/python/web-server/src/app.py b/samples/python/web-server/src/app.py new file mode 100644 index 00000000..dc554ad9 --- /dev/null +++ b/samples/python/web-server/src/app.py @@ -0,0 +1,59 @@ +# +# +import openai +from foundry_local_sdk import Configuration, FoundryLocalManager +# + +# +# Initialize the Foundry Local SDK +config = Configuration(app_name="foundry_local_samples") +FoundryLocalManager.initialize(config) +manager = FoundryLocalManager.instance + +# Load a model +model = manager.catalog.get_model("qwen2.5-0.5b") +model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True, + ) +) +print() +model.load() +print("Model loaded.") + +# Start the web service to expose an OpenAI-compatible REST endpoint +manager.start_web_service() +base_url = f"{manager.urls[0]}/v1" +# + +# +# Use the OpenAI SDK to connect to the local REST endpoint +client = openai.OpenAI( + base_url=base_url, + api_key="none", +) +# + +# +# Make a chat completion request via the REST API +response = client.chat.completions.create( + model=model.id, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is the golden ratio?"} + ], + stream=True, +) + +for chunk in response: + if chunk.choices[0].delta.content is not None: + print(chunk.choices[0].delta.content, end="", flush=True) +print() +# + +# Clean up +model.unload() +manager.stop_web_service() +# diff --git a/samples/rag/README.md b/samples/rag/README.md deleted file mode 100644 index 2225fd01..00000000 --- a/samples/rag/README.md +++ /dev/null @@ -1,206 +0,0 @@ -# Foundry Local RAG Implementation Guide - -## Overview - -This guide demonstrates how to build a complete offline RAG (Retrieval-Augmented Generation) solution using Foundry Local, combining local embedding models with vector search capabilities for enhanced AI inference on edge devices. - -## Prerequisites - -- **Qdrant**: Local vector database installation -- **.NET 8+**: Runtime environment -- **.NET Interactive Notebook**: For development and testing -- **Foundry Local 0.5.100+**: Local AI model execution platform - -### Hardware Considerations - -- **CPU-only environments**: Use Qwen2.5-0.5b model for optimal performance -- **GPU environments**: Can leverage more powerful models through ONNX Runtime providers - -## What is RAG? - -RAG (Retrieval-Augmented Generation) combines information retrieval with text generation to provide contextually relevant responses. In this implementation, we create a fully offline RAG system that: - -1. **Embeds documents** using local embedding models -2. **Stores vectors** in Qdrant for efficient similarity search -3. **Retrieves relevant context** based on user queries -4. **Generates responses** using Foundry Local's language models - -## Local Embedding Model Setup - -For a complete offline RAG solution, we use ONNX-based embedding models that run locally alongside Foundry Local. The recommended model is JinaAI's [jina-embeddings-v2-base-en](https://huggingface.co/jinaai/jina-embeddings-v2-base-en). - -### Required Files - -Download and place these files in a `./jina/` directory: - -1. 
**ONNX Model**: [model.onnx](https://huggingface.co/jinaai/jina-embeddings-v2-base-en/resolve/main/model.onnx) -2. **Vocabulary**: [vocab.txt](https://huggingface.co/jinaai/jina-embeddings-v2-base-en/resolve/main/vocab.txt) - -## Building RAG with Semantic Kernel - -### 1. Core Dependencies - -```csharp -#r "nuget: Microsoft.SemanticKernel, 1.60.0" -#r "nuget: Microsoft.SemanticKernel.Connectors.Onnx, 1.60.0-alpha" -#r "nuget: Microsoft.SemanticKernel.Connectors.Qdrant, 1.60.0-preview" -#r "nuget: Qdrant.Client, 1.14.1" -``` - -### 2. Kernel Configuration - -```csharp -var builder = Kernel.CreateBuilder(); - -// Local embedding model -builder.AddBertOnnxEmbeddingGenerator("./jina/model.onnx", "./jina/vocab.txt"); - -// Foundry Local chat completion -builder.AddOpenAIChatCompletion( - "qwen2.5-0.5b-instruct-generic-gpu", - new Uri("http://localhost:5273/v1"), - apiKey: "", - serviceId: "qwen2.5-0.5b"); - -var kernel = builder.Build(); -``` - -### 3. Vector Store Service - -The `VectorStoreService` class manages interactions with Qdrant: - -```csharp -public class VectorStoreService -{ - private readonly QdrantClient _client; - private readonly string _collectionName; - - public async Task InitializeAsync(int vectorSize = 768) - { - // Create collection if it doesn't exist - await _client.CreateCollectionAsync(_collectionName, new VectorParams - { - Size = (ulong)vectorSize, - Distance = Distance.Cosine - }); - } - - public async Task UpsertAsync(string id, ReadOnlyMemory embedding, - Dictionary metadata) - { - // Store document chunks with embeddings - } - - public async Task> SearchAsync(ReadOnlyMemory queryEmbedding, - int limit = 3) - { - // Perform similarity search - } -} -``` - -### 4. Document Ingestion - -The `DocumentIngestionService` processes documents into searchable chunks: - -```csharp -public class DocumentIngestionService -{ - public async Task IngestDocumentAsync(string documentPath, string documentId) - { - var content = await File.ReadAllTextAsync(documentPath); - var chunks = ChunkText(content, 300, 60); // 300 words, 60 word overlap - - foreach (var chunk in chunks) - { - var embedding = await _embeddingService.GenerateAsync(chunk); - await _vectorStoreService.UpsertAsync( - id: Guid.NewGuid().ToString(), - embedding: embedding.Vector, - metadata: new Dictionary - { - ["document_id"] = documentId, - ["text"] = chunk, - ["document_path"] = documentPath - }); - } - } -} -``` - -### 5. RAG Query Service - -The `RagQueryService` combines retrieval and generation: - -```csharp -public class RagQueryService -{ - public async Task QueryAsync(string question) - { - // 1. Generate query embedding - var queryEmbedding = await _embeddingService.GenerateAsync(question); - - // 2. Search for relevant chunks - var searchResults = await _vectorStoreService.SearchAsync( - queryEmbedding.Vector, limit: 5); - - // 3. Build context from retrieved chunks - var context = string.Join("", searchResults - .Select(r => r.Payload["text"].ToString())); - - // 4. Generate response using context - var prompt = $"Question: {question}\nContext: {context}"; - var chatHistory = new ChatHistory(); - chatHistory.AddSystemMessage( - "You are a helpful assistant that answers questions based on the provided context."); - chatHistory.AddUserMessage(prompt); - - // 5. 
Stream response from Foundry Local - var fullMessage = string.Empty; - await foreach (var chatUpdate in _chatService.GetStreamingChatMessageContentsAsync(chatHistory)) - { - if (chatUpdate.Content?.Length > 0) - fullMessage += chatUpdate.Content; - } - - return fullMessage ?? "I couldn't generate a response."; - } -} -``` - -## Usage Example - -```csharp -// Initialize services -var vectorStoreService = new VectorStoreService("http://localhost:6334", "", "demodocs"); -await vectorStoreService.InitializeAsync(); - -var documentIngestionService = new DocumentIngestionService(embeddingService, vectorStoreService); -var ragQueryService = new RagQueryService(embeddingService, chatService, vectorStoreService); - -// Ingest a document -await documentIngestionService.IngestDocumentAsync("./foundry-local-architecture.md", "doc1"); - -// Query the RAG system -var answer = await ragQueryService.QueryAsync("What's Foundry Local?"); -Console.WriteLine(answer); -``` - -## Architecture Benefits - -1. **Complete Offline Operation**: No external API dependencies -2. **Edge-Optimized**: Runs efficiently on local hardware -3. **Scalable Vector Search**: Qdrant provides high-performance similarity search -4. **Flexible Model Support**: ONNX Runtime supports multiple hardware providers -5. **Streaming Responses**: Real-time response generation - -## Performance Considerations - -- **Chunk Size**: 300 words with 60-word overlap balances context and performance -- **Vector Dimensions**: 768-dimensional embeddings from jina-embeddings-v2 -- **Search Limit**: Retrieve top 5 most relevant chunks for context -- **Memory Management**: TTL-based model caching in Foundry Local - -This implementation provides a robust foundation for building production-ready RAG applications that run entirely on local infrastructure while maintaining high performance and accuracy. - -***Note***Go to [demo](./rag_foundrylocal_demo.ipynb) diff --git a/samples/rag/foundry-local-architecture.md b/samples/rag/foundry-local-architecture.md deleted file mode 100644 index 6b04f790..00000000 --- a/samples/rag/foundry-local-architecture.md +++ /dev/null @@ -1,116 +0,0 @@ -# Foundry Local Architecture - -Foundry Local is designed to enable efficient, secure, and scalable AI model inference directly on local devices. This article explains the key components of the Foundry Local architecture and how they interact to deliver AI capabilities. - -The benefits of Foundry Local include: - -- **Low Latency**: By running models locally, Foundry Local minimizes the time it takes to process requests and return results. -- **Data Privacy**: Sensitive data can be processed locally without sending it to the cloud, ensuring compliance with data protection regulations. -- **Flexibility**: Foundry Local supports a wide range of hardware configurations, allowing users to choose the best setup for their needs. -- **Scalability**: Foundry Local can be deployed on various devices, from personal computers to powerful servers, making it suitable for different use cases. -- **Cost-Effectiveness**: Running models locally can reduce costs associated with cloud computing, especially for high-volume applications. -- **Offline Capabilities**: Foundry Local can operate without an internet connection, making it ideal for remote or disconnected environments. -- **Integration with Existing Workflows**: Foundry Local can be easily integrated into existing development and deployment workflows, allowing for a smooth transition to local inference. 
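To make the chunking parameters in the RAG guide above concrete, here is a minimal Python sketch of the same word-window strategy its C# `ChunkText` helper implements (300-word chunks with a 60-word overlap). The function name and signature are illustrative, not part of the sample code.

```python
def chunk_text(text: str, chunk_size: int = 300, overlap: int = 60) -> list[str]:
    """Split text into overlapping word-based chunks.

    Mirrors the ChunkText helper from the RAG guide: each chunk holds
    chunk_size words, and consecutive chunks share overlap words so a
    sentence straddling a boundary stays retrievable from at least one chunk.
    """
    words = text.split()
    chunks = []
    step = chunk_size - overlap  # advance 240 words per chunk by default
    for i in range(0, len(words), step):
        chunks.append(" ".join(words[i:i + chunk_size]))
        if i + chunk_size >= len(words):
            break
    return chunks
```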
- -## Key Components - -The key components of the Foundry Local architecture are articulated in the following diagram: - -![Foundry Local Architecture Diagram](../media/architecture/foundry-local-arch.png) - -### Foundry Local Service - -The Foundry Local Service is an OpenAI compatible REST server that provides a standardized interface for interacting with the inference engine and model management. Developers can use this API to send requests, run models, and retrieve results programmatically. - -- **Endpoint**: `http://localhost:PORT/v1` - - Note: The port is dynamically assigned, so check the logs for the correct port. -- **Use Cases**: - - Integrating Foundry Local with custom applications. - - Running models via HTTP requests. - -### ONNX Runtime - -The ONNX runtime is a core component responsible for running AI models. It uses optimized ONNX models to perform inference efficiently on local hardware, such as CPUs, GPUs, or NPUs. - -**Features**: - -- Supports multiple hardware providers (for example: NVIDIA, AMD, Intel) and devices (for example: NPUs, CPUs, GPUs). -- Provides a unified interface for running models on different hardware platforms. -- Best-in-class performance. -- Supports quantized models for faster inference. - -### Model Management - -Foundry Local provides robust tools for managing AI models, ensuring that they're readily available for inference and easy to maintain. Model management is handled through the **Model Cache** and the **Command-Line Interface (CLI)**. - -#### Model Cache - -The model cache is a local storage system where AI models are downloaded and stored. It ensures that models are available for inference without requiring repeated downloads. The cache can be managed using the Foundry CLI or REST API. - -- **Purpose**: Reduces latency by storing models locally. -- **Management Commands**: - - `foundry cache list`: Lists all models stored in the local cache. - - `foundry cache remove `: Deletes a specific model from the cache. - - `foundry cache cd `: Changes the directory where models are stored. - -#### Model Lifecycle - -1. **Download**: Models are downloaded from the Azure AI Foundry model catalog to local disk. -2. **Load**: Models are loaded into the Foundry Local service (and therefore memory) for inference. You can set a TTL (time-to-live) for how long the model should remain in memory (the default is 10 minutes). -3. **Run**: Models are inferenced. -4. **Unload**: Models can be unloaded from the inference engine to free up resources. -5. **Delete**: Models can be deleted from the local cache to free up disk space. - -#### Model Compilation using Olive - -Before models can be used with Foundry Local, they must be compiled and optimized in the [ONNX](https://onnx.ai) format. Microsoft provides a selection of published models in the Azure AI Foundry Model Catalog that are already optimized for Foundry Local. However, you aren't limited to those models - by using [Olive](https://microsoft.github.io/Olive/). Olive is a powerful framework for preparing AI models for efficient inference. It converts models into the ONNX format, optimizes their graph structure, and applies techniques like quantization to improve performance on local hardware. - -**💡 TIP**: To learn more about compiling models for Foundry Local, read [Compile Hugging Face models for Foundry Local](../how-to/compile-models-for-foundry-local.md). 
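Because the service described above speaks the OpenAI wire protocol, any OpenAI client can talk to it directly. The sketch below mirrors the web-server sample added elsewhere in this patch; the port `5273` and the model id are assumptions for illustration, since the real port is assigned dynamically and must be read from the service logs.

```python
import openai

# Port 5273 is a placeholder (the value used by the RAG notebook in this
# repo); Foundry Local assigns the port dynamically, so check the logs.
client = openai.OpenAI(base_url="http://localhost:5273/v1", api_key="none")

# Ask a currently loaded model for a completion; the model id below is
# illustrative -- use `foundry model list` to see what is available.
response = client.chat.completions.create(
    model="qwen2.5-0.5b-instruct-generic-gpu",
    messages=[{"role": "user", "content": "What is Foundry Local?"}],
)
print(response.choices[0].message.content)
```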
- -### Hardware Abstraction Layer - -The hardware abstraction layer ensures that Foundry Local can run on various devices by abstracting the underlying hardware. To optimize performance based on the available hardware, Foundry Local supports: - -- **multiple _execution providers_**, such as NVIDIA CUDA, AMD, Qualcomm, Intel. -- **multiple _device types_**, such as CPU, GPU, NPU. - -### Developer Experiences - -The Foundry Local architecture is designed to provide a seamless developer experience, enabling easy integration and interaction with AI models. - -Developers can choose from various interfaces to interact with the system, including: - -#### Command-Line Interface (CLI) - -The Foundry CLI is a powerful tool for managing models, the inference engine, and the local cache. - -**Examples**: - -- `foundry model list`: Lists all available models in the local cache. -- `foundry model run `: Runs a model. -- `foundry service status`: Checks the status of the service. - -**💡 TIP**: To learn more about the CLI commands, read [Foundry Local CLI Reference](../reference/reference-cli.md). - -#### Inferencing SDK Integration - -Foundry Local supports integration with various SDKs, such as the OpenAI SDK, enabling developers to use familiar programming interfaces to interact with the local inference engine. - -- **Supported SDKs**: Python, JavaScript, C#, and more. - -**💡 TIP**: To learn more about integrating with inferencing SDKs, read [Integrate Foundry Local with Inferencing SDKs](../how-to/integrate-with-inference-sdks.md). - -#### AI Toolkit for Visual Studio Code - -The AI Toolkit for Visual Studio Code provides a user-friendly interface for developers to interact with Foundry Local. It allows users to run models, manage the local cache, and visualize results directly within the IDE. - -- **Features**: - - Model management: Download, load, and run models from within the IDE. - - Interactive console: Send requests and view responses in real-time. - - Visualization tools: Graphical representation of model performance and results. - -## Next Steps - -- [Get started with Foundry Local](../get-started.md) -- [Integrate with Inference SDKs](../how-to/integrate-with-inference-sdks.md) -- [Foundry Local CLI Reference](../reference/reference-cli.md) diff --git a/samples/rag/rag_foundrylocal_demo.ipynb b/samples/rag/rag_foundrylocal_demo.ipynb deleted file mode 100644 index d12cd5d1..00000000 --- a/samples/rag/rag_foundrylocal_demo.ipynb +++ /dev/null @@ -1,1042 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "6729525b", - "metadata": {}, - "source": [ - "# Foundry Local RAG Implementation Guide\n", - "\n", - "This notebook demonstrates how to build a Retrieval-Augmented Generation (RAG) system using Foundry Local with Semantic Kernel, ONNX embeddings, and Qdrant vector database.\n", - "\n", - "## Package Installation\n", - "\n", - "First, we install the required NuGet packages for Semantic Kernel and related components." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "22f573fa", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "

Installed Packages • Microsoft.SemanticKernel, 1.60.0
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "#r \"nuget: Microsoft.SemanticKernel, 1.60.0\"" - ] - }, - { - "cell_type": "markdown", - "id": "87f0b48a", - "metadata": {}, - "source": [ - "### Install Microsoft Semantic Kernel Core Package\n", - "\n", - "Installing the main Semantic Kernel package which provides the core functionality for building AI applications." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "2beb6393", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
Installed Packages • Microsoft.SemanticKernel.Connectors.Onnx, 1.60.0-alpha
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "#r \"nuget: Microsoft.SemanticKernel.Connectors.Onnx, 1.60.0-alpha\"" - ] - }, - { - "cell_type": "markdown", - "id": "41c548be", - "metadata": {}, - "source": [ - "### Install Semantic Kernel ONNX Connector\n", - "\n", - "Installing the ONNX connector package which enables using ONNX models for embeddings generation in Semantic Kernel." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "bc62e7be", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
Installed Packages • Microsoft.SemanticKernel.Connectors.Onnx, 1.60.0-alpha
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "#r \"nuget: Microsoft.SemanticKernel.Connectors.Onnx, 1.60.0-alpha\"" - ] - }, - { - "cell_type": "markdown", - "id": "70bff756", - "metadata": {}, - "source": [ - "### Duplicate ONNX Connector Installation\n", - "\n", - "Note: This is a duplicate installation of the ONNX connector package (same as the previous cell)." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
Installed Packages • Microsoft.SemanticKernel.Connectors.Qdrant, 1.60.0-preview
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "#r \"nuget: Microsoft.SemanticKernel.Connectors.Qdrant, 1.60.0-preview\"" - ] - }, - { - "cell_type": "markdown", - "id": "d21d8590", - "metadata": {}, - "source": [ - "### Install Semantic Kernel Qdrant Connector\n", - "\n", - "Installing the Qdrant connector package to enable vector database operations with Semantic Kernel." - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
Installed Packages • qdrant.client, 1.14.1
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "#r \"nuget: Qdrant.Client, 1.14.1\"" - ] - }, - { - "cell_type": "markdown", - "id": "a887bc53", - "metadata": {}, - "source": [ - "### Install Qdrant Client\n", - "\n", - "Installing the official Qdrant client library for direct communication with the Qdrant vector database." - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "6ab040e4", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "using Microsoft.SemanticKernel;" - ] - }, - { - "cell_type": "markdown", - "id": "d7ab7920", - "metadata": {}, - "source": [ - "## Setup and Configuration\n", - "\n", - "### Import Semantic Kernel\n", - "\n", - "Importing the core Semantic Kernel namespace to access the main functionality." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "e4c08e21", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var builder = Kernel.CreateBuilder();" - ] - }, - { - "cell_type": "markdown", - "id": "c93e70fc", - "metadata": {}, - "source": [ - "### Create Kernel Builder\n", - "\n", - "Creating a kernel builder instance which will be used to configure and build the Semantic Kernel with various services." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "0a0eb9fc", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var embeddModelPath = \"Your Jinaai jina-embeddings-v2-base-en onnx model path\";\n", - "var embedVocab = \"Your Jinaai ina-embeddings-v2-base-en vocab file path\";" - ] - }, - { - "cell_type": "markdown", - "id": "9cf4ae93", - "metadata": {}, - "source": [ - "### Define Embedding Model Paths\n", - "\n", - "Setting up file paths for the JINA embedding model files - the ONNX model file and vocabulary file needed for text embeddings." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "f48625de", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "builder.AddBertOnnxEmbeddingGenerator(embeddModelPath, embedVocab);\n", - "builder.AddOpenAIChatCompletion(\"qwen2.5-0.5b-instruct-generic-gpu\", new Uri(\"http://localhost:5273/v1\"), apiKey: \"\", serviceId: \"qwen2.5-0.5b\");" - ] - }, - { - "cell_type": "markdown", - "id": "d6cf5a34", - "metadata": {}, - "source": [ - "### Configure AI Services\n", - "\n", - "Adding the BERT ONNX embedding generator and OpenAI-compatible chat completion service to the kernel builder. The chat service connects to a local Foundry Local instance running the Qwen2.5 model." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "id": "e5efe8c9", - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var kernel = builder.Build();" - ] - }, - { - "cell_type": "markdown", - "id": "58c210d5", - "metadata": {}, - "source": [ - "### Build the Kernel\n", - "\n", - "Building the final kernel instance with all configured services (embedding generator and chat completion service)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "using Microsoft.SemanticKernel.Embeddings;\n", - "using Microsoft.SemanticKernel.ChatCompletion;\n", - "using Microsoft.Extensions.AI;\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "id": "fb43e167", - "metadata": {}, - "source": [ - "### Import Additional Required Namespaces\n", - "\n", - "Importing namespaces for embeddings, chat completion, and Microsoft Extensions AI functionality." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "using System.Net.Http;" - ] - }, - { - "cell_type": "markdown", - "id": "5f690259", - "metadata": {}, - "source": [ - "### Import HTTP Client\n", - "\n", - "Importing System.Net.Http for HTTP communication capabilities." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "\n", - "using Microsoft.SemanticKernel.Memory;\n", - "using Microsoft.SemanticKernel.Connectors.Qdrant;" - ] - }, - { - "cell_type": "markdown", - "id": "376b8ade", - "metadata": {}, - "source": [ - "### Import Memory and Vector Database Connectors\n", - "\n", - "Importing Semantic Kernel memory functionality and Qdrant connector for vector database operations." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "using Qdrant.Client;\n", - "using Qdrant.Client.Grpc;" - ] - }, - { - "cell_type": "markdown", - "id": "326b2cda", - "metadata": {}, - "source": [ - "### Import Qdrant Client Libraries\n", - "\n", - "Importing the Qdrant client and gRPC libraries for direct communication with the Qdrant vector database." 
- ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "\n", - "public class VectorStoreService\n", - "{\n", - " private readonly QdrantClient _client;\n", - " private readonly string _collectionName;\n", - "\n", - " public VectorStoreService(string endpoint, string apiKey, string collectionName)\n", - " {\n", - " _client = new QdrantClient(new Uri(endpoint));\n", - " _collectionName = collectionName;\n", - " }\n", - "\n", - " public async Task InitializeAsync(int vectorSize = 768)\n", - " {\n", - " try\n", - " {\n", - " await _client.GetCollectionInfoAsync(_collectionName);\n", - " }\n", - " catch\n", - " {\n", - " await _client.CreateCollectionAsync(_collectionName, new VectorParams\n", - " {\n", - " Size = (ulong)vectorSize,\n", - " Distance = Distance.Cosine\n", - " });\n", - " }\n", - " }\n", - "\n", - " public async Task UpsertAsync(string id, ReadOnlyMemory embedding, Dictionary metadata)\n", - " {\n", - " var point = new PointStruct\n", - " {\n", - " Id = new PointId { Uuid = id },\n", - " Vectors = embedding.ToArray(),\n", - " Payload = { }\n", - " };\n", - "\n", - " foreach (var kvp in metadata)\n", - " {\n", - " point.Payload[kvp.Key] = kvp.Value switch\n", - " {\n", - " string s => s,\n", - " int i => i,\n", - " bool b => b,\n", - " _ => kvp.Value.ToString() ?? string.Empty\n", - " };\n", - " }\n", - "\n", - " await _client.UpsertAsync(_collectionName, new[] { point });\n", - " }\n", - "\n", - " public async Task> SearchAsync(ReadOnlyMemory queryEmbedding, int limit = 3)\n", - " {\n", - " var searchResult = await _client.SearchAsync(_collectionName, queryEmbedding.ToArray(), limit: (ulong)limit);\n", - " return searchResult.ToList();\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "499f7d8f", - "metadata": {}, - "source": [ - "## Service Classes\n", - "\n", - "### Vector Store Service Class\n", - "\n", - "This class provides a wrapper around the Qdrant client to handle vector database operations including:\n", - "- Collection initialization with proper vector configuration\n", - "- Upserting vectors with metadata\n", - "- Searching for similar vectors using cosine similarity" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "\n", - "public class RagQueryService\n", - "{\n", - " private readonly IEmbeddingGenerator> _embeddingService;\n", - " private readonly IChatCompletionService _chatService;\n", - " private readonly VectorStoreService _vectorStoreService;\n", - "\n", - " public RagQueryService(\n", - " IEmbeddingGenerator> embeddingService,\n", - " IChatCompletionService chatService,\n", - " VectorStoreService vectorStoreService)\n", - " {\n", - " _embeddingService = embeddingService;\n", - " _chatService = chatService;\n", - " _vectorStoreService = vectorStoreService;\n", - " }\n", - "\n", - " public async Task QueryAsync(string question)\n", - " {\n", - " // return question; // For now, just return the question as a placeholder\n", - " var queryEmbeddingResult = await _embeddingService.GenerateAsync(question);\n", - "// Console.WriteLine(question);\n", - " var queryEmbedding = queryEmbeddingResult.Vector;\n", - " var searchResults = await _vectorStoreService.SearchAsync(queryEmbedding, limit: 5);\n", - "\n", - " 
string str_context = \"\";\n", - " foreach (var result in searchResults)\n", - " {\n", - " if (result.Payload.TryGetValue(\"text\", out var text))\n", - " {\n", - " str_context += text.ToString();\n", - " }\n", - " }\n", - " var prompt = $@\"According to the question {question},, optimize and simplify the content. {str_context}\";\n", - "\n", - "\n", - " var chatHistory = new ChatHistory();\n", - " chatHistory.AddSystemMessage(\"You are a helpful assistant that answers questions based on the provided context.\");\n", - " chatHistory.AddUserMessage(prompt);\n", - "\n", - " var fullMessage = string.Empty;\n", - "\n", - " await foreach (var chatUpdate in _chatService.GetStreamingChatMessageContentsAsync(chatHistory, cancellationToken: default))\n", - " { \n", - " if (chatUpdate.Content is { Length: > 0 })\n", - " {\n", - " fullMessage += chatUpdate.Content;\n", - " }\n", - " }\n", - " return fullMessage ?? \"I couldn't generate a response.\";\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "4fc8eee3", - "metadata": {}, - "source": [ - "### RAG Query Service Class\n", - "\n", - "This service implements the core RAG (Retrieval-Augmented Generation) functionality:\n", - "1. Converts user questions into embeddings\n", - "2. Searches for relevant context from the vector database\n", - "3. Combines the retrieved context with the user question\n", - "4. Generates responses using the chat completion service" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "using System.IO;" - ] - }, - { - "cell_type": "markdown", - "id": "04b2e2e9", - "metadata": {}, - "source": [ - "### Import File I/O\n", - "\n", - "Importing System.IO for file reading operations needed for document ingestion." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "\n", - "public class DocumentIngestionService\n", - "{\n", - " private readonly IEmbeddingGenerator> _embeddingService;\n", - " private readonly VectorStoreService _vectorStoreService;\n", - "\n", - " public DocumentIngestionService(IEmbeddingGenerator> embeddingService, VectorStoreService vectorStoreService)\n", - " {\n", - " _embeddingService = embeddingService;\n", - " _vectorStoreService = vectorStoreService;\n", - " }\n", - "\n", - " public async Task IngestDocumentAsync(string documentPath, string documentId)\n", - " {\n", - " var content = await File.ReadAllTextAsync(documentPath);\n", - " var chunks = ChunkText(content, 300, 60);\n", - "\n", - " for (int i = 0; i < chunks.Count; i++)\n", - " {\n", - " var chunk = chunks[i];\n", - " var embeddingResult = await _embeddingService.GenerateAsync(chunk);\n", - " var embedding = embeddingResult.Vector;\n", - " \n", - " await _vectorStoreService.UpsertAsync(\n", - " id: Guid.NewGuid().ToString(),\n", - " embedding: embedding,\n", - " metadata: new Dictionary\n", - " {\n", - " [\"document_id\"] = documentId,\n", - " [\"chunk_index\"] = i,\n", - " [\"text\"] = chunk,\n", - " [\"document_path\"] = documentPath\n", - " }\n", - " );\n", - " }\n", - " }\n", - "\n", - " private List ChunkText(string text, int chunkSize, int overlap)\n", - " {\n", - " var chunks = new List();\n", - " var words = text.Split(' ', StringSplitOptions.RemoveEmptyEntries);\n", - " \n", - " for (int i = 0; i < words.Length; i += chunkSize - overlap)\n", - " {\n", - " var chunkWords = words.Skip(i).Take(chunkSize).ToArray();\n", - " var chunk = string.Join(\" \", chunkWords);\n", - " chunks.Add(chunk);\n", - " \n", - " if (i + chunkSize >= words.Length)\n", - " break;\n", - " }\n", - " \n", - " return chunks;\n", - " }\n", - "}" - ] - }, - { - "cell_type": "markdown", - "id": "8a5845c7", - "metadata": {}, - "source": [ - "### Document Ingestion Service Class\n", - "\n", - "This service handles the process of ingesting documents into the vector database:\n", - "1. Reads document content from files\n", - "2. Splits text into chunks with configurable size and overlap\n", - "3. Generates embeddings for each chunk\n", - "4. Stores chunks with embeddings and metadata in the vector database" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "\n", - "using Microsoft.SemanticKernel.ChatCompletion;" - ] - }, - { - "cell_type": "markdown", - "id": "1828967e", - "metadata": {}, - "source": [ - "### Additional Chat Completion Import\n", - "\n", - "Additional import for chat completion functionality (note: this might be a duplicate import)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var chatService = kernel.GetRequiredService(serviceKey: \"qwen2.5-0.5b\");\n", - "var embeddingService = kernel.GetRequiredService>>();" - ] - }, - { - "cell_type": "markdown", - "id": "8dfafaac", - "metadata": {}, - "source": [ - "## Initialize Services\n", - "\n", - "### Get Services from Kernel\n", - "\n", - "Retrieving the chat completion service and embedding generator from the configured kernel using their service keys." - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var vectorStoreService = new VectorStoreService(\n", - " \"http://localhost:6334\",\n", - " \"\",\n", - " \"demodocs\");\n", - "\n", - "await vectorStoreService.InitializeAsync();" - ] - }, - { - "cell_type": "markdown", - "id": "9b29fd60", - "metadata": {}, - "source": [ - "### Create and Initialize Vector Store Service\n", - "\n", - "Creating a VectorStoreService instance pointing to a local Qdrant instance and initializing the collection for storing document embeddings." - ] - }, - { - "cell_type": "code", - "execution_count": 22, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var documentIngestionService = new DocumentIngestionService(embeddingService, vectorStoreService);\n", - "var ragQueryService = new RagQueryService(embeddingService, chatService, vectorStoreService);" - ] - }, - { - "cell_type": "markdown", - "id": "fb5a4751", - "metadata": {}, - "source": [ - "### Create Service Instances\n", - "\n", - "Creating instances of the DocumentIngestionService and RagQueryService with the necessary dependencies (embedding service, chat service, and vector store service)." - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var filePath = \"./foundry-local-architecture.md\";\n", - "var fileID = \"3\";" - ] - }, - { - "cell_type": "markdown", - "id": "07b13842", - "metadata": {}, - "source": [ - "## Document Ingestion Demo\n", - "\n", - "### Define Document Information\n", - "\n", - "Setting up the file path and document ID for the Foundry Local architecture document that will be ingested into the vector database." - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "await documentIngestionService.IngestDocumentAsync(filePath, fileID);" - ] - }, - { - "cell_type": "markdown", - "id": "d2c08b5e", - "metadata": {}, - "source": [ - "### Ingest Document into Vector Database\n", - "\n", - "Processing the Foundry Local architecture document by reading its content, chunking it, generating embeddings for each chunk, and storing them in the vector database with metadata." 
- ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var question = \"What's Foundry Local?\";" - ] - }, - { - "cell_type": "markdown", - "id": "e26a25d4", - "metadata": {}, - "source": [ - "## RAG Query Demo\n", - "\n", - "### Define Query Question\n", - "\n", - "Setting up a test question to demonstrate the RAG functionality - asking about what Foundry Local is." - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [], - "source": [ - "var answer = await ragQueryService.QueryAsync(question);" - ] - }, - { - "cell_type": "markdown", - "id": "59a1803c", - "metadata": {}, - "source": [ - "### Execute RAG Query\n", - "\n", - "Running the RAG query which will:\n", - "1. Convert the question to embeddings\n", - "2. Search for relevant context in the vector database\n", - "3. Combine retrieved context with the question\n", - "4. Generate a response using the chat completion service" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": { - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelName": "csharp" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - " Here's a simplified version of the text:\n", - "\n", - "---\n", - "\n", - "**Title:** Introduction to Foundry Local\n", - "\n", - "**Overview:** Foundry Local is a design focused on optimizing AI model inference on local devices. This guide explores the core components of Foundry Local and their interactions.\n", - "\n", - "**Key Components**:\n", - "- Built-in System Platform (OSX)\n", - "- REST Server Framework (API)\n", - "- Local Execution Provider\n", - "- Model Manager\n", - "- Cloud Connectivity Framework\n", - "\n", - "### Foundry Local Services Overview\n", - "\n", - "- Endpoint: http://localhost:PORT/v1 \n", - "- Use Case: Run Models Locally, Access the Local Executor.\n", - "- ONNX Runtime: Utilizes optimized ONNX models to support local inference.\n", - "\n", - "### ONNX Runtime\n", - "\n", - "- Supported by Multiple Providers: NVIDIA, AMD, Intel (supported by OSLC).\n", - "- Provides Unified Interface for All Providers.\n", - "\n", - "### Model Management\n", - "- Model Cache (local storage): Automatically generated when models are downloaded from the OSX platform.\n", - "- TTL for Memory Storage: Determines how long models" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "answer" - ] - }, - { - "cell_type": "markdown", - "id": "be4a335e", - "metadata": {}, - "source": [ - "### Display RAG Response\n", - "\n", - "Displaying the final answer generated by the RAG system, which should contain information about Foundry Local based on the ingested document." 
- ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".NET (C#)", - "language": "C#", - "name": ".net-csharp" - }, - "language_info": { - "name": "polyglot-notebook" - }, - "polyglot_notebook": { - "kernelInfo": { - "defaultKernelName": "csharp", - "items": [ - { - "aliases": [], - "name": "csharp" - } - ] - } - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/samples/rust/Cargo.toml b/samples/rust/Cargo.toml index bdc9ee44..42d1293f 100644 --- a/samples/rust/Cargo.toml +++ b/samples/rust/Cargo.toml @@ -4,5 +4,9 @@ members = [ "tool-calling-foundry-local", "native-chat-completions", "audio-transcription-example", + "tutorial-chat-assistant", + "tutorial-document-summarizer", + "tutorial-tool-calling", + "tutorial-voice-to-text", ] resolver = "2" diff --git a/samples/rust/audio-transcription-example/Recording.mp3 b/samples/rust/audio-transcription-example/Recording.mp3 new file mode 100644 index 0000000000000000000000000000000000000000..deb38418bf5fde82fe380add4a999d513baa9536 GIT binary patch literal 329760 zcmd4330xC*w>Lgn2_#_HLRiFrf{_Y;fg&QZ83&3w34(wqi?)?Mv?OdIi#st0 zDzb`=8W!7XttMen2`G!UQf)IcE&h-)}+YON^auTKI?*A%zo!2*N1LIYgGGnHpDbw&ft|fCY#z$?S?P1vb+h9wcZe=@oA zx2YqOkv+j%*4Ff8W7x-Eel(HN+0F^)2ey3o+Y{*I>Rs=ZSKj}0Zs{h2p5?$cmLwee zG$tp@%=YPbCXJ&%;L=dQn@ByqxG zg(B#F>ThGURTEhs8D{>)BJ~t;!G5pzCx;Uk^*v6uHyvb_84P}vk1&sapcx&fXgJGaJ%WHD?NO(7Lv-0ScfTwk;9o&y?=}6`>(M zwyTqc6vcbM8SZBpc=oz^=b@Fs0QipkfApA!+{#7}9A7DdEFal@J1u#swT!bfk~a9V z(`%KYKXsZ~!Q35U&%c=TAa(kqE9HZ|MR`}M;EM7RK3DDF+0Ux7UUz8Vt7yX^GMC-$ z2X1hR=m#nTxqMD`U$-XaU=caY*3{_$hV>UR>!p3&5-q2%J2|APf0!b>h~CBT53H|1 z?X9bHq}=xQs?rC4c4?Sk(b(~okzBrNi}qA-aNrhgkOlL=L)sBOHlA0yifV0gsCeo~ zjg{*n&L?Z!1*!+L${nb^;yx@`)bEmWE@IpD$rAN_Wu})jC?I=|XXVN%qsYjr=$Ufi zSW)m%IoYB^dM&z3$>VqNc$5-pmsT^RVpW%vh{Ywcbj*%R)gi;>*f8poQgQ|5d1*&I zJ-IxtaH@;gWzj_v2KiW72El?gQP)bso3r9M- zdAz9(ttMT*+*&^K?)hqYwcIvcESFpV`7aMsDZ6y^2;CY=i=Us!5497tH`?2A;|yFo zo~d<}VrZRCZVRQ_Ij#$)z%(T8f9VyJmCpkGZIYrjd-l2Y=$Qj#X z#3Bs@2C5uI4x;&b@f>Q#e>SC>o;QzYG=qL1CtX$sbQhI(U+wQc;OX}^aUcn$@nqqw z#P~7K#qw(60bbXnwZ4T%=Oj|HWR0h@&Zf@%pHkgs6g7VP8hYYz+I-I(~c9N7lEuhaE`| zGrWSWu-lJ#^m*4Fb2lrxCb!0&s-U#I$Eq1ZMLa?JZIvgNRb5)yWY2Y%lTz(XS)^1R ze3>X27GsGbJD$~mkGCbe;5w%K*qAb?@2pyeLQaGYXzr?CZPZx)u4eu+h zAf?C}+8Zt`w2z&(x0`D3jI|eB@KS%Rxcfj(XLNNbVN$(2k%;anIg!%byeAmP-RtXB zyFC44@wJf$@bK_>tURnssZu5A3r9V9)1I(vN|TMA=0;BDOozys8# zfIgOX8xR6;lEeIbL^!@b`Tq|1?#$bV%P)Y8_|J)a^|D4gd#+u;9q0C|2A(@S3Vju% zP#It?4!_+u+gIA(SDBeLFn#%E;H?1Vlx8BYcdJ1@j|=OaIwO{u;q_Yi%@zB3^9}l) z>;@KPO5NgCBBK-Y%ACsGQGBhs9JTE46@mYtO;ovC#Aw~Mw%#8oIL7j8_c+EYh<3@7Z$RFW}&OD`; zKA!iMUszm<8F#tgs>XS5@u#6x$8i>^zk62o!GwbY`WAN{*UVdm_wWoB)62K;A2*tr zaZ{EtAJ3b|AAg);#+z?Dl+$69w`V@fOh=gLe;-7}LFBmAVZ-o=oa1A4c?o?vKa5=) zzV=}2F#I3uEvhhLE$2CGbwmS|j{_fd>gHFU{9(Aa=&~vgn}%(}t;X_OIKD5JnHb~v zZhC|7iv3&w?N;FL0qxLlaz|(7bcK=JjOV!Zedz7S_OTZ^*yDgln!cRA9R1enU(tY@ zIK+K9_&Ez^u%Eqedf(*DpUBe}%_^q@e&z-$T!R;$& zRZi#$kvrNf%ZE)y44r6Qjxv>T%*`>)%VN&QG-!fib(G(^4W|;)PB(tsYLd%5-*)Tw zsDE9wU4=K_+|CnosZ(qq(q2JTH*&cp7BkIYJ2;P;llf;+pOPO|Lf^C4*^oo zdpD)%rHW%4%#o892#`}=#X``;-1+&qQugGTHG{I|SeLoT$V#Dr3~G15R4*aXSd5${ z4>2Q#=rosd(tEqSw76zv&48qe)9X3b_0>airYQj#N{bjWC6IOVK{R!^eCj|c58-N- z(WM%S&R0oAg=ucC(n3)o8pPw+5eY-})%C|s36kPTH&Y7vENONq8ag;jRzZX5AR&(< zXBNOdI*n31k1O=Cv(OsX*B>(>h{Z2Z(PujP8akbBDtX}hNyJxjmi1ZRPu5CCw6}OX zZsIo9j4}~Lw}}?4KY`--{=WPxIurTe!yRw%MMWx=>Z~|XtL;wgY9Aexj9e%wX)Y-# zDQy=gcKfgEf0BoNUDKG7IxyDJxmllDId;`Mta7YD1OG0oGF97j)XPeD%pF}tQz+fV zf)*2T99wur9A{m^BXdo}!PW4eN8!sN-GbGzs^;|aQAKnq(+P7brlBSxE9K(q626S# z7OP&zm(d9(+8J!IoT!Ly7UwJ7OK2j?eyyCChi>MdGSONW^JOb_(8lmGC}e$HwH=pL 
zekQxT+^5xvaxftZm+k&uw+C(7s^9C}zBaV=Mh2}7^_*kVda#TkVvQp zAOQTd*V8VSVoMl$$uJ~q2@>g9WD)>!EtH~9Cfd`_J{5g7DA8VMxgeGhlIC%x*&I2i zT5aGKW@E>COFP>($0e%1df{8K($N1^^;rWB-OO3Qsap9bZ17zDk}xOgI)CcyyL!N* zWDh-xTbihPrllJjWyp;yp!Jw$L8O2~J5c}Ws3wD<+Fij1I`AHjv5R);zOXiQ?fNw)Gvc`QvD%UhmNhq8b$%wXxy0lS zH~vnX&cv>~p!$rc)i8q{UUYIwvvOj!%Ln*6Oq`QErf7A7%a$6lL2f+N5pjL0Bmc(Y z{u^Af(vnP6JBkQW0!qQ;1io-e6{xgOQ)w$FXcTF_ihx+Ofqj)x9i}&gaCzb1_)PVXx6h6Zva5Oj% zTgSD$tb`8wdmgn1_B_XD)#ALPs|Dx@$?iJIhf$L3I?-ag$i>x1Q)B_E#nyW#>gv*O z%%M?J+Pv1q1#-&@V}0?IHb)Ggj-IP9?oIDIaa>>gNSo8TxVqIYH%}ZedbsNC099T3 zlim+fPns z^C4@f0;|ZP`WL@D>-?ZNqBufU?P0`jVw{TJEA%Q6_8*~IX4 zad^l%a}S0))tF?lm}*ShOWRw#fly{)&ZIhh%&?#?dZ&m?BFoAw;kz<*PZ_ODR%Tu- zEJBJ9f|STWU?NZnB*cg&rAdp0L3{GqLf>G`7GFuD?FDd{Ay`UF#jzKbVL9Jk>rY3cR+T>mXXGBMlBcYS-iVHZNfrI}mm* zH+zGx?7QS^)%&>%^vM;ZfoxEq$pgEm+IwXWK*x##RBz3ytW;J?_hOp(&p}aDr?S#g zrcIdIRs)a1rO%0pY~kK^eT%0*_%0t(T(7mFm`mL3>0iI0e)I2?g5T>-k8qvORN-?w+Po}O(*AL)hwsy!lc!rf zb{gi&ue5q(x2|Yi@KfBYIFEvi!MJZ4+3@F# ztSDy8FG3hJHi?8Fnjt>Ia3ax&fwUJmzWcdRMjT{k++X7_10hWcyVZO$1EG@H2u5_G z5*Z8zEX`@`Rj+p;-Rd%-`t><5>}C-HUj$Q;;$Q|6;=~+R%Zd>(!X^|8&5;l$1`VMP zEu2!!40bABMJsk@u$dU2zlT-?O`uO&sX4S$k^d@$&+MK3?+JY9k|tlFSUkE|=WxvDw}G*n(64g-s6S(d5Q5_sXEXFTY@) ztc;XA|G?(GzwrIPkkJ3nhk&DV`7(ce>a*JWSvgH|Y{nP{n^(7FuwP;h(?aXaW7 zB+HigMt;Fh-4n2fU$T?681FG%{q^q@0!!FM7j}(#Iwjuds9lk`Yiw=dXv?jc^e_Da zO+IVsrw}$6UYM-izqXj}DM%mUz5?^c`s(fUFx=eP4tA{7cIz*JbqI7S03$c9I-;%G zqs5H!iAPFgTfrUfhK!$LKKUa)qiX#p7YZ_-)||NzcJ7YdXW-ES@th8lPM_{?d<8#4!|&-zwtV z`G1NpP)oh9z?}ZMF<^RS=l0I+4WAA??!P&uE_FMx{nPEAqG{m%l`fj#lrGZmR5a9S zS!6Cibs{HW4h>KaOKKzs6B;y~EtyY_d8bwmH~L0CvFEm+KDa(s=IwXbv1bOIb_i7F z>~jzWDu)xUa$7)A+f8XYG!31hsr^snrnYCcMn0%MHU$d(D!46Rv*zssdo~Zlk9q4g zKiS~fVdQ{h{X;(w{H;R`y1Z952My6_$F~eR(kvJ-dFW%ufNT4BW}xR$U&BP&gX-F8 zpG@Tg7(QSCxoViwT;=-J8mnquTH$FR@jSgU;Q=5XSUQ?Ky45g>b(OTNJv4H9C@5YO z`d&b2ie23PxMIt7YV2J68#~*+a`U)B%JC~d{YB_pVdHGrb?zyqe`V| zG?yJEd)7?@tLe$(93vVh8XY4N>g%oR!GWeA=V#9v^$)?yS+b5^Fbnk7XF5s?nuvlY zJR>&J(Oin>`B+(i-(|dUPfBxz*}eR8+Aku0;=OCoSLC;+-%HNUO>`K5H0_=CKtH}ZS(9qT%Hz8-ZBOjl0#<66W~l$X%EwfdyNpxVW3}wXxLzTj*OlA}EXj z;4xY_S5PaJI~J_I0$U}rh#s!ZKu}~*L4Oa=MC6Xewig(5MBpe{$Nk>+vrO@0E*;&{ z)m(Bo#gD%FN#~p_+-6Re-cf7ZIcYP=s$Ka6R#mc(WvgLPyI>AxS-MJ=(!BP_-Ko)n z-!nb_!q-b$?>D`S%ZvE%%L8k(z2 znC9k^MkxkAc;me4dLT4#HpI>p5Cv?-WR*0{XK}DpBu#Up6qD*t@uKYja^s!D*5 z0<=-^REF*=w@I)|K%TfYue9Z7i~g#3KBHGEnnGYWoiO_j)A@ zKe&UD$UvzVFhQDKuWE0-cdDjTQz|QcwpX?H^NA3=)Ed&y?0ol z&nZhRwW@fnrnJyi1`Vz-e5v*iEu+Pk**h4H=8lZ{wEeVwumAQHze6%28Hvpwnh%vd zD|;q*tSK7T3<c}afwrui{81S=mJrchJ{I}oBVDivk)5+=kM(ijDxsa@R(dGhE8iw+0^n$z+OhFwoZe*BrB? z97cMu3DnIJb_4Ls6ajfk4PvZ}Z4U%d2GUa9!fBR(S@2qZ`@ep1S9le|2)}2t4s`WG zh+%|v5`_4)^%Go3SJbYMr8Ehf=!Uf?L3#gT|04dEfY5cgY6o==%DZG)-2RxWG0pZd z7na2||0Tcwp0Pw^%p11#LVLbiMOsPezY!*IAykIhxDXE2i_rA&en$M=!1)|iFtO!y05QsM>~gm0*-uI6nm*Q(E6CuJlL+(S6rx(eIyvM9 zxQvGmx%<(>DM?-WW-!s#o}3y57fF72bStiVTe+K|)fHKq&7?I5;mI^KJcg4INd6->Vv~He9OvusP#Q^(SXi&)rFE{bPf{ zi~v?z{E0i?T)mJA?$H;b{}A6rRDRx(SB;HEMf(?aIs|_E1#g~z@~knB@!3H3G?*Ee zd^C~HJ58(}lXXYFw zb|rV%e-^fEct0?G$$Mn<4>MT`#?>KrV!uxDT)!tYSw{k2^RrFKK>o3x>t%g!4`Vsv z;eESye9L~=E9(VM^pW~e`$S+4mQzAYAFXcTMv0g>mFJ@|(XyjU6^j+ZX6Y4iu=P=R zz}#3=l)4e97U1;ZsV6Pvv0jXUeXENU6LCiOB#!Td-wHZAc4y-2_#hrX(o0LE&EmBI zvFccLuzK@!6jMv)w+(n?acL-RgTp9UAUT-i=Emvpco`pq*4Hq=erw^sCQ=hTl2% z#rhZYwMWK0-P5DpAr^Su3gyW{o4`c>Gh)a(I+{ZKl875=Te?bGh39E*<&)=7R{8aF z<#TW{lm(u!W8}$x4IC^Bx1#V;DrD_)#XKi? 
z{HZAUh$uXg!<|6|?zv)+_5^ ztGENFGJHhQ)>)oz*14nLKXGDTM{E<)XS=Q>W_d1!U9J%14X<@q)zVnCF3s`%)p2D= z#!pEqQhdf>!Y3Z0Gk0_d3yD8d^NEL;=A`jmAJKN~vooS|4g`#PrtR(63-2qPf<7_4 z%ud06Z_W7b&f9SpM5jfh_+OQG>^{#nsX62O-nzf={rB|qdPFP>z2(d6?H$m(97u>5 z5SF^6r(Qpj>K=BTm+I=uJCaIWT-*QA_dg_9Va57XBqn#1K&gmI3?7jw22i!q)0*MN z<3zi$m^>M_3h^d1ds#QLFYZ++WTBNPIuIGrJD^S=N_(X-?=7QQ!vJDJX}cbuuhmvw zccV!XcSf)TyE@mf{GSx1MEFkxYf;;>uF{^1@*`Aj<{H@=P~%4jI+2Bzk|Y|c?8wXm zZ$zqG6Pd|o(PjM(ms>!cW9SlU5*395;0o>%k|V`W@mueE#A<3^jMAu!+3y=2-Xg`~`m5HxCIfX4QUQyRK*%i+|0* zYUlTdFj>VRli4Au22qkI){&TYgK@n~N(0C)VB=&?b)f@kJ?<6ziq5sIUwHu+{f4Ia zrzgMZ+>rle-QUJ*Gbb;aaFRY74B2dSV|we-ih%q>Z$HmZ=w6n8(&p9!-+$msWJJy? zV6bH@5me0HdYJ9byMHQoRH5BM?Xayrwr#P~q5GvZEef?{OXKrHeXcfiMUOmjmkG(H zKBl6%rtEVol!nzJl4qQY6$%-pQmvletXnW)m=n!r&F)E+q#9%CaRL@ez*b$Xpc5$S z-Ybc@qw*uljW}Vzyv6yQ{Ys zAJ2t0&O4&hik zw`4q2uaaF@g&+Zz>*Ie)i1TzyQ!Q$dl}_(k&=?L>76g{7{;xsd_8_HA{IT$XETy{;wDmfGSx8u{L7 z8I$>&8)BQ``OcDgOY-fhXBQORziKF|{s+GQB*4a4lw{}XgT=j)otEAMQA#gC(<5^7 z!p?#H=G+hLmpkwDXgfIQN?E9I78F`KB*h%IwBx%c2nr8Z7v>dO$_Y`P4v!8F)Ua3Y6#(HU~ zl@q#|N9MOzy2VvFX#4o5B6-OsR;oJfVjg9!6!M0E`bA*$z@5bXQ-6-X?gm%F&JWkU zkzW`iT5kPLY0;#QCcpA40EJvI3qxY9OAEM_AlFl>D<};$){9rZ4(qx4JY2gxSDQ?$ zJG-ofmR`t?Zn45+zf9nyBbjFf*it-nOD9BVt07p6$NP#6d|AP)))g+!EiTe>z%6d% zWWUzBg?|bLsffh5B^&V}AW0_xQvn|S_`AZcyLcwn+Wm&Lg(Kh{%MWy^V|!bKjZ5Bz z_R&t+UbpF<9+@9fU?}+x55*+UOUTLe#A_fTzCzSo%$g@g`HF@cZ@6M7pa+QNf9}>R}{-@USQqkw7a7uX=h= ze$SL22ljw?700*Ni)HMA?0okx{uwfrj%0!6y?XocD+sFMG4kQfKs8?O+6%P895^X1 zO~MBCMGP$U4Uc+mGF_6!53v6|Yc_x#n z6YJWp8BRX{quCt-_1t0;7V*fqR;xXg;s0~d_{^1bhno);-{SEc&-p!SfP*4}6b8E~C( zu(-;#o3^*dli;eWtgF~VUn{;jS>0xLsHV!4Jv5wn@|a?{?UCKFgOA!4{QN{5dHZo> zCZxh8uiBKHycvii$e1daL+z*3V^|uTCUe_tUti_+@Y}cXMQQm>9ih+hy7*zQ^zm{? zYct46Is!Py(kT`G@ z_Hbop-|d_JOhee<+Q{ctRgsx0pN!>hO!&1<_%vlCzr%*#m)g`M@=1SnJsu^n4KtThy)QPbVi!7V zu4i#0lG$NmW*C|f*~qSRruE;wSzb1&*jH9LlXi1&nJO%EOK9dMbfHg9Y4QKDO8GXtmTJhe(wpicG1E_eteOf8x7> z?q-$s1^W%Y5o(B#PY{V1sTp%ckYp(}AV7qO!nSNlfoU^59tM5kLT9+r>VxxbZLzY+ z)BTf`Rh8FI50sT%zdKM_dA)3KZ`oZ%`Q85OI&x}QD1HcjkXOe_uJg3B1plE6@x$e` z3Sxy?y9THGo{?h|evsMS>aW&_WpUjFA&zG*Zjag|h2t5e#TZiaRU7mVBUHl*F=jnxnHAtM#-bxyU zOSE<5q@5D%l{ObAe6q2RY^hLG=2#_sYf4?b@R-|3#;`y}?sHvOQ(q~f@Hxz$r`+lB+gRFOW#sigMe2$fNvClWA{8coo(RTu#47f!*<$mhY3)wGO&+XbU7WyQn1#u zaMEBu*LV&!p0IK%F-b41+tpX+-V%lGLm~Cax|J)TZj!BT2lfEGo|d)6CI(Oc*LVCi z|2*O6`FIKF=Y4PRoxzqu0u+pmt4yr4*TD02i&d3~P5E(=Ocf0A>+nN)MC(H9LWP@? 
z*g>@qJFVZ+o0GaPYRmV1bQC)nT3VEz<|kd{O6HRJ!O3JYKYEqxVlH`_Ou>(xrcn68 zSP(!OajbSu_|xrv2s$F-$l zFg}!8n>hH!q6-!4V|V^!eCv9jl}QcTV~b?jHIEb%*20q?|KPYxvg}CZj&Bu~E!j^n zLLDL^tEGH|P6<@DVXbl#mWlNduA69Ke-FF5x4d%OBy^L6tdO}#qUwv7i@rAI zCZQIn`2;5jEmRk(52;VWowtI?+N;D&u+FQ!@>Fa`ORNApb&0s+W_6waJArB`7s0@- zR4`qnE9Y?e{lPDS&X4)vSlAnUQ38$veC9-pREr0Mh3$)D^3F5i)FF1ix5lq7OG1;j z6|=}QC}0d_sn3cv31pu*Z;cp z{;76X&gwD?NeBym$qM*~RLgh-J`#yos7b^R`qgEz8O^GZDU~B8WFdK&kwHPa zs>jw2EtocIQg%ov-3m;Ttrr(+x{cV`6G_X^T|JSZZ!sKOk4zn>bhrH+*Z-bjuJ!2~ ze5Y-ZM{0-U28L+4oTybNW>olySnNcW+_p35Kr-EE)%ReZea`SHj}JQaHUhhTM= zGEv0yuJ7t?7*TiKthQvuGZH<0UpU;Xwz%-Tf>Kg+>ilfjdqp!?qC_Lc@Kk8ClC5#9 ziZ#%Gm-1b`Ok5Fqbxy?NvS!dOmFJYYt9MVVT6MEwboqQUQ>s9GySG8x*@i-f^S5-< z+bX3BPrX#>)<9NiqUc??S^C0mPsoMt_S}Upc>b}Fz4_ol?I))mN$S#utK6-@HaIzh z{*|8o`VPaQzpuaVGM=dZF^=zV0TO}-@*r}m3?7F)GiV+P8NhPL9PaO~AWS&I*Hh|(>OHS>=&Se4VBmH{ zXu$ipv3G!}7?v@t>NJE%EATQZK$J^Bwu52h1vN8aVyp+jro6$I|4;J!Z#JbSENa!- zNL7<)1gkHRt_rINXxJG>@!GO8tRnEyPFGic$j+Neze8DE3fGlOnWm7va$H#>x+apEl(=OK=MFrV0+U{iXWt7;bc8@H=wt?{ zMy9s!Z9!E)Fi?cCn%oiB8wr@#$Y@@=P#2d1v9kiW&4e`s6L324lv9?a-L5_>_ZCP# z#HmNQB|ExAyoLZB!B{|nOhouhsn_B8GscjMSZpF*cnm8MnqPA#;YQ8o&m7ZdM2GF_VonXl`nR43;|OcfInbs(=*ysVg(u4RYo z%YIkopl57)jX0-}1r9pIsZIvP#@P&d^&0xKl1 zorM@Nu=k$eUO)~zq_w*f4Z83|%xl~obJ{W{Y~-OpP-VWJQc z??ir%1W@99gJ{xhb1AJ%NGePG405(H4ndttbatTL3E`MVE6G9*>SVUi%qjkCe1I`< zyJwut`3QkVlL~tfyGzX(-?bH$(TN_%<2+JNw^c7ald}40-7WR1vSW~~^c{xdYgnFZ zFfCjC0vBSMGTO? zjIDtmC%ul4Ij%(KwpG%8wVkS%@`(bWWs{(xMLY7yLPIO2#KF}AaabI5=1d&ZoJ#=4 zg@UPBsSkI&36d)+AUQB z5)O#KI8ZbGNOR;`a;H(z;D?t3YU>B&onA#azP~Q>vk_tg|EMqPzZQx;o?leOS-zz9 zh}hh=BDCJ=81v}c+cZPT2aV1WCtU*_`O)4?6&98pR8Mmx&=j{5easS_vIA&g36>1m z__X={fsB>>-IJ;>{lkA8S=fokT9*s%PAVNyBSnBivML9AKe>Il#wcw0e8uDfcKpwm zca)cw^1okc74Nf4aA~E%@XS0vV1=1km~NAF*BZ}+El0#i3yE<0I@v-~O~_bqY^ec$ zop6cS5^JfQDtA`7DS2w=24-0ZVo5_7M00)_)Gmq%#?lainmJC4>-(Gg*T-VhS2wR5 zzfN+N7Vr?Q#f?wbkIjwQJ6C;Ixg&YYYB5vfFZld! 
zyFhSh(OC=F@ox4ot!Rg{DRZw+1m|93ZdVX`|Exut>|#}EpWojd`mp32D8z$atDs8n zYIRj*roT_{qQ35(_NG-9ey_*h<+&t3hExsgfa|!g^5@#yIa#0~iZT4cE-g~|@!&`F`Gp*F!Y=~LOn(1i zLkuOY$liG^6B@I>Wu_g3-J-pIOWH4@_wireKqo=yE$5+BQEC=Z3Qb8f9zi67SHP$U1xRwU5?s*?ME+e9WJ7&A#|2I z;p)X$Bw5|^%{#x>HC(D{>kh~t5gcE^co+*hR9Xz`Q``eyBgP>znd9>^%Bw%|-JCJF z`I}BrM%4x=&$P1a*D$P@H}>kteKcCC^Anuu2cJcF9u~mBz5WQ=&jVechT1R|wf}bi z?W!oBz9W^HX-cTyE63~mUj31KM=YH%^jfCG`o^u9jE0R~G zlz7|OP1{uon!V$IBdFVwp8VqZ%E+{|+c>`e63|cgL9#wP_r`v{z0VhF$jWaIu3ZBO zM7VVnnt}5*K(gb7%yF~;v z57a1h+5y?~n*I=6SQr<}wexlAP=8$wzpgS3*qVLSUYAlc#~>@XRi8RLXy+YPL9HC_ z^h+D81+zf|l`mW$(Y9G7p61m6sbm9C4Aa?^IcP8FhJa~1p12F~)@v-Iyi4;Tn>YKh z1LT=uIT%D{ts!d5g%UT&^*uBF9+c!AJ9ZG5lNMlUO1vv${ewq&p8mlPyhgMg!w9i8 z<7v)Q->12a8QG0#%9|qszgKlv9B~`aFl`1kVkW?@#X_M=o7Thao4OA{oi}_2dA#7B z;}lB}gINd6F?fG*WU@fK%S4cq1vZ8O?5m@?H8`+yP+B?yR&c`B+BUI#dF?6jrOn5m zLwRWJsr1*Sg6NKOrISx#@#N!_lG4PWBH;W$G)j9j^{557RD7vG$1WFR27!}8NYFUfv62SMw8+>{1@iyn-6{)Yg z(ygIWu87_RTOIP{v?CgE0$j z$Jg!6xSPw)xusi;>W2h0=?OIw`xp1!`E zEr7b2PQUkW>Ms@;edHi1t?GZdeBN)K*G05tDCfGb?$BzxFJ^CS{`v4ZL*`(0=kGsf zR&+oT=&hH_bh-EUINit{|2{r9egYCib#V&+@z=8di)D-pTe$M{L5!$kFeAZE;=xFv zGA=YmU9f5?3e_FRJRWgJAy)X>zBu&5v)*zx2=`32>c-a@)c75<^CJF zyUun~x)F;Qwmm$X>RVI1&6)U*R)+CFmvq>4joHsKI8yxO?MCA&AqgOKTZpqEhIa}NGVw(Ui$ zA7!|f|C4|8Z1S5gAadxt?r9BJ?X`}OlrTEf-n{bQ;yKsZXx|U?vSpjT*F4*KNTJ)9 zG0SfK_`-qh{kd`_`+Lp#h8B!RsSSF5)cx&|w~znr`3}r_nCjHX&|!o!>Kp&>yx~2 z!2)l7qoaul#V!C15!W=!QLy5W}dRP*$WeBG;4f17)59=QMH>Tmw|^xWUS5E*9LU%q(u zp*{Wk64Ufm1+PvupDHlyJzWjYvh<2DVEF=}M`gWrSiBhN_clCT{B)5N$2ZyK5hWa3 zv25rKzVBJaECn9Saw5p3FtU(Nu0F8E0_EITq%7<(o1&9v4(EIqbYOLTnJ+W1xj24x zn8`3s`L1~tMmWte54kOd6NG5Wj#*aMo0WvEp^+M5ib-zQ1=bcXNN&d}-TL>s-7=iM zcKT4|OJ9nu&~5o5B3#tlA+oaSdUoz{szd850pq>$YmsrbyRHe2GJFMxZPyvvMkGK= zHTA_k-KFT2b(_UCT?0FJo%;ya?fwGlvK_1B-!F?U(432ac#7tsaQx-6{?If(v%lo` zzY4~G`|;DP+@5PMQV!7fc|P-q)jPPI35`r%Wa4`qlG~2^#icKVgoWq%A3fii`LGTG z;757!yS@=zWAQC&a%=7emlSGkepwc}W&JPVZQpdX{t|xY%UFY)GSwc@J9V)#VV^iM zhFD_Xi{qQ(>NFF8r0&1_2H$Eq)KNoqPh&*R(;S20q4~>@-R$TCK2N0D5?TqZm{vlf zSvfP=R%}ZsMFBTyT+scJ{qUjrc=9!pQBaYpY#e)rP4eHT2JB`(hn?hi$Qz4{NJh{? z^PTwu**-_}1=@w`Yi1;eK<5DG0CoV|is2?KOLDeywqkf-Whj5KK_eqNAzJdwcaac+ z4N~kWbvEJCq|Ws0Mx>0$r>&gz1TFY7hQcNXZP%7%KFh5-u+vnUYbxvV3mY1|8w#EcORH(CZ|1GdFi;T`{oAqVT(ntXgp&9xBm|XsWfN!?yjvuHmPLWY!8iV z{xJJ#K9Ai^HCh-`O#G0bX^IRlO5*e^ncl?dF@BbAZ2@5=%U^JOqZe*}9)M(6e)R@l z+qmx(Ct7d+ev)gZcmO6JP{nd`^*J7m%apFSG;)q9v5fJxG{h_-84OyEKvpj(j-Dzx zxavA9gtdz9G|w$>)pbYVw9(Ode&$G@c>tdpz$XcPNz5_^r-wo0uw`We+9@jrp^|MN z3CkG5A!itEY4ts9vQPj8Qd$HH=0LfW5%dm-GWkRX*%`rt>PX{4@uM_$Q$&%~UQ|-a z#(K>?UP?}JXCt0_Se)|d`AQ%$p?R1NSA2ennIvI^_ptwY{QWnyRT?_we_8__Z<^gvG|@V%R<(>6&I`56mwjoqD1>dG?WY$ zx@%ZK@4_nFErHj13=LzMU24hHs4HI3SC5v5Bsyz(HuOLB^Bs`i9aVqXSLW-qK3Z4l zpk47=idJ?a(46I5=;o}+RwMwWRYU-jsGaDWHO;GG>`uIgfLqD}oA(la@Gy7RNd_xEsovLdi*?+J}V?$!KU# z65B15_>de8J9;)bYq{oGv!TUFp&=q01~Ihkb2v3cOVO5SOHP|p)Lrz3mzP+I&q-aL z*xU3%L&c+)UsNc*C~4YcLmZ}~S8XIO6HlejCO!J;f290Mi3|8lqH4jkl@*u9v#~y! 
zhsB>|tK8E5*d%Fa{#%Nqxj5MhLZ&S9a&5krKC7iy-H(RwuLs0T-Mb_Wr5+xV3l!Np z?mN%Q4){LeNJ7Rn%rXR{g;33j48qHhdVYP#Xafd%C!vQGUCkxY;O|3+8fOX z;AQi6+1GQLf`ZC9S7%|lq!FqoII3P|yrgIs<*uI)9d`~05oc>Xf)B`sD-;Dz?@Te1 zirp>-er&@r%+PIkA3H64PPgID&Zk|Z1zQ!*mR!^SkHq(lYA(C1QBSE?&YB3O)apK` z0vJM{Ql%!JE+FxRR6y5@A@X|!+v|Y5$h&l&n`W*c-CEn z4Rr^VsJhc^43jpHR|oOpwZ(UZO=H3)69W+vNw?A0Oj;$zLir)Q0|)=K+^XO3(i+lz z@#4fpp>O@R!TdGhwUWJy{`ROg@<)qf+gCUj6&mHs-s#+u4$#8v&rCydPnMHmR@?d8+!nKqFLh-Z%2kXrm=a3ePbR{WKJ+wIcS*X=0#*I z!NhYcxNjbj_>IuJ52e=`(1zO z=Yxe#LM-0-9ZgXdSTG@B4D;fUAagdmGL%-P60j@T&PC+xvT}3#B4By%vdd&ueQHgY zv;Q+75ZV4k)MsUPo&8Dnrs`q00~o#<(jYch*>Sd$?5i|IyV>`%io+hf49TKZX&2_R z{ix88lnrDjwEQI6c@ZPx8Fm1ZVp88n!Op_3p;n)Ep0tU><~T3ZIul_Ec3wH>+K|Ck zt_?X&DWZByg#^ur z7jm;_uRGIqWb@B=8Yhs^?8+j}W#xNQDtO=7sE#No9(TgX0=BCct!PR^(HGK2d@6L7 z#-HybB!;B35R_9{7``9jTvi>lKJf7tTgn`@p0nE)k>*Lfy1fY+-;ZEVX9*8TOV& zYB0<)P){ly=`B3c)b_vdcBwEtlm3hhU0tBXT;P4dH)OL z8~F_>l+`y0(-yG>WNJ~6to}fDIbG`(=v+kQ)Tj9{1yp?R^#fSiBBq_O5%qOx8!{BV zmVl@Vp$;+)FKk-?V(CM+=pt&N8croegSf(Jc*z?Xf6vd*d=o9K<%?DOXtoaURJl`JD zQ3^*06-;*sn7$$!Ae{Im)IU5?;bny>O~U`6{%CrB@F@MA%{&^lplk)Yvj5JMIx*!X z3K9Rp=(=B{t;<|FpY%!ySP;DKZb~hcAVqpvxYC|SN)0OymDFH%g7CO0wGVc;1!Xek z+!#XU8VaGz^GU)idSZV7UZWUkV4C!hm}Fu*qQYwu6-|&(W8Hv z#oU{~HFd6Q-zy^_2@r;a01jc0DG|f0I4}r^5D)<|P?g0zNGqsSX>Ccu6d))liGWxc z1cgQopslu6!zcz&aH!JVDr#}6tqQgki{!l*Zr%Hwea`PY@ApoJXy#$TleM1VzOMf@ zZ@U{QpHNrlWrFY0nc8m*S{k9{eq**hR{y49hsAMp)}Qj&qlU&qc5Zt=Kf;zt!FQ?v z?o%j^INNZ?XDz7nC%SZFadn)j;qV%gt`-nZ%(}T$1BLzWcMCTCM{+Z&k3Kg9;mWXEN841~&XzI=E%zVFB z>`jh!AH7^ez&~flS>PBK2#)oi7+N6e3uMwXy;ZNAFJa)VL~MJ$)Q3_ga#opkig(jn zZR*??sPd8TD~>uefLdnY|?8u#6Z=s<6*-F-Yt%cJ#*clEy5ty)xdev!QNHhmE> zM4deGDxi(9=qp^A$mPW@tGN6Sb=PzM@A5;2_Db~?y2^@x>4Z{UTwDJ*` zQ#eU@0|K`HPA{I(v(f$F^!xD@hmi%Z^PwE`12Tg-!Ud7uiJx@_2HLm^U#D#gN`j(l%!_Yyk3E-m{hI;uceHpj-9{=3` zIy0n$8Loy^vOp{6xs^W`TCZha9!Og1DE;}Hm4?UQUfycGzy|1m2K}JgBpnP$@OEWO zm8-_lyR|08A`F@ zl`~~9(3b88Tg##h7R!!IcMYf_e68vRW zQavO7vLJPj6XgW3aTqpqA1PZt)`@h&pq>Cg93#3RMVjwV^zW4TS>Yr;J<|O3d8S0T zBi@idL7*V%NV~lmp3`M%gfn7Hc5CJdc=^l9CuiVNx~x9Q@@E6Pdz+2JUZUmiY0r=cwyWh9K7&4_KCOb=TBk*e}7-5cmcluPKXPR_f| zbXwh0trnh9QMis%5>hX^Pwg8RwHj&V3yDk#-H5`&@9sqMOyyKf^E$_>a~l%wwa9}D zKV8l~XOu8eFZZ=wWXiVLfR?Y8lg5)*nleL>eVtKPeD<~F$Z@t5qJrAoK&+}DpUsJV z2Ff``G^a=Ees9^$bzYJXJPl6|0tsE)0x_B68=rFmgmyJ|Z8@tHM3CPl?C#NZrVR#9 zd{f9-UCc-zDu_8!Bap}8M7r@^YBEBmkWg(EB9}xPXs3kUaBQA}m3%tZzP~S?r)v-X z&{OZ)7pq7R9zX>EUIK6_M9W2hC}oTobuQRxW;(a0+GcJK0I50`B(W$Ho%50yaN3k) z6ulbV>#~|>6zz*j+6AW6PCml-ffIu9s2Dg!xmLOQTyd8payaOCL~M}t3p zEURkSQ>EE{G(KB%F;aDA26T3Z1s^Zxr$Jx&#?{2!SrF;UbYJ4r%TFtFyYZ2$Nu+bA zn~R$Tmps_>f#yoLiL~3=w}UKhG%0W;_d+izBx^bwfX1hnt01|k?sYU8VDLzc7C8yU zP9TOxLaEatf@=X<_@y8pn^0#L%>|`c&ZrQxXqSP6TmwbcQfi7DR-(o9XCPNIj|2@j zJb=m1QvlPt5-6ZTP_lvWJ0g+`SFo!fRKq?B1{^s>?JR<9*|N*kA-G=Ar2bNnslf&q z7`&t{Tq0Dl+wl8S)x9n4B-4qj*wRL3Zu1p+5sRSJbeDjD?GDzyTCZ4L4szn}{8-Rm zX~#v_#sYgI8}*(BZ~9{JuY>74{_Q6`zxNiDd9QN>U8V#Ar%<13z`x(258DHyB#_8u zqszMUWZ0`N&wAd$178AyIlygpgC!gWNSJUAo{eof=8s?Ot%bX1qT%0LsAg*5yg8|< zDciRLnN#1j@dE#CMP^p3&xg;4Ur@_6fFT4A58uELkk*|S5<&V#1Q9MjB9R1w+(;6L zX3an`YbTdXp_oqv=R>CuK0D=wtKYqOm`${Og^M9@{M9%|dn|8h>cY5g5d~2oQ$l7K2f1;>p&Xf$_d{!wKK=%1kf=={MGbNUDM&^kfu$2#w|q%A`tEfk(Qbkw73G_a>D_|q4;KZwI208 z9vQ9xqZmgI&&rxwk@@RH>Y{^VQwJ7KIK{5E*?B&5ted>pHF&J#E1uDke6e2VO4+oL zddnZmme_CCM?I;Ia9r~OLB^IV91g~8UbN6wl*7meNW($ABx)vQ}* zk;i%E4v3y(F7(V_D|Tp4jNcwzpCHS9U3~0t9b-DM4xZ3E7l=DLuu%C(}P!;?ix{W@uq(SWzLb zFf#=6Lw~<^0Q%GbuoDCZ_9elUxbfLr$zZfFME7Do0_dUxl2QUO*Fk1ruYab0pg-1K zTXVrS0p^2AYb&nxtiJu3XCTf)_4M!iFX(f4FbJ4mZD>|eaS(Hg@%G!OZ%te!>cEr{c&Yx z^xOsR=?;(F%wPAv+2M3$!|x|R!bS1dzeyZD{7Ctc$P2vquT^w 
z8L}njk9Ykq)GW9gHXmRSYsNu2U%YAallg6AtNO%|*G38J&F{izU*GWYa)l}LBySNs z)oAlg7&Dv$h5JcRxaZ1?C)E3(_QB~n$%TP{{Djw+rL^guMee^0`rSWvu1A-LZM6HE ztCNP|H`l?|hek3uPbltPR44~Db3h8O#oliA%~&vwkQZ||fTq7<9{e)vt11?3o>r|c z_uaQT#@3hY3qycKEC%x));i6q&F`PW|h%fG(!r9>ekd? zAX^NU4GH$QCyNi0XMbG@e-5Ce)p#4xWD|Na)_+b!;n$|X9 zvU7J!Fs)fwOmu;!9G=kE!oD?H)&jz)GKZSVc(A(x!#neM(EY$tLZJKWX!w)>KzVy) zWF%(*cOA29VWf9?nY;xYT-fn>X2}Yi8?yNm5J11>dtWac%mjHC$i4btcn}Di2j;K# zo_G{gU-zx)AMx*#Wgqdc_sKHclxfN>uyK8jp+RZNOy!p8>!8+3aRb>Ed@zWxKo##? zk8Lf+fCntp)#|c9^K#f!%u!c?g7*w)@E%7pvYVSdvhz~hj$}8&U%~=55wPczn0u|g z4VdDXW_~obBDNYfAyc^-`-mo>IL5ShHhOHHZWE}|RB6gPfGo}nOuH$s;Wmw@l$uOq z=*EA4e15k~xzKq$frv6$CMi2c71m}CR+&5R7xBu&mmCe7Mc2+V(WjbkIJFn#u{L*f zh7Q?eNYC@eU!<-ndfgnQsfct>wg8d~ls_UXqXUwn1ESXbZoc+3cHe|rt98CF+t#st znG*D0oH&eB(1lqxt3eI!k*n#0of|Z2(H@D3$=FHU7%5wP!@4ZOeF4bioZEo=4!?2W z;Rg;a74QYzJ`~Mx}fVm`Y%! z{bbcSOH0Gr_n}7w%?aAK=Uw}H1rCt+9J2Nd8nT$^JPBwt_^ZL6DmrrrFl!}4Ip`=l zau6VEfXIAo){&An3SU+?p@+~TrKS2Ccq-7&Mq^I827&?~Q^~o(w|WY0Z0R&BS{yI0{cxfuPPc zaFByhK_V$8UCnNHF-UZzp!S*_pg};`UxQwN-VgX8U`+=3zkm~qwQnmIWBdZ#x1{I3 zasFh@zC^w5gl=W_+B2A0(0y_!a0k z>-p{kUjwc`yZEi_$&{``gOQhuE|>G0_4K84v+{@C{npbem_EQnqYA0gm;d>IVhghn zm>PgWN{uLuv%A#Ti~s~p_-LF&7sUhfmQ0zCttj0AvhQCCPu@rC&n70`N%Zub^z?L1 zOz7C!vCYGi>jAeWI?rzKjrqFz?Ae$LQ^I=TNd-x74%C&DSZ&E3Do+Vb3Fo81gI8#8 zAH7G&boPzF+V}P1R+#_pT>8YClDHLhUPDRppy=z*K<`?xNwyMn>4sM1cnnxTY%@PTqqyHeG&ja{#(B!*LP3 zh)1b2Lkh$u1o?h;u@`Qb-Vp2zLX;>%9ea+yC>hLzGW&hS$27&qF4ma^{?q*T-Jbhj zx}2m|3_7mTx&_+}ekcJw@f>S%zHgV6T6#QST$~*f*)^8bM-vY^kux6|`W!fLXMpJ2 ze?9Sf@|tFsM2DMDSNJMWGaB-nO;5X@HMvS$VWk)LaZpW0Q44>G?$J;5zhIwoV)L_w ziEFbKZU2eo>03$-{oCmk@z-l_c!S)!*(;%WNs)fES=a^b076wW#jZyMv_?E~xpu`x z`Y#gmclm_GN45z+ zf7iYUQw$H{&=v8xd{R72KCAqqjVaXELL5qp=ObP7xw_aD9Zi=IC#0%{1`2#MUd6@> zartCKfk$;k3q~3Bl*YCVcWQb*G|2+M@_lVo5{pH)gc|F@W@PC3IJBjQmQIm0qg3!F5 z$3kr9yf*Lg66#kcsiF}+<eM2uLj|HIA3v5J{hQ;j zna3C~3h&U$F;X=b$thadgaX=Fuw%|7a;Rhgr1N|BZ4&A$z{fjRzNZw|z4}N=ci|jF zzGnb2-u~#sB~(`o<~e9q@g$z0omL+Lu&5A0!{&-?5!^<}*PnkR0{dWI(`PcQeU*#w zBsXZ^zIW~WxT~`pPgD^BUQ1N!qHq$zv_lQWi{Fj3WW%)uY-}g#!fE>LD`N0nNrcz3 zV>PnZxTsM7@BH8RSEdeA8y)$L^Pku>%uYUq9g)H9(Ay9gId7hi#odz zI*zwpIS)0cf<=rOxH|+ITZw&1;&q?n5}CDwXkVfGn(94OPqwGDAEu{oEY;zHwH5pX zk$0dQn>o7QEbL9kU&)!jR=4N=mAtX6dglr4@3+4-P53RdOuqAzr~g_1`(N6N|L33N z(?}z!>$iXtc*JK7FS+DKlgOcXXaySXP~-E3kR~rUck1wX(|)%L`$b(@@(%4bq)wKRz zNn#*zqSHBciSVt_U4qX+;mPml^Xb-UVE$3#C7XNd;hJ7>d0h}K4n~F<~*%Bbl zR`oE$LJaa98Q+v0yV=kHQ0CeKYQUPcN20Wmhku743@x0UUJ?*qFxXHsKv^4R^6+|k znD=zAF*E}Fz=r|qJmGS*!)$iS`2>NG(Nn8(kUB^g$)!Su$`Xy3h1h8oM9c@*l%SOt zKd^5~VA(;@m;i|q&1&o^Ll) z>)O2im230IGnfDW9-sf`TlU|-V$xVlr(kYyb$+5TEWu0$^^|?$#nT_$;WeRts+{|P zh?k3(d%Uq;{<{8i^yb|+xkt`ld((Ovc)EF_O*eKcDMI|)G#8}V4$n5B0J(eVvOG-2 zEdxpaZQ*0pl~Bb`k3@ln3>*-}!E5o6VDGH} zt5|DOR2O4j3;5{;EppfBjN_1m0>+Q^HKRf7LdU7eIcoW$g{BiD%F z9M#>kEa4i^(App>1l*VDAwlBsM8(lssn;G!3IE1AuVx^V*FCgM_kF&(g8Wt!$)iFJ z0B~3Zt4r~@un#x~@Mz-+RSi`Q3cx)=wh)|@epdl1v+UI4cA%{QwhUAgaajB2*zeQj zBbyh!_b)(c^0HlRm%WJ5hHIf}+XP2%hzO;0OZs!s^bm0p(a4`TI^Z9^L2d5x&0 z=Qr8I?K)L@5!p9CBmHg2GD`c1g10FN>HN6Y0E)_xMYNEe0r|VS*kqJJrE4K*3mpX3s5lB z&Jd)KLXC#5v{?{_ikh#6C8-Mq0#sB;*^{00fJ({U54cGa%jU3nzbcRp2%$OBT4sS$GU?S!jaU!{G+orzw7!%2cZXk2wzljo=eR z%~+024GI^{d*botN&RpXKj;m++vB#G*ew$nPf}5-3%C)dSiftE{p#PIFBn)e-t>}+ zzoU5yEVO_o0HlB=OZyu9-5~U2^_!5+rc2iF2Vv1*!EJ!S(Z8v;|L$SBKbX@E`xHbgO zyqr)ni|Q^&fDeSRQR$MQC1@0&`%kn1UGfr8e1S|5dKmgwPE0yuA@F8#ri57HMi9<` ze9jLT1mNPQ1p(ty9I#n@9ALnbYq=0QSQl3pkqKw97@0C8m=uG@O!`e2Wi%#sCKh$S zEe<&PlJ@tWT&s<`4VG~CkAqtTl$G@v4Gi-OQxZ{DAVm)=fkYp>k^81~-9Pr<5#b|c z|K=NytBrJw|6o7r2M_Wdao;%|c{P2z0a%ke)su4x=Pnh4yR(X++@RcW>X=EaKNZi$ 
zl|o^2Mo8kpuof49aR;NQT-HKw1wr-{^h7GqHQvVxgp&qP?BeMF%^Ay>8&dfH95z5N zNDZ8`BmQBEW?uZoRaTnR)a1Ml08~7m+=21%ffTzqTL{Eh^)sG67lu6NFibvXdsNJo zf9u>08B;ne9U#jlPAjufhXO48%TfqP2Bl>|SURMFHP=Gm7ZzuN5n&FfDkx5N(VT;z z6xD@DgDN$f;doG^W{UL5EjN~~(`J-Zv{oTxA0*2?=*BwNC_s)f|Ixnd)^Tz4K%C&2 zZYkn`rnwK>@+f`Mm4yS$LThPXfOK5M=-GOzC(A?$1S5YAl5ZGwzNK44q-pV`iv5FN zx(C4NjW>88NE~&}-rAPsAZ35&A=uD!s#lzFRZ-iiE30OYbnH=8T`C^3SUfT^)HG-E z2SF|4(!I;PS~xoel#o5`_CMfYo};W%QOK3f+1p25jg8upMxEo6s00?Um(QI#AB~|5 zsc=>pwkK)qeA=UNv>-pR#14QUc0?C+B{7h@jLBkzbLygEgr_Z>S9M7SbR;wfweJ=P z>Uv}_xd(KhG!68Nq` zqq=qK@guqt-p0-C2H3E8*=+Ly$jGOVVFwhbmNE<)b=*09NW8VZH|falwYc%jMh(gX7dJz)u-zGk}f_Mo)RS*i2d%W zS*<7oe>4hGKXj1KBHW{owV1az0c@Nv-0eu?3wbIkwMZtL7vLj0TOs!KSzA;X=;~=M zWa+q82`l5bS?;@(!e7vQso=_kjueG4*_mdJ&LkSGl?B?fqWLZ{ue!QnGd5I`O>s2b z0amR_t!zsN@E{hhdc+nd5Wg?~F0tG|UQw8kw+a0rEeSo6 zP{IDtCF4VWbBc{qioF{;+?=`oZmgRo+10C*rUR@a8ggGpUvn{TCVC;rQG1U@>f}N0 zD374&>_zVZWWqzJ$If(%j)3ggBR$-}+UrNQTzuSS*lh8+#mggUJ?Zi6%$WKD8j6@j+M;Ygs5Nz7g*oDo*mM z$yAMknghtT`k09J0!aZZt3ml zl`aJ#lyfTSh_zeVIaMjvkOLHG73p!aM8i$q+*Q~*B-}kf(Trff=K!S)XbjSkhGl86 zmQFc$+zk6Y(@No*sT>|Zcib@6+kId#2ykvaQXZ71!D4qjr+CxHrtAt^s7@P!{f)?! zP8yx*adNiO%i8N~72Lyy-+T>Fn#KQU-+vt_|LZ+m*1R-GeFo%MZ|Nj!uHX^RRmmQE zxH4{TaBqP){##fN9{&gk`&sv?7<3Bync%+UkXc05F?~|^tR3grC>cXs8}X% zrqM#+q?Z8Xfv)ZkhM`_IT608rBxR_&BULa82}l=~e#kx5mO7Pi_1h4ZVj=gGD*Mk7 zh_@)Y0)bW8U%bup9lT$11>$}WJ;+VYI|ql3dmWm~ygAX?5Nv~m+vK2F2=KYtK#bY6 zD+tJ#nazj2>e@=11Nyr6?duJx#PX5^EU(5-T>zmNOc+r2KpK|1Kuqm%Cc)B)I0P0h z6(k-7QXF6&!#oN7fcH3n^R=_l7;rrQ4quAxS+F9xXbOeZ$qVBUhBMLTJ%DExkBh0j zToV|cgTfr*a_nF-1OTu%9^S)TU{*CXEnIB4^>ir>Vl`3+LT50}l?w?V767~si;Xl{ z1Islgy|A3KyT{}~?E{G0!9HT8$r^|i#zMPRnqVoz3JCFN5<9-%fB&^L|Kof3BZ9xi zT@xy0J76(Uw!>he%T4K)8BtMKlAH``293IzWL#w;h_>Tr{W5!~9zgD?7puJZSrEi> zPo1Xp9FK2TXR0E3I6Bpq3yY9~u)qnmsir$1zR{slquCT~&JBb*Y9^Fbp`jsSaOj$A zAkx@T8?vXM>e>u7*Tzj)3?ZajLEhlt2$rSQ0P!hiZ*pEj+kcT; z4dE(~rVVLCd78^`1&e0xg*=tRt5^ou{(ITgMVAfj`#xV1(p8pShA^YroeGe#c~4(i zItT&JW*4h3%D)hLW7nr*WjQH4P5{++YYdAchTl>NY#!^It5T~ zh~O5-%|H&jPXln9X9H1yC36dM&h}~-E>i4=6=cm#jP4ARAea=HZ0;(s|AFh!RSS{5 zR|;rrFK}}LD`Fs(JIrL9dHp`d{x?Yve1k=zEn7T4s^m~aYa9dLV*cg2j3%)(%4_Ha12IOLTz*&)C7YPD4{Bq)#V4ydiIoBVs`3U|1px!E#7wXm?aDp+;_z?)HhlCD= zP7VqYVmoH%E>$lcJcTu6h>C>3$cZ*MK0qoY#O(Ga%CcZCrNyRG+6RkwPVI$tXl$Z2 zBgAMV7v*~(dH15eXIR;_#+{WvVeR|W6Hl9m+Y+~HV7ZEqjJEeH;XXz%iNDfPUwwyq_9KG8mBnb3jx9kcAA ztYNQR*}V&Im{iT$3jqP<0paz5F=;1~A8b)DzNww63>PDR4q9|~^Vf0%TGl_*=Sqa48k*INz9+A1EL7Hz&WSxjM`0F3}DgW`_+S)qlbtpt<V5Q!e;k%568m0R1lg)y$d{4rfplhqV2;H(tDEEB~}u(Ff_$Nk*uMyx+~ zOKF*F4F`gQ&Cy%1OzmOD8Ae+={cIAIXUYX5U-E4I=(f8H$p-r0_v2^v8MCdK3YuKO zRg#J-8&B&U=-W;uutR40rjA7cTKP%?f5Z@O^N6f91_ zI2v`CXdiDT@Cdw@RIU>VO$I9K{r<&2c{z8N{KV3!y4$xHCh}D;W)gpb=ox+O+b^UX za-uIxW6GAP^hgNM0pjLH2u^u}R~HPB20|XU!GIZt%&>?a2RwHe+iRhJz=(txJNylV ztYFJwu!pq6q8Kf-=0DhfTYWl6g)8a)D!!UDe^9%GdU4YGw~qQ%W1N}9SG?FrO3*cf zuKN@p0=oe0Or_;K{Zzr-*3)ZvhyX!SN&UbN_cyx=Os)b#5v+R3-L6=j| zH5Puhppcy8au6aNJg1b0dqrnufwt`n%qnLRO*)l*wjD7FUSATMEp$rpTU$^ZXu5#i z? zEu +/// Describes a discoverable execution provider bootstrapper. +/// +public record EpInfo +{ + /// The identifier of the bootstrapper/execution provider (e.g. "CUDAExecutionProvider"). + [JsonPropertyName("Name")] + public required string Name { get; init; } + + /// True if this EP has already been successfully downloaded and registered. + [JsonPropertyName("IsRegistered")] + public required bool IsRegistered { get; init; } +} + +/// +/// Result of an explicit EP download and registration operation. +/// +public record EpDownloadResult +{ + /// True if all requested EPs were successfully downloaded and registered. 
+ [JsonPropertyName("Success")]
+ public required bool Success { get; init; }
+
+ /// Human-readable status message.
+ [JsonPropertyName("Status")]
+ public required string Status { get; init; }
+
+ /// Names of EPs that were successfully registered.
+ [JsonPropertyName("RegisteredEps")]
+ public required string[] RegisteredEps { get; init; }
+
+ /// Names of EPs that failed to register.
+ [JsonPropertyName("FailedEps")]
+ public required string[] FailedEps { get; init; }
+}
diff --git a/sdk/cs/src/FoundryLocalManager.cs b/sdk/cs/src/FoundryLocalManager.cs
index d3e4fb79..10b51285 100644
--- a/sdk/cs/src/FoundryLocalManager.cs
+++ b/sdk/cs/src/FoundryLocalManager.cs
@@ -97,9 +97,9 @@ public static async Task<FoundryLocalManager> CreateAsync(Configuration configuration, ILogger logger
 /// Optional cancellation token.
 /// The model catalog.
 ///
- /// The catalog is populated on first use.
- /// If you are using a WinML build this will trigger a one-off execution provider download if not already done.
- /// It is recommended to call <see cref="DownloadAndRegisterEpsAsync"/> first to separate out the two steps.
+ /// The catalog is populated on first use and returns models based on currently available execution providers.
+ /// To ensure all hardware-accelerated models are listed, call <see cref="DownloadAndRegisterEpsAsync"/> first to
+ /// register execution providers, then access the catalog.
 ///
 public async Task<ICatalog> GetCatalogAsync(CancellationToken? ct = null)
 {
@@ -135,19 +135,94 @@ await Utils.CallWithExceptionHandling(() => StopWebServiceImplAsync(ct),
 }

 ///
- /// Download and register execution providers.
- /// Only relevant when using WinML.
- ///
- /// Execution provider download can be time consuming due to the size of the packages.
- /// Once downloaded, EPs are not re-downloaded unless a new version is available, so this method will be fast
- /// on subsequent calls.
+ /// Discovers all available execution provider bootstrappers.
+ /// Returns metadata about each EP including whether it is already registered.
+ ///
+ /// Array of EP bootstrapper info describing available EPs.
+ public EpInfo[] DiscoverEps()
+ {
+ return Utils.CallWithExceptionHandling(DiscoverEpsImpl,
+ "Error discovering execution providers.", _logger);
+ }
+
+ ///
+ /// Downloads and registers all available execution providers.
+ ///
+ /// Optional cancellation token.
+ /// Result describing which EPs succeeded and which failed.
+ ///
+ /// Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads.
+ /// After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs.
+ ///
+ public async Task<EpDownloadResult> DownloadAndRegisterEpsAsync(CancellationToken? ct = null)
+ {
+ return await Utils.CallWithExceptionHandling(() => DownloadAndRegisterEpsImplAsync(null, null, ct),
+ "Error downloading execution providers.", _logger)
+ .ConfigureAwait(false);
+ }
+
+ ///
+ /// Downloads and registers the specified execution providers.
+ ///
+ ///
+ /// Subset of EP bootstrapper names to download (as returned by <see cref="DiscoverEps"/>).
+ ///
+ /// Optional cancellation token.
+ /// Result describing which EPs succeeded and which failed.
+ ///
+ /// Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads.
+ /// After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs.
+ ///
+ public async Task<EpDownloadResult> DownloadAndRegisterEpsAsync(IEnumerable<string> names,
+ CancellationToken? ct = null)
+ {
+ return await Utils.CallWithExceptionHandling(() => DownloadAndRegisterEpsImplAsync(names, null, ct),
+ "Error downloading execution providers.", _logger)
+ .ConfigureAwait(false);
+ }
+
+ ///
+ /// Downloads and registers all available execution providers, reporting progress.
 ///
+ ///
+ /// Callback invoked as each EP downloads. Parameters are (epName, percentComplete) where percentComplete is 0-100.
+ ///
 /// Optional cancellation token.
- public async Task DownloadAndRegisterEpsAsync(CancellationToken? ct = null)
+ /// Result describing which EPs succeeded and which failed.
+ ///
+ /// Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads.
+ /// After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs.
+ ///
+ public async Task<EpDownloadResult> DownloadAndRegisterEpsAsync(Action<string, double> progressCallback,
+ CancellationToken? ct = null)
 {
- await Utils.CallWithExceptionHandling(() => DownloadAndRegisterEpsImplAsync(ct),
- "Error downloading and registering execution providers.", _logger)
- .ConfigureAwait(false);
+ return await Utils.CallWithExceptionHandling(() => DownloadAndRegisterEpsImplAsync(null, progressCallback, ct),
+ "Error downloading execution providers.", _logger)
+ .ConfigureAwait(false);
+ }
+
+ ///
+ /// Downloads and registers the specified execution providers, reporting progress.
+ ///
+ ///
+ /// Subset of EP bootstrapper names to download (as returned by <see cref="DiscoverEps"/>).
+ ///
+ ///
+ /// Callback invoked as each EP downloads. Parameters are (epName, percentComplete) where percentComplete is 0-100.
+ ///
+ /// Optional cancellation token.
+ /// Result describing which EPs succeeded and which failed.
+ ///
+ /// Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads.
+ /// After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs.
+ ///
+ public async Task<EpDownloadResult> DownloadAndRegisterEpsAsync(IEnumerable<string> names,
+ Action<string, double> progressCallback,
+ CancellationToken? ct = null)
+ {
+ return await Utils.CallWithExceptionHandling(() => DownloadAndRegisterEpsImplAsync(names, progressCallback, ct),
+ "Error downloading execution providers.", _logger)
+ .ConfigureAwait(false);
 }

 private FoundryLocalManager(Configuration configuration, ILogger logger)
@@ -197,6 +272,24 @@ private async Task InitializeAsync(CancellationToken? ct = null)
 return;
 }

+ private EpInfo[] DiscoverEpsImpl()
+ {
+ var result = _coreInterop!.ExecuteCommand("discover_eps");
+ if (result.Error != null)
+ {
+ throw new FoundryLocalException($"Error discovering execution providers: {result.Error}", _logger);
+ }
+
+ var data = result.Data;
+ if (string.IsNullOrWhiteSpace(data))
+ {
+ return Array.Empty<EpInfo>();
+ }
+
+ return JsonSerializer.Deserialize(data, JsonSerializationContext.Default.EpInfoArray)
+ ?? Array.Empty<EpInfo>();
+ }
+
 private async Task<ICatalog> GetCatalogImplAsync(CancellationToken? ct = null)
 {
 // create on first use
@@ -259,17 +352,78 @@ private async Task StopWebServiceImplAsync(CancellationToken? ct = null)
 Urls = null;
 }

- private async Task DownloadAndRegisterEpsImplAsync(CancellationToken? ct = null)
+ private async Task<EpDownloadResult> DownloadAndRegisterEpsImplAsync(IEnumerable<string>? names = null,
+ Action<string, double>? progressCallback = null,
+ CancellationToken? ct = null)
 {
- using var disposable = await asyncLock.LockAsync().ConfigureAwait(false);
 CoreInteropRequest? input = null;
- var result = await _coreInterop!.ExecuteCommandAsync("download_and_register_eps", input, ct).ConfigureAwait(false);
+ if (names != null)
+ {
+ var namesList = string.Join(",", names);
+ if (!string.IsNullOrEmpty(namesList))
+ {
+ input = new CoreInteropRequest
+ {
+ Params = new Dictionary<string, string> { { "Names", namesList } }
+ };
+ }
+ }
+
+ ICoreInterop.Response result;
+
+ if (progressCallback != null)
+ {
+ var callback = new ICoreInterop.CallbackFn(progressString =>
+ {
+ var sepIndex = progressString.IndexOf('|');
+ if (sepIndex >= 0)
+ {
+ var name = progressString[..sepIndex];
+ if (double.TryParse(progressString[(sepIndex + 1)..],
+ System.Globalization.NumberStyles.Float,
+ System.Globalization.CultureInfo.InvariantCulture,
+ out var percent))
+ {
+ progressCallback(string.IsNullOrEmpty(name) ? "" : name, percent);
+ }
+ }
+ });
+
+ result = await _coreInterop!.ExecuteCommandWithCallbackAsync("download_and_register_eps", input,
+ callback, ct).ConfigureAwait(false);
+ }
+ else
+ {
+ result = await _coreInterop!.ExecuteCommandAsync("download_and_register_eps", input, ct).ConfigureAwait(false);
+ }
+
 if (result.Error != null)
 {
- throw new FoundryLocalException($"Error downloading and registering execution providers: {result.Error}", _logger);
+ throw new FoundryLocalException($"Error downloading execution providers: {result.Error}", _logger);
 }
+
+ EpDownloadResult epResult;
+
+ if (!string.IsNullOrEmpty(result.Data))
+ {
+ epResult = JsonSerializer.Deserialize(result.Data!, JsonSerializationContext.Default.EpDownloadResult)
+ ?? throw new FoundryLocalException("Failed to deserialize EP download result.", _logger);
+ }
+ else
+ {
+ epResult = new EpDownloadResult { Success = true, Status = "Completed", RegisteredEps = [], FailedEps = [] };
+ }
+
+ // Invalidate the catalog cache if any EP was newly registered so the next access
+ // re-fetches models with the updated set of available EPs.
+ if ((epResult.Success || epResult.RegisteredEps.Length > 0) && _catalog != null)
+ {
+ _catalog.InvalidateCache();
+ }
+
+ return epResult;
 }

 protected virtual void Dispose(bool disposing)
diff --git a/sdk/cs/src/ICatalog.cs b/sdk/cs/src/ICatalog.cs
index b50f8c40..4dca8e7d 100644
--- a/sdk/cs/src/ICatalog.cs
+++ b/sdk/cs/src/ICatalog.cs
@@ -31,7 +31,7 @@ public interface ICatalog
 ///
 /// Lookup a model variant by its unique model id.
- /// NOTE: This will return an IModel with a single variant. Use GetModelAsync to get an IModel with all avaialable
+ /// NOTE: This will return an IModel with a single variant. Use GetModelAsync to get an IModel with all available
 /// variants.
 ///
 /// Model id.
diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj
index bec1cc22..e8a7b755 100644
--- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj
+++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj
@@ -100,8 +100,8 @@ $(FoundryLocalCoreVersion)
- 0.9.0-dev-20260325T055840-33ebe7c
- 0.9.0-dev-20260325T055742-33ebe7c
+ 0.9.0-dev-202603310538-f6efa8d3
+ 0.9.0-dev-202603310538-f6efa8d3
 True
diff --git a/sdk/js/README.md b/sdk/js/README.md
index 9b08f9ac..9e56ec52 100644
--- a/sdk/js/README.md
+++ b/sdk/js/README.md
@@ -34,6 +34,47 @@ When WinML is enabled:
 > **Note:** The `--winml` flag is only relevant on Windows. On macOS and Linux, the standard installation is used regardless of this flag.
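+
+After installation, you can check which execution providers the native core can see and fetch any that are missing. A minimal sketch, assuming an already-initialized `manager` instance; the `discoverEps()` and `downloadAndRegisterEps()` APIs are detailed in the next section:
+
+```typescript
+// Download only the EPs that are not yet registered.
+const missing = manager
+  .discoverEps()
+  .filter((ep) => !ep.isRegistered)
+  .map((ep) => ep.name);
+if (missing.length > 0) {
+  const result = await manager.downloadAndRegisterEps(missing);
+  console.log(`Newly registered EPs: ${result.registeredEps.join(', ') || 'none'}`);
+}
+```
+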
+### Explicit EP Management
+
+You can explicitly discover and download execution providers using the `discoverEps()` and `downloadAndRegisterEps()` methods:
+
+```typescript
+// Discover available EPs and their status
+const eps = manager.discoverEps();
+for (const ep of eps) {
+  console.log(`${ep.name} — registered: ${ep.isRegistered}`);
+}
+
+// Download and register all available EPs
+const result = await manager.downloadAndRegisterEps();
+console.log(`Success: ${result.success}, Status: ${result.status}`);
+
+// Download only specific EPs
+const result2 = await manager.downloadAndRegisterEps([eps[0].name]);
+```
+
+#### Per-EP download progress
+
+Pass an optional `progressCallback` to receive `(epName, percent)` updates as each EP downloads (`percent` is 0–100):
+
+```typescript
+let currentEp = '';
+await manager.downloadAndRegisterEps((epName, percent) => {
+  if (epName !== currentEp) {
+    if (currentEp !== '') {
+      process.stdout.write('\n');
+    }
+    currentEp = epName;
+  }
+  process.stdout.write(`\r  ${epName} ${percent.toFixed(1)}%`);
+  if (percent >= 100) {
+    process.stdout.write('\n');
+  }
+});
+```
+
+Catalog access does not block on EP downloads. Call `downloadAndRegisterEps()` when you need hardware-accelerated execution providers.
+
 ## Quick Start

 ```typescript
diff --git a/sdk/js/docs/README.md b/sdk/js/docs/README.md
index 5e50e636..0cb39e1b 100644
--- a/sdk/js/docs/README.md
+++ b/sdk/js/docs/README.md
@@ -153,6 +153,70 @@ object: string;

 ***

+### EpDownloadResult
+
+Result of an explicit EP download and registration operation.
+
+#### Properties
+
+##### failedEps
+
+```ts
+failedEps: string[];
+```
+
+Names of EPs that failed to register.
+
+##### registeredEps
+
+```ts
+registeredEps: string[];
+```
+
+Names of EPs that were successfully registered.
+
+##### status
+
+```ts
+status: string;
+```
+
+Human-readable status message.
+
+##### success
+
+```ts
+success: boolean;
+```
+
+True if all requested EPs were successfully downloaded and registered.
+
+***
+
+### EpInfo
+
+Describes a discoverable execution provider bootstrapper.
+
+#### Properties
+
+##### isRegistered
+
+```ts
+isRegistered: boolean;
+```
+
+True if this EP has already been successfully downloaded and registered.
+
+##### name
+
+```ts
+name: string;
+```
+
+The identifier of the bootstrapper/execution provider (e.g. "CUDAExecutionProvider").
+
+***
+
 ### FoundryLocalConfig

 Configuration options for the Foundry Local SDK.
diff --git a/sdk/js/docs/classes/FoundryLocalManager.md b/sdk/js/docs/classes/FoundryLocalManager.md
index dc4908a6..6ca963f7 100644
--- a/sdk/js/docs/classes/FoundryLocalManager.md
+++ b/sdk/js/docs/classes/FoundryLocalManager.md
@@ -87,26 +87,98 @@ Error - If the web service is not running.

 ***

+### discoverEps()
+
+```ts
+discoverEps(): EpInfo[];
+```
+
+Discovers available execution providers (EPs) and their registration status.
+
+#### Returns
+
+[`EpInfo`](../README.md#epinfo)[]
+
+An array of EpInfo describing each available EP.
+
+***
+
 ### downloadAndRegisterEps()

+#### Call Signature
+
 ```ts
-downloadAndRegisterEps(): void;
+downloadAndRegisterEps(): Promise<EpDownloadResult>;
 ```

-Download and register execution providers.
-Only relevant when using the WinML variant. On non-WinML builds this is a no-op.
+Downloads and registers execution providers.

-Call this after initialization to trigger EP download before accessing the catalog,
-so that hardware-accelerated execution providers (e.g. QNN for NPU) are available
-when listing and loading models.
+##### Returns
-#### Returns
+`Promise`\<[`EpDownloadResult`](../README.md#epdownloadresult)\>
-`void`
+
+A promise that resolves with an EpDownloadResult describing the outcome.
-#### Throws
+
+#### Call Signature
+
+```ts
+downloadAndRegisterEps(names): Promise<EpDownloadResult>;
+```
+
+Downloads and registers execution providers.
+
+##### Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `names` | `string`[] | Array of EP names to download. |
+
+##### Returns
+
+`Promise`\<[`EpDownloadResult`](../README.md#epdownloadresult)\>
+
+A promise that resolves with an EpDownloadResult describing the outcome.
+
+#### Call Signature
+
+```ts
+downloadAndRegisterEps(progressCallback): Promise<EpDownloadResult>;
+```
+
+Downloads and registers execution providers, reporting progress.
+
+##### Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `progressCallback` | (`epName`, `percent`) => `void` | Callback invoked with (epName, percent) as each EP downloads. Percent is 0-100. |
+
+##### Returns
+
+`Promise`\<[`EpDownloadResult`](../README.md#epdownloadresult)\>
+
+A promise that resolves with an EpDownloadResult describing the outcome.
+
+#### Call Signature
+
+```ts
+downloadAndRegisterEps(names, progressCallback): Promise<EpDownloadResult>;
+```
+
+Downloads and registers execution providers, reporting progress.
+
+##### Parameters
+
+| Parameter | Type | Description |
+| ------ | ------ | ------ |
+| `names` | `string`[] | Array of EP names to download. |
+| `progressCallback` | (`epName`, `percent`) => `void` | Callback invoked with (epName, percent) as each EP downloads. Percent is 0-100. |
+
+##### Returns
+
+`Promise`\<[`EpDownloadResult`](../README.md#epdownloadresult)\>
-Error - If execution provider download or registration fails.
+
+A promise that resolves with an EpDownloadResult describing the outcome.
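+
+#### Example
+
+A combined usage sketch; the EP names returned are hardware-dependent and `manager` is assumed to be an initialized `FoundryLocalManager`:
+
+```ts
+// Discover every available EP, then download them all with per-EP progress.
+const eps = manager.discoverEps();
+const result = await manager.downloadAndRegisterEps(
+  eps.map((ep) => ep.name),
+  (epName, percent) => console.log(`${epName}: ${percent.toFixed(0)}%`)
+);
+if (!result.success) {
+  console.error(`Failed EPs: ${result.failedEps.join(', ')}`);
+}
+```
+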
***

diff --git a/sdk/js/examples/chat-completion.ts b/sdk/js/examples/chat-completion.ts
index a9e2d59a..f18b989c 100644
--- a/sdk/js/examples/chat-completion.ts
+++ b/sdk/js/examples/chat-completion.ts
@@ -18,6 +18,17 @@ async function main() {
 });
 console.log('✓ SDK initialized successfully');

+ const availableEps = manager.discoverEps();
+ console.log(`\nAvailable execution providers: ${availableEps.map((ep) => ep.name).join(', ')}`);
+
+ console.log('\nDownloading and registering execution providers...');
+ const downloadResult = await manager.downloadAndRegisterEps();
+ if (downloadResult.success) {
+ console.log('✓ All execution providers registered successfully');
+ } else {
+ console.log(`⚠️ Some execution providers failed to download and/or register: ${downloadResult.failedEps.join(', ')}`);
+ }
+
 // Explore available models
 console.log('\nFetching available models...');
 const catalog = manager.catalog;
@@ -37,7 +48,7 @@ async function main() {
 console.log(` - ${cachedModel.alias}`);
 }

- const modelAlias = 'MODEL_ALIAS'; // Replace with a valid model alias from the list above
+ const modelAlias = 'qwen2.5-0.5b';

 // Load the model first
 console.log(`\nLoading model ${modelAlias}...`);
diff --git a/sdk/js/src/catalog.ts b/sdk/js/src/catalog.ts
index bf2ae5c9..2efba66a 100644
--- a/sdk/js/src/catalog.ts
+++ b/sdk/js/src/catalog.ts
@@ -31,6 +31,11 @@ export class Catalog {
 return this._name;
 }

+ /** @internal */
+ invalidateCache(): void {
+ this.lastFetch = 0;
+ }
+
 private async updateModels(): Promise<void> {
 // TODO: make this configurable
 if ((Date.now() - this.lastFetch) < 6 * 60 * 60 * 1000) { // 6 hours
diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts
index 3116faa9..9b723e84 100644
--- a/sdk/js/src/detail/coreInterop.ts
+++ b/sdk/js/src/detail/coreInterop.ts
@@ -188,7 +188,7 @@ export class CoreInterop {
 }
 }

- public executeCommandStreaming(command: string, params: any, callback: (chunk: string) => void): Promise<void> {
+ public executeCommandStreaming(command: string, params: any, callback: (chunk: string) => void): Promise<string> {
 const cmdBuf = koffi.alloc('char', command.length + 1);
 koffi.encode(cmdBuf, 'char', command, command.length + 1);
@@ -202,7 +202,7 @@ export class CoreInterop {
 callback(chunk);
 }, koffi.pointer(CallbackType));

- return new Promise<void>((resolve, reject) => {
+ return new Promise<string>((resolve, reject) => {
 const req = {
 Command: koffi.address(cmdBuf),
 CommandLength: command.length,
@@ -226,7 +226,8 @@ export class CoreInterop {
 const errorMsg = koffi.decode(res.Error, 'char', res.ErrorLength);
 reject(new Error(`Command '${command}' failed: ${errorMsg}`));
 } else {
- resolve();
+ const responseData = res.Data ? koffi.decode(res.Data, 'char', res.DataLength) : '';
+ resolve(responseData);
 }
 } finally {
 // Free the heap-allocated response strings using koffi.free()
diff --git a/sdk/js/src/foundryLocalManager.ts b/sdk/js/src/foundryLocalManager.ts
index 6da0bcc7..f22acdc0 100644
--- a/sdk/js/src/foundryLocalManager.ts
+++ b/sdk/js/src/foundryLocalManager.ts
@@ -3,6 +3,7 @@ import { CoreInterop } from './detail/coreInterop.js';
 import { ModelLoadManager } from './detail/modelLoadManager.js';
 import { Catalog } from './catalog.js';
 import { ResponsesClient } from './openai/responsesClient.js';
+import { EpInfo, EpDownloadResult } from './types.js';

 /**
 * The main entry point for the Foundry Local SDK.
@@ -61,23 +62,6 @@ export class FoundryLocalManager {
 return this._urls;
 }

- /**
- * Download and register execution providers.
- * Only relevant when using the WinML variant. On non-WinML builds this is a no-op.
- *
- * Call this after initialization to trigger EP download before accessing the catalog,
- * so that hardware-accelerated execution providers (e.g. QNN for NPU) are available
- * when listing and loading models.
- *
- * @throws Error - If execution provider download or registration fails.
- */
- public downloadAndRegisterEps(): void {
- try {
- this.coreInterop.executeCommand("download_and_register_eps");
- } catch (error) {
- throw new Error(`Error downloading and registering execution providers: ${error}`);
- }
- }

 /**
 * Starts the local web service.
@@ -112,6 +96,122 @@ export class FoundryLocalManager {
 return this._urls.length > 0;
 }

+ /**
+ * Discovers available execution providers (EPs) and their registration status.
+ * @returns An array of EpInfo describing each available EP.
+ */
+ public discoverEps(): EpInfo[] {
+ const response = this.coreInterop.executeCommand("discover_eps");
+ type RawEpInfo = {
+ Name: string;
+ IsRegistered: boolean;
+ };
+
+ try {
+ const raw = JSON.parse(response) as RawEpInfo[];
+ return raw.map((ep) => ({
+ name: ep.Name,
+ isRegistered: ep.IsRegistered
+ }));
+ } catch (error) {
+ throw new Error(`Failed to decode JSON response from discover_eps: ${error}. Response was: ${response}`);
+ }
+ }
+
+ /**
+ * Downloads and registers execution providers.
+ * @returns A promise that resolves with an EpDownloadResult describing the outcome.
+ */
+ public downloadAndRegisterEps(): Promise<EpDownloadResult>;
+ /**
+ * Downloads and registers execution providers.
+ * @param names - Array of EP names to download.
+ * @returns A promise that resolves with an EpDownloadResult describing the outcome.
+ */
+ public downloadAndRegisterEps(names: string[]): Promise<EpDownloadResult>;
+ /**
+ * Downloads and registers execution providers, reporting progress.
+ * @param progressCallback - Callback invoked with (epName, percent) as each EP downloads. Percent is 0-100.
+ * @returns A promise that resolves with an EpDownloadResult describing the outcome.
+ */
+ public downloadAndRegisterEps(progressCallback: (epName: string, percent: number) => void): Promise<EpDownloadResult>;
+ /**
+ * Downloads and registers execution providers, reporting progress.
+ * @param names - Array of EP names to download.
+ * @param progressCallback - Callback invoked with (epName, percent) as each EP downloads. Percent is 0-100.
+ * @returns A promise that resolves with an EpDownloadResult describing the outcome.
+ */
+ public downloadAndRegisterEps(names: string[], progressCallback: (epName: string, percent: number) => void): Promise<EpDownloadResult>;
+ public async downloadAndRegisterEps(
+ namesOrCallback?: string[] | ((epName: string, percent: number) => void),
+ progressCallback?: (epName: string, percent: number) => void
+ ): Promise<EpDownloadResult> {
+ let names: string[] | undefined;
+ if (typeof namesOrCallback === 'function') {
+ progressCallback = namesOrCallback;
+ } else {
+ names = namesOrCallback;
+ }
+
+ const params: { Params?: { Names: string } } = {};
+ if (names && names.length > 0) {
+ params.Params = { Names: names.join(",") };
+ }
+
+ type RawEpDownloadResult = {
+ Success: boolean;
+ Status: string;
+ RegisteredEps: string[];
+ FailedEps: string[];
+ };
+
+ let response: string;
+
+ if (progressCallback) {
+ response = await this.coreInterop.executeCommandStreaming(
+ "download_and_register_eps",
+ Object.keys(params).length > 0 ?
params : undefined, + (chunk: string) => { + const sepIndex = chunk.indexOf('|'); + if (sepIndex >= 0) { + const epName = chunk.substring(0, sepIndex); + const percent = parseFloat(chunk.substring(sepIndex + 1)); + if (!isNaN(percent)) { + progressCallback(epName || '', percent); + } + } + } + ); + } else { + response = await this.coreInterop.executeCommandStreaming( + "download_and_register_eps", + Object.keys(params).length > 0 ? params : undefined, + () => {} // no-op callback + ); + } + + let epResult: EpDownloadResult; + try { + const raw = JSON.parse(response) as RawEpDownloadResult; + epResult = { + success: raw.Success, + status: raw.Status, + registeredEps: raw.RegisteredEps, + failedEps: raw.FailedEps + }; + } catch (error) { + throw new Error(`Failed to decode JSON response from download_and_register_eps: ${error}. Response was: ${response}`); + } + + // Invalidate the catalog cache if any EP was newly registered so the next access + // re-fetches models with the updated set of available EPs. + if (epResult.success || epResult.registeredEps.length > 0) { + this._catalog.invalidateCache(); + } + + return epResult; + } + /** * Creates a ResponsesClient for interacting with the Responses API. * The web service must be started first via `startWebService()`. diff --git a/sdk/js/src/types.ts b/sdk/js/src/types.ts index 40a9110b..521ae34b 100644 --- a/sdk/js/src/types.ts +++ b/sdk/js/src/types.ts @@ -67,6 +67,30 @@ export interface ToolChoice { name?: string; } +// ============================================================================ +// Execution Provider Types +// ============================================================================ + +/** Describes a discoverable execution provider bootstrapper. */ +export interface EpInfo { + /** The identifier of the bootstrapper/execution provider (e.g. "CUDAExecutionProvider"). */ + name: string; + /** True if this EP has already been successfully downloaded and registered. */ + isRegistered: boolean; +} + +/** Result of an explicit EP download and registration operation. */ +export interface EpDownloadResult { + /** True if all requested EPs were successfully downloaded and registered. */ + success: boolean; + /** Human-readable status message. */ + status: string; + /** Names of EPs that were successfully registered. */ + registeredEps: string[]; + /** Names of EPs that failed to register. 
*/ + failedEps: string[]; +} + // ============================================================================ // Responses API Types // Aligned with OpenAI Responses API / OpenResponses spec and diff --git a/sdk/js/test/foundryLocalManager.test.ts b/sdk/js/test/foundryLocalManager.test.ts index 5ab40043..48adcff4 100644 --- a/sdk/js/test/foundryLocalManager.test.ts +++ b/sdk/js/test/foundryLocalManager.test.ts @@ -16,4 +16,66 @@ describe('Foundry Local Manager Tests', () => { // We don't assert the exact name as it might change, but we ensure it exists expect(catalog.name).to.be.a('string'); }); + + it('downloadAndRegisterEps should call command without params when names are omitted', async function() { + const manager = getTestManager() as any; + const calls: unknown[][] = []; + const originalExecuteCommandStreaming = manager.coreInterop.executeCommandStreaming; + + manager.coreInterop.executeCommandStreaming = (...args: unknown[]) => { + calls.push(args); + return Promise.resolve(JSON.stringify({ + Success: true, + Status: 'All providers registered', + RegisteredEps: ['CUDAExecutionProvider'], + FailedEps: [] + })); + }; + + try { + const result = await manager.downloadAndRegisterEps(); + expect(calls.length).to.equal(1); + expect(calls[0][0]).to.equal('download_and_register_eps'); + expect(calls[0][1]).to.be.undefined; + expect(result).to.deep.equal({ + success: true, + status: 'All providers registered', + registeredEps: ['CUDAExecutionProvider'], + failedEps: [] + }); + } finally { + manager.coreInterop.executeCommandStreaming = originalExecuteCommandStreaming; + } + }); + + it('downloadAndRegisterEps should send Names param when subset is provided', async function() { + const manager = getTestManager() as any; + const calls: unknown[][] = []; + const originalExecuteCommandStreaming = manager.coreInterop.executeCommandStreaming; + + manager.coreInterop.executeCommandStreaming = (...args: unknown[]) => { + calls.push(args); + return Promise.resolve(JSON.stringify({ + Success: false, + Status: 'Some providers failed', + RegisteredEps: ['CUDAExecutionProvider'], + FailedEps: ['OpenVINOExecutionProvider'] + })); + }; + + try { + const result = await manager.downloadAndRegisterEps(['CUDAExecutionProvider', 'OpenVINOExecutionProvider']); + expect(calls.length).to.equal(1); + expect(calls[0][0]).to.equal('download_and_register_eps'); + expect(calls[0][1]).to.deep.equal({ Params: { Names: 'CUDAExecutionProvider,OpenVINOExecutionProvider' } }); + expect(result).to.deep.equal({ + success: false, + status: 'Some providers failed', + registeredEps: ['CUDAExecutionProvider'], + failedEps: ['OpenVINOExecutionProvider'] + }); + } finally { + manager.coreInterop.executeCommandStreaming = originalExecuteCommandStreaming; + } + }); }); diff --git a/sdk/python/README.md b/sdk/python/README.md index ace19bac..4c1fb84a 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -18,7 +18,7 @@ Two package variants are published — choose the one that matches your target h | Variant | Package | Native backends | |---|---|---| -| Standard (cross-platform) | `foundry-local-sdk` | CPU / DirectML / CUDA | +| Standard (cross-platform) | `foundry-local-sdk` | CPU / WebGPU / CUDA | | WinML (Windows only) | `foundry-local-sdk-winml` | Windows ML + all standard backends | ```bash @@ -70,6 +70,46 @@ foundry-local-install --winml --verbose > **Note:** The standard and WinML native packages use different PyPI package names (`foundry-local-core` vs `foundry-local-core-winml`) so they can coexist in the same pip 
index, but they should not be installed in the same Python environment simultaneously. +## Explicit EP Management + +You can explicitly discover and download execution providers (EPs): + +```python +# Discover available EPs and registration status +eps = manager.discover_eps() +for ep in eps: + print(f"{ep.name} - registered: {ep.is_registered}") + +# Download and register all available EPs +result = manager.download_and_register_eps() +print(f"Success: {result.success}, Status: {result.status}") + +# Download only specific EPs +result2 = manager.download_and_register_eps([eps[0].name]) +``` + +### Per-EP download progress + +Pass a `progress_callback` to receive `(ep_name, percent)` updates as each EP downloads (`percent` is 0–100): + +```python +current_ep = "" + +def on_progress(ep_name: str, percent: float) -> None: + global current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name} {percent:5.1f}%", end="", flush=True) + if percent >= 100: + print() + +manager.download_and_register_eps(progress_callback=on_progress) +``` + +Catalog access does not block on EP downloads. Call `download_and_register_eps()` when you need hardware-accelerated execution providers. + ## Quick Start ```python @@ -225,6 +265,8 @@ manager.stop_web_service() |---|---| | `Configuration` | SDK configuration (app name, cache dir, log level, web service settings) | | `FoundryLocalManager` | Singleton entry point – initialization, catalog access, web service | +| `EpInfo` | Discoverable execution provider info (`name`, `is_registered`) | +| `EpDownloadResult` | Result of EP download/registration (`success`, `status`, `registered_eps`, `failed_eps`) | | `Catalog` | Model discovery – listing, lookup by alias/ID, cached/loaded queries | | `Model` | Groups variants under one alias – select, load, unload, create clients | | `ModelVariant` | Specific model variant – download, cache, load/unload, create clients | diff --git a/sdk/python/examples/chat_completion.py b/sdk/python/examples/chat_completion.py index 60eefd5e..c0c58048 100644 --- a/sdk/python/examples/chat_completion.py +++ b/sdk/python/examples/chat_completion.py @@ -19,6 +19,15 @@ def main(): FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance + # Discover available EPs and register them explicitly when needed. + eps = manager.discover_eps() + print("Available execution providers:") + for ep in eps: + print(f" - {ep.name} (registered: {ep.is_registered})") + + ep_result = manager.download_and_register_eps() + print(f"EP registration success: {ep_result.success} ({ep_result.status})") + # 2. Print available models in the catalog and cache models = manager.catalog.list_models() print("Available models in catalog:") diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index 0fb9f9c2..9a3990b7 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -2,6 +2,6 @@ pydantic>=2.0.0 requests>=2.32.4 openai>=2.24.0 # WinML native binary packages from the ORT-Nightly PyPI feed. 
-foundry-local-core-winml +foundry-local-core-winml==0.9.0.dev20260331004032 onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 \ No newline at end of file diff --git a/sdk/python/src/catalog.py b/sdk/python/src/catalog.py index 767a9f08..afccd85b 100644 --- a/sdk/python/src/catalog.py +++ b/sdk/python/src/catalog.py @@ -80,8 +80,12 @@ def _update_models(self): self._model_id_to_model_variant[variant.id] = variant - self._last_fetch = datetime.datetime.now() self._models = models + self._last_fetch = datetime.datetime.now() + + def _invalidate_cache(self): + with self._lock: + self._last_fetch = datetime.datetime.min def list_models(self) -> List[Model]: """ diff --git a/sdk/python/src/ep_types.py b/sdk/python/src/ep_types.py new file mode 100644 index 00000000..42d84acf --- /dev/null +++ b/sdk/python/src/ep_types.py @@ -0,0 +1,24 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from typing import List + +from pydantic import BaseModel, Field + + +class EpInfo(BaseModel): + """Metadata describing a discoverable execution provider (EP).""" + + name: str = Field(alias="Name") + is_registered: bool = Field(alias="IsRegistered") + + +class EpDownloadResult(BaseModel): + """Result of an explicit EP download and registration operation.""" + + success: bool = Field(alias="Success") + status: str = Field(alias="Status") + registered_eps: List[str] = Field(alias="RegisteredEps") + failed_eps: List[str] = Field(alias="FailedEps") diff --git a/sdk/python/src/foundry_local_manager.py b/sdk/python/src/foundry_local_manager.py index 4c02a127..a649f8e5 100644 --- a/sdk/python/src/foundry_local_manager.py +++ b/sdk/python/src/foundry_local_manager.py @@ -9,10 +9,15 @@ import logging import threading +from typing import Callable, List, Optional + +from pydantic import TypeAdapter + from .catalog import Catalog from .configuration import Configuration +from .ep_types import EpDownloadResult, EpInfo from .logging_helper import set_default_logger_severity -from .detail.core_interop import CoreInterop +from .detail.core_interop import CoreInterop, InteropRequest from .detail.model_load_manager import ModelLoadManager from .exception import FoundryLocalException @@ -71,17 +76,90 @@ def _initialize(self): self._model_load_manager = ModelLoadManager(self._core_interop, external_service_url) self.catalog = Catalog(self._model_load_manager, self._core_interop) - def download_and_register_eps(self) -> None: - """Download and register execution providers. - Only relevant when using WinML. + def discover_eps(self) -> list[EpInfo]: + """Discover available execution providers and their registration status. + + Returns: + List of ``EpInfo`` entries for all discoverable EPs. Raises: - FoundryLocalException: If execution provider download or registration fails. + FoundryLocalException: If EP discovery fails or response JSON is invalid. """ - result = self._core_interop.execute_command("download_and_register_eps") + response = self._core_interop.execute_command("discover_eps") + if response.error is not None: + raise FoundryLocalException(f"Error discovering execution providers: {response.error}") + + try: + adapter = TypeAdapter(List[EpInfo]) + return adapter.validate_json(response.data or "[]") + except Exception as e: + raise FoundryLocalException( + f"Failed to decode JSON response from discover_eps: {e}. 
Response was: {response.data}" + ) from e + + def download_and_register_eps( + self, + names: Optional[list[str]] = None, + progress_callback: Optional[Callable[[str, float], None]] = None, + ) -> EpDownloadResult: + """Download and register execution providers. - if result.error is not None: - raise FoundryLocalException(f"Error downloading and registering execution providers: {result.error}") + Args: + names: Optional subset of EP names to download. If omitted or empty, + all discoverable EPs are downloaded. + progress_callback: Optional callback ``(ep_name: str, percent: float) -> None`` + invoked as each EP downloads. ``percent`` is 0-100. + + Returns: + ``EpDownloadResult`` describing operation status and per-EP outcomes. + + Raises: + FoundryLocalException: If the operation fails or response JSON is invalid. + """ + request = None + if names is not None and len(names) > 0: + request = InteropRequest(params={"Names": ",".join(names)}) + + if progress_callback is not None: + def _on_chunk(chunk: str) -> None: + sep = chunk.find("|") + if sep >= 0: + ep_name = chunk[:sep] or "" + try: + percent = float(chunk[sep + 1:]) + progress_callback(ep_name, percent) + except ValueError: + pass + + response = self._core_interop.execute_command_with_callback( + "download_and_register_eps", request, _on_chunk + ) + else: + response = self._core_interop.execute_command("download_and_register_eps", request) + + if response.error is not None: + raise FoundryLocalException(f"Error downloading execution providers: {response.error}") + + if response.data: + try: + adapter = TypeAdapter(EpDownloadResult) + ep_result = adapter.validate_json(response.data) + except Exception as e: + raise FoundryLocalException( + "Failed to decode JSON response from download_and_register_eps: " + f"{e}. Response was: {response.data}" + ) from e + else: + ep_result = EpDownloadResult( + Success=True, Status="Completed", RegisteredEps=[], FailedEps=[] + ) + + # Invalidate the catalog cache if any EP was newly registered so the next access + # re-fetches models with the updated set of available EPs. + if ep_result.success or len(ep_result.registered_eps) > 0: + self.catalog._invalidate_cache() + + return ep_result def start_web_service(self): """Start the optional web service. 
diff --git a/sdk/python/test/test_foundry_local_manager.py b/sdk/python/test/test_foundry_local_manager.py index b0a9c4e2..31528891 100644 --- a/sdk/python/test/test_foundry_local_manager.py +++ b/sdk/python/test/test_foundry_local_manager.py @@ -7,6 +7,22 @@ from __future__ import annotations +class _Response: + def __init__(self, data=None, error=None): + self.data = data + self.error = error + + +class _FakeCoreInterop: + def __init__(self, responses): + self._responses = responses + self.calls = [] + + def execute_command(self, command_name, command_input=None): + self.calls.append((command_name, command_input)) + return self._responses[command_name] + + class TestFoundryLocalManager: """Foundry Local Manager Tests.""" @@ -20,3 +36,48 @@ def test_should_return_catalog(self, manager): assert catalog is not None assert isinstance(catalog.name, str) assert len(catalog.name) > 0 + + def test_discover_eps_returns_ep_info(self, manager): + original_core = manager._core_interop + manager._core_interop = _FakeCoreInterop( + { + "discover_eps": _Response( + data='[{"Name":"CUDAExecutionProvider","IsRegistered":true}]', + error=None, + ) + } + ) + + try: + eps = manager.discover_eps() + finally: + manager._core_interop = original_core + + assert isinstance(eps, list) + assert len(eps) == 1 + assert eps[0].name == "CUDAExecutionProvider" + assert eps[0].is_registered is True + + def test_download_and_register_eps_returns_result(self, manager): + original_core = manager._core_interop + manager._core_interop = _FakeCoreInterop( + { + "download_and_register_eps": _Response( + data=( + '{"Success":true,"Status":"ok",' + '"RegisteredEps":["CUDAExecutionProvider"],"FailedEps":[]}' + ), + error=None, + ) + } + ) + + try: + result = manager.download_and_register_eps(["CUDAExecutionProvider"]) + finally: + manager._core_interop = original_core + + assert result.success is True + assert result.status == "ok" + assert result.registered_eps == ["CUDAExecutionProvider"] + assert result.failed_eps == [] diff --git a/sdk/rust/README.md b/sdk/rust/README.md index d76a7589..aa848b03 100644 --- a/sdk/rust/README.md +++ b/sdk/rust/README.md @@ -60,6 +60,56 @@ foundry-local-sdk = { version = "0.1", features = ["winml"] } > **Note:** The `winml` feature is only relevant on Windows. On macOS and Linux, the standard build is used regardless. No code changes are needed — your application code stays the same. 
+### Explicit EP Management + +You can explicitly discover and download execution providers: + +```rust +use foundry_local_sdk::{FoundryLocalConfig, FoundryLocalManager}; + +let manager = FoundryLocalManager::create(FoundryLocalConfig::new("my_app"))?; + +// Discover available EPs and their status +let eps = manager.discover_eps()?; +for ep in &eps { + println!("{} — registered: {}", ep.name, ep.is_registered); +} + +// Download and register all available EPs +let result = manager.download_and_register_eps(None).await?; +println!("Success: {}, Status: {}", result.success, result.status); + +// Download only specific EPs +let result = manager.download_and_register_eps(Some(&[eps[0].name.as_str()])).await?; +``` + +#### Per-EP download progress + +Use `download_and_register_eps_with_progress` to receive typed `(ep_name, percent)` updates +as each EP downloads (`percent` is 0.0–100.0): + +```rust +use std::sync::{Arc, Mutex}; + +let current_ep = Arc::new(Mutex::new(String::new())); +let ep = Arc::clone(&current_ep); +manager.download_and_register_eps_with_progress(None, move |ep_name: &str, percent: f64| { + let mut current = ep.lock().unwrap(); + if ep_name != current.as_str() { + if !current.is_empty() { + println!(); + } + *current = ep_name.to_string(); + } + print!("\r {} {:5.1}%", ep_name, percent); + if percent >= 100.0 { + println!(); + } +}).await?; +``` + +Catalog access does not block on EP downloads. Call `download_and_register_eps` when you need hardware-accelerated execution providers. + ## Quick Start ```rust diff --git a/sdk/rust/src/catalog.rs b/sdk/rust/src/catalog.rs index 9e04c943..d9d5bb51 100644 --- a/sdk/rust/src/catalog.rs +++ b/sdk/rust/src/catalog.rs @@ -87,6 +87,11 @@ impl Catalog { &self.name } + /// Invalidate the catalog cache so the next access re-fetches models. + pub(crate) fn invalidate_cache(&self) { + self.invalidator.invalidate(); + } + /// Refresh the catalog from the native core if the cache has expired or /// has been explicitly invalidated (e.g. after a download or removal). pub async fn update_models(&self) -> Result<()> { diff --git a/sdk/rust/src/foundry_local_manager.rs b/sdk/rust/src/foundry_local_manager.rs index 9cf2477f..0c22ef15 100644 --- a/sdk/rust/src/foundry_local_manager.rs +++ b/sdk/rust/src/foundry_local_manager.rs @@ -13,6 +13,7 @@ use crate::configuration::{Configuration, FoundryLocalConfig, Logger}; use crate::detail::core_interop::CoreInterop; use crate::detail::ModelLoadManager; use crate::error::{FoundryLocalError, Result}; +use crate::types::{EpDownloadResult, EpInfo}; /// Global singleton holder — only stores a successfully initialised manager. static INSTANCE: OnceLock<FoundryLocalManager> = OnceLock::new(); @@ -134,17 +135,92 @@ impl FoundryLocalManager { Ok(()) } + /// Discover available execution providers and their registration status. + pub fn discover_eps(&self) -> Result<Vec<EpInfo>> { + let raw = self.core.execute_command("discover_eps", None)?; + let eps: Vec<EpInfo> = serde_json::from_str(&raw)?; + Ok(eps) + } + /// Download and register execution providers. /// - /// Only relevant when using the WinML variant. On non-WinML builds this - /// is a no-op. Call this after initialisation to trigger EP download - /// before accessing the catalog, so that hardware-accelerated execution - /// providers (e.g. QNN for NPU) are available when listing and loading - /// models.
- pub async fn download_and_register_eps(&self) -> Result<()> { - self.core - .execute_command_async("download_and_register_eps".into(), None) - .await?; - Ok(()) + /// If `names` is `None` or empty, all available EPs are downloaded. + /// Otherwise only the named EPs are downloaded and registered. + pub async fn download_and_register_eps( + &self, + names: Option<&[&str]>, + ) -> Result<EpDownloadResult> { + self.download_and_register_eps_impl(names, None::<fn(&str, f64)>) + .await + } + + /// Download and register execution providers, reporting per-EP progress. + /// + /// If `names` is `None` or empty, all available EPs are downloaded. + /// Otherwise only the named EPs are downloaded and registered. + /// + /// `progress_callback` receives `(ep_name, percent)` where `percent` + /// ranges from 0.0 to 100.0 as each EP downloads. + pub async fn download_and_register_eps_with_progress<F>( + &self, + names: Option<&[&str]>, + progress_callback: F, + ) -> Result<EpDownloadResult> + where + F: FnMut(&str, f64) + Send + 'static, + { + self.download_and_register_eps_impl(names, Some(progress_callback)) + .await + } + + async fn download_and_register_eps_impl<F>( + &self, + names: Option<&[&str]>, + progress_callback: Option<F>, + ) -> Result<EpDownloadResult> + where + F: FnMut(&str, f64) + Send + 'static, + { + let params = match names { + Some(n) if !n.is_empty() => Some(json!({ "Params": { "Names": n.join(",") } })), + _ => None, + }; + + let raw = match progress_callback { + Some(cb) => { + let mut callback = cb; + let wrapper = move |chunk: &str| { + if let Some(sep) = chunk.find('|') { + let name = &chunk[..sep]; + if let Ok(percent) = chunk[sep + 1..].parse::<f64>() { + callback(if name.is_empty() { "" } else { name }, percent); + } + } + }; + + self.core + .execute_command_streaming_async( + "download_and_register_eps".into(), + params, + wrapper, + ) + .await? + } + None => { + self.core + .execute_command_async("download_and_register_eps".into(), params) + .await? + } + }; + + let result: EpDownloadResult = serde_json::from_str(&raw)?; + + // Invalidate the catalog cache if any EP was newly registered so the next + // access re-fetches models with the updated set of available EPs. + if result.success || !result.registered_eps.is_empty() { + self.catalog.invalidate_cache(); + } + + Ok(result) } } diff --git a/sdk/rust/src/lib.rs b/sdk/rust/src/lib.rs index c6d6e6c4..c12feef1 100644 --- a/sdk/rust/src/lib.rs +++ b/sdk/rust/src/lib.rs @@ -20,8 +20,8 @@ pub use self::foundry_local_manager::FoundryLocalManager; pub use self::model::Model; pub use self::model_variant::ModelVariant; pub use self::types::{ - ChatResponseFormat, ChatToolChoice, DeviceType, ModelInfo, ModelSettings, Parameter, - PromptTemplate, Runtime, + ChatResponseFormat, ChatToolChoice, DeviceType, EpDownloadResult, EpInfo, ModelInfo, + ModelSettings, Parameter, PromptTemplate, Runtime, }; // Re-export OpenAI request types so callers can construct typed messages. diff --git a/sdk/rust/src/types.rs b/sdk/rust/src/types.rs index bab2f9c8..28b37ed2 100644 --- a/sdk/rust/src/types.rs +++ b/sdk/rust/src/types.rs @@ -125,3 +125,27 @@ pub enum ChatToolChoice { /// Model must call the named function. Function(String), } + +/// Information about an available execution provider bootstrapper. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct EpInfo { + /// The name of the execution provider. + pub name: String, + /// Whether this EP is currently registered and ready for use.
+ pub is_registered: bool, +} + +/// Result of a download-and-register execution-provider operation. +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct EpDownloadResult { + /// Whether all requested EPs were successfully registered. + pub success: bool, + /// Human-readable status message. + pub status: String, + /// Names of EPs that were successfully registered. + pub registered_eps: Vec<String>, + /// Names of EPs that failed to register. + pub failed_eps: Vec<String>, +} diff --git a/www/src/routes/models/service.ts b/www/src/routes/models/service.ts index de49a539..75e2901c 100644 --- a/www/src/routes/models/service.ts +++ b/www/src/routes/models/service.ts @@ -188,7 +188,6 @@ export class FoundryModelService { device: 'GPU', executionProviders: [ 'CUDAExecutionProvider', // NVIDIA CUDA - 'DmlExecutionProvider', // DirectML (Windows) 'TensorrtExecutionProvider', // NVIDIA TensorRT 'NvTensorRTRTXExecutionProvider', // NVIDIA TensorRT RTX (TRTRTX) 'WebGpuExecutionProvider', // WebGPU From e99ff06794f52e85d2ee70078b97f3e6f2c98d98 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Wed, 1 Apr 2026 21:53:55 -0700 Subject: [PATCH 17/83] Propagate IModel API changes across Python, JS, Rust SDKs and update C# docs (#565) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mirrors the C# changes from #556 across all language bindings: public APIs use the `IModel` interface instead of concrete `Model`/`ModelVariant` types, `GetLatestVersion` moves from `Model` to `Catalog`, and `ModelVariant` becomes an implementation detail. ### IModel interface extended (Python, JS) - Added `info`, `variants`, `selected_variant`/`selectedVariant`, `select_variant`/`selectVariant` to the abstract interface - `ModelVariant` implements these as self-referential (`variants=[self]`, `selected_variant=self`, `select_variant` throws) ```python # Python - IModel now exposes variant info model = catalog.get_model("qwen2.5-0.5b") for v in model.variants: # List[IModel], not List[ModelVariant] print(v.info.name, v.id) model.select_variant(v) # takes IModel, not ModelVariant latest = catalog.get_latest_version(model) # moved from Model to Catalog ``` ### Catalog return types changed (Python, JS) - `list_models()` → `List[IModel]` (was `List[Model]`) - `get_model()` → `Optional[IModel]` (was `Optional[Model]`) - `get_model_variant()` → `Optional[IModel]` (was `Optional[ModelVariant]`) - `get_cached_models()` / `get_loaded_models()` → `List[IModel]` (was `List[ModelVariant]`) ### `get_latest_version` added to Catalog (Python, JS, Rust) Moved from `Model` to `Catalog` since `ModelVariant` lacks sufficient context to implement it. Takes any `IModel` and resolves the latest version by name matching against the variant list.
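A hedged sketch of how the relocated lookup can be used to flag stale cached variants (the update-check loop is illustrative, not part of this change):

```python
# Sketch: detect cached variants with a newer catalog version.
# get_cached_models() returns List[IModel]; get_latest_version() returns
# its input when that is already the latest version.
for cached in catalog.get_cached_models():
    latest = catalog.get_latest_version(cached)
    if latest.id != cached.id:
        print(f"{cached.alias}: {cached.id} -> newer version {latest.id}")
```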
### Rust SDK - Added `Model::info()` (delegates to selected variant) - Added `Catalog::get_latest_version(&self, model: &Arc) -> Result>` ### C# docs and samples updated - README, API docs (`ICatalog`, `IModel`, `Model`, `ModelVariant`) updated to reflect `IModel` return types - `ModelVariant` docs marked as internal - Samples updated to avoid direct `ModelVariant` type references - `GetLatestVersionAsync` added to `ICatalog` docs --------- Co-authored-by: Baiju Meswani Co-authored-by: Nenad Banfic <46795300+nenad1002@users.noreply.github.com> --- sdk/cs/README.md | 8 +- sdk/js/README.md | 5 +- sdk/js/docs/README.md | 50 ++- sdk/js/docs/classes/Catalog.md | 55 ++- sdk/js/docs/classes/Model.md | 65 +-- sdk/js/docs/classes/ModelVariant.md | 397 ------------------- sdk/js/src/catalog.ts | 60 ++- sdk/js/src/{ => detail}/model.ts | 47 +-- sdk/js/src/{ => detail}/modelVariant.ts | 38 +- sdk/js/src/imodel.ts | 15 + sdk/js/src/index.ts | 6 +- sdk/js/test/catalog.test.ts | 95 +++++ sdk/js/test/model.test.ts | 7 +- sdk/js/test/openai/responsesClient.test.ts | 4 +- sdk/python/README.md | 7 +- sdk/python/src/catalog.py | 59 ++- sdk/python/src/{ => detail}/model.py | 61 ++- sdk/python/src/detail/model_data_types.py | 28 +- sdk/python/src/{ => detail}/model_variant.py | 35 +- sdk/python/src/imodel.py | 26 +- sdk/python/test/test_catalog.py | 93 +++++ sdk/rust/README.md | 10 +- sdk/rust/examples/tool_calling.rs | 2 +- sdk/rust/src/catalog.rs | 56 ++- sdk/rust/src/detail/mod.rs | 2 + sdk/rust/src/detail/model.rs | 300 ++++++++++++++ sdk/rust/src/{ => detail}/model_variant.rs | 67 ++-- sdk/rust/src/lib.rs | 5 +- sdk/rust/src/model.rs | 183 --------- sdk/rust/tests/integration/model_test.rs | 9 +- 30 files changed, 967 insertions(+), 828 deletions(-) delete mode 100644 sdk/js/docs/classes/ModelVariant.md rename sdk/js/src/{ => detail}/model.ts (78%) rename sdk/js/src/{ => detail}/modelVariant.ts (82%) rename sdk/python/src/{ => detail}/model.py (71%) rename sdk/python/src/{ => detail}/model_variant.py (84%) create mode 100644 sdk/rust/src/detail/model.rs rename sdk/rust/src/{ => detail}/model_variant.rs (63%) delete mode 100644 sdk/rust/src/model.rs diff --git a/sdk/cs/README.md b/sdk/cs/README.md index ad6f477a..26287217 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -181,11 +181,11 @@ var loaded = await catalog.GetLoadedModelsAsync(); ### Model Lifecycle -Each `Model` wraps one or more `ModelVariant` entries (different quantizations, hardware targets). The SDK auto-selects the best variant, or you can pick one: +Each model may have multiple variants (different quantizations, hardware targets). The SDK auto-selects the best variant, or you can pick one. All models implement the `IModel` interface. 
```csharp // Check and select variants -Console.WriteLine($"Selected: {model.SelectedVariant.Id}"); +Console.WriteLine($"Selected: {model.Id}"); foreach (var v in model.Variants) Console.WriteLine($" {v.Id} (cached: {await v.IsCachedAsync()})"); @@ -389,8 +389,8 @@ Key types: | [`FoundryLocalManager`](./docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md) | Singleton entry point — create, catalog, web service | | [`Configuration`](./docs/api/microsoft.ai.foundry.local.configuration.md) | Initialization settings | | [`ICatalog`](./docs/api/microsoft.ai.foundry.local.icatalog.md) | Model catalog interface | -| [`Model`](./docs/api/microsoft.ai.foundry.local.model.md) | Model with variant selection | -| [`ModelVariant`](./docs/api/microsoft.ai.foundry.local.modelvariant.md) | Specific model variant (hardware/quantization) | +| [`IModel`](./docs/api/microsoft.ai.foundry.local.imodel.md) | Model interface — identity, metadata, lifecycle, variant selection | +| [`Model`](./docs/api/microsoft.ai.foundry.local.model.md) | Model with variant selection (implements `IModel`) | | [`OpenAIChatClient`](./docs/api/microsoft.ai.foundry.local.openaichatclient.md) | Chat completions (sync + streaming) | | [`OpenAIAudioClient`](./docs/api/microsoft.ai.foundry.local.openaiaudioclient.md) | Audio transcription (sync + streaming) | | [`LiveAudioTranscriptionSession`](./docs/api/microsoft.ai.foundry.local.openai.liveaudiotranscriptionsession.md) | Real-time audio streaming session | diff --git a/sdk/js/README.md b/sdk/js/README.md index 9e56ec52..5590ab12 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -148,7 +148,7 @@ const loaded = await catalog.getLoadedModels(); ### Loading and Running Models -Each `Model` can have multiple variants (different quantizations or formats). The SDK automatically selects the best available variant, preferring cached versions. +Each model can have multiple variants (different quantizations or formats). The SDK automatically selects the best available variant, preferring cached versions. All models implement the `IModel` interface. 
```typescript const model = await catalog.getModel('qwen2.5-0.5b'); @@ -259,8 +259,7 @@ Auto-generated class documentation lives in [`docs/classes/`](docs/classes/): - [FoundryLocalManager](docs/classes/FoundryLocalManager.md) — SDK entry point, web service management - [Catalog](docs/classes/Catalog.md) — Model discovery and browsing -- [Model](docs/classes/Model.md) — High-level model with variant selection -- [ModelVariant](docs/classes/ModelVariant.md) — Specific model variant: download, load, inference +- [IModel](docs/README.md#imodel) — Model interface: variant selection, download, load, inference - [ChatClient](docs/classes/ChatClient.md) — Chat completions (sync and streaming) - [AudioClient](docs/classes/AudioClient.md) — Audio transcription (sync and streaming) - [ModelLoadManager](docs/classes/ModelLoadManager.md) — Low-level model loading management diff --git a/sdk/js/docs/README.md b/sdk/js/docs/README.md index 0cb39e1b..b0167b4d 100644 --- a/sdk/js/docs/README.md +++ b/sdk/js/docs/README.md @@ -23,7 +23,6 @@ - [FoundryLocalManager](classes/FoundryLocalManager.md) - [Model](classes/Model.md) - [ModelLoadManager](classes/ModelLoadManager.md) -- [ModelVariant](classes/ModelVariant.md) - [ResponsesClient](classes/ResponsesClient.md) - [ResponsesClientSettings](classes/ResponsesClientSettings.md) @@ -562,6 +561,18 @@ get id(): string; `string` +##### info + +###### Get Signature + +```ts +get info(): ModelInfo; +``` + +###### Returns + +[`ModelInfo`](#modelinfo) + ##### inputModalities ###### Get Signature @@ -622,6 +633,20 @@ get supportsToolCalling(): boolean | null; `boolean` \| `null` +##### variants + +###### Get Signature + +```ts +get variants(): IModel[]; +``` + +Variants of the model that are available. Variants of the model are optimized for different devices. + +###### Returns + +[`IModel`](#imodel)[] + #### Methods ##### createAudioClient() @@ -710,6 +735,29 @@ removeFromCache(): void; `void` +##### selectVariant() + +```ts +selectVariant(variant): void; +``` + +Select a model variant from variants to use for IModel operations. +An IModel from `variants` can also be used directly. + +###### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `variant` | [`IModel`](#imodel) | Model variant to select. Must be one of the variants in `variants`. | + +###### Returns + +`void` + +###### Throws + +Error if variant is not valid for this model. + ##### unload() ```ts diff --git a/sdk/js/docs/classes/Catalog.md b/sdk/js/docs/classes/Catalog.md index 23f7cff3..78ce821c 100644 --- a/sdk/js/docs/classes/Catalog.md +++ b/sdk/js/docs/classes/Catalog.md @@ -47,7 +47,7 @@ The name of the catalog. ### getCachedModels() ```ts -getCachedModels(): Promise; +getCachedModels(): Promise; ``` Retrieves a list of all locally cached model variants. @@ -55,16 +55,39 @@ This method is asynchronous as it may involve file I/O or querying the underlyin #### Returns -`Promise`\<[`ModelVariant`](ModelVariant.md)[]\> +`Promise`\<[`IModel`](../README.md#imodel)[]\> -A Promise that resolves to an array of cached ModelVariant objects. +A Promise that resolves to an array of cached IModel objects. + +*** + +### getLatestVersion() + +```ts +getLatestVersion(modelOrModelVariant): Promise; +``` + +Get the latest version of a model. +This is used to check if a newer version of a model is available in the catalog for download. 
+ +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `modelOrModelVariant` | [`IModel`](../README.md#imodel) | The model to check for the latest version. | + +#### Returns + +`Promise`\<[`IModel`](../README.md#imodel)\> + +The latest version of the model. Will match the input if it is the latest version. *** ### getLoadedModels() ```ts -getLoadedModels(): Promise; +getLoadedModels(): Promise; ``` Retrieves a list of all currently loaded model variants. @@ -73,16 +96,16 @@ the underlying core or an external service, which can be an I/O bound operation. #### Returns -`Promise`\<[`ModelVariant`](ModelVariant.md)[]\> +`Promise`\<[`IModel`](../README.md#imodel)[]\> -A Promise that resolves to an array of loaded ModelVariant objects. +A Promise that resolves to an array of loaded IModel objects. *** ### getModel() ```ts -getModel(alias): Promise; +getModel(alias): Promise; ``` Retrieves a model by its alias. @@ -96,9 +119,9 @@ This method is asynchronous as it may ensure the catalog is up-to-date by fetchi #### Returns -`Promise`\<[`Model`](Model.md)\> +`Promise`\<[`IModel`](../README.md#imodel)\> -A Promise that resolves to the Model object if found, otherwise throws an error. +A Promise that resolves to the IModel object if found, otherwise throws an error. #### Throws @@ -109,7 +132,7 @@ Error - If alias is null, undefined, or empty. ### getModels() ```ts -getModels(): Promise; +getModels(): Promise; ``` Lists all available models in the catalog. @@ -117,19 +140,21 @@ This method is asynchronous as it may fetch the model list from a remote service #### Returns -`Promise`\<[`Model`](Model.md)[]\> +`Promise`\<[`IModel`](../README.md#imodel)[]\> -A Promise that resolves to an array of Model objects. +A Promise that resolves to an array of IModel objects. *** ### getModelVariant() ```ts -getModelVariant(modelId): Promise; +getModelVariant(modelId): Promise; ``` Retrieves a specific model variant by its ID. +NOTE: This will return an IModel with a single variant. Use getModel to get an IModel with all available +variants. This method is asynchronous as it may ensure the catalog is up-to-date by fetching from a remote service. #### Parameters @@ -140,9 +165,9 @@ This method is asynchronous as it may ensure the catalog is up-to-date by fetchi #### Returns -`Promise`\<[`ModelVariant`](ModelVariant.md)\> +`Promise`\<[`IModel`](../README.md#imodel)\> -A Promise that resolves to the ModelVariant object if found, otherwise throws an error. +A Promise that resolves to the IModel object if found, otherwise throws an error. #### Throws diff --git a/sdk/js/docs/classes/Model.md b/sdk/js/docs/classes/Model.md index 0b2dcfa6..f678f873 100644 --- a/sdk/js/docs/classes/Model.md +++ b/sdk/js/docs/classes/Model.md @@ -21,7 +21,7 @@ new Model(variant): Model; | Parameter | Type | | ------ | ------ | -| `variant` | [`ModelVariant`](ModelVariant.md) | +| `variant` | `ModelVariant` | #### Returns @@ -109,6 +109,28 @@ The ID of the selected variant. *** +### info + +#### Get Signature + +```ts +get info(): ModelInfo; +``` + +Gets the ModelInfo of the currently selected variant. + +##### Returns + +[`ModelInfo`](../README.md#modelinfo) + +The ModelInfo object. 
+ +#### Implementation of + +[`IModel`](../README.md#imodel).[`info`](../README.md#info) + +*** + ### inputModalities #### Get Signature @@ -212,43 +234,22 @@ get supportsToolCalling(): boolean | null; #### Get Signature ```ts -get variants(): ModelVariant[]; +get variants(): IModel[]; ``` Gets all available variants for this model. ##### Returns -[`ModelVariant`](ModelVariant.md)[] - -An array of ModelVariant objects. +[`IModel`](../README.md#imodel)[] -## Methods +An array of IModel objects. -### addVariant() - -```ts -addVariant(variant): void; -``` - -Adds a new variant to this model. -Automatically selects the new variant if it is cached and the current one is not. - -#### Parameters - -| Parameter | Type | Description | -| ------ | ------ | ------ | -| `variant` | [`ModelVariant`](ModelVariant.md) | The model variant to add. | - -#### Returns - -`void` - -#### Throws +#### Implementation of -Error - If the argument is not a ModelVariant object, or if the variant's alias does not match the model's alias. +[`IModel`](../README.md#imodel).[`variants`](../README.md#variants) -*** +## Methods ### createAudioClient() @@ -410,7 +411,7 @@ Selects a specific variant. | Parameter | Type | Description | | ------ | ------ | ------ | -| `variant` | [`ModelVariant`](ModelVariant.md) | The model variant to select. | +| `variant` | [`IModel`](../README.md#imodel) | The model variant to select. Must be one of the variants in `variants`. | #### Returns @@ -418,7 +419,11 @@ Selects a specific variant. #### Throws -Error - If the argument is not a ModelVariant object, or if the variant does not belong to this model. +Error - If the variant does not belong to this model. + +#### Implementation of + +[`IModel`](../README.md#imodel).[`selectVariant`](../README.md#selectvariant) *** diff --git a/sdk/js/docs/classes/ModelVariant.md b/sdk/js/docs/classes/ModelVariant.md deleted file mode 100644 index 6f4e5ee8..00000000 --- a/sdk/js/docs/classes/ModelVariant.md +++ /dev/null @@ -1,397 +0,0 @@ -[foundry-local-sdk](../README.md) / ModelVariant - -# Class: ModelVariant - -Represents a specific variant of a model (e.g., a specific quantization or format). -Contains the low-level implementation for interacting with the model. - -## Implements - -- [`IModel`](../README.md#imodel) - -## Constructors - -### Constructor - -```ts -new ModelVariant( - modelInfo, - coreInterop, - modelLoadManager): ModelVariant; -``` - -#### Parameters - -| Parameter | Type | -| ------ | ------ | -| `modelInfo` | [`ModelInfo`](../README.md#modelinfo) | -| `coreInterop` | `CoreInterop` | -| `modelLoadManager` | [`ModelLoadManager`](ModelLoadManager.md) | - -#### Returns - -`ModelVariant` - -## Accessors - -### alias - -#### Get Signature - -```ts -get alias(): string; -``` - -Gets the alias of the model. - -##### Returns - -`string` - -The model alias. 
- -#### Implementation of - -[`IModel`](../README.md#imodel).[`alias`](../README.md#alias) - -*** - -### capabilities - -#### Get Signature - -```ts -get capabilities(): string | null; -``` - -##### Returns - -`string` \| `null` - -#### Implementation of - -[`IModel`](../README.md#imodel).[`capabilities`](../README.md#capabilities) - -*** - -### contextLength - -#### Get Signature - -```ts -get contextLength(): number | null; -``` - -##### Returns - -`number` \| `null` - -#### Implementation of - -[`IModel`](../README.md#imodel).[`contextLength`](../README.md#contextlength) - -*** - -### id - -#### Get Signature - -```ts -get id(): string; -``` - -Gets the unique identifier of the model variant. - -##### Returns - -`string` - -The model ID. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`id`](../README.md#id-3) - -*** - -### inputModalities - -#### Get Signature - -```ts -get inputModalities(): string | null; -``` - -##### Returns - -`string` \| `null` - -#### Implementation of - -[`IModel`](../README.md#imodel).[`inputModalities`](../README.md#inputmodalities) - -*** - -### isCached - -#### Get Signature - -```ts -get isCached(): boolean; -``` - -Checks if the model variant is cached locally. - -##### Returns - -`boolean` - -True if cached, false otherwise. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`isCached`](../README.md#iscached) - -*** - -### modelInfo - -#### Get Signature - -```ts -get modelInfo(): ModelInfo; -``` - -Gets the detailed information about the model variant. - -##### Returns - -[`ModelInfo`](../README.md#modelinfo) - -The ModelInfo object. - -*** - -### outputModalities - -#### Get Signature - -```ts -get outputModalities(): string | null; -``` - -##### Returns - -`string` \| `null` - -#### Implementation of - -[`IModel`](../README.md#imodel).[`outputModalities`](../README.md#outputmodalities) - -*** - -### path - -#### Get Signature - -```ts -get path(): string; -``` - -Gets the local file path of the model variant. - -##### Returns - -`string` - -The local file path. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`path`](../README.md#path) - -*** - -### supportsToolCalling - -#### Get Signature - -```ts -get supportsToolCalling(): boolean | null; -``` - -##### Returns - -`boolean` \| `null` - -#### Implementation of - -[`IModel`](../README.md#imodel).[`supportsToolCalling`](../README.md#supportstoolcalling) - -## Methods - -### createAudioClient() - -```ts -createAudioClient(): AudioClient; -``` - -Creates an AudioClient for interacting with the model via audio operations. - -#### Returns - -[`AudioClient`](AudioClient.md) - -An AudioClient instance. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`createAudioClient`](../README.md#createaudioclient) - -*** - -### createChatClient() - -```ts -createChatClient(): ChatClient; -``` - -Creates a ChatClient for interacting with the model via chat completions. - -#### Returns - -[`ChatClient`](ChatClient.md) - -A ChatClient instance. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`createChatClient`](../README.md#createchatclient) - -*** - -### createResponsesClient() - -```ts -createResponsesClient(baseUrl): ResponsesClient; -``` - -Creates a ResponsesClient for interacting with the model via the Responses API. - -#### Parameters - -| Parameter | Type | Description | -| ------ | ------ | ------ | -| `baseUrl` | `string` | The base URL of the Foundry Local web service. 
| - -#### Returns - -[`ResponsesClient`](ResponsesClient.md) - -A ResponsesClient instance. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`createResponsesClient`](../README.md#createresponsesclient) - -*** - -### download() - -```ts -download(progressCallback?): Promise; -``` - -Downloads the model variant. - -#### Parameters - -| Parameter | Type | Description | -| ------ | ------ | ------ | -| `progressCallback?` | (`progress`) => `void` | Optional callback to report download progress (0-100). | - -#### Returns - -`Promise`\<`void`\> - -#### Implementation of - -[`IModel`](../README.md#imodel).[`download`](../README.md#download) - -*** - -### isLoaded() - -```ts -isLoaded(): Promise; -``` - -Checks if the model variant is loaded in memory. - -#### Returns - -`Promise`\<`boolean`\> - -True if loaded, false otherwise. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`isLoaded`](../README.md#isloaded) - -*** - -### load() - -```ts -load(): Promise; -``` - -Loads the model variant into memory. - -#### Returns - -`Promise`\<`void`\> - -A promise that resolves when the model is loaded. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`load`](../README.md#load) - -*** - -### removeFromCache() - -```ts -removeFromCache(): void; -``` - -Removes the model variant from the local cache. - -#### Returns - -`void` - -#### Implementation of - -[`IModel`](../README.md#imodel).[`removeFromCache`](../README.md#removefromcache) - -*** - -### unload() - -```ts -unload(): Promise; -``` - -Unloads the model variant from memory. - -#### Returns - -`Promise`\<`void`\> - -A promise that resolves when the model is unloaded. - -#### Implementation of - -[`IModel`](../README.md#imodel).[`unload`](../README.md#unload) diff --git a/sdk/js/src/catalog.ts b/sdk/js/src/catalog.ts index 2efba66a..d4331c38 100644 --- a/sdk/js/src/catalog.ts +++ b/sdk/js/src/catalog.ts @@ -1,8 +1,9 @@ import { CoreInterop } from './detail/coreInterop.js'; import { ModelLoadManager } from './detail/modelLoadManager.js'; -import { Model } from './model.js'; -import { ModelVariant } from './modelVariant.js'; +import { Model } from './detail/model.js'; +import { ModelVariant } from './detail/modelVariant.js'; import { ModelInfo } from './types.js'; +import { IModel } from './imodel.js'; /** * Represents a catalog of AI models available in the system. @@ -76,9 +77,9 @@ export class Catalog { /** * Lists all available models in the catalog. * This method is asynchronous as it may fetch the model list from a remote service or perform file I/O. - * @returns A Promise that resolves to an array of Model objects. + * @returns A Promise that resolves to an array of IModel objects. */ - public async getModels(): Promise { + public async getModels(): Promise { await this.updateModels(); return this._models; } @@ -87,10 +88,10 @@ export class Catalog { * Retrieves a model by its alias. * This method is asynchronous as it may ensure the catalog is up-to-date by fetching from a remote service. * @param alias - The alias of the model to retrieve. - * @returns A Promise that resolves to the Model object if found, otherwise throws an error. + * @returns A Promise that resolves to the IModel object if found, otherwise throws an error. * @throws Error - If alias is null, undefined, or empty. 
*/ - public async getModel(alias: string): Promise { + public async getModel(alias: string): Promise { if (typeof alias !== 'string' || alias.trim() === '') { throw new Error('Model alias must be a non-empty string.'); } @@ -105,12 +106,14 @@ export class Catalog { /** * Retrieves a specific model variant by its ID. + * NOTE: This will return an IModel with a single variant. Use getModel to get an IModel with all available + * variants. * This method is asynchronous as it may ensure the catalog is up-to-date by fetching from a remote service. * @param modelId - The unique identifier of the model variant. - * @returns A Promise that resolves to the ModelVariant object if found, otherwise throws an error. + * @returns A Promise that resolves to the IModel object if found, otherwise throws an error. * @throws Error - If modelId is null, undefined, or empty. */ - public async getModelVariant(modelId: string): Promise { + public async getModelVariant(modelId: string): Promise { if (typeof modelId !== 'string' || modelId.trim() === '') { throw new Error('Model ID must be a non-empty string.'); } @@ -126,9 +129,9 @@ export class Catalog { /** * Retrieves a list of all locally cached model variants. * This method is asynchronous as it may involve file I/O or querying the underlying core. - * @returns A Promise that resolves to an array of cached ModelVariant objects. + * @returns A Promise that resolves to an array of cached IModel objects. */ - public async getCachedModels(): Promise { + public async getCachedModels(): Promise { await this.updateModels(); const cachedModelListJson = this.coreInterop.executeCommand("get_cached_models"); let cachedModelIds: string[] = []; @@ -137,7 +140,7 @@ export class Catalog { } catch (error) { throw new Error(`Failed to parse cached model list JSON: ${error}`); } - const cachedModels: Set = new Set(); + const cachedModels: Set = new Set(); for (const modelId of cachedModelIds) { const variant = this.modelIdToModelVariant.get(modelId); @@ -152,9 +155,9 @@ export class Catalog { * Retrieves a list of all currently loaded model variants. * This operation is asynchronous because checking the loaded status may involve querying * the underlying core or an external service, which can be an I/O bound operation. - * @returns A Promise that resolves to an array of loaded ModelVariant objects. + * @returns A Promise that resolves to an array of loaded IModel objects. */ - public async getLoadedModels(): Promise { + public async getLoadedModels(): Promise { await this.updateModels(); let loadedModelIds: string[] = []; try { @@ -162,7 +165,7 @@ export class Catalog { } catch (error) { throw new Error(`Failed to list loaded models: ${error}`); } - const loadedModels: ModelVariant[] = []; + const loadedModels: IModel[] = []; for (const modelId of loadedModelIds) { const variant = this.modelIdToModelVariant.get(modelId); @@ -172,4 +175,33 @@ export class Catalog { } return loadedModels; } + + /** + * Get the latest version of a model. + * This is used to check if a newer version of a model is available in the catalog for download. + * @param modelOrModelVariant - The model to check for the latest version. + * @returns The latest version of the model. Will match the input if it is the latest version. 
+ */ + public async getLatestVersion(modelOrModelVariant: IModel): Promise { + await this.updateModels(); + + // Resolve to the parent Model by alias + const model = this.modelAliasToModel.get(modelOrModelVariant.alias); + if (!model) { + throw new Error(`Model with alias '${modelOrModelVariant.alias}' not found in catalog.`); + } + + // variants are sorted by version, so the first one matching the name is the latest version + const latest = model.variants.find(v => v.info.name === modelOrModelVariant.info.name); + if (!latest) { + throw new Error( + `Internal error. Mismatch between model (alias:${model.alias}) and ` + + `model variant (alias:${modelOrModelVariant.alias}).` + ); + } + + // if input was the latest return the input (could be model or model variant) + // otherwise return the latest model variant + return latest.id === modelOrModelVariant.id ? modelOrModelVariant : latest; + } } \ No newline at end of file diff --git a/sdk/js/src/model.ts b/sdk/js/src/detail/model.ts similarity index 78% rename from sdk/js/src/model.ts rename to sdk/js/src/detail/model.ts index b4f60040..46245ee5 100644 --- a/sdk/js/src/model.ts +++ b/sdk/js/src/detail/model.ts @@ -1,9 +1,10 @@ import { ModelVariant } from './modelVariant.js'; -import { ChatClient } from './openai/chatClient.js'; -import { AudioClient } from './openai/audioClient.js'; -import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; -import { ResponsesClient } from './openai/responsesClient.js'; -import { IModel } from './imodel.js'; +import { ChatClient } from '../openai/chatClient.js'; +import { AudioClient } from '../openai/audioClient.js'; +import { ResponsesClient } from '../openai/responsesClient.js'; +import { LiveAudioTranscriptionSession } from '../openai/liveAudioTranscriptionClient.js'; +import { IModel } from '../imodel.js'; +import { ModelInfo } from '../types.js'; /** * Represents a high-level AI model that may have multiple variants (e.g., quantized versions, different formats). @@ -21,25 +22,14 @@ export class Model implements IModel { this.selectedVariant = variant; } - private validateVariantInput(variant: ModelVariant, caller: string): void { - if (variant === null || variant === undefined) { - throw new Error(`${caller}() requires a ModelVariant object but received ${variant}.`); - } - if (typeof variant !== 'object') { - throw new Error( - `${caller}() requires a ModelVariant object but received ${typeof variant}.` - ); - } - } - /** * Adds a new variant to this model. * Automatically selects the new variant if it is cached and the current one is not. * @param variant - The model variant to add. - * @throws Error - If the argument is not a ModelVariant object, or if the variant's alias does not match the model's alias. + * @throws Error - If the variant's alias does not match the model's alias. + * @internal */ public addVariant(variant: ModelVariant): void { - this.validateVariantInput(variant, 'addVariant'); if (!variant || variant.alias !== this._alias) { throw new Error(`Variant alias "${variant?.alias}" does not match model alias "${this._alias}".`); } @@ -53,14 +43,13 @@ export class Model implements IModel { /** * Selects a specific variant. - * @param variant - The model variant to select. - * @throws Error - If the argument is not a ModelVariant object, or if the variant does not belong to this model. + * @param variant - The model variant to select. Must be one of the variants in `variants`. + * @throws Error - If the variant does not belong to this model. 
*/ - public selectVariant(variant: ModelVariant): void { - this.validateVariantInput(variant, 'selectVariant'); + public selectVariant(variant: IModel): void { const matchingVariant = this._variants.find(v => v.id === variant.id); if (!variant.id || !matchingVariant) { - throw new Error(`Model variant with ID ${variant.id} does not belong to model "${this._alias}".`); + throw new Error(`Input variant was not found in Variants.`); } this.selectedVariant = matchingVariant; } @@ -81,6 +70,14 @@ export class Model implements IModel { return this._alias; } + /** + * Gets the ModelInfo of the currently selected variant. + * @returns The ModelInfo object. + */ + public get info(): ModelInfo { + return this.selectedVariant.info; + } + /** * Checks if the currently selected variant is cached locally. * @returns True if cached, false otherwise. @@ -99,9 +96,9 @@ export class Model implements IModel { /** * Gets all available variants for this model. - * @returns An array of ModelVariant objects. + * @returns An array of IModel objects. */ - public get variants(): ModelVariant[] { + public get variants(): IModel[] { return this._variants; } diff --git a/sdk/js/src/modelVariant.ts b/sdk/js/src/detail/modelVariant.ts similarity index 82% rename from sdk/js/src/modelVariant.ts rename to sdk/js/src/detail/modelVariant.ts index 86c3d3f5..d1c1e20c 100644 --- a/sdk/js/src/modelVariant.ts +++ b/sdk/js/src/detail/modelVariant.ts @@ -1,15 +1,16 @@ -import { CoreInterop } from './detail/coreInterop.js'; -import { ModelLoadManager } from './detail/modelLoadManager.js'; -import { ModelInfo } from './types.js'; -import { ChatClient } from './openai/chatClient.js'; -import { AudioClient } from './openai/audioClient.js'; -import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; -import { ResponsesClient } from './openai/responsesClient.js'; -import { IModel } from './imodel.js'; +import { CoreInterop } from './coreInterop.js'; +import { ModelLoadManager } from './modelLoadManager.js'; +import { ModelInfo } from '../types.js'; +import { ChatClient } from '../openai/chatClient.js'; +import { AudioClient } from '../openai/audioClient.js'; +import { LiveAudioTranscriptionSession } from '../openai/liveAudioTranscriptionClient.js'; +import { ResponsesClient } from '../openai/responsesClient.js'; +import { IModel } from '../imodel.js'; /** * Represents a specific variant of a model (e.g., a specific quantization or format). * Contains the low-level implementation for interacting with the model. + * @internal */ export class ModelVariant implements IModel { private _modelInfo: ModelInfo; @@ -42,10 +43,29 @@ export class ModelVariant implements IModel { * Gets the detailed information about the model variant. * @returns The ModelInfo object. */ - public get modelInfo(): ModelInfo { + public get info(): ModelInfo { return this._modelInfo; } + /** + * A ModelVariant is a single variant, so variants returns itself. + */ + public get variants(): IModel[] { + return [this]; + } + + /** + * SelectVariant is not supported on a ModelVariant. + * Call Catalog.getModel() to get an IModel with all variants available. + * @throws Error always. + */ + public selectVariant(_variant: IModel): void { + throw new Error( + `selectVariant is not supported on a ModelVariant. ` + + `Call Catalog.getModel("${this.alias}") to get an IModel with all variants available.` + ); + } + public get contextLength(): number | null { return this._modelInfo.contextLength ?? 
null; } diff --git a/sdk/js/src/imodel.ts b/sdk/js/src/imodel.ts index 625afdec..7a2f5a2c 100644 --- a/sdk/js/src/imodel.ts +++ b/sdk/js/src/imodel.ts @@ -2,10 +2,12 @@ import { ChatClient } from './openai/chatClient.js'; import { AudioClient } from './openai/audioClient.js'; import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; import { ResponsesClient } from './openai/responsesClient.js'; +import { ModelInfo } from './types.js'; export interface IModel { get id(): string; get alias(): string; + get info(): ModelInfo; get isCached(): boolean; isLoaded(): Promise; @@ -37,4 +39,17 @@ export interface IModel { * @param baseUrl - The base URL of the Foundry Local web service. */ createResponsesClient(baseUrl: string): ResponsesClient; + + /** + * Variants of the model that are available. Variants of the model are optimized for different devices. + */ + get variants(): IModel[]; + + /** + * Select a model variant from variants to use for IModel operations. + * An IModel from `variants` can also be used directly. + * @param variant - Model variant to select. Must be one of the variants in `variants`. + * @throws Error if variant is not valid for this model. + */ + selectVariant(variant: IModel): void; } diff --git a/sdk/js/src/index.ts b/sdk/js/src/index.ts index 57d9fcf7..42b498c3 100644 --- a/sdk/js/src/index.ts +++ b/sdk/js/src/index.ts @@ -1,8 +1,10 @@ export { FoundryLocalManager } from './foundryLocalManager.js'; export type { FoundryLocalConfig } from './configuration.js'; export { Catalog } from './catalog.js'; -export { Model } from './model.js'; -export { ModelVariant } from './modelVariant.js'; +/** @internal */ +export { Model } from './detail/model.js'; +/** @internal */ +export { ModelVariant } from './detail/modelVariant.js'; export type { IModel } from './imodel.js'; export { ChatClient, ChatClientSettings } from './openai/chatClient.js'; export { AudioClient, AudioClientSettings } from './openai/audioClient.js'; diff --git a/sdk/js/test/catalog.test.ts b/sdk/js/test/catalog.test.ts index df47d4f6..8c320723 100644 --- a/sdk/js/test/catalog.test.ts +++ b/sdk/js/test/catalog.test.ts @@ -1,5 +1,7 @@ import { describe, it } from 'mocha'; import { expect } from 'chai'; +import { Catalog } from '../src/catalog.js'; +import { DeviceType, type ModelInfo } from '../src/types.js'; import { getTestManager, TEST_MODEL_ALIAS } from './testUtils.js'; describe('Catalog Tests', () => { @@ -106,4 +108,97 @@ describe('Catalog Tests', () => { expect((error as Error).message).to.include('Available variants:'); } }); + + it('should resolve latest version for model and variant inputs', async function() { + // Mirror the C# test by using synthetic model data sorted by version descending. 
+ const testModelInfos: ModelInfo[] = [ + { + id: 'test-model:3', + name: 'test-model', + version: 3, + alias: 'test-alias', + displayName: 'Test Model', + providerType: 'test', + uri: 'test://model/3', + modelType: 'ONNX', + runtime: { deviceType: DeviceType.CPU, executionProvider: 'CPUExecutionProvider' }, + cached: false, + createdAtUnix: 1700000003 + }, + { + id: 'test-model:2', + name: 'test-model', + version: 2, + alias: 'test-alias', + displayName: 'Test Model', + providerType: 'test', + uri: 'test://model/2', + modelType: 'ONNX', + runtime: { deviceType: DeviceType.CPU, executionProvider: 'CPUExecutionProvider' }, + cached: false, + createdAtUnix: 1700000002 + }, + { + id: 'test-model:1', + name: 'test-model', + version: 1, + alias: 'test-alias', + displayName: 'Test Model', + providerType: 'test', + uri: 'test://model/1', + modelType: 'ONNX', + runtime: { deviceType: DeviceType.CPU, executionProvider: 'CPUExecutionProvider' }, + cached: false, + createdAtUnix: 1700000001 + } + ]; + + const mockCoreInterop = { + executeCommand(command: string): string { + if (command === 'get_catalog_name') { + return 'TestCatalog'; + } + if (command === 'get_model_list') { + return JSON.stringify(testModelInfos); + } + if (command === 'get_cached_models') { + return '[]'; + } + throw new Error(`Unexpected command: ${command}`); + } + } as any; + + const mockLoadManager = { + listLoaded: async () => [] + } as any; + + const catalog = new Catalog(mockCoreInterop, mockLoadManager); + + const model = await catalog.getModel('test-alias'); + expect(model).to.not.be.undefined; + + const variants = model.variants; + expect(variants).to.have.length(3); + + const latestVariant = variants[0]; + const middleVariant = variants[1]; + const oldestVariant = variants[2]; + + expect(latestVariant.id).to.equal('test-model:3'); + expect(middleVariant.id).to.equal('test-model:2'); + expect(oldestVariant.id).to.equal('test-model:1'); + + const result1 = await catalog.getLatestVersion(latestVariant); + expect(result1.id).to.equal('test-model:3'); + + const result2 = await catalog.getLatestVersion(middleVariant); + expect(result2.id).to.equal('test-model:3'); + + const result3 = await catalog.getLatestVersion(oldestVariant); + expect(result3.id).to.equal('test-model:3'); + + model.selectVariant(latestVariant); + const resultFromModel = await catalog.getLatestVersion(model); + expect(resultFromModel).to.equal(model); + }); }); diff --git a/sdk/js/test/model.test.ts b/sdk/js/test/model.test.ts index acc4d6e2..4048d9a1 100644 --- a/sdk/js/test/model.test.ts +++ b/sdk/js/test/model.test.ts @@ -39,7 +39,12 @@ describe('Model Tests', () => { expect(model).to.not.be.undefined; if (!model || !cachedVariant) return; - model.selectVariant(cachedVariant); + // Select the cached variant by finding it in the model's variants + const matchingVariant = model.variants.find(v => v.id === cachedVariant.id); + expect(matchingVariant).to.not.be.undefined; + if (matchingVariant) { + model.selectVariant(matchingVariant); + } // Ensure it's not loaded initially (or unload if it is) if (await model.isLoaded()) { diff --git a/sdk/js/test/openai/responsesClient.test.ts b/sdk/js/test/openai/responsesClient.test.ts index 925a2360..f0dbf4b0 100644 --- a/sdk/js/test/openai/responsesClient.test.ts +++ b/sdk/js/test/openai/responsesClient.test.ts @@ -10,7 +10,7 @@ import type { MessageItem, } from '../../src/types.js'; import { FoundryLocalManager } from '../../src/foundryLocalManager.js'; -import { Model } from '../../src/model.js'; +import type { 
IModel } from '../../src/imodel.js'; describe('ResponsesClient Tests', () => { @@ -371,7 +371,7 @@ describe('ResponsesClient Tests', () => { describe('Integration (requires model + web service)', function() { let manager: FoundryLocalManager; - let model: Model; + let model: IModel; let client: ResponsesClient; let skipped = false; diff --git a/sdk/python/README.md b/sdk/python/README.md index 4c1fb84a..4ee1f9cc 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -184,7 +184,7 @@ loaded = catalog.get_loaded_models() ### Inspecting Model Metadata -`Model` exposes metadata properties from the catalog: +`IModel` exposes metadata properties from the catalog: ```python model = catalog.get_model("phi-3.5-mini") @@ -268,8 +268,7 @@ manager.stop_web_service() | `EpInfo` | Discoverable execution provider info (`name`, `is_registered`) | | `EpDownloadResult` | Result of EP download/registration (`success`, `status`, `registered_eps`, `failed_eps`) | | `Catalog` | Model discovery – listing, lookup by alias/ID, cached/loaded queries | -| `Model` | Groups variants under one alias – select, load, unload, create clients | -| `ModelVariant` | Specific model variant – download, cache, load/unload, create clients | +| `IModel` | Abstract interface for models — identity, metadata, lifecycle, client creation, variant selection | ### OpenAI Clients @@ -282,6 +281,8 @@ manager.stop_web_service() | Class | Description | |---|---| +| `Model` | Alias-level `IModel` implementation used by `Catalog.get_model()` (implementation detail) | +| `ModelVariant` | Specific model variant (implementation detail — implements `IModel`) | | `CoreInterop` | ctypes FFI layer to the native Foundry Local Core library | | `ModelLoadManager` | Load/unload via core interop or external web service | | `ModelInfo` | Pydantic model for catalog entries | diff --git a/sdk/python/src/catalog.py b/sdk/python/src/catalog.py index afccd85b..51f5bd8f 100644 --- a/sdk/python/src/catalog.py +++ b/sdk/python/src/catalog.py @@ -11,8 +11,9 @@ from typing import List, Optional from pydantic import TypeAdapter -from .model import Model -from .model_variant import ModelVariant +from .imodel import IModel +from .detail.model import Model +from .detail.model_variant import ModelVariant from .detail.core_interop import CoreInterop, get_cached_model_ids from .detail.model_data_types import ModelInfo @@ -87,42 +88,72 @@ def _invalidate_cache(self): with self._lock: self._last_fetch = datetime.datetime.min - def list_models(self) -> List[Model]: + def list_models(self) -> List[IModel]: """ List the available models in the catalog. - :return: List of Model instances. + :return: List of IModel instances. """ self._update_models() return list(self._model_alias_to_model.values()) - def get_model(self, model_alias: str) -> Optional[Model]: + def get_model(self, model_alias: str) -> Optional[IModel]: """ Lookup a model by its alias. :param model_alias: Model alias. - :return: Model if found. + :return: IModel if found. """ self._update_models() return self._model_alias_to_model.get(model_alias) - def get_model_variant(self, model_id: str) -> Optional[ModelVariant]: + def get_model_variant(self, model_id: str) -> Optional[IModel]: """ Lookup a model variant by its unique model id. + NOTE: This will return an IModel with a single variant. Use get_model to get an IModel with all available + variants. :param model_id: Model id. - :return: Model variant if found. + :return: IModel if found. 
""" self._update_models() return self._model_id_to_model_variant.get(model_id) - def get_cached_models(self) -> List[ModelVariant]: + def get_latest_version(self, model_or_model_variant: IModel) -> IModel: + """ + Resolve the latest catalog version for the provided model or variant. + + :param model_or_model_variant: IModel to resolve. + :return: Latest catalog version for the same model name. + :raises FoundryLocalException: If the alias or name cannot be resolved. + """ + self._update_models() + + model = self._model_alias_to_model.get(model_or_model_variant.alias) + if model is None: + raise FoundryLocalException( + f"Model with alias '{model_or_model_variant.alias}' not found in catalog." + ) + + latest = next( + (variant for variant in model.variants if variant.info.name == model_or_model_variant.info.name), + None, + ) + if latest is None: + raise FoundryLocalException( + f"Internal error. Mismatch between model (alias:{model.alias}) and " + f"model variant (alias:{model_or_model_variant.alias})." + ) + + return model_or_model_variant if latest.id == model_or_model_variant.id else latest + + def get_cached_models(self) -> List[IModel]: """ Get a list of currently downloaded models from the model cache. - :return: List of ModelVariant instances. + :return: List of IModel instances. """ self._update_models() cached_model_ids = get_cached_model_ids(self._core_interop) - cached_models = [] + cached_models: List[IModel] = [] for model_id in cached_model_ids: model_variant = self._model_id_to_model_variant.get(model_id) if model_variant is not None: @@ -130,15 +161,15 @@ def get_cached_models(self) -> List[ModelVariant]: return cached_models - def get_loaded_models(self) -> List[ModelVariant]: + def get_loaded_models(self) -> List[IModel]: """ Get a list of the currently loaded models. - :return: List of ModelVariant instances. + :return: List of IModel instances. """ self._update_models() loaded_model_ids = self._model_load_manager.list_loaded() - loaded_models = [] + loaded_models: List[IModel] = [] for model_id in loaded_model_ids: model_variant = self._model_id_to_model_variant.get(model_id) diff --git a/sdk/python/src/model.py b/sdk/python/src/detail/model.py similarity index 71% rename from sdk/python/src/model.py rename to sdk/python/src/detail/model.py index f964a820..189920b1 100644 --- a/sdk/python/src/model.py +++ b/sdk/python/src/detail/model.py @@ -7,18 +7,19 @@ import logging from typing import Callable, List, Optional -from .imodel import IModel -from .openai.chat_client import ChatClient -from .openai.audio_client import AudioClient +from ..imodel import IModel +from ..openai.chat_client import ChatClient +from ..openai.audio_client import AudioClient from .model_variant import ModelVariant -from .exception import FoundryLocalException -from .detail.core_interop import CoreInterop +from ..exception import FoundryLocalException +from .core_interop import CoreInterop +from .model_data_types import ModelInfo logger = logging.getLogger(__name__) class Model(IModel): - """A model identified by an alias that groups one or more ``ModelVariant`` instances. + """A model identified by an alias that groups one or more variants. Operations are delegated to the currently selected variant. 
""" @@ -42,47 +43,26 @@ def _add_variant(self, variant: ModelVariant) -> None: if variant.info.cached and not self._selected_variant.info.cached: self._selected_variant = variant - def select_variant(self, variant: ModelVariant) -> None: + def select_variant(self, variant: IModel) -> None: """ - Select a specific model variant by its ModelVariant object. - The selected variant will be used for IModel operations. - - :param variant: ModelVariant to select + Select a specific model variant to use for IModel operations. + An IModel from ``variants`` can also be used directly. + + :param variant: IModel to select. Must be one of the variants in ``variants``. :raises FoundryLocalException: If variant is not valid for this model """ - if variant not in self._variants: + matching = next((v for v in self._variants if v.id == variant.id), None) + if matching is None: raise FoundryLocalException( - f"Model {self._alias} does not have a {variant.id} variant." + "Input variant was not found in Variants." ) - self._selected_variant = variant - - def get_latest_version(self, variant: ModelVariant) -> ModelVariant: - """ - Get the latest version of the specified model variant. - - :param variant: Model variant - :return: ModelVariant for latest version. Same as variant if that is the latest version - :raises FoundryLocalException: If variant is not valid for this model - """ - # Variants are sorted by version, so the first one matching the name is the latest version - for v in self._variants: - if v.info.name == variant.info.name: - return v - - raise FoundryLocalException( - f"Model {self._alias} does not have a {variant.id} variant." - ) + self._selected_variant = matching @property - def variants(self) -> List[ModelVariant]: + def variants(self) -> List[IModel]: """List of all variants for this model.""" - return self._variants.copy() # Return a copy to prevent external modification - - @property - def selected_variant(self) -> ModelVariant: - """Currently selected variant.""" - return self._selected_variant + return list(self._variants) # Return a copy to prevent external modification @property def id(self) -> str: @@ -94,6 +74,11 @@ def alias(self) -> str: """Alias of this model.""" return self._alias + @property + def info(self) -> ModelInfo: + """ModelInfo of the currently selected variant.""" + return self._selected_variant.info + @property def context_length(self) -> Optional[int]: """Maximum context length (in tokens) of the currently selected variant.""" diff --git a/sdk/python/src/detail/model_data_types.py b/sdk/python/src/detail/model_data_types.py index df367b44..46525dc7 100644 --- a/sdk/python/src/detail/model_data_types.py +++ b/sdk/python/src/detail/model_data_types.py @@ -57,24 +57,24 @@ class ModelInfo(BaseModel): name: str = Field(alias="name", description="Model variant name") version: int = Field(alias="version") alias: str = Field(..., description="Alias of the model") - display_name: Optional[str] = Field(alias="displayName") + display_name: Optional[str] = Field(default=None, alias="displayName") provider_type: str = Field(alias="providerType") uri: str = Field(alias="uri") model_type: str = Field(alias="modelType") prompt_template: Optional[PromptTemplate] = Field(default=None, alias="promptTemplate") - publisher: Optional[str] = Field(alias="publisher") + publisher: Optional[str] = Field(default=None, alias="publisher") model_settings: Optional[ModelSettings] = Field(default=None, alias="modelSettings") - license: Optional[str] = Field(alias="license") - license_description: 
Optional[str] = Field(alias="licenseDescription") + license: Optional[str] = Field(default=None, alias="license") + license_description: Optional[str] = Field(default=None, alias="licenseDescription") cached: bool = Field(alias="cached") - task: Optional[str] = Field(alias="task") - runtime: Optional[Runtime] = Field(alias="runtime") - file_size_mb: Optional[int] = Field(alias="fileSizeMb") - supports_tool_calling: Optional[bool] = Field(alias="supportsToolCalling") - max_output_tokens: Optional[int] = Field(alias="maxOutputTokens") - min_fl_version: Optional[str] = Field(alias="minFLVersion") + task: Optional[str] = Field(default=None, alias="task") + runtime: Optional[Runtime] = Field(default=None, alias="runtime") + file_size_mb: Optional[int] = Field(default=None, alias="fileSizeMb") + supports_tool_calling: Optional[bool] = Field(default=None, alias="supportsToolCalling") + max_output_tokens: Optional[int] = Field(default=None, alias="maxOutputTokens") + min_fl_version: Optional[str] = Field(default=None, alias="minFLVersion") created_at_unix: int = Field(alias="createdAt") - context_length: Optional[int] = Field(alias="contextLength") - input_modalities: Optional[str] = Field(alias="inputModalities") - output_modalities: Optional[str] = Field(alias="outputModalities") - capabilities: Optional[str] = Field(alias="capabilities") + context_length: Optional[int] = Field(default=None, alias="contextLength") + input_modalities: Optional[str] = Field(default=None, alias="inputModalities") + output_modalities: Optional[str] = Field(default=None, alias="outputModalities") + capabilities: Optional[str] = Field(default=None, alias="capabilities") diff --git a/sdk/python/src/model_variant.py b/sdk/python/src/detail/model_variant.py similarity index 84% rename from sdk/python/src/model_variant.py rename to sdk/python/src/detail/model_variant.py index 1c7ad717..a5ac02d4 100644 --- a/sdk/python/src/model_variant.py +++ b/sdk/python/src/detail/model_variant.py @@ -5,17 +5,17 @@ from __future__ import annotations import logging -from typing import Callable, Optional +from typing import Callable, List, Optional -from .imodel import IModel -from .exception import FoundryLocalException +from ..imodel import IModel +from ..exception import FoundryLocalException -from .detail.core_interop import CoreInterop, InteropRequest -from .detail.model_data_types import ModelInfo -from .detail.core_interop import get_cached_model_ids -from .detail.model_load_manager import ModelLoadManager -from .openai.audio_client import AudioClient -from .openai.chat_client import ChatClient +from .core_interop import CoreInterop, InteropRequest +from .model_data_types import ModelInfo +from .core_interop import get_cached_model_ids +from .model_load_manager import ModelLoadManager +from ..openai.audio_client import AudioClient +from ..openai.chat_client import ChatClient logger = logging.getLogger(__name__) @@ -62,6 +62,23 @@ def context_length(self) -> Optional[int]: """Maximum context length (in tokens) supported by this variant, or ``None`` if unknown.""" return self._model_info.context_length + @property + def variants(self) -> List[IModel]: + """A ModelVariant is a single variant, so variants returns itself.""" + return [self] + + def select_variant(self, variant: IModel) -> None: + """SelectVariant is not supported on a ModelVariant. + + Call ``Catalog.get_model()`` to get an IModel with all variants available. + + :raises FoundryLocalException: Always. 
+ """ + raise FoundryLocalException( + f"select_variant is not supported on a ModelVariant. " + f'Call Catalog.get_model("{self._alias}") to get an IModel with all variants available.' + ) + @property def input_modalities(self) -> Optional[str]: """Comma-separated input modalities (e.g. ``"text,image"``), or ``None`` if unknown.""" diff --git a/sdk/python/src/imodel.py b/sdk/python/src/imodel.py index 7f83d1cc..8237aeb4 100644 --- a/sdk/python/src/imodel.py +++ b/sdk/python/src/imodel.py @@ -5,10 +5,11 @@ from __future__ import annotations from abc import ABC, abstractmethod -from typing import Callable, Optional +from typing import Callable, List, Optional from .openai.chat_client import ChatClient from .openai.audio_client import AudioClient +from .detail.model_data_types import ModelInfo class IModel(ABC): """Abstract interface for a model that can be downloaded, loaded, and used for inference.""" @@ -25,6 +26,12 @@ def alias(self) -> str: """Model alias.""" pass + @property + @abstractmethod + def info(self) -> ModelInfo: + """Full model metadata.""" + pass + @property @abstractmethod def is_cached(self) -> bool: @@ -119,3 +126,20 @@ def get_audio_client(self) -> AudioClient: :return: AudioClient instance. """ pass + + @property + @abstractmethod + def variants(self) -> List['IModel']: + """Variants of the model that are available. Variants of the model are optimized for different devices.""" + pass + + @abstractmethod + def select_variant(self, variant: 'IModel') -> None: + """ + Select a model variant from ``variants`` to use for IModel operations. + An IModel from ``variants`` can also be used directly. + + :param variant: Model variant to select. Must be one of the variants in ``variants``. + :raises FoundryLocalException: If variant is not valid for this model. 
+ """ + pass diff --git a/sdk/python/test/test_catalog.py b/sdk/python/test/test_catalog.py index aeb39c20..2e5968cc 100644 --- a/sdk/python/test/test_catalog.py +++ b/sdk/python/test/test_catalog.py @@ -6,6 +6,11 @@ from __future__ import annotations +import json + +from foundry_local_sdk.catalog import Catalog +from foundry_local_sdk.detail.core_interop import Response + from .conftest import TEST_MODEL_ALIAS @@ -72,3 +77,91 @@ def test_should_return_none_for_unknown_variant_id(self, catalog): """get_model_variant() with a random ID should return None.""" result = catalog.get_model_variant("definitely-not-a-real-model-id-12345") assert result is None + + def test_should_resolve_latest_version_for_model_and_variant_inputs(self): + """get_latest_version() should resolve latest variant and preserve Model input when already latest.""" + + test_model_infos = [ + { + "id": "test-model:3", + "name": "test-model", + "version": 3, + "alias": "test-alias", + "displayName": "Test Model", + "providerType": "test", + "uri": "test://model/3", + "modelType": "ONNX", + "runtime": {"deviceType": "CPU", "executionProvider": "CPUExecutionProvider"}, + "cached": False, + "createdAt": 1700000003, + }, + { + "id": "test-model:2", + "name": "test-model", + "version": 2, + "alias": "test-alias", + "displayName": "Test Model", + "providerType": "test", + "uri": "test://model/2", + "modelType": "ONNX", + "runtime": {"deviceType": "CPU", "executionProvider": "CPUExecutionProvider"}, + "cached": False, + "createdAt": 1700000002, + }, + { + "id": "test-model:1", + "name": "test-model", + "version": 1, + "alias": "test-alias", + "displayName": "Test Model", + "providerType": "test", + "uri": "test://model/1", + "modelType": "ONNX", + "runtime": {"deviceType": "CPU", "executionProvider": "CPUExecutionProvider"}, + "cached": False, + "createdAt": 1700000001, + }, + ] + + class _MockCoreInterop: + def execute_command(self, command_name, command_input=None): + if command_name == "get_catalog_name": + return Response(data="TestCatalog", error=None) + if command_name == "get_model_list": + return Response(data=json.dumps(test_model_infos), error=None) + if command_name == "get_cached_models": + return Response(data="[]", error=None) + return Response(data=None, error=f"Unexpected command: {command_name}") + + class _MockModelLoadManager: + def list_loaded(self): + return [] + + catalog = Catalog(_MockModelLoadManager(), _MockCoreInterop()) + + model = catalog.get_model("test-alias") + assert model is not None + + variants = model.variants + assert len(variants) == 3 + + latest_variant = variants[0] + middle_variant = variants[1] + oldest_variant = variants[2] + + assert latest_variant.id == "test-model:3" + assert middle_variant.id == "test-model:2" + assert oldest_variant.id == "test-model:1" + + result1 = catalog.get_latest_version(latest_variant) + assert result1.id == "test-model:3" + + result2 = catalog.get_latest_version(middle_variant) + assert result2.id == "test-model:3" + + result3 = catalog.get_latest_version(oldest_variant) + assert result3.id == "test-model:3" + + model.select_variant(latest_variant) + result4 = catalog.get_latest_version(model) + assert result4 is model diff --git a/sdk/rust/README.md b/sdk/rust/README.md index aa848b03..6bcb9884 100644 --- a/sdk/rust/README.md +++ b/sdk/rust/README.md @@ -177,15 +177,15 @@ let loaded = catalog.get_loaded_models().await?; ### Model Lifecycle -Each `Model` wraps one or more `ModelVariant` entries (different quantizations, hardware targets). 
The SDK auto-selects the best available variant, preferring cached versions. +Each model may have multiple variants (different quantizations, hardware targets). The SDK auto-selects the best available variant, preferring cached versions. All models implement the `IModel` trait. ```rust let model = catalog.get_model("phi-3.5-mini").await?; // Inspect available variants -println!("Selected: {}", model.selected_variant().id()); +println!("Selected: {}", model.id()); for v in model.variants() { - println!(" {} (cached: {})", v.id(), v.info().cached); + println!(" {} (info.cached: {})", v.id(), v.info().cached); } ``` @@ -193,10 +193,10 @@ Download, load, and unload: ```rust // Download with progress reporting -model.download(Some(|progress: &str| { +model.download(Some(Box::new(|progress: &str| { print!("\r{progress}"); std::io::Write::flush(&mut std::io::stdout()).ok(); -})).await?; +}))).await?; // Load into memory model.load().await?; diff --git a/sdk/rust/examples/tool_calling.rs b/sdk/rust/examples/tool_calling.rs index 192b9ff0..fecf6bc5 100644 --- a/sdk/rust/examples/tool_calling.rs +++ b/sdk/rust/examples/tool_calling.rs @@ -61,7 +61,7 @@ async fn main() -> Result<()> { let models = manager.catalog().get_models().await?; let model = models .iter() - .find(|m| m.selected_variant().info().supports_tool_calling == Some(true)) + .find(|m| m.info().supports_tool_calling == Some(true)) .or_else(|| models.first()) .expect("No models available"); diff --git a/sdk/rust/src/catalog.rs b/sdk/rust/src/catalog.rs index d9d5bb51..26a737e9 100644 --- a/sdk/rust/src/catalog.rs +++ b/sdk/rust/src/catalog.rs @@ -6,10 +6,10 @@ use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; use crate::detail::core_interop::CoreInterop; +use crate::detail::model::Model; +use crate::detail::model_variant::ModelVariant; use crate::detail::ModelLoadManager; use crate::error::{FoundryLocalError, Result}; -use crate::model::Model; -use crate::model_variant::ModelVariant; use crate::types::ModelInfo; /// How long the catalog cache remains valid before a refresh. @@ -39,7 +39,7 @@ impl CacheInvalidator { /// All mutable catalog data behind a single lock to prevent split-brain reads. struct CatalogState { models_by_alias: HashMap>, - variants_by_id: HashMap>, + variants_by_id: HashMap>, last_refresh: Option, } @@ -148,7 +148,11 @@ impl Catalog { } /// Look up a specific model variant by its unique id. - pub async fn get_model_variant(&self, id: &str) -> Result> { + /// + /// NOTE: This will return a `Model` representing a single variant. Use + /// [`get_model`](Catalog::get_model) to obtain a `Model` with all + /// available variants. + pub async fn get_model_variant(&self, id: &str) -> Result> { if id.trim().is_empty() { return Err(FoundryLocalError::Validation { reason: "Variant id must be a non-empty string".into(), @@ -165,7 +169,7 @@ impl Catalog { } /// Return only the model variants that are currently cached on disk. - pub async fn get_cached_models(&self) -> Result>> { + pub async fn get_cached_models(&self) -> Result>> { self.update_models().await?; let raw = self .core @@ -183,7 +187,7 @@ impl Catalog { } /// Return model variants that are currently loaded into memory. 
- pub async fn get_loaded_models(&self) -> Result>> { + pub async fn get_loaded_models(&self) -> Result>> { self.update_models().await?; let loaded_ids = self.model_load_manager.list_loaded().await?; let s = self.lock_state()?; @@ -193,6 +197,36 @@ impl Catalog { .collect()) } + /// Resolve the latest catalog version for the provided model or variant. + pub async fn get_latest_version(&self, model_or_model_variant: &Model) -> Result> { + self.update_models().await?; + let s = self.lock_state()?; + + let model = s + .models_by_alias + .get(model_or_model_variant.alias()) + .ok_or_else(|| FoundryLocalError::ModelOperation { + reason: format!( + "Model with alias '{}' not found in catalog.", + model_or_model_variant.alias() + ), + })?; + + let latest = model + .variants() + .into_iter() + .find(|variant| variant.info().name == model_or_model_variant.info().name) + .ok_or_else(|| FoundryLocalError::Internal { + reason: format!( + "Mismatch between model (alias:{}) and model variant (alias:{}).", + model.alias(), + model_or_model_variant.alias() + ), + })?; + + Ok(latest) + } + async fn force_refresh(&self) -> Result<()> { let raw = self .core @@ -216,22 +250,22 @@ impl Catalog { }; let mut alias_map_build: HashMap = HashMap::new(); - let mut id_map: HashMap> = HashMap::new(); + let mut id_map: HashMap> = HashMap::new(); for info in infos { let id = info.id.clone(); let alias = info.alias.clone(); - let variant = Arc::new(ModelVariant::new( + let variant = ModelVariant::new( info, Arc::clone(&self.core), Arc::clone(&self.model_load_manager), self.invalidator.clone(), - )); - id_map.insert(id, Arc::clone(&variant)); + ); + id_map.insert(id, Arc::new(Model::from_variant(variant.clone()))); alias_map_build .entry(alias) - .or_insert_with_key(|a| Model::new(a.clone(), Arc::clone(&self.core))) + .or_insert_with_key(|a| Model::from_group(a.clone(), Arc::clone(&self.core))) .add_variant(variant); } diff --git a/sdk/rust/src/detail/mod.rs b/sdk/rust/src/detail/mod.rs index c7f2fd32..b153ed5b 100644 --- a/sdk/rust/src/detail/mod.rs +++ b/sdk/rust/src/detail/mod.rs @@ -1,4 +1,6 @@ pub(crate) mod core_interop; +pub(crate) mod model; mod model_load_manager; +pub(crate) mod model_variant; pub use self::model_load_manager::ModelLoadManager; diff --git a/sdk/rust/src/detail/model.rs b/sdk/rust/src/detail/model.rs new file mode 100644 index 00000000..196ebe35 --- /dev/null +++ b/sdk/rust/src/detail/model.rs @@ -0,0 +1,300 @@ +//! Public model type backed by an internal enum. +//! +//! Users interact solely with [`Model`]. The internal representation +//! distinguishes between a single variant and a group of variants sharing +//! the same alias, but callers never need to know which kind they hold. + +use std::fmt; +use std::path::PathBuf; +use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; +use std::sync::Arc; + +use super::core_interop::CoreInterop; +use super::model_variant::ModelVariant; +use crate::error::{FoundryLocalError, Result}; +use crate::openai::AudioClient; +use crate::openai::ChatClient; +use crate::types::ModelInfo; + +/// The public model type. +/// +/// A `Model` may represent either a group of variants (as returned by +/// [`Catalog::get_model`](crate::Catalog::get_model)) or a single variant (as +/// returned by [`Catalog::get_model_variant`](crate::Catalog::get_model_variant) +/// or [`Model::variants`]). +/// +/// When a `Model` groups multiple variants, operations are forwarded to +/// the currently selected variant. 
Use [`variants`](Model::variants) to +/// inspect the available variants and [`select_variant`](Model::select_variant) +/// to change the selection. +pub struct Model { + inner: ModelKind, +} + +#[allow(clippy::large_enum_variant)] +enum ModelKind { + /// A single model variant (from `get_model_variant` or `variants()`). + ModelVariant(ModelVariant), + /// A group of variants sharing the same alias (from `get_model`). + Model { + alias: String, + core: Arc, + variants: Vec, + selected: AtomicUsize, + }, +} + +impl Clone for Model { + fn clone(&self) -> Self { + Self { + inner: match &self.inner { + ModelKind::ModelVariant(v) => ModelKind::ModelVariant(v.clone()), + ModelKind::Model { + alias, + core, + variants, + selected, + } => ModelKind::Model { + alias: alias.clone(), + core: Arc::clone(core), + variants: variants.clone(), + selected: AtomicUsize::new(selected.load(Relaxed)), + }, + }, + } + } +} + +impl fmt::Debug for Model { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.inner { + ModelKind::ModelVariant(v) => f + .debug_struct("Model::ModelVariant") + .field("id", &v.id()) + .field("alias", &v.alias()) + .finish(), + ModelKind::Model { + alias, + variants, + selected, + .. + } => f + .debug_struct("Model::Model") + .field("alias", alias) + .field("id", &variants[selected.load(Relaxed)].id()) + .field("variants_count", &variants.len()) + .field("selected_index", &selected.load(Relaxed)) + .finish(), + } + } +} + +// ── Construction (crate-internal) ──────────────────────────────────────────── + +impl Model { + /// Create a `Model` wrapping a single variant. + pub(crate) fn from_variant(variant: ModelVariant) -> Self { + Self { + inner: ModelKind::ModelVariant(variant), + } + } + + /// Create a `Model` grouping multiple variants under one alias. + pub(crate) fn from_group(alias: String, core: Arc) -> Self { + Self { + inner: ModelKind::Model { + alias, + core, + variants: Vec::new(), + selected: AtomicUsize::new(0), + }, + } + } + + /// Add a variant to a group. Panics if called on a `ModelVariant` kind. + /// + /// If the new variant is cached and the current selection is not, the new + /// variant becomes the selected one. + pub(crate) fn add_variant(&mut self, variant: ModelVariant) { + match &mut self.inner { + ModelKind::Model { + variants, selected, .. + } => { + variants.push(variant); + let new_idx = variants.len() - 1; + let current = selected.load(Relaxed); + if variants[new_idx].info_ref().cached && !variants[current].info_ref().cached { + selected.store(new_idx, Relaxed); + } + } + ModelKind::ModelVariant(_) => { + panic!("add_variant called on a single-variant Model"); + } + } + } +} + +// ── Private helpers ────────────────────────────────────────────────────────── + +impl Model { + fn selected_variant(&self) -> &ModelVariant { + match &self.inner { + ModelKind::ModelVariant(v) => v, + ModelKind::Model { + variants, selected, .. + } => &variants[selected.load(Relaxed)], + } + } +} + +// ── Public API ─────────────────────────────────────────────────────────────── + +impl Model { + /// Unique identifier of the (selected) variant. + pub fn id(&self) -> &str { + self.selected_variant().id() + } + + /// Alias shared by all variants of this model. + pub fn alias(&self) -> &str { + match &self.inner { + ModelKind::ModelVariant(v) => v.alias(), + ModelKind::Model { alias, .. } => alias, + } + } + + /// Full catalog metadata for the (selected) variant. 
+ pub fn info(&self) -> &ModelInfo { + self.selected_variant().info() + } + + /// Maximum context length (in tokens), or `None` if unknown. + pub fn context_length(&self) -> Option { + self.selected_variant().info().context_length + } + + /// Comma-separated input modalities (e.g. `"text,image"`), or `None`. + pub fn input_modalities(&self) -> Option<&str> { + self.selected_variant().info().input_modalities.as_deref() + } + + /// Comma-separated output modalities (e.g. `"text"`), or `None`. + pub fn output_modalities(&self) -> Option<&str> { + self.selected_variant().info().output_modalities.as_deref() + } + + /// Capability tags (e.g. `"reasoning"`), or `None`. + pub fn capabilities(&self) -> Option<&str> { + self.selected_variant().info().capabilities.as_deref() + } + + /// Whether the model supports tool/function calling, or `None`. + pub fn supports_tool_calling(&self) -> Option { + self.selected_variant().info().supports_tool_calling + } + + /// Whether the (selected) variant is cached on disk. + pub async fn is_cached(&self) -> Result { + self.selected_variant().is_cached().await + } + + /// Whether the (selected) variant is loaded into memory. + pub async fn is_loaded(&self) -> Result { + self.selected_variant().is_loaded().await + } + + /// Download the (selected) variant. If `progress` is provided it + /// receives human-readable progress strings as they arrive. + pub async fn download(&self, progress: Option) -> Result<()> + where + F: FnMut(&str) + Send + 'static, + { + self.selected_variant().download(progress).await + } + + /// Return the local file-system path of the (selected) variant. + pub async fn path(&self) -> Result { + self.selected_variant().path().await + } + + /// Load the (selected) variant into memory. + pub async fn load(&self) -> Result<()> { + self.selected_variant().load().await + } + + /// Unload the (selected) variant from memory. + pub async fn unload(&self) -> Result { + self.selected_variant().unload().await + } + + /// Remove the (selected) variant from the local cache. + pub async fn remove_from_cache(&self) -> Result { + self.selected_variant().remove_from_cache().await + } + + /// Create a [`ChatClient`] bound to the (selected) variant. + pub fn create_chat_client(&self) -> ChatClient { + self.selected_variant().create_chat_client() + } + + /// Create an [`AudioClient`] bound to the (selected) variant. + pub fn create_audio_client(&self) -> AudioClient { + self.selected_variant().create_audio_client() + } + + /// Available variants of this model. + /// + /// For a single-variant model (e.g. from + /// [`Catalog::get_model_variant`](crate::Catalog::get_model_variant)), + /// this returns a single-element list containing itself. + pub fn variants(&self) -> Vec> { + match &self.inner { + ModelKind::ModelVariant(v) => { + vec![Arc::new(Model::from_variant(v.clone()))] + } + ModelKind::Model { variants, .. } => variants + .iter() + .map(|v| Arc::new(Model::from_variant(v.clone()))) + .collect(), + } + } + + /// Select a variant by its unique id. + /// + /// # Errors + /// + /// Returns an error if no variant with the given id exists. + /// For single-variant models this always returns an error — use + /// [`Catalog::get_model`](crate::Catalog::get_model) to obtain a model + /// with all variants available. + pub fn select_variant(&self, id: &str) -> Result<()> { + match &self.inner { + ModelKind::ModelVariant(v) => Err(FoundryLocalError::ModelOperation { + reason: format!( + "select_variant is not supported on a single variant. 
\ + Call Catalog::get_model(\"{}\") to get a model with all variants available.", + v.alias() + ), + }), + ModelKind::Model { + variants, + selected, + alias, + .. + } => match variants.iter().position(|v| v.id() == id) { + Some(pos) => { + selected.store(pos, Relaxed); + Ok(()) + } + None => { + let available: Vec<&str> = variants.iter().map(|v| v.id()).collect(); + Err(FoundryLocalError::ModelOperation { + reason: format!( + "Variant '{id}' not found for model '{alias}'. Available: {available:?}", + ), + }) + } + }, + } + } +} diff --git a/sdk/rust/src/model_variant.rs b/sdk/rust/src/detail/model_variant.rs similarity index 63% rename from sdk/rust/src/model_variant.rs rename to sdk/rust/src/detail/model_variant.rs index 760306f6..636c5d5b 100644 --- a/sdk/rust/src/model_variant.rs +++ b/sdk/rust/src/detail/model_variant.rs @@ -1,4 +1,7 @@ //! A single model variant backed by [`ModelInfo`]. +//! +//! This type is an implementation detail. Public APIs return +//! [`Arc`](crate::Model) instead. use std::fmt; use std::path::PathBuf; @@ -6,9 +9,9 @@ use std::sync::Arc; use serde_json::json; +use super::core_interop::CoreInterop; +use super::ModelLoadManager; use crate::catalog::CacheInvalidator; -use crate::detail::core_interop::CoreInterop; -use crate::detail::ModelLoadManager; use crate::error::Result; use crate::openai::AudioClient; use crate::openai::ChatClient; @@ -16,8 +19,10 @@ use crate::types::ModelInfo; /// Represents one specific variant of a model (a particular id within an alias /// group). +/// +/// This is an implementation detail — callers should use [`Model`](crate::Model). #[derive(Clone)] -pub struct ModelVariant { +pub(crate) struct ModelVariant { info: ModelInfo, core: Arc, model_load_manager: Arc, @@ -27,8 +32,8 @@ pub struct ModelVariant { impl fmt::Debug for ModelVariant { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ModelVariant") - .field("id", &self.id()) - .field("alias", &self.alias()) + .field("id", &self.info.id) + .field("alias", &self.info.alias) .finish() } } @@ -48,28 +53,23 @@ impl ModelVariant { } } - /// The full [`ModelInfo`] metadata for this variant. - pub fn info(&self) -> &ModelInfo { - &self.info - } - - /// Unique identifier. - pub fn id(&self) -> &str { + pub(crate) fn id(&self) -> &str { &self.info.id } - /// Alias shared with sibling variants. - pub fn alias(&self) -> &str { + pub(crate) fn alias(&self) -> &str { &self.info.alias } - /// Check whether the variant is cached locally by querying the native - /// core. - /// - /// Each call performs a full IPC round-trip. When checking many variants, - /// prefer [`Catalog::get_cached_models`] which fetches the full list in a - /// single call. - pub async fn is_cached(&self) -> Result { + pub(crate) fn info(&self) -> &ModelInfo { + &self.info + } + + pub(crate) fn info_ref(&self) -> &ModelInfo { + &self.info + } + + pub(crate) async fn is_cached(&self) -> Result { let raw = self .core .execute_command_async("get_cached_models".into(), None) @@ -81,15 +81,12 @@ impl ModelVariant { Ok(cached_ids.iter().any(|id| id == &self.info.id)) } - /// Check whether the variant is currently loaded into memory. - pub async fn is_loaded(&self) -> Result { + pub(crate) async fn is_loaded(&self) -> Result { let loaded = self.model_load_manager.list_loaded().await?; Ok(loaded.iter().any(|id| id == &self.info.id)) } - /// Download the model variant. If `progress` is provided, it receives - /// human-readable progress strings as the download proceeds. 
- pub async fn download(&self, progress: Option) -> Result<()> + pub(crate) async fn download(&self, progress: Option) -> Result<()> where F: FnMut(&str) + Send + 'static, { @@ -110,8 +107,7 @@ impl ModelVariant { Ok(()) } - /// Return the local file-system path where this variant is stored. - pub async fn path(&self) -> Result { + pub(crate) async fn path(&self) -> Result { let params = json!({ "Params": { "Model": self.info.id } }); let path_str = self .core @@ -120,18 +116,15 @@ impl ModelVariant { Ok(PathBuf::from(path_str)) } - /// Load the variant into memory. - pub async fn load(&self) -> Result<()> { + pub(crate) async fn load(&self) -> Result<()> { self.model_load_manager.load(&self.info.id).await } - /// Unload the variant from memory. - pub async fn unload(&self) -> Result { + pub(crate) async fn unload(&self) -> Result { self.model_load_manager.unload(&self.info.id).await } - /// Remove the variant from the local cache. - pub async fn remove_from_cache(&self) -> Result { + pub(crate) async fn remove_from_cache(&self) -> Result { let params = json!({ "Params": { "Model": self.info.id } }); let result = self .core @@ -141,13 +134,11 @@ impl ModelVariant { Ok(result) } - /// Create a [`ChatClient`] bound to this variant. - pub fn create_chat_client(&self) -> ChatClient { + pub(crate) fn create_chat_client(&self) -> ChatClient { ChatClient::new(&self.info.id, Arc::clone(&self.core)) } - /// Create an [`AudioClient`] bound to this variant. - pub fn create_audio_client(&self) -> AudioClient { + pub(crate) fn create_audio_client(&self) -> AudioClient { AudioClient::new(&self.info.id, Arc::clone(&self.core)) } } diff --git a/sdk/rust/src/lib.rs b/sdk/rust/src/lib.rs index c12feef1..872a875c 100644 --- a/sdk/rust/src/lib.rs +++ b/sdk/rust/src/lib.rs @@ -6,8 +6,6 @@ mod catalog; mod configuration; mod error; mod foundry_local_manager; -mod model; -mod model_variant; mod types; pub(crate) mod detail; @@ -15,10 +13,9 @@ pub mod openai; pub use self::catalog::Catalog; pub use self::configuration::{FoundryLocalConfig, LogLevel, Logger}; +pub use self::detail::model::Model; pub use self::error::FoundryLocalError; pub use self::foundry_local_manager::FoundryLocalManager; -pub use self::model::Model; -pub use self::model_variant::ModelVariant; pub use self::types::{ ChatResponseFormat, ChatToolChoice, DeviceType, EpDownloadResult, EpInfo, ModelInfo, ModelSettings, Parameter, PromptTemplate, Runtime, diff --git a/sdk/rust/src/model.rs b/sdk/rust/src/model.rs deleted file mode 100644 index 9d08f9a5..00000000 --- a/sdk/rust/src/model.rs +++ /dev/null @@ -1,183 +0,0 @@ -//! High-level model abstraction that wraps one or more [`ModelVariant`]s -//! sharing the same alias. - -use std::fmt; -use std::path::PathBuf; -use std::sync::atomic::{AtomicUsize, Ordering::Relaxed}; -use std::sync::Arc; - -use crate::detail::core_interop::CoreInterop; -use crate::error::{FoundryLocalError, Result}; -use crate::model_variant::ModelVariant; -use crate::openai::AudioClient; -use crate::openai::ChatClient; - -/// A model groups one or more [`ModelVariant`]s that share the same alias. -/// -/// By default the variant that is already cached locally is selected. You -/// can override the selection with [`Model::select_variant`]. 
-pub struct Model { - alias: String, - core: Arc, - variants: Vec>, - selected_index: AtomicUsize, -} - -impl Clone for Model { - fn clone(&self) -> Self { - Self { - alias: self.alias.clone(), - core: Arc::clone(&self.core), - variants: self.variants.clone(), - selected_index: AtomicUsize::new(self.selected_index.load(Relaxed)), - } - } -} - -impl fmt::Debug for Model { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Model") - .field("alias", &self.alias()) - .field("id", &self.id()) - .field("variants_count", &self.variants.len()) - .field("selected_index", &self.selected_index.load(Relaxed)) - .finish() - } -} - -impl Model { - pub(crate) fn new(alias: String, core: Arc) -> Self { - Self { - alias, - core, - variants: Vec::new(), - selected_index: AtomicUsize::new(0), - } - } - - /// Add a variant. If the new variant is cached and the current selection - /// is not, the new variant becomes the selected one. - pub(crate) fn add_variant(&mut self, variant: Arc) { - self.variants.push(variant); - let new_idx = self.variants.len() - 1; - let current = self.selected_index.load(Relaxed); - - // Prefer a cached variant over a non-cached one. - if self.variants[new_idx].info().cached && !self.variants[current].info().cached { - self.selected_index.store(new_idx, Relaxed); - } - } - - /// Select a variant by its unique id. - pub fn select_variant(&self, id: &str) -> Result<()> { - match self.variants.iter().position(|v| v.id() == id) { - Some(pos) => { - self.selected_index.store(pos, Relaxed); - Ok(()) - } - None => { - let available: Vec<&str> = self.variants.iter().map(|v| v.id()).collect(); - Err(FoundryLocalError::ModelOperation { - reason: format!( - "Variant '{id}' not found for model '{}'. Available: {available:?}", - self.alias - ), - }) - } - } - } - - /// Returns a reference to the currently selected variant. - pub fn selected_variant(&self) -> &ModelVariant { - &self.variants[self.selected_index.load(Relaxed)] - } - - /// Returns all variants that belong to this model. - pub fn variants(&self) -> &[Arc] { - &self.variants - } - - /// Alias shared by all variants in this model. - pub fn alias(&self) -> &str { - &self.alias - } - - /// Unique identifier of the selected variant. - pub fn id(&self) -> &str { - self.selected_variant().id() - } - - /// Whether the selected variant is cached on disk. - pub async fn is_cached(&self) -> Result { - self.selected_variant().is_cached().await - } - - /// Whether the selected variant is loaded into memory. - pub async fn is_loaded(&self) -> Result { - self.selected_variant().is_loaded().await - } - - /// Context length (maximum input tokens) of the selected variant. - pub fn context_length(&self) -> Option { - self.selected_variant().info().context_length - } - - /// Input modalities of the selected variant (e.g. "text", "text,image"). - pub fn input_modalities(&self) -> Option<&str> { - self.selected_variant().info().input_modalities.as_deref() - } - - /// Output modalities of the selected variant (e.g. "text"). - pub fn output_modalities(&self) -> Option<&str> { - self.selected_variant().info().output_modalities.as_deref() - } - - /// Capabilities of the selected variant (e.g. "reasoning", "tool-calling"). - pub fn capabilities(&self) -> Option<&str> { - self.selected_variant().info().capabilities.as_deref() - } - - /// Whether the selected variant supports tool calling. 
- pub fn supports_tool_calling(&self) -> Option { - self.selected_variant().info().supports_tool_calling - } - - /// Download the selected variant. If `progress` is provided, it receives - /// human-readable progress strings as they arrive from the native core. - pub async fn download(&self, progress: Option) -> Result<()> - where - F: FnMut(&str) + Send + 'static, - { - self.selected_variant().download(progress).await - } - - /// Return the local file-system path of the selected variant. - pub async fn path(&self) -> Result { - self.selected_variant().path().await - } - - /// Load the selected variant into memory. - pub async fn load(&self) -> Result<()> { - self.selected_variant().load().await - } - - /// Unload the selected variant from memory. - pub async fn unload(&self) -> Result { - self.selected_variant().unload().await - } - - /// Remove the selected variant from the local cache. - pub async fn remove_from_cache(&self) -> Result { - self.selected_variant().remove_from_cache().await - } - - /// Create a [`ChatClient`] bound to the selected variant. - pub fn create_chat_client(&self) -> ChatClient { - ChatClient::new(self.id(), Arc::clone(&self.core)) - } - - /// Create an [`AudioClient`] bound to the selected variant. - pub fn create_audio_client(&self) -> AudioClient { - AudioClient::new(self.id(), Arc::clone(&self.core)) - } -} diff --git a/sdk/rust/tests/integration/model_test.rs b/sdk/rust/tests/integration/model_test.rs index d2b68b77..4e3b371b 100644 --- a/sdk/rust/tests/integration/model_test.rs +++ b/sdk/rust/tests/integration/model_test.rs @@ -111,11 +111,12 @@ async fn should_have_selected_variant_matching_id() { .await .expect("get_model failed"); - let selected = model.selected_variant(); + // The model's id() should return the selected variant's id + // info() delegates to the selected variant, so id() and info().id must agree assert_eq!( - selected.id(), model.id(), - "selected_variant().id() should match model.id()" + model.info().id, + "model.id() should match model.info().id (the selected variant's metadata)" ); } @@ -177,7 +178,7 @@ async fn should_select_variant_by_id() { ); // Restore the original variant so other tests sharing this - // Arc via the catalog are not affected. + // model via the catalog are not affected. model .select_variant(&original_id) .expect("restoring original variant should succeed"); From 63bc47962432ca29bf481bf61310faf9b9fdec4a Mon Sep 17 00:00:00 2001 From: bmehta001 Date: Thu, 2 Apr 2026 21:29:48 -0500 Subject: [PATCH 18/83] Fix progress display (#580) Improve EP download progress printing in samples --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/native-chat-completions/Program.cs | 12 ++++++------ samples/js/native-chat-completions/app.js | 11 ++++++----- sdk/cs/README.md | 5 +---- sdk/js/README.md | 4 +--- sdk/python/README.md | 3 +-- sdk/rust/README.md | 4 +--- 6 files changed, 16 insertions(+), 23 deletions(-) diff --git a/samples/cs/native-chat-completions/Program.cs b/samples/cs/native-chat-completions/Program.cs index d1527503..033786b1 100644 --- a/samples/cs/native-chat-completions/Program.cs +++ b/samples/cs/native-chat-completions/Program.cs @@ -21,19 +21,22 @@ // Discover available execution providers and their registration status. 
var eps = mgr.DiscoverEps(); +int maxNameLen = 30; Console.WriteLine("Available execution providers:"); +Console.WriteLine($" {"Name".PadRight(maxNameLen)} Registered"); +Console.WriteLine($" {new string('─', maxNameLen)} {"──────────"}"); foreach (var ep in eps) { - Console.WriteLine($" {ep.Name} (registered: {ep.IsRegistered})"); + Console.WriteLine($" {ep.Name.PadRight(maxNameLen)} {ep.IsRegistered}"); } // Download and register all execution providers with per-EP progress. // EP packages include dependencies and may be large. // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. +Console.WriteLine("\nDownloading execution providers:"); if (eps.Length > 0) { - int maxNameLen = eps.Max(e => e.Name.Length); string currentEp = ""; await mgr.DownloadAndRegisterEpsAsync((epName, percent) => { @@ -46,11 +49,8 @@ await mgr.DownloadAndRegisterEpsAsync((epName, percent) => currentEp = epName; } Console.Write($"\r {epName.PadRight(maxNameLen)} {percent,6:F1}%"); - if (percent >= 100) - { - Console.WriteLine(); - } }); + Console.WriteLine(); } else { diff --git a/samples/js/native-chat-completions/app.js b/samples/js/native-chat-completions/app.js index 4246f64f..9e34c90f 100644 --- a/samples/js/native-chat-completions/app.js +++ b/samples/js/native-chat-completions/app.js @@ -16,16 +16,19 @@ console.log('✓ SDK initialized successfully'); // Discover available execution providers and their registration status. const eps = manager.discoverEps(); +const maxNameLen = 30; console.log('\nAvailable execution providers:'); +console.log(` ${'Name'.padEnd(maxNameLen)} Registered`); +console.log(` ${'─'.repeat(maxNameLen)} ──────────`); for (const ep of eps) { - console.log(` ${ep.name} (registered: ${ep.isRegistered})`); + console.log(` ${ep.name.padEnd(maxNameLen)} ${ep.isRegistered}`); } // Download and register all execution providers with per-EP progress. // EP packages include dependencies and may be large. // Download is only required again if a new version of the EP is released. +console.log('\nDownloading execution providers:'); if (eps.length > 0) { - const maxNameLen = Math.max(...eps.map(e => e.name.length)); let currentEp = ''; await manager.downloadAndRegisterEps((epName, percent) => { if (epName !== currentEp) { @@ -35,10 +38,8 @@ if (eps.length > 0) { currentEp = epName; } process.stdout.write(`\r ${epName.padEnd(maxNameLen)} ${percent.toFixed(1).padStart(5)}%`); - if (percent >= 100) { - process.stdout.write('\n'); - } }); + process.stdout.write('\n'); } else { console.log('No execution providers to download.'); } diff --git a/sdk/cs/README.md b/sdk/cs/README.md index 26287217..3efdc242 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -94,11 +94,8 @@ await mgr.DownloadAndRegisterEpsAsync((epName, percent) => currentEp = epName; } Console.Write($"\r {epName} {percent,6:F1}%"); - if (percent >= 100) - { - Console.WriteLine(); - } }); +Console.WriteLine(); ``` Catalog access no longer blocks on EP downloads. Call `DownloadAndRegisterEpsAsync` explicitly when you need hardware-accelerated execution providers. 
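For orientation, a minimal C# sketch of that ordering follows: catalog access first, then an explicit EP download. `DiscoverEps` and `DownloadAndRegisterEpsAsync` come from the sample above; the `FoundryLocalManager.CreateAsync` factory and the `Catalog.GetModelAsync` call are illustrative assumptions rather than confirmed API surface.

```csharp
using Microsoft.AI.Foundry.Local; // assumed namespace

// Hypothetical construction; the samples above already hold an initialized manager.
var mgr = await FoundryLocalManager.CreateAsync();

// Catalog access does not wait on EP downloads, so this returns promptly.
var model = await mgr.Catalog.GetModelAsync("phi-3.5-mini"); // illustrative call

// Opt into hardware-accelerated execution providers before heavy inference.
await mgr.DownloadAndRegisterEpsAsync((epName, percent) =>
    Console.Write($"\r  {epName} {percent,6:F1}%"));
Console.WriteLine();
```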
diff --git a/sdk/js/README.md b/sdk/js/README.md index 5590ab12..c197e80e 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -67,10 +67,8 @@ await manager.downloadAndRegisterEps((epName, percent) => { currentEp = epName; } process.stdout.write(`\r ${epName} ${percent.toFixed(1)}%`); - if (percent >= 100) { - process.stdout.write('\n'); - } }); +process.stdout.write('\n'); ``` Catalog access does not block on EP downloads. Call `downloadAndRegisterEps()` when you need hardware-accelerated execution providers. diff --git a/sdk/python/README.md b/sdk/python/README.md index 4ee1f9cc..3ff677d2 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -102,10 +102,9 @@ def on_progress(ep_name: str, percent: float) -> None: print() current_ep = ep_name print(f"\r {ep_name} {percent:5.1f}%", end="", flush=True) - if percent >= 100: - print() manager.download_and_register_eps(progress_callback=on_progress) +print() ``` Catalog access does not block on EP downloads. Call `download_and_register_eps()` when you need hardware-accelerated execution providers. diff --git a/sdk/rust/README.md b/sdk/rust/README.md index 6bcb9884..d3983430 100644 --- a/sdk/rust/README.md +++ b/sdk/rust/README.md @@ -102,10 +102,8 @@ manager.download_and_register_eps_with_progress(None, move |ep_name: &str, perce *current = ep_name.to_string(); } print!("\r {} {:5.1}%", ep_name, percent); - if percent >= 100.0 { - println!(); - } }).await?; +println!(); ``` Catalog access does not block on EP downloads. Call `download_and_register_eps` when you need hardware-accelerated execution providers. From b651d29edbab38687ffcfe29feed8b6cd0e313c4 Mon Sep 17 00:00:00 2001 From: Rui Ren Date: Fri, 3 Apr 2026 00:34:15 -0700 Subject: [PATCH 19/83] update Core version (#574) Fix the CI pipeline issue, and update the core version --- sdk/js/script/install-standard.cjs | 4 ++-- sdk/js/script/install-winml.cjs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs index 319a33d1..bd0558b5 100644 --- a/sdk/js/script/install-standard.cjs +++ b/sdk/js/script/install-standard.cjs @@ -11,9 +11,9 @@ const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cj const useNightly = process.env.npm_config_nightly === 'true'; const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0.8-rc3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, + { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, { name: os.platform() === 'linux' ? 
'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.24.3', feed: NUGET_FEED, nightly: false }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.12.2', feed: NUGET_FEED, nightly: false }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.0-dev-20260319-1131106-439ca0d5', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, ]; (async () => { diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index b46770ca..dd0cb0d0 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -10,9 +10,9 @@ const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cj const useNightly = process.env.npm_config_nightly === 'true'; const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0.8-rc3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, + { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED, nightly: false }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.WinML', version: '0.12.2', feed: NUGET_FEED, nightly: false }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.WinML', version: '0.13.0-dev-20260319-1131106-439ca0d5', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, ]; (async () => { From e7deb86e122a0f5fdd17c091fa4e56b684cc68d6 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Fri, 3 Apr 2026 10:52:06 -0700 Subject: [PATCH 20/83] install python sdk with --no-deps in CI (#582) allows CI to install a custom foundry-local-core from the pipeline, while the built package installs the foundry-local-core defined in requirements.txt/requirements-winml.txt --------- Co-authored-by: Prathik Rao --- .pipelines/templates/build-python-steps.yml | 18 +++-- .pipelines/templates/test-python-steps.yml | 15 ++-- sdk/python/build_backend.py | 76 ++------------------- 3 files changed, 26 insertions(+), 83 deletions(-) diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index 6fd0cd34..8ab4d8d1 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -12,7 +12,6 @@ parameters: default: false - name: flcWheelsDir type: string - default: '' displayName: 'Path to directory containing the FLC wheels (for overriding foundry-local-core)' - name: outputDir type: string @@ -111,16 +110,23 @@ steps: Write-Warning "No FLC wheel found matching $filter in ${{ parameters.flcWheelsDir }}" } +- script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 + displayName: 'Install ORT native packages' + +- script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" + displayName: 'Install pure python dependencies' + # Build wheel — standard or WinML variant -# skip-native-deps=true omits foundry-local-core/onnxruntime pinned versions -# from the wheel metadata, since the pipeline pre-installs its own builds. +# The wheel retains all dependencies in its metadata so end users get +# native packages installed automatically. CI uses --no-deps to avoid +# re-downloading packages that were pre-installed from pipeline builds. 
- ${{ if eq(parameters.isWinML, true) }}: - - script: python -m build --wheel -C winml=true -C skip-native-deps=true --outdir dist/ + - script: python -m build --wheel -C winml=true --outdir dist/ displayName: 'Build wheel (WinML)' workingDirectory: $(repoRoot)/sdk/python - ${{ else }}: - - script: python -m build --wheel -C skip-native-deps=true --outdir dist/ + - script: python -m build --wheel --outdir dist/ displayName: 'Build wheel' workingDirectory: $(repoRoot)/sdk/python @@ -131,7 +137,7 @@ steps: targetType: inline script: | $wheel = (Get-ChildItem "$(repoRoot)/sdk/python/dist/*.whl" | Select-Object -First 1).FullName - pip install $wheel + pip install --no-deps $wheel # Stage output - task: PowerShell@2 diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml index f54a9464..1da74ee2 100644 --- a/.pipelines/templates/test-python-steps.yml +++ b/.pipelines/templates/test-python-steps.yml @@ -8,7 +8,6 @@ parameters: default: false - name: flcWheelsDir type: string - default: '' displayName: 'Path to directory containing the FLC wheels' steps: @@ -99,19 +98,19 @@ steps: Write-Warning "No FLC wheel found matching $filter" } -# Install ORT native packages from the ORT-Nightly feed. -# skip-native-deps strips these from the SDK wheel metadata, so they -# must be installed explicitly for tests to locate the native binaries. -- script: pip install onnxruntime-core onnxruntime-genai-core +- script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 displayName: 'Install ORT native packages' +- script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" + displayName: 'Install pure python dependencies' + - ${{ if not(parameters.isWinML) }}: - - script: python -m build --wheel -C skip-native-deps=true --outdir dist/ + - script: python -m build --wheel --outdir dist/ displayName: 'Build wheel' workingDirectory: $(repoRoot)/sdk/python - ${{ if parameters.isWinML }}: - - script: python -m build --wheel -C winml=true -C skip-native-deps=true --outdir dist/ + - script: python -m build --wheel -C winml=true --outdir dist/ displayName: 'Build wheel (WinML)' workingDirectory: $(repoRoot)/sdk/python @@ -121,7 +120,7 @@ steps: targetType: inline script: | $wheel = (Get-ChildItem "$(repoRoot)/sdk/python/dist/*.whl" | Select-Object -First 1).FullName - pip install $wheel + pip install --no-deps $wheel - script: pip install coverage pytest>=7.0.0 pytest-timeout>=2.1.0 displayName: 'Install test dependencies' diff --git a/sdk/python/build_backend.py b/sdk/python/build_backend.py index 3789501b..1bdf6cbb 100644 --- a/sdk/python/build_backend.py +++ b/sdk/python/build_backend.py @@ -18,14 +18,13 @@ python -m build --wheel -C winml=true -Skip native deps (use pre-installed foundry-local-core / ORT / GenAI):: - - python -m build --wheel -C skip-native-deps=true - Environment variable fallback (useful in CI pipelines):: FOUNDRY_VARIANT=winml python -m build --wheel - FOUNDRY_SKIP_NATIVE_DEPS=1 python -m build --wheel + +CI usage (install without pulling dependencies):: + + pip install --no-deps """ from __future__ import annotations @@ -51,13 +50,6 @@ _STANDARD_NAME = 'name = "foundry-local-sdk"' _WINML_NAME = 'name = "foundry-local-sdk-winml"' -# Native binary package prefixes to strip when skip-native-deps is active. 
-_NATIVE_DEP_PREFIXES = ( - "foundry-local-core", - "onnxruntime-core", - "onnxruntime-genai-core", -) - # --------------------------------------------------------------------------- # Variant detection @@ -75,23 +67,6 @@ def _is_winml(config_settings: dict | None) -> bool: return os.environ.get("FOUNDRY_VARIANT", "").lower() == "winml" -def _is_skip_native_deps(config_settings: dict | None) -> bool: - """Return True when native binary dependencies should be omitted. - - When set, ``foundry-local-core``, ``onnxruntime-core``, and - ``onnxruntime-genai-core`` are stripped from requirements.txt so the - wheel is built against whatever versions are already installed. - Useful in CI pipelines that pre-install pipeline-built native wheels. - - Checks ``config_settings["skip-native-deps"]`` first - (set via ``-C skip-native-deps=true``), then falls back to the - ``FOUNDRY_SKIP_NATIVE_DEPS`` environment variable. - """ - if config_settings and str(config_settings.get("skip-native-deps", "")).lower() == "true": - return True - return os.environ.get("FOUNDRY_SKIP_NATIVE_DEPS", "").lower() in ("1", "true") - - # --------------------------------------------------------------------------- # In-place patching context manager # --------------------------------------------------------------------------- @@ -125,48 +100,11 @@ def _patch_for_winml() -> Generator[None, None, None]: _REQUIREMENTS.write_text(requirements_original, encoding="utf-8") -@contextlib.contextmanager -def _strip_native_deps() -> Generator[None, None, None]: - """Temporarily remove native binary deps from requirements.txt. - - Lines starting with any prefix in ``_NATIVE_DEP_PREFIXES`` (case- - insensitive) are removed. The file is restored in the ``finally`` - block. - """ - requirements_original = _REQUIREMENTS.read_text(encoding="utf-8") - try: - filtered = [ - line for line in requirements_original.splitlines(keepends=True) - if not any(line.lstrip().lower().startswith(p) for p in _NATIVE_DEP_PREFIXES) - ] - _REQUIREMENTS.write_text("".join(filtered), encoding="utf-8") - yield - finally: - _REQUIREMENTS.write_text(requirements_original, encoding="utf-8") - - def _apply_patches(config_settings: dict | None): """Return a context manager that applies the appropriate patches.""" - winml = _is_winml(config_settings) - skip_native = _is_skip_native_deps(config_settings) - - @contextlib.contextmanager - def _combined(): - # Stack contexts: WinML swaps requirements first, then strip_native - # removes native deps from whatever requirements are active. 
- if winml and skip_native: - with _patch_for_winml(), _strip_native_deps(): - yield - elif winml: - with _patch_for_winml(): - yield - elif skip_native: - with _strip_native_deps(): - yield - else: - yield - - return _combined() + if _is_winml(config_settings): + return _patch_for_winml() + return contextlib.nullcontext() # --------------------------------------------------------------------------- From 6aff39962f926c14b46f78a3971b4bda30f5944c Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Sat, 4 Apr 2026 10:35:01 -0700 Subject: [PATCH 21/83] set onnxruntime dependency for python winml package to 1.23.2.3 (#590) Co-authored-by: Prathik Rao --- .pipelines/templates/build-python-steps.yml | 8 ++++++-- .pipelines/templates/test-python-steps.yml | 8 ++++++-- sdk/python/requirements-winml.txt | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index 8ab4d8d1..f21d9508 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -110,8 +110,12 @@ steps: Write-Warning "No FLC wheel found matching $filter in ${{ parameters.flcWheelsDir }}" } -- script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 - displayName: 'Install ORT native packages' +- ${{ if eq(parameters.isWinML, true) }}: + - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.12.1 + displayName: 'Install ORT native packages (WinML)' +- ${{ else }}: + - script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 + displayName: 'Install ORT native packages' - script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" displayName: 'Install pure python dependencies' diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml index 1da74ee2..6fc86b3b 100644 --- a/.pipelines/templates/test-python-steps.yml +++ b/.pipelines/templates/test-python-steps.yml @@ -98,8 +98,12 @@ steps: Write-Warning "No FLC wheel found matching $filter" } -- script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 - displayName: 'Install ORT native packages' +- ${{ if eq(parameters.isWinML, true) }}: + - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.12.1 + displayName: 'Install ORT native packages (WinML)' +- ${{ else }}: + - script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 + displayName: 'Install ORT native packages' - script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" displayName: 'Install pure python dependencies' diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index 9a3990b7..e554890c 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -3,5 +3,5 @@ requests>=2.32.4 openai>=2.24.0 # WinML native binary packages from the ORT-Nightly PyPI feed. 
foundry-local-core-winml==0.9.0.dev20260331004032 -onnxruntime-core==1.24.3 +onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.12.1 \ No newline at end of file From f85ab460c2d20e66e044b970e438018a40683cef Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Sun, 5 Apr 2026 18:10:23 -0700 Subject: [PATCH 22/83] updates ort and ort-genai versions to 1.24.4 and 0.13.0 (except ort for winml remains 1.23.2.3) (#595) - updates ort/ort-genai versions - updates core packaging to reflect new ort/ort-genai versions - removes useNightly from js sdk since we no longer need it NOTE: c# does not need updates since it gets dependencies transitively via Foundry Local Core --------- Co-authored-by: Prathik Rao --- .pipelines/foundry-local-packaging.yml | 2 +- .pipelines/templates/build-core-steps.yml | 1 - .pipelines/templates/package-core-steps.yml | 4 +-- .pipelines/templates/test-cs-steps.yml | 1 + sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 2 +- sdk/js/script/install-standard.cjs | 8 +++--- sdk/js/script/install-utils.cjs | 26 +++----------------- sdk/js/script/install-winml.cjs | 8 +++--- sdk/python/requirements-winml.txt | 2 +- sdk/python/requirements.txt | 4 +-- sdk/rust/build.rs | 10 ++++---- 11 files changed, 22 insertions(+), 46 deletions(-) diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index 2cb9ee2a..c871cdf1 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -17,7 +17,7 @@ parameters: - name: version displayName: 'Package version' type: string - default: '0.9.0' + default: '1.0.0' - name: prereleaseId displayName: 'Pre-release identifier (e.g. rc1, beta).' type: string diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml index 9f024c42..3803ccf0 100644 --- a/.pipelines/templates/build-core-steps.yml +++ b/.pipelines/templates/build-core-steps.yml @@ -48,7 +48,6 @@ steps: - diff --git a/.pipelines/templates/package-core-steps.yml b/.pipelines/templates/package-core-steps.yml index e5755a21..15b8fb54 100644 --- a/.pipelines/templates/package-core-steps.yml +++ b/.pipelines/templates/package-core-steps.yml @@ -109,9 +109,9 @@ steps: $nuspec = "$nsRoot/src/FoundryLocalCore/Core/WinMLNuget.nuspec" $id = "Microsoft.AI.Foundry.Local.Core.WinML" $ortVer = $pg.OnnxRuntimeFoundryVersionForWinML - $genaiVer = $pg.OnnxRuntimeGenAIWinML + $genaiVer = $pg.OnnxRuntimeGenAIFoundryVersion $winAppSdkVer = $pg.WinAppSdkVersion - $props = "id=$id;version=$(flcVersion);commitId=$(Build.SourceVersion);OnnxRuntimeFoundryVersion=$ortVer;OnnxRuntimeGenAIWinML=$genaiVer;WinAppSdkVersion=$winAppSdkVer" + $props = "id=$id;version=$(flcVersion);commitId=$(Build.SourceVersion);OnnxRuntimeFoundryVersionForWinML=$ortVer;OnnxRuntimeGenAIFoundryVersion=$genaiVer;WinAppSdkVersion=$winAppSdkVer" } else { $nuspec = "$nsRoot/src/FoundryLocalCore/Core/NativeNuget.nuspec" $id = "Microsoft.AI.Foundry.Local.Core" diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml index f7dc1aff..92c9b6ee 100644 --- a/.pipelines/templates/test-cs-steps.yml +++ b/.pipelines/templates/test-cs-steps.yml @@ -68,6 +68,7 @@ steps: + diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj index e8a7b755..26d74ff6 100644 --- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj +++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj @@ -29,7 +29,7 @@ true false diff --git a/sdk/js/script/install-standard.cjs 
b/sdk/js/script/install-standard.cjs index bd0558b5..8b30135a 100644 --- a/sdk/js/script/install-standard.cjs +++ b/sdk/js/script/install-standard.cjs @@ -8,12 +8,10 @@ const os = require('os'); const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); -const useNightly = process.env.npm_config_nightly === 'true'; - const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, - { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.24.3', feed: NUGET_FEED, nightly: false }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.0-dev-20260319-1131106-439ca0d5', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, + { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, + { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.24.4', feed: NUGET_FEED }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.0', feed: NUGET_FEED }, ]; (async () => { diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs index f9a5186c..cc61f0db 100644 --- a/sdk/js/script/install-utils.cjs +++ b/sdk/js/script/install-utils.cjs @@ -104,24 +104,9 @@ async function getBaseAddress(feedUrl) { return baseAddress.endsWith('/') ? baseAddress : baseAddress + '/'; } -async function resolveLatestVersion(feedUrl, packageName) { - const baseAddress = await getBaseAddress(feedUrl); - const versionsUrl = `${baseAddress}${packageName.toLowerCase()}/index.json`; - const versionData = await downloadJson(versionsUrl); - const versions = versionData.versions || []; - if (versions.length === 0) throw new Error(`No versions found for ${packageName}`); - versions.sort((a, b) => b.localeCompare(a)); - console.log(`[foundry-local] Latest version of ${packageName}: ${versions[0]}`); - return versions[0]; -} - async function installPackage(artifact, tempDir) { const pkgName = artifact.name; - let pkgVer = artifact.version; - if (artifact.nightly) { - console.log(` Resolving latest version for ${pkgName}...`); - pkgVer = await resolveLatestVersion(artifact.feed, pkgName); - } + const pkgVer = artifact.version; const baseAddress = await getBaseAddress(artifact.feed); const nameLower = pkgName.toLowerCase(); @@ -167,13 +152,8 @@ async function runInstall(artifacts) { } if (fs.existsSync(BIN_DIR) && REQUIRED_FILES.every(f => fs.existsSync(path.join(BIN_DIR, f)))) { - if (process.env.npm_config_nightly === 'true') { - console.log(`[foundry-local] Nightly requested. 
Forcing reinstall...`); - fs.rmSync(BIN_DIR, { recursive: true, force: true }); - } else { - console.log(`[foundry-local] Native libraries already installed.`); - return; - } + console.log(`[foundry-local] Native libraries already installed.`); + return; } console.log(`[foundry-local] Installing native libraries for ${RID}...`); diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index dd0cb0d0..e6fda732 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -7,12 +7,10 @@ const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); -const useNightly = process.env.npm_config_nightly === 'true'; - const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, - { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED, nightly: false }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.WinML', version: '0.13.0-dev-20260319-1131106-439ca0d5', feed: ORT_NIGHTLY_FEED, nightly: useNightly }, + { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, + { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.0', feed: NUGET_FEED }, ]; (async () => { diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index e554890c..bcf02668 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -4,4 +4,4 @@ openai>=2.24.0 # WinML native binary packages from the ORT-Nightly PyPI feed. foundry-local-core-winml==0.9.0.dev20260331004032 onnxruntime-core==1.23.2.3 -onnxruntime-genai-core==0.12.1 \ No newline at end of file +onnxruntime-genai-core==0.13.0 \ No newline at end of file diff --git a/sdk/python/requirements.txt b/sdk/python/requirements.txt index 801f577d..26da243f 100644 --- a/sdk/python/requirements.txt +++ b/sdk/python/requirements.txt @@ -3,5 +3,5 @@ requests>=2.32.4 openai>=2.24.0 # Standard native binary packages from the ORT-Nightly PyPI feed. 
 foundry-local-core==0.9.0.dev20260327060216
-onnxruntime-core==1.24.3
-onnxruntime-genai-core==0.12.1
\ No newline at end of file
+onnxruntime-core==1.24.4
+onnxruntime-genai-core==0.13.0
\ No newline at end of file
diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs
index 996eaf2a..73d84d32 100644
--- a/sdk/rust/build.rs
+++ b/sdk/rust/build.rs
@@ -8,8 +8,8 @@ const ORT_NIGHTLY_FEED: &str =
     "https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json";
 
 const CORE_VERSION: &str = "0.9.0.8-rc3";
-const ORT_VERSION: &str = "1.24.3";
-const GENAI_VERSION: &str = "0.13.0-dev-20260319-1131106-439ca0d5";
+const ORT_VERSION: &str = "1.24.4";
+const GENAI_VERSION: &str = "0.13.0";
 
 const WINML_ORT_VERSION: &str = "1.23.2.3";
 
@@ -62,9 +62,9 @@ fn get_packages(rid: &str) -> Vec {
             feed_url: NUGET_FEED,
         });
         packages.push(NuGetPackage {
-            name: "Microsoft.ML.OnnxRuntimeGenAI.WinML",
+            name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry",
             version: GENAI_VERSION.to_string(),
-            feed_url: ORT_NIGHTLY_FEED,
+            feed_url: NUGET_FEED,
         });
     } else {
         packages.push(NuGetPackage {
@@ -90,7 +90,7 @@ fn get_packages(rid: &str) -> Vec {
         packages.push(NuGetPackage {
             name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry",
             version: GENAI_VERSION.to_string(),
-            feed_url: ORT_NIGHTLY_FEED,
+            feed_url: NUGET_FEED,
         });
     }

From aa6c9bed6e02b54e4ee7cfc12ba85c9686326ce0 Mon Sep 17 00:00:00 2001
From: Baiju Meswani
Date: Sun, 5 Apr 2026 22:45:08 -0700
Subject: [PATCH 23/83] Fix Linux Python SDK, Pydantic warning and update the ExecuteCommandWithCallback signature (#593)

This pull request has 3 changes:

- Fixes the pydantic warnings by initializing `model_config = ConfigDict(protected_namespaces=())`
- Fixes the Linux Python SDK by depending on onnxruntime-gpu and onnxruntime-genai-cuda on Linux
- Updates the callback signature because FLC changed it to return an int instead of void. Returning 1 means that the execute command operation should be cancelled, whereas 0 means to continue.

The actual model download cancellation support is not part of this pull request. Will add it in a subsequent PR.

---------

Co-authored-by: Prathik Rao
---
 sdk/cs/src/Detail/CoreInterop.cs          | 14 ++++++--
 sdk/cs/src/Detail/ICoreInterop.cs         |  3 +-
 sdk/js/src/detail/coreInterop.ts          | 11 ++++--
 sdk/python/requirements-winml.txt         |  2 +-
 sdk/python/requirements.txt               |  8 +++--
 sdk/python/src/detail/core_interop.py     | 11 +++---
 sdk/python/src/detail/model_data_types.py |  4 ++-
 sdk/python/src/detail/utils.py            | 41 ++++++++++++++++-------
 sdk/rust/src/detail/core_interop.rs       | 14 +++++---
 9 files changed, 77 insertions(+), 31 deletions(-)

diff --git a/sdk/cs/src/Detail/CoreInterop.cs b/sdk/cs/src/Detail/CoreInterop.cs
index a7a43447..b88f5597 100644
--- a/sdk/cs/src/Detail/CoreInterop.cs
+++ b/sdk/cs/src/Detail/CoreInterop.cs
@@ -203,7 +203,7 @@ public CallbackHelper(CallbackFn callback)
         }
     }
 
-    private static void HandleCallback(nint data, int length, nint callbackHelper)
+    private static int HandleCallback(nint data, int length, nint callbackHelper)
     {
         var callbackData = string.Empty;
         CallbackHelper?
helper = null; @@ -221,14 +221,24 @@ private static void HandleCallback(nint data, int length, nint callbackHelper) helper = (CallbackHelper)GCHandle.FromIntPtr(callbackHelper).Target!; helper.Callback.Invoke(callbackData); + return 0; // continue } - catch (Exception ex) when (ex is not OperationCanceledException) + catch (OperationCanceledException ex) + { + if (helper != null && helper.Exception == null) + { + helper.Exception = ex; + } + return 1; // cancel + } + catch (Exception ex) { FoundryLocalManager.Instance.Logger.LogError(ex, $"Error in callback. Callback data: {callbackData}"); if (helper != null && helper.Exception == null) { helper.Exception = ex; } + return 1; // cancel on error } } diff --git a/sdk/cs/src/Detail/ICoreInterop.cs b/sdk/cs/src/Detail/ICoreInterop.cs index b493dfb7..74e2a8ad 100644 --- a/sdk/cs/src/Detail/ICoreInterop.cs +++ b/sdk/cs/src/Detail/ICoreInterop.cs @@ -40,8 +40,9 @@ protected unsafe struct ResponseBuffer } // native callback function signature + // Return: 0 = continue, 1 = cancel [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - protected unsafe delegate void NativeCallbackFn(nint data, int length, nint userData); + protected unsafe delegate int NativeCallbackFn(nint data, int length, nint userData); Response ExecuteCommand(string commandName, CoreInteropRequest? commandInput = null); Response ExecuteCommandWithCallback(string commandName, CoreInteropRequest? commandInput, CallbackFn callback); diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts index 9b723e84..5af32421 100644 --- a/sdk/js/src/detail/coreInterop.ts +++ b/sdk/js/src/detail/coreInterop.ts @@ -29,7 +29,7 @@ koffi.struct('StreamingRequestBuffer', { BinaryDataLength: 'int32_t', }); -const CallbackType = koffi.proto('void CallbackType(void *data, int32_t length, void *userData)'); +const CallbackType = koffi.proto('int32_t CallbackType(void *data, int32_t length, void *userData)'); const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); @@ -198,8 +198,13 @@ export class CoreInterop { koffi.encode(dataBuf, 'char', dataStr, dataBytes.length + 1); const cb = koffi.register((data: any, length: number, userData: any) => { - const chunk = koffi.decode(data, 'char', length); - callback(chunk); + try { + const chunk = koffi.decode(data, 'char', length); + callback(chunk); + return 0; // continue + } catch { + return 1; // cancel on error + } }, koffi.pointer(CallbackType)); return new Promise((resolve, reject) => { diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index bcf02668..68b76b56 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -2,6 +2,6 @@ pydantic>=2.0.0 requests>=2.32.4 openai>=2.24.0 # WinML native binary packages from the ORT-Nightly PyPI feed. -foundry-local-core-winml==0.9.0.dev20260331004032 +foundry-local-core-winml==1.0.0rc1 onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.0 \ No newline at end of file diff --git a/sdk/python/requirements.txt b/sdk/python/requirements.txt index 26da243f..9295b832 100644 --- a/sdk/python/requirements.txt +++ b/sdk/python/requirements.txt @@ -2,6 +2,8 @@ pydantic>=2.0.0 requests>=2.32.4 openai>=2.24.0 # Standard native binary packages from the ORT-Nightly PyPI feed. 
-foundry-local-core==0.9.0.dev20260327060216 -onnxruntime-core==1.24.4 -onnxruntime-genai-core==0.13.0 \ No newline at end of file +foundry-local-core==1.0.0rc1 +onnxruntime-core==1.24.4; sys_platform != "linux" +onnxruntime-gpu==1.24.4; sys_platform == "linux" +onnxruntime-genai-core==0.13.0; sys_platform != "linux" +onnxruntime-genai-cuda==0.13.0; sys_platform == "linux" diff --git a/sdk/python/src/detail/core_interop.py b/sdk/python/src/detail/core_interop.py index 4f4ddb67..1cd53e33 100644 --- a/sdk/python/src/detail/core_interop.py +++ b/sdk/python/src/detail/core_interop.py @@ -79,9 +79,11 @@ def callback(data_ptr, length, self_ptr): data_bytes = ctypes.string_at(data_ptr, length) data_str = data_bytes.decode('utf-8') self._py_callback(data_str) + return 0 # continue except Exception as e: if self is not None and self.exception is None: self.exception = e # keep the first only as they are likely all the same + return 1 # cancel on error def __init__(self, py_callback: Callable[[str], None]): self._py_callback = py_callback @@ -103,8 +105,8 @@ class CoreInterop: instance = None # Callback function for native interop. - # This returns a string and its length, and an optional user provided object. - CALLBACK_TYPE = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p) + # Returns c_int: 0 = continue, 1 = cancel. + CALLBACK_TYPE = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p) @staticmethod def _initialize_native_libraries() -> 'NativeBinaryPaths': @@ -129,8 +131,9 @@ def _initialize_native_libraries() -> 'NativeBinaryPaths': logger.info("Native libraries found — Core: %s ORT: %s GenAI: %s", paths.core, paths.ort, paths.genai) - # Create the onnxruntime.dll symlink on Linux/macOS if needed. - # create_ort_symlinks(paths) + # Create compatibility symlinks on Linux/macOS so Core can resolve + # ORT/GenAI names regardless of package layout. + create_ort_symlinks(paths) os.environ["ORT_LIB_PATH"] = str(paths.ort) # For ORT-GENAI to find ORT dependency if sys.platform.startswith("win"): diff --git a/sdk/python/src/detail/model_data_types.py b/sdk/python/src/detail/model_data_types.py index 46525dc7..e000c9c8 100644 --- a/sdk/python/src/detail/model_data_types.py +++ b/sdk/python/src/detail/model_data_types.py @@ -4,7 +4,7 @@ # -------------------------------------------------------------------------- from typing import Optional, List -from pydantic import BaseModel, Field +from pydantic import BaseModel, ConfigDict, Field from enum import StrEnum @@ -53,6 +53,8 @@ class ModelInfo(BaseModel): Fields are populated from the JSON response of the ``get_model_list`` command. """ + model_config = ConfigDict(protected_namespaces=()) + id: str = Field(alias="id", description="Unique identifier of the model. 
Generally :") name: str = Field(alias="name", description="Model variant name") version: int = Field(alias="version") diff --git a/sdk/python/src/detail/utils.py b/sdk/python/src/detail/utils.py index 5a054610..5780cfc9 100644 --- a/sdk/python/src/detail/utils.py +++ b/sdk/python/src/detail/utils.py @@ -12,7 +12,6 @@ import argparse import importlib.util -import json import logging import os import sys @@ -90,9 +89,9 @@ def _find_file_in_package(package_name: str, filename: str) -> Path | None: # Quick checks for well-known sub-directories first for candidate_dir in (pkg_root, pkg_root / "capi", pkg_root / "native", pkg_root / "lib", pkg_root / "bin"): - candidate = candidate_dir / filename - if candidate.exists(): - return candidate + candidates = list(candidate_dir.glob(f"*{filename}*")) + if candidates: + return candidates[0] # Recursive fallback for match in pkg_root.rglob(filename): @@ -144,8 +143,18 @@ def get_native_binary_paths() -> NativeBinaryPaths | None: # Probe WinML packages first; fall back to standard if not installed. core_path = _find_file_in_package("foundry-local-core-winml", core_name) or _find_file_in_package("foundry-local-core", core_name) - ort_path = _find_file_in_package("onnxruntime-core", ort_name) - genai_path = _find_file_in_package("onnxruntime-genai-core", genai_name) + + # On Linux, ORT is shipped by onnxruntime-gpu (libonnxruntime.so in capi/). + if sys.platform.startswith("linux"): + ort_path = _find_file_in_package("onnxruntime", ort_name) or _find_file_in_package("onnxruntime-core", ort_name) + else: + ort_path = _find_file_in_package("onnxruntime-core", ort_name) + + # On Linux, ORTGenAI is shipped by onnxruntime-genai-cuda (libonnxruntime-genai.so in the package root). + if sys.platform.startswith("linux"): + genai_path = _find_file_in_package("onnxruntime-genai", genai_name) or _find_file_in_package("onnxruntime-genai-core", genai_name) + else: + genai_path = _find_file_in_package("onnxruntime-genai-core", genai_name) if core_path and ort_path and genai_path: return NativeBinaryPaths(core=core_path, ort=ort_path, genai=genai_path) @@ -254,6 +263,9 @@ def foundry_local_install(args: list[str] | None = None) -> None: if parsed.winml: variant = "WinML" packages = ["foundry-local-core-winml", "onnxruntime-core", "onnxruntime-genai-core"] + elif sys.platform.startswith("linux"): + variant = "Linux (GPU)" + packages = ["foundry-local-core", "onnxruntime-gpu", "onnxruntime-genai-cuda"] else: variant = "standard" packages = ["foundry-local-core", "onnxruntime-core", "onnxruntime-genai-core"] @@ -271,10 +283,18 @@ def foundry_local_install(args: list[str] | None = None) -> None: else: if _find_file_in_package("foundry-local-core", core_name) is None: missing.append("foundry-local-core") - if _find_file_in_package("onnxruntime-core", ort_name) is None: + if sys.platform.startswith("linux"): + if _find_file_in_package("onnxruntime", ort_name) is None: + missing.append("onnxruntime-gpu") + else: + if _find_file_in_package("onnxruntime-core", ort_name) is None: missing.append("onnxruntime-core") - if _find_file_in_package("onnxruntime-genai-core", genai_name) is None: - missing.append("onnxruntime-genai-core") + if sys.platform.startswith("linux"): + if _find_file_in_package("onnxruntime-genai", genai_name) is None: + missing.append("onnxruntime-genai-cuda") + else: + if _find_file_in_package("onnxruntime-genai-core", genai_name) is None: + missing.append("onnxruntime-genai-core") print( "[foundry-local] ERROR: Could not locate native binaries after 
installation. " f"Missing: {', '.join(missing)}", @@ -289,6 +309,3 @@ def foundry_local_install(args: list[str] | None = None) -> None: print(f" Core : {paths.core}") print(f" ORT : {paths.ort}") print(f" GenAI : {paths.genai}") - - - diff --git a/sdk/rust/src/detail/core_interop.rs b/sdk/rust/src/detail/core_interop.rs index 75146164..43884d7f 100644 --- a/sdk/rust/src/detail/core_interop.rs +++ b/sdk/rust/src/detail/core_interop.rs @@ -52,7 +52,8 @@ impl ResponseBuffer { type ExecuteCommandFn = unsafe extern "C" fn(*const RequestBuffer, *mut ResponseBuffer); /// Signature for the streaming callback invoked by the native library. -type CallbackFn = unsafe extern "C" fn(*const u8, i32, *mut std::ffi::c_void); +/// Returns 0 to continue, 1 to cancel. +type CallbackFn = unsafe extern "C" fn(*const u8, i32, *mut std::ffi::c_void) -> i32; /// Signature for `execute_command_with_callback`. type ExecuteCommandWithCallbackFn = unsafe extern "C" fn( @@ -197,12 +198,12 @@ unsafe extern "C" fn streaming_trampoline( data: *const u8, length: i32, user_data: *mut std::ffi::c_void, -) { +) -> i32 { if data.is_null() || length <= 0 { - return; + return 0; } // catch_unwind prevents UB if the closure panics across the FFI boundary. - let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { // SAFETY: `user_data` points to a `StreamingCallbackState` kept alive // by the caller of `execute_command_with_callback` for the duration of // the native call. @@ -212,6 +213,11 @@ unsafe extern "C" fn streaming_trampoline( let slice = std::slice::from_raw_parts(data, length as usize); state.push(slice); })); + if result.is_err() { + 1 + } else { + 0 + } } // ── CoreInterop ────────────────────────────────────────────────────────────── From 7ad2ef0cff49ae3edf06ba438ae61c683bc620bd Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Mon, 6 Apr 2026 17:42:22 +0000 Subject: [PATCH 24/83] Fix documentation anomalies: incorrect API signatures, missing SDK links, wrong streaming patterns (#598) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Systematic audit of all SDK documentation against actual source code revealed **10 documentation anomalies** across 6 files. All fixes are documentation-only — no source code changes. ## Changes by File ### Root `README.md` | Fix | Before | After | |-----|--------|-------| | C# unload method name | `await model.Unload()` | `await model.UnloadAsync()` | | C# streaming missing parameter | `CompleteChatStreamingAsync(messages)` | `CompleteChatStreamingAsync(messages, CancellationToken.None)` | ### `sdk/cs/README.md` | Fix | Before | After | |-----|--------|-------| | Non-existent factory methods (3 occurrences) | `ChatMessage.FromUser("...")` / `ChatMessage.FromSystem("...")` | `new ChatMessage { Role = "user", Content = "..." 
}` | | Streaming property doesn't exist | `chunk.Choices?[0]?.Delta?.Content` | `chunk.Choices?[0]?.Message?.Content` | | Internal property used in public example | `m.SelectedVariant.Info.DisplayName` | `m.Info.DisplayName` | ### `sdk/js/README.md` | Fix | Before | After | |-----|--------|-------| | Streaming chunk access (2 occurrences) | `chunk.choices?.[0]?.message?.content` | `chunk.choices?.[0]?.delta?.content` | ### `sdk/python/README.md` | Fix | Before | After | |-----|--------|-------| | Streaming pattern | Callback: `client.complete_streaming_chat(messages, on_chunk)` | Generator: `for chunk in client.complete_streaming_chat(messages):` | ### `sdk/rust/README.md` | Fix | Before | After | |-----|--------|-------| | Non-existent trait | "All models implement the `IModel` trait" | "All models are represented by the `Model` type" | | Missing error variant | 8 variants listed | 9 variants (added `Internal { reason }`) | | Wrong license | "MIT" | "Microsoft Software License Terms" | ### `docs/README.md` | Fix | Before | After | |-----|--------|-------| | Missing SDK references | Only C# and JS listed | Added Python and Rust SDK links | ## Verification Method Each fix was verified by reading the actual source code: - C# `ChatMessage` constructor syntax confirmed in `ChatCompletionsTests.cs` - C# streaming `Message.Content` (not `Delta`) confirmed in `ChatClient.cs` — both streaming and non-streaming return `ChatCompletionCreateResponse` - JS streaming `delta.content` confirmed in `chatClient.ts` JSDoc - Python generator return confirmed in `chat_client.py:263-290` - Rust `Model` struct confirmed in `lib.rs:16`; no trait exists - Rust error variants confirmed in `error.rs:4-33` - LICENSE file confirmed as Microsoft Software License Terms Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: baijumeswani <12852605+baijumeswani@users.noreply.github.com> Co-authored-by: Baiju Meswani --- README.md | 4 ++-- docs/README.md | 2 ++ sdk/cs/README.md | 12 ++++++------ sdk/js/README.md | 4 ++-- sdk/python/README.md | 9 +++------ sdk/rust/README.md | 5 +++-- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 07bc9b4d..517b4b37 100644 --- a/README.md +++ b/README.md @@ -157,13 +157,13 @@ The Foundry Local SDK makes it easy to integrate local AI models into your appli new() { Role = "user", Content = "What is the golden ratio?" } }; - await foreach (var chunk in chatClient.CompleteChatStreamingAsync(messages)) + await foreach (var chunk in chatClient.CompleteChatStreamingAsync(messages, CancellationToken.None)) { Console.Write(chunk.Choices[0].Message.Content); } // Unload the model when done - await model.Unload(); + await model.UnloadAsync(); ``` diff --git a/docs/README.md b/docs/README.md index 066f2012..5fa298a0 100644 --- a/docs/README.md +++ b/docs/README.md @@ -6,6 +6,8 @@ Documentation for Foundry Local can be found in the following resources: - SDK Reference: - [C# SDK Reference](../sdk/cs/README.md): This documentation provides detailed information about the C# SDK for Foundry Local, including API references, usage examples, and best practices for integrating Foundry Local into your applications. - [JavaScript SDK Reference](../sdk/js/README.md): This documentation offers detailed information about the JavaScript SDK for Foundry Local, including API references, usage examples, and best practices for integrating Foundry Local into your web applications. 
+ - [Python SDK Reference](../sdk/python/README.md): This documentation provides detailed information about the Python SDK for Foundry Local, including API references, usage examples, and best practices for integrating Foundry Local into your Python applications. + - [Rust SDK Reference](../sdk/rust/README.md): This documentation provides detailed information about the Rust SDK for Foundry Local, including API references, usage examples, and best practices for integrating Foundry Local into your Rust applications. - [Foundry Local Lab](https://github.com/Microsoft-foundry/foundry-local-lab): This GitHub repository contains a lab designed to help you learn how to use Foundry Local effectively. It includes hands-on exercises, sample code, and step-by-step instructions to guide you through the process of setting up and using Foundry Local in various scenarios. ## Supported Capabilities diff --git a/sdk/cs/README.md b/sdk/cs/README.md index 3efdc242..20580e65 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -126,7 +126,7 @@ await model.LoadAsync(); var chatClient = await model.GetChatClientAsync(); var response = await chatClient.CompleteChatAsync(new[] { - ChatMessage.FromUser("Why is the sky blue?") + new ChatMessage { Role = "user", Content = "Why is the sky blue?" } }); Console.WriteLine(response.Choices![0].Message.Content); @@ -159,7 +159,7 @@ var catalog = await FoundryLocalManager.Instance.GetCatalogAsync(); // List all available models var models = await catalog.ListModelsAsync(); foreach (var m in models) - Console.WriteLine($"{m.Alias} — {m.SelectedVariant.Info.DisplayName}"); + Console.WriteLine($"{m.Alias} — {m.Info.DisplayName}"); // Get a specific model by alias var model = await catalog.GetModelAsync("phi-3.5-mini") @@ -214,8 +214,8 @@ var chatClient = await model.GetChatClientAsync(); var response = await chatClient.CompleteChatAsync(new[] { - ChatMessage.FromSystem("You are a helpful assistant."), - ChatMessage.FromUser("Explain async/await in C#.") + new ChatMessage { Role = "system", Content = "You are a helpful assistant." }, + new ChatMessage { Role = "user", Content = "Explain async/await in C#." } }); Console.WriteLine(response.Choices![0].Message.Content); @@ -229,9 +229,9 @@ Use `IAsyncEnumerable` for token-by-token output: using var cts = new CancellationTokenSource(); await foreach (var chunk in chatClient.CompleteChatStreamingAsync( - new[] { ChatMessage.FromUser("Write a haiku about .NET") }, cts.Token)) + new[] { new ChatMessage { Role = "user", Content = "Write a haiku about .NET" } }, cts.Token)) { - Console.Write(chunk.Choices?[0]?.Delta?.Content); + Console.Write(chunk.Choices?[0]?.Message?.Content); } ``` diff --git a/sdk/js/README.md b/sdk/js/README.md index c197e80e..298efa28 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -111,7 +111,7 @@ console.log('\nTesting streaming completion...'); for await (const chunk of chatClient.completeStreamingChat( [{ role: 'user', content: 'Write a short poem about programming.' }] )) { - const content = chunk.choices?.[0]?.message?.content; + const content = chunk.choices?.[0]?.delta?.content; if (content) { process.stdout.write(content); } @@ -198,7 +198,7 @@ For real-time output, use streaming: for await (const chunk of chatClient.completeStreamingChat( [{ role: 'user', content: 'Write a short poem about programming.' 
}] )) { - const content = chunk.choices?.[0]?.message?.content; + const content = chunk.choices?.[0]?.delta?.content; if (content) { process.stdout.write(content); } diff --git a/sdk/python/README.md b/sdk/python/README.md index 3ff677d2..dbdef1f8 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -232,12 +232,9 @@ print(result.choices[0].message.content) # "42" # Streaming chat messages = [{"role": "user", "content": "Tell me a joke"}] -def on_chunk(chunk): - delta = chunk.choices[0].delta - if delta and delta.content: - print(delta.content, end="", flush=True) - -client.complete_streaming_chat(messages, on_chunk) +for chunk in client.complete_streaming_chat(messages): + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="", flush=True) # Unload when done model.unload() diff --git a/sdk/rust/README.md b/sdk/rust/README.md index d3983430..e98b8e4c 100644 --- a/sdk/rust/README.md +++ b/sdk/rust/README.md @@ -175,7 +175,7 @@ let loaded = catalog.get_loaded_models().await?; ### Model Lifecycle -Each model may have multiple variants (different quantizations, hardware targets). The SDK auto-selects the best available variant, preferring cached versions. All models implement the `IModel` trait. +Each model may have multiple variants (different quantizations, hardware targets). The SDK auto-selects the best available variant, preferring cached versions. All models are represented by the `Model` type. ```rust let model = catalog.get_model("phi-3.5-mini").await?; @@ -445,6 +445,7 @@ match manager.catalog().get_model("nonexistent").await { | `Serialization(serde_json::Error)` | JSON serialization/deserialization failed | | `Validation { reason }` | A validation check on user-supplied input failed | | `Io(std::io::Error)` | An I/O error occurred | +| `Internal { reason }` | An internal SDK error (e.g. poisoned lock) | ## Configuration @@ -520,4 +521,4 @@ cargo run -p native-chat-completions ## License -MIT — see [LICENSE](../../LICENSE) for details. +Microsoft Software License Terms — see [LICENSE](../../LICENSE) for details. From c3e5841b18e680ee56d2cb200b927e63dce80f0e Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Mon, 6 Apr 2026 21:52:24 +0000 Subject: [PATCH 25/83] Replace callback-based audio streaming with iterator pattern (#597) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `AudioClient.transcribe_streaming` required a callback argument, inconsistent with `ChatClient.complete_streaming_chat` which returns a `Generator`. Align the audio client to the same iterator pattern used across the SDK. 
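For readers who do not want to trace the full diff below, here is a minimal standalone sketch of the thread + queue + sentinel bridge this change adopts. The names `callback_to_generator` and `run` are illustrative only, not SDK API:

```python
import queue
import threading
from typing import Callable, Generator


def callback_to_generator(run: Callable[[Callable[[str], None]], None]) -> Generator[str, None, None]:
    """Bridge a callback-driven call into a generator of chunks."""
    _SENTINEL = object()                 # marks end-of-stream
    chunks: queue.Queue = queue.Queue()
    errors: list[Exception] = []

    def _worker() -> None:
        try:
            run(chunks.put)              # the callback just enqueues each chunk
        except Exception as exc:         # surface the first failure to the consumer
            errors.append(exc)
        finally:
            chunks.put(_SENTINEL)        # always unblock the consumer

    threading.Thread(target=_worker, daemon=True).start()
    while (item := chunks.get()) is not _SENTINEL:
        yield item
    if errors:
        raise errors[0]
```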
### Changes - **`audio_client.py`**: Replace `transcribe_streaming(path, callback) -> None` with `transcribe_streaming(path) -> Generator[AudioTranscriptionResponse]` using the same `threading.Thread` + `queue.Queue` + sentinel pattern from `ChatClient._stream_chunks` - **`test_audio_client.py`**: Update streaming tests to `for chunk in` consumption; remove `test_should_raise_for_streaming_invalid_callback` (no longer applicable) - **`test/README.md`**: Update test counts (7→6 audio, 32→31 total) ### Usage ```python # Before def on_chunk(chunk): print(chunk.text) audio_client.transcribe_streaming("recording.mp3", on_chunk) # After for chunk in audio_client.transcribe_streaming("recording.mp3"): print(chunk.text) ``` Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: baijumeswani <12852605+baijumeswani@users.noreply.github.com> Co-authored-by: Prathik Rao --- sdk/python/src/openai/audio_client.py | 68 ++++++++++++++------- sdk/python/test/README.md | 4 +- sdk/python/test/openai/test_audio_client.py | 22 +------ 3 files changed, 51 insertions(+), 43 deletions(-) diff --git a/sdk/python/src/openai/audio_client.py b/sdk/python/src/openai/audio_client.py index 8d3ffa29..0858e4aa 100644 --- a/sdk/python/src/openai/audio_client.py +++ b/sdk/python/src/openai/audio_client.py @@ -7,8 +7,10 @@ import json import logging +import queue +import threading from dataclasses import dataclass -from typing import Callable, Optional +from typing import Generator, List, Optional from ..detail.core_interop import CoreInterop, InteropRequest from ..exception import FoundryLocalException @@ -114,18 +116,56 @@ def transcribe(self, audio_file_path: str) -> AudioTranscriptionResponse: data = json.loads(response.data) return AudioTranscriptionResponse(text=data.get("text", "")) + def _stream_chunks(self, request_json: str) -> Generator[AudioTranscriptionResponse, None, None]: + """Background-thread generator that yields parsed chunks from the native streaming call.""" + _SENTINEL = object() + chunk_queue: queue.Queue = queue.Queue() + errors: List[Exception] = [] + + def _on_chunk(chunk_str: str) -> None: + chunk_data = json.loads(chunk_str) + chunk_queue.put(AudioTranscriptionResponse(text=chunk_data.get("text", ""))) + + def _run() -> None: + try: + resp = self._core_interop.execute_command_with_callback( + "audio_transcribe", + InteropRequest(params={"OpenAICreateRequest": request_json}), + _on_chunk, + ) + if resp.error is not None: + errors.append( + FoundryLocalException( + f"Streaming audio transcription failed for model '{self.model_id}': {resp.error}" + ) + ) + except Exception as exc: + errors.append(exc) + finally: + chunk_queue.put(_SENTINEL) + + threading.Thread(target=_run, daemon=True).start() + while (item := chunk_queue.get()) is not _SENTINEL: + yield item + if errors: + raise errors[0] + def transcribe_streaming( self, audio_file_path: str, - callback: Callable[[AudioTranscriptionResponse], None], - ) -> None: + ) -> Generator[AudioTranscriptionResponse, None, None]: """Transcribe an audio file with streaming chunks. - Each chunk is passed to *callback* as an ``AudioTranscriptionResponse``. + Consume with a standard ``for`` loop:: + + for chunk in audio_client.transcribe_streaming("recording.mp3"): + print(chunk.text, end="", flush=True) Args: audio_file_path: Path to the audio file to transcribe. - callback: Called with each incremental transcription chunk. + + Returns: + A generator of ``AudioTranscriptionResponse`` objects. 
Raises: ValueError: If *audio_file_path* is not a non-empty string. @@ -133,21 +173,5 @@ def transcribe_streaming( """ self._validate_audio_file_path(audio_file_path) - if not callable(callback): - raise TypeError("Callback must be a valid function.") - request_json = self._create_request_json(audio_file_path) - request = InteropRequest(params={"OpenAICreateRequest": request_json}) - - def callback_handler(chunk_str: str): - chunk_data = json.loads(chunk_str) - chunk = AudioTranscriptionResponse(text=chunk_data.get("text", "")) - callback(chunk) - - response = self._core_interop.execute_command_with_callback( - "audio_transcribe", request, callback_handler - ) - if response.error is not None: - raise FoundryLocalException( - f"Streaming audio transcription failed for model '{self.model_id}': {response.error}" - ) \ No newline at end of file + return self._stream_chunks(request_json) \ No newline at end of file diff --git a/sdk/python/test/README.md b/sdk/python/test/README.md index 92f389a8..ded38f5b 100644 --- a/sdk/python/test/README.md +++ b/sdk/python/test/README.md @@ -50,10 +50,10 @@ test/ │ └── test_model_load_manager.py # ModelLoadManager core interop & web service (5 tests) └── openai/ ├── test_chat_client.py # Chat completions, streaming, error validation (7 tests) - └── test_audio_client.py # Audio transcription (7 tests) + └── test_audio_client.py # Audio transcription (6 tests) ``` -**Total: 32 tests** +**Total: 31 tests** ## Key conventions diff --git a/sdk/python/test/openai/test_audio_client.py b/sdk/python/test/openai/test_audio_client.py index f430d8d5..0d365eef 100644 --- a/sdk/python/test/openai/test_audio_client.py +++ b/sdk/python/test/openai/test_audio_client.py @@ -88,16 +88,13 @@ def test_should_transcribe_audio_streaming(self, catalog): audio_client.settings.temperature = 0.0 chunks = [] - - def on_chunk(chunk): + for chunk in audio_client.transcribe_streaming(AUDIO_FILE_PATH): assert chunk is not None assert hasattr(chunk, "text") assert isinstance(chunk.text, str) assert len(chunk.text) > 0 chunks.append(chunk.text) - audio_client.transcribe_streaming(AUDIO_FILE_PATH, on_chunk) - full_text = "".join(chunks) assert full_text == EXPECTED_TEXT finally: @@ -114,14 +111,11 @@ def test_should_transcribe_audio_streaming_with_temperature(self, catalog): audio_client.settings.temperature = 0.0 chunks = [] - - def on_chunk(chunk): + for chunk in audio_client.transcribe_streaming(AUDIO_FILE_PATH): assert chunk is not None assert isinstance(chunk.text, str) chunks.append(chunk.text) - audio_client.transcribe_streaming(AUDIO_FILE_PATH, on_chunk) - full_text = "".join(chunks) assert full_text == EXPECTED_TEXT finally: @@ -143,14 +137,4 @@ def test_should_raise_for_streaming_empty_audio_file_path(self, catalog): audio_client = model.get_audio_client() with pytest.raises(ValueError, match="Audio file path must be a non-empty string"): - audio_client.transcribe_streaming("", lambda chunk: None) - - def test_should_raise_for_streaming_invalid_callback(self, catalog): - """transcribe_streaming with invalid callback should raise.""" - model = catalog.get_model(AUDIO_MODEL_ALIAS) - assert model is not None - audio_client = model.get_audio_client() - - for invalid_callback in [None, 42, {}, "not a function"]: - with pytest.raises(TypeError, match="Callback must be a valid function"): - audio_client.transcribe_streaming(AUDIO_FILE_PATH, invalid_callback) + audio_client.transcribe_streaming("") From e4230b4d06642d11627f78e08c00e81f16e0a811 Mon Sep 17 00:00:00 2001 From: Copilot 
<198982749+Copilot@users.noreply.github.com> Date: Mon, 6 Apr 2026 17:00:48 -0700 Subject: [PATCH 26/83] Fix npm resolution crash on reinstall/uninstall due to missing platform package skeletons (#599) - [x] Identify root cause: `preinstall.cjs` only creates a skeleton for the current platform, but `optionalDependencies` references all 4 platforms as `file:` deps - [x] Fix `preinstall.cjs` to create skeletons for all platforms so npm can resolve all `file:` dependencies - [x] Address review feedback: derive ALL_PLATFORMS from package.json optionalDependencies instead of hardcoding - [x] Address review feedback: remove unused `os` require - [x] Address review feedback: fix placeholder version comment to reference correct script --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: baijumeswani <12852605+baijumeswani@users.noreply.github.com> Co-authored-by: Baiju Meswani --- sdk/js/script/preinstall.cjs | 57 ++++++++++++++++++++++++------------ 1 file changed, 39 insertions(+), 18 deletions(-) diff --git a/sdk/js/script/preinstall.cjs b/sdk/js/script/preinstall.cjs index 5ef43914..5590550b 100644 --- a/sdk/js/script/preinstall.cjs +++ b/sdk/js/script/preinstall.cjs @@ -1,31 +1,52 @@ const fs = require('fs'); const path = require('path'); -const os = require('os'); console.log('[foundry-local] Preinstall: creating platform package skeletons...'); -const platformKey = `${os.platform()}-${os.arch()}`; +// Derive all platform packages from optionalDependencies in package.json +// so this script stays in sync automatically. +const rootPackageJsonPath = path.join(__dirname, '..', 'package.json'); +const rootPackageJson = JSON.parse(fs.readFileSync(rootPackageJsonPath, 'utf8')); +const optionalDependencies = rootPackageJson.optionalDependencies || {}; +const platformPackagePrefix = '@foundry-local-core/'; + +const ALL_PLATFORMS = Object.keys(optionalDependencies) + .filter((packageName) => packageName.startsWith(platformPackagePrefix)) + .map((packageName) => { + const key = packageName.slice(platformPackagePrefix.length); + const parts = key.split('-'); + const cpu = parts[parts.length - 1]; + const platformOs = parts.slice(0, -1).join('-'); + + return { + key, + os: platformOs, + cpu, + }; + }); const packagesRoot = path.join(__dirname, '..', 'packages', '@foundry-local-core'); -const dir = path.join(packagesRoot, platformKey); +for (const platform of ALL_PLATFORMS) { + const dir = path.join(packagesRoot, platform.key); -if (!fs.existsSync(dir)) { - fs.mkdirSync(dir, { recursive: true }); -} + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }); + } -const pkgJsonPath = path.join(dir, 'package.json'); -if (!fs.existsSync(pkgJsonPath)) { - const pkgContent = { - name: `@foundry-local-core/${platformKey}`, - version: "0.0.0", // Placeholder version, will be replaced during install.cjs - description: `Native binaries for Foundry Local SDK (${platformKey})`, - os: [os.platform()], - cpu: [os.arch()], - private: true - }; - fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2)); - console.log(` Created skeleton for ${platformKey}`); + const pkgJsonPath = path.join(dir, 'package.json'); + if (!fs.existsSync(pkgJsonPath)) { + const pkgContent = { + name: `@foundry-local-core/${platform.key}`, + version: "0.0.0", // Placeholder version, will be replaced during script/install-utils.cjs (installPackage()) + description: `Native binaries for Foundry Local SDK (${platform.key})`, + os: [platform.os], + cpu: [platform.cpu], + 
private: true
+    };
+    fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2));
+    console.log(`  Created skeleton for ${platform.key}`);
+  }
+}
 
 console.log('[foundry-local] Preinstall complete.');

From 285f189c0595e3b98d0b7ee939f480ab622685c1 Mon Sep 17 00:00:00 2001
From: Prathik Rao
Date: Mon, 6 Apr 2026 20:46:55 -0700
Subject: [PATCH 27/83] adjusts js winml sdk to depend on standard sdk and replaces dependencies with winml binaries (#596)

- allows users to do `npm install foundry-local-sdk-winml` and then do `import { FoundryLocalManager } from 'foundry-local-sdk';` like the other SDKs do
- creates a compute version stage at the beginning of the mega pipeline so all artifacts are generated with the same version string down to the timestamp minute
- new node_modules/ structure

---------

Co-authored-by: Prathik Rao
---
 .pipelines/foundry-local-packaging.yml      | 92 ++++++++++++++++++++-
 .pipelines/templates/build-cs-steps.yml     | 13 +--
 .pipelines/templates/build-js-steps.yml     | 15 ++--
 .pipelines/templates/build-python-steps.yml | 13 +--
 .pipelines/templates/build-rust-steps.yml   | 11 +--
 .pipelines/templates/package-core-steps.yml | 14 +---
 .pipelines/templates/test-js-steps.yml      |  3 +-
 sdk/js/package.json                         |  6 --
 sdk/js/script/install-utils.cjs             | 21 +++--
 sdk/js/script/install-winml.cjs             | 14 +++-
 sdk/js/script/pack.cjs                      | 12 ++-
 sdk/js/script/preinstall.cjs                |  2 +-
 sdk/js/src/detail/coreInterop.ts            | 14 ++--
 13 files changed, 152 insertions(+), 78 deletions(-)

diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml
index c871cdf1..617ea587 100644
--- a/.pipelines/foundry-local-packaging.yml
+++ b/.pipelines/foundry-local-packaging.yml
@@ -70,9 +70,67 @@ extends:
       - repository: neutron-server
       - repository: test-data-shared
     stages:
+  # ── Compute Version ──
+  # A single version string is computed once and shared across all stages.
+  # This prevents timestamp drift between standard and WinML builds.
+  # Outputs three format variants:
+  #   sdkVersion – semver for JS, C#, Rust (e.g. 1.0.0-dev.202604061234)
+  #   pyVersion  – PEP 440 for Python (e.g. 1.0.0.dev202604061234)
+  #   flcVersion – NuGet/FLC style (e.g.
1.0.0-dev-202604061234-ab12cd34) + - stage: compute_version + displayName: 'Compute Version' + dependsOn: [] + jobs: + - job: version + displayName: 'Compute Version' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Build.ArtifactStagingDirectory)/version-info' + steps: + - checkout: none + - task: PowerShell@2 + displayName: 'Compute and write version files' + inputs: + targetType: inline + script: | + $base = "${{ parameters.version }}" + $preId = "${{ parameters.prereleaseId }}" + $ts = Get-Date -Format "yyyyMMddHHmm" + $commitId = "$(Build.SourceVersion)".Substring(0, 8) + + if ($preId -ne '' -and $preId -ne 'none') { + $sdkVersion = "$base-$preId" + $pyVersion = "$base$preId" + $flcVersion = "$base-$preId" + } elseif ("${{ parameters.isRelease }}" -ne "True") { + $sdkVersion = "$base-dev.$ts" + $pyVersion = "$base.dev$ts" + $flcVersion = "$base-dev-$ts-$commitId" + } else { + $sdkVersion = $base + $pyVersion = $base + $flcVersion = $base + } + + $outDir = "$(Build.ArtifactStagingDirectory)/version-info" + New-Item -ItemType Directory -Path $outDir -Force | Out-Null + Set-Content -Path "$outDir/sdkVersion.txt" -Value $sdkVersion -NoNewline + Set-Content -Path "$outDir/pyVersion.txt" -Value $pyVersion -NoNewline + Set-Content -Path "$outDir/flcVersion.txt" -Value $flcVersion -NoNewline + + Write-Host "SDK version: $sdkVersion" + Write-Host "Python version: $pyVersion" + Write-Host "FLC version: $flcVersion" + # ── Build & Test FLC ── - stage: build_core displayName: 'Build & Test FLC' + dependsOn: compute_version jobs: - job: flc_win_x64 displayName: 'FLC win-x64' @@ -160,6 +218,10 @@ extends: name: onnxruntime-Win-CPU-2022 os: windows templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' outputs: - output: pipelineArtifact artifactName: 'flc-nuget' @@ -229,6 +291,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-nuget' targetPath: '$(Pipeline.Workspace)/flc-nuget' @@ -261,6 +326,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-nuget' targetPath: '$(Pipeline.Workspace)/flc-nuget' @@ -293,6 +361,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-wheels' targetPath: '$(Pipeline.Workspace)/flc-wheels' @@ -325,6 +396,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-nuget' targetPath: '$(Pipeline.Workspace)/flc-nuget' @@ -467,7 +541,7 @@ extends: # ── Build & Test FLC (WinML) ── - stage: build_core_winml displayName: 'Build & Test FLC WinML' - dependsOn: [] + dependsOn: compute_version jobs: - job: flc_winml_win_x64 displayName: 'FLC win-x64 (WinML)' @@ -520,6 +594,10 @@ extends: name: onnxruntime-Win-CPU-2022 os: windows templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' outputs: - output: 
pipelineArtifact artifactName: 'flc-nuget-winml' @@ -575,6 +653,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-nuget-winml' targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' @@ -608,6 +689,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-nuget-winml' targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' @@ -640,6 +724,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-wheels-winml' targetPath: '$(Pipeline.Workspace)/flc-wheels-winml' @@ -673,6 +760,9 @@ extends: os: windows templateContext: inputs: + - input: pipelineArtifact + artifactName: 'version-info' + targetPath: '$(Pipeline.Workspace)/version-info' - input: pipelineArtifact artifactName: 'flc-nuget-winml' targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml index 978c2fff..38f5b8bf 100644 --- a/.pipelines/templates/build-cs-steps.yml +++ b/.pipelines/templates/build-cs-steps.yml @@ -38,22 +38,15 @@ steps: packageType: sdk version: '9.0.x' -# Compute package version +# Read version from the version-info artifact produced by compute_version stage. - task: PowerShell@2 displayName: 'Set package version' inputs: targetType: inline script: | - $v = "${{ parameters.version }}" - $preId = "${{ parameters.prereleaseId }}" - if ($preId -ne '' -and $preId -ne 'none') { - $v = "$v-$preId" - } elseif ("${{ parameters.isRelease }}" -ne "True") { - $ts = Get-Date -Format "yyyyMMddHHmm" - $v = "$v-dev.$ts" - } - Write-Host "##vso[task.setvariable variable=packageVersion]$v" + $v = (Get-Content "$(Pipeline.Workspace)/version-info/sdkVersion.txt" -Raw).Trim() Write-Host "Package version: $v" + Write-Host "##vso[task.setvariable variable=packageVersion]$v" # List downloaded artifact for debugging - task: PowerShell@2 diff --git a/.pipelines/templates/build-js-steps.yml b/.pipelines/templates/build-js-steps.yml index e288bbce..3aa2908d 100644 --- a/.pipelines/templates/build-js-steps.yml +++ b/.pipelines/templates/build-js-steps.yml @@ -45,20 +45,14 @@ steps: inputs: versionSpec: '20.x' -# Compute version +# Read version from the version-info artifact produced by compute_version stage. 
- task: PowerShell@2 displayName: 'Set package version' inputs: targetType: inline script: | - $v = "${{ parameters.version }}" - $preId = "${{ parameters.prereleaseId }}" - if ($preId -ne '' -and $preId -ne 'none') { - $v = "$v-$preId" - } elseif ("${{ parameters.isRelease }}" -ne "True") { - $ts = Get-Date -Format "yyyyMMddHHmm" - $v = "$v-dev.$ts" - } + $v = (Get-Content "$(Pipeline.Workspace)/version-info/sdkVersion.txt" -Raw).Trim() + Write-Host "Package version: $v" Write-Host "##vso[task.setvariable variable=packageVersion]$v" # Install dependencies including native binaries (FLC, ORT, GenAI) from NuGet feeds @@ -102,7 +96,8 @@ steps: Expand-Archive -Path $zip -DestinationPath $extractDir -Force # Overwrite FLC binary in the npm-installed location - $destDir = "$(repoRoot)/sdk/js/packages/@foundry-local-core/$platformKey" + $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null $nativeDir = "$extractDir/runtimes/$rid/native" if (Test-Path $nativeDir) { Get-ChildItem $nativeDir -File | ForEach-Object { diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index f21d9508..f94aa712 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -47,22 +47,15 @@ steps: Write-Host "Contents of ${{ parameters.flcWheelsDir }}:" Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } -# Compute package version +# Read version from the version-info artifact produced by compute_version stage. - task: PowerShell@2 displayName: 'Set package version' inputs: targetType: inline script: | - $v = "${{ parameters.version }}" - $preId = "${{ parameters.prereleaseId }}" - if ($preId -ne '' -and $preId -ne 'none') { - $v = "$v-$preId" - } elseif ("${{ parameters.isRelease }}" -ne "True") { - $ts = Get-Date -Format "yyyyMMddHHmm" - $v = "$v-dev.$ts" - } - Write-Host "##vso[task.setvariable variable=packageVersion]$v" + $v = (Get-Content "$(Pipeline.Workspace)/version-info/pyVersion.txt" -Raw).Trim() Write-Host "Package version: $v" + Write-Host "##vso[task.setvariable variable=packageVersion]$v" # Configure pip to use ORT-Nightly feed (plus PyPI as fallback) - task: PowerShell@2 diff --git a/.pipelines/templates/build-rust-steps.yml b/.pipelines/templates/build-rust-steps.yml index efccfaa4..ed3161e5 100644 --- a/.pipelines/templates/build-rust-steps.yml +++ b/.pipelines/templates/build-rust-steps.yml @@ -32,20 +32,13 @@ steps: Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" -# Compute package version and patch Cargo.toml +# Read version from the version-info artifact produced by compute_version stage. 
- task: PowerShell@2 displayName: 'Set crate version' inputs: targetType: inline script: | - $v = "${{ parameters.version }}" - $preId = "${{ parameters.prereleaseId }}" - if ($preId -ne '' -and $preId -ne 'none') { - $v = "$v-$preId" - } elseif ("${{ parameters.isRelease }}" -ne "True") { - $ts = Get-Date -Format "yyyyMMddHHmm" - $v = "$v-dev.$ts" - } + $v = (Get-Content "$(Pipeline.Workspace)/version-info/sdkVersion.txt" -Raw).Trim() Write-Host "Crate version: $v" # Patch Cargo.toml version field diff --git a/.pipelines/templates/package-core-steps.yml b/.pipelines/templates/package-core-steps.yml index 15b8fb54..01697085 100644 --- a/.pipelines/templates/package-core-steps.yml +++ b/.pipelines/templates/package-core-steps.yml @@ -74,23 +74,15 @@ steps: Copy-Item $license "$unifiedPath/LICENSE.txt" -Force } -# Compute version +# Read version from the version-info artifact produced by compute_version stage. - task: PowerShell@2 displayName: 'Set FLC package version' inputs: targetType: inline script: | - $v = "${{ parameters.version }}" - $preId = "${{ parameters.prereleaseId }}" - if ($preId -ne '' -and $preId -ne 'none') { - $v = "$v-$preId" - } elseif ("${{ parameters.isRelease }}" -ne "True") { - $ts = Get-Date -Format "yyyyMMddHHmm" - $commitId = "$(Build.SourceVersion)".Substring(0, 8) - $v = "$v-dev-$ts-$commitId" - } - Write-Host "##vso[task.setvariable variable=flcVersion]$v" + $v = (Get-Content "$(Pipeline.Workspace)/version-info/flcVersion.txt" -Raw).Trim() Write-Host "FLC version: $v" + Write-Host "##vso[task.setvariable variable=flcVersion]$v" # Pack NuGet - task: PowerShell@2 diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml index 41ef7f62..1814626a 100644 --- a/.pipelines/templates/test-js-steps.yml +++ b/.pipelines/templates/test-js-steps.yml @@ -93,7 +93,8 @@ steps: Copy-Item $nupkg.FullName $zip -Force Expand-Archive -Path $zip -DestinationPath $extractDir -Force - $destDir = "$(repoRoot)/sdk/js/packages/@foundry-local-core/$platformKey" + $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null $nativeDir = "$extractDir/runtimes/$rid/native" if (Test-Path $nativeDir) { Get-ChildItem $nativeDir -File | ForEach-Object { diff --git a/sdk/js/package.json b/sdk/js/package.json index 5830e3fe..6e4acf50 100644 --- a/sdk/js/package.json +++ b/sdk/js/package.json @@ -27,12 +27,6 @@ "koffi": "^2.9.0", "adm-zip": "^0.5.16" }, - "optionalDependencies": { - "@foundry-local-core/darwin-arm64": "file:packages/@foundry-local-core/darwin-arm64", - "@foundry-local-core/linux-x64": "file:packages/@foundry-local-core/linux-x64", - "@foundry-local-core/win32-arm64": "file:packages/@foundry-local-core/win32-arm64", - "@foundry-local-core/win32-x64": "file:packages/@foundry-local-core/win32-x64" - }, "devDependencies": { "@types/chai": "^5.2.3", "@types/mocha": "^10.0.10", diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs index cc61f0db..090a25e3 100644 --- a/sdk/js/script/install-utils.cjs +++ b/sdk/js/script/install-utils.cjs @@ -19,7 +19,9 @@ const PLATFORM_MAP = { }; const platformKey = `${os.platform()}-${os.arch()}`; const RID = PLATFORM_MAP[platformKey]; -const BIN_DIR = path.join(__dirname, '..', 'packages', '@foundry-local-core', platformKey); +// Install binaries into node_modules/@foundry-local-core/ so they +// are shared across foundry-local-sdk and foundry-local-sdk-winml. 
+const BIN_DIR = path.join(__dirname, '..', 'node_modules', '@foundry-local-core', platformKey); const EXT = os.platform() === 'win32' ? '.dll' : os.platform() === 'darwin' ? '.dylib' : '.so'; const REQUIRED_FILES = [ @@ -104,7 +106,7 @@ async function getBaseAddress(feedUrl) { return baseAddress.endsWith('/') ? baseAddress : baseAddress + '/'; } -async function installPackage(artifact, tempDir) { +async function installPackage(artifact, tempDir, binDir) { const pkgName = artifact.name; const pkgVer = artifact.version; @@ -127,7 +129,7 @@ async function installPackage(artifact, tempDir) { if (entries.length > 0) { entries.forEach(entry => { - zip.extractEntryTo(entry, BIN_DIR, false, true); + zip.extractEntryTo(entry, binDir, false, true); console.log(` Extracted ${entry.name}`); }); } else { @@ -136,7 +138,7 @@ async function installPackage(artifact, tempDir) { // Update platform package.json version for Core packages if (pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { - const pkgJsonPath = path.join(BIN_DIR, 'package.json'); + const pkgJsonPath = path.join(binDir, 'package.json'); if (fs.existsSync(pkgJsonPath)) { const pkgJson = JSON.parse(fs.readFileSync(pkgJsonPath, 'utf8')); pkgJson.version = pkgVer; @@ -145,24 +147,27 @@ async function installPackage(artifact, tempDir) { } } -async function runInstall(artifacts) { +async function runInstall(artifacts, options) { if (!RID) { console.warn(`[foundry-local] Unsupported platform: ${platformKey}. Skipping.`); return; } - if (fs.existsSync(BIN_DIR) && REQUIRED_FILES.every(f => fs.existsSync(path.join(BIN_DIR, f)))) { + const force = options && options.force; + const binDir = (options && options.binDir) || BIN_DIR; + + if (!force && fs.existsSync(binDir) && REQUIRED_FILES.every(f => fs.existsSync(path.join(binDir, f)))) { console.log(`[foundry-local] Native libraries already installed.`); return; } console.log(`[foundry-local] Installing native libraries for ${RID}...`); - fs.mkdirSync(BIN_DIR, { recursive: true }); + fs.mkdirSync(binDir, { recursive: true }); const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'foundry-install-')); try { for (const artifact of artifacts) { - await installPackage(artifact, tempDir); + await installPackage(artifact, tempDir, binDir); } console.log('[foundry-local] Installation complete.'); } finally { diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index e6fda732..e6fd554e 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -2,11 +2,22 @@ // Licensed under the MIT License. // Install script for foundry-local-sdk-winml variant. +// +// Overwrites the standard native binaries inside foundry-local-sdk's own +// directory tree with the WinML variants (Core.WinML, ORT, GenAI). +// After this runs, everything lives under foundry-local-sdk — users import +// from 'foundry-local-sdk' and get WinML binaries transparently. 
'use strict'; +const path = require('path'); const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); +// Resolve foundry-local-sdk's binary directory +const sdkRoot = path.dirname(require.resolve('foundry-local-sdk/package.json')); +const platformKey = `${process.platform}-${process.arch}`; +const binDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey); + const ARTIFACTS = [ { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED }, @@ -15,7 +26,8 @@ const ARTIFACTS = [ (async () => { try { - await runInstall(ARTIFACTS); + // Force override into foundry-local-sdk's binary directory + await runInstall(ARTIFACTS, { force: true, binDir }); } catch (err) { console.error('Failed to install WinML artifacts:', err); process.exit(1); diff --git a/sdk/js/script/pack.cjs b/sdk/js/script/pack.cjs index 32057c7e..79a00828 100644 --- a/sdk/js/script/pack.cjs +++ b/sdk/js/script/pack.cjs @@ -19,8 +19,16 @@ try { const pkg = JSON.parse(original); if (isWinML) { pkg.name = 'foundry-local-sdk-winml'; - pkg.scripts.install = 'node script/install-winml.cjs'; - pkg.files = ['dist', 'script/install-winml.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs']; + pkg.description = 'Foundry Local JavaScript SDK – WinML variant'; + // The winml package is a thin wrapper: it depends on the standard SDK for all JS code + // and only overrides the native binaries at install time. + pkg.dependencies = { 'foundry-local-sdk': pkg.version }; + pkg.scripts = { install: 'node script/install-winml.cjs' }; + // No dist/ or preinstall needed — the standard SDK provides the JS code + pkg.files = ['script/install-winml.cjs', 'script/install-utils.cjs']; + delete pkg.main; + delete pkg.types; + delete pkg.optionalDependencies; } else { pkg.files = ['dist', 'script/install-standard.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs']; } diff --git a/sdk/js/script/preinstall.cjs b/sdk/js/script/preinstall.cjs index 5590550b..8cd953d2 100644 --- a/sdk/js/script/preinstall.cjs +++ b/sdk/js/script/preinstall.cjs @@ -25,7 +25,7 @@ const ALL_PLATFORMS = Object.keys(optionalDependencies) }; }); -const packagesRoot = path.join(__dirname, '..', 'packages', '@foundry-local-core'); +const packagesRoot = path.join(__dirname, '..', 'node_modules', '@foundry-local-core'); for (const platform of ALL_PLATFORMS) { const dir = path.join(packagesRoot, platform.key); diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts index 5af32421..6a0bc6b4 100644 --- a/sdk/js/src/detail/coreInterop.ts +++ b/sdk/js/src/detail/coreInterop.ts @@ -2,7 +2,6 @@ import koffi from 'koffi'; import path from 'path'; import fs from 'fs'; import { fileURLToPath } from 'url'; -import { createRequire } from 'module'; import { Configuration } from '../configuration.js'; koffi.struct('RequestBuffer', { @@ -49,15 +48,14 @@ export class CoreInterop { } private static _resolveDefaultCorePath(config: Configuration): string | null { - const require = createRequire(import.meta.url); const platform = process.platform; const arch = process.arch; - // Matches names generated by preinstall.cjs - const packageName = `@foundry-local-core/${platform}-${arch}`; - - // Resolve the package path. 
- const packagePath = require.resolve(`${packageName}/package.json`); - const packageDir = path.dirname(packagePath); + const platformKey = `${platform}-${arch}`; + + // Resolve the platform package directory at node_modules/@foundry-local-core/, + // the shared location where install scripts place the native binaries. + const sdkRoot = path.resolve(__dirname, '..', '..'); + const packageDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey); const ext = CoreInterop._getLibraryExtension(); const corePath = path.join(packageDir, `Microsoft.AI.Foundry.Local.Core${ext}`); From f3a8039f3967858a0ae8524b9a24d17909ba01ab Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Tue, 7 Apr 2026 05:49:50 -0700 Subject: [PATCH 28/83] updates js docs to reflect new package structure (#604) Co-authored-by: Prathik Rao Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- sdk/js/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sdk/js/README.md b/sdk/js/README.md index 298efa28..13d50442 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -22,18 +22,17 @@ npm install foundry-local-sdk ## WinML: Automatic Hardware Acceleration (Windows) -On Windows, install with the `--winml` flag to enable automatic execution provider management. The SDK will automatically discover, download, and register hardware-specific execution providers (e.g., Qualcomm QNN for NPU acceleration) via the Windows App Runtime — no manual driver or EP setup required. +On Windows, install the WinML package to enable automatic execution provider management. The SDK will automatically discover, download, and register hardware-specific execution providers (e.g., Qualcomm QNN for NPU acceleration) via the Windows App Runtime — no manual driver or EP setup required. +> **Note:** `foundry-local-sdk-winml` is a Windows-only package. Its install script downloads WinML artifacts during installation and may fail on macOS or Linux. ```bash -npm install foundry-local-sdk --winml +npm install foundry-local-sdk-winml ``` When WinML is enabled: - Execution providers like `QNNExecutionProvider`, `OpenVINOExecutionProvider`, etc. are downloaded and registered on the fly, enabling NPU/GPU acceleration without manual configuration - **No code changes needed** — your application code stays the same whether WinML is enabled or not -> **Note:** The `--winml` flag is only relevant on Windows. On macOS and Linux, the standard installation is used regardless of this flag. 
- ### Explicit EP Management You can explicitly discover and download execution providers using the `discoverEps()` and `downloadAndRegisterEps()` methods: From a5bae06879c1e80f197323f5e721d6c265292282 Mon Sep 17 00:00:00 2001 From: Baiju Meswani Date: Tue, 7 Apr 2026 13:22:10 -0700 Subject: [PATCH 29/83] lib name is not prepended with lib for macos and linux (#605) Co-authored-by: Baiju Meswani --- sdk/rust/build.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs index 73d84d32..b155c0b4 100644 --- a/sdk/rust/build.rs +++ b/sdk/rust/build.rs @@ -216,8 +216,8 @@ fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result fn libs_already_present(out_dir: &Path) -> bool { let core_lib = match env::consts::OS { "windows" => "Microsoft.AI.Foundry.Local.Core.dll", - "linux" => "libMicrosoft.AI.Foundry.Local.Core.so", - "macos" => "libMicrosoft.AI.Foundry.Local.Core.dylib", + "linux" => "Microsoft.AI.Foundry.Local.Core.so", + "macos" => "Microsoft.AI.Foundry.Local.Core.dylib", _ => return false, }; out_dir.join(core_lib).exists() From d1cf434c228f45d03ac38b3008282b48d366616e Mon Sep 17 00:00:00 2001 From: Baiju Meswani Date: Tue, 7 Apr 2026 16:37:27 -0700 Subject: [PATCH 30/83] Update ort-genai to 0.13.1 (#606) --- .pipelines/templates/build-python-steps.yml | 4 ++-- .pipelines/templates/test-python-steps.yml | 4 ++-- sdk/js/script/install-standard.cjs | 2 +- sdk/js/script/install-winml.cjs | 2 +- sdk/python/requirements-winml.txt | 2 +- sdk/python/requirements.txt | 4 ++-- sdk/rust/build.rs | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index f94aa712..a8658772 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -104,10 +104,10 @@ steps: } - ${{ if eq(parameters.isWinML, true) }}: - - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.12.1 + - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.1 displayName: 'Install ORT native packages (WinML)' - ${{ else }}: - - script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 + - script: pip install onnxruntime-core==1.24.4 onnxruntime-genai-core==0.13.1 displayName: 'Install ORT native packages' - script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml index 6fc86b3b..1de20b1c 100644 --- a/.pipelines/templates/test-python-steps.yml +++ b/.pipelines/templates/test-python-steps.yml @@ -99,10 +99,10 @@ steps: } - ${{ if eq(parameters.isWinML, true) }}: - - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.12.1 + - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.1 displayName: 'Install ORT native packages (WinML)' - ${{ else }}: - - script: pip install onnxruntime-core==1.24.3 onnxruntime-genai-core==0.12.1 + - script: pip install onnxruntime-core==1.24.4 onnxruntime-genai-core==0.13.1 displayName: 'Install ORT native packages' - script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs index 8b30135a..6901766d 100644 --- a/sdk/js/script/install-standard.cjs +++ b/sdk/js/script/install-standard.cjs @@ -11,7 +11,7 @@ const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = 
require('./install-utils.cj const ARTIFACTS = [ { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.24.4', feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.0', feed: NUGET_FEED }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.1', feed: NUGET_FEED }, ]; (async () => { diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index e6fd554e..efa2041c 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -21,7 +21,7 @@ const binDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platfor const ARTIFACTS = [ { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.0', feed: NUGET_FEED }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.1', feed: NUGET_FEED }, ]; (async () => { diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index 68b76b56..bee268a9 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -4,4 +4,4 @@ openai>=2.24.0 # WinML native binary packages from the ORT-Nightly PyPI feed. foundry-local-core-winml==1.0.0rc1 onnxruntime-core==1.23.2.3 -onnxruntime-genai-core==0.13.0 \ No newline at end of file +onnxruntime-genai-core==0.13.1 \ No newline at end of file diff --git a/sdk/python/requirements.txt b/sdk/python/requirements.txt index 9295b832..666a3721 100644 --- a/sdk/python/requirements.txt +++ b/sdk/python/requirements.txt @@ -5,5 +5,5 @@ openai>=2.24.0 foundry-local-core==1.0.0rc1 onnxruntime-core==1.24.4; sys_platform != "linux" onnxruntime-gpu==1.24.4; sys_platform == "linux" -onnxruntime-genai-core==0.13.0; sys_platform != "linux" -onnxruntime-genai-cuda==0.13.0; sys_platform == "linux" +onnxruntime-genai-core==0.13.1; sys_platform != "linux" +onnxruntime-genai-cuda==0.13.1; sys_platform == "linux" diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs index b155c0b4..999bca3d 100644 --- a/sdk/rust/build.rs +++ b/sdk/rust/build.rs @@ -9,7 +9,7 @@ const ORT_NIGHTLY_FEED: &str = const CORE_VERSION: &str = "0.9.0.8-rc3"; const ORT_VERSION: &str = "1.24.4"; -const GENAI_VERSION: &str = "0.13.0"; +const GENAI_VERSION: &str = "0.13.1"; const WINML_ORT_VERSION: &str = "1.23.2.3"; From 303430c52daa5d7ef4e11a9caeff2e010832481f Mon Sep 17 00:00:00 2001 From: David Luong Date: Wed, 8 Apr 2026 12:49:21 -0400 Subject: [PATCH 31/83] Fix incorrect unload log message in C# SDK (#609) ## Summary Fixes an incorrect informational log message in the C# SDK's web unload path. ## Changes - Updated `WebUnloadModelAsync` in `sdk/cs/src/Detail/ModelLoadManager.cs` - Changed the success log text from "loaded successfully" to "unloaded successfully" ## Why Consumers of `sdk/cs` provide their own `ILogger`, so this message is surfaced in their application logs. The old text was misleading and could cause confusion during debugging and support investigations. 
--- sdk/cs/src/Detail/ModelLoadManager.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/cs/src/Detail/ModelLoadManager.cs b/sdk/cs/src/Detail/ModelLoadManager.cs index fbcc2d99..a157ff78 100644 --- a/sdk/cs/src/Detail/ModelLoadManager.cs +++ b/sdk/cs/src/Detail/ModelLoadManager.cs @@ -157,7 +157,7 @@ private async Task WebUnloadModelAsync(string modelId, CancellationToken? ct = n } var content = await response.Content.ReadAsStringAsync(ct ?? CancellationToken.None).ConfigureAwait(false); - _logger.LogInformation("Model {ModelId} loaded successfully from {WebService}: {Message}", + _logger.LogInformation("Model {ModelId} unloaded successfully from {WebService}: {Message}", modelId, _externalServiceUrl, content); } From 6e767ac02ea3567ad7e203a014127eb32fe80902 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Apr 2026 09:53:50 -0700 Subject: [PATCH 32/83] Bump flatted from 3.3.3 to 3.4.2 in /sdk_legacy/js (#542) Bumps [flatted](https://github.com/WebReflection/flatted) from 3.3.3 to 3.4.2.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- sdk_legacy/js/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk_legacy/js/package-lock.json b/sdk_legacy/js/package-lock.json index 4a3c3cae..29f96079 100644 --- a/sdk_legacy/js/package-lock.json +++ b/sdk_legacy/js/package-lock.json @@ -3437,9 +3437,9 @@ } }, "node_modules/flatted": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", - "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "version": "3.4.2", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz", + "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==", "dev": true, "license": "ISC" }, From 25f8d9be89b52f3a43957b9e867bc60c33c44b03 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Wed, 8 Apr 2026 10:46:59 -0700 Subject: [PATCH 33/83] upgrade foundry local core osx builds to use macos-15 (#611) Co-authored-by: Prathik Rao --- .pipelines/foundry-local-packaging.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index 617ea587..cb5766c0 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -192,7 +192,7 @@ extends: displayName: 'FLC osx-arm64' pool: name: Azure Pipelines - vmImage: 'macOS-14' + vmImage: 'macOS-15' os: macOS templateContext: outputs: From 23477c8d375ee228852eb75317ec5020e127de0a Mon Sep 17 00:00:00 2001 From: Samuel Kemp Date: Wed, 8 Apr 2026 20:14:18 +0100 Subject: [PATCH 34/83] rust sdk: change download progress callback from &str to f64 (#608) Change the public download progress callback type from FnMut(&str) to FnMut(f64) for cross-SDK consistency. Python, JS, and C# SDKs all expose a numeric float type for download progress. The string-to-float parsing now happens inside model_variant.rs (using chunk.parse::<f64>()), matching the pattern already used by download_and_register_eps_with_progress. Unparseable chunks are silently ignored (similar to TryParse). The internal core_interop streaming infrastructure remains unchanged since it is shared with chat/audio streaming.
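For reference, a minimal sketch of the new callback shape, adapted from the sample updates in this patch. It assumes a `model` obtained from the catalog and an enclosing async function where `?` is available:

```rust
use std::io::{self, Write};

// Progress now arrives as a numeric percentage (0.0-100.0) instead of a
// pre-formatted string chunk, so callers can format it or drive a
// progress bar directly.
model
    .download(Some(|progress: f64| {
        print!("\r {progress:.1}%");
        io::stdout().flush().ok();
    }))
    .await?;
```

Callers that previously received `&str` chunks only need to change the closure's parameter type and formatting; the rest of the call site is unchanged.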
--------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/rust/.cargo/config.toml | 7 ---- .../audio-transcription-example/src/main.rs | 4 +- .../rust/foundry-local-webserver/src/main.rs | 4 +- .../rust/native-chat-completions/src/main.rs | 4 +- .../tool-calling-foundry-local/src/main.rs | 4 +- .../rust/tutorial-chat-assistant/src/main.rs | 4 +- .../tutorial-document-summarizer/src/main.rs | 4 +- .../rust/tutorial-tool-calling/src/main.rs | 4 +- .../rust/tutorial-voice-to-text/src/main.rs | 8 ++-- sdk/rust/.cargo/config.toml | 7 ---- sdk/rust/README.md | 6 +-- sdk/rust/docs/api.md | 7 ++-- sdk/rust/examples/chat_completion.rs | 4 +- sdk/rust/examples/interactive_chat.rs | 2 +- sdk/rust/examples/tool_calling.rs | 2 +- sdk/rust/src/detail/model.rs | 26 +++++++++++-- sdk/rust/src/detail/model_variant.rs | 13 +++++-- sdk/rust/tests/integration/model_test.rs | 39 ++++++++++++++++--- 18 files changed, 94 insertions(+), 55 deletions(-) delete mode 100644 samples/rust/.cargo/config.toml delete mode 100644 sdk/rust/.cargo/config.toml diff --git a/samples/rust/.cargo/config.toml b/samples/rust/.cargo/config.toml deleted file mode 100644 index 84c57445..00000000 --- a/samples/rust/.cargo/config.toml +++ /dev/null @@ -1,7 +0,0 @@ -[registries] - -[source.crates-io] -replace-with = "ORT-Nightly" - -[source.ORT-Nightly] -registry = "sparse+https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/Cargo/index/" diff --git a/samples/rust/audio-transcription-example/src/main.rs b/samples/rust/audio-transcription-example/src/main.rs index c326006f..f5fb4cff 100644 --- a/samples/rust/audio-transcription-example/src/main.rs +++ b/samples/rust/audio-transcription-example/src/main.rs @@ -35,8 +35,8 @@ async fn main() -> Result<(), Box> { if !model.is_cached().await? { println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}%"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/foundry-local-webserver/src/main.rs b/samples/rust/foundry-local-webserver/src/main.rs index 492cbbc1..02f0360e 100644 --- a/samples/rust/foundry-local-webserver/src/main.rs +++ b/samples/rust/foundry-local-webserver/src/main.rs @@ -34,8 +34,8 @@ async fn main() -> Result<(), Box> { if !model.is_cached().await? { print!("Downloading model {model_alias}..."); model - .download(Some(move |progress: &str| { - print!("\rDownloading model... {progress}%"); + .download(Some(move |progress: f64| { + print!("\rDownloading model... {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/native-chat-completions/src/main.rs b/samples/rust/native-chat-completions/src/main.rs index 04d09372..d1c7cfd1 100644 --- a/samples/rust/native-chat-completions/src/main.rs +++ b/samples/rust/native-chat-completions/src/main.rs @@ -32,8 +32,8 @@ async fn main() -> Result<(), Box> { if !model.is_cached().await? { println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}%"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/tool-calling-foundry-local/src/main.rs b/samples/rust/tool-calling-foundry-local/src/main.rs index 1ccda1e8..f6ab1965 100644 --- a/samples/rust/tool-calling-foundry-local/src/main.rs +++ b/samples/rust/tool-calling-foundry-local/src/main.rs @@ -66,8 +66,8 @@ async fn main() -> Result<(), Box> { if !model.is_cached().await? 
{ println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}%"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/tutorial-chat-assistant/src/main.rs b/samples/rust/tutorial-chat-assistant/src/main.rs index 6b0b587b..34a3c6ed 100644 --- a/samples/rust/tutorial-chat-assistant/src/main.rs +++ b/samples/rust/tutorial-chat-assistant/src/main.rs @@ -21,8 +21,8 @@ async fn main() -> anyhow::Result<()> { if !model.is_cached().await? { println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/tutorial-document-summarizer/src/main.rs b/samples/rust/tutorial-document-summarizer/src/main.rs index 9ade2e77..be600056 100644 --- a/samples/rust/tutorial-document-summarizer/src/main.rs +++ b/samples/rust/tutorial-document-summarizer/src/main.rs @@ -96,8 +96,8 @@ async fn main() -> anyhow::Result<()> { if !model.is_cached().await? { println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/tutorial-tool-calling/src/main.rs b/samples/rust/tutorial-tool-calling/src/main.rs index f4476643..d6cfb9ce 100644 --- a/samples/rust/tutorial-tool-calling/src/main.rs +++ b/samples/rust/tutorial-tool-calling/src/main.rs @@ -199,8 +199,8 @@ async fn main() -> anyhow::Result<()> { if !model.is_cached().await? { println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/samples/rust/tutorial-voice-to-text/src/main.rs b/samples/rust/tutorial-voice-to-text/src/main.rs index 2295c86a..fd802c77 100644 --- a/samples/rust/tutorial-voice-to-text/src/main.rs +++ b/samples/rust/tutorial-voice-to-text/src/main.rs @@ -28,8 +28,8 @@ async fn main() -> anyhow::Result<()> { if !speech_model.is_cached().await? { println!("Downloading speech model..."); speech_model - .download(Some(|progress: &str| { - print!("\r {progress}"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; @@ -60,8 +60,8 @@ async fn main() -> anyhow::Result<()> { if !chat_model.is_cached().await? 
{ println!("Downloading chat model..."); chat_model - .download(Some(|progress: &str| { - print!("\r {progress}"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; diff --git a/sdk/rust/.cargo/config.toml b/sdk/rust/.cargo/config.toml deleted file mode 100644 index 84c57445..00000000 --- a/sdk/rust/.cargo/config.toml +++ /dev/null @@ -1,7 +0,0 @@ -[registries] - -[source.crates-io] -replace-with = "ORT-Nightly" - -[source.ORT-Nightly] -registry = "sparse+https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/Cargo/index/" diff --git a/sdk/rust/README.md b/sdk/rust/README.md index e98b8e4c..08f9c279 100644 --- a/sdk/rust/README.md +++ b/sdk/rust/README.md @@ -191,10 +191,10 @@ Download, load, and unload: ```rust // Download with progress reporting -model.download(Some(Box::new(|progress: &str| { - print!("\r{progress}"); +model.download(Some(|progress: f64| { + print!("\r{progress:.1}%"); std::io::Write::flush(&mut std::io::stdout()).ok(); -}))).await?; +})).await?; // Load into memory model.load().await?; diff --git a/sdk/rust/docs/api.md b/sdk/rust/docs/api.md index 278402fb..a21c23a0 100644 --- a/sdk/rust/docs/api.md +++ b/sdk/rust/docs/api.md @@ -151,10 +151,11 @@ pub struct Model { /* private fields */ } | `id` | `fn id(&self) -> &str` | Unique identifier of the selected variant. | | `variants` | `fn variants(&self) -> &[Arc]` | All variants in this model. | | `selected_variant` | `fn selected_variant(&self) -> &ModelVariant` | Currently selected variant. | -| `select_variant` | `fn select_variant(&self, id: &str) -> Result<(), FoundryLocalError>` | Select a variant by id. | +| `select_variant` | `fn select_variant(&self, variant: &Model) -> Result<(), FoundryLocalError>` | Select a variant from `variants()`. | +| `select_variant_by_id` | `fn select_variant_by_id(&self, id: &str) -> Result<(), FoundryLocalError>` | Select a variant by its unique id string. | | `is_cached` | `async fn is_cached(&self) -> Result` | Whether the selected variant is cached on disk. | | `is_loaded` | `async fn is_loaded(&self) -> Result` | Whether the selected variant is loaded in memory. | -| `download` | `async fn download(&self, progress: Option) -> Result<(), FoundryLocalError>` | Download the selected variant. `F: FnMut(&str) + Send + 'static` | +| `download` | `async fn download(&self, progress: Option) -> Result<(), FoundryLocalError>` | Download the selected variant. `F: FnMut(f64) + Send + 'static` — receives progress as a percentage (0.0–100.0). | | `path` | `async fn path(&self) -> Result` | Local file-system path of the selected variant. | | `load` | `async fn load(&self) -> Result<(), FoundryLocalError>` | Load the selected variant into memory. | | `unload` | `async fn unload(&self) -> Result` | Unload the selected variant from memory. | @@ -179,7 +180,7 @@ pub struct ModelVariant { /* private fields */ } | `alias` | `fn alias(&self) -> &str` | Alias shared with sibling variants. | | `is_cached` | `async fn is_cached(&self) -> Result` | Whether cached locally. ⚠️ Full IPC per call — prefer `Catalog::get_cached_models()` for batch use. | | `is_loaded` | `async fn is_loaded(&self) -> Result` | Whether currently loaded in memory. | -| `download` | `async fn download(&self, progress: Option) -> Result<(), FoundryLocalError>` | Download the variant. `F: FnMut(&str) + Send + 'static` | +| `download` | `async fn download(&self, progress: Option) -> Result<(), FoundryLocalError>` | Download the variant. 
`F: FnMut(f64) + Send + 'static` — receives progress as a percentage (0.0–100.0). | | `path` | `async fn path(&self) -> Result` | Local file-system path. | | `load` | `async fn load(&self) -> Result<(), FoundryLocalError>` | Load into memory. | | `unload` | `async fn unload(&self) -> Result` | Unload from memory. | diff --git a/sdk/rust/examples/chat_completion.rs b/sdk/rust/examples/chat_completion.rs index 3516aa60..f3ac15c8 100644 --- a/sdk/rust/examples/chat_completion.rs +++ b/sdk/rust/examples/chat_completion.rs @@ -40,8 +40,8 @@ async fn main() -> Result<()> { if !model.is_cached().await? { println!("Downloading model '{}'…", model.alias()); model - .download(Some(|progress: &str| { - println!(" {progress}"); + .download(Some(|progress: f64| { + println!(" {progress:.1}%"); })) .await?; } diff --git a/sdk/rust/examples/interactive_chat.rs b/sdk/rust/examples/interactive_chat.rs index bd230155..e1ccb564 100644 --- a/sdk/rust/examples/interactive_chat.rs +++ b/sdk/rust/examples/interactive_chat.rs @@ -41,7 +41,7 @@ async fn main() -> Result<(), Box> { // Download if needed if !model.is_cached().await? { println!("Downloading '{alias}'…"); - model.download(Some(|p: &str| print!("\r {p}%"))).await?; + model.download(Some(|p: f64| print!("\r {p:.1}%"))).await?; println!(); } diff --git a/sdk/rust/examples/tool_calling.rs b/sdk/rust/examples/tool_calling.rs index fecf6bc5..f556b2a9 100644 --- a/sdk/rust/examples/tool_calling.rs +++ b/sdk/rust/examples/tool_calling.rs @@ -67,7 +67,7 @@ async fn main() -> Result<()> { if !model.is_cached().await? { println!("Downloading model '{}'…", model.alias()); - model.download(Some(|p: &str| println!(" {p}"))).await?; + model.download(Some(|p: f64| println!(" {p:.1}%"))).await?; } println!("Loading model '{}'…", model.alias()); model.load().await?; diff --git a/sdk/rust/src/detail/model.rs b/sdk/rust/src/detail/model.rs index 196ebe35..3a87a1c3 100644 --- a/sdk/rust/src/detail/model.rs +++ b/sdk/rust/src/detail/model.rs @@ -204,10 +204,10 @@ impl Model { } /// Download the (selected) variant. If `progress` is provided it - /// receives human-readable progress strings as they arrive. + /// receives download progress as a percentage (0.0–100.0). pub async fn download(&self, progress: Option) -> Result<()> where - F: FnMut(&str) + Send + 'static, + F: FnMut(f64) + Send + 'static, { self.selected_variant().download(progress).await } @@ -259,7 +259,25 @@ impl Model { } } - /// Select a variant by its unique id. + /// Select a variant to use for subsequent operations. + /// + /// The `variant` must be one of the models returned by [`variants`](Model::variants). + /// + /// # Errors + /// + /// Returns an error if the variant does not belong to this model. + /// For single-variant models this always returns an error — use + /// [`Catalog::get_model`](crate::Catalog::get_model) to obtain a model + /// with all variants available. + pub fn select_variant(&self, variant: &Model) -> Result<()> { + self.select_variant_by_id(variant.id()) + } + + /// Select a variant by its unique id string. + /// + /// This is a convenience method for cases where you have a variant id + /// from an external source. Prefer [`select_variant`](Model::select_variant) + /// when you already have a [`Model`] reference from [`variants`](Model::variants). /// /// # Errors /// @@ -267,7 +285,7 @@ impl Model { /// For single-variant models this always returns an error — use /// [`Catalog::get_model`](crate::Catalog::get_model) to obtain a model /// with all variants available. 
- pub fn select_variant(&self, id: &str) -> Result<()> { + pub fn select_variant_by_id(&self, id: &str) -> Result<()> { match &self.inner { ModelKind::ModelVariant(v) => Err(FoundryLocalError::ModelOperation { reason: format!( diff --git a/sdk/rust/src/detail/model_variant.rs b/sdk/rust/src/detail/model_variant.rs index 636c5d5b..ca1a83c7 100644 --- a/sdk/rust/src/detail/model_variant.rs +++ b/sdk/rust/src/detail/model_variant.rs @@ -88,13 +88,20 @@ impl ModelVariant { pub(crate) async fn download(&self, progress: Option<F>) -> Result<()> where - F: FnMut(&str) + Send + 'static, + F: FnMut(f64) + Send + 'static, { let params = json!({ "Params": { "Model": self.info.id } }); match progress { - Some(cb) => { + Some(mut cb) => { + let wrapper = move |chunk: &str| { + for token in chunk.split_whitespace() { + if let Ok(pct) = token.parse::<f64>() { + cb(pct); + } + } + }; self.core - .execute_command_streaming_async("download_model".into(), Some(params), cb) + .execute_command_streaming_async("download_model".into(), Some(params), wrapper) .await?; } None => { diff --git a/sdk/rust/tests/integration/model_test.rs b/sdk/rust/tests/integration/model_test.rs index 4e3b371b..c1ffa171 100644 --- a/sdk/rust/tests/integration/model_test.rs +++ b/sdk/rust/tests/integration/model_test.rs @@ -156,7 +156,7 @@ async fn should_return_non_empty_path_for_cached_model() { } #[tokio::test] -async fn should_select_variant_by_id() { +async fn should_select_variant_by_model() { let manager = common::get_test_manager(); let model = manager .catalog() .get_model(common::TEST_MODEL_ALIAS) .await .expect("get_model failed"); // Remember the original selection so we can restore it afterward. let original_id = model.id().to_string(); - let first_variant_id = model.variants()[0].id().to_string(); + let first_variant = model.variants()[0].clone(); + let first_variant_id = first_variant.id().to_string(); model - .select_variant(&first_variant_id) + .select_variant(&first_variant) .expect("select_variant should succeed"); assert_eq!( model.id(), first_variant_id, "After select_variant, id() should match the selected variant" ); // Restore the original variant so other tests sharing this // model via the catalog are not affected.
model - .select_variant(&original_id) + .select_variant_by_id(&original_id) + .expect("restoring original variant should succeed"); +} + +#[tokio::test] +async fn should_select_variant_by_id() { + let manager = common::get_test_manager(); + let model = manager + .catalog() + .get_model(common::TEST_MODEL_ALIAS) + .await + .expect("get_model failed"); + + let original_id = model.id().to_string(); + + let first_variant_id = model.variants()[0].id().to_string(); + model + .select_variant_by_id(&first_variant_id) + .expect("select_variant_by_id should succeed"); + assert_eq!( + model.id(), + first_variant_id, + "After select_variant_by_id, id() should match the selected variant" + ); + + model + .select_variant_by_id(&original_id) .expect("restoring original variant should succeed"); } @@ -193,10 +220,10 @@ async fn should_fail_to_select_unknown_variant() { .await .expect("get_model failed"); - let result = model.select_variant("nonexistent-variant-id"); + let result = model.select_variant_by_id("nonexistent-variant-id"); assert!( result.is_err(), - "select_variant with unknown ID should fail" + "select_variant_by_id with unknown ID should fail" ); let err_msg = result.unwrap_err().to_string(); From 3eed74953630487cb3c7626efc17996f905b6b70 Mon Sep 17 00:00:00 2001 From: Samuel Kemp Date: Wed, 8 Apr 2026 20:14:52 +0100 Subject: [PATCH 35/83] Samuel100/update readmes (#573) Update README to better align the product offering. Updated sample READMEs. --------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> Co-authored-by: Baiju Meswani --- README.md | 315 ++++++------------ samples/README.md | 14 + samples/cs/Directory.Packages.props | 9 +- samples/cs/README.md | 6 +- .../LiveAudioTranscriptionExample.csproj | 55 --- .../LiveAudioTranscriptionExample.sln | 34 -- .../Program.cs | 106 ------ samples/cs/nuget.config | 5 - samples/js/README.md | 49 +++ .../js/audio-transcription-example/README.md | 38 --- .../audio-transcription-example/package.json | 15 + .../js/chat-and-audio-foundry-local/README.md | 39 --- .../chat-and-audio-foundry-local/package.json | 4 + .../js/copilot-sdk-foundry-local/README.md | 150 --------- .../js/copilot-sdk-foundry-local/package.json | 3 + samples/js/electron-chat-application/.npmrc | 2 - .../js/electron-chat-application/README.md | 239 ------------- .../js/electron-chat-application/package.json | 18 +- .../langchain-integration-example/README.md | 39 --- .../package.json | 17 + .../README.md | 58 ---- .../live-audio-transcription-example/app.js | 157 --------- samples/js/native-chat-completions/README.md | 37 -- .../js/native-chat-completions/package.json | 15 + .../js/tool-calling-foundry-local/README.md | 15 - .../tool-calling-foundry-local/package.json | 9 + .../js/tutorial-chat-assistant/package.json | 8 +- .../tutorial-document-summarizer/package.json | 8 +- samples/js/tutorial-tool-calling/package.json | 8 +- .../js/tutorial-voice-to-text/package.json | 8 +- samples/js/web-server-example/README.md | 39 --- samples/js/web-server-example/package.json | 16 + samples/python/README.md | 46 +++ .../audio-transcription/requirements.txt | 3 +- .../langchain-integration/requirements.txt | 3 +- .../native-chat-completions/requirements.txt | 3 +- samples/python/tool-calling/requirements.txt | 3 +- .../tutorial-chat-assistant/requirements.txt | 3 +- .../requirements.txt | 3 +- .../tutorial-tool-calling/requirements.txt | 3 +- .../tutorial-voice-to-text/requirements.txt | 3 +- samples/python/web-server/requirements.txt | 3 +- samples/rust/README.md | 42 
++- .../audio-transcription-example/Cargo.toml | 3 + .../audio-transcription-example/README.md | 25 -- .../rust/foundry-local-webserver/Cargo.toml | 3 + .../rust/foundry-local-webserver/README.md | 25 -- .../rust/native-chat-completions/Cargo.toml | 3 + .../rust/native-chat-completions/README.md | 25 -- .../tool-calling-foundry-local/Cargo.toml | 3 + .../rust/tool-calling-foundry-local/README.md | 25 -- .../rust/tutorial-chat-assistant/Cargo.toml | 3 + .../tutorial-document-summarizer/Cargo.toml | 3 + samples/rust/tutorial-tool-calling/Cargo.toml | 3 + .../rust/tutorial-voice-to-text/Cargo.toml | 3 + 55 files changed, 394 insertions(+), 1380 deletions(-) create mode 100644 samples/README.md delete mode 100644 samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj delete mode 100644 samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln delete mode 100644 samples/cs/live-audio-transcription-example/Program.cs create mode 100644 samples/js/README.md delete mode 100644 samples/js/audio-transcription-example/README.md create mode 100644 samples/js/audio-transcription-example/package.json delete mode 100644 samples/js/chat-and-audio-foundry-local/README.md delete mode 100644 samples/js/copilot-sdk-foundry-local/README.md delete mode 100644 samples/js/electron-chat-application/.npmrc delete mode 100644 samples/js/electron-chat-application/README.md delete mode 100644 samples/js/langchain-integration-example/README.md create mode 100644 samples/js/langchain-integration-example/package.json delete mode 100644 samples/js/live-audio-transcription-example/README.md delete mode 100644 samples/js/live-audio-transcription-example/app.js delete mode 100644 samples/js/native-chat-completions/README.md create mode 100644 samples/js/native-chat-completions/package.json delete mode 100644 samples/js/tool-calling-foundry-local/README.md delete mode 100644 samples/js/web-server-example/README.md create mode 100644 samples/js/web-server-example/package.json create mode 100644 samples/python/README.md delete mode 100644 samples/rust/audio-transcription-example/README.md delete mode 100644 samples/rust/foundry-local-webserver/README.md delete mode 100644 samples/rust/native-chat-completions/README.md delete mode 100644 samples/rust/tool-calling-foundry-local/README.md diff --git a/README.md b/README.md index 517b4b37..881bb656 100644 --- a/README.md +++ b/README.md @@ -13,93 +13,54 @@ -## Add on-device AI to your app, effortlessly +## Ship on-device AI inside your app +Foundry Local is an **end-to-end local AI solution** for building applications that run entirely on the user's device. It provides native SDKs (C#, JavaScript, Python, and Rust), a curated catalog of optimized models, and automatic hardware acceleration — all in a lightweight package (~20 MB). The compact size makes it easy to integrate into your application and distribute to end users. -Foundry Local lets you embed generative AI directly into your applications — no cloud or server calls required. All inference runs on-device, which means user data never leaves the device, responses start immediately with zero network latency, and your app works offline. No per-token costs, no backend infrastructure to maintain. +User data never leaves the device, responses start immediately with zero network latency, and your app works offline. No per-token costs, no API keys, no backend infrastructure to maintain, and no Azure subscription required. 
-Key benefits include: +### Key Features -- **Self-contained SDK** — Ship AI features without requiring users to install any external dependencies. -- **Chat AND Audio in one runtime** — Text generation and speech-to-text (Whisper) through a single SDK — no need for separate tools like `whisper.cpp` + `llama.cpp`. -- **Easy-to-use CLI** — Explore models and experiment locally before integrating with your app. -- **Optimized models out-of-the-box** — State-of-the-art quantization and compression deliver both performance and quality. -- **Small footprint** — Leverages [ONNX Runtime](https://onnxruntime.ai/); a high performance inference runtime (written in C++) that has minimal disk and memory requirements. -- **Automatic hardware acceleration** — Leverage GPUs and NPUs when available, with seamless fallback to CPU. Zero hardware detection code needed. -- **Model distribution** — Popular open-source models hosted in the cloud with automatic downloading and updating. -- **Multi-platform support** — Windows, macOS (Apple silicon), Linux and Android. -- **Bring your own models** — Add and run custom models alongside the built-in catalog. +- **Lightweight runtime** — The runtime handles model acquisition, hardware acceleration, model management, and inference (via [ONNX Runtime](https://onnxruntime.ai/)). -### Supported Tasks +- **Curated model catalog** — A catalog of high-quality models optimized for on-device use across a wide range of consumer hardware. The catalog covers chat completions (for example, GPT OSS, Qwen, DeepSeek, Mistral and Phi) and audio transcription (for example, Whisper). Every model goes through extensive quantization and compression to deliver the best balance of quality and performance. Models are versioned, so your application can pin to a specific version or automatically receive updates. -| Task | Model Aliases | API | -|------|--------------|-----| -| Chat / Text Generation | `phi-3.5-mini`, `qwen2.5-0.5b`, `qwen2.5-coder-0.5b`, etc. | Chat Completions | -| Audio Transcription (Speech-to-Text) | `whisper-tiny` | Audio Transcription | +- **Automatic hardware acceleration** — Foundry Local detects the available hardware on the user's device and selects the best execution provider and device (NPU, GPU or CPU). -> [!NOTE] -> Foundry Local is a **unified local AI runtime** — it replaces the need for separate tools like `whisper.cpp`, `llama.cpp`, or `ollama`. One SDK handles both chat and audio, with automatic hardware acceleration (NPU > GPU > CPU). +- **Smart model management** — Foundry Local handles the full lifecycle of models on end-user devices. Models download automatically on first use, are cached locally for instant subsequent launches, and the best-performing variant is selected for the user's specific hardware. -## 🚀 Quickstart - -### Explore with the CLI - -The Foundry Local CLI is a great way to explore models and test features before integrating with your app. - -1. Install the CLI to explore models interactively before integrating with your app. - - **Windows:** - ```bash - winget install Microsoft.FoundryLocal - ``` - - **macOS:** - ```bash - brew install microsoft/foundrylocal/foundrylocal - ``` +- **OpenAI-compatible API** — Supports OpenAI request and response formats including the [OpenAI Responses API format](https://developers.openai.com/api/reference/resources/responses). If your application already uses the OpenAI SDK, point it to a Foundry Local endpoint with minimal code changes. -2. 
Start a chat session with a model: - - ```bash - foundry model run qwen2.5-0.5b - ``` +- **Optional local server** — An OpenAI-compatible web server for serving models to multiple processes, integrating with tools like LangChain, or experimenting through REST calls. For most embedded application scenarios, use the SDK directly — it runs inference in-process without the overhead of a separate server. -3. Explore available models - ```bash - foundry model ls - ``` +## 🚀 Quickstart > [!TIP] -> For installation issues, see the [Installation section](#installing) below. +> The following shows a quickstart for Python and JavaScript. C# and Rust language bindings are also available. Take a look at the [samples](/samples/) for more details. -### Add on-device AI to your app - -The Foundry Local SDK makes it easy to integrate local AI models into your applications. Below are quickstart examples for JavaScript, C# and Python. - -> [!TIP] -> For the JavaScript and C# SDKs you do **not** require the CLI to be installed. The Python SDK has a dependency on the CLI but a native in-process SDK is coming soon.
JavaScript -1. Install the SDK using npm: +1. Install the SDK: ```bash + # Windows (recommended for hardware acceleration) + npm install foundry-local-sdk-winml + + # macOS/linux npm install foundry-local-sdk ``` - > [!NOTE] - > On Windows, NPU models are not currently available for the JavaScript SDK. These will be enabled in a subsequent release. - -2. Use the SDK in your application as follows: +2. Run your first chat completion: ```javascript import { FoundryLocalManager } from 'foundry-local-sdk'; - const manager = FoundryLocalManager.create({ appName: 'foundry_local_samples' }); + const manager = FoundryLocalManager.create({ appName: 'my-app' }); // Download and load a model (auto-selects best variant for user's hardware) const model = await manager.catalog.getModel('qwen2.5-0.5b'); @@ -122,112 +83,67 @@ The Foundry Local SDK makes it easy to integrate local AI models into your appli
-
-C# - -1. Install the SDK using NuGet: - - ```bash - # Windows - dotnet add package Microsoft.AI.Foundry.Local.WinML - - # macOS/Linux - dotnet add package Microsoft.AI.Foundry.Local - ``` - On Windows, we recommend using the `Microsoft.AI.Foundry.Local.WinML` package, which will enable wider hardware acceleration support. - -2. Use the SDK in your application as follows: - ```csharp - using Microsoft.AI.Foundry.Local; - - var config = new Configuration { AppName = "foundry_local_samples" }; - await FoundryLocalManager.CreateAsync(config); - var mgr = FoundryLocalManager.Instance; - - // Download and load a model (auto-selects best variant for user's hardware) - var catalog = await mgr.GetCatalogAsync(); - var model = await catalog.GetModelAsync("qwen2.5-0.5b"); - await model.DownloadAsync(); - await model.LoadAsync(); - - // Create a chat client and get a streaming completion - var chatClient = await model.GetChatClientAsync(); - var messages = new List - { - new() { Role = "user", Content = "What is the golden ratio?" } - }; - - await foreach (var chunk in chatClient.CompleteChatStreamingAsync(messages, CancellationToken.None)) - { - Console.Write(chunk.Choices[0].Message.Content); - } - - // Unload the model when done - await model.UnloadAsync(); - ``` - -
-
+
Python -**NOTE:** The Python SDK currently relies on the Foundry Local CLI and uses the OpenAI-compatible REST API. A native in-process SDK (matching JS/C#) is coming soon. - -1. Install the SDK using pip: +1. Install the SDK: ```bash - pip install foundry-local-sdk openai + # Windows (recommended for hardware acceleration) + pip install foundry-local-sdk-winml + + # macOS/Linux + pip install foundry-local-sdk ``` -2. Use the SDK in your application as follows: +2. Run your first chat completion: ```python - import openai - from foundry_local import FoundryLocalManager - - # Initialize manager (starts local service and loads model) - manager = FoundryLocalManager("phi-3.5-mini") - - # Use the OpenAI SDK pointed at your local endpoint - client = openai.OpenAI(base_url=manager.endpoint, api_key=manager.api_key) - - response = client.chat.completions.create( - model=manager.get_model_info("phi-3.5-mini").id, - messages=[{"role": "user", "content": "What is the golden ratio?"}] - ) - - print(response.choices[0].message.content) + from foundry_local_sdk import Configuration, FoundryLocalManager + + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load a model from the catalog + model = manager.catalog.get_model("qwen2.5-0.5b") + model.download() + model.load() + + # Get a chat client + client = model.get_chat_client() + + # Create and send message + messages = [ + {"role": "user", "content": "What is the golden ratio?"} + ] + response = client.complete_chat(messages) + print(f"Response: {response.choices[0].message.content}") + + model.unload() ```
-### More samples - -Explore complete working examples in the [`samples/`](samples/) folder: - -| Sample | Description | -|--------|-------------| -| [**cs/**](samples/cs/) | C# examples using the .NET SDK (includes audio transcription) | -| [**js/**](samples/js/) | JavaScript/Node.js examples (chat, audio transcription, tool calling) | -| [**python/**](samples/python/) | Python examples using the OpenAI-compatible API | -#### Audio Transcription (Speech-to-Text) +### 💬 Audio Transcription (Speech-to-Text) -The SDK also supports audio transcription via Whisper models. Use `model.createAudioClient()` to transcribe audio files on-device: +The SDK also supports audio transcription via Whisper models (available in JavaScript, C#, Python and Rust): ```javascript import { FoundryLocalManager } from 'foundry-local-sdk'; -const manager = FoundryLocalManager.create({ appName: 'MyApp' }); +const manager = FoundryLocalManager.create({ appName: 'my-app' }); -// Download and load the Whisper model const whisperModel = await manager.catalog.getModel('whisper-tiny'); await whisperModel.download(); await whisperModel.load(); -// Transcribe an audio file const audioClient = whisperModel.createAudioClient(); audioClient.settings.language = 'en'; + +// Transcribe an audio file const result = await audioClient.transcribe('recording.wav'); console.log('Transcription:', result.text); @@ -240,125 +156,80 @@ await whisperModel.unload(); ``` > [!TIP] -> A single `FoundryLocalManager` can manage both chat and audio models simultaneously. See the [chat-and-audio sample](samples/js/chat-and-audio-foundry-local/) for a complete example that transcribes audio then analyzes it with a chat model. +> A single `FoundryLocalManager` can manage both chat and audio models simultaneously. See the [chat-and-audio sample](samples/js/chat-and-audio-foundry-local/) for a complete example. -## Manage +## 📦 Samples -This section provides an overview of how to manage Foundry Local, including installation, upgrading, and removing the application. +Explore complete working examples in the [`samples/`](samples/) folder: -### Installing +| Language | Samples | Highlights | +|----------|---------|------------| +| [**C#**](samples/cs/) | 12 | Native chat, audio transcription, tool calling, model management, web server, tutorials | +| [**JavaScript**](samples/js/) | 12 | Native chat, audio, Electron app, Copilot SDK, LangChain, tool calling, tutorials | +| [**Python**](samples/python/) | 9 | Chat completions, audio transcription, LangChain, tool calling, tutorials | +| [**Rust**](samples/rust/) | 8 | Native chat, audio transcription, tool calling, web server, tutorials | -Foundry Local is available for Windows and macOS (Apple silicon only). You can install it using package managers or manually download the installer. +## 🖥️ CLI -#### Windows +The Foundry Local CLI lets you explore models and experiment interactively. -You can install Foundry Local using the following command in a Windows console (PowerShell, cmd, etc.): +**Install:** ```bash +# Windows winget install Microsoft.FoundryLocal -``` - -Alternatively, you can also manually download and install the packages. On [the releases page](https://github.com/microsoft/Foundry-Local/releases) -select a release and expand the Artifacts list. Copy the artifact full URI (for example: `https://github.com/microsoft/Foundry-Local/releases/download/v0.3.9267/FoundryLocal-x64-0.3.9267.43123.msix`) -to use in the below PowerShell steps. Replace `x64` with `arm64` as needed. 
-```powershell -# Download the package and its dependency -$releaseUri = "https://github.com/microsoft/Foundry-Local/releases/download/v0.3.9267/FoundryLocal-x64-0.3.9267.43123.msix" -Invoke-WebRequest -Method Get -Uri $releaseUri -OutFile .\FoundryLocal.msix -$crtUri = "https://aka.ms/Microsoft.VCLibs.x64.14.00.Desktop.appx" -Invoke-WebRequest -Method Get -Uri $crtUri -OutFile .\VcLibs.appx - -# Install the Foundry Local package -Add-AppxPackage .\FoundryLocal.msix -DependencyPath .\VcLibs.appx +# macOS +brew install microsoft/foundrylocal/foundrylocal ``` -If you're having problems installing Foundry, please [file an issue](https://github.com/microsoft/foundry-local/issues) -and include logs using one of these methods: - -- For WinGet - use `winget install Microsoft.FoundryLocal --logs --verbose` - select the most-recently-dated log file - and attach it to the issue. -- For `Add-AppxPackage` - immediately after it indicates an error, in an elevated PowerShell instance, use - `Get-MsixLogs | Out-File MsixLogs.txt` and attach it to the issue. -- Use [Windows Feedback Hub](feedback-hub:) and create a Problem in the "Apps > All other apps" category. Use the - "Add More Details > Recreate my problem" and re-run the failing commands to collect more data. Once your feedback - is submitted, use the "Share" option to generate a link and put that into the filed issue. - -> [!NOTE] -> Log files may contain information like user names, IP addresses, file paths, etc. Be sure to remove those -> before sharing here. +**Run a model:** -#### macOS +```bash +foundry model run qwen2.5-0.5b +``` -Install Foundry Local using the following command in your terminal: +**List available models:** ```bash -brew install microsoft/foundrylocal/foundrylocal +foundry model ls ``` -Alternatively, you can also manually download and install the packages by following these steps: +> For the full CLI reference and advanced usage, see the [CLI documentation on Microsoft Learn](https://learn.microsoft.com/en-us/azure/foundry-local/reference/reference-cli). + -1. Download the latest release from [the releases page](https://github.com/microsoft/Foundry-Local/releases). -1. Unzip the downloaded file. -1. Open a terminal and navigate to the unzipped folder, run the following command to install Foundry Local: +## Reporting Issues - ```bash - ./install-foundry.command - ``` +Please report issues or suggest improvements in the [GitHub Issues](https://github.com/microsoft/Foundry-Local/issues) section. -### Upgrading +## 🎓 Learn More -To upgrade Foundry Local, run the following command in your terminal: +- [Foundry Local Documentation](https://learn.microsoft.com/en-us/azure/foundry-local/) on Microsoft Learn +- [Foundry Local Lab](https://github.com/Microsoft-foundry/foundry-local-lab) — Hands-on exercises and step-by-step instructions -- **Windows** +## ❔ Frequently asked questions - ```bash - winget upgrade --id Microsoft.FoundryLocal - ``` +### Is Foundry Local a web server and CLI tool? -- **macOS**: - If you installed Foundry Local using Homebrew, you can upgrade it with the following command: - ``` - brew upgrade foundrylocal - ``` - If you installed Foundry Local manually, you'll first need to uninstall the current version using: - ```bash - uninstall-foundry - ``` - Then, follow the [installation instructions](#installing) to install the latest version. +No. Foundry Local is an **end-to-end local AI solution** that your application ships with. 
It handles model acquisition, hardware acceleration, and inference inside your app process through the SDK. The optional web server and CLI are available for development workflows, but the core product is the local AI runtime and SDK that you integrate directly into your application. -### Uninstalling +### Why doesn't Foundry Local support every available model? -To uninstall Foundry Local, run the following command in your terminal: +Foundry Local is designed for shipping production applications, not for general-purpose model experimentation. The model catalog is intentionally curated to include models that are optimized for specific application scenarios, tested across a range of consumer hardware, and small enough to distribute to end users. This approach ensures that every model in the catalog delivers reliable performance when embedded in your application — rather than offering a broad selection of models with unpredictable on-device behavior. -- **Windows**: You can uninstall Foundry Local using `winget` in a Windows console (PowerShell, cmd, etc.): +### Can Foundry Local run on a server? - ```bash - winget uninstall Microsoft.FoundryLocal - ``` +Foundry Local is optimized for hardware-constrained devices where a single user accesses the model at a time. While you can technically install and run it on server hardware, it isn't designed as a server inference stack. - Alternatively, you can also uninstall Foundry Local by navigating to **Settings > Apps > Apps & features** in Windows, finding "Foundry Local" in the list, and selecting the ellipsis (`...`) followed by **Uninstall**. +Server-oriented runtimes like [vLLM](https://docs.vllm.ai/en/latest/) or [Triton Inference Server](https://github.com/triton-inference-server/server) are built for multi-user scenarios — they handle concurrent request queuing, continuous batching, and efficient GPU sharing across many simultaneous clients. Foundry Local doesn't provide these capabilities. Instead, it focuses on lightweight, single-user inference with automatic hardware detection, KV-cache management, and model lifecycle handling that make sense for client applications. -- **macOS**: If you installed Foundry Local using Homebrew, you can uninstall it with the following command: - ```bash - brew rm foundrylocal - brew untap microsoft/foundrylocal - brew cleanup --scrub - ``` - If you installed Foundry Local manually, you can uninstall it by running the following command in your terminal: - ```bash - uninstall-foundry - ``` +If you need to serve models to multiple concurrent users, use a dedicated server inference framework. Use Foundry Local when the model runs on the end user's own device. -## Reporting Issues -We're actively looking for feedback during this preview phase. Please report issues or suggest improvements in the [GitHub Issues](https://github.com/microsoft/Foundry-Local/issues) section. +### What platforms are supported? -## 🎓 Learn More +Foundry Local supports Windows, macOS (Apple silicon), and Linux. -- [Foundry Local Documentation on Microsoft Learn](https://learn.microsoft.com/en-us/azure/ai-foundry/foundry-local/?view=foundry-classic) -- [Troubleshooting guide](https://learn.microsoft.com/azure/ai-foundry/foundry-local/reference/reference-best-practice?view=foundry-classic) -- [Foundry Local Lab](https://github.com/Microsoft-foundry/foundry-local-lab): This GitHub repository contains a lab designed to help you learn how to use Foundry Local effectively. 
It includes hands-on exercises, sample code, and step-by-step instructions to guide you through the process of setting up and using Foundry Local in various scenarios. ## ⚖️ License diff --git a/samples/README.md b/samples/README.md new file mode 100644 index 00000000..93f3bd57 --- /dev/null +++ b/samples/README.md @@ -0,0 +1,14 @@ +# Foundry Local Samples + +Explore complete working examples that demonstrate how to use Foundry Local — an end-to-end local AI solution that runs entirely on-device. These samples cover chat completions, audio transcription, tool calling, LangChain integration, and more. + +> **New to Foundry Local?** Check out the [main README](../README.md) for an overview and quickstart, or visit the [Foundry Local documentation](https://learn.microsoft.com/azure/foundry-local/) on Microsoft Learn. + +## Samples by Language + +| Language | Samples | Description | +|----------|---------|-------------| +| [**C#**](cs/) | 12 | .NET SDK samples including native chat, audio transcription, tool calling, model management, web server, and tutorials. Uses WinML on Windows for hardware acceleration. | +| [**JavaScript**](js/) | 12 | Node.js SDK samples including native chat, audio transcription, Electron desktop app, Copilot SDK integration, LangChain, tool calling, web server, and tutorials. | +| [**Python**](python/) | 9 | Python samples using the OpenAI-compatible API, including chat, audio transcription, LangChain integration, tool calling, web server, and tutorials. | +| [**Rust**](rust/) | 8 | Rust SDK samples including native chat, audio transcription, tool calling, web server, and tutorials. | diff --git a/samples/cs/Directory.Packages.props b/samples/cs/Directory.Packages.props index e5ba306b..d799c4cd 100644 --- a/samples/cs/Directory.Packages.props +++ b/samples/cs/Directory.Packages.props @@ -1,13 +1,12 @@ true - 0.13.0-dev-20260319-1131106-439ca0d51 - 1.23.2 + true - - - + + + diff --git a/samples/cs/README.md b/samples/cs/README.md index 1847bb8e..367c432e 100644 --- a/samples/cs/README.md +++ b/samples/cs/README.md @@ -22,6 +22,7 @@ Both packages provide the same APIs, so the same source code works on all platfo | [tutorial-tool-calling](tutorial-tool-calling/) | Create a tool-calling assistant (tutorial). | | [tutorial-voice-to-text](tutorial-voice-to-text/) | Transcribe and summarize audio (tutorial). | + ## Running a sample 1. Clone the repository: @@ -36,8 +37,3 @@ Both packages provide the same APIs, so the same source code works on all platfo dotnet run ``` - The unified project file automatically selects the correct SDK package for your platform. - -> [!TIP] -> On Windows, we recommend using the WinML package (selected automatically) for optimal performance. Your users benefit from a wider range of hardware acceleration options and a smaller application package size. 
- diff --git a/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj b/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj deleted file mode 100644 index 3d91b677..00000000 --- a/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.csproj +++ /dev/null @@ -1,55 +0,0 @@ - - - - Exe - enable - enable - - - - - net9.0-windows10.0.26100 - false - ARM64;x64 - None - false - - - - - net9.0 - - - - $(NETCoreSdkRuntimeIdentifier) - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln b/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln deleted file mode 100644 index 65ba7510..00000000 --- a/samples/cs/live-audio-transcription-example/LiveAudioTranscriptionExample.sln +++ /dev/null @@ -1,34 +0,0 @@ - -Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 -VisualStudioVersion = 17.0.31903.59 -MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LiveAudioTranscriptionExample", "LiveAudioTranscriptionExample.csproj", "{A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}" -EndProject -Global - GlobalSection(SolutionConfigurationPlatforms) = preSolution - Debug|Any CPU = Debug|Any CPU - Debug|x64 = Debug|x64 - Debug|x86 = Debug|x86 - Release|Any CPU = Release|Any CPU - Release|x64 = Release|x64 - Release|x86 = Release|x86 - EndGlobalSection - GlobalSection(ProjectConfigurationPlatforms) = postSolution - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|Any CPU.ActiveCfg = Debug|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|Any CPU.Build.0 = Debug|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x64.ActiveCfg = Debug|x64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x64.Build.0 = Debug|x64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x86.ActiveCfg = Debug|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x86.Build.0 = Debug|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|Any CPU.ActiveCfg = Release|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|Any CPU.Build.0 = Release|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x64.ActiveCfg = Release|x64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x64.Build.0 = Release|x64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x86.ActiveCfg = Release|ARM64 - {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x86.Build.0 = Release|ARM64 - EndGlobalSection - GlobalSection(SolutionProperties) = preSolution - HideSolutionNode = FALSE - EndGlobalSection -EndGlobal diff --git a/samples/cs/live-audio-transcription-example/Program.cs b/samples/cs/live-audio-transcription-example/Program.cs deleted file mode 100644 index 9b4e5921..00000000 --- a/samples/cs/live-audio-transcription-example/Program.cs +++ /dev/null @@ -1,106 +0,0 @@ -// Live Audio Transcription — Foundry Local SDK Example -// -// Demonstrates real-time microphone-to-text using: -// SDK (FoundryLocalManager) → Core (NativeAOT DLL) → onnxruntime-genai (StreamingProcessor) - -using Microsoft.AI.Foundry.Local; -using NAudio.Wave; - -Console.WriteLine("==========================================================="); -Console.WriteLine(" Foundry Local -- Live Audio Transcription Demo"); -Console.WriteLine("==========================================================="); -Console.WriteLine(); - -var config = new Configuration -{ - AppName = "foundry_local_samples", - LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information -}; - 
-await FoundryLocalManager.CreateAsync(config, Utils.GetAppLogger()); -var mgr = FoundryLocalManager.Instance; - -await mgr.DownloadAndRegisterEpsAsync(); - -var catalog = await mgr.GetCatalogAsync(); - -var model = await catalog.GetModelAsync("nemotron") ?? throw new Exception("Model \"nemotron\" not found in catalog"); - -await model.DownloadAsync(progress => -{ - Console.Write($"\rDownloading model: {progress:F2}%"); - if (progress >= 100f) - { - Console.WriteLine(); - } -}); - -Console.Write($"Loading model {model.Id}..."); -await model.LoadAsync(); -Console.WriteLine("done."); - -var audioClient = await model.GetAudioClientAsync(); -var session = audioClient.CreateLiveTranscriptionSession(); -session.Settings.SampleRate = 16000; // Default is 16000; shown here to match the NAudio WaveFormat below -session.Settings.Channels = 1; -session.Settings.Language = "en"; - -await session.StartAsync(); -Console.WriteLine(" Session started"); - -var readTask = Task.Run(async () => -{ - try - { - await foreach (var result in session.GetTranscriptionStream()) - { - var text = result.Content?[0]?.Text; - if (result.IsFinal) - { - Console.WriteLine(); - Console.WriteLine($" [FINAL] {text}"); - Console.Out.Flush(); - } - else if (!string.IsNullOrEmpty(text)) - { - Console.ForegroundColor = ConsoleColor.Cyan; - Console.Write(text); - Console.ResetColor(); - Console.Out.Flush(); - } - } - } - catch (OperationCanceledException) { } -}); - -using var waveIn = new WaveInEvent -{ - WaveFormat = new WaveFormat(rate: 16000, bits: 16, channels: 1), - BufferMilliseconds = 100 -}; - -waveIn.DataAvailable += (sender, e) => -{ - if (e.BytesRecorded > 0) - { - _ = session.AppendAsync(new ReadOnlyMemory<byte>(e.Buffer, 0, e.BytesRecorded)); - } -}; - -Console.WriteLine(); -Console.WriteLine("==========================================================="); -Console.WriteLine(" LIVE TRANSCRIPTION ACTIVE"); -Console.WriteLine(" Speak into your microphone."); -Console.WriteLine(" Transcription appears in real-time (cyan text)."); -Console.WriteLine(" Press ENTER to stop recording."); -Console.WriteLine("==========================================================="); -Console.WriteLine(); - -waveIn.StartRecording(); -Console.ReadLine(); -waveIn.StopRecording(); - -await session.StopAsync(); -await readTask; - -await model.UnloadAsync(); diff --git a/samples/cs/nuget.config b/samples/cs/nuget.config index 9913c715..0eb64ca1 100644 --- a/samples/cs/nuget.config +++ b/samples/cs/nuget.config @@ -4,7 +4,6 @@ - @@ -14,9 +13,5 @@ - - - - \ No newline at end of file diff --git a/samples/js/README.md b/samples/js/README.md new file mode 100644 index 00000000..28f1e7e7 --- /dev/null +++ b/samples/js/README.md @@ -0,0 +1,49 @@ +# 🚀 Foundry Local JavaScript Samples + +These samples demonstrate how to use the Foundry Local JavaScript SDK (`foundry-local-sdk`) with Node.js. + +## Prerequisites + +- [Node.js](https://nodejs.org/) (v18 or later recommended) + +## Samples + +| Sample | Description | +|--------|-------------| +| [native-chat-completions](native-chat-completions/) | Initialize the SDK, download a model, and run non-streaming and streaming chat completions. | +| [audio-transcription-example](audio-transcription-example/) | Transcribe audio files using the Whisper model with streaming output. | +| [chat-and-audio-foundry-local](chat-and-audio-foundry-local/) | Unified sample demonstrating both chat and audio transcription in one application.
| +| [electron-chat-application](electron-chat-application/) | Full-featured Electron desktop chat app with voice transcription and model management. | +| [copilot-sdk-foundry-local](copilot-sdk-foundry-local/) | GitHub Copilot SDK integration with Foundry Local for agentic AI workflows. | +| [langchain-integration-example](langchain-integration-example/) | LangChain.js integration for building text generation chains. | +| [tool-calling-foundry-local](tool-calling-foundry-local/) | Tool calling with custom function definitions and streaming responses. | +| [web-server-example](web-server-example/) | Start a local OpenAI-compatible web server and call it with the OpenAI SDK. | +| [tutorial-chat-assistant](tutorial-chat-assistant/) | Build an interactive multi-turn chat assistant (tutorial). | +| [tutorial-document-summarizer](tutorial-document-summarizer/) | Summarize documents with AI (tutorial). | +| [tutorial-tool-calling](tutorial-tool-calling/) | Create a tool-calling assistant (tutorial). | +| [tutorial-voice-to-text](tutorial-voice-to-text/) | Transcribe and summarize audio (tutorial). | + +## Running a Sample + +1. Clone the repository: + + ```bash + git clone https://github.com/microsoft/Foundry-Local.git + cd Foundry-Local/samples/js + ``` + +1. Navigate to a sample and install dependencies: + + ```bash + cd native-chat-completions + npm install + ``` + +1. Run the sample: + + ```bash + npm start + ``` + +> [!TIP] +> Each sample's `package.json` includes `foundry-local-sdk` as a dependency and `foundry-local-sdk-winml` as an optional dependency. On **Windows**, the WinML variant installs automatically for broader hardware acceleration. On **macOS and Linux**, the standard SDK is used. Just run `npm install` — platform detection is handled for you. diff --git a/samples/js/audio-transcription-example/README.md b/samples/js/audio-transcription-example/README.md deleted file mode 100644 index 3c555727..00000000 --- a/samples/js/audio-transcription-example/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Audio transcription example - -This sample demonstrates how to use the audio transcription capabilities of the Foundry Local SDK with a local model. It initializes the SDK, selects an audio transcription model, and sends an audio file for transcription. - -## Prerequisites -- Ensure you have Node.js installed (version 20 or higher is recommended). - -## Setup project - -Navigate to the sample directory, setup the project, and install the Foundry Local SDK package. - -1. Navigate to the sample directory and setup the project: - ```bash - cd samples/js/audio-transcription-example - npm init -y - npm pkg set type=module - ``` - -1. 
Install the Foundry Local package: - - **macOS / Linux:** - ```bash - npm install --foreground-scripts foundry-local-sdk - ``` - - **Windows:** - ```bash - npm install --foreground-scripts --winml foundry-local-sdk - ``` - -## Run the sample - -Run the sample script using Node.js: - -```bash -cd samples/js/audio-transcription-example -node app.js -``` \ No newline at end of file diff --git a/samples/js/audio-transcription-example/package.json b/samples/js/audio-transcription-example/package.json new file mode 100644 index 00000000..14a2aafa --- /dev/null +++ b/samples/js/audio-transcription-example/package.json @@ -0,0 +1,15 @@ +{ + "name": "audio-transcription-example", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "scripts": { + "start": "node app.js" + }, + "dependencies": { + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + } +} diff --git a/samples/js/chat-and-audio-foundry-local/README.md b/samples/js/chat-and-audio-foundry-local/README.md deleted file mode 100644 index 23de6629..00000000 --- a/samples/js/chat-and-audio-foundry-local/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Sample: Chat + Audio Transcription with Foundry Local - -This sample demonstrates how to use Foundry Local as a **unified AI runtime** for both **text generation (chat)** and **speech-to-text (audio transcription)** — all on-device, with a single SDK managing both models. - -## What This Shows - -- Using a single `FoundryLocalManager` to manage both chat and audio models -- Transcribing an audio file using the `whisper-tiny` model -- Analyzing the transcription using the `phi-3.5-mini` chat model -- Automatic hardware acceleration for both models — zero hardware detection code needed - -## Why Foundry Local? - -Without Foundry Local, building an app with both chat and speech-to-text typically requires: -- A separate STT library (`whisper.cpp`, `@huggingface/transformers`) -- A separate LLM runtime (`llama.cpp`, `node-llama-cpp`) -- Custom hardware detection code for each runtime (~200+ lines) -- Separate model download and caching logic - -With Foundry Local, you get **one SDK, one service, both capabilities** — and the hardware detection is automatic. 
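As a rough sketch of what that unified flow looks like with the JS SDK (the model aliases, the `transcribe()` argument shape, and the web-service port below are illustrative assumptions, not the sample's actual source; see `src/app.js` for the real code):

```javascript
import { FoundryLocalManager } from 'foundry-local-sdk';

// One manager owns both models and the optional OpenAI-compatible web service.
const manager = FoundryLocalManager.create({
  appName: 'chat_and_audio_sketch',
  webServiceUrls: 'http://localhost:6543', // port is an assumption
});

// Speech-to-text: fetch, load, and run the whisper-tiny model.
const whisper = await manager.catalog.getModel('whisper-tiny');
await whisper.download();
await whisper.load();
const audioClient = whisper.createAudioClient();
const transcript = await audioClient.transcribe('recording.mp3'); // argument shape assumed

// Chat: load phi-3.5-mini and analyze the transcript through the web service.
const chat = await manager.catalog.getModel('phi-3.5-mini');
await chat.download();
await chat.load();
manager.startWebService();

const res = await fetch('http://localhost:6543/v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: chat.id,
    // transcript is treated as a string here; the real result shape may differ
    messages: [{ role: 'user', content: `Summarize this transcript:\n${transcript}` }],
  }),
});
const data = await res.json();
console.log(data.choices[0].message.content);
```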
- -## Prerequisites - -- [Foundry Local](https://github.com/microsoft/Foundry-Local) installed on your machine -- Node.js 18+ - -## Getting Started - -Install the Foundry Local SDK: - -```bash -npm install foundry-local-sdk -``` - -Place an audio file (`recording.mp3`) in the project directory, then run: - -```bash -node src/app.js -``` diff --git a/samples/js/chat-and-audio-foundry-local/package.json b/samples/js/chat-and-audio-foundry-local/package.json index a91ecda3..7404589e 100644 --- a/samples/js/chat-and-audio-foundry-local/package.json +++ b/samples/js/chat-and-audio-foundry-local/package.json @@ -1,5 +1,6 @@ { "name": "chat-and-audio-foundry-local", + "version": "1.0.0", "type": "module", "description": "Unified chat + audio transcription sample using Foundry Local", "scripts": { @@ -7,5 +8,8 @@ }, "dependencies": { "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" } } diff --git a/samples/js/copilot-sdk-foundry-local/README.md b/samples/js/copilot-sdk-foundry-local/README.md deleted file mode 100644 index 20191bdf..00000000 --- a/samples/js/copilot-sdk-foundry-local/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# Copilot SDK + Foundry Local Sample - -This sample demonstrates using [GitHub Copilot SDK](https://github.com/github/copilot-sdk) with [Foundry Local](https://github.com/microsoft/Foundry-Local) for on-device agentic AI workflows — all inference runs locally on your machine. - -> [!WARNING] -> **GPU Required.** The Copilot SDK's agent orchestration injects a large system prompt (tool schemas, security guardrails, environment context) into every request. Combined with multi-turn conversation history, this means the local model must process a substantial input context on every turn. **A GPU with sufficient VRAM is strongly recommended**; CPU-only inference will be extremely slow (minutes per turn). - -## What This Shows - -- Bootstrapping Foundry Local with the Foundry Local SDK (service lifecycle + model management) -- Configuring Copilot SDK's **BYOK (Bring Your Own Key)** to use Foundry Local as the inference backend. Note that BYOK is not just authentication — it also allows you to specify a custom API endpoint and response format, enabling seamless integration with local models. -- Using Copilot's **built-in agentic tools** (file reading) powered by a local model -- Registering **custom tools** that the model can invoke during conversation -- Streaming responses and multi-turn conversation via the Copilot SDK session API - -## Prerequisites - -1. **[Foundry Local](https://github.com/microsoft/Foundry-Local#installing)** installed -2. **[GitHub Copilot CLI](https://docs.github.com/en/copilot/how-tos/set-up/install-copilot-cli)** installed and authenticated -3. **Node.js 18+** -4. **A GPU** is strongly recommended for reasonable performance. - -Verify prerequisites: - -```bash -foundry --version -copilot --version -node --version -``` - -## Setup and Run - -```bash -cd samples/js/copilot-sdk-foundry-local -npm install -``` - -### `app.ts` — Self-reading app (`npm start`) - -```bash -npm start -``` - -Demonstrates Copilot's built-in agentic tools running against a local model. The app creates a BYOK session, then asks the model to **read its own source code** using Copilot's `view` tool and explain what it does. A follow-up turn tests multi-turn conversation context. 
- -**What it shows:** -- Foundry Local bootstrap (download → load → start web service) -- BYOK session creation pointing at the local endpoint -- Copilot's built-in `view` tool reading files on disk, powered by local inference -- Streaming responses and multi-turn conversation - -### `tool-calling.ts` — Custom tools (`npm run tools`) - -```bash -npm run tools -``` - -Registers three custom tools that the model can invoke during conversation, then runs three turns — each designed to trigger a specific tool: - -| Tool | What it does | -|------|-------------| -| `calculate` | Evaluates math expressions (e.g. `Math.sqrt(144) + 8 * 3`) | -| `lookup_definition` | Looks up AI/programming terms (BYOK, ONNX, RAG, etc.) | -| `get_system_info` | Returns OS, architecture, memory, CPU count, and running model | - -When a tool is called you'll see `[Tool called: ...]` in the output. - -**What it shows:** -- Defining custom tools with `defineTool` and Zod schemas -- Tool invocation and result handling via the Copilot SDK agent loop -- The full round-trip: model decides to call a tool → SDK executes the handler → result flows back to the model - -## Configuration - -### Timeout - -Both examples default to **120 seconds** per model turn. Override via the `FOUNDRY_TIMEOUT_MS` environment variable: - -```bash -# 3-minute timeout -FOUNDRY_TIMEOUT_MS=180000 npm start - -# 5-minute timeout for tool-calling (tool round-trips take longer) -FOUNDRY_TIMEOUT_MS=300000 npm run tools -``` - -### Performance Notes - -The Copilot CLI is a full agentic system — it injects a system prompt containing tool schemas, security guardrails, and environment context into every request sent to the model. This system prompt alone can be **40–50 KB** (~12,000+ tokens). On a GPU this is processed quickly, but on CPU-only hardware the time-to-first-token can be very long. - -To mitigate this: -- **Use a GPU.** This is the single biggest improvement. -- The samples use `availableTools` to restrict which built-in tools are sent to the model, reducing the system prompt size. -- System messages include "Keep responses concise" to limit output token generation. - -## How It Works - -1. **Foundry Local bootstrap** — `FoundryLocalManager.create()` initializes the SDK, `model.download()` and `model.load()` prepare the model, and `manager.startWebService()` starts an OpenAI-compatible HTTP server -2. **Copilot SDK client** — `CopilotClient` communicates with the Copilot CLI over JSON-RPC -3. **BYOK session** — `createSession()` with `provider: { type: "openai", baseUrl: "/v1" }` routes all inference through Foundry Local instead of GitHub Copilot's cloud -4. **Tool calling** — Built-in tools (like `view`) and custom tools (like `calculate`) are available to the model; the SDK handles the tool invocation loop -5. **Multi-turn conversation** — Multiple messages in the same session share conversational context -6. 
**Cleanup** — `finally` block unloads the model and stops the web service - -## Architecture - -``` -Your App (this sample) - | - ├─ foundry-local-sdk ──→ Foundry Local (model lifecycle + web service) - | - └─ @github/copilot-sdk - | - ├─ JSON-RPC ──→ Copilot CLI (agent orchestration) - | - └─ BYOK provider config - | - └─ POST /v1/chat/completions ──→ Foundry Local web service - | - └─ Local Model (phi-4-mini via ONNX Runtime) -``` - -## Key Configuration: BYOK Provider - -The critical piece is the `provider` config in `createSession()`: - -```typescript -const manager = FoundryLocalManager.create({ - appName: "foundry_local_samples", - webServiceUrls: "http://localhost:6543", -}); -const model = await manager.catalog.getModel("phi-4-mini"); -await model.download(); -await model.load(); -manager.startWebService(); - -const session = await client.createSession({ - model: model.id, - provider: { - type: "openai", // Foundry Local exposes OpenAI-compatible API - baseUrl: "http://localhost:6543/v1", - apiKey: "local", // Placeholder; Foundry Local does not require auth - wireApi: "completions", // Chat Completions API format - }, - streaming: true, -}); -``` - -This tells Copilot SDK to route inference requests to Foundry Local's endpoint instead of GitHub Copilot's cloud service. See the [Copilot SDK BYOK documentation](https://github.com/github/copilot-sdk/blob/main/docs/auth/byok.md) for all provider options. - diff --git a/samples/js/copilot-sdk-foundry-local/package.json b/samples/js/copilot-sdk-foundry-local/package.json index d01a25a9..b2457d9a 100644 --- a/samples/js/copilot-sdk-foundry-local/package.json +++ b/samples/js/copilot-sdk-foundry-local/package.json @@ -12,6 +12,9 @@ "foundry-local-sdk": "latest", "zod": "^3.0.0" }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + }, "devDependencies": { "tsx": "^4.0.0", "typescript": "^5.0.0" diff --git a/samples/js/electron-chat-application/.npmrc b/samples/js/electron-chat-application/.npmrc deleted file mode 100644 index 114ea2a4..00000000 --- a/samples/js/electron-chat-application/.npmrc +++ /dev/null @@ -1,2 +0,0 @@ -registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/ -always-auth=true diff --git a/samples/js/electron-chat-application/README.md b/samples/js/electron-chat-application/README.md deleted file mode 100644 index a89bd630..00000000 --- a/samples/js/electron-chat-application/README.md +++ /dev/null @@ -1,239 +0,0 @@ -# Foundry Local Chat - Electron Application - -A modern, full-featured chat application built with Electron and the Foundry Local SDK. Chat with AI models running entirely on your local machine with complete privacy. 
- -![Foundry Local Chat](https://img.shields.io/badge/Electron-34.1.0-47848F?logo=electron) -![Node.js](https://img.shields.io/badge/Node.js-18+-339933?logo=node.js) - -## Features - -### Core Features -- **🔒 100% Private** - All AI inference runs locally on your machine -- **⚡ Low Latency** - Direct local inference with no network round trips -- **📊 Performance Metrics** - Real-time tokens/second and time-to-first-token stats -- **🎨 Modern UI** - Beautiful dark theme with smooth animations -- **💬 Markdown Support** - Code blocks with syntax highlighting, headings, and lists -- **📋 Copy Code** - One-click copy button on all code blocks - -### Model Management -- **📦 Download Models** - Browse and download models from the catalog -- **🔄 Load/Unload** - Easily switch between downloaded models -- **🗑️ Delete Models** - Remove downloaded models to free up disk space -- **🟢 Visual Status** - Green background for loaded model, green dot for downloaded - -### Voice Transcription -- **🎤 Voice Input** - Record voice messages with the microphone button -- **🗣️ Whisper Integration** - Uses OpenAI Whisper models for accurate transcription -- **⚙️ Transcription Settings** - Choose from multiple Whisper model sizes -- **🔊 Audio Processing** - Automatic conversion to 16kHz WAV for optimal quality - -### Context Tracking -- **📏 Context Usage** - Visual progress bar showing how much context is used -- **⚠️ Usage Warnings** - Bar changes color (green → yellow → red) as context fills - -## Screenshots - -Here is a screenshot of the chat interface with some annotations highlighting key features: - -![Chat Interface](./screenshots/electron-description-of-functions.png) - -*On the first use* of the microphone button, you will be prompted to download a Whisper model for transcription: - -![Whisper Transcription](./screenshots/electron-transcription.png) - -You can also change and/or delete the model for transcription using the *Voice settings* link just underneath the text input box. - -## Prerequisites - -- [Node.js](https://nodejs.org/) 18 or later - -## Installation - -To set up and run the Electron Chat Application, follow these steps: - -**macOS / Linux:** -```bash -cd samples/js/electron-chat-application -npm install -npm install --foreground-scripts foundry-local-sdk -npm start -``` - -**Windows:** -```bash -cd samples/js/electron-chat-application -npm install -npm install --foreground-scripts --winml foundry-local-sdk -npm start -``` - -## Usage - -### Basic Chat -1. **Start the app** - Run `npm start` to launch the Electron application -2. **Download a model** - Click "Download" on any available model -3. **Load the model** - Click "Load" on a downloaded model (background turns green when loaded) -4. **Start chatting** - Type your message and press Enter to send -5. **View stats** - Each AI response shows TTFT and tokens/sec metrics - -### Voice Transcription -1. **Click the microphone** - Opens Whisper model selection if first time -2. **Download a Whisper model** - Choose a size (tiny is fastest, large is most accurate) -3. **Record your voice** - Click mic to start, click stop when done -4. 
**Auto-transcription** - Text appears in the input field automatically - -### Model Management -- **Load**: Click "Load" button on any downloaded model -- **Unload**: Click "Unload" on the currently loaded model -- **Delete**: Click the trash icon to remove a downloaded model from cache - -## Project Structure - -``` -electron-chat-application/ -├── main.js # Electron main process - SDK integration & IPC handlers -├── preload.js # Secure bridge between main and renderer -├── index.html # Main application UI -├── styles.css # Modern dark theme CSS -├── renderer.js # Chat UI logic, markdown rendering, voice recording -├── foundry_local_color.svg # Application logo -├── package.json # Dependencies and scripts -└── README.md # This file -``` - -## Architecture - -### Main Process (`main.js`) -- Initializes Foundry Local SDK with HTTP web service -- Handles model loading/unloading via IPC -- Streams chat completions using Server-Sent Events (SSE) -- Manages audio transcription with Whisper models - -### Preload Script (`preload.js`) -- Exposes secure API to renderer via `contextBridge` -- Handles IPC communication for all SDK operations - -### Renderer Process (`renderer.js`) -- Manages chat UI and message display -- Implements SimpleMarkdown parser for rich text -- Handles voice recording and WAV conversion -- Tracks context usage and updates UI - -## API Reference - -The renderer has access to the Foundry Local SDK via `window.foundryAPI`. This bridge is exposed via the preload script using Electron's `contextBridge`, allowing secure communication between the renderer and main process while maintaining `contextIsolation`. Each method invokes IPC handlers in the main process that call the underlying Foundry Local SDK to manage models and perform inference. - -### Available Methods - -| Method | Purpose | SDK Operation | -|--------|---------|---------------| -| `getModels()` | Fetches available AI models from the Foundry Local catalog | `manager.catalog.getModels()` | -| `downloadModel(alias)` | Downloads a model to local cache | `model.download()` | -| `loadModel(alias)` | Loads a model into memory for inference | `model.load()` | -| `unloadModel()` | Unloads the currently loaded model | `model.unload()` | -| `deleteModel(alias)` | Removes a model from local cache | `model.removeFromCache()` | -| `chat(messages)` | Sends chat messages to the loaded model and returns response | HTTP streaming via SDK web service | -| `getLoadedModel()` | Returns info about the currently loaded model | Returns cached model state | -| `onChatChunk(callback)` | Subscribes to streaming chat response chunks (returns cleanup function) | IPC event listener | -| `getWhisperModels()` | Lists available Whisper models for transcription | `manager.catalog.getModels()` (filtered) | -| `downloadWhisperModel(alias)` | Downloads a Whisper model | `model.download()` | -| `transcribeAudio(path, base64)` | Transcribes audio using Whisper | `audioClient.transcribe()` | - -### Usage Examples - -```javascript -// Get all available models -const models = await foundryAPI.getModels(); - -// Download a model -await foundryAPI.downloadModel('phi-4'); - -// Load a model for chat -await foundryAPI.loadModel('phi-4'); - -// Unload the current model -await foundryAPI.unloadModel(); - -// Delete a model from cache -await foundryAPI.deleteModel('phi-4'); - -// Send chat messages (streaming) -const response = await foundryAPI.chat([ - { role: 'user', content: 'Hello!' 
} -]); - -// Listen for streaming chunks -foundryAPI.onChatChunk((data) => { - console.log(data.content, data.tokenCount); -}); - -// Get Whisper models for transcription -const whisperModels = await foundryAPI.getWhisperModels(); - -// Download a Whisper model -await foundryAPI.downloadWhisperModel('whisper-small'); - -// Transcribe audio (base64 WAV data) -const text = await foundryAPI.transcribeAudio(base64WavData); -``` - -## Customization - -### Theming -Edit CSS variables in `styles.css`: -```css -:root { - --accent-primary: #6366f1; /* Primary accent color */ - --accent-secondary: #818cf8; /* Secondary accent */ - --success: #10b981; /* Success/loaded state */ - --warning: #f59e0b; /* Warning state */ - --error: #ef4444; /* Error state */ - --bg-primary: #0f0f1a; /* Main background */ -} -``` - -### Context Window -Adjust the context limit in `renderer.js`: -```javascript -const CONTEXT_LIMIT = 8192; // Default context window size -``` - -### Sidebar Width -The sidebar is resizable between 240-480px. Default is 320px, configured in CSS: -```css -.sidebar { - width: 320px; - min-width: 240px; - max-width: 480px; -} -``` - -## Technical Notes - -### HTTP Streaming -The app uses HTTP streaming via the SDK's built-in web service (port 47392) instead of native callbacks, which provides better compatibility with Electron's process model. - -### Audio Processing -Voice recordings are converted to 16kHz mono 16-bit PCM WAV format before transcription, as required by Whisper models. The conversion uses Web Audio API's OfflineAudioContext for resampling. - -### Temporary Files -Audio files are stored in the system temp directory (`os.tmpdir()`) and automatically cleaned up after transcription. - -## Troubleshooting - -**Slow performance?** -- Try a smaller model variant (e.g., phi-4-mini instead of phi-4) - -**Transcription not working?** -- Ensure you've downloaded a Whisper model first -- Check microphone permissions in System Preferences -- Verify audio is recording (mic icon changes to stop icon) - -**High context usage?** -- Click "New Chat" to clear the conversation and reset context -- The context bar shows usage: green (<70%), yellow (70-90%), red (>90%) - -## License - -MIT - diff --git a/samples/js/electron-chat-application/package.json b/samples/js/electron-chat-application/package.json index 29ccd2b7..37779260 100644 --- a/samples/js/electron-chat-application/package.json +++ b/samples/js/electron-chat-application/package.json @@ -7,19 +7,15 @@ "start": "electron .", "dev": "electron . --enable-logging" }, - "keywords": [ - "electron", - "chat", - "foundry-local", - "ai" - ], - "author": "", - "license": "MIT", - "devDependencies": { - "electron": "^34.5.8" - }, "dependencies": { + "foundry-local-sdk": "latest", "highlight.js": "^11.11.1", "marked": "^15.0.6" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + }, + "devDependencies": { + "electron": "^34.5.8" } } diff --git a/samples/js/langchain-integration-example/README.md b/samples/js/langchain-integration-example/README.md deleted file mode 100644 index 90f778fc..00000000 --- a/samples/js/langchain-integration-example/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# LangChain integration example - -This sample demonstrates how to integrate the Foundry Local SDK with LangChain.js to create a simple application that uses local language models for text generation. - -## Prerequisites -- Ensure you have Node.js installed (version 20 or higher is recommended). 
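Here is a minimal sketch of the wiring this sample performs, following the web-service pattern used elsewhere in this repo. The port and model alias are assumptions, and the `configuration.baseURL` option is standard LangChain.js rather than a Foundry-specific API:

```javascript
import { FoundryLocalManager } from 'foundry-local-sdk';
import { ChatOpenAI } from '@langchain/openai';

// Stand up a local model behind the SDK's OpenAI-compatible web service.
const manager = FoundryLocalManager.create({
  appName: 'langchain_sketch',
  webServiceUrls: 'http://localhost:6543', // port is an assumption
});
const model = await manager.catalog.getModel('phi-4-mini'); // alias assumed
await model.download();
await model.load();
manager.startWebService();

// Point LangChain.js at the local endpoint instead of OpenAI's cloud.
const llm = new ChatOpenAI({
  model: model.id,
  apiKey: 'local', // placeholder; Foundry Local does not require auth
  configuration: { baseURL: 'http://localhost:6543/v1' },
});

const reply = await llm.invoke('Translate "hello, world" into French.');
console.log(reply.content);
```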
- -## Setup project - -Navigate to the sample directory, setup the project, and install the Foundry Local and LangChain packages. - -1. Navigate to the sample directory and setup the project: - ```bash - cd samples/js/langchain-integration-example - npm init -y - npm pkg set type=module - ``` -1. Install the Foundry Local and LangChain packages: - - **macOS / Linux:** - ```bash - npm install --foreground-scripts foundry-local-sdk - npm install @langchain/openai @langchain/core - ``` - - **Windows:** - ```bash - npm install --foreground-scripts --winml foundry-local-sdk - npm install @langchain/openai @langchain/core - ``` - -## Run the sample - -Run the sample script using Node.js: - -```bash -cd samples/js/langchain-integration-example -node app.js -``` \ No newline at end of file diff --git a/samples/js/langchain-integration-example/package.json b/samples/js/langchain-integration-example/package.json new file mode 100644 index 00000000..bb5fb635 --- /dev/null +++ b/samples/js/langchain-integration-example/package.json @@ -0,0 +1,17 @@ +{ + "name": "langchain-integration-example", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "scripts": { + "start": "node app.js" + }, + "dependencies": { + "@langchain/core": "latest", + "@langchain/openai": "latest", + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + } +} diff --git a/samples/js/live-audio-transcription-example/README.md b/samples/js/live-audio-transcription-example/README.md deleted file mode 100644 index 7c817d27..00000000 --- a/samples/js/live-audio-transcription-example/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Live Audio Transcription Example - -Real-time microphone-to-text transcription using the Foundry Local JS SDK with Nemotron ASR. - -## Prerequisites - -- [Foundry Local](https://github.com/microsoft/Foundry-Local) installed -- Node.js 18+ -- A microphone (optional — falls back to synthetic audio) - -## Setup - -```bash -npm install foundry-local-sdk naudiodon2 -``` - -> **Note:** `naudiodon2` is optional — provides cross-platform microphone capture. Without it, the example falls back to synthetic audio for testing. - -## Run - -```bash -node app.js -``` - -Speak into your microphone. Transcription appears in real-time. Press `Ctrl+C` to stop. - -## How it works - -1. Initializes the Foundry Local SDK and loads the Nemotron ASR model -2. Creates a `LiveAudioTranscriptionSession` with 16kHz/16-bit/mono PCM settings -3. Captures microphone audio via `naudiodon2` (or generates synthetic audio as fallback) -4. Pushes PCM chunks to the SDK via `session.append()` -5. Reads transcription results via `for await (const result of session.getTranscriptionStream())` -6. 
Access text via `result.content[0].text` (OpenAI Realtime ConversationItem pattern) - -## API - -```javascript -const audioClient = model.createAudioClient(); -const session = audioClient.createLiveTranscriptionSession(); -session.settings.sampleRate = 16000; -session.settings.channels = 1; -session.settings.language = 'en'; - -await session.start(); - -// Push audio -await session.append(pcmBytes); - -// Read results -for await (const result of session.getTranscriptionStream()) { - console.log(result.content[0].text); // transcribed text - console.log(result.content[0].transcript); // alias (OpenAI compat) - console.log(result.is_final); // true for final results -} - -await session.stop(); -``` diff --git a/samples/js/live-audio-transcription-example/app.js b/samples/js/live-audio-transcription-example/app.js deleted file mode 100644 index 794c3972..00000000 --- a/samples/js/live-audio-transcription-example/app.js +++ /dev/null @@ -1,157 +0,0 @@ -// Live Audio Transcription Example — Foundry Local JS SDK -// -// Demonstrates real-time microphone-to-text using the JS SDK. -// Requires: npm install foundry-local-sdk naudiodon2 -// -// Usage: node app.js - -import { FoundryLocalManager } from 'foundry-local-sdk'; - -console.log('╔══════════════════════════════════════════════════════════╗'); -console.log('║ Foundry Local — Live Audio Transcription (JS SDK) ║'); -console.log('╚══════════════════════════════════════════════════════════╝'); -console.log(); - -// Initialize the Foundry Local SDK -console.log('Initializing Foundry Local SDK...'); -const manager = FoundryLocalManager.create({ - appName: 'foundry_local_live_audio', - logLevel: 'info' -}); -console.log('✓ SDK initialized'); - -// Get and load the nemotron model -const modelAlias = 'nemotron'; -let model = await manager.catalog.getModel(modelAlias); -if (!model) { - console.error(`ERROR: Model "${modelAlias}" not found in catalog.`); - process.exit(1); -} - -console.log(`Found model: ${model.id}`); -console.log('Downloading model (if needed)...'); -await model.download((progress) => { - process.stdout.write(`\rDownloading... ${progress.toFixed(2)}%`); -}); -console.log('\n✓ Model downloaded'); - -console.log('Loading model...'); -await model.load(); -console.log('✓ Model loaded'); - -// Create live transcription session -const audioClient = model.createAudioClient(); -const session = audioClient.createLiveTranscriptionSession(); -session.settings.sampleRate = 16000; // Default is 16000; shown here for clarity -session.settings.channels = 1; -session.settings.bitsPerSample = 16; -session.settings.language = 'en'; - -console.log('Starting streaming session...'); -await session.start(); -console.log('✓ Session started'); - -// Read transcription results in background -const readPromise = (async () => { - try { - for await (const result of session.getTranscriptionStream()) { - const text = result.content?.[0]?.text; - if (result.is_final) { - console.log(); - console.log(` [FINAL] ${text}`); - } else if (text) { - process.stdout.write(text); - } - } - } catch (err) { - if (err.name !== 'AbortError') { - console.error('Stream error:', err.message); - } - } -})(); - -// --- Microphone capture --- -// This example uses naudiodon2 for cross-platform audio capture. -// Install with: npm install naudiodon2 -// -// If you prefer a different audio library, just push PCM bytes -// (16-bit signed LE, mono, 16kHz) via session.append(). 
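If you do bring your own capture library, the only contract is that PCM layout. As a hedged illustration (the helper below is hypothetical, not part of the SDK), this converts Float32 samples, e.g. from the Web Audio API, into the 16-bit signed little-endian bytes that `session.append()` expects:

```javascript
// Convert Float32 samples in [-1, 1] to 16-bit signed LE mono PCM bytes.
function floatTo16BitPcm(float32Samples) {
  const pcm = new Uint8Array(float32Samples.length * 2);
  const view = new DataView(pcm.buffer);
  for (let i = 0; i < float32Samples.length; i++) {
    const s = Math.max(-1, Math.min(1, float32Samples[i])); // clamp to [-1, 1]
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true); // true = little-endian
  }
  return pcm;
}

// Usage with any capture source producing Float32 frames at 16kHz:
// await session.append(floatTo16BitPcm(samples));
```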
- -let audioInput; -try { - const { default: portAudio } = await import('naudiodon2'); - - audioInput = portAudio.AudioIO({ - inOptions: { - channelCount: session.settings.channels, - sampleFormat: session.settings.bitsPerSample === 16 - ? portAudio.SampleFormat16Bit - : portAudio.SampleFormat32Bit, - sampleRate: session.settings.sampleRate, - framesPerBuffer: 1600, // 100ms chunks - maxQueue: 15 // buffer during event-loop blocks from sync FFI calls - } - }); - - let appendPending = false; - audioInput.on('data', (buffer) => { - if (appendPending) return; // drop frame while backpressured - const pcm = new Uint8Array(buffer); - appendPending = true; - session.append(pcm).then(() => { - appendPending = false; - }).catch((err) => { - appendPending = false; - console.error('append error:', err.message); - }); - }); - - console.log(); - console.log('════════════════════════════════════════════════════════════'); - console.log(' LIVE TRANSCRIPTION ACTIVE'); - console.log(' Speak into your microphone.'); - console.log(' Press Ctrl+C to stop.'); - console.log('════════════════════════════════════════════════════════════'); - console.log(); - - audioInput.start(); -} catch (err) { - console.warn('⚠ Could not initialize microphone (naudiodon2 may not be installed).'); - console.warn(' Install with: npm install naudiodon2'); - console.warn(' Falling back to synthetic audio test...'); - console.warn(); - - // Fallback: push 2 seconds of synthetic PCM (440Hz sine wave) - const sampleRate = session.settings.sampleRate; - const duration = 2; - const totalSamples = sampleRate * duration; - const pcmBytes = new Uint8Array(totalSamples * 2); - for (let i = 0; i < totalSamples; i++) { - const t = i / sampleRate; - const sample = Math.round(32767 * 0.5 * Math.sin(2 * Math.PI * 440 * t)); - pcmBytes[i * 2] = sample & 0xFF; - pcmBytes[i * 2 + 1] = (sample >> 8) & 0xFF; - } - - // Push in 100ms chunks - const chunkSize = (sampleRate / 10) * 2; - for (let offset = 0; offset < pcmBytes.length; offset += chunkSize) { - const len = Math.min(chunkSize, pcmBytes.length - offset); - await session.append(pcmBytes.slice(offset, offset + len)); - } - - console.log('✓ Synthetic audio pushed'); -} - -// Handle graceful shutdown -process.on('SIGINT', async () => { - console.log('\n\nStopping...'); - if (audioInput) { - audioInput.quit(); - } - await session.stop(); - await readPromise; - await model.unload(); - console.log('✓ Done'); - process.exit(0); -}); diff --git a/samples/js/native-chat-completions/README.md b/samples/js/native-chat-completions/README.md deleted file mode 100644 index 1ddee8bc..00000000 --- a/samples/js/native-chat-completions/README.md +++ /dev/null @@ -1,37 +0,0 @@ -# Native chat completions with Foundry Local SDK - -This sample demonstrates how to use the Foundry Local SDK to perform native chat completions using a local model. It initializes the SDK, selects a model, and sends a chat completion request with a system prompt and user message. - -## Prerequisites -- Ensure you have Node.js installed (version 20 or higher is recommended). - -## Setup project - -Navigate to the sample directory, setup the project, and install the Foundry Local SDK package. - -1. Navigate to the sample directory and setup the project: - ```bash - cd samples/js/native-chat-completions - npm init -y - npm pkg set type=module - ``` -1. 
Install the Foundry Local SDK package: - - **macOS / Linux:** - ```bash - npm install --foreground-scripts foundry-local-sdk - ``` - - **Windows:** - ```bash - npm install --foreground-scripts --winml foundry-local-sdk - ``` - -## Run the sample - -Run the sample script using Node.js: - -```bash -cd samples/js/native-chat-completions -node app.js -``` \ No newline at end of file diff --git a/samples/js/native-chat-completions/package.json b/samples/js/native-chat-completions/package.json new file mode 100644 index 00000000..eeba0acd --- /dev/null +++ b/samples/js/native-chat-completions/package.json @@ -0,0 +1,15 @@ +{ + "name": "native-chat-completions", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "scripts": { + "start": "node app.js" + }, + "dependencies": { + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + } +} diff --git a/samples/js/tool-calling-foundry-local/README.md b/samples/js/tool-calling-foundry-local/README.md deleted file mode 100644 index 8480fc73..00000000 --- a/samples/js/tool-calling-foundry-local/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Sample: Tool Calling with Foundry Local - -This is a simple example of how to use the Foundry Local SDK to run a model locally and perform tool calling with it. The example demonstrates how to set up the SDK, initialize a model, and perform a generated tool call. - -Install the Foundry Local SDK and OpenAI packages using npm: - -```bash -npm install foundry-local-sdk openai -``` - -Run the application using Node.js: - -```bash -node src/app.js -``` diff --git a/samples/js/tool-calling-foundry-local/package.json b/samples/js/tool-calling-foundry-local/package.json index 07bcaa27..6ae9c032 100644 --- a/samples/js/tool-calling-foundry-local/package.json +++ b/samples/js/tool-calling-foundry-local/package.json @@ -1,6 +1,15 @@ { + "name": "tool-calling-foundry-local", + "version": "1.0.0", "type": "module", + "scripts": { + "start": "node src/app.js" + }, "dependencies": { + "foundry-local-sdk": "latest", "openai": "^6.25.0" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" } } diff --git a/samples/js/tutorial-chat-assistant/package.json b/samples/js/tutorial-chat-assistant/package.json index 3e2393ce..8a36a288 100644 --- a/samples/js/tutorial-chat-assistant/package.json +++ b/samples/js/tutorial-chat-assistant/package.json @@ -3,7 +3,13 @@ "version": "1.0.0", "type": "module", "main": "app.js", + "scripts": { + "start": "node app.js" + }, "dependencies": { - "foundry-local-sdk": "*" + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" } } diff --git a/samples/js/tutorial-document-summarizer/package.json b/samples/js/tutorial-document-summarizer/package.json index c3c62321..c97e416f 100644 --- a/samples/js/tutorial-document-summarizer/package.json +++ b/samples/js/tutorial-document-summarizer/package.json @@ -3,7 +3,13 @@ "version": "1.0.0", "type": "module", "main": "app.js", + "scripts": { + "start": "node app.js" + }, "dependencies": { - "foundry-local-sdk": "*" + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" } } diff --git a/samples/js/tutorial-tool-calling/package.json b/samples/js/tutorial-tool-calling/package.json index 07337434..ab7f62d6 100644 --- a/samples/js/tutorial-tool-calling/package.json +++ b/samples/js/tutorial-tool-calling/package.json @@ -3,7 +3,13 @@ "version": "1.0.0", "type": "module", "main": "app.js", + "scripts": { + 
"start": "node app.js" + }, "dependencies": { - "foundry-local-sdk": "*" + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" } } diff --git a/samples/js/tutorial-voice-to-text/package.json b/samples/js/tutorial-voice-to-text/package.json index 55f2ea83..3efb0d4b 100644 --- a/samples/js/tutorial-voice-to-text/package.json +++ b/samples/js/tutorial-voice-to-text/package.json @@ -3,7 +3,13 @@ "version": "1.0.0", "type": "module", "main": "app.js", + "scripts": { + "start": "node app.js" + }, "dependencies": { - "foundry-local-sdk": "*" + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" } } diff --git a/samples/js/web-server-example/README.md b/samples/js/web-server-example/README.md deleted file mode 100644 index 4b95b0c2..00000000 --- a/samples/js/web-server-example/README.md +++ /dev/null @@ -1,39 +0,0 @@ -# Chat completions using an OpenAI-compatible web server - -This sample demonstrates how to use the Foundry Local SDK to perform chat completions using an OpenAI-compatible web server. It initializes the SDK with the server URL, selects a model, and sends a chat completion request with a system prompt and user message. - -## Prerequisites -- Ensure you have Node.js installed (version 20 or higher is recommended). - -## Setup project - -Navigate to the sample directory, setup the project, and install the required packages. - -1. Navigate to the sample directory and setup the project: - ```bash - cd samples/js/web-server-example - npm init -y - npm pkg set type=module - ``` -1. Install the Foundry Local and OpenAI packages: - - **macOS / Linux:** - ```bash - npm install --foreground-scripts foundry-local-sdk - npm install openai - ``` - - **Windows:** - ```bash - npm install --foreground-scripts --winml foundry-local-sdk - npm install openai - ``` - -## Run the sample - -Run the sample script using Node.js: - -```bash -cd samples/js/web-server-example -node app.js -``` \ No newline at end of file diff --git a/samples/js/web-server-example/package.json b/samples/js/web-server-example/package.json new file mode 100644 index 00000000..33670514 --- /dev/null +++ b/samples/js/web-server-example/package.json @@ -0,0 +1,16 @@ +{ + "name": "web-server-example", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "scripts": { + "start": "node app.js" + }, + "dependencies": { + "foundry-local-sdk": "latest", + "openai": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + } +} diff --git a/samples/python/README.md b/samples/python/README.md new file mode 100644 index 00000000..391cf123 --- /dev/null +++ b/samples/python/README.md @@ -0,0 +1,46 @@ +# 🚀 Foundry Local Python Samples + +These samples demonstrate how to use Foundry Local with Python. + +## Prerequisites + +- [Python](https://www.python.org/) 3.11 or later + +## Samples + +| Sample | Description | +|--------|-------------| +| [native-chat-completions](native-chat-completions/) | Initialize the SDK, start the local service, and run streaming chat completions. | +| [audio-transcription](audio-transcription/) | Transcribe audio files using the Whisper model. | +| [web-server](web-server/) | Start a local OpenAI-compatible web server and call it with the OpenAI Python SDK. | +| [tool-calling](tool-calling/) | Tool calling with custom function definitions (get_weather, calculate). 
| +| [langchain-integration](langchain-integration/) | LangChain integration for building translation and text generation chains. | +| [tutorial-chat-assistant](tutorial-chat-assistant/) | Build an interactive multi-turn chat assistant (tutorial). | +| [tutorial-document-summarizer](tutorial-document-summarizer/) | Summarize documents with AI (tutorial). | +| [tutorial-tool-calling](tutorial-tool-calling/) | Create a tool-calling assistant (tutorial). | +| [tutorial-voice-to-text](tutorial-voice-to-text/) | Transcribe and summarize audio (tutorial). | + +## Running a Sample + +1. Clone the repository: + + ```bash + git clone https://github.com/microsoft/Foundry-Local.git + cd Foundry-Local/samples/python + ``` + +2. Navigate to a sample and install dependencies: + + ```bash + cd native-chat-completions + pip install -r requirements.txt + ``` + +3. Run the sample: + + ```bash + python src/app.py + ``` + +> [!TIP] +> Each sample's `requirements.txt` uses environment markers to automatically install the right SDK for your platform. On **Windows**, `foundry-local-sdk-winml` is installed for broader hardware acceleration. On **macOS and Linux**, the standard `foundry-local-sdk` is used. Just run `pip install -r requirements.txt` — platform detection is handled for you. diff --git a/samples/python/audio-transcription/requirements.txt b/samples/python/audio-transcription/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/audio-transcription/requirements.txt +++ b/samples/python/audio-transcription/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/langchain-integration/requirements.txt b/samples/python/langchain-integration/requirements.txt index 0ded700a..9a6b6181 100644 --- a/samples/python/langchain-integration/requirements.txt +++ b/samples/python/langchain-integration/requirements.txt @@ -1,4 +1,5 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" openai langchain-openai langchain-core diff --git a/samples/python/native-chat-completions/requirements.txt b/samples/python/native-chat-completions/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/native-chat-completions/requirements.txt +++ b/samples/python/native-chat-completions/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/tool-calling/requirements.txt b/samples/python/tool-calling/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/tool-calling/requirements.txt +++ b/samples/python/tool-calling/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/tutorial-chat-assistant/requirements.txt b/samples/python/tutorial-chat-assistant/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/tutorial-chat-assistant/requirements.txt +++ b/samples/python/tutorial-chat-assistant/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/tutorial-document-summarizer/requirements.txt b/samples/python/tutorial-document-summarizer/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/tutorial-document-summarizer/requirements.txt +++ 
b/samples/python/tutorial-document-summarizer/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/tutorial-tool-calling/requirements.txt b/samples/python/tutorial-tool-calling/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/tutorial-tool-calling/requirements.txt +++ b/samples/python/tutorial-tool-calling/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/tutorial-voice-to-text/requirements.txt b/samples/python/tutorial-voice-to-text/requirements.txt index c79aa6dd..7602a48b 100644 --- a/samples/python/tutorial-voice-to-text/requirements.txt +++ b/samples/python/tutorial-voice-to-text/requirements.txt @@ -1 +1,2 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/web-server/requirements.txt b/samples/python/web-server/requirements.txt index 5a0f14ae..db870f60 100644 --- a/samples/python/web-server/requirements.txt +++ b/samples/python/web-server/requirements.txt @@ -1,2 +1,3 @@ -foundry-local-sdk +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" openai diff --git a/samples/rust/README.md b/samples/rust/README.md index c5399b3d..f2ca4f52 100644 --- a/samples/rust/README.md +++ b/samples/rust/README.md @@ -1,25 +1,45 @@ -# Foundry Local Rust Samples +# 🚀 Foundry Local Rust Samples -This directory contains samples demonstrating how to use the Foundry Local Rust SDK. +These samples demonstrate how to use the Rust binding for Foundry Local. ## Prerequisites -- Rust 1.70.0 or later +- [Rust](https://www.rust-lang.org/) 1.70.0 or later ## Samples -### [Foundry Local Web Server](./foundry-local-webserver) +| Sample | Description | +|--------|-------------| +| [native-chat-completions](native-chat-completions/) | Non-streaming and streaming chat completions using the native chat client. | +| [audio-transcription-example](audio-transcription-example/) | Audio transcription (non-streaming and streaming) using the Whisper model. | +| [foundry-local-webserver](foundry-local-webserver/) | Start a local OpenAI-compatible web server and call it with a standard HTTP client. | +| [tool-calling-foundry-local](tool-calling-foundry-local/) | Tool calling with streaming responses, multi-turn conversation, and local tool execution. | +| [tutorial-chat-assistant](tutorial-chat-assistant/) | Build an interactive multi-turn chat assistant (tutorial). | +| [tutorial-document-summarizer](tutorial-document-summarizer/) | Summarize documents with AI (tutorial). | +| [tutorial-tool-calling](tutorial-tool-calling/) | Create a tool-calling assistant (tutorial). | +| [tutorial-voice-to-text](tutorial-voice-to-text/) | Transcribe and summarize audio (tutorial). | -Demonstrates how to start a local OpenAI-compatible web server using the SDK, then call it with a standard HTTP client. +## Running a Sample -### [Native Chat Completions](./native-chat-completions) +1. Clone the repository: -Shows both non-streaming and streaming chat completions using the SDK's native chat client. + ```bash + git clone https://github.com/microsoft/Foundry-Local.git + cd Foundry-Local/samples/rust + ``` -### [Tool Calling with Foundry Local](./tool-calling-foundry-local) +2. 
Run a sample: -Demonstrates tool calling with streaming responses, multi-turn conversation, and local tool execution. + ```bash + cargo run -p native-chat-completions + ``` -### [Audio Transcription](./audio-transcription-example) + Or navigate to a sample directory and run directly: -Demonstrates audio transcription (non-streaming and streaming) using the `whisper` model. \ No newline at end of file + ```bash + cd native-chat-completions + cargo run + ``` + +> [!TIP] +> Each sample's `Cargo.toml` uses `[target.'cfg(windows)'.dependencies]` to automatically enable the `winml` feature on Windows for broader hardware acceleration. On macOS and Linux, the standard SDK is used. No manual configuration needed. \ No newline at end of file diff --git a/samples/rust/audio-transcription-example/Cargo.toml b/samples/rust/audio-transcription-example/Cargo.toml index ba0121c2..1305170f 100644 --- a/samples/rust/audio-transcription-example/Cargo.toml +++ b/samples/rust/audio-transcription-example/Cargo.toml @@ -8,3 +8,6 @@ description = "Audio transcription example using the Foundry Local Rust SDK" foundry-local-sdk = { path = "../../../sdk/rust" } tokio = { version = "1", features = ["rt-multi-thread", "macros"] } tokio-stream = "0.1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/audio-transcription-example/README.md b/samples/rust/audio-transcription-example/README.md deleted file mode 100644 index f12842ec..00000000 --- a/samples/rust/audio-transcription-example/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Sample: Audio Transcription - -This example demonstrates audio transcription (non-streaming and streaming) using the Foundry Local Rust SDK. It uses the `whisper` model to transcribe a WAV audio file. - -The `foundry-local-sdk` dependency is referenced via a local path. No crates.io publish is required: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust" } -``` - -Run the application with a path to a WAV file: - -```bash -cargo run -- path/to/audio.wav -``` - -## Using WinML (Windows only) - -To use the WinML backend, enable the `winml` feature in `Cargo.toml`: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } -``` - -No code changes are needed — same API, different backend. diff --git a/samples/rust/foundry-local-webserver/Cargo.toml b/samples/rust/foundry-local-webserver/Cargo.toml index d17e1b15..1671ea4c 100644 --- a/samples/rust/foundry-local-webserver/Cargo.toml +++ b/samples/rust/foundry-local-webserver/Cargo.toml @@ -9,3 +9,6 @@ foundry-local-sdk = { path = "../../../sdk/rust" } tokio = { version = "1", features = ["rt-multi-thread", "macros"] } serde_json = "1" reqwest = { version = "0.12", features = ["json"] } + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/foundry-local-webserver/README.md b/samples/rust/foundry-local-webserver/README.md deleted file mode 100644 index 2e727071..00000000 --- a/samples/rust/foundry-local-webserver/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Sample: Foundry Local Web Server - -This example demonstrates how to start a local OpenAI-compatible web server using the Foundry Local SDK, then call it with a standard HTTP client. This is useful when you want to use the OpenAI REST API directly or integrate with tools that expect an OpenAI-compatible endpoint. - -The `foundry-local-sdk` dependency is referenced via a local path. 
No crates.io publish is required: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust" } -``` - -Run the application: - -```bash -cargo run -``` - -## Using WinML (Windows only) - -To use the WinML backend, enable the `winml` feature in `Cargo.toml`: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } -``` - -No code changes are needed — same API, different backend. diff --git a/samples/rust/native-chat-completions/Cargo.toml b/samples/rust/native-chat-completions/Cargo.toml index d2f283b3..349eaf02 100644 --- a/samples/rust/native-chat-completions/Cargo.toml +++ b/samples/rust/native-chat-completions/Cargo.toml @@ -8,3 +8,6 @@ description = "Native SDK chat completions (non-streaming and streaming) using t foundry-local-sdk = { path = "../../../sdk/rust" } tokio = { version = "1", features = ["rt-multi-thread", "macros"] } tokio-stream = "0.1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/native-chat-completions/README.md b/samples/rust/native-chat-completions/README.md deleted file mode 100644 index 68afab96..00000000 --- a/samples/rust/native-chat-completions/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Sample: Native Chat Completions - -This example demonstrates both non-streaming and streaming chat completions using the Foundry Local Rust SDK's native chat client — no external HTTP libraries needed. - -The `foundry-local-sdk` dependency is referenced via a local path. No crates.io publish is required: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust" } -``` - -Run the application: - -```bash -cargo run -``` - -## Using WinML (Windows only) - -To use the WinML backend, enable the `winml` feature in `Cargo.toml`: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } -``` - -No code changes are needed — same API, different backend. diff --git a/samples/rust/tool-calling-foundry-local/Cargo.toml b/samples/rust/tool-calling-foundry-local/Cargo.toml index 767b631e..8411a2b2 100644 --- a/samples/rust/tool-calling-foundry-local/Cargo.toml +++ b/samples/rust/tool-calling-foundry-local/Cargo.toml @@ -9,3 +9,6 @@ foundry-local-sdk = { path = "../../../sdk/rust" } tokio = { version = "1", features = ["rt-multi-thread", "macros"] } tokio-stream = "0.1" serde_json = "1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/tool-calling-foundry-local/README.md b/samples/rust/tool-calling-foundry-local/README.md deleted file mode 100644 index 1e63aa9a..00000000 --- a/samples/rust/tool-calling-foundry-local/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Sample: Tool Calling with Foundry Local - -This is a simple example of how to use the Foundry Local Rust SDK to run a model locally and perform tool calling with it. The example demonstrates how to set up the SDK, initialize a model, and perform a generated tool call. - -The `foundry-local-sdk` dependency is referenced via a local path. No crates.io publish is required: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust" } -``` - -Run the application: - -```bash -cargo run -``` - -## Using WinML (Windows only) - -To use the WinML backend, enable the `winml` feature in `Cargo.toml`: - -```toml -foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } -``` - -No code changes are needed — same API, different backend. 
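The `[target.'cfg(windows)'.dependencies]` tables above (and in the tutorial `Cargo.toml` diffs that follow) are Cargo's way of expressing the platform split; the Python samples express the same split with the PEP 508 environment markers added to each `requirements.txt` earlier in this patch. As a minimal sketch of how such markers are evaluated, using the `packaging` library that pip builds on; the requirement strings are the ones from this patch, and the loop itself is illustrative rather than anything pip exposes:

```python
# Minimal sketch: evaluating the PEP 508 environment markers that the updated
# requirements.txt files rely on. Requires the `packaging` library.
from packaging.requirements import Requirement

requirements = [
    'foundry-local-sdk; sys_platform != "win32"',
    'foundry-local-sdk-winml; sys_platform == "win32"',
]

for line in requirements:
    req = Requirement(line)
    # Marker.evaluate() tests the marker against the running interpreter's
    # environment (sys_platform, python_version, and so on).
    if req.marker is None or req.marker.evaluate():
        print(f"would install: {req.name}")
```

Exactly one of the two requirements matches on any given platform, which is why a plain `pip install -r requirements.txt` resolves the correct SDK with no user action.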
diff --git a/samples/rust/tutorial-chat-assistant/Cargo.toml b/samples/rust/tutorial-chat-assistant/Cargo.toml index 83c7d237..5ff39b77 100644 --- a/samples/rust/tutorial-chat-assistant/Cargo.toml +++ b/samples/rust/tutorial-chat-assistant/Cargo.toml @@ -9,3 +9,6 @@ tokio = { version = "1", features = ["full"] } tokio-stream = "0.1" anyhow = "1" serde_json = "1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/tutorial-document-summarizer/Cargo.toml b/samples/rust/tutorial-document-summarizer/Cargo.toml index cdf77fb7..f80398e7 100644 --- a/samples/rust/tutorial-document-summarizer/Cargo.toml +++ b/samples/rust/tutorial-document-summarizer/Cargo.toml @@ -8,3 +8,6 @@ foundry-local-sdk = { path = "../../../sdk/rust" } tokio = { version = "1", features = ["full"] } tokio-stream = "0.1" anyhow = "1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/tutorial-tool-calling/Cargo.toml b/samples/rust/tutorial-tool-calling/Cargo.toml index 2de3d740..5707d696 100644 --- a/samples/rust/tutorial-tool-calling/Cargo.toml +++ b/samples/rust/tutorial-tool-calling/Cargo.toml @@ -9,3 +9,6 @@ tokio = { version = "1", features = ["full"] } tokio-stream = "0.1" anyhow = "1" serde_json = "1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/tutorial-voice-to-text/Cargo.toml b/samples/rust/tutorial-voice-to-text/Cargo.toml index 35ec4fc4..6abf6052 100644 --- a/samples/rust/tutorial-voice-to-text/Cargo.toml +++ b/samples/rust/tutorial-voice-to-text/Cargo.toml @@ -8,3 +8,6 @@ foundry-local-sdk = { path = "../../../sdk/rust" } tokio = { version = "1", features = ["full"] } tokio-stream = "0.1" anyhow = "1" + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } From 18c11738322ba14b13e116cb88d1cf95482226f9 Mon Sep 17 00:00:00 2001 From: bmehta001 Date: Thu, 9 Apr 2026 14:55:03 -0500 Subject: [PATCH 36/83] Add EP download progress bar to all samples (#621) All samples now show per-EP download progress with name and percentage, matching the pattern already in native-chat-completions samples. **Changes (38 files across 4 languages):** - **C# (9 samples)**: Replaced bare `DownloadAndRegisterEpsAsync()` and `RunWithSpinner` calls with progress callback - **JS/TS (12 samples)**: Replaced bare `downloadAndRegisterEps()` with progress callback - **Python (9 samples)**: Replaced bare `download_and_register_eps()` with progress callback - **Rust (8 samples)**: Replaced `download_and_register_eps(None)` with `download_and_register_eps_with_progress` Each callback tracks the current EP name and displays a carriage-return progress line showing EP name (left-aligned 30 chars) and download percentage.
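The progress line is the same small state machine in every language: remember which EP is currently downloading, emit a newline when the name changes, and otherwise redraw the line in place with a carriage return. A standalone sketch of that logic follows; the `make_ep_progress` factory and the simulated loop are illustrative, not SDK API:

```python
# Standalone sketch of the per-EP progress line this commit adds to every
# sample: print a newline when the EP name changes, otherwise redraw the
# same line in place with a carriage return.
def make_ep_progress():
    state = {"ep": ""}

    def on_progress(ep_name: str, percent: float) -> None:
        if ep_name != state["ep"]:
            if state["ep"]:
                print()  # finish the previous EP's line
            state["ep"] = ep_name
        # EP name left-aligned to 30 characters, percentage to one decimal.
        print(f"\r  {ep_name:<30} {percent:5.1f}%", end="", flush=True)

    return on_progress

# Simulated download; a real caller passes on_progress to the language's EP
# registration call (DownloadAndRegisterEpsAsync, downloadAndRegisterEps,
# download_and_register_eps, or download_and_register_eps_with_progress).
on_progress = make_ep_progress()
for ep in ("CPUExecutionProvider", "CUDAExecutionProvider"):
    for pct in (0.0, 42.5, 100.0):
        on_progress(ep, pct)
print()
```

Keeping the current EP name outside the callback is what lets one flat callback render a single line per EP instead of a line per progress event.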
--------- Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .../cs/audio-transcription-example/Program.cs | 16 ++++++--- .../cs/foundry-local-web-server/Program.cs | 16 ++++++--- .../cs/model-management-example/Program.cs | 12 ++++++- .../tool-calling-foundry-local-sdk/Program.cs | 16 ++++++--- .../Program.cs | 12 ++++++- samples/cs/tutorial-chat-assistant/Program.cs | 13 ++++++++ .../tutorial-document-summarizer/Program.cs | 13 ++++++++ samples/cs/tutorial-tool-calling/Program.cs | 13 ++++++++ samples/cs/tutorial-voice-to-text/Program.cs | 14 ++++++++ samples/js/audio-transcription-example/app.js | 11 +++++++ .../chat-and-audio-foundry-local/src/app.js | 11 +++++++ .../js/copilot-sdk-foundry-local/src/app.ts | 11 +++++++ .../src/tool-calling.ts | 11 +++++++ samples/js/electron-chat-application/main.js | 33 ++++++++++++++----- .../js/langchain-integration-example/app.js | 11 +++++++ .../js/tool-calling-foundry-local/src/app.js | 11 +++++++ samples/js/tutorial-chat-assistant/app.js | 11 +++++++ .../js/tutorial-document-summarizer/app.js | 11 +++++++ samples/js/tutorial-tool-calling/app.js | 11 +++++++ samples/js/tutorial-voice-to-text/app.js | 11 +++++++ samples/js/web-server-example/app.js | 11 +++++++ samples/python/audio-transcription/src/app.py | 14 ++++++++ .../python/langchain-integration/src/app.py | 14 ++++++++ .../python/native-chat-completions/src/app.py | 14 ++++++++ samples/python/tool-calling/src/app.py | 14 ++++++++ .../python/tutorial-chat-assistant/src/app.py | 14 ++++++++ .../tutorial-document-summarizer/src/app.py | 14 ++++++++ .../python/tutorial-tool-calling/src/app.py | 14 ++++++++ .../python/tutorial-voice-to-text/src/app.py | 14 ++++++++ samples/python/web-server/src/app.py | 14 ++++++++ .../audio-transcription-example/src/main.rs | 18 ++++++++++ .../rust/foundry-local-webserver/src/main.rs | 18 ++++++++++ .../rust/native-chat-completions/src/main.rs | 18 ++++++++++ .../tool-calling-foundry-local/src/main.rs | 18 ++++++++++ .../rust/tutorial-chat-assistant/src/main.rs | 18 ++++++++++ .../tutorial-document-summarizer/src/main.rs | 18 ++++++++++ .../rust/tutorial-tool-calling/src/main.rs | 18 ++++++++++ .../rust/tutorial-voice-to-text/src/main.rs | 18 ++++++++++ 38 files changed, 527 insertions(+), 22 deletions(-) diff --git a/samples/cs/audio-transcription-example/Program.cs b/samples/cs/audio-transcription-example/Program.cs index ac5689c1..10047421 100644 --- a/samples/cs/audio-transcription-example/Program.cs +++ b/samples/cs/audio-transcription-example/Program.cs @@ -17,10 +17,18 @@ // Ensure that any Execution Provider (EP) downloads run and are completed. -// EP packages include dependencies and may be large. -// Download is only required again if a new version of the EP is released. -// For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); +// Download and register all execution providers. 
+var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); // diff --git a/samples/cs/foundry-local-web-server/Program.cs b/samples/cs/foundry-local-web-server/Program.cs index 9225ad7d..eb88e4b3 100644 --- a/samples/cs/foundry-local-web-server/Program.cs +++ b/samples/cs/foundry-local-web-server/Program.cs @@ -23,10 +23,18 @@ // Ensure that any Execution Provider (EP) downloads run and are completed. -// EP packages include dependencies and may be large. -// Download is only required again if a new version of the EP is released. -// For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); +// Download and register all execution providers. +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); // diff --git a/samples/cs/model-management-example/Program.cs b/samples/cs/model-management-example/Program.cs index a34d2737..76beb89f 100644 --- a/samples/cs/model-management-example/Program.cs +++ b/samples/cs/model-management-example/Program.cs @@ -17,7 +17,17 @@ // Download and register all execution providers. -await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); // Model catalog operations diff --git a/samples/cs/tool-calling-foundry-local-sdk/Program.cs b/samples/cs/tool-calling-foundry-local-sdk/Program.cs index 8ac96369..2a568330 100644 --- a/samples/cs/tool-calling-foundry-local-sdk/Program.cs +++ b/samples/cs/tool-calling-foundry-local-sdk/Program.cs @@ -23,10 +23,18 @@ // Ensure that any Execution Provider (EP) downloads run and are completed. -// EP packages include dependencies and may be large. -// Download is only required again if a new version of the EP is released. -// For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); +// Download and register all execution providers. +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); // diff --git a/samples/cs/tool-calling-foundry-local-web-server/Program.cs b/samples/cs/tool-calling-foundry-local-web-server/Program.cs index 48ee6c6f..6644a438 100644 --- a/samples/cs/tool-calling-foundry-local-web-server/Program.cs +++ b/samples/cs/tool-calling-foundry-local-web-server/Program.cs @@ -22,7 +22,17 @@ // Download and register all execution providers. 
-await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); // Get the model catalog diff --git a/samples/cs/tutorial-chat-assistant/Program.cs b/samples/cs/tutorial-chat-assistant/Program.cs index 10e9a63b..d06de6a5 100644 --- a/samples/cs/tutorial-chat-assistant/Program.cs +++ b/samples/cs/tutorial-chat-assistant/Program.cs @@ -24,6 +24,19 @@ await FoundryLocalManager.CreateAsync(config, logger); var mgr = FoundryLocalManager.Instance; +// Download and register all execution providers. +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); + // Select and load a model from the catalog var catalog = await mgr.GetCatalogAsync(); var model = await catalog.GetModelAsync("qwen2.5-0.5b") diff --git a/samples/cs/tutorial-document-summarizer/Program.cs b/samples/cs/tutorial-document-summarizer/Program.cs index bc5546f6..333d5c96 100644 --- a/samples/cs/tutorial-document-summarizer/Program.cs +++ b/samples/cs/tutorial-document-summarizer/Program.cs @@ -24,6 +24,19 @@ await FoundryLocalManager.CreateAsync(config, logger); var mgr = FoundryLocalManager.Instance; +// Download and register all execution providers. +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); + // Select and load a model from the catalog var catalog = await mgr.GetCatalogAsync(); var model = await catalog.GetModelAsync("qwen2.5-0.5b") diff --git a/samples/cs/tutorial-tool-calling/Program.cs b/samples/cs/tutorial-tool-calling/Program.cs index 74f137db..5ae60419 100644 --- a/samples/cs/tutorial-tool-calling/Program.cs +++ b/samples/cs/tutorial-tool-calling/Program.cs @@ -122,6 +122,19 @@ string ExecuteTool(string functionName, JsonElement arguments) await FoundryLocalManager.CreateAsync(config, logger); var mgr = FoundryLocalManager.Instance; +// Download and register all execution providers. +var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); + var catalog = await mgr.GetCatalogAsync(); var model = await catalog.GetModelAsync("qwen2.5-0.5b") ?? throw new Exception("Model not found"); diff --git a/samples/cs/tutorial-voice-to-text/Program.cs b/samples/cs/tutorial-voice-to-text/Program.cs index 976b44e4..9a1a36c3 100644 --- a/samples/cs/tutorial-voice-to-text/Program.cs +++ b/samples/cs/tutorial-voice-to-text/Program.cs @@ -26,6 +26,20 @@ // Initialize the singleton instance await FoundryLocalManager.CreateAsync(config, logger); var mgr = FoundryLocalManager.Instance; + +// Download and register all execution providers. 
+var currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") Console.WriteLine(); + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(30)} {percent,6:F1}%"); +}); +if (currentEp != "") Console.WriteLine(); + var catalog = await mgr.GetCatalogAsync(); // diff --git a/samples/js/audio-transcription-example/app.js b/samples/js/audio-transcription-example/app.js index c2517ec7..51b69f99 100644 --- a/samples/js/audio-transcription-example/app.js +++ b/samples/js/audio-transcription-example/app.js @@ -14,6 +14,17 @@ const manager = FoundryLocalManager.create({ // console.log('✓ SDK initialized successfully'); +// Download and register all execution providers. +let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + // // Get the model object const modelAlias = 'whisper-tiny'; // Using an available model from the list above diff --git a/samples/js/chat-and-audio-foundry-local/src/app.js b/samples/js/chat-and-audio-foundry-local/src/app.js index 50bc195f..87845aa6 100644 --- a/samples/js/chat-and-audio-foundry-local/src/app.js +++ b/samples/js/chat-and-audio-foundry-local/src/app.js @@ -15,6 +15,17 @@ async function main() { logLevel: "info", }); + // Download and register all execution providers. + let currentEp = ''; + await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); + }); + if (currentEp !== '') process.stdout.write('\n'); + const catalog = manager.catalog; // --- Load both models --- diff --git a/samples/js/copilot-sdk-foundry-local/src/app.ts b/samples/js/copilot-sdk-foundry-local/src/app.ts index c7c7966a..4c201351 100644 --- a/samples/js/copilot-sdk-foundry-local/src/app.ts +++ b/samples/js/copilot-sdk-foundry-local/src/app.ts @@ -60,6 +60,17 @@ async function main() { webServiceUrls: endpointUrl, }); + // Download and register all execution providers. + let currentEp = ''; + await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); + }); + if (currentEp !== '') process.stdout.write('\n'); + model = await manager.catalog.getModel(alias); await model.download(); await model.load(); diff --git a/samples/js/copilot-sdk-foundry-local/src/tool-calling.ts b/samples/js/copilot-sdk-foundry-local/src/tool-calling.ts index 3e41748c..3651b527 100644 --- a/samples/js/copilot-sdk-foundry-local/src/tool-calling.ts +++ b/samples/js/copilot-sdk-foundry-local/src/tool-calling.ts @@ -139,6 +139,17 @@ async function main() { webServiceUrls: endpointUrl, }); + // Download and register all execution providers. 
+ let currentEp = ''; + await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); + }); + if (currentEp !== '') process.stdout.write('\n'); + model = await manager.catalog.getModel(alias); await model.download(); await model.load(); diff --git a/samples/js/electron-chat-application/main.js b/samples/js/electron-chat-application/main.js index 22a1fc1d..92473846 100644 --- a/samples/js/electron-chat-application/main.js +++ b/samples/js/electron-chat-application/main.js @@ -50,17 +50,34 @@ let webServiceStarted = false; const SERVICE_PORT = 47392; const SERVICE_URL = `http://127.0.0.1:${SERVICE_PORT}`; +let initPromise = null; + async function initializeSDK() { - if (manager) return manager; + if (initPromise) return initPromise; - const { FoundryLocalManager } = await import('foundry-local-sdk'); - manager = FoundryLocalManager.create({ - appName: 'foundry_local_samples', - logLevel: 'info', - webServiceUrls: SERVICE_URL - }); + initPromise = (async () => { + const { FoundryLocalManager } = await import('foundry-local-sdk'); + manager = FoundryLocalManager.create({ + appName: 'foundry_local_samples', + logLevel: 'info', + webServiceUrls: SERVICE_URL + }); + + // Download and register all execution providers. + let currentEp = ''; + await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); + }); + if (currentEp !== '') process.stdout.write('\n'); + + return manager; + })(); - return manager; + return initPromise; } function ensureWebServiceStarted() { diff --git a/samples/js/langchain-integration-example/app.js b/samples/js/langchain-integration-example/app.js index 9e4b7b60..8b2e74e3 100644 --- a/samples/js/langchain-integration-example/app.js +++ b/samples/js/langchain-integration-example/app.js @@ -19,6 +19,17 @@ const manager = FoundryLocalManager.create({ // console.log('✓ SDK initialized successfully'); +// Download and register all execution providers. +let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + // Get the model object const modelAlias = 'qwen2.5-0.5b'; // Using an available model from the list above const model = await manager.catalog.getModel(modelAlias); diff --git a/samples/js/tool-calling-foundry-local/src/app.js b/samples/js/tool-calling-foundry-local/src/app.js index f92464ee..cb06466b 100644 --- a/samples/js/tool-calling-foundry-local/src/app.js +++ b/samples/js/tool-calling-foundry-local/src/app.js @@ -33,6 +33,17 @@ async function runToolCallingExample() { }); // + // Download and register all execution providers. 
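The Electron change above also replaces the `if (manager) return manager;` guard with a cached `initPromise`, the usual defense against concurrent initialization: store the in-flight promise rather than the finished result, so overlapping callers all await the same work. The same idiom in asyncio terms, as a sketch; `init_sdk` and `_do_init` are stand-in names, not SDK functions:

```python
# Sketch of the memoized-async-init idiom behind the Electron sample's
# initPromise: cache the in-flight task rather than the finished manager,
# so concurrent callers share one initialization.
import asyncio

_init_task = None  # module-level cache, like the sample's initPromise

async def _do_init() -> str:
    await asyncio.sleep(0.1)  # stands in for SDK creation and EP registration
    print("initialized once")
    return "manager"

def init_sdk() -> "asyncio.Task[str]":
    global _init_task
    if _init_task is None:          # first caller starts the work
        _init_task = asyncio.ensure_future(_do_init())
    return _init_task               # later callers reuse the same task

async def main() -> None:
    first, second = await asyncio.gather(init_sdk(), init_sdk())
    print(first is second)  # True: one initialization served both callers

asyncio.run(main())
```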
+ let currentEp = ''; + await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); + }); + if (currentEp !== '') process.stdout.write('\n'); + // const catalog = manager.catalog; model = await catalog.getModel(alias); diff --git a/samples/js/tutorial-chat-assistant/app.js b/samples/js/tutorial-chat-assistant/app.js index 9a5a430c..bb97960d 100644 --- a/samples/js/tutorial-chat-assistant/app.js +++ b/samples/js/tutorial-chat-assistant/app.js @@ -11,6 +11,17 @@ const manager = FoundryLocalManager.create({ logLevel: 'info' }); +// Download and register all execution providers. +let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + // Select and load a model from the catalog const model = await manager.catalog.getModel('qwen2.5-0.5b'); diff --git a/samples/js/tutorial-document-summarizer/app.js b/samples/js/tutorial-document-summarizer/app.js index f43e204d..436b626b 100644 --- a/samples/js/tutorial-document-summarizer/app.js +++ b/samples/js/tutorial-document-summarizer/app.js @@ -40,6 +40,17 @@ const manager = FoundryLocalManager.create({ logLevel: 'info' }); +// Download and register all execution providers. +let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + // Select and load a model from the catalog const model = await manager.catalog.getModel('qwen2.5-0.5b'); diff --git a/samples/js/tutorial-tool-calling/app.js b/samples/js/tutorial-tool-calling/app.js index efdd710c..b3b15d0a 100644 --- a/samples/js/tutorial-tool-calling/app.js +++ b/samples/js/tutorial-tool-calling/app.js @@ -122,6 +122,17 @@ const manager = FoundryLocalManager.create({ logLevel: 'info' }); +// Download and register all execution providers. +let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + const model = await manager.catalog.getModel('qwen2.5-0.5b'); await model.download((progress) => { diff --git a/samples/js/tutorial-voice-to-text/app.js b/samples/js/tutorial-voice-to-text/app.js index 08074100..60057e06 100644 --- a/samples/js/tutorial-voice-to-text/app.js +++ b/samples/js/tutorial-voice-to-text/app.js @@ -15,6 +15,17 @@ const manager = FoundryLocalManager.create({ }); // +// Download and register all execution providers. 
+let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + // // Load the speech-to-text model const speechModel = await manager.catalog.getModel('whisper-tiny'); diff --git a/samples/js/web-server-example/app.js b/samples/js/web-server-example/app.js index b03bf9df..c9a1e5ce 100644 --- a/samples/js/web-server-example/app.js +++ b/samples/js/web-server-example/app.js @@ -18,6 +18,17 @@ const manager = FoundryLocalManager.create({ // console.log('✓ SDK initialized successfully'); +// Download and register all execution providers. +let currentEp = ''; +await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') process.stdout.write('\n'); + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(30)} ${percent.toFixed(1).padStart(5)}%`); +}); +if (currentEp !== '') process.stdout.write('\n'); + // // Get the model object const modelAlias = 'qwen2.5-0.5b'; // Using an available model from the list above diff --git a/samples/python/audio-transcription/src/app.py b/samples/python/audio-transcription/src/app.py index 20f9be04..ca06fb28 100644 --- a/samples/python/audio-transcription/src/app.py +++ b/samples/python/audio-transcription/src/app.py @@ -11,6 +11,20 @@ FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance +# Download and register all execution providers. +current_ep = "" +def _ep_progress(ep_name: str, percent: float): + global current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + +manager.download_and_register_eps(progress_callback=_ep_progress) +if current_ep: + print() + # Load the whisper model for speech-to-text model = manager.catalog.get_model("whisper-tiny") model.download( diff --git a/samples/python/langchain-integration/src/app.py b/samples/python/langchain-integration/src/app.py index 1dd00224..4f8661cd 100644 --- a/samples/python/langchain-integration/src/app.py +++ b/samples/python/langchain-integration/src/app.py @@ -12,6 +12,20 @@ FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance +# Download and register all execution providers. +current_ep = "" +def _ep_progress(ep_name: str, percent: float): + global current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + +manager.download_and_register_eps(progress_callback=_ep_progress) +if current_ep: + print() + # Load a model model = manager.catalog.get_model("qwen2.5-0.5b") model.download( diff --git a/samples/python/native-chat-completions/src/app.py b/samples/python/native-chat-completions/src/app.py index ca087b77..457d0cf5 100644 --- a/samples/python/native-chat-completions/src/app.py +++ b/samples/python/native-chat-completions/src/app.py @@ -12,6 +12,20 @@ async def main(): FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance + # Download and register all execution providers. 
+ current_ep = "" + def ep_progress(ep_name: str, percent: float): + nonlocal current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + + manager.download_and_register_eps(progress_callback=ep_progress) + if current_ep: + print() + # Select and load a model from the catalog model = manager.catalog.get_model("qwen2.5-0.5b") model.download( diff --git a/samples/python/tool-calling/src/app.py b/samples/python/tool-calling/src/app.py index ac00b023..995900e3 100644 --- a/samples/python/tool-calling/src/app.py +++ b/samples/python/tool-calling/src/app.py @@ -136,6 +136,20 @@ async def main(): FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance + # Download and register all execution providers. + current_ep = "" + def ep_progress(ep_name: str, percent: float): + nonlocal current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + + manager.download_and_register_eps(progress_callback=ep_progress) + if current_ep: + print() + # Select and load a model model = manager.catalog.get_model("qwen2.5-0.5b") model.download( diff --git a/samples/python/tutorial-chat-assistant/src/app.py b/samples/python/tutorial-chat-assistant/src/app.py index 05fa0bcc..5aee3ae1 100644 --- a/samples/python/tutorial-chat-assistant/src/app.py +++ b/samples/python/tutorial-chat-assistant/src/app.py @@ -12,6 +12,20 @@ async def main(): FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance + # Download and register all execution providers. + current_ep = "" + def ep_progress(ep_name: str, percent: float): + nonlocal current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + + manager.download_and_register_eps(progress_callback=ep_progress) + if current_ep: + print() + # Select and load a model from the catalog model = manager.catalog.get_model("qwen2.5-0.5b") model.download(lambda progress: print(f"\rDownloading model: {progress:.2f}%", end="", flush=True)) diff --git a/samples/python/tutorial-document-summarizer/src/app.py b/samples/python/tutorial-document-summarizer/src/app.py index 3a62fe24..671057cd 100644 --- a/samples/python/tutorial-document-summarizer/src/app.py +++ b/samples/python/tutorial-document-summarizer/src/app.py @@ -39,6 +39,20 @@ async def main(): FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance + # Download and register all execution providers. 
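A small Python scoping detail runs through these hunks: the script-style samples keep `current_ep` at module scope and declare it `global` in the callback, while the samples with a `main()` function nest the callback and declare it `nonlocal`. Both keywords exist for the same reason, shown in this standalone snippet (not SDK code): assignment inside a function would otherwise create a fresh local, and the tracked EP name would never update.

```python
# Why the nested callbacks use `nonlocal current_ep`: without it, the
# assignment below would bind a new local on every call and the
# "did the EP change?" check would always see "".
def main() -> None:
    current_ep = ""

    def ep_progress(ep_name: str) -> None:
        nonlocal current_ep  # rebind main()'s variable, not a new local
        if ep_name != current_ep:
            current_ep = ep_name
            print(f"now downloading: {ep_name}")

    ep_progress("CPUExecutionProvider")
    ep_progress("CPUExecutionProvider")   # same EP: prints nothing
    ep_progress("CUDAExecutionProvider")

main()
```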
+ current_ep = "" + def ep_progress(ep_name: str, percent: float): + nonlocal current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + + manager.download_and_register_eps(progress_callback=ep_progress) + if current_ep: + print() + # Select and load a model from the catalog model = manager.catalog.get_model("qwen2.5-0.5b") model.download(lambda p: print(f"\rDownloading model: {p:.2f}%", end="", flush=True)) diff --git a/samples/python/tutorial-tool-calling/src/app.py b/samples/python/tutorial-tool-calling/src/app.py index b26085f6..5fc1cc53 100644 --- a/samples/python/tutorial-tool-calling/src/app.py +++ b/samples/python/tutorial-tool-calling/src/app.py @@ -136,6 +136,20 @@ async def main(): FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance + # Download and register all execution providers. + current_ep = "" + def ep_progress(ep_name: str, percent: float): + nonlocal current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + + manager.download_and_register_eps(progress_callback=ep_progress) + if current_ep: + print() + # Select and load a model model = manager.catalog.get_model("qwen2.5-0.5b") model.download( diff --git a/samples/python/tutorial-voice-to-text/src/app.py b/samples/python/tutorial-voice-to-text/src/app.py index 4174e5ac..46ea3926 100644 --- a/samples/python/tutorial-voice-to-text/src/app.py +++ b/samples/python/tutorial-voice-to-text/src/app.py @@ -13,6 +13,20 @@ async def main(): manager = FoundryLocalManager.instance # + # Download and register all execution providers. + current_ep = "" + def ep_progress(ep_name: str, percent: float): + nonlocal current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + + manager.download_and_register_eps(progress_callback=ep_progress) + if current_ep: + print() + # # Load the speech-to-text model speech_model = manager.catalog.get_model("whisper-tiny") diff --git a/samples/python/web-server/src/app.py b/samples/python/web-server/src/app.py index dc554ad9..67117029 100644 --- a/samples/python/web-server/src/app.py +++ b/samples/python/web-server/src/app.py @@ -10,6 +10,20 @@ FoundryLocalManager.initialize(config) manager = FoundryLocalManager.instance +# Download and register all execution providers. +current_ep = "" +def _ep_progress(ep_name: str, percent: float): + global current_ep + if ep_name != current_ep: + if current_ep: + print() + current_ep = ep_name + print(f"\r {ep_name:<30} {percent:5.1f}%", end="", flush=True) + +manager.download_and_register_eps(progress_callback=_ep_progress) +if current_ep: + print() + # Load a model model = manager.catalog.get_model("qwen2.5-0.5b") model.download( diff --git a/samples/rust/audio-transcription-example/src/main.rs b/samples/rust/audio-transcription-example/src/main.rs index f5fb4cff..70150546 100644 --- a/samples/rust/audio-transcription-example/src/main.rs +++ b/samples/rust/audio-transcription-example/src/main.rs @@ -27,6 +27,24 @@ async fn main() -> Result<(), Box> { let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; // + // Download and register all execution providers. 
+ manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // ── 2. Pick the whisper model and ensure it is downloaded ─────────── // let model = manager.catalog().get_model(ALIAS).await?; diff --git a/samples/rust/foundry-local-webserver/src/main.rs b/samples/rust/foundry-local-webserver/src/main.rs index 02f0360e..d36581e9 100644 --- a/samples/rust/foundry-local-webserver/src/main.rs +++ b/samples/rust/foundry-local-webserver/src/main.rs @@ -26,6 +26,24 @@ async fn main() -> Result<(), Box> { println!("✓ SDK initialized"); // + // Download and register all execution providers. + manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // ── 2. Download and load a model ───────────────────────────────────── // let model_alias = "qwen2.5-0.5b"; diff --git a/samples/rust/native-chat-completions/src/main.rs b/samples/rust/native-chat-completions/src/main.rs index d1c7cfd1..bee7147e 100644 --- a/samples/rust/native-chat-completions/src/main.rs +++ b/samples/rust/native-chat-completions/src/main.rs @@ -24,6 +24,24 @@ async fn main() -> Result<(), Box> { let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; // + // Download and register all execution providers. + manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // ── 2. Pick a model and ensure it is downloaded ───────────────────── // let model = manager.catalog().get_model(ALIAS).await?; diff --git a/samples/rust/tool-calling-foundry-local/src/main.rs b/samples/rust/tool-calling-foundry-local/src/main.rs index f6ab1965..7b96333a 100644 --- a/samples/rust/tool-calling-foundry-local/src/main.rs +++ b/samples/rust/tool-calling-foundry-local/src/main.rs @@ -58,6 +58,24 @@ async fn main() -> Result<(), Box> { let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; // + // Download and register all execution providers. + manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // ── 2. 
Load a model────────────────────────────────────────────────── // let model = manager.catalog().get_model(ALIAS).await?; diff --git a/samples/rust/tutorial-chat-assistant/src/main.rs b/samples/rust/tutorial-chat-assistant/src/main.rs index 34a3c6ed..ab98460c 100644 --- a/samples/rust/tutorial-chat-assistant/src/main.rs +++ b/samples/rust/tutorial-chat-assistant/src/main.rs @@ -15,6 +15,24 @@ async fn main() -> anyhow::Result<()> { // Initialize the Foundry Local SDK let manager = FoundryLocalManager::create(FoundryLocalConfig::new("chat-assistant"))?; + // Download and register all execution providers. + manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // Select and load a model from the catalog let model = manager.catalog().get_model("qwen2.5-0.5b").await?; diff --git a/samples/rust/tutorial-document-summarizer/src/main.rs b/samples/rust/tutorial-document-summarizer/src/main.rs index be600056..6e6e8e4a 100644 --- a/samples/rust/tutorial-document-summarizer/src/main.rs +++ b/samples/rust/tutorial-document-summarizer/src/main.rs @@ -87,6 +87,24 @@ async fn main() -> anyhow::Result<()> { FoundryLocalConfig::new("doc-summarizer"), )?; + // Download and register all execution providers. + manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // Select and load a model from the catalog let model = manager .catalog() diff --git a/samples/rust/tutorial-tool-calling/src/main.rs b/samples/rust/tutorial-tool-calling/src/main.rs index d6cfb9ce..131e4ad5 100644 --- a/samples/rust/tutorial-tool-calling/src/main.rs +++ b/samples/rust/tutorial-tool-calling/src/main.rs @@ -190,6 +190,24 @@ async fn main() -> anyhow::Result<()> { FoundryLocalConfig::new("tool-calling-app"), )?; + // Download and register all execution providers. + manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // Select and load a model let model = manager .catalog() diff --git a/samples/rust/tutorial-voice-to-text/src/main.rs b/samples/rust/tutorial-voice-to-text/src/main.rs index fd802c77..d5be04c2 100644 --- a/samples/rust/tutorial-voice-to-text/src/main.rs +++ b/samples/rust/tutorial-voice-to-text/src/main.rs @@ -18,6 +18,24 @@ async fn main() -> anyhow::Result<()> { )?; // + // Download and register all execution providers. 
+ manager + .download_and_register_eps_with_progress(None, { + let mut current_ep = String::new(); + move |ep_name: &str, percent: f64| { + if ep_name != current_ep { + if !current_ep.is_empty() { + println!(); + } + current_ep = ep_name.to_string(); + } + print!("\r {:<30} {:5.1}%", ep_name, percent); + io::stdout().flush().ok(); + } + }) + .await?; + println!(); + // // Load the speech-to-text model let speech_model = manager From 519d6bc0bcaf4466203959e6fd6a9e803866f7b7 Mon Sep 17 00:00:00 2001 From: bmehta001 Date: Thu, 9 Apr 2026 16:47:18 -0500 Subject: [PATCH 37/83] Clean up samples, tests, and docs (#619) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix SDK: - JS: Fixed tool.function.description.trim() TypeError when description is undefined (chatClient.ts:170) Fix samples/tests - C#: Added missing ToolCallId on tool response - Python: Removed unnecessary async/await (6 files), fixed .message.content → .delta.content crash - JS: Fixed .message → .delta in streaming (3 files), replaced broken callback with for await...of Fix documentation - Python test README: Fixed version 3.10+ → 3.11+ - Rust: Fixed stale return types, removed private ModelVariant section and selected_variant() method --- .pipelines/templates/test-cs-steps.yml | 1 + .pipelines/templates/test-js-steps.yml | 1 + .pipelines/templates/test-python-steps.yml | 1 + .pipelines/templates/test-rust-steps.yml | 1 + .../tool-calling-foundry-local-sdk/Program.cs | 1 + .../chat-and-audio-foundry-local/src/app.js | 2 +- samples/js/native-chat-completions/app.js | 2 +- samples/js/tutorial-chat-assistant/app.js | 6 +-- .../python/native-chat-completions/src/app.py | 5 +-- samples/python/tool-calling/src/app.py | 5 +-- .../python/tutorial-chat-assistant/src/app.py | 7 ++-- .../tutorial-document-summarizer/src/app.py | 15 ++++---- .../python/tutorial-tool-calling/src/app.py | 5 +-- .../python/tutorial-voice-to-text/src/app.py | 5 +-- .../ChatCompletionsTests.cs | 8 +++- sdk/js/src/openai/chatClient.ts | 5 ++- sdk/python/test/README.md | 2 +- sdk/rust/docs/api.md | 37 +++---------------- 18 files changed, 44 insertions(+), 65 deletions(-) diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml index 92c9b6ee..32ce661c 100644 --- a/.pipelines/templates/test-cs-steps.yml +++ b/.pipelines/templates/test-cs-steps.yml @@ -20,6 +20,7 @@ steps: $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - task: UseDotNet@2 displayName: 'Use .NET 9 SDK' diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml index 1814626a..70e2a16b 100644 --- a/.pipelines/templates/test-js-steps.yml +++ b/.pipelines/templates/test-js-steps.yml @@ -20,6 +20,7 @@ steps: $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - ${{ if eq(parameters.isWinML, true) }}: - task: PowerShell@2 diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml index 1de20b1c..c177efde 100644 --- a/.pipelines/templates/test-python-steps.yml +++ b/.pipelines/templates/test-python-steps.yml @@ -20,6 +20,7 @@ steps: 
$testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - ${{ if eq(parameters.isWinML, true) }}: - task: PowerShell@2 diff --git a/.pipelines/templates/test-rust-steps.yml b/.pipelines/templates/test-rust-steps.yml index 31bfd75e..40b36a23 100644 --- a/.pipelines/templates/test-rust-steps.yml +++ b/.pipelines/templates/test-rust-steps.yml @@ -18,6 +18,7 @@ steps: $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - ${{ if eq(parameters.isWinML, true) }}: - task: PowerShell@2 diff --git a/samples/cs/tool-calling-foundry-local-sdk/Program.cs b/samples/cs/tool-calling-foundry-local-sdk/Program.cs index 2a568330..a4074233 100644 --- a/samples/cs/tool-calling-foundry-local-sdk/Program.cs +++ b/samples/cs/tool-calling-foundry-local-sdk/Program.cs @@ -141,6 +141,7 @@ await model.DownloadAsync(progress => var response = new ChatMessage { Role = "tool", + ToolCallId = chunk!.Choices[0].Message.ToolCalls![0].Id, Content = result.ToString(), }; messages.Add(response); diff --git a/samples/js/chat-and-audio-foundry-local/src/app.js b/samples/js/chat-and-audio-foundry-local/src/app.js index 87845aa6..12ddabb9 100644 --- a/samples/js/chat-and-audio-foundry-local/src/app.js +++ b/samples/js/chat-and-audio-foundry-local/src/app.js @@ -95,7 +95,7 @@ async function main() { }, { role: "user", content: transcription.text }, ])) { - const content = chunk.choices?.[0]?.message?.content; + const content = chunk.choices?.[0]?.delta?.content; if (content) { process.stdout.write(content); } diff --git a/samples/js/native-chat-completions/app.js b/samples/js/native-chat-completions/app.js index 9e34c90f..2ecc4356 100644 --- a/samples/js/native-chat-completions/app.js +++ b/samples/js/native-chat-completions/app.js @@ -84,7 +84,7 @@ console.log('\nTesting streaming completion...'); for await (const chunk of chatClient.completeStreamingChat( [{ role: 'user', content: 'Write a short poem about programming.' 
}] )) { - const content = chunk.choices?.[0]?.message?.content; + const content = chunk.choices?.[0]?.delta?.content; if (content) { process.stdout.write(content); } diff --git a/samples/js/tutorial-chat-assistant/app.js b/samples/js/tutorial-chat-assistant/app.js index bb97960d..842db058 100644 --- a/samples/js/tutorial-chat-assistant/app.js +++ b/samples/js/tutorial-chat-assistant/app.js @@ -73,13 +73,13 @@ while (true) { // Stream the response token by token process.stdout.write('Assistant: '); let fullResponse = ''; - await chatClient.completeStreamingChat(messages, (chunk) => { - const content = chunk.choices?.[0]?.message?.content; + for await (const chunk of chatClient.completeStreamingChat(messages)) { + const content = chunk.choices?.[0]?.delta?.content; if (content) { process.stdout.write(content); fullResponse += content; } - }); + } console.log('\n'); // diff --git a/samples/python/native-chat-completions/src/app.py b/samples/python/native-chat-completions/src/app.py index 457d0cf5..eba9df41 100644 --- a/samples/python/native-chat-completions/src/app.py +++ b/samples/python/native-chat-completions/src/app.py @@ -1,11 +1,10 @@ # # -import asyncio from foundry_local_sdk import Configuration, FoundryLocalManager # -async def main(): +def main(): # # Initialize the Foundry Local SDK config = Configuration(app_name="foundry_local_samples") @@ -64,5 +63,5 @@ def ep_progress(ep_name: str, percent: float): if __name__ == "__main__": - asyncio.run(main()) + main() # diff --git a/samples/python/tool-calling/src/app.py b/samples/python/tool-calling/src/app.py index 995900e3..db619550 100644 --- a/samples/python/tool-calling/src/app.py +++ b/samples/python/tool-calling/src/app.py @@ -1,6 +1,5 @@ # # -import asyncio import json from foundry_local_sdk import Configuration, FoundryLocalManager # @@ -130,7 +129,7 @@ def process_tool_calls(messages, response, client): # -async def main(): +def main(): # Initialize the Foundry Local SDK config = Configuration(app_name="foundry_local_samples") FoundryLocalManager.initialize(config) @@ -192,5 +191,5 @@ def ep_progress(ep_name: str, percent: float): if __name__ == "__main__": - asyncio.run(main()) + main() # diff --git a/samples/python/tutorial-chat-assistant/src/app.py b/samples/python/tutorial-chat-assistant/src/app.py index 5aee3ae1..13f1c500 100644 --- a/samples/python/tutorial-chat-assistant/src/app.py +++ b/samples/python/tutorial-chat-assistant/src/app.py @@ -1,11 +1,10 @@ # # -import asyncio from foundry_local_sdk import Configuration, FoundryLocalManager # -async def main(): +def main(): # # Initialize the Foundry Local SDK config = Configuration(app_name="foundry_local_samples") @@ -64,7 +63,7 @@ def ep_progress(ep_name: str, percent: float): print("Assistant: ", end="", flush=True) full_response = "" for chunk in client.complete_streaming_chat(messages): - content = chunk.choices[0].message.content + content = chunk.choices[0].delta.content if content: print(content, end="", flush=True) full_response += content @@ -81,5 +80,5 @@ def ep_progress(ep_name: str, percent: float): if __name__ == "__main__": - asyncio.run(main()) + main() # diff --git a/samples/python/tutorial-document-summarizer/src/app.py b/samples/python/tutorial-document-summarizer/src/app.py index 671057cd..055bb992 100644 --- a/samples/python/tutorial-document-summarizer/src/app.py +++ b/samples/python/tutorial-document-summarizer/src/app.py @@ -1,13 +1,12 @@ # # -import asyncio import sys from pathlib import Path from foundry_local_sdk import Configuration, 
FoundryLocalManager # -async def summarize_file(client, file_path, system_prompt): +def summarize_file(client, file_path, system_prompt): """Summarize a single file and print the result.""" content = Path(file_path).read_text(encoding="utf-8") messages = [ @@ -18,7 +17,7 @@ async def summarize_file(client, file_path, system_prompt): print(response.choices[0].message.content) -async def summarize_directory(client, directory, system_prompt): +def summarize_directory(client, directory, system_prompt): """Summarize all .txt files in a directory.""" txt_files = sorted(Path(directory).glob("*.txt")) @@ -28,11 +27,11 @@ async def summarize_directory(client, directory, system_prompt): for txt_file in txt_files: print(f"--- {txt_file.name} ---") - await summarize_file(client, txt_file, system_prompt) + summarize_file(client, txt_file, system_prompt) print() -async def main(): +def main(): # # Initialize the Foundry Local SDK config = Configuration(app_name="foundry_local_samples") @@ -76,10 +75,10 @@ def ep_progress(ep_name: str, percent: float): # if target_path.is_dir(): - await summarize_directory(client, target_path, system_prompt) + summarize_directory(client, target_path, system_prompt) else: print(f"--- {target_path.name} ---") - await summarize_file(client, target_path, system_prompt) + summarize_file(client, target_path, system_prompt) # # Clean up @@ -88,5 +87,5 @@ def ep_progress(ep_name: str, percent: float): if __name__ == "__main__": - asyncio.run(main()) + main() # diff --git a/samples/python/tutorial-tool-calling/src/app.py b/samples/python/tutorial-tool-calling/src/app.py index 5fc1cc53..bb22bfe0 100644 --- a/samples/python/tutorial-tool-calling/src/app.py +++ b/samples/python/tutorial-tool-calling/src/app.py @@ -1,6 +1,5 @@ # # -import asyncio import json from foundry_local_sdk import Configuration, FoundryLocalManager # @@ -130,7 +129,7 @@ def process_tool_calls(messages, response, client): # -async def main(): +def main(): # Initialize the Foundry Local SDK config = Configuration(app_name="foundry_local_samples") FoundryLocalManager.initialize(config) @@ -197,5 +196,5 @@ def ep_progress(ep_name: str, percent: float): if __name__ == "__main__": - asyncio.run(main()) + main() # diff --git a/samples/python/tutorial-voice-to-text/src/app.py b/samples/python/tutorial-voice-to-text/src/app.py index 46ea3926..8ebbba1b 100644 --- a/samples/python/tutorial-voice-to-text/src/app.py +++ b/samples/python/tutorial-voice-to-text/src/app.py @@ -1,11 +1,10 @@ # # -import asyncio from foundry_local_sdk import Configuration, FoundryLocalManager # -async def main(): +def main(): # # Initialize the Foundry Local SDK config = Configuration(app_name="foundry_local_samples") @@ -88,5 +87,5 @@ def ep_progress(ep_name: str, percent: float): if __name__ == "__main__": - asyncio.run(main()) + main() # diff --git a/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs b/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs index 2624f98a..7e70c683 100644 --- a/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs @@ -196,8 +196,10 @@ public async Task DirectTool_NoStreaming_Succeeds() await Assert.That(response.Choices[0].Message.ToolCalls?[0].FunctionCall?.Arguments).IsEqualTo(expectedArguments); // Add the response from invoking the tool call to the conversation and check if the model can continue correctly + var toolCallId = response.Choices[0].Message.ToolCalls?[0].Id; + await Assert.That(toolCallId).IsNotNull(); var toolCallResponse = "7 
x 6 = 42."; - messages.Add(new ChatMessage { Role = "tool", Content = toolCallResponse }); + messages.Add(new ChatMessage { Role = "tool", ToolCallId = toolCallId, Content = toolCallResponse }); // Prompt the model to continue the conversation after the tool call messages.Add(new ChatMessage { Role = "system", Content = "Respond only with the answer generated by the tool." }); @@ -300,8 +302,10 @@ public async Task DirectTool_Streaming_Succeeds() await Assert.That(toolCallResponse?.Choices[0].Message.ToolCalls?[0].FunctionCall?.Arguments).IsEqualTo(expectedArguments); // Add the response from invoking the tool call to the conversation and check if the model can continue correctly + var toolCallId = toolCallResponse?.Choices[0].Message.ToolCalls?[0].Id; + await Assert.That(toolCallId).IsNotNull(); var toolResponse = "7 x 6 = 42."; - messages.Add(new ChatMessage { Role = "tool", Content = toolResponse }); + messages.Add(new ChatMessage { Role = "tool", ToolCallId = toolCallId, Content = toolResponse }); // Prompt the model to continue the conversation after the tool call messages.Add(new ChatMessage { Role = "system", Content = "Respond only with the answer generated by the tool." }); diff --git a/sdk/js/src/openai/chatClient.ts b/sdk/js/src/openai/chatClient.ts index f844da41..e61efcfa 100644 --- a/sdk/js/src/openai/chatClient.ts +++ b/sdk/js/src/openai/chatClient.ts @@ -167,12 +167,15 @@ export class ChatClient { if (typeof tool.type !== 'string' || tool.type.trim() === '') { throw new Error('Each tool must have a "type" property that is a non-empty string.'); } - if (typeof tool.function !== 'object' || tool.function.description.trim() === '') { + if (!tool.function || typeof tool.function !== 'object') { throw new Error('Each tool must have a "function" property that is a non-empty object.'); } if (typeof tool.function.name !== 'string' || tool.function.name.trim() === '') { throw new Error('Each tool\'s function must have a "name" property that is a non-empty string.'); } + if (tool.function.description !== undefined && typeof tool.function.description !== 'string') { + throw new Error('Each tool\'s function "description", if provided, must be a string.'); + } } } diff --git a/sdk/python/test/README.md b/sdk/python/test/README.md index ded38f5b..4d60d557 100644 --- a/sdk/python/test/README.md +++ b/sdk/python/test/README.md @@ -4,7 +4,7 @@ This test suite mirrors the structure of the JS (`sdk_v2/js/test/`) and C# (`sdk ## Prerequisites -1. **Python 3.10+** (tested with 3.12/3.13) +1. **Python 3.11+** (tested with 3.12/3.13) 2. **SDK installed in editable mode** from the `sdk/python` directory: ```bash pip install -e . diff --git a/sdk/rust/docs/api.md b/sdk/rust/docs/api.md index a21c23a0..abfec76f 100644 --- a/sdk/rust/docs/api.md +++ b/sdk/rust/docs/api.md @@ -12,7 +12,6 @@ - [Model Catalog](#model-catalog) - [Catalog](#catalog) - [Model](#model) - - [ModelVariant](#modelvariant) - [OpenAI Clients](#openai-clients) - [ChatClient](#chatclient) - [ChatCompletionStream](#chatcompletionstream) @@ -131,15 +130,15 @@ pub struct Catalog { /* private fields */ } | `update_models` | `async fn update_models(&self) -> Result<(), FoundryLocalError>` | Refresh catalog if cache expired or invalidated. | | `get_models` | `async fn get_models(&self) -> Result>, FoundryLocalError>` | Return all known models. | | `get_model` | `async fn get_model(&self, alias: &str) -> Result, FoundryLocalError>` | Look up a model by alias. 
| -| `get_model_variant` | `async fn get_model_variant(&self, id: &str) -> Result, FoundryLocalError>` | Look up a variant by unique id. | -| `get_cached_models` | `async fn get_cached_models(&self) -> Result>, FoundryLocalError>` | Return only variants cached on disk. | -| `get_loaded_models` | `async fn get_loaded_models(&self) -> Result>, FoundryLocalError>` | Return model variants currently loaded in memory. | +| `get_model_variant` | `async fn get_model_variant(&self, id: &str) -> Result, FoundryLocalError>` | Look up a variant by unique id. | +| `get_cached_models` | `async fn get_cached_models(&self) -> Result>, FoundryLocalError>` | Return only variants cached on disk. | +| `get_loaded_models` | `async fn get_loaded_models(&self) -> Result>, FoundryLocalError>` | Return model variants currently loaded in memory. | --- ### Model -Groups one or more `ModelVariant`s sharing the same alias. By default, the cached variant is selected. +Groups one or more variants sharing the same alias. By default, the cached variant is selected. ```rust pub struct Model { /* private fields */ } @@ -149,8 +148,7 @@ pub struct Model { /* private fields */ } |--------|-----------|-------------| | `alias` | `fn alias(&self) -> &str` | Alias shared by all variants. | | `id` | `fn id(&self) -> &str` | Unique identifier of the selected variant. | -| `variants` | `fn variants(&self) -> &[Arc]` | All variants in this model. | -| `selected_variant` | `fn selected_variant(&self) -> &ModelVariant` | Currently selected variant. | +| `variants` | `fn variants(&self) -> Vec>` | All variants in this model. | | `select_variant` | `fn select_variant(&self, variant: &Model) -> Result<(), FoundryLocalError>` | Select a variant from `variants()`. | | `select_variant_by_id` | `fn select_variant_by_id(&self, id: &str) -> Result<(), FoundryLocalError>` | Select a variant by its unique id string. | | `is_cached` | `async fn is_cached(&self) -> Result` | Whether the selected variant is cached on disk. | @@ -165,31 +163,6 @@ pub struct Model { /* private fields */ } --- -### ModelVariant - -A single model variant — one specific id within an alias group. - -```rust -pub struct ModelVariant { /* private fields */ } -``` - -| Method | Signature | Description | -|--------|-----------|-------------| -| `info` | `fn info(&self) -> &ModelInfo` | Full metadata for this variant. | -| `id` | `fn id(&self) -> &str` | Unique identifier. | -| `alias` | `fn alias(&self) -> &str` | Alias shared with sibling variants. | -| `is_cached` | `async fn is_cached(&self) -> Result` | Whether cached locally. ⚠️ Full IPC per call — prefer `Catalog::get_cached_models()` for batch use. | -| `is_loaded` | `async fn is_loaded(&self) -> Result` | Whether currently loaded in memory. | -| `download` | `async fn download(&self, progress: Option) -> Result<(), FoundryLocalError>` | Download the variant. `F: FnMut(f64) + Send + 'static` — receives progress as a percentage (0.0–100.0). | -| `path` | `async fn path(&self) -> Result` | Local file-system path. | -| `load` | `async fn load(&self) -> Result<(), FoundryLocalError>` | Load into memory. | -| `unload` | `async fn unload(&self) -> Result` | Unload from memory. | -| `remove_from_cache` | `async fn remove_from_cache(&self) -> Result` | Remove from local cache. | -| `create_chat_client` | `fn create_chat_client(&self) -> ChatClient` | Create a ChatClient bound to this variant. | -| `create_audio_client` | `fn create_audio_client(&self) -> AudioClient` | Create an AudioClient bound to this variant. 
| - ---- - ## OpenAI Clients ### ChatClient From fd0d53ea9a14d81ba63cd675438e2ad1f803cb9a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 10 Apr 2026 17:44:07 +0000 Subject: [PATCH 38/83] Bump vite from 7.3.1 to 7.3.2 in /www (#600) Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 7.3.1 to 7.3.2.
Release notes (sourced from vite's releases): v7.3.2. Please refer to CHANGELOG.md for details.
Changelog (sourced from vite's changelog): 7.3.2 (2026-04-06), bug fixes; see the upstream changelog and commits for specifics.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- www/package-lock.json | 85 +++++++++++++++++++++++++++++++++++-------- www/package.json | 2 +- 2 files changed, 70 insertions(+), 17 deletions(-) diff --git a/www/package-lock.json b/www/package-lock.json index a5eb79e9..c0641c14 100644 --- a/www/package-lock.json +++ b/www/package-lock.json @@ -34,10 +34,10 @@ "tailwindcss": "^4.2.2", "tailwindcss-animate": "^1.0.7", "typescript": "^5.9.3", - "vite": "^7.2.7" + "vite": "^7.3.2" }, "engines": { - "node": ">=22.0.0", + "node": ">=22.0.0 <23.0.0", "npm": ">=9.0.0" } }, @@ -54,17 +54,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@emnapi/runtime": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.0.tgz", - "integrity": "sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw==", - "dev": true, - "license": "MIT", - "optional": true, - "dependencies": { - "tslib": "^2.4.0" - } - }, "node_modules/@esbuild/aix-ppc64": { "version": "0.27.3", "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.27.3.tgz", @@ -1775,6 +1764,70 @@ "node": ">=14.0.0" } }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/core": { + "version": "1.8.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/wasi-threads": "1.1.0", + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/runtime": { + "version": "1.8.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@emnapi/wasi-threads": { + "version": "1.1.0", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@napi-rs/wasm-runtime": { + "version": "1.1.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "@emnapi/core": "^1.7.1", + "@emnapi/runtime": "^1.7.1", + "@tybys/wasm-util": "^0.10.1" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/Brooooooklyn" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/@tybys/wasm-util": { + "version": "0.10.1", + "dev": true, + "inBundle": true, + "license": "MIT", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@tailwindcss/oxide-wasm32-wasi/node_modules/tslib": { + "version": "2.8.1", + "dev": true, + "inBundle": true, + "license": "0BSD", + "optional": true + }, "node_modules/@tailwindcss/oxide-win32-arm64-msvc": { "version": "4.2.2", "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.2.2.tgz", @@ -3757,9 +3810,9 @@ "license": "MIT" }, "node_modules/vite": { - "version": "7.3.1", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz", - "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==", + "version": "7.3.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.2.tgz", + "integrity": "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg==", "devOptional": true, "license": "MIT", "dependencies": { diff --git a/www/package.json b/www/package.json index 
5454236d..91a68140 100644 --- a/www/package.json +++ b/www/package.json @@ -47,7 +47,7 @@ "tailwindcss": "^4.2.2", "tailwindcss-animate": "^1.0.7", "typescript": "^5.9.3", - "vite": "^7.2.7" + "vite": "^7.3.2" }, "dependencies": { "@vercel/analytics": "^2.0.1" From d8cada4ed96203e0f91b6695643a5bcee29b382b Mon Sep 17 00:00:00 2001 From: Nenad Banfic <46795300+nenad1002@users.noreply.github.com> Date: Fri, 10 Apr 2026 16:23:38 -0700 Subject: [PATCH 39/83] Add C++ SDK Support (#544) - pipeline support missing, will come in future - ARM support will come soon --------- Co-authored-by: Your Name --- sdk/cpp/.clang-format | 47 ++ sdk/cpp/CMakeLists.txt | 151 ++++ sdk/cpp/CMakePresets.json | 109 +++ sdk/cpp/include/catalog.h | 66 ++ sdk/cpp/include/configuration.h | 68 ++ sdk/cpp/include/foundry_local.h | 18 + sdk/cpp/include/foundry_local_exception.h | 21 + sdk/cpp/include/foundry_local_manager.h | 87 ++ sdk/cpp/include/log_level.h | 37 + sdk/cpp/include/logger.h | 19 + sdk/cpp/include/model.h | 191 +++++ sdk/cpp/include/openai/openai_audio_client.h | 46 ++ sdk/cpp/include/openai/openai_chat_client.h | 116 +++ sdk/cpp/include/openai/openai_tool_types.h | 54 ++ sdk/cpp/sample/main.cpp | 357 +++++++++ sdk/cpp/src/catalog.cpp | 173 ++++ sdk/cpp/src/core.h | 114 +++ sdk/cpp/src/core_helpers.h | 146 ++++ sdk/cpp/src/core_interop_request.h | 46 ++ sdk/cpp/src/flcore_native.h | 39 + sdk/cpp/src/foundry_local_internal_core.h | 38 + sdk/cpp/src/foundry_local_manager.cpp | 191 +++++ sdk/cpp/src/model.cpp | 191 +++++ sdk/cpp/src/openai_audio_client.cpp | 70 ++ sdk/cpp/src/openai_chat_client.cpp | 148 ++++ sdk/cpp/src/parser.h | 312 ++++++++ sdk/cpp/test/catalog_test.cpp | 373 +++++++++ sdk/cpp/test/client_test.cpp | 745 ++++++++++++++++++ sdk/cpp/test/e2e_test.cpp | 574 ++++++++++++++ sdk/cpp/test/mock_core.h | 158 ++++ sdk/cpp/test/mock_object_factory.h | 64 ++ sdk/cpp/test/model_variant_test.cpp | 254 ++++++ sdk/cpp/test/parser_and_types_test.cpp | 417 ++++++++++ sdk/cpp/test/testdata/empty_models_list.json | 1 + .../test/testdata/malformed_models_list.json | 1 + .../missing_name_field_models_list.json | 12 + .../test/testdata/mixed_openai_and_local.json | 35 + sdk/cpp/test/testdata/real_models_list.json | 88 +++ .../test/testdata/single_cached_model.json | 1 + .../testdata/three_variants_one_model.json | 41 + .../test/testdata/valid_cached_models.json | 1 + .../test/testdata/valid_loaded_models.json | 1 + sdk/cpp/triplets/x64-windows-static-md.cmake | 3 + sdk/cpp/vcpkg-configuration.json | 6 + sdk/cpp/vcpkg.json | 10 + 45 files changed, 5640 insertions(+) create mode 100644 sdk/cpp/.clang-format create mode 100644 sdk/cpp/CMakeLists.txt create mode 100644 sdk/cpp/CMakePresets.json create mode 100644 sdk/cpp/include/catalog.h create mode 100644 sdk/cpp/include/configuration.h create mode 100644 sdk/cpp/include/foundry_local.h create mode 100644 sdk/cpp/include/foundry_local_exception.h create mode 100644 sdk/cpp/include/foundry_local_manager.h create mode 100644 sdk/cpp/include/log_level.h create mode 100644 sdk/cpp/include/logger.h create mode 100644 sdk/cpp/include/model.h create mode 100644 sdk/cpp/include/openai/openai_audio_client.h create mode 100644 sdk/cpp/include/openai/openai_chat_client.h create mode 100644 sdk/cpp/include/openai/openai_tool_types.h create mode 100644 sdk/cpp/sample/main.cpp create mode 100644 sdk/cpp/src/catalog.cpp create mode 100644 sdk/cpp/src/core.h create mode 100644 sdk/cpp/src/core_helpers.h create mode 100644 sdk/cpp/src/core_interop_request.h create mode 100644 
sdk/cpp/src/flcore_native.h create mode 100644 sdk/cpp/src/foundry_local_internal_core.h create mode 100644 sdk/cpp/src/foundry_local_manager.cpp create mode 100644 sdk/cpp/src/model.cpp create mode 100644 sdk/cpp/src/openai_audio_client.cpp create mode 100644 sdk/cpp/src/openai_chat_client.cpp create mode 100644 sdk/cpp/src/parser.h create mode 100644 sdk/cpp/test/catalog_test.cpp create mode 100644 sdk/cpp/test/client_test.cpp create mode 100644 sdk/cpp/test/e2e_test.cpp create mode 100644 sdk/cpp/test/mock_core.h create mode 100644 sdk/cpp/test/mock_object_factory.h create mode 100644 sdk/cpp/test/model_variant_test.cpp create mode 100644 sdk/cpp/test/parser_and_types_test.cpp create mode 100644 sdk/cpp/test/testdata/empty_models_list.json create mode 100644 sdk/cpp/test/testdata/malformed_models_list.json create mode 100644 sdk/cpp/test/testdata/missing_name_field_models_list.json create mode 100644 sdk/cpp/test/testdata/mixed_openai_and_local.json create mode 100644 sdk/cpp/test/testdata/real_models_list.json create mode 100644 sdk/cpp/test/testdata/single_cached_model.json create mode 100644 sdk/cpp/test/testdata/three_variants_one_model.json create mode 100644 sdk/cpp/test/testdata/valid_cached_models.json create mode 100644 sdk/cpp/test/testdata/valid_loaded_models.json create mode 100644 sdk/cpp/triplets/x64-windows-static-md.cmake create mode 100644 sdk/cpp/vcpkg-configuration.json create mode 100644 sdk/cpp/vcpkg.json diff --git a/sdk/cpp/.clang-format b/sdk/cpp/.clang-format new file mode 100644 index 00000000..751f30aa --- /dev/null +++ b/sdk/cpp/.clang-format @@ -0,0 +1,47 @@ +--- +Language: Cpp +BasedOnStyle: Microsoft + +# Match the existing project style +Standard: c++17 +ColumnLimit: 120 + +# Indentation +IndentWidth: 4 +TabWidth: 4 +UseTab: Never +AccessModifierOffset: -4 +IndentCaseLabels: false +NamespaceIndentation: All + +# Braces +BreakBeforeBraces: Custom +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterStruct: false + BeforeCatch: true + BeforeElse: true + IndentBraces: false + +# Alignment +AlignAfterOpenBracket: Align +AlignOperands: Align +AlignTrailingComments: true + +# Includes +SortIncludes: false +IncludeBlocks: Preserve + +# Misc +AllowShortFunctionsOnASingleLine: Inline +AllowShortIfStatementsOnASingleLine: Never +AllowShortLoopsOnASingleLine: false +AllowShortBlocksOnASingleLine: Empty +PointerAlignment: Left +SpaceAfterCStyleCast: false +SpaceBeforeParens: ControlStatements diff --git a/sdk/cpp/CMakeLists.txt b/sdk/cpp/CMakeLists.txt new file mode 100644 index 00000000..7e32b7fb --- /dev/null +++ b/sdk/cpp/CMakeLists.txt @@ -0,0 +1,151 @@ +cmake_minimum_required(VERSION 3.20) + +# VS hot reload policy (safe-guarded) +if (POLICY CMP0141) + cmake_policy(SET CMP0141 NEW) + if (MSVC) + set(CMAKE_MSVC_DEBUG_INFORMATION_FORMAT + "$<$:ProgramDatabase>") + endif() +endif() + +project(CppSdk LANGUAGES CXX) + +# ----------------------------- +# Windows-only + compiler guard +# ----------------------------- +if (NOT WIN32) + message(FATAL_ERROR "CppSdk is Windows-only for now (uses Win32/WIL headers).") +endif() + +# Accept MSVC OR clang-cl (Clang in MSVC compatibility mode). +# VS CMake Open-Folder often uses clang-cl by default. 
+if (NOT (MSVC OR (CMAKE_CXX_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))) + message(STATUS "CMAKE_CXX_COMPILER_ID = ${CMAKE_CXX_COMPILER_ID}") + message(STATUS "CMAKE_CXX_COMPILER = ${CMAKE_CXX_COMPILER}") + message(STATUS "CMAKE_CXX_SIMULATE_ID = ${CMAKE_CXX_SIMULATE_ID}") + message(FATAL_ERROR "Need MSVC or clang-cl (MSVC-compatible toolchain).") +endif() + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +# Optional: target Windows 10+ APIs (adjust if you need older) +add_compile_definitions(_WIN32_WINNT=0x0A00 WINVER=0x0A00) + +# ----------------------------- +# Dependencies (installed via vcpkg) +# ----------------------------- +find_package(nlohmann_json CONFIG REQUIRED) +find_package(wil CONFIG REQUIRED) +find_package(Microsoft.GSL CONFIG REQUIRED) +option(BUILD_TESTING "Build unit and end-to-end tests" ON) +if (BUILD_TESTING) + find_package(GTest CONFIG REQUIRED) +endif() + +# ----------------------------- +# SDK library (STATIC) +# List ONLY .cpp files here. +# ----------------------------- +add_library(CppSdk STATIC + src/model.cpp + src/catalog.cpp + src/openai_chat_client.cpp + src/openai_audio_client.cpp + src/foundry_local_manager.cpp +) + +target_include_directories(CppSdk + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/include + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/src +) + +target_link_libraries(CppSdk + PUBLIC + nlohmann_json::nlohmann_json + Microsoft.GSL::GSL + WIL::WIL +) + +# ----------------------------- +# Sample executable +# ----------------------------- +add_executable(CppSdkSample + sample/main.cpp +) + +target_link_libraries(CppSdkSample PRIVATE CppSdk) + +# ----------------------------- +# Unit tests +# ----------------------------- +if (BUILD_TESTING) + enable_testing() + + add_executable(CppSdkTests + test/parser_and_types_test.cpp + test/model_variant_test.cpp + test/catalog_test.cpp + test/client_test.cpp + ) + + target_include_directories(CppSdkTests + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/test + ${CMAKE_CURRENT_SOURCE_DIR}/src + ) + + target_compile_definitions(CppSdkTests PRIVATE FL_TESTS) + + target_link_libraries(CppSdkTests + PRIVATE + CppSdk + GTest::gtest_main + ) + + # Copy testdata files next to the test executable so file-based tests can find them. + add_custom_command(TARGET CppSdkTests POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy_directory + ${CMAKE_CURRENT_SOURCE_DIR}/test/testdata + $/testdata + ) + + include(GoogleTest) + gtest_discover_tests(CppSdkTests + WORKING_DIRECTORY $ + ) + + # ----------------------------- + # End-to-end tests (separate executable, requires Core DLL) + # Exercises the full public API against the real catalog. + # Tests that need model download are DISABLED by default; + # run with --gtest_also_run_disabled_tests locally. 
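+  # For example (illustrative commands; the exact binary path depends on the
+  # selected preset and generator):
+  #   ctest --preset x64-debug
+  #   out/build/x64-debug/CppSdkE2ETests.exe --gtest_also_run_disabled_tests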
+ # ----------------------------- + add_executable(CppSdkE2ETests + test/e2e_test.cpp + ) + + target_include_directories(CppSdkE2ETests + PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/test + ${CMAKE_CURRENT_SOURCE_DIR}/src + ) + + target_link_libraries(CppSdkE2ETests + PRIVATE + CppSdk + GTest::gtest_main + ) + + gtest_discover_tests(CppSdkE2ETests + WORKING_DIRECTORY $ + ) +endif() + +# Make Visual Studio start/debug this target by default +set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + PROPERTY VS_STARTUP_PROJECT CppSdkSample) diff --git a/sdk/cpp/CMakePresets.json b/sdk/cpp/CMakePresets.json new file mode 100644 index 00000000..ddead1b2 --- /dev/null +++ b/sdk/cpp/CMakePresets.json @@ -0,0 +1,109 @@ +{ + "version": 6, + "configurePresets": [ + { + "name": "windows-base", + "hidden": true, + "generator": "Ninja", + "binaryDir": "${sourceDir}/out/build/${presetName}", + "installDir": "${sourceDir}/out/install/${presetName}", + "toolchainFile": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake", + "cacheVariables": { + "CMAKE_C_COMPILER": "cl.exe", + "CMAKE_CXX_COMPILER": "cl.exe", + "VCPKG_OVERLAY_TRIPLETS": "${sourceDir}/triplets" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Windows" + } + }, + { + "name": "x64-debug", + "displayName": "MSVC x64 Debug", + "inherits": "windows-base", + "architecture": { + "value": "x64", + "strategy": "external" + }, + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug", + "VCPKG_TARGET_TRIPLET": "x64-windows-static-md" + } + }, + { + "name": "x64-release", + "displayName": "MSVC x64 Release", + "inherits": "windows-base", + "architecture": { + "value": "x64", + "strategy": "external" + }, + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Release", + "VCPKG_TARGET_TRIPLET": "x64-windows-static-md" + } + }, + { + "name": "linux-debug", + "displayName": "Linux Debug", + "generator": "Ninja", + "binaryDir": "${sourceDir}/out/build/${presetName}", + "installDir": "${sourceDir}/out/install/${presetName}", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Linux" + } + }, + { + "name": "macos-debug", + "displayName": "macOS Debug", + "generator": "Ninja", + "binaryDir": "${sourceDir}/out/build/${presetName}", + "installDir": "${sourceDir}/out/install/${presetName}", + "cacheVariables": { + "CMAKE_BUILD_TYPE": "Debug" + }, + "condition": { + "type": "equals", + "lhs": "${hostSystemName}", + "rhs": "Darwin" + } + } + ], + "buildPresets": [ + { + "name": "x64-debug", + "configurePreset": "x64-debug", + "displayName": "MSVC x64 Debug Build" + }, + { + "name": "x64-release", + "configurePreset": "x64-release", + "displayName": "MSVC x64 Release Build" + } + ], + "testPresets": [ + { + "name": "x64-debug", + "configurePreset": "x64-debug", + "displayName": "MSVC x64 Debug Tests", + "output": { + "outputOnFailure": true + } + }, + { + "name": "x64-release", + "configurePreset": "x64-release", + "displayName": "MSVC x64 Release Tests", + "output": { + "outputOnFailure": true + } + } + ] +} diff --git a/sdk/cpp/include/catalog.h b/sdk/cpp/include/catalog.h new file mode 100644 index 00000000..e4e5d17f --- /dev/null +++ b/sdk/cpp/include/catalog.h @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "model.h" + +namespace foundry_local::Internal { + struct IFoundryLocalCore; +} + +namespace foundry_local { + +class Catalog final { +public: + Catalog(const Catalog&) = delete; + Catalog& operator=(const Catalog&) = delete; + Catalog(Catalog&&) = delete; + Catalog& operator=(Catalog&&) = delete; + + explicit Catalog(gsl::not_null injected, + gsl::not_null logger); + + static std::unique_ptr Create(gsl::not_null core, + gsl::not_null logger) { + return std::make_unique(core, logger); + } + + const std::string& GetName() const { return name_; } + std::vector ListModels() const; + std::vector GetLoadedModels() const; + std::vector GetCachedModels() const; + + IModel* GetModel(std::string_view modelId) const; + IModel* GetModelVariant(std::string_view modelVariantId) const; + IModel& GetLatestVersion(const IModel& modelOrModelVariant) const; + + private: + struct CatalogState { + std::unordered_map byAlias; + std::unordered_map modelIdToModelVariant; + std::chrono::steady_clock::time_point lastFetch{}; + }; + + void UpdateModels() const; + std::shared_ptr GetState() const; + + mutable std::mutex mutex_; + mutable std::shared_ptr state_; + + gsl::not_null core_; + std::string name_; + gsl::not_null logger_; + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/configuration.h b/sdk/cpp/include/configuration.h new file mode 100644 index 00000000..21c40473 --- /dev/null +++ b/sdk/cpp/include/configuration.h @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once +#include +#include +#include +#include +#include +#include "log_level.h" + +namespace foundry_local { + + /// Optional configuration for the built-in web service. + struct WebServiceConfig { + // URL/s to bind the web service to. + // Default: 127.0.0.1:0 (random ephemeral port). + // Multiple URLs can be specified as a semicolon-separated list. + std::optional urls; + + // If the web service is running in a separate process, provide its URL here. + std::optional external_url; + }; + + struct Configuration { + // Construct a Configuration with just an application name. + // All other fields use their defaults. + Configuration(std::string name) : app_name(std::move(name)) {} + + // Your application name. MUST be set to a valid name. + std::string app_name; + + // Application data directory. + // Default: {home}/.{appname}, where {home} is the user's home directory and {appname} is the app_name value. + std::optional app_data_dir; + + // Model cache directory. + // Default: {appdata}/cache/models, where {appdata} is the app_data_dir value. + std::optional model_cache_dir; + + // Log directory. + // Default: {appdata}/logs + std::optional logs_dir; + + // Logging level. + // Valid values are: Verbose, Debug, Information, Warning, Error, Fatal. + // Default: LogLevel.Warning + LogLevel log_level = LogLevel::Warning; + + // Optional web service configuration. + std::optional web; + + // Additional settings that Foundry Local Core can consume. 
+ std::optional> additional_settings; + + void Validate() const { + if (app_name.empty()) { + throw std::invalid_argument("Configuration app_name must be set to a valid application name."); + } + + constexpr std::string_view invalidChars = R"(\/:?\"<>|)"; + if (app_name.find_first_of(invalidChars) != std::string::npos) { + throw std::invalid_argument("Configuration app_name value contains invalid characters."); + } + } + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/foundry_local.h b/sdk/cpp/include/foundry_local.h new file mode 100644 index 00000000..c16337e1 --- /dev/null +++ b/sdk/cpp/include/foundry_local.h @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// Umbrella header – includes every public header for convenience. +// Consumers may also include individual headers directly. + +#pragma once + +#include "configuration.h" +#include "foundry_local_exception.h" +#include "log_level.h" +#include "logger.h" +#include "model.h" +#include "catalog.h" +#include "foundry_local_manager.h" +#include "openai/openai_tool_types.h" +#include "openai/openai_chat_client.h" +#include "openai/openai_audio_client.h" diff --git a/sdk/cpp/include/foundry_local_exception.h b/sdk/cpp/include/foundry_local_exception.h new file mode 100644 index 00000000..c1fabecc --- /dev/null +++ b/sdk/cpp/include/foundry_local_exception.h @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include +#include + +#include "logger.h" + +namespace foundry_local { + + class Exception final : public std::runtime_error { + public: + explicit Exception(std::string message) : std::runtime_error(std::move(message)) {} + + Exception(std::string message, ILogger& logger) : std::runtime_error(std::move(message)) { + logger.Log(LogLevel::Error, what()); + } + }; +} // namespace foundry_local diff --git a/sdk/cpp/include/foundry_local_manager.h b/sdk/cpp/include/foundry_local_manager.h new file mode 100644 index 00000000..ce8725c6 --- /dev/null +++ b/sdk/cpp/include/foundry_local_manager.h @@ -0,0 +1,87 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once +#include +#include +#include + +#include +#include + +#include "configuration.h" +#include "logger.h" +#include "catalog.h" + +namespace foundry_local::Internal { + struct IFoundryLocalCore; +} + +namespace foundry_local { + + class Manager final { + public: + Manager(const Manager&) = delete; + Manager& operator=(const Manager&) = delete; + Manager(Manager&&) = delete; + Manager& operator=(Manager&&) = delete; + + /// Create the Manager singleton instance. + /// Throws if an instance has already been created. Call Destroy() first to release the current instance. + /// @param configuration Configuration to use. + /// @param logger Optional application logger. Pass nullptr to suppress log output. + static void Create(Configuration configuration, ILogger* logger = nullptr); + + /// Get the singleton instance. + /// Throws if Create() has not been called. + static Manager& Instance(); + + /// Returns true if the singleton instance has been created. + static bool IsInitialized() noexcept; + + /// Destroy the singleton instance, performing deterministic cleanup. + /// Unloads all loaded models and stops the web service if running. + /// After this call, Create() may be called again. 
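+        /// Illustrative lifecycle (a sketch mirroring the sample app; "MyApp"
+        /// and the logger are placeholders):
+        ///   StdLogger logger;
+        ///   Manager::Create(Configuration{"MyApp"}, &logger);
+        ///   auto& manager = Manager::Instance();
+        ///   // ... use manager.GetCatalog(), chat clients, etc. ...
+        ///   Manager::Destroy();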
+ static void Destroy() noexcept; + + const Catalog& GetCatalog() const; + Catalog& GetCatalog(); + + /// Start the optional built-in web service. + /// Provides an OpenAI-compatible REST endpoint. + /// After startup, GetUrls() returns the actual bound URL/s. + /// Requires Configuration::Web to be set. + void StartWebService(); + + /// Stop the web service if started. + void StopWebService(); + + /// Returns the bound URL/s after StartWebService(), or empty if not started. + gsl::span GetUrls() const noexcept; + + /// Ensure execution providers are downloaded and registered. + /// Once downloaded, EPs are not re-downloaded unless a new version is available. + void EnsureEpsDownloaded() const; + + private: + explicit Manager(Configuration configuration, ILogger* logger); + ~Manager(); + + struct Deleter { + void operator()(Manager* p) const noexcept { delete p; } + }; + + void Initialize(); + void Cleanup() noexcept; + + static std::unique_ptr instance_; + + Configuration config_; + NullLogger defaultLogger_; + std::unique_ptr core_; + std::unique_ptr catalog_; + ILogger* logger_; + std::vector urls_; + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/log_level.h b/sdk/cpp/include/log_level.h new file mode 100644 index 00000000..75dfe667 --- /dev/null +++ b/sdk/cpp/include/log_level.h @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include + +namespace foundry_local { + + enum class LogLevel { + Verbose, + Debug, + Information, + Warning, + Error, + Fatal + }; + + inline std::string_view LogLevelToString(LogLevel level) noexcept { + switch (level) { + case LogLevel::Verbose: + return "Verbose"; + case LogLevel::Debug: + return "Debug"; + case LogLevel::Information: + return "Information"; + case LogLevel::Warning: + return "Warning"; + case LogLevel::Error: + return "Error"; + case LogLevel::Fatal: + return "Fatal"; + } + return "Unknown"; + } + +} // namespace foundry_local diff --git a/sdk/cpp/include/logger.h b/sdk/cpp/include/logger.h new file mode 100644 index 00000000..d0b05b4e --- /dev/null +++ b/sdk/cpp/include/logger.h @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once +#include +#include "log_level.h" + +namespace foundry_local { + class ILogger { + public: + virtual ~ILogger() = default; + virtual void Log(LogLevel level, std::string_view message) noexcept = 0; + }; + + class NullLogger final : public ILogger { + public: + void Log(LogLevel, std::string_view) noexcept override {} + }; +} // namespace foundry_local diff --git a/sdk/cpp/include/model.h b/sdk/cpp/include/model.h new file mode 100644 index 00000000..9238cf12 --- /dev/null +++ b/sdk/cpp/include/model.h @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "logger.h" + +namespace foundry_local { + class OpenAIChatClient; + class OpenAIAudioClient; +} + +namespace foundry_local::Internal { + struct IFoundryLocalCore; +} + +namespace foundry_local { +#ifdef FL_TESTS + namespace Testing { + struct MockObjectFactory; + } +#endif + + using DownloadProgressCallback = std::function; + + class IModel { + public: + virtual ~IModel() = default; + + virtual const std::string& GetId() const = 0; + virtual const std::string& GetAlias() const = 0; + virtual bool IsLoaded() const = 0; + virtual bool IsCached() const = 0; + virtual const std::filesystem::path& GetPath() const = 0; + virtual void Download(DownloadProgressCallback onProgress = nullptr) = 0; + virtual void Load() = 0; + virtual void Unload() = 0; + virtual void RemoveFromCache() = 0; + + protected: + struct CoreAccess { + gsl::not_null core; + std::string modelName; + gsl::not_null logger; + }; + + virtual CoreAccess GetCoreAccess() const = 0; + + friend class OpenAIChatClient; + friend class OpenAIAudioClient; + }; + + enum class DeviceType { + Invalid, + CPU, + GPU, + NPU + }; + + struct Runtime { + DeviceType device_type = DeviceType::Invalid; + std::string execution_provider; + }; + + struct PromptTemplate { + std::string system; + std::string user; + std::string assistant; + std::string prompt; + }; + + // Forward declarations + class ModelVariant; + + struct Parameter { + std::string name; + std::optional value; + }; + + struct ModelSettings { + std::vector parameters; + }; + + struct ModelInfo { + std::string id; + std::string name; + uint32_t version = 0; + std::string alias; + std::optional display_name; + std::string provider_type; + std::string uri; + std::string model_type; + std::optional prompt_template; + std::optional publisher; + std::optional model_settings; + std::optional license; + std::optional license_description; + bool cached = false; + std::optional task; + std::optional runtime; + std::optional file_size_mb; + std::optional supports_tool_calling; + std::optional max_output_tokens; + std::optional min_fl_version; + int64_t created_at_unix = 0; + }; + + class ModelVariant final : public IModel { + public: + explicit ModelVariant(gsl::not_null core, ModelInfo info, + gsl::not_null logger); + + const ModelInfo& GetInfo() const; + const std::filesystem::path& GetPath() const override; + void Download(DownloadProgressCallback onProgress = nullptr) override; + void Load() override; + + bool IsLoaded() const override; + bool IsCached() const override; + void Unload() override; + void RemoveFromCache() override; + + const std::string& GetId() const noexcept override; + const std::string& GetAlias() const noexcept override; + uint32_t GetVersion() const noexcept; + + protected: + CoreAccess GetCoreAccess() const override; + + private: + static std::string MakeModelParamRequest(std::string_view modelId); + + ModelInfo info_; + mutable std::filesystem::path cachedPath_; + gsl::not_null core_; + gsl::not_null logger_; + + friend class Model; + }; + + class Model final : public IModel { + public: + explicit Model(gsl::not_null core, gsl::not_null logger); + + gsl::span GetAllModelVariants() const; + + bool IsLoaded() const override { return SelectedVariant().IsLoaded(); } + bool IsCached() const override { return SelectedVariant().IsCached(); } + const std::filesystem::path& GetPath() const override { return SelectedVariant().GetPath(); } + void 
Download(DownloadProgressCallback onProgress = nullptr) override { + SelectedVariant().Download(std::move(onProgress)); + } + void Load() override { SelectedVariant().Load(); } + void Unload() override { SelectedVariant().Unload(); } + void RemoveFromCache() override { SelectedVariant().RemoveFromCache(); } + + const std::string& GetId() const override; + const std::string& GetAlias() const override; + void SelectVariant(const ModelVariant& variant) const; + + protected: + CoreAccess GetCoreAccess() const override; + + private: + ModelVariant& SelectedVariant(); + const ModelVariant& SelectedVariant() const; + + gsl::not_null core_; + + std::vector variants_; + mutable const ModelVariant* selectedVariant_ = nullptr; + gsl::not_null logger_; + + friend class Catalog; +#ifdef FL_TESTS + friend struct Testing::MockObjectFactory; +#endif + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/openai/openai_audio_client.h b/sdk/cpp/include/openai/openai_audio_client.h new file mode 100644 index 00000000..ac1ce719 --- /dev/null +++ b/sdk/cpp/include/openai/openai_audio_client.h @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include +#include +#include +#include + +#include + +namespace foundry_local::Internal { + struct IFoundryLocalCore; +} + +namespace foundry_local { + class ILogger; + class IModel; + + struct AudioCreateTranscriptionResponse { + std::string text; + }; + + class OpenAIAudioClient final { + public: + explicit OpenAIAudioClient(const IModel& model); + + /// Returns the model ID this client was created for. + const std::string& GetModelId() const noexcept { return modelId_; } + + AudioCreateTranscriptionResponse TranscribeAudio(const std::filesystem::path& audioFilePath) const; + + using StreamCallback = std::function; + void TranscribeAudioStreaming(const std::filesystem::path& audioFilePath, const StreamCallback& onChunk) const; + + private: + OpenAIAudioClient(gsl::not_null core, std::string_view modelId, + gsl::not_null logger); + + std::string modelId_; + gsl::not_null core_; + gsl::not_null logger_; + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/openai/openai_chat_client.h b/sdk/cpp/include/openai/openai_chat_client.h new file mode 100644 index 00000000..c16b9481 --- /dev/null +++ b/sdk/cpp/include/openai/openai_chat_client.h @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "openai_tool_types.h" + +namespace foundry_local::Internal { + struct IFoundryLocalCore; +} + +namespace foundry_local { + class ILogger; + class IModel; + + /// Reason the model stopped generating tokens. + enum class FinishReason { + None, + Stop, + Length, + ToolCalls, + ContentFilter + }; + + struct ChatMessage { + std::string role; + std::string content; + std::optional tool_call_id; ///< For role="tool" responses + std::vector tool_calls; + }; + + struct ChatChoice { + int index = 0; + FinishReason finish_reason = FinishReason::None; + + // non-streaming + std::optional message; + + // streaming + std::optional delta; + }; + + struct ChatCompletionCreateResponse { + int64_t created = 0; + std::string id; + + bool is_delta = false; + bool successful = false; + int http_status_code = 0; + + std::vector choices; + + /// Returns the object type string. Derived from is_delta no allocation. 
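+        /// (Concretely: "chat.completion.chunk" when is_delta is true and
+        /// "chat.completion" otherwise; both are string literals, so no allocation.)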
+ const char* GetObject() const noexcept { return is_delta ? "chat.completion.chunk" : "chat.completion"; } + + /// Returns the created timestamp as an ISO 8601 string. + /// Computed lazily, only allocates when called. + std::string GetCreatedAtIso() const; + }; + + struct ChatSettings { + std::optional frequency_penalty; + std::optional max_tokens; + std::optional n; + std::optional temperature; + std::optional presence_penalty; + std::optional random_seed; + std::optional top_k; + std::optional top_p; + std::optional tool_choice; + }; + + class OpenAIChatClient final { + public: + explicit OpenAIChatClient(const IModel& model); + + /// Returns the model ID this client was created for. + const std::string& GetModelId() const noexcept { return modelId_; } + + ChatCompletionCreateResponse CompleteChat(gsl::span messages, + const ChatSettings& settings) const; + + ChatCompletionCreateResponse CompleteChat(gsl::span messages, + gsl::span tools, + const ChatSettings& settings) const; + + using StreamCallback = std::function; + void CompleteChatStreaming(gsl::span messages, const ChatSettings& settings, + const StreamCallback& onChunk) const; + + void CompleteChatStreaming(gsl::span messages, gsl::span tools, + const ChatSettings& settings, const StreamCallback& onChunk) const; + + private: + OpenAIChatClient(gsl::not_null core, std::string_view modelId, + gsl::not_null logger); + + std::string BuildChatRequestJson(gsl::span messages, gsl::span tools, + const ChatSettings& settings, bool stream) const; + + std::string modelId_; + gsl::not_null core_; + gsl::not_null logger_; + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/openai/openai_tool_types.h b/sdk/cpp/include/openai/openai_tool_types.h new file mode 100644 index 00000000..105bc49e --- /dev/null +++ b/sdk/cpp/include/openai/openai_tool_types.h @@ -0,0 +1,54 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include +#include +#include +#include + +namespace foundry_local { + + /// JSON Schema property definition used to describe tool function parameters. + struct PropertyDefinition { + std::string type; + std::optional description; + std::optional> properties; + std::optional> required; + }; + + /// Describes a function that a model may call. + struct FunctionDefinition { + std::string name; + std::optional description; + std::optional parameters; + }; + + /// A tool definition following the OpenAI tool calling spec. + struct ToolDefinition { + std::string type = "function"; + FunctionDefinition function; + }; + + /// A parsed function call returned by the model. + struct FunctionCall { + std::string name; + std::string arguments; ///< JSON string of the arguments + }; + + /// A tool call returned by the model in a chat completion response. + struct ToolCall { + std::string id; + std::string type; + std::optional function_call; + }; + + /// Controls whether and how the model calls tools. + enum class ToolChoiceKind { + Auto, + None, + Required + }; + +} // namespace foundry_local diff --git a/sdk/cpp/sample/main.cpp b/sdk/cpp/sample/main.cpp new file mode 100644 index 00000000..8ccc39d8 --- /dev/null +++ b/sdk/cpp/sample/main.cpp @@ -0,0 +1,357 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include "foundry_local.h" + +#include +#include +#include + +using namespace foundry_local; + +// --------------------------------------------------------------------------- +// Logger +// --------------------------------------------------------------------------- +class StdLogger final : public ILogger { +public: + void Log(LogLevel level, std::string_view message) noexcept override { + const char* tag = "UNK"; + switch (level) { + case LogLevel::Information: + tag = "INFO"; + break; + case LogLevel::Warning: + tag = "WARN"; + break; + case LogLevel::Error: + tag = "ERROR"; + break; + default: + tag = "DEBUG"; + break; + } + std::cout << "[FoundryLocal][" << tag << "] " << message << "\n"; + } +}; + +// --------------------------------------------------------------------------- +// Example 1 – Browse the catalog +// --------------------------------------------------------------------------- +void BrowseCatalog(Manager& manager) { + std::cout << "\n=== Example 1: Browse Catalog ===\n"; + + auto& catalog = manager.GetCatalog(); + std::cout << "Catalog: " << catalog.GetName() << "\n"; + + auto models = catalog.ListModels(); + std::cout << "Models in catalog: " << models.size() << "\n"; + + for (const auto* model : models) { + std::cout << " - " << model->GetAlias() << " (" << model->GetId() << ")" + << " cached=" << (model->IsCached() ? "yes" : "no") + << " loaded=" << (model->IsLoaded() ? "yes" : "no") << "\n"; + + auto* concreteModel = dynamic_cast(model); + if (!concreteModel) continue; + for (const auto& variant : concreteModel->GetAllModelVariants()) { + const auto& info = variant.GetInfo(); + std::cout << " variant: " << info.name << " v" << info.version + << " cached=" << (variant.IsCached() ? "yes" : "no"); + if (info.display_name) + std::cout << " display=\"" << *info.display_name << "\""; + if (info.publisher) + std::cout << " publisher=" << *info.publisher; + if (info.license) + std::cout << " license=" << *info.license; + if (info.runtime) { + std::cout << " device=" + << (info.runtime->device_type == DeviceType::GPU ? "GPU" + : info.runtime->device_type == DeviceType::NPU ? "NPU" + : "CPU") + << " ep=" << info.runtime->execution_provider; + } + if (info.file_size_mb) + std::cout << " size=" << *info.file_size_mb << "MB"; + if (info.supports_tool_calling) + std::cout << " tools=" << (*info.supports_tool_calling ? 
"yes" : "no"); + std::cout << "\n"; + } + } +} + +// --------------------------------------------------------------------------- +// Example 2 – Download, load, chat (non-streaming), then unload +// --------------------------------------------------------------------------- +void ChatNonStreaming(Manager& manager, const std::string& alias) { + std::cout << "\n=== Example 2: Non-Streaming Chat ===\n"; + + auto& catalog = manager.GetCatalog(); + + auto* model = catalog.GetModel(alias); + if (!model) { + std::cerr << "Model '" << alias << "' not found in catalog.\n"; + return; + } + + model->Download([](float pct) { std::cout << "\rDownloading: " << pct << "% " << std::flush; }); + std::cout << "\n"; + + model->Load(); + + if (model->IsLoaded()) { + std::cout << "Model is loaded and ready for inference.\n"; + } + else { + std::cerr << "Failed to load model.\n"; + return; + } + + OpenAIChatClient chat(*model); + + std::vector messages = {{"user", "What is the capital of Croatia?"}}; + + ChatSettings settings; + settings.temperature = 0.7f; + settings.max_tokens = 128; + + auto response = chat.CompleteChat(messages, settings); + + if (!response.choices.empty() && response.choices[0].message) { + std::cout << "Assistant: " << response.choices[0].message->content << "\n"; + } + + model->Unload(); + std::cout << "Model unloaded.\n"; +} + +// --------------------------------------------------------------------------- +// Example 3 – Streaming chat +// --------------------------------------------------------------------------- +void ChatStreaming(Manager& manager, const std::string& alias) { + std::cout << "\n=== Example 3: Streaming Chat ===\n"; + + auto& catalog = manager.GetCatalog(); + + auto* model = catalog.GetModel(alias); + if (!model) { + std::cerr << "Model '" << alias << "' not found in catalog.\n"; + return; + } + + model->Load(); + + OpenAIChatClient chat(*model); + + std::vector messages = {{"user", "Explain quantum computing in three sentences."}}; + + ChatSettings settings; + settings.temperature = 0.9f; + settings.max_tokens = 256; + + std::cout << "Assistant: "; + chat.CompleteChatStreaming(messages, settings, [](const ChatCompletionCreateResponse& chunk) { + if (chunk.choices.empty()) + return; + const auto& choice = chunk.choices[0]; + if (choice.delta && !choice.delta->content.empty()) { + std::cout << choice.delta->content << std::flush; + } + }); + std::cout << "\n"; + + model->Unload(); +} + +// --------------------------------------------------------------------------- +// Example 4 – Audio transcription +// --------------------------------------------------------------------------- +void TranscribeAudio(Manager& manager, const std::string& alias, const std::string& audioPath) { + std::cout << "\n=== Example 4: Audio Transcription ===\n"; + + auto& catalog = manager.GetCatalog(); + + auto* model = catalog.GetModel(alias); + if (!model) { + std::cerr << "Model '" << alias << "' not found in catalog.\n"; + return; + } + + model->Download([](float pct) { std::cout << "\rDownloading: " << pct << "% " << std::flush; }); + std::cout << "\n"; + + model->Load(); + + OpenAIAudioClient audio(*model); + + std::cout << "Transcribing: " << audioPath << "\n"; + auto result = audio.TranscribeAudio(audioPath); + std::cout << "Transcription: " << result.text << "\n"; + + // Streaming alternative: + audio.TranscribeAudioStreaming( + audioPath, [](const AudioCreateTranscriptionResponse& chunk) { std::cout << chunk.text << std::flush; }); + std::cout << "\n"; + + model->Unload(); +} + +// 
--------------------------------------------------------------------------- +// Example 5 – Tool calling +// --------------------------------------------------------------------------- +// Tool calling lets you define functions that the model can decide to invoke. +// The flow is: +// 1. You describe your tools (functions) as ToolDefinition objects. +// 2. You send a chat request with those tools attached. +// 3. The model may respond with finish_reason = ToolCalls and include +// ToolCall objects in the message, each containing the function name +// and a JSON string of arguments. +// 4. YOUR CODE executes the real function using those arguments. +// 5. You add a message with role = "tool" containing the result, then +// send the conversation back so the model can formulate a final answer. +// +// This lets the model "reach out" to external capabilities (calculators, +// databases, APIs, etc.) while keeping the actual execution in your code. +// --------------------------------------------------------------------------- +void ChatWithToolCalling(Manager& manager, const std::string& alias) { + std::cout << "\n=== Example 5: Tool Calling ===\n"; + + auto& catalog = manager.GetCatalog(); + + auto* model = catalog.GetModel(alias); + if (!model) { + std::cerr << "Model '" << alias << "' not found in catalog.\n"; + return; + } + + model->Download([](float pct) { std::cout << "\rDownloading: " << pct << "% " << std::flush; }); + std::cout << "\n"; + + model->Load(); + std::cout << "Model loaded: " << model->GetAlias() << "\n"; + + OpenAIChatClient chat(*model); + + // ── Step 1: Define tools ────────────────────────────────────────────── + // Each tool describes a function the model can call. The PropertyDefinition + // mirrors a JSON Schema so the model knows what arguments are expected. + std::vector tools = { + {"function", + FunctionDefinition{"multiply_numbers", // function name + "Multiply two integers and return the result.", // description + PropertyDefinition{ + "object", // top-level schema type + std::nullopt, // no top-level description + std::unordered_map{ + {"first", PropertyDefinition{"integer", "The first number"}}, + {"second", PropertyDefinition{"integer", "The second number"}}}, + std::vector{"first", "second"} // both params are required + }}}}; + + // ── Step 2: Send the first request ──────────────────────────────────── + // tool_choice = Required forces the model to always produce a tool call. + // In production you'd typically use Auto so the model decides on its own. + std::vector messages = { + {"system", "You are a helpful AI assistant. Use the provided tools when appropriate."}, + {"user", "What is 7 multiplied by 6?"}}; + + ChatSettings settings; + settings.temperature = 0.0f; + settings.max_tokens = 500; + settings.tool_choice = ToolChoiceKind::Required; + + std::cout << "Sending chat request with tool definitions...\n"; + auto response = chat.CompleteChat(messages, tools, settings); + + // ── Step 3: Inspect the model's tool call ───────────────────────────── + if (response.choices.empty()) { + std::cerr << "No choices returned.\n"; + model->Unload(); + return; + } + + const auto& firstChoice = response.choices[0]; + + // The model signals it wants to call a tool via finish_reason == ToolCalls. + if (firstChoice.finish_reason == FinishReason::ToolCalls && firstChoice.message && + !firstChoice.message->tool_calls.empty()) { + const auto& tc = firstChoice.message->tool_calls[0]; + std::cout << "Model requested tool call:\n" + << " function : " << (tc.function_call ? 
tc.function_call->name : "(none)") << "\n" + << " arguments: " << (tc.function_call ? tc.function_call->arguments : "{}") << "\n"; + + // ── Step 4: Execute the tool locally ────────────────────────────── + // Parse the arguments JSON and perform the actual computation. + // In a real application this could be a web request, DB query, etc. + std::string toolResult; + if (tc.function_call && tc.function_call->name == "multiply_numbers") { + // The arguments string is JSON, e.g. {"first": 7, "second": 6} + // For brevity we hard-code the expected result here. + toolResult = "7 x 6 = 42."; + std::cout << " result : " << toolResult << "\n"; + } + else { + toolResult = "Unknown tool."; + } + + // ── Step 5: Feed the tool result back ───────────────────────────── + // First, append the assistant message that contains the tool_calls + // so the model sees its own request in the conversation history. + messages.push_back({"assistant", "", std::nullopt, firstChoice.message->tool_calls}); + + // Then add a "tool" message with the result, referencing the + // tool_call_id so the model can match it to the call it made. + messages.push_back({"tool", toolResult, tc.id}); + + // Switch to Auto so the model can answer without calling tools again. + settings.tool_choice = ToolChoiceKind::Auto; + + std::cout << "\nSending tool result back to model...\n"; + auto followUp = chat.CompleteChat(messages, tools, settings); + + if (!followUp.choices.empty() && followUp.choices[0].message) { + std::cout << "Assistant: " << followUp.choices[0].message->content << "\n"; + } + } + else { + // The model answered directly without a tool call. + if (firstChoice.message) + std::cout << "Assistant: " << firstChoice.message->content << "\n"; + } + + model->Unload(); + std::cout << "Model unloaded.\n"; +} + +// --------------------------------------------------------------------------- +// main +// --------------------------------------------------------------------------- +int main() { + try { + StdLogger logger; + Manager::Create({"SampleApp"}, &logger); + auto& manager = Manager::Instance(); + + // 1. Browse the full catalog + BrowseCatalog(manager); + + // 2. Non-streaming chat (change alias to a model in your catalog) + ChatNonStreaming(manager, "phi-3.5-mini"); + + // 3. Streaming chat + ChatStreaming(manager, "phi-3.5-mini"); + + // 4. Audio transcription (uncomment and set a valid alias + wav path) + // TranscribeAudio(manager, "whisper-small", R"(C:\path\to\your\audio.wav)"); + + // 5. Tool calling (define tools, let the model call them, feed results back) + ChatWithToolCalling(manager, "phi-3.5-mini"); + + Manager::Destroy(); + return 0; + } + catch (const std::exception& ex) { + std::cerr << "Fatal: " << ex.what() << std::endl; + Manager::Destroy(); + return 1; + } +} diff --git a/sdk/cpp/src/catalog.cpp b/sdk/cpp/src/catalog.cpp new file mode 100644 index 00000000..82aae3be --- /dev/null +++ b/sdk/cpp/src/catalog.cpp @@ -0,0 +1,173 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include +#include +#include +#include + +#include +#include + +#include "foundry_local.h" +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "core_helpers.h" +#include "parser.h" +#include "logger.h" + +namespace foundry_local { + + using namespace detail; + + Catalog::Catalog(gsl::not_null injected, gsl::not_null logger) + : state_(std::make_shared()), core_(injected), logger_(logger) { + auto response = core_->call("get_catalog_name", *logger_, /*dataArgument*/ nullptr); + if (response.HasError()) { + throw Exception(std::string("Error getting catalog name: ") + response.error, *logger_); + } + name_ = std::move(response.data); + } + + std::shared_ptr Catalog::GetState() const { + std::lock_guard lock(mutex_); + return state_; + } + + std::vector Catalog::GetLoadedModels() const { + UpdateModels(); + auto state = GetState(); + return CollectVariantsByIds(state->modelIdToModelVariant, GetLoadedModelsInternal(core_, *logger_)); + } + + std::vector Catalog::GetCachedModels() const { + UpdateModels(); + auto state = GetState(); + return CollectVariantsByIds(state->modelIdToModelVariant, GetCachedModelsInternal(core_, *logger_)); + } + + IModel* Catalog::GetModel(std::string_view modelId) const { + UpdateModels(); + auto state = GetState(); + auto it = state->byAlias.find(std::string(modelId)); + if (it != state->byAlias.end()) { + return const_cast(&it->second); + } + return nullptr; + } + + std::vector Catalog::ListModels() const { + UpdateModels(); + auto state = GetState(); + + std::vector out; + out.reserve(state->byAlias.size()); + for (auto& kv : state->byAlias) + out.emplace_back(const_cast(&kv.second)); + + return out; + } + + void Catalog::UpdateModels() const { + using clock = std::chrono::steady_clock; + + // TODO: make this configurable + constexpr auto kRefreshInterval = std::chrono::hours(6); + + const auto now = clock::now(); + { + auto current = GetState(); + if (current->lastFetch.time_since_epoch() != clock::duration::zero() && + (now - current->lastFetch) < kRefreshInterval) { + return; + } + } + + // Fetch outside the lock so the core call doesn't block readers. + const auto response = core_->call("get_model_list", *logger_); + if (response.HasError()) { + throw Exception(std::string("Error getting model list: ") + response.error, *logger_); + } + const auto arr = nlohmann::json::parse(response.data); + + // Build the new state locally no reader can see partial data. + auto newState = std::make_shared(); + + for (const auto& j : arr) { + const std::string alias = j.at("alias").get(); + + auto it = newState->byAlias.find(alias); + if (it == newState->byAlias.end()) { + Model m(core_, logger_); + it = newState->byAlias.emplace(alias, std::move(m)).first; + } + + ModelInfo modelVariantInfo; + from_json(j, modelVariantInfo); + ModelVariant modelVariant(core_, modelVariantInfo, logger_); + it->second.variants_.emplace_back(std::move(modelVariant)); + } + + // Build the lookup map from pointers into the owning Model::variants_ vectors, + // and auto-select the first variant for each model. + for (auto& [alias, model] : newState->byAlias) { + for (auto& variant : model.variants_) { + newState->modelIdToModelVariant.emplace(variant.GetId(), &variant); + } + if (!model.variants_.empty()) { + model.selectedVariant_ = &model.variants_.front(); + } + } + + newState->lastFetch = now; + + // Atomic swap readers that already hold the old shared_ptr keep it alive. 
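+        // (Publish pattern: GetState() copies state_ under the same mutex, so a
+        // reader sees either the old snapshot or the new one, never a half-built
+        // map; the old CatalogState is destroyed when its last reader releases it.)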
+ { + std::lock_guard lock(mutex_); + state_ = std::move(newState); + } + } + + IModel* Catalog::GetModelVariant(std::string_view id) const { + UpdateModels(); + auto state = GetState(); + auto it = state->modelIdToModelVariant.find(std::string(id)); + if (it != state->modelIdToModelVariant.end()) { + return it->second; + } + return nullptr; + } + + IModel& Catalog::GetLatestVersion(const IModel& modelOrModelVariant) const { + const auto& alias = modelOrModelVariant.GetAlias(); + auto* imodel = GetModel(alias); + if (!imodel) { + throw Exception("Model " + alias + " not found in catalog.", *logger_); + } + + auto* model = dynamic_cast(imodel); + if (!model) { + throw Exception("Model " + alias + " is not a Model instance.", *logger_); + } + + // Resolve the variant name from the IModel's ID by looking it up in the catalog. + const auto& id = modelOrModelVariant.GetId(); + auto state = GetState(); + auto it = state->modelIdToModelVariant.find(id); + if (it == state->modelIdToModelVariant.end()) { + throw Exception("Model " + alias + " does not have a " + id + " variant.", *logger_); + } + + const auto& targetName = it->second->GetInfo().name; + for (auto& v : model->GetAllModelVariants()) { + // The variants returned by the catalog are sorted by version, so the first match should always be the + // latest version. + if (v.GetInfo().name == targetName) { + return const_cast(v); + } + } + + throw Exception("Model " + alias + " does not have a " + id + " variant.", *logger_); + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/core.h b/sdk/cpp/src/core.h new file mode 100644 index 00000000..10feee5b --- /dev/null +++ b/sdk/cpp/src/core.h @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// Core DLL interop loads Microsoft.AI.Foundry.Local.Core.dll at runtime. +// Internal header, not part of the public API. + +#pragma once + +#include +#include +#include +#include +#include + +#include + +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "flcore_native.h" +#include "logger.h" + +namespace foundry_local { + + namespace { + inline std::filesystem::path GetExecutableDir() { + auto exePath = wil::GetModuleFileNameW(nullptr); + return std::filesystem::path(exePath.get()).parent_path(); + } + + inline void* RequireProc(HMODULE mod, const char* name) { + if (void* p = ::GetProcAddress(mod, name)) + return p; + throw std::runtime_error(std::string("GetProcAddress failed for ") + name); + } + } // namespace + + struct Core : Internal::IFoundryLocalCore { + using ResponseHandle = std::unique_ptr; + + Core() = default; + ~Core() = default; + + void LoadEmbedded() { LoadFromPath(GetExecutableDir() / "Microsoft.AI.Foundry.Local.Core.dll"); } + + void unload() override { + module_.reset(); + execCmd_ = nullptr; + execCbCmd_ = nullptr; + freeResCmd_ = nullptr; + } + + CoreResponse call(std::string_view command, ILogger& logger, const std::string* dataArgument = nullptr, + NativeCallbackFn callback = nullptr, void* data = nullptr) const override { + if (!module_ || !execCmd_ || !execCbCmd_ || !freeResCmd_) { + throw Exception("Core is not loaded. Cannot call command: " + std::string(command), logger); + } + + RequestBuffer request{}; + request.Command = command.empty() ? 
nullptr : command.data(); + request.CommandLength = static_cast(command.size()); + + if (dataArgument && !dataArgument->empty()) { + request.Data = dataArgument->data(); + request.DataLength = static_cast(dataArgument->size()); + } + + ResponseBuffer response{}; + auto safeDeleter = [fn = freeResCmd_](ResponseBuffer* buf) { + if (fn) + fn(buf); + }; + std::unique_ptr responseGuard(&response, safeDeleter); + + if (callback != nullptr) { + execCbCmd_(&request, &response, reinterpret_cast(callback), data); + } + else { + execCmd_(&request, &response); + } + + CoreResponse result; + if (response.Error && response.ErrorLength > 0) { + result.error.assign(static_cast(response.Error), response.ErrorLength); + return result; + } + + if (response.Data && response.DataLength > 0) { + result.data.assign(static_cast(response.Data), response.DataLength); + } + + return result; + } + + private: + wil::unique_hmodule module_; + execute_command_fn execCmd_{}; + execute_command_with_callback_fn execCbCmd_{}; + free_response_fn freeResCmd_{}; + + void LoadFromPath(const std::filesystem::path& path) { + wil::unique_hmodule m(::LoadLibraryW(path.c_str())); + if (!m) + throw std::runtime_error("LoadLibraryW failed"); + + execCmd_ = reinterpret_cast(RequireProc(m.get(), "execute_command")); + execCbCmd_ = reinterpret_cast( + RequireProc(m.get(), "execute_command_with_callback")); + freeResCmd_ = reinterpret_cast(RequireProc(m.get(), "free_response")); + + module_ = std::move(m); + } + }; + +} // namespace foundry_local diff --git a/sdk/cpp/src/core_helpers.h b/sdk/cpp/src/core_helpers.h new file mode 100644 index 00000000..282d01e7 --- /dev/null +++ b/sdk/cpp/src/core_helpers.h @@ -0,0 +1,146 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// Internal helpers shared across implementation files. +// Not part of the public API. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include + +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "logger.h" +#include "model.h" + +namespace foundry_local::detail { + + // Wrap Params: { ... } into a request object + inline nlohmann::json MakeParams(nlohmann::json params) { + return nlohmann::json{{"Params", std::move(params)}}; + } + + // Most common: Params { "Model": } + inline nlohmann::json MakeModelParams(std::string_view model) { + return MakeParams(nlohmann::json{{"Model", std::string(model)}}); + } + + // Serialize + call + inline CoreResponse CallWithJson(Internal::IFoundryLocalCore* core, std::string_view command, + const nlohmann::json& requestJson, ILogger& logger) { + std::string payload = requestJson.dump(); + return core->call(command, logger, &payload); + } + + // Serialize + call with native callback + inline CoreResponse CallWithJsonAndCallback(Internal::IFoundryLocalCore* core, std::string_view command, + const nlohmann::json& requestJson, ILogger& logger, + NativeCallbackFn callback, void* userData) { + std::string payload = requestJson.dump(); + return core->call(command, logger, &payload, callback, userData); + } + + // Serialize + call with a streaming chunk handler. + // Wraps the caller-supplied onChunk with the native callback boilerplate + // (null/length checks, exception capture, rethrow after the call). + // The errorContext string is used to prefix any core-layer error message. 
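+    //
+    // Usage sketch (this mirrors the call sites in openai_chat_client.cpp and
+    // openai_audio_client.cpp):
+    //
+    //   detail::CallWithStreamingCallback(
+    //       core, "chat_completions", payload, logger,
+    //       [](const std::string& chunk) { /* handle one JSON chunk */ },
+    //       "Streaming chat completion failed: ");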
+ inline CoreResponse CallWithStreamingCallback(Internal::IFoundryLocalCore* core, std::string_view command, + const std::string& payload, ILogger& logger, + const std::function& onChunk, + std::string_view errorContext) { + struct State { + const std::function* cb; + std::exception_ptr exception; + } state{&onChunk, nullptr}; + + auto nativeCallback = [](void* data, int32_t len, void* user) { + if (!data || len <= 0) + return; + + auto* st = static_cast(user); + if (st->exception) + return; + + try { + std::string chunk(static_cast(data), static_cast(len)); + (*(st->cb))(chunk); + } + catch (...) { + st->exception = std::current_exception(); + } + }; + + auto response = core->call(command, logger, &payload, +nativeCallback, &state); + if (response.HasError()) { + throw Exception(std::string(errorContext) + response.error, logger); + } + + if (state.exception) { + std::rethrow_exception(state.exception); + } + + return response; + } + + // Overload: allow Params object directly + inline CoreResponse CallWithParams(Internal::IFoundryLocalCore* core, std::string_view command, + const nlohmann::json& params, ILogger& logger) { + return CallWithJson(core, command, MakeParams(params), logger); + } + + // Overload: no payload + inline CoreResponse CallNoArgs(Internal::IFoundryLocalCore* core, std::string_view command, ILogger& logger) { + return core->call(command, logger, nullptr); + } + + inline std::vector GetLoadedModelsInternal(Internal::IFoundryLocalCore* core, ILogger& logger) { + auto response = core->call("list_loaded_models", logger); + if (response.HasError()) { + throw Exception("Failed to get loaded models: " + response.error, logger); + } + try { + auto parsed = nlohmann::json::parse(response.data); + return parsed.get>(); + } + catch (const nlohmann::json::exception& e) { + throw Exception("Catalog::GetLoadedModelsInternal() JSON error: " + std::string(e.what()), logger); + } + } + + inline std::vector GetCachedModelsInternal(Internal::IFoundryLocalCore* core, ILogger& logger) { + auto response = core->call("get_cached_models", logger); + if (response.HasError()) { + throw Exception("Failed to get cached models: " + response.error, logger); + } + + try { + auto parsed = nlohmann::json::parse(response.data); + return parsed.get>(); + } + catch (const nlohmann::json::exception& e) { + throw Exception("Catalog::GetCachedModelsInternal JSON error: " + std::string(e.what()), logger); + } + } + + inline std::vector CollectVariantsByIds( + const std::unordered_map& modelIdToModelVariant, std::vector ids) { + std::vector out; + out.reserve(ids.size()); + + for (const auto& id : ids) { + auto it = modelIdToModelVariant.find(id); + if (it != modelIdToModelVariant.end()) { + out.emplace_back(it->second); + } + } + return out; + } + +} // namespace foundry_local::detail diff --git a/sdk/cpp/src/core_interop_request.h b/sdk/cpp/src/core_interop_request.h new file mode 100644 index 00000000..67ef1590 --- /dev/null +++ b/sdk/cpp/src/core_interop_request.h @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
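+//
+// Small builder for the JSON envelope the core layer expects:
+//   { "Params": { "Key": "value", ... } }
+// Illustrative use (values here are hypothetical; see Manager::Initialize for
+// a real call site):
+//   CoreInteropRequest req("initialize");
+//   req.AddParam("AppName", "SampleApp").AddParam("LogLevel", "Information");
+//   std::string json = req.ToJson();   // {"Params":{"AppName":"SampleApp",...}}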
+ +#pragma once +#include +#include +#include +#include + +namespace foundry_local { + + class CoreInteropRequest final { + public: + explicit CoreInteropRequest(std::string command) : command_(std::move(command)) {} + + CoreInteropRequest& AddParam(std::string_view key, std::string_view value) { + params_[std::string(key)] = std::string(value); + return *this; + } + + template <typename T> CoreInteropRequest& AddParam(std::string_view key, const T& value) { + params_[std::string(key)] = value; + return *this; + } + + CoreInteropRequest& AddJsonParam(std::string_view key, const nlohmann::json& jsonValue) { + params_[std::string(key)] = jsonValue.dump(); + return *this; + } + + std::string ToJson() const { + nlohmann::json wrapper; + if (!params_.empty()) { + wrapper["Params"] = params_; + } + return wrapper.dump(); + } + + const std::string& Command() const noexcept { return command_; } + + private: + std::string command_; + nlohmann::json params_; + }; + +} // namespace foundry_local diff --git a/sdk/cpp/src/flcore_native.h b/sdk/cpp/src/flcore_native.h new file mode 100644 index 00000000..b0778116 --- /dev/null +++ b/sdk/cpp/src/flcore_native.h @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once +#include +#include + +extern "C" +{ + // Layout must match C# structs exactly +#pragma pack(push, 8) + struct RequestBuffer { + const void* Command; + int32_t CommandLength; + const void* Data; + int32_t DataLength; + }; + + struct ResponseBuffer { + void* Data; + int32_t DataLength; + void* Error; + int32_t ErrorLength; + }; + + // Callback signature: void(*)(void* data, int length, void* userData) + using UserCallbackFn = void(__cdecl*)(void*, int32_t, void*); + + // Exported function pointer types + using execute_command_fn = void(__cdecl*)(RequestBuffer*, ResponseBuffer*); + using execute_command_with_callback_fn = void(__cdecl*)(RequestBuffer*, ResponseBuffer*, void* /*callback*/, + void* /*userData*/); + using free_response_fn = void(__cdecl*)(ResponseBuffer*); + + static_assert(std::is_standard_layout<RequestBuffer>::value, "RequestBuffer must be standard layout"); + static_assert(std::is_standard_layout<ResponseBuffer>::value, "ResponseBuffer must be standard layout"); + +#pragma pack(pop) +} diff --git a/sdk/cpp/src/foundry_local_internal_core.h b/sdk/cpp/src/foundry_local_internal_core.h new file mode 100644 index 00000000..1e5af79d --- /dev/null +++ b/sdk/cpp/src/foundry_local_internal_core.h @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include +#include +#include +#include "logger.h" + +namespace foundry_local { + + /// Native callback signature used by the core DLL interop. + /// Parameters: (data, dataLength, userData). + using NativeCallbackFn = void (*)(void*, int32_t, void*); + + /// Value returned by IFoundryLocalCore::call(). + /// On success, `data` contains the response payload and `error` is empty. + /// On failure, `error` contains the error message from the core layer.
+ struct CoreResponse { + std::string data; + std::string error; + + bool HasError() const noexcept { return !error.empty(); } + }; + + namespace Internal { + struct IFoundryLocalCore { + virtual ~IFoundryLocalCore() = default; + + virtual CoreResponse call(std::string_view command, ILogger& logger, + const std::string* dataArgument = nullptr, NativeCallbackFn callback = nullptr, + void* data = nullptr) const = 0; + virtual void unload() = 0; + }; + + } // namespace Internal +} // namespace foundry_local \ No newline at end of file diff --git a/sdk/cpp/src/foundry_local_manager.cpp b/sdk/cpp/src/foundry_local_manager.cpp new file mode 100644 index 00000000..e24be049 --- /dev/null +++ b/sdk/cpp/src/foundry_local_manager.cpp @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include +#include +#include +#include + +#include + +#include "foundry_local.h" +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "core_interop_request.h" +#include "core.h" +#include "logger.h" + +namespace foundry_local { + +std::unique_ptr Manager::instance_; + +void Manager::Create(Configuration configuration, ILogger* logger) { + if (instance_) { + NullLogger fallback; + ILogger& log = logger ? *logger : fallback; + throw Exception("Manager has already been created. Call Destroy() first.", log); + } + + // Use a local to ensure full initialization before assigning to the static instance. + std::unique_ptr manager( + new Manager(std::move(configuration), logger)); + instance_ = std::move(manager); +} + +Manager& Manager::Instance() { + if (!instance_) { + throw Exception("Manager has not been created. Call Create() first."); + } + return *instance_; +} + +bool Manager::IsInitialized() noexcept { + return instance_ != nullptr; +} + +void Manager::Destroy() noexcept { + instance_.reset(); +} + +Manager::Manager(Configuration configuration, ILogger* logger) + : config_(std::move(configuration)), core_(std::make_unique()), + logger_(logger ? logger : &defaultLogger_) { + static_cast(core_.get())->LoadEmbedded(); + Initialize(); + catalog_ = Catalog::Create(core_.get(), logger_); +} + +Manager::~Manager() { + Cleanup(); +} + +void Manager::Cleanup() noexcept { + // Unload all loaded models before tearing down. 
+ if (catalog_) { + try { + auto loadedModels = catalog_->GetLoadedModels(); + for (auto* variant : loadedModels) { + try { + variant->Unload(); + } + catch (const std::exception& ex) { + logger_->Log(LogLevel::Warning, + std::string("Error unloading model during destruction: ") + ex.what()); + } + } + } + catch (const std::exception& ex) { + logger_->Log(LogLevel::Warning, + std::string("Error retrieving loaded models during destruction: ") + ex.what()); + } + } + + if (!urls_.empty()) { + try { + StopWebService(); + } + catch (const std::exception& ex) { + logger_->Log(LogLevel::Warning, + std::string("Error stopping web service during destruction: ") + ex.what()); + } + } + } + + const Catalog& Manager::GetCatalog() const { + return *catalog_; + } + + Catalog& Manager::GetCatalog() { + return *catalog_; + } + + void Manager::StartWebService() { + if (!config_.web) { + throw Exception("Web service configuration was not provided.", *logger_); + } + + auto response = core_->call("start_service", *logger_); + if (response.HasError()) { + throw Exception(std::string("Error starting web service: ") + response.error, *logger_); + } + auto arr = nlohmann::json::parse(response.data); + urls_ = arr.get>(); + } + + void Manager::StopWebService() { + if (!config_.web) { + throw Exception("Web service configuration was not provided.", *logger_); + } + + auto response = core_->call("stop_service", *logger_); + if (response.HasError()) { + throw Exception(std::string("Error stopping web service: ") + response.error, *logger_); + } + urls_.clear(); + } + + gsl::span Manager::GetUrls() const noexcept { + return urls_; + } + + void Manager::EnsureEpsDownloaded() const { + auto response = core_->call("ensure_eps_downloaded", *logger_); + if (response.HasError()) { + throw Exception(std::string("Error ensuring execution providers downloaded: ") + response.error, *logger_); + } + } + + void Manager::Initialize() { + config_.Validate(); + + CoreInteropRequest initReq("initialize"); + initReq.AddParam("AppName", config_.app_name); + initReq.AddParam("LogLevel", std::string(LogLevelToString(config_.log_level))); + + if (config_.app_data_dir) { + initReq.AddParam("AppDataDir", config_.app_data_dir->string()); + } + if (config_.model_cache_dir) { + initReq.AddParam("ModelCacheDir", config_.model_cache_dir->string()); + } + if (config_.logs_dir) { + initReq.AddParam("LogsDir", config_.logs_dir->string()); + } + if (config_.web && config_.web->urls) { + initReq.AddParam("WebServiceUrls", *config_.web->urls); + } + if (config_.additional_settings) { + for (const auto& [key, value] : *config_.additional_settings) { + if (!key.empty()) { + initReq.AddParam(key, value); + } + } + } + + std::string initJson = initReq.ToJson(); + auto initResponse = core_->call(initReq.Command(), *logger_, &initJson); + if (initResponse.HasError()) { + throw Exception(std::string("Manager::Initialize failed: ") + initResponse.error, *logger_); + } + + if (config_.model_cache_dir) { + auto cacheResponse = core_->call("get_cache_directory", *logger_); + if (cacheResponse.HasError()) { + throw Exception(std::string("Manager::Initialize failed: ") + cacheResponse.error, + *logger_); + } + + if (cacheResponse.data != config_.model_cache_dir->string()) { + CoreInteropRequest setReq("set_cache_directory"); + setReq.AddParam("Directory", config_.model_cache_dir->string()); + std::string setJson = setReq.ToJson(); + auto setResponse = core_->call(setReq.Command(), *logger_, &setJson); + if (setResponse.HasError()) { + throw 
Exception(std::string("Manager::Initialize failed: ") + setResponse.error, + *logger_); + } + } + } + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/model.cpp b/sdk/cpp/src/model.cpp new file mode 100644 index 00000000..d4240ecb --- /dev/null +++ b/sdk/cpp/src/model.cpp @@ -0,0 +1,191 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "foundry_local.h" +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "core_helpers.h" +#include "logger.h" + +namespace foundry_local { + + using namespace detail; + + /// ModelVariant + + ModelVariant::ModelVariant(gsl::not_null core, ModelInfo info, + gsl::not_null logger) + : core_(core), info_(std::move(info)), logger_(logger) {} + + const ModelInfo& ModelVariant::GetInfo() const { + return info_; + } + + void ModelVariant::RemoveFromCache() { + auto response = CallWithJson(core_, "remove_cached_model", MakeModelParams(info_.name), *logger_); + if (response.HasError()) { + throw Exception("Error removing model from cache [" + info_.name + "]: " + response.error, *logger_); + } + cachedPath_.clear(); + } + + void ModelVariant::Unload() { + auto response = CallWithJson(core_, "unload_model", MakeModelParams(info_.name), *logger_); + if (response.HasError()) { + throw Exception("Error unloading model [" + info_.name + "]: " + response.error, *logger_); + } + } + + bool ModelVariant::IsLoaded() const { + std::vector loadedModelIds = GetLoadedModelsInternal(core_, *logger_); + for (const auto& id : loadedModelIds) { + if (id == info_.id) { + return true; + } + } + + return false; + } + + bool ModelVariant::IsCached() const { + auto cachedModels = GetCachedModelsInternal(core_, *logger_); + for (const auto& id : cachedModels) { + if (id == info_.id) { + return true; + } + } + return false; + } + + void ModelVariant::Download(DownloadProgressCallback onProgress) { + if (IsCached()) { + logger_->Log(LogLevel::Information, "Model '" + info_.name + "' is already cached, skipping download."); + return; + } + + if (onProgress) { + struct ProgressState { + DownloadProgressCallback* cb; + ILogger* logger; + } state{&onProgress, logger_}; + + auto nativeCallback = [](void* data, int32_t len, void* user) { + if (!data || len <= 0) + return; + auto* st = static_cast(user); + std::string perc(static_cast(data), static_cast(len)); + try { + float value = std::stof(perc); + (*(st->cb))(value); + } + catch (...) 
{ + st->logger->Log(LogLevel::Warning, "Failed to parse download progress: " + perc); + } + }; + + auto response = CallWithJsonAndCallback(core_, "download_model", MakeModelParams(info_.name), *logger_, + +nativeCallback, &state); + if (response.HasError()) { + throw Exception("Error downloading model [" + info_.name + "]: " + response.error, *logger_); + } + } + else { + auto response = CallWithJson(core_, "download_model", MakeModelParams(info_.name), *logger_); + if (response.HasError()) { + throw Exception("Error downloading model [" + info_.name + "]: " + response.error, *logger_); + } + } + } + + void ModelVariant::Load() { + auto response = CallWithJson(core_, "load_model", MakeModelParams(info_.name), *logger_); + if (response.HasError()) { + throw Exception("Error loading model [" + info_.name + "]: " + response.error, *logger_); + } + } + + const std::filesystem::path& ModelVariant::GetPath() const { + if (cachedPath_.empty()) { + auto response = CallWithJson(core_, "get_model_path", MakeModelParams(info_.name), *logger_); + if (response.HasError()) { + throw Exception("Error getting model path [" + info_.name + "]: " + response.error, *logger_); + } + cachedPath_ = std::filesystem::path(response.data); + } + return cachedPath_; + } + + const std::string& ModelVariant::GetId() const noexcept { + return info_.id; + } + + const std::string& ModelVariant::GetAlias() const noexcept { + return info_.alias; + } + + uint32_t ModelVariant::GetVersion() const noexcept { + return info_.version; + } + + IModel::CoreAccess ModelVariant::GetCoreAccess() const { + return {core_, info_.name, logger_}; + } + + /// Model + + Model::Model(gsl::not_null core, gsl::not_null logger) + : core_(core), logger_(logger) {} + + ModelVariant& Model::SelectedVariant() { + if (!selectedVariant_) { + throw Exception("Model has no selected variant", *logger_); + } + return *const_cast(selectedVariant_); + } + + const ModelVariant& Model::SelectedVariant() const { + if (!selectedVariant_) { + throw Exception("Model has no selected variant", *logger_); + } + return *selectedVariant_; + } + + gsl::span Model::GetAllModelVariants() const { + return variants_; + } + + const std::string& Model::GetId() const { + return SelectedVariant().GetId(); + } + + const std::string& Model::GetAlias() const { + return SelectedVariant().GetAlias(); + } + + void Model::SelectVariant(const ModelVariant& variant) const { + const auto& targetId = variant.GetId(); + auto it = std::find_if(variants_.begin(), variants_.end(), + [&](const ModelVariant& v) { return v.GetId() == targetId; }); + + if (it == variants_.end()) { + throw Exception("Model " + GetAlias() + " does not have a " + variant.GetId() + " variant.", *logger_); + } + + selectedVariant_ = &(*it); + } + + IModel::CoreAccess Model::GetCoreAccess() const { + return SelectedVariant().GetCoreAccess(); + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/openai_audio_client.cpp b/sdk/cpp/src/openai_audio_client.cpp new file mode 100644 index 00000000..d4409d1f --- /dev/null +++ b/sdk/cpp/src/openai_audio_client.cpp @@ -0,0 +1,70 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
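+//
+// OpenAI-style audio transcription client. Both entry points wrap the
+// "audio_transcribe" core command; the streaming variant surfaces partial
+// text through the chunk callback. Minimal usage sketch, assuming a loaded
+// Whisper-style model and an existing audio file:
+//
+//   OpenAIAudioClient audio(*model);            // throws if model not loaded
+//   auto out = audio.TranscribeAudio("a.wav");  // out.text holds the result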
+ +#include +#include +#include +#include + +#include +#include + +#include "foundry_local.h" +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "core_interop_request.h" +#include "core_helpers.h" +#include "logger.h" + +namespace foundry_local { + + OpenAIAudioClient::OpenAIAudioClient(gsl::not_null core, std::string_view modelId, + gsl::not_null logger) + : core_(core), modelId_(modelId), logger_(logger) {} + + AudioCreateTranscriptionResponse OpenAIAudioClient::TranscribeAudio( + const std::filesystem::path& audioFilePath) const { + nlohmann::json openAiReq = {{"Model", modelId_}, {"FileName", audioFilePath.string()}}; + CoreInteropRequest req("audio_transcribe"); + req.AddParam("OpenAICreateRequest", openAiReq.dump()); + + std::string json = req.ToJson(); + + auto coreResponse = core_->call(req.Command(), *logger_, &json); + if (coreResponse.HasError()) { + throw Exception("Audio transcription failed: " + coreResponse.error, *logger_); + } + + AudioCreateTranscriptionResponse response; + response.text = std::move(coreResponse.data); + + return response; + } + + void OpenAIAudioClient::TranscribeAudioStreaming(const std::filesystem::path& audioFilePath, + const StreamCallback& onChunk) const { + nlohmann::json openAiReq = {{"Model", modelId_}, {"FileName", audioFilePath.string()}}; + CoreInteropRequest req("audio_transcribe"); + req.AddParam("OpenAICreateRequest", openAiReq.dump()); + + std::string json = req.ToJson(); + + detail::CallWithStreamingCallback( + core_, req.Command(), json, *logger_, + [&onChunk](const std::string& text) { + AudioCreateTranscriptionResponse chunk; + chunk.text = text; + onChunk(chunk); + }, + "Streaming audio transcription failed: "); + } + + OpenAIAudioClient::OpenAIAudioClient(const IModel& model) + : OpenAIAudioClient(model.GetCoreAccess().core, model.GetCoreAccess().modelName, model.GetCoreAccess().logger) { + if (!model.IsLoaded()) { + throw Exception("Model " + model.GetCoreAccess().modelName + " is not loaded. Call Load() first.", + *model.GetCoreAccess().logger); + } + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/openai_chat_client.cpp b/sdk/cpp/src/openai_chat_client.cpp new file mode 100644 index 00000000..5c19a0ba --- /dev/null +++ b/sdk/cpp/src/openai_chat_client.cpp @@ -0,0 +1,148 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
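+//
+// OpenAI-style chat client. BuildChatRequestJson assembles the request once
+// (messages, optional tools, sampling settings) and both the blocking and
+// streaming paths send it through the "chat_completions" core command. Note
+// that top_k has no top-level field in the OpenAI schema, so it is carried in
+// "metadata", and max_tokens maps to "max_completion_tokens".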
+ +#include +#include +#include +#include + +#include +#include + +#include "foundry_local.h" +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "core_interop_request.h" +#include "core_helpers.h" +#include "parser.h" +#include "logger.h" + +namespace foundry_local { + + std::string ChatCompletionCreateResponse::GetCreatedAtIso() const { + if (created == 0) + return {}; + std::time_t t = static_cast(created); + std::tm tm{}; +#ifdef _WIN32 + gmtime_s(&tm, &t); +#else + gmtime_r(&t, &tm); +#endif + char buf[32]; + std::strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%SZ", &tm); + return buf; + } + + OpenAIChatClient::OpenAIChatClient(gsl::not_null core, std::string_view modelId, + gsl::not_null logger) + : core_(core), modelId_(modelId), logger_(logger) {} + + std::string OpenAIChatClient::BuildChatRequestJson(gsl::span messages, + gsl::span tools, + const ChatSettings& settings, bool stream) const { + nlohmann::json jMessages = nlohmann::json::array(); + for (const auto& msg : messages) { + nlohmann::json jMsg = {{"role", msg.role}, {"content", msg.content}}; + if (msg.tool_call_id) + jMsg["tool_call_id"] = *msg.tool_call_id; + if (!msg.tool_calls.empty()) { + nlohmann::json jToolCalls = nlohmann::json::array(); + for (const auto& tc : msg.tool_calls) { + nlohmann::json jtc; + to_json(jtc, tc); + jToolCalls.push_back(std::move(jtc)); + } + jMsg["tool_calls"] = std::move(jToolCalls); + } + jMessages.push_back(std::move(jMsg)); + } + + nlohmann::json req = {{"model", modelId_}, {"messages", std::move(jMessages)}, {"stream", stream}}; + + if (!tools.empty()) { + nlohmann::json jTools = nlohmann::json::array(); + for (const auto& tool : tools) { + nlohmann::json jTool; + to_json(jTool, tool); + jTools.push_back(std::move(jTool)); + } + req["tools"] = std::move(jTools); + } + + if (settings.tool_choice) + req["tool_choice"] = ParsingUtils::tool_choice_to_string(*settings.tool_choice); + if (settings.top_k) + req["metadata"] = {{"top_k", *settings.top_k}}; + if (settings.frequency_penalty) + req["frequency_penalty"] = *settings.frequency_penalty; + if (settings.presence_penalty) + req["presence_penalty"] = *settings.presence_penalty; + if (settings.max_tokens) + req["max_completion_tokens"] = *settings.max_tokens; + if (settings.n) + req["n"] = *settings.n; + if (settings.temperature) + req["temperature"] = *settings.temperature; + if (settings.top_p) + req["top_p"] = *settings.top_p; + if (settings.random_seed) + req["seed"] = *settings.random_seed; + + return req.dump(); + } + + ChatCompletionCreateResponse OpenAIChatClient::CompleteChat(gsl::span messages, + const ChatSettings& settings) const { + return CompleteChat(messages, {}, settings); + } + + ChatCompletionCreateResponse OpenAIChatClient::CompleteChat(gsl::span messages, + gsl::span tools, + const ChatSettings& settings) const { + std::string openAiReqJson = BuildChatRequestJson(messages, tools, settings, /*stream=*/false); + + CoreInteropRequest req("chat_completions"); + req.AddParam("OpenAICreateRequest", openAiReqJson); + + std::string json = req.ToJson(); + auto response = core_->call(req.Command(), *logger_, &json); + if (response.HasError()) { + throw Exception("Chat completion failed: " + response.error, *logger_); + } + + return nlohmann::json::parse(response.data).get(); + } + + void OpenAIChatClient::CompleteChatStreaming(gsl::span messages, const ChatSettings& settings, + const StreamCallback& onChunk) const { + CompleteChatStreaming(messages, {}, settings, onChunk); + } + + void 
OpenAIChatClient::CompleteChatStreaming(gsl::span messages, + gsl::span tools, const ChatSettings& settings, + const StreamCallback& onChunk) const { + std::string openAiReqJson = BuildChatRequestJson(messages, tools, settings, /*stream=*/true); + + CoreInteropRequest req("chat_completions"); + req.AddParam("OpenAICreateRequest", openAiReqJson); + std::string json = req.ToJson(); + + detail::CallWithStreamingCallback( + core_, req.Command(), json, *logger_, + [&onChunk](const std::string& chunk) { + auto parsed = nlohmann::json::parse(chunk).get(); + onChunk(parsed); + }, + "Streaming chat completion failed: "); + } + + OpenAIChatClient::OpenAIChatClient(const IModel& model) + : OpenAIChatClient(model.GetCoreAccess().core, model.GetCoreAccess().modelName, model.GetCoreAccess().logger) { + if (!model.IsLoaded()) { + throw Exception("Model " + model.GetCoreAccess().modelName + " is not loaded. Call Load() first.", + *model.GetCoreAccess().logger); + } + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/parser.h b/sdk/cpp/src/parser.h new file mode 100644 index 00000000..3596579c --- /dev/null +++ b/sdk/cpp/src/parser.h @@ -0,0 +1,312 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once +#include +#include +#include "foundry_local.h" +#include + +namespace foundry_local { + + class ParsingUtils { + public: + static DeviceType parse_device_type(std::string_view v) { + if (v == "CPU") { + return DeviceType::CPU; + } + if (v == "NPU") { + return DeviceType::NPU; + } + if (v == "GPU") { + return DeviceType::GPU; + } + return DeviceType::Invalid; + } + + static FinishReason parse_finish_reason(std::string_view v) { + if (v == "stop") + return FinishReason::Stop; + if (v == "length") + return FinishReason::Length; + if (v == "tool_calls") + return FinishReason::ToolCalls; + if (v == "content_filter") + return FinishReason::ContentFilter; + return FinishReason::None; + } + + static std::string get_string_or_empty(const nlohmann::json& j, const char* key) { + auto it = j.find(key); + std::string out = ""; + if (it != j.end() && it->is_string()) { + out = it->get(); + } + return out; + } + + static std::optional get_opt_string(const nlohmann::json& j, const char* key) { + auto it = j.find(key); + if (it == j.end() || it->is_null()) { + return std::nullopt; + } + if (it->is_string()) { + return it->get(); + } + return std::nullopt; + } + + static std::optional get_opt_int(const nlohmann::json& j, const char* key) { + auto it = j.find(key); + if (it == j.end() || it->is_null()) { + return std::nullopt; + } + if (it->is_number_integer()) { + return it->get(); + } + return std::nullopt; + } + + static std::optional get_opt_i64(const nlohmann::json& j, const char* key) { + auto it = j.find(key); + if (it == j.end() || it->is_null()) { + return std::nullopt; + } + if (it->is_number_integer()) { + return it->get(); + } + return std::nullopt; + } + + static std::optional get_opt_bool(const nlohmann::json& j, const char* key) { + auto it = j.find(key); + if (it == j.end() || it->is_null()) { + return std::nullopt; + } + if (it->is_boolean()) { + return it->get(); + } + return std::nullopt; + } + + static std::string tool_choice_to_string(ToolChoiceKind kind) { + switch (kind) { + case ToolChoiceKind::Auto: + return "auto"; + case ToolChoiceKind::None: + return "none"; + case ToolChoiceKind::Required: + return "required"; + } + return "auto"; + } + }; + + // ---------- from_json / to_json (ADL overloads for nlohmann::json) ---------- + + 
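+    //
+    // nlohmann::json finds these overloads via argument-dependent lookup
+    // because they live in the same namespace as the types they convert,
+    // which is what makes expressions like these work (template arguments
+    // written out here for illustration):
+    //
+    //   ModelInfo info = j.get<ModelInfo>();   // calls from_json(j, info)
+    //   nlohmann::json out = someToolCall;     // calls to_json(out, someToolCall)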
inline void from_json(const nlohmann::json& j, Runtime& r) { + std::string deviceType; + j.at("deviceType").get_to(deviceType); + j.at("executionProvider").get_to(r.execution_provider); + + r.device_type = ParsingUtils::parse_device_type(std::move(deviceType)); + } + + inline void from_json(const nlohmann::json& j, PromptTemplate& p) { + p.system = ParsingUtils::get_string_or_empty(j, "system"); + p.user = ParsingUtils::get_string_or_empty(j, "user"); + p.assistant = ParsingUtils::get_string_or_empty(j, "assistant"); + p.prompt = ParsingUtils::get_string_or_empty(j, "prompt"); + } + + inline void from_json(const nlohmann::json& j, Parameter& p) { + j.at("name").get_to(p.name); + p.value = ParsingUtils::get_opt_string(j, "value"); + } + + inline void from_json(const nlohmann::json& j, ModelSettings& ms) { + ms.parameters.clear(); + if (auto it = j.find("parameters"); it != j.end() && it->is_array()) { + ms.parameters = it->get>(); + } + } + + inline void from_json(const nlohmann::json& j, ModelInfo& m) { + j.at("id").get_to(m.id); + j.at("name").get_to(m.name); + j.at("version").get_to(m.version); + j.at("alias").get_to(m.alias); + j.at("providerType").get_to(m.provider_type); + j.at("uri").get_to(m.uri); + j.at("modelType").get_to(m.model_type); + + m.display_name = ParsingUtils::get_opt_string(j, "displayName"); + m.publisher = ParsingUtils::get_opt_string(j, "publisher"); + m.license = ParsingUtils::get_opt_string(j, "license"); + m.license_description = ParsingUtils::get_opt_string(j, "licenseDescription"); + m.task = ParsingUtils::get_opt_string(j, "task"); + if (auto it = j.find("fileSizeMb"); it != j.end() && !it->is_null() && it->is_number_integer()) { + auto v = it->get(); + m.file_size_mb = (v >= 0) ? static_cast(v) : 0u; + } + m.supports_tool_calling = ParsingUtils::get_opt_bool(j, "supportsToolCalling"); + m.max_output_tokens = ParsingUtils::get_opt_i64(j, "maxOutputTokens"); + m.min_fl_version = ParsingUtils::get_opt_string(j, "minFLVersion"); + + if (auto it = j.find("cached"); it != j.end() && it->is_boolean()) { + m.cached = it->get(); + } + else { + m.cached = false; + } + + if (auto it = j.find("createdAt"); it != j.end() && it->is_number_integer()) { + m.created_at_unix = it->get(); + } + else { + m.created_at_unix = 0; + } + + // nested optional objects + if (auto it = j.find("modelSettings"); it != j.end() && it->is_object()) { + m.model_settings = it->get(); + } + else { + m.model_settings.reset(); + } + + if (auto it = j.find("promptTemplate"); it != j.end() && it->is_object()) { + m.prompt_template = it->get(); + } + else { + m.prompt_template.reset(); + } + + if (auto it = j.find("runtime"); it != j.end() && it->is_object()) { + m.runtime = it->get(); + } + else { + m.runtime.reset(); + } + } + + // ---------- Tool calling: to_json (serialization for requests) ---------- + + inline void to_json(nlohmann::json& j, const PropertyDefinition& pd) { + j = nlohmann::json{{"type", pd.type}}; + if (pd.description) + j["description"] = *pd.description; + if (pd.properties) { + nlohmann::json props = nlohmann::json::object(); + for (const auto& [key, val] : *pd.properties) { + nlohmann::json pj; + to_json(pj, val); + props[key] = std::move(pj); + } + j["properties"] = std::move(props); + } + if (pd.required) + j["required"] = *pd.required; + } + + inline void to_json(nlohmann::json& j, const FunctionDefinition& fd) { + j = nlohmann::json{{"name", fd.name}}; + if (fd.description) + j["description"] = *fd.description; + if (fd.parameters) { + nlohmann::json pj; + to_json(pj, 
*fd.parameters); + j["parameters"] = std::move(pj); + } + } + + inline void to_json(nlohmann::json& j, const ToolDefinition& td) { + j = nlohmann::json{{"type", td.type}}; + nlohmann::json fj; + to_json(fj, td.function); + j["function"] = std::move(fj); + } + + // ---------- Tool calling: to_json for response types (needed for multi-turn serialization) ---------- + + inline void to_json(nlohmann::json& j, const FunctionCall& fc) { + j = nlohmann::json{{"name", fc.name}, {"arguments", fc.arguments}}; + } + + inline void to_json(nlohmann::json& j, const ToolCall& tc) { + j = nlohmann::json{{"id", tc.id}, {"type", tc.type}}; + if (tc.function_call) { + nlohmann::json fj; + to_json(fj, *tc.function_call); + j["function"] = std::move(fj); + } + } + + // ---------- Tool calling: from_json (deserialization from responses) ---------- + + inline void from_json(const nlohmann::json& j, FunctionCall& fc) { + fc.name = ParsingUtils::get_string_or_empty(j, "name"); + if (j.contains("arguments")) { + const auto& args = j.at("arguments"); + if (args.is_string()) + fc.arguments = args.get(); + else + fc.arguments = args.dump(); + } + } + + inline void from_json(const nlohmann::json& j, ToolCall& tc) { + tc.id = ParsingUtils::get_string_or_empty(j, "id"); + tc.type = ParsingUtils::get_string_or_empty(j, "type"); + if (j.contains("function") && j.at("function").is_object()) + tc.function_call = j.at("function").get(); + } + + inline void from_json(const nlohmann::json& j, ChatMessage& m) { + if (j.contains("role")) + j.at("role").get_to(m.role); + if (j.contains("content") && !j.at("content").is_null()) + j.at("content").get_to(m.content); + + m.tool_call_id = ParsingUtils::get_opt_string(j, "tool_call_id"); + + m.tool_calls.clear(); + if (j.contains("tool_calls") && j.at("tool_calls").is_array()) { + for (const auto& tc : j.at("tool_calls")) { + if (tc.is_object()) + m.tool_calls.push_back(tc.get()); + } + } + } + + inline void from_json(const nlohmann::json& j, ChatChoice& c) { + if (j.contains("index")) + j.at("index").get_to(c.index); + if (j.contains("finish_reason") && !j.at("finish_reason").is_null()) + c.finish_reason = ParsingUtils::parse_finish_reason(j.at("finish_reason").get()); + + if (j.contains("message") && !j.at("message").is_null()) + c.message = j.at("message").get(); + + if (j.contains("delta") && !j.at("delta").is_null()) + c.delta = j.at("delta").get(); + } + + inline void from_json(const nlohmann::json& j, ChatCompletionCreateResponse& r) { + if (j.contains("created")) + j.at("created").get_to(r.created); + r.id = ParsingUtils::get_string_or_empty(j, "id"); + if (j.contains("IsDelta")) + j.at("IsDelta").get_to(r.is_delta); + if (j.contains("Successful")) + j.at("Successful").get_to(r.successful); + if (j.contains("HttpStatusCode")) + j.at("HttpStatusCode").get_to(r.http_status_code); + + r.choices.clear(); + if (j.contains("choices") && j.at("choices").is_array()) { + r.choices = j.at("choices").get>(); + } + } + +} // namespace foundry_local \ No newline at end of file diff --git a/sdk/cpp/test/catalog_test.cpp b/sdk/cpp/test/catalog_test.cpp new file mode 100644 index 00000000..d93de49e --- /dev/null +++ b/sdk/cpp/test/catalog_test.cpp @@ -0,0 +1,373 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
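+//
+// Catalog unit tests. CatalogTest drives a MockCore with canned per-command
+// responses; FileBasedCatalogTest replays JSON fixtures from testdata/ through
+// FileBackedCore so the same parsing paths are exercised against realistic
+// catalog payloads.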
+ +#include + +#include +#include +#include + +#include "mock_core.h" +#include "mock_object_factory.h" +#include "parser.h" +#include "foundry_local_exception.h" + +#include + +using namespace foundry_local; +using namespace foundry_local::Testing; + +using Factory = MockObjectFactory; + +class CatalogTest : public ::testing::Test { +protected: + MockCore core_; + NullLogger logger_; + + std::string MakeModelListJson(const std::vector>& models) { + nlohmann::json arr = nlohmann::json::array(); + for (const auto& [name, alias] : models) { + arr.push_back(nlohmann::json::parse(Factory::MakeModelInfoJson(name, alias))); + } + return arr.dump(); + } + + std::unique_ptr MakeCatalog() { + core_.OnCall("get_catalog_name", "test-catalog"); + return Factory::CreateCatalog(&core_, &logger_); + } +}; + +TEST_F(CatalogTest, GetName) { + auto catalog = MakeCatalog(); + EXPECT_EQ("test-catalog", catalog->GetName()); +} + +TEST_F(CatalogTest, Create_ThrowsOnCoreError) { + core_.OnCallThrow("get_catalog_name", "catalog error"); + EXPECT_THROW(MockObjectFactory::CreateCatalog(&core_, &logger_), Exception); +} + +TEST_F(CatalogTest, ListModels_Empty) { + core_.OnCall("get_model_list", "[]"); + auto catalog = MakeCatalog(); + auto models = catalog->ListModels(); + EXPECT_TRUE(models.empty()); +} + +TEST_F(CatalogTest, ListModels_SingleModel) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "my-model"}})); + auto catalog = MakeCatalog(); + auto models = catalog->ListModels(); + ASSERT_EQ(1u, models.size()); + EXPECT_EQ("my-model", models[0]->GetAlias()); +} + +TEST_F(CatalogTest, ListModels_MultipleVariantsSameAlias) { + // Two variants of the same model (same alias, different names) + nlohmann::json arr = nlohmann::json::array(); + arr.push_back(nlohmann::json::parse(Factory::MakeModelInfoJson("model-v1", "my-model", 1))); + arr.push_back(nlohmann::json::parse(Factory::MakeModelInfoJson("model-v2", "my-model", 2))); + core_.OnCall("get_model_list", arr.dump()); + + auto catalog = MakeCatalog(); + auto models = catalog->ListModels(); + + // Should be grouped into one Model + ASSERT_EQ(1u, models.size()); + EXPECT_EQ(2u, dynamic_cast(models[0])->GetAllModelVariants().size()); +} + +TEST_F(CatalogTest, ListModels_DifferentAliases) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-a", "alias-a"}, {"model-b", "alias-b"}})); + auto catalog = MakeCatalog(); + auto models = catalog->ListModels(); + EXPECT_EQ(2u, models.size()); +} + +TEST_F(CatalogTest, ListModels_IncludesOpenAIPrefix) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-a", "my-model"}, {"openai-model", "openai-stuff"}})); + auto catalog = MakeCatalog(); + auto models = catalog->ListModels(); + ASSERT_EQ(2u, models.size()); +} + +TEST_F(CatalogTest, GetModel_Found) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "my-model"}})); + auto catalog = MakeCatalog(); + + auto* model = catalog->GetModel("my-model"); + ASSERT_NE(nullptr, model); + EXPECT_EQ("my-model", model->GetAlias()); +} + +TEST_F(CatalogTest, GetModel_NotFound) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "my-model"}})); + auto catalog = MakeCatalog(); + + EXPECT_EQ(nullptr, catalog->GetModel("nonexistent")); +} + +TEST_F(CatalogTest, GetModelVariant_Found) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "my-model"}})); + auto catalog = MakeCatalog(); + + auto* variant = catalog->GetModelVariant("model-1:1"); + ASSERT_NE(nullptr, variant); + EXPECT_EQ("model-1:1", variant->GetId()); +} + 
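+// Note: variant ids use the "name:version" form (the factory call above with
+// name "model-1" yields id "model-1:1"), which is why variant lookups pass the
+// suffixed id while GetModel() is keyed by alias alone.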
+TEST_F(CatalogTest, GetModelVariant_NotFound) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "my-model"}})); + auto catalog = MakeCatalog(); + + EXPECT_EQ(nullptr, catalog->GetModelVariant("nonexistent:1")); +} + +TEST_F(CatalogTest, GetLoadedModels) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "alias-1"}, {"model-2", "alias-2"}})); + core_.OnCall("list_loaded_models", R"(["model-1:1"])"); + + auto catalog = MakeCatalog(); + + auto loaded = catalog->GetLoadedModels(); + ASSERT_EQ(1u, loaded.size()); + EXPECT_EQ("model-1:1", loaded[0]->GetId()); +} + +TEST_F(CatalogTest, GetCachedModels) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "alias-1"}, {"model-2", "alias-2"}})); + core_.OnCall("get_cached_models", R"(["model-1:1", "model-2:1"])"); + + auto catalog = MakeCatalog(); + + auto cached = catalog->GetCachedModels(); + EXPECT_EQ(2u, cached.size()); +} + +TEST_F(CatalogTest, ListModels_CachesResults) { + core_.OnCall("get_model_list", MakeModelListJson({{"model-1", "my-model"}})); + auto catalog = MakeCatalog(); + + catalog->ListModels(); + catalog->ListModels(); + + // Should only call get_model_list once due to caching + EXPECT_EQ(1, core_.GetCallCount("get_model_list")); +} + +TEST_F(CatalogTest, GetLatestVersion) { + nlohmann::json arr = nlohmann::json::array(); + arr.push_back(nlohmann::json::parse(Factory::MakeModelInfoJson("target-model", "alias", 1))); + arr.push_back(nlohmann::json::parse(Factory::MakeModelInfoJson("target-model", "alias", 2))); + core_.OnCall("get_model_list", arr.dump()); + + auto catalog = MakeCatalog(); + auto* model = dynamic_cast(catalog->GetModel("alias")); + ASSERT_NE(nullptr, model); + + const auto& first = model->GetAllModelVariants()[0]; + auto& latest = catalog->GetLatestVersion(first); + // Should return the first one with matching name (which is variants_[0]) + EXPECT_EQ(&first, &latest); + + // Also works when passing the Model (IModel) itself + auto& latestFromModel = catalog->GetLatestVersion(*model); + EXPECT_EQ(&first, &latestFromModel); +} + +class FileBasedCatalogTest : public ::testing::Test { +protected: + NullLogger logger_; + + static std::string TestDataPath(const std::string& filename) { return "testdata/" + filename; } +}; + +TEST_F(FileBasedCatalogTest, RealModelsList) { + auto core = FileBackedCore::FromModelList(TestDataPath("real_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto models = catalog->ListModels(); + ASSERT_EQ(2u, models.size()); + + int phi_models = 0, mistral_models = 0; + size_t phi_variants = 0, mistral_variants = 0; + + for (const auto* model : models) { + if (model->GetAlias() == "phi-4") { + phi_models++; + phi_variants = dynamic_cast(model)->GetAllModelVariants().size(); + } + else if (model->GetAlias() == "mistral-7b-v0.2") { + mistral_models++; + mistral_variants = dynamic_cast(model)->GetAllModelVariants().size(); + } + } + + EXPECT_EQ(1, phi_models); + EXPECT_EQ(1, mistral_models); + EXPECT_EQ(2u, phi_variants); + EXPECT_EQ(2u, mistral_variants); +} + +TEST_F(FileBasedCatalogTest, RealModelsList_VariantDetails) { + auto core = FileBackedCore::FromModelList(TestDataPath("real_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + const auto* gpuVariant = dynamic_cast(catalog->GetModelVariant("Phi-4-generic-gpu:1")); + ASSERT_NE(nullptr, gpuVariant); + + const auto& info = gpuVariant->GetInfo(); + EXPECT_EQ("Phi-4-generic-gpu:1", info.id); + EXPECT_EQ("Phi-4-generic-gpu", info.name); + 
EXPECT_EQ("phi-4", info.alias); + ASSERT_TRUE(info.display_name.has_value()); + EXPECT_EQ("Phi-4 (GPU)", *info.display_name); + ASSERT_TRUE(info.publisher.has_value()); + EXPECT_EQ("Microsoft", *info.publisher); + ASSERT_TRUE(info.license.has_value()); + EXPECT_EQ("MIT", *info.license); + ASSERT_TRUE(info.runtime.has_value()); + EXPECT_EQ(DeviceType::GPU, info.runtime->device_type); + EXPECT_EQ("DML", info.runtime->execution_provider); + ASSERT_TRUE(info.file_size_mb.has_value()); + EXPECT_EQ(8192u, *info.file_size_mb); + ASSERT_TRUE(info.supports_tool_calling.has_value()); + EXPECT_TRUE(*info.supports_tool_calling); + ASSERT_TRUE(info.max_output_tokens.has_value()); + EXPECT_EQ(4096, *info.max_output_tokens); + ASSERT_TRUE(info.prompt_template.has_value()); + EXPECT_EQ("<|system|>", info.prompt_template->system); + EXPECT_EQ("<|user|>", info.prompt_template->user); + EXPECT_EQ("<|assistant|>", info.prompt_template->assistant); + EXPECT_EQ("<|prompt|>", info.prompt_template->prompt); +} + +TEST_F(FileBasedCatalogTest, RealModelsList_CpuVariantDetails) { + auto core = FileBackedCore::FromModelList(TestDataPath("real_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + const auto* cpuVariant = dynamic_cast(catalog->GetModelVariant("Phi-4-generic-cpu:1")); + ASSERT_NE(nullptr, cpuVariant); + + const auto& info = cpuVariant->GetInfo(); + EXPECT_EQ("Phi-4-generic-cpu", info.name); + ASSERT_TRUE(info.runtime.has_value()); + EXPECT_EQ(DeviceType::CPU, info.runtime->device_type); + EXPECT_EQ("ORT", info.runtime->execution_provider); + ASSERT_TRUE(info.file_size_mb.has_value()); + EXPECT_EQ(4096u, *info.file_size_mb); + ASSERT_TRUE(info.supports_tool_calling.has_value()); + EXPECT_FALSE(*info.supports_tool_calling); + EXPECT_FALSE(info.prompt_template.has_value()); +} + +TEST_F(FileBasedCatalogTest, EmptyModelsList) { + auto core = FileBackedCore::FromModelList(TestDataPath("empty_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto models = catalog->ListModels(); + EXPECT_TRUE(models.empty()); +} + +TEST_F(FileBasedCatalogTest, MalformedJson) { + auto core = FileBackedCore::FromModelList(TestDataPath("malformed_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + EXPECT_ANY_THROW(catalog->ListModels()); +} + +TEST_F(FileBasedCatalogTest, MissingNameField) { + auto core = FileBackedCore::FromModelList(TestDataPath("missing_name_field_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + try { + catalog->ListModels(); + FAIL() << "Expected exception for missing 'name' field"; + } + catch (const std::exception& e) { + std::string msg = e.what(); + EXPECT_NE(std::string::npos, msg.find("name")) << "Actual: " << msg; + } +} + +TEST_F(FileBasedCatalogTest, CachedModels) { + auto core = + FileBackedCore::FromBoth(TestDataPath("real_models_list.json"), TestDataPath("valid_cached_models.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto cached = catalog->GetCachedModels(); + ASSERT_EQ(2u, cached.size()); + + std::vector names; + names.reserve(cached.size()); + for (const auto* mv : cached) + names.push_back(dynamic_cast(mv)->GetInfo().name); + + EXPECT_NE(std::find(names.begin(), names.end(), "Phi-4-generic-gpu"), names.end()); + EXPECT_NE(std::find(names.begin(), names.end(), "Phi-4-generic-cpu"), names.end()); +} + +TEST_F(FileBasedCatalogTest, CoreErrorOnModelList) { + auto core = FileBackedCore::FromModelList("testdata/nonexistent_file.json"); + auto 
catalog = Factory::CreateCatalog(&core, &logger_); + + EXPECT_ANY_THROW(catalog->ListModels()); +} + +TEST_F(FileBasedCatalogTest, MixedOpenAIAndLocal_IncludesAll) { + auto core = FileBackedCore::FromModelList(TestDataPath("mixed_openai_and_local.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto models = catalog->ListModels(); + ASSERT_EQ(3u, models.size()); +} + +TEST_F(FileBasedCatalogTest, ThreeVariantsOneModel) { + auto core = FileBackedCore::FromModelList(TestDataPath("three_variants_one_model.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto models = catalog->ListModels(); + ASSERT_EQ(1u, models.size()); + EXPECT_EQ(3u, dynamic_cast(models[0])->GetAllModelVariants().size()); +} + +TEST_F(FileBasedCatalogTest, ThreeVariantsOneModel_CachedSubset) { + auto core = FileBackedCore::FromBoth(TestDataPath("three_variants_one_model.json"), + TestDataPath("single_cached_model.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto cached = catalog->GetCachedModels(); + ASSERT_EQ(1u, cached.size()); + EXPECT_EQ("multi-v1-cpu", dynamic_cast(cached[0])->GetInfo().name); +} + +TEST_F(FileBasedCatalogTest, GetModelByAlias) { + auto core = FileBackedCore::FromModelList(TestDataPath("real_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + const auto* model = catalog->GetModel("phi-4"); + ASSERT_NE(nullptr, model); + EXPECT_EQ("phi-4", model->GetAlias()); + EXPECT_EQ(2u, dynamic_cast(model)->GetAllModelVariants().size()); + + const auto* missing = catalog->GetModel("nonexistent-alias"); + EXPECT_EQ(nullptr, missing); +} + +TEST_F(FileBasedCatalogTest, GetModelVariant_NotInCatalog) { + auto core = FileBackedCore::FromModelList(TestDataPath("real_models_list.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + EXPECT_EQ(nullptr, catalog->GetModelVariant("nonexistent-variant-id")); +} + +TEST_F(FileBasedCatalogTest, LoadedModels) { + auto core = FileBackedCore::FromAll(TestDataPath("real_models_list.json"), TestDataPath("valid_cached_models.json"), + TestDataPath("valid_loaded_models.json")); + auto catalog = Factory::CreateCatalog(&core, &logger_); + + auto loaded = catalog->GetLoadedModels(); + ASSERT_EQ(1u, loaded.size()); + EXPECT_EQ("Phi-4-generic-gpu", dynamic_cast(loaded[0])->GetInfo().name); +} diff --git a/sdk/cpp/test/client_test.cpp b/sdk/cpp/test/client_test.cpp new file mode 100644 index 00000000..6f083cef --- /dev/null +++ b/sdk/cpp/test/client_test.cpp @@ -0,0 +1,745 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
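+//
+// OpenAIChatClient tests. Against a MockCore they assert three things: the
+// OpenAICreateRequest JSON has the expected shape (model, messages, settings,
+// tools), streaming chunks are dispatched in order through the native
+// callback, and tool-call responses round-trip through the parser into
+// ToolCall/FunctionCall structures.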
+ +#include + +#include "mock_core.h" +#include "mock_object_factory.h" +#include "parser.h" +#include "foundry_local_exception.h" + +#include + +using namespace foundry_local; +using namespace foundry_local::Testing; + +using Factory = MockObjectFactory; + +class OpenAIChatClientTest : public ::testing::Test { +protected: + MockCore core_; + NullLogger logger_; + + std::string MakeChatResponseJson(const std::string& content = "Hello!") { + nlohmann::json resp = { + {"created", 1700000000}, + {"id", "chatcmpl-test"}, + {"IsDelta", false}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, {"finish_reason", "stop"}, {"message", {{"role", "assistant"}, {"content", content}}}}}}}; + return resp.dump(); + } + + ModelVariant MakeLoadedVariant(const std::string& name = "chat-model") { + core_.OnCall("list_loaded_models", "[\"" + name + ":1\"]"); + return Factory::CreateModelVariant(&core_, Factory::MakeModelInfo(name, "alias"), &logger_); + } +}; + +TEST_F(OpenAIChatClientTest, CompleteChat_BasicResponse) { + core_.OnCall("chat_completions", MakeChatResponseJson("Hello world!")); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "Say hello", {}}}; + ChatSettings settings; + auto response = client.CompleteChat(messages, settings); + + EXPECT_TRUE(response.successful); + ASSERT_EQ(1u, response.choices.size()); + EXPECT_EQ("Hello world!", response.choices[0].message->content); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_WithSettings) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "test", {}}}; + ChatSettings settings; + settings.temperature = 0.7f; + settings.max_tokens = 100; + settings.top_p = 0.9f; + settings.frequency_penalty = 0.5f; + settings.presence_penalty = 0.3f; + settings.n = 2; + settings.random_seed = 42; + settings.top_k = 10; + + auto response = client.CompleteChat(messages, settings); + + // Verify the request JSON contains the settings + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + EXPECT_NEAR(0.7f, openAiReq["temperature"].get(), 0.001f); + EXPECT_EQ(100, openAiReq["max_completion_tokens"].get()); + EXPECT_NEAR(0.9f, openAiReq["top_p"].get(), 0.001f); + EXPECT_NEAR(0.5f, openAiReq["frequency_penalty"].get(), 0.001f); + EXPECT_NEAR(0.3f, openAiReq["presence_penalty"].get(), 0.001f); + EXPECT_EQ(2, openAiReq["n"].get()); + EXPECT_EQ(42, openAiReq["seed"].get()); + EXPECT_EQ(10, openAiReq["metadata"]["top_k"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_RequestFormat) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"system", "You are helpful", {}}, {"user", "Hello", {}}}; + ChatSettings settings; + auto response = client.CompleteChat(messages, settings); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + EXPECT_EQ("chat-model", openAiReq["model"].get()); + 
EXPECT_FALSE(openAiReq["stream"].get()); + ASSERT_EQ(2u, openAiReq["messages"].size()); + EXPECT_EQ("system", openAiReq["messages"][0]["role"].get()); + EXPECT_EQ("user", openAiReq["messages"][1]["role"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChatStreaming) { + nlohmann::json chunk1 = { + {"created", 1700000000}, + {"id", "chatcmpl-1"}, + {"IsDelta", true}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, {"finish_reason", nullptr}, {"delta", {{"role", "assistant"}, {"content", "Hello"}}}}}}}; + nlohmann::json chunk2 = { + {"created", 1700000000}, + {"id", "chatcmpl-1"}, + {"IsDelta", true}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", {{{"index", 0}, {"finish_reason", "stop"}, {"delta", {{"content", " world"}}}}}}}; + + core_.OnCall("chat_completions", + [&](std::string_view, const std::string*, NativeCallbackFn callback, void* userData) -> std::string { + if (callback && userData) { + std::string s1 = chunk1.dump(); + std::string s2 = chunk2.dump(); + callback(s1.data(), static_cast(s1.size()), userData); + callback(s2.data(), static_cast(s2.size()), userData); + } + return ""; + }); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "test", {}}}; + ChatSettings settings; + + std::vector chunks; + client.CompleteChatStreaming(messages, settings, + [&](const ChatCompletionCreateResponse& chunk) { chunks.push_back(chunk); }); + + ASSERT_EQ(2u, chunks.size()); + EXPECT_TRUE(chunks[0].is_delta); + ASSERT_TRUE(chunks[0].choices[0].delta.has_value()); + EXPECT_EQ("Hello", chunks[0].choices[0].delta->content); + EXPECT_EQ(" world", chunks[1].choices[0].delta->content); +} + +TEST_F(OpenAIChatClientTest, CompleteChatStreaming_PropagatesCallbackException) { + nlohmann::json chunk = { + {"created", 1700000000}, + {"id", "chatcmpl-1"}, + {"IsDelta", true}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, {"finish_reason", nullptr}, {"delta", {{"role", "assistant"}, {"content", "Hi"}}}}}}}; + + core_.OnCall("chat_completions", + [&](std::string_view, const std::string*, NativeCallbackFn callback, void* userData) -> std::string { + if (callback && userData) { + std::string s = chunk.dump(); + callback(s.data(), static_cast(s.size()), userData); + } + return ""; + }); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "test", {}}}; + ChatSettings settings; + + EXPECT_THROW(client.CompleteChatStreaming( + messages, settings, + [](const ChatCompletionCreateResponse&) { throw std::runtime_error("callback error"); }), + std::runtime_error); +} + +TEST_F(OpenAIChatClientTest, Constructor_ThrowsIfNotLoaded) { + core_.OnCall("list_loaded_models", R"([])"); + auto variant = Factory::CreateModelVariant(&core_, Factory::MakeModelInfo("unloaded-model", "alias"), &logger_); + EXPECT_THROW(OpenAIChatClient client(variant), Exception); +} + +TEST_F(OpenAIChatClientTest, GetModelId) { + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + EXPECT_EQ("chat-model", client.GetModelId()); +} + +// ---------- Tool calling tests ---------- + +TEST_F(OpenAIChatClientTest, CompleteChat_WithTools_IncludesToolsInRequest) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + 
core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "What is 7 * 6?", {}}}; + + std::vector tools = { + {"function", + FunctionDefinition{"multiply_numbers", "A tool for multiplying two numbers.", + PropertyDefinition{"object", std::nullopt, + std::unordered_map{ + {"first", PropertyDefinition{"integer", "The first number"}}, + {"second", PropertyDefinition{"integer", "The second number"}}}, + std::vector{"first", "second"}}}}}; + + ChatSettings settings; + settings.tool_choice = ToolChoiceKind::Required; + + auto response = client.CompleteChat(messages, tools, settings); + + // Verify the request JSON contains tools and tool_choice + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + ASSERT_TRUE(openAiReq.contains("tools")); + ASSERT_TRUE(openAiReq["tools"].is_array()); + EXPECT_EQ(1u, openAiReq["tools"].size()); + EXPECT_EQ("function", openAiReq["tools"][0]["type"].get()); + EXPECT_EQ("multiply_numbers", openAiReq["tools"][0]["function"]["name"].get()); + EXPECT_EQ("A tool for multiplying two numbers.", + openAiReq["tools"][0]["function"]["description"].get()); + EXPECT_EQ("object", openAiReq["tools"][0]["function"]["parameters"]["type"].get()); + EXPECT_TRUE(openAiReq["tools"][0]["function"]["parameters"].contains("properties")); + EXPECT_TRUE(openAiReq["tools"][0]["function"]["parameters"]["properties"].contains("first")); + EXPECT_TRUE(openAiReq["tools"][0]["function"]["parameters"]["properties"].contains("second")); + + EXPECT_EQ("required", openAiReq["tool_choice"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_WithoutTools_OmitsToolsField) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "Hello", {}}}; + ChatSettings settings; + auto response = client.CompleteChat(messages, settings); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + EXPECT_FALSE(openAiReq.contains("tools")); + EXPECT_FALSE(openAiReq.contains("tool_choice")); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_ToolCallResponse_Parsed) { + // Simulate a response with tool calls from the model + nlohmann::json resp = { + {"created", 1700000000}, + {"id", "chatcmpl-tool"}, + {"IsDelta", false}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, + {"finish_reason", "tool_calls"}, + {"message", + {{"role", "assistant"}, + {"content", "[{\"name\": \"multiply_numbers\", \"parameters\": {\"first\": 7, \"second\": " + "6}}]"}, + {"tool_calls", + {{{"id", "call_1"}, + {"type", "function"}, + {"function", {{"name", "multiply_numbers"}, {"arguments", "{\"first\": 7, \"second\": 6}"}}}}}}}}}}}}; + + core_.OnCall("chat_completions", resp.dump()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "What is 7 * 6?", {}}}; + ChatSettings settings; + auto response = client.CompleteChat(messages, settings); + + ASSERT_EQ(1u, response.choices.size()); + EXPECT_EQ(FinishReason::ToolCalls, 
response.choices[0].finish_reason); + ASSERT_TRUE(response.choices[0].message.has_value()); + + const auto& msg = *response.choices[0].message; + ASSERT_EQ(1u, msg.tool_calls.size()); + EXPECT_EQ("call_1", msg.tool_calls[0].id); + EXPECT_EQ("function", msg.tool_calls[0].type); + ASSERT_TRUE(msg.tool_calls[0].function_call.has_value()); + EXPECT_EQ("multiply_numbers", msg.tool_calls[0].function_call->name); + EXPECT_EQ("{\"first\": 7, \"second\": 6}", msg.tool_calls[0].function_call->arguments); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_ToolChoiceAuto) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "test", {}}}; + ChatSettings settings; + settings.tool_choice = ToolChoiceKind::Auto; + + client.CompleteChat(messages, settings); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + EXPECT_EQ("auto", openAiReq["tool_choice"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_ToolChoiceNone) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "test", {}}}; + ChatSettings settings; + settings.tool_choice = ToolChoiceKind::None; + + client.CompleteChat(messages, settings); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + EXPECT_EQ("none", openAiReq["tool_choice"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_ToolMessageWithToolCallId) { + core_.OnCall("chat_completions", MakeChatResponseJson()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + ChatMessage toolMsg; + toolMsg.role = "tool"; + toolMsg.content = "42"; + toolMsg.tool_call_id = "call_1"; + + std::vector messages = {{"user", "What is 7 * 6?", {}}, std::move(toolMsg)}; + ChatSettings settings; + client.CompleteChat(messages, settings); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + ASSERT_EQ(2u, openAiReq["messages"].size()); + EXPECT_FALSE(openAiReq["messages"][0].contains("tool_call_id")); + EXPECT_EQ("call_1", openAiReq["messages"][1]["tool_call_id"].get()); + EXPECT_EQ("tool", openAiReq["messages"][1]["role"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_AssistantToolCallsSerialized) { + // Multi-turn tool calling: the assistant message with tool_calls must be sent back + // alongside the tool result message for the model to match the tool response. 
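+  // The wire sequence asserted below is: user -> assistant (carrying tool_calls) ->
+  // tool (carrying the matching tool_call_id). Dropping the assistant message breaks
+  // that pairing, so its serialization is checked field by field.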
+ core_.OnCall("chat_completions", MakeChatResponseJson("The answer is 42.")); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + ChatMessage assistantMsg; + assistantMsg.role = "assistant"; + assistantMsg.content = ""; + assistantMsg.tool_calls = { + {"call_1", "function", FunctionCall{"multiply_numbers", "{\"first\": 7, \"second\": 6}"}}}; + + ChatMessage toolMsg; + toolMsg.role = "tool"; + toolMsg.content = "42"; + toolMsg.tool_call_id = "call_1"; + + std::vector messages = { + {"user", "What is 7 * 6?", {}}, std::move(assistantMsg), std::move(toolMsg)}; + ChatSettings settings; + client.CompleteChat(messages, settings); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + ASSERT_EQ(3u, openAiReq["messages"].size()); + + // User message: no tool_calls + EXPECT_FALSE(openAiReq["messages"][0].contains("tool_calls")); + + // Assistant message: must include tool_calls + const auto& assistantJson = openAiReq["messages"][1]; + EXPECT_EQ("assistant", assistantJson["role"].get()); + ASSERT_TRUE(assistantJson.contains("tool_calls")); + ASSERT_TRUE(assistantJson["tool_calls"].is_array()); + ASSERT_EQ(1u, assistantJson["tool_calls"].size()); + EXPECT_EQ("call_1", assistantJson["tool_calls"][0]["id"].get()); + EXPECT_EQ("function", assistantJson["tool_calls"][0]["type"].get()); + EXPECT_EQ("multiply_numbers", assistantJson["tool_calls"][0]["function"]["name"].get()); + EXPECT_EQ("{\"first\": 7, \"second\": 6}", + assistantJson["tool_calls"][0]["function"]["arguments"].get()); + + // Tool message: must include tool_call_id + const auto& toolJson = openAiReq["messages"][2]; + EXPECT_EQ("tool", toolJson["role"].get()); + EXPECT_EQ("call_1", toolJson["tool_call_id"].get()); + EXPECT_FALSE(toolJson.contains("tool_calls")); +} + +TEST_F(OpenAIChatClientTest, CompleteChatStreaming_WithTools) { + nlohmann::json chunk1 = { + {"created", 1700000000}, + {"id", "chatcmpl-1"}, + {"IsDelta", true}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, {"finish_reason", nullptr}, {"delta", {{"role", "assistant"}, {"content", ""}}}}}}}; + nlohmann::json chunk2 = {{"created", 1700000000}, + {"id", "chatcmpl-1"}, + {"IsDelta", true}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, + {"finish_reason", "tool_calls"}, + {"delta", + {{"content", ""}, + {"tool_calls", + {{{"id", "call_1"}, + {"type", "function"}, + {"function", {{"name", "multiply"}, {"arguments", "{\"a\":1}"}}}}}}}}}}}}; + + core_.OnCall("chat_completions", + [&](std::string_view, const std::string*, NativeCallbackFn callback, void* userData) -> std::string { + if (callback && userData) { + std::string s1 = chunk1.dump(); + std::string s2 = chunk2.dump(); + callback(s1.data(), static_cast(s1.size()), userData); + callback(s2.data(), static_cast(s2.size()), userData); + } + return ""; + }); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "test", {}}}; + + std::vector tools = {{"function", FunctionDefinition{"multiply", "Multiply numbers."}}}; + + ChatSettings settings; + settings.tool_choice = ToolChoiceKind::Required; + + std::vector chunks; + client.CompleteChatStreaming(messages, tools, settings, + [&](const ChatCompletionCreateResponse& chunk) { 
chunks.push_back(chunk); }); + + ASSERT_EQ(2u, chunks.size()); + EXPECT_EQ(FinishReason::ToolCalls, chunks[1].choices[0].finish_reason); + ASSERT_TRUE(chunks[1].choices[0].delta.has_value()); + ASSERT_EQ(1u, chunks[1].choices[0].delta->tool_calls.size()); + EXPECT_EQ("multiply", chunks[1].choices[0].delta->tool_calls[0].function_call->name); + + // Verify tools were included in the request + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + ASSERT_TRUE(openAiReq.contains("tools")); + EXPECT_EQ("required", openAiReq["tool_choice"].get()); +} + +class OpenAIAudioClientTest : public ::testing::Test { +protected: + MockCore core_; + NullLogger logger_; + + ModelVariant MakeLoadedVariant(const std::string& name = "audio-model") { + core_.OnCall("list_loaded_models", "[\"" + name + ":1\"]"); + return Factory::CreateModelVariant(&core_, Factory::MakeModelInfo(name, "alias"), &logger_); + } +}; + +TEST_F(OpenAIAudioClientTest, TranscribeAudio) { + core_.OnCall("audio_transcribe", "Hello world transcribed text"); + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + auto response = client.TranscribeAudio("test.wav"); + + EXPECT_EQ("Hello world transcribed text", response.text); +} + +TEST_F(OpenAIAudioClientTest, TranscribeAudio_RequestFormat) { + core_.OnCall("audio_transcribe", "text"); + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + client.TranscribeAudio("audio.wav"); + + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("audio_transcribe")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + EXPECT_EQ("audio-model", openAiReq["Model"].get()); + EXPECT_EQ("audio.wav", openAiReq["FileName"].get()); +} + +TEST_F(OpenAIAudioClientTest, TranscribeAudioStreaming) { + core_.OnCall("audio_transcribe", + [](std::string_view, const std::string*, NativeCallbackFn callback, void* userData) -> std::string { + if (callback && userData) { + std::string text1 = "Hello "; + std::string text2 = "world!"; + callback(text1.data(), static_cast(text1.size()), userData); + callback(text2.data(), static_cast(text2.size()), userData); + } + return ""; + }); + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + + std::vector chunks; + client.TranscribeAudioStreaming( + "test.wav", [&](const AudioCreateTranscriptionResponse& chunk) { chunks.push_back(chunk.text); }); + + ASSERT_EQ(2u, chunks.size()); + EXPECT_EQ("Hello ", chunks[0]); + EXPECT_EQ("world!", chunks[1]); +} + +TEST_F(OpenAIAudioClientTest, TranscribeAudioStreaming_PropagatesCallbackException) { + core_.OnCall("audio_transcribe", + [](std::string_view, const std::string*, NativeCallbackFn callback, void* userData) -> std::string { + if (callback && userData) { + std::string text = "test"; + callback(text.data(), static_cast(text.size()), userData); + } + return ""; + }); + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + + EXPECT_THROW( + client.TranscribeAudioStreaming( + "test.wav", [](const AudioCreateTranscriptionResponse&) { throw std::runtime_error("streaming error"); }), + std::runtime_error); +} + 
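The streaming tests above all exercise the same chunked-callback contract: the core invokes the native callback once per chunk, the client parses each chunk, and exceptions thrown from the user callback propagate out of the `*Streaming` call. As a consolidated sketch of how a caller consumes that contract (assuming a loaded variant, mirroring the fixtures above):

```
// Sketch: accumulating streamed chat deltas into the full reply.
OpenAIChatClient client(variant);  // variant must already be loaded

std::vector<ChatMessage> messages = {{"user", "Count from 1 to 5.", {}}};
ChatSettings settings;

std::string full;
client.CompleteChatStreaming(messages, settings, [&](const ChatCompletionCreateResponse& chunk) {
  // Each chunk is a delta; a throw here aborts the stream and propagates to the caller.
  if (!chunk.choices.empty() && chunk.choices[0].delta.has_value())
    full += chunk.choices[0].delta->content;
});
```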
+TEST_F(OpenAIAudioClientTest, Constructor_ThrowsIfNotLoaded) { + core_.OnCall("list_loaded_models", R"([])"); + auto variant = Factory::CreateModelVariant(&core_, Factory::MakeModelInfo("unloaded-model", "alias"), &logger_); + EXPECT_THROW(OpenAIAudioClient client(variant), Exception); +} + +TEST_F(OpenAIAudioClientTest, GetModelId) { + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + EXPECT_EQ("audio-model", client.GetModelId()); +} + +TEST_F(OpenAIAudioClientTest, TranscribeAudio_CoreError_Throws) { + core_.OnCallThrow("audio_transcribe", "transcription failed"); + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + + EXPECT_THROW(client.TranscribeAudio("test.wav"), Exception); +} + +TEST_F(OpenAIAudioClientTest, TranscribeAudioStreaming_CoreError_Throws) { + core_.OnCallThrow("audio_transcribe", "streaming transcription failed"); + core_.OnCall("list_loaded_models", R"(["audio-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIAudioClient client(variant); + + EXPECT_THROW(client.TranscribeAudioStreaming("test.wav", [](const AudioCreateTranscriptionResponse&) {}), + Exception); +} + +// ===================================================================== +// Multi-turn conversation tests +// ===================================================================== + +TEST_F(OpenAIChatClientTest, CompleteChat_MultiTurn) { + // First turn: user asks a question + core_.OnCall("chat_completions", MakeChatResponseJson("42")); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "What is 7 * 6?", {}}}; + ChatSettings settings; + auto response = client.CompleteChat(messages, settings); + + ASSERT_TRUE(response.successful); + ASSERT_EQ(1u, response.choices.size()); + EXPECT_EQ("42", response.choices[0].message->content); + + // Second turn: add assistant response + user follow-up + messages.push_back({"assistant", response.choices[0].message->content, {}}); + messages.push_back({"user", "Is that a real number?", {}}); + + core_.OnCall("chat_completions", MakeChatResponseJson("Yes")); + auto response2 = client.CompleteChat(messages, settings); + + ASSERT_TRUE(response2.successful); + EXPECT_EQ("Yes", response2.choices[0].message->content); + + // Verify the second request contained all 3 messages + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + ASSERT_EQ(3u, openAiReq["messages"].size()); + EXPECT_EQ("user", openAiReq["messages"][0]["role"].get()); + EXPECT_EQ("assistant", openAiReq["messages"][1]["role"].get()); + EXPECT_EQ("user", openAiReq["messages"][2]["role"].get()); +} + +TEST_F(OpenAIChatClientTest, CompleteChat_CoreError_Throws) { + core_.OnCallThrow("chat_completions", "inference failed"); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "Hello", {}}}; + ChatSettings settings; + + EXPECT_THROW(client.CompleteChat(messages, settings), Exception); +} + +TEST_F(OpenAIChatClientTest, CompleteChatStreaming_CoreError_Throws) { + core_.OnCallThrow("chat_completions", "streaming failed"); + core_.OnCall("list_loaded_models", 
R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"user", "Hello", {}}}; + ChatSettings settings; + + EXPECT_THROW(client.CompleteChatStreaming(messages, settings, [](const ChatCompletionCreateResponse&) {}), + Exception); +} + +// ===================================================================== +// Full tool-call round-trip +// ===================================================================== + +TEST_F(OpenAIChatClientTest, CompleteChat_ToolCallRoundTrip) { + // Step 1: model returns a tool call + nlohmann::json toolCallResp = { + {"created", 1700000000}, + {"id", "chatcmpl-tool"}, + {"IsDelta", false}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, + {"finish_reason", "tool_calls"}, + {"message", + {{"role", "assistant"}, + {"content", "[{\"name\": \"multiply_numbers\", \"parameters\": {\"first\": 7, \"second\": " + "6}}]"}, + {"tool_calls", + {{{"id", "call_1"}, + {"type", "function"}, + {"function", {{"name", "multiply_numbers"}, {"arguments", "{\"first\": 7, \"second\": 6}"}}}}}}}}}}}}; + + core_.OnCall("chat_completions", toolCallResp.dump()); + core_.OnCall("list_loaded_models", R"(["chat-model:1"])"); + + auto variant = MakeLoadedVariant(); + OpenAIChatClient client(variant); + + std::vector messages = {{"system", "You are a helpful AI assistant.", {}}, + {"user", "What is 7 multiplied by 6?", {}}}; + + std::vector tools = { + {"function", + FunctionDefinition{"multiply_numbers", "A tool for multiplying two numbers.", + PropertyDefinition{"object", std::nullopt, + std::unordered_map{ + {"first", PropertyDefinition{"integer", "The first number"}}, + {"second", PropertyDefinition{"integer", "The second number"}}}, + std::vector{"first", "second"}}}}}; + + ChatSettings settings; + settings.tool_choice = ToolChoiceKind::Required; + + auto response = client.CompleteChat(messages, tools, settings); + + ASSERT_EQ(1u, response.choices.size()); + EXPECT_EQ(FinishReason::ToolCalls, response.choices[0].finish_reason); + ASSERT_EQ(1u, response.choices[0].message->tool_calls.size()); + EXPECT_EQ("multiply_numbers", response.choices[0].message->tool_calls[0].function_call->name); + + // Step 2: send tool response back, model continues with the answer + messages.push_back({"assistant", response.choices[0].message->content, {}}); + + ChatMessage toolMsg; + toolMsg.role = "tool"; + toolMsg.content = "7 x 6 = 42."; + toolMsg.tool_call_id = "call_1"; + messages.push_back(std::move(toolMsg)); + + messages.push_back({"system", "Respond only with the answer generated by the tool.", {}}); + + core_.OnCall("chat_completions", MakeChatResponseJson("42")); + settings.tool_choice = ToolChoiceKind::Auto; + + auto response2 = client.CompleteChat(messages, tools, settings); + + ASSERT_TRUE(response2.successful); + EXPECT_EQ("42", response2.choices[0].message->content); + + // Verify the second request contained tool response message + auto requestJson = nlohmann::json::parse(core_.GetLastDataArg("chat_completions")); + auto openAiReq = nlohmann::json::parse(requestJson["Params"]["OpenAICreateRequest"].get()); + + // 5 messages: system, user, assistant (tool_call), tool, system (continue) + ASSERT_EQ(5u, openAiReq["messages"].size()); + EXPECT_EQ("tool", openAiReq["messages"][3]["role"].get()); + EXPECT_EQ("call_1", openAiReq["messages"][3]["tool_call_id"].get()); + EXPECT_EQ("auto", openAiReq["tool_choice"].get()); +} diff --git a/sdk/cpp/test/e2e_test.cpp b/sdk/cpp/test/e2e_test.cpp new 
file mode 100644
index 00000000..06bdc0ff
--- /dev/null
+++ b/sdk/cpp/test/e2e_test.cpp
@@ -0,0 +1,574 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+//
+// End-to-end tests that exercise the public API with the real Core DLL.
+// Tests marked DISABLED_ are skipped in CI (no Core DLL / no network).
+// Run locally with: --gtest_also_run_disabled_tests
+
+#include <gtest/gtest.h>
+
+#include "foundry_local.h"
+
+#include <cctype>
+#include <cstdlib>
+#include <iostream>
+#include <string>
+#include <vector>
+
+using namespace foundry_local;
+
+// ---------------------------------------------------------------------------
+// Helper: detect CI environment (mirrors C# SkipInCI logic)
+// ---------------------------------------------------------------------------
+static bool IsRunningInCI() {
+  auto check = [](const char* var) -> bool {
+    const char* val = std::getenv(var);
+    if (!val)
+      return false;
+    std::string s(val);
+    for (auto& c : s)
+      c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
+    return s == "true" || s == "1";
+  };
+  return check("TF_BUILD") || check("GITHUB_ACTIONS") || check("CI");
+}
+
+// ---------------------------------------------------------------------------
+// Fixture: creates a real Manager with the Core DLL.
+// All tests in this fixture require the native DLLs next to the test binary.
+// ---------------------------------------------------------------------------
+class EndToEndTest : public ::testing::Test {
+protected:
+  static void SetUpTestSuite() {
+    Configuration config("CppSdkE2ETest");
+    config.log_level = LogLevel::Information;
+    try {
+      Manager::Create(std::move(config));
+    }
+    catch (const std::exception& ex) {
+      std::cerr << "[E2E] Failed to create Manager: " << ex.what() << "\n";
+      GTEST_SKIP() << "Core DLL not available: " << ex.what();
+    }
+  }
+
+  static void TearDownTestSuite() { Manager::Destroy(); }
+
+  void SetUp() override {
+    if (!Manager::IsInitialized()) {
+      GTEST_SKIP() << "Manager not available (Core DLL missing?)";
+    }
+  }
+
+  static bool IsAudioModel(const std::string& alias) { return alias.find("whisper") != std::string::npos; }
+
+  /// Find a chat-capable model, preferring cached, then known small models, then any.
+  /// Selects the CPU variant when available to avoid GPU/EP dependency issues.
+  static IModel* FindChatModel(Catalog& catalog) {
+    IModel* target = nullptr;
+
+    auto cached = catalog.GetCachedModels();
+    for (auto* variant : cached) {
+      if (!IsAudioModel(variant->GetAlias())) {
+        target = catalog.GetModel(variant->GetAlias());
+        if (target)
+          break;
+      }
+    }
+
+    if (!target) {
+      for (const auto& alias : {"qwen2.5-0.5b", "qwen2.5-coder-0.5b", "phi-4-mini"}) {
+        target = catalog.GetModel(alias);
+        if (target)
+          break;
+      }
+    }
+
+    if (!target) {
+      auto models = catalog.ListModels();
+      for (auto* model : models) {
+        if (!IsAudioModel(model->GetAlias())) {
+          target = model;
+          break;
+        }
+      }
+    }
+
+    if (target) {
+      auto* model = dynamic_cast<Model*>(target);
+      if (model) {
+        for (const auto& variant : model->GetAllModelVariants()) {
+          if (variant.GetInfo().runtime.has_value() &&
+              variant.GetInfo().runtime->device_type == DeviceType::CPU) {
+            model->SelectVariant(variant);
+            break;
+          }
+        }
+      }
+    }
+
+    return target;
+  }
+
+  /// Find an audio model, preferring cached.
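+  /// Only aliases containing "whisper" count as audio models here (see IsAudioModel above).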
+  static IModel* FindAudioModel(Catalog& catalog) {
+    IModel* target = nullptr;
+
+    auto cached = catalog.GetCachedModels();
+    for (auto* variant : cached) {
+      if (IsAudioModel(variant->GetAlias())) {
+        target = catalog.GetModel(variant->GetAlias());
+        if (target)
+          break;
+      }
+    }
+
+    if (!target) {
+      for (const auto& alias : {"whisper-small", "whisper-tiny"}) {
+        target = catalog.GetModel(alias);
+        if (target)
+          break;
+      }
+    }
+
+    return target;
+  }
+};
+
+// ===========================================================================
+// Catalog tests (no model download required)
+// ===========================================================================
+
+TEST_F(EndToEndTest, BrowseCatalog_ListsModels) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  EXPECT_FALSE(catalog.GetName().empty());
+
+  auto models = catalog.ListModels();
+  EXPECT_GT(models.size(), 0u) << "Catalog should have at least one model";
+
+  for (const auto* model : models) {
+    EXPECT_FALSE(model->GetAlias().empty());
+    auto* concreteModel = dynamic_cast<const Model*>(model);
+    ASSERT_NE(nullptr, concreteModel);
+    EXPECT_FALSE(concreteModel->GetAllModelVariants().empty());
+
+    for (const auto& variant : concreteModel->GetAllModelVariants()) {
+      const auto& info = variant.GetInfo();
+      EXPECT_FALSE(info.id.empty());
+      EXPECT_FALSE(info.name.empty());
+      EXPECT_FALSE(info.alias.empty());
+      EXPECT_FALSE(info.provider_type.empty());
+      EXPECT_FALSE(info.model_type.empty());
+    }
+  }
+}
+
+TEST_F(EndToEndTest, GetCachedModels_Succeeds) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto cached = catalog.GetCachedModels();
+  for (auto* variant : cached) {
+    EXPECT_FALSE(variant->GetId().empty());
+    EXPECT_TRUE(variant->IsCached());
+  }
+}
+
+TEST_F(EndToEndTest, GetLoadedModels_Succeeds) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto loaded = catalog.GetLoadedModels();
+  for (auto* variant : loaded) {
+    EXPECT_FALSE(variant->GetId().empty());
+    EXPECT_TRUE(variant->IsLoaded());
+  }
+}
+
+TEST_F(EndToEndTest, GetModel_NotFound_ReturnsNull) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto* model = catalog.GetModel("this-model-does-not-exist-12345");
+  EXPECT_EQ(model, nullptr);
+}
+
+TEST_F(EndToEndTest, GetModelVariant_NotFound_ReturnsNull) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto* variant = catalog.GetModelVariant("nonexistent-model:999");
+  EXPECT_EQ(variant, nullptr);
+}
+
+TEST_F(EndToEndTest, GetModelVariant_Found) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto models = catalog.ListModels();
+  if (models.empty()) {
+    GTEST_SKIP() << "No models in catalog";
+  }
+
+  const auto* firstConcreteModel = dynamic_cast<const Model*>(models[0]);
+  ASSERT_NE(nullptr, firstConcreteModel);
+  const auto& firstVariant = firstConcreteModel->GetAllModelVariants()[0];
+  auto* found = catalog.GetModelVariant(firstVariant.GetId());
+  ASSERT_NE(nullptr, found);
+  EXPECT_EQ(firstVariant.GetId(), found->GetId());
+}
+
+TEST_F(EndToEndTest, ModelVariantInfo_HasRequiredFields) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto models = catalog.ListModels();
+  if (models.empty()) {
+    GTEST_SKIP() << "No models in catalog";
+  }
+
+  for (const auto* model : models) {
+    auto* concreteModel = dynamic_cast<const Model*>(model);
+    ASSERT_NE(nullptr, concreteModel);
+    for (const auto& variant : concreteModel->GetAllModelVariants()) {
+      const auto& info = variant.GetInfo();
+      EXPECT_FALSE(info.id.empty());
+      EXPECT_FALSE(info.name.empty());
+      EXPECT_GT(info.version, 0u);
+      EXPECT_FALSE(info.alias.empty());
+      EXPECT_FALSE(info.uri.empty());
+    }
+  }
+}
+
+TEST_F(EndToEndTest, ModelVariant_SelectVariant) {
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto models = catalog.ListModels();
+
+  // Find a model with multiple variants
+  Model* multiVariantModel = nullptr;
+  for (auto* model : models) {
+    auto* concreteModel = dynamic_cast<Model*>(model);
+    if (concreteModel && concreteModel->GetAllModelVariants().size() > 1) {
+      multiVariantModel = concreteModel;
+      break;
+    }
+  }
+
+  if (!multiVariantModel) {
+    GTEST_SKIP() << "No model with multiple variants found";
+  }
+
+  const auto& variants = multiVariantModel->GetAllModelVariants();
+  const auto& secondVariant = variants[1];
+  multiVariantModel->SelectVariant(secondVariant);
+  EXPECT_EQ(secondVariant.GetId(), multiVariantModel->GetId());
+
+  // Select back the first variant
+  multiVariantModel->SelectVariant(variants[0]);
+  EXPECT_EQ(variants[0].GetId(), multiVariantModel->GetId());
+}
+
+// ===========================================================================
+// EnsureEpsDownloaded (no model download, but may download EPs)
+// ===========================================================================
+
+TEST_F(EndToEndTest, DISABLED_EnsureEpsDownloaded_Succeeds) {
+  if (IsRunningInCI()) {
+    GTEST_SKIP() << "Skipped in CI (may require network)";
+  }
+
+  EXPECT_NO_THROW(Manager::Instance().EnsureEpsDownloaded());
+}
+
+// ===========================================================================
+// Web service tests
+// ===========================================================================
+
+TEST_F(EndToEndTest, DISABLED_WebService_StartAndStop) {
+  if (IsRunningInCI()) {
+    GTEST_SKIP() << "Skipped in CI";
+  }
+
+  auto& manager = Manager::Instance();
+
+  // GetUrls should be empty before starting
+  EXPECT_TRUE(manager.GetUrls().empty());
+
+  // StartWebService without web config should throw
+  // Note: the manager was created without web config, so this verifies the guard.
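+  // The EXPECT_THROW below pins that behavior down rather than letting the call silently no-op.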
+ EXPECT_THROW(manager.StartWebService(), Exception); +} + +// =========================================================================== +// Download, load, chat (non-streaming), unload +// =========================================================================== + +TEST_F(EndToEndTest, DISABLED_DownloadLoadChatUnload) { +if (IsRunningInCI()) { + GTEST_SKIP() << "Skipped in CI (requires model download)"; +} + +auto& catalog = Manager::Instance().GetCatalog(); + auto* target = FindChatModel(catalog); + if (!target) { + GTEST_SKIP() << "No chat-capable model found in catalog"; + } + + std::cout << "[E2E] Using model: " << target->GetAlias() << " variant: " << target->GetId() << "\n"; + + // Download (no-op if already cached) + bool progressCallbackInvoked = false; + target->Download([&](float pct) { + progressCallbackInvoked = true; + std::cout << "\r[E2E] Download: " << pct << "% " << std::flush; + }); + std::cout << "\n"; + + EXPECT_TRUE(target->IsCached()); + + // Load + target->Load(); + EXPECT_TRUE(target->IsLoaded()); + + // Verify it appears in loaded models + auto loaded = catalog.GetLoadedModels(); + bool foundInLoaded = false; + for (auto* v : loaded) { + if (v->GetId() == target->GetId()) { + foundInLoaded = true; + break; + } + } + EXPECT_TRUE(foundInLoaded) << "Model should appear in GetLoadedModels() after Load()"; + + // Chat (non-streaming) + OpenAIChatClient client(*target); + + std::vector messages = {{"user", "Say hello in one word.", {}}}; + ChatSettings settings; + settings.max_tokens = 32; + auto response = client.CompleteChat(messages, settings); + EXPECT_TRUE(response.successful); + ASSERT_FALSE(response.choices.empty()); + ASSERT_TRUE(response.choices[0].message.has_value()); + EXPECT_FALSE(response.choices[0].message->content.empty()); + EXPECT_EQ(FinishReason::Stop, response.choices[0].finish_reason); + std::cout << "[E2E] Response: " << response.choices[0].message->content << "\n"; + + // Unload + target->Unload(); + EXPECT_FALSE(target->IsLoaded()); +} + +// =========================================================================== +// Streaming chat +// =========================================================================== + +TEST_F(EndToEndTest, DISABLED_StreamingChat) { +if (IsRunningInCI()) { + GTEST_SKIP() << "Skipped in CI (requires model download)"; +} + +auto& catalog = Manager::Instance().GetCatalog(); + auto* target = FindChatModel(catalog); + if (!target) { + GTEST_SKIP() << "No chat-capable model found in catalog"; + } + + target->Download(); + target->Load(); + ASSERT_TRUE(target->IsLoaded()); + + std::cout << "[E2E] Streaming with model: " << target->GetAlias() << "\n"; + + OpenAIChatClient client(*target); + + std::vector messages = {{"user", "Count from 1 to 5.", {}}}; + ChatSettings settings; + settings.max_tokens = 64; + settings.temperature = 0.0f; + + std::vector chunks; + std::string fullContent; + client.CompleteChatStreaming(messages, settings, [&](const ChatCompletionCreateResponse& chunk) { + chunks.push_back(chunk); + if (!chunk.choices.empty() && chunk.choices[0].delta.has_value() && !chunk.choices[0].delta->content.empty()) { + fullContent += chunk.choices[0].delta->content; + } + }); + + EXPECT_GT(chunks.size(), 0u) << "Should have received at least one streaming chunk"; + EXPECT_FALSE(fullContent.empty()) << "Accumulated streaming content should not be empty"; + std::cout << "[E2E] Streaming response: " << fullContent << "\n"; + + // Last chunk should have a stop finish reason + ASSERT_FALSE(chunks.empty()); + const auto& 
lastChunk = chunks.back(); + if (!lastChunk.choices.empty()) { + EXPECT_EQ(FinishReason::Stop, lastChunk.choices[0].finish_reason); + } + + target->Unload(); +} + +// =========================================================================== +// Chat with tool calling +// =========================================================================== + +TEST_F(EndToEndTest, DISABLED_ChatWithToolCalling) { + if (IsRunningInCI()) { + GTEST_SKIP() << "Skipped in CI (requires model download)"; + } + + auto& catalog = Manager::Instance().GetCatalog(); + auto* target = FindChatModel(catalog); + if (!target) { + GTEST_SKIP() << "No chat-capable model found in catalog"; + } + + // Check if the selected variant supports tool calling + bool supportsCalling = false; + auto* targetModel = dynamic_cast(target); + if (targetModel) { + for (const auto& v : targetModel->GetAllModelVariants()) { + if (v.GetInfo().supports_tool_calling.has_value() && *v.GetInfo().supports_tool_calling) { + supportsCalling = true; + break; + } + } + } + if (!supportsCalling) { + GTEST_SKIP() << "Model does not support tool calling"; + } + + target->Download(); + target->Load(); + ASSERT_TRUE(target->IsLoaded()); + + std::cout << "[E2E] Tool calling with model: " << target->GetAlias() << "\n"; + + OpenAIChatClient client(*target); + + std::vector tools = { + {"function", FunctionDefinition{"get_weather", "Get the current weather for a city.", + PropertyDefinition{"object", std::nullopt, + std::unordered_map{ + {"city", PropertyDefinition{"string", "The city name"}}}, + std::vector{"city"}}}}}; + + std::vector messages = { + {"system", "You are a helpful assistant. Use the provided tools when asked about weather."}, + {"user", "What is the weather in Seattle?"}}; + + ChatSettings settings; + settings.temperature = 0.0f; + settings.max_tokens = 256; + settings.tool_choice = ToolChoiceKind::Required; + + auto response = client.CompleteChat(messages, tools, settings); + EXPECT_TRUE(response.successful); + ASSERT_FALSE(response.choices.empty()); + + const auto& choice = response.choices[0]; + // With tool_choice = Required, the model should produce a tool call + if (choice.finish_reason == FinishReason::ToolCalls) { + ASSERT_TRUE(choice.message.has_value()); + ASSERT_FALSE(choice.message->tool_calls.empty()); + const auto& tc = choice.message->tool_calls[0]; + EXPECT_FALSE(tc.id.empty()); + ASSERT_TRUE(tc.function_call.has_value()); + EXPECT_EQ("get_weather", tc.function_call->name); + EXPECT_FALSE(tc.function_call->arguments.empty()); + std::cout << "[E2E] Tool call: " << tc.function_call->name << " args: " << tc.function_call->arguments << "\n"; + } + + target->Unload(); +} + +// =========================================================================== +// Audio transcription +// =========================================================================== + +TEST_F(EndToEndTest, DISABLED_AudioTranscription) { +if (IsRunningInCI()) { + GTEST_SKIP() << "Skipped in CI (requires model download + audio file)"; +} + +auto& catalog = Manager::Instance().GetCatalog(); + auto* target = FindAudioModel(catalog); + if (!target) { + GTEST_SKIP() << "No audio model found in catalog"; + } + + target->Download(); + target->Load(); + ASSERT_TRUE(target->IsLoaded()); + + std::cout << "[E2E] Audio model: " << target->GetAlias() << "\n"; + + OpenAIAudioClient client(*target); + + // Note: this test requires a valid audio file to be present. + // Skip if no test audio file is available. 
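+  // The file path is read from the FL_TEST_AUDIO_PATH environment variable below.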
+  const char* audioPath = std::getenv("FL_TEST_AUDIO_PATH");
+  if (!audioPath) {
+    target->Unload();
+    GTEST_SKIP() << "Set FL_TEST_AUDIO_PATH env var to a .wav file to run audio tests";
+  }
+
+  auto result = client.TranscribeAudio(audioPath);
+  EXPECT_FALSE(result.text.empty());
+  std::cout << "[E2E] Transcription: " << result.text << "\n";
+
+  target->Unload();
+}
+
+TEST_F(EndToEndTest, DISABLED_AudioTranscriptionStreaming) {
+  if (IsRunningInCI()) {
+    GTEST_SKIP() << "Skipped in CI (requires model download + audio file)";
+  }
+
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto* target = FindAudioModel(catalog);
+  if (!target) {
+    GTEST_SKIP() << "No audio model found in catalog";
+  }
+
+  target->Download();
+  target->Load();
+  ASSERT_TRUE(target->IsLoaded());
+
+  const char* audioPath = std::getenv("FL_TEST_AUDIO_PATH");
+  if (!audioPath) {
+    target->Unload();
+    GTEST_SKIP() << "Set FL_TEST_AUDIO_PATH env var to a .wav file to run audio tests";
+  }
+
+  OpenAIAudioClient client(*target);
+
+  std::string fullText;
+  int chunkCount = 0;
+  client.TranscribeAudioStreaming(audioPath, [&](const AudioCreateTranscriptionResponse& chunk) {
+    fullText += chunk.text;
+    chunkCount++;
+  });
+
+  EXPECT_GT(chunkCount, 0) << "Should have received at least one streaming chunk";
+  EXPECT_FALSE(fullText.empty());
+  std::cout << "[E2E] Streaming transcription (" << chunkCount << " chunks): " << fullText << "\n";
+
+  target->Unload();
+}
+
+// ===========================================================================
+// RemoveFromCache
+// ===========================================================================
+
+TEST_F(EndToEndTest, DISABLED_DownloadAndRemoveFromCache) {
+  if (IsRunningInCI()) {
+    GTEST_SKIP() << "Skipped in CI (requires model download)";
+  }
+
+  auto& catalog = Manager::Instance().GetCatalog();
+  auto* target = FindChatModel(catalog);
+  if (!target) {
+    GTEST_SKIP() << "No chat-capable model found in catalog";
+  }
+
+  target->Download();
+  EXPECT_TRUE(target->IsCached());
+
+  // RemoveFromCache should succeed without throwing.
+  EXPECT_NO_THROW(target->RemoveFromCache());
+
+  std::cout << "[E2E] RemoveFromCache completed for: " << target->GetAlias()
+            << " (IsCached=" << (target->IsCached() ? "true" : "false") << ")\n";
+}
diff --git a/sdk/cpp/test/mock_core.h b/sdk/cpp/test/mock_core.h
new file mode 100644
index 00000000..f89af91a
--- /dev/null
+++ b/sdk/cpp/test/mock_core.h
@@ -0,0 +1,158 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#include <fstream>
+#include <functional>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <string_view>
+#include <unordered_map>
+
+#include "foundry_local_internal_core.h"
+#include "logger.h"
+
+namespace foundry_local::Testing {
+
+  /// A mock implementation of IFoundryLocalCore for unit testing.
+  /// Register expected command -> response mappings before use.
+  class MockCore final : public Internal::IFoundryLocalCore {
+   public:
+    /// Handler signature: (command, dataArgument, callback, userData) -> response string.
+    using Handler = std::function<std::string(std::string_view, const std::string*, NativeCallbackFn, void*)>;
+
+    /// Register a fixed response for a command.
+    void OnCall(std::string command, std::string response) {
+      handlers_[std::move(command)] = [r = std::move(response)](std::string_view, const std::string*,
+                                                                NativeCallbackFn, void*) { return r; };
+    }
+
+    /// Register a custom handler for a command.
+    void OnCall(std::string command, Handler handler) { handlers_[std::move(command)] = std::move(handler); }
+
+    /// Register a handler that returns an error for a command.
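+    /// Subsequent call()s for that command return a CoreResponse with the error field set
+    /// (see the error branch in call() below).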
+    void OnCallThrow(std::string command, std::string errorMessage) {
+      errorResponses_[std::move(command)] = std::move(errorMessage);
+    }
+
+    /// Returns the number of times a command was called.
+    int GetCallCount(const std::string& command) const {
+      auto it = callCounts_.find(command);
+      return it != callCounts_.end() ? it->second : 0;
+    }
+
+    /// Returns the last data argument passed for a command.
+    const std::string& GetLastDataArg(const std::string& command) const {
+      auto it = lastDataArgs_.find(command);
+      if (it == lastDataArgs_.end()) {
+        static const std::string empty;
+        return empty;
+      }
+      return it->second;
+    }
+
+    // IFoundryLocalCore implementation
+    CoreResponse call(std::string_view command, ILogger& /*logger*/, const std::string* dataArgument = nullptr,
+                      NativeCallbackFn callback = nullptr, void* data = nullptr) const override {
+
+      std::string cmd(command);
+      const_cast<MockCore*>(this)->callCounts_[cmd]++;
+      if (dataArgument) {
+        const_cast<MockCore*>(this)->lastDataArgs_[cmd] = *dataArgument;
+      }
+
+      auto errIt = errorResponses_.find(cmd);
+      if (errIt != errorResponses_.end()) {
+        CoreResponse resp;
+        resp.error = errIt->second;
+        return resp;
+      }
+
+      auto it = handlers_.find(cmd);
+      if (it == handlers_.end()) {
+        throw std::runtime_error("MockCore: no handler registered for command '" + cmd + "'");
+      }
+
+      CoreResponse resp;
+      resp.data = it->second(command, dataArgument, callback, data);
+      return resp;
+    }
+
+    void unload() override {}
+
+   private:
+    std::unordered_map<std::string, Handler> handlers_;
+    std::unordered_map<std::string, std::string> errorResponses_;
+    std::unordered_map<std::string, int> callCounts_;
+    std::unordered_map<std::string, std::string> lastDataArgs_;
+  };
+
+  /// Read a file into a string. Throws on failure.
+  inline std::string ReadFile(const std::string& path) {
+    std::ifstream in(path, std::ios::in | std::ios::binary);
+    if (!in)
+      throw std::runtime_error("Failed to open test data file: " + path);
+    std::ostringstream contents;
+    contents << in.rdbuf();
+    return contents.str();
+  }
+
+  /// A mock core that reads model list, cached models and loaded models from JSON files on disk.
+  class FileBackedCore final : public Internal::IFoundryLocalCore {
+   public:
+    FileBackedCore(std::string modelListPath, std::string cachedModelsPath, std::string loadedModelsPath = "")
+        : modelListPath_(std::move(modelListPath)), cachedModelsPath_(std::move(cachedModelsPath)),
+          loadedModelsPath_(std::move(loadedModelsPath)) {}
+
+    static FileBackedCore FromModelList(const std::string& path) { return FileBackedCore(path, ""); }
+
+    static FileBackedCore FromBoth(const std::string& modelListPath, const std::string& cachedModelsPath) {
+      return FileBackedCore(modelListPath, cachedModelsPath);
+    }
+
+    static FileBackedCore FromAll(const std::string& modelListPath, const std::string& cachedModelsPath,
+                                  const std::string& loadedModelsPath) {
+      return FileBackedCore(modelListPath, cachedModelsPath, loadedModelsPath);
+    }
+
+    CoreResponse call(std::string_view command, ILogger& /*logger*/, const std::string* /*dataArgument*/ = nullptr,
+                      NativeCallbackFn /*callback*/ = nullptr, void* /*data*/ = nullptr) const override {
+
+      CoreResponse resp;
+
+      if (command == "get_catalog_name") {
+        resp.data = "TestCatalog";
+        return resp;
+      }
+
+      if (command == "get_model_list") {
+        resp.data = modelListPath_.empty() ? "[]" : ReadFile(modelListPath_);
+        return resp;
+      }
+
+      if (command == "get_cached_models") {
+        resp.data = cachedModelsPath_.empty() ? "[]" : ReadFile(cachedModelsPath_);
+        return resp;
+      }
+
+      if (command == "list_loaded_models") {
+        resp.data = loadedModelsPath_.empty() ? "[]" : ReadFile(loadedModelsPath_);
+        return resp;
+      }
+
+      resp.data = "{}";
+      return resp;
+    }
+
+    void unload() override {}
+
+   private:
+    std::string modelListPath_;
+    std::string cachedModelsPath_;
+    std::string loadedModelsPath_;
+  };
+
+} // namespace foundry_local::Testing
diff --git a/sdk/cpp/test/mock_object_factory.h b/sdk/cpp/test/mock_object_factory.h
new file mode 100644
index 00000000..face5272
--- /dev/null
+++ b/sdk/cpp/test/mock_object_factory.h
@@ -0,0 +1,64 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#pragma once
+
+#ifndef FL_TESTS
+#define FL_TESTS
+#endif
+
+#include "foundry_local.h"
+#include "foundry_local_internal_core.h"
+#include "logger.h"
+
+namespace foundry_local::Testing {
+
+  /// Factory to construct types for testing.
+  struct MockObjectFactory {
+    static ModelVariant CreateModelVariant(gsl::not_null<Internal::IFoundryLocalCore*> core, ModelInfo info,
+                                           gsl::not_null<ILogger*> logger) {
+      return ModelVariant(core, std::move(info), logger);
+    }
+
+    static std::unique_ptr<Catalog> CreateCatalog(gsl::not_null<Internal::IFoundryLocalCore*> core,
+                                                  gsl::not_null<ILogger*> logger) {
+      return std::make_unique<Catalog>(core, logger);
+    }
+
+    static Model CreateModel(gsl::not_null<Internal::IFoundryLocalCore*> core, gsl::not_null<ILogger*> logger) {
+      return Model(core, logger);
+    }
+
+    /// Push a variant into a Model's internal variant list.
+    static void AddVariantToModel(Model& model, ModelVariant variant) {
+      model.variants_.push_back(std::move(variant));
+    }
+
+    /// Set the selected variant on a Model.
+    static void SelectFirstVariant(Model& model) { model.selectedVariant_ = &model.variants_.front(); }
+
+    /// Helper to build a minimal ModelInfo with defaults.
+    static ModelInfo MakeModelInfo(std::string name, std::string alias = "", uint32_t version = 1) {
+      ModelInfo info;
+      info.id = name + ":" + std::to_string(version);
+      info.name = std::move(name);
+      info.alias = alias.empty() ? info.name : std::move(alias);
+      info.version = version;
+      info.provider_type = "test";
+      info.uri = "test://uri";
+      info.model_type = "text";
+      return info;
+    }
+
+    /// Helper to build a JSON string representing a model list entry.
+    static std::string MakeModelInfoJson(const std::string& name, const std::string& alias = "",
+                                         uint32_t version = 1, bool cached = false) {
+      std::string a = alias.empty() ? name : alias;
+      std::string id = name + ":" + std::to_string(version);
+      return R"({"id":")" + id + R"(","name":")" + name + R"(","version":)" + std::to_string(version) +
+             R"(,"alias":")" + a + R"(","providerType":"test","uri":"test://uri","modelType":"text","cached":)" +
+             (cached ? "true" : "false") + R"(,"createdAt":0})";
+    }
+  };
+
+} // namespace foundry_local::Testing
diff --git a/sdk/cpp/test/model_variant_test.cpp b/sdk/cpp/test/model_variant_test.cpp
new file mode 100644
index 00000000..c0ea9b39
--- /dev/null
+++ b/sdk/cpp/test/model_variant_test.cpp
@@ -0,0 +1,254 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#include <gtest/gtest.h>
+
+#include "mock_core.h"
+#include "mock_object_factory.h"
+#include "parser.h"
+#include "foundry_local_exception.h"
+
+#include <nlohmann/json.hpp>
+
+using namespace foundry_local;
+using namespace foundry_local::Testing;
+
+using Factory = MockObjectFactory;
+
+class ModelVariantTest : public ::testing::Test {
+protected:
+  MockCore core_;
+  NullLogger logger_;
+
+  ModelVariant MakeVariant(std::string name = "test-model", std::string alias = "test-alias", uint32_t version = 1) {
+    return Factory::CreateModelVariant(&core_, Factory::MakeModelInfo(name, alias, version), &logger_);
+  }
+};
+
+TEST_F(ModelVariantTest, GetInfo) {
+  auto variant = MakeVariant("my-model", "my-alias", 3);
+  const auto& info = variant.GetInfo();
+  EXPECT_EQ("my-model", info.name);
+  EXPECT_EQ("my-alias", info.alias);
+  EXPECT_EQ(3u, info.version);
+}
+
+TEST_F(ModelVariantTest, GetId) {
+  auto variant = MakeVariant("my-model");
+  EXPECT_EQ("my-model:1", variant.GetId());
+}
+
+TEST_F(ModelVariantTest, GetAlias) {
+  auto variant = MakeVariant("name", "alias");
+  EXPECT_EQ("alias", variant.GetAlias());
+}
+
+TEST_F(ModelVariantTest, GetVersion) {
+  auto variant = MakeVariant("name", "alias", 5);
+  EXPECT_EQ(5u, variant.GetVersion());
+}
+
+TEST_F(ModelVariantTest, IsLoaded_True) {
+  core_.OnCall("list_loaded_models", R"(["test-model:1"])");
+  auto variant = MakeVariant("test-model");
+  EXPECT_TRUE(variant.IsLoaded());
+}
+
+TEST_F(ModelVariantTest, IsLoaded_False) {
+  core_.OnCall("list_loaded_models", R"(["other-model:1"])");
+  auto variant = MakeVariant("test-model");
+  EXPECT_FALSE(variant.IsLoaded());
+}
+
+TEST_F(ModelVariantTest, IsLoaded_EmptyList) {
+  core_.OnCall("list_loaded_models", R"([])");
+  auto variant = MakeVariant("test-model");
+  EXPECT_FALSE(variant.IsLoaded());
+}
+
+TEST_F(ModelVariantTest, IsCached_True) {
+  core_.OnCall("get_cached_models", R"(["test-model:1"])");
+  auto variant = MakeVariant("test-model");
+  EXPECT_TRUE(variant.IsCached());
+}
+
+TEST_F(ModelVariantTest, IsCached_False) {
+  core_.OnCall("get_cached_models", R"(["other-model:1"])");
+  auto variant = MakeVariant("test-model");
+  EXPECT_FALSE(variant.IsCached());
+}
+
+TEST_F(ModelVariantTest, Load_CallsCore) {
+  core_.OnCall("load_model", "");
+  auto variant = MakeVariant("test-model");
+  variant.Load();
+  EXPECT_EQ(1, core_.GetCallCount("load_model"));
+
+  // Verify the data argument contains the model name
+  auto parsed = nlohmann::json::parse(core_.GetLastDataArg("load_model"));
+  EXPECT_EQ("test-model", parsed["Params"]["Model"].get<std::string>());
+}
+
+TEST_F(ModelVariantTest, Unload_CallsCore) {
+  core_.OnCall("unload_model", "");
+  auto variant = MakeVariant("test-model");
+  variant.Unload();
+  EXPECT_EQ(1, core_.GetCallCount("unload_model"));
+}
+
+TEST_F(ModelVariantTest, Unload_ThrowsOnError) {
+  core_.OnCallThrow("unload_model", "unload failed");
+  auto variant = MakeVariant("test-model");
+  EXPECT_THROW(variant.Unload(), Exception);
+}
+
+TEST_F(ModelVariantTest, Download_NoCallback) {
+  core_.OnCall("get_cached_models", R"([])");
+  core_.OnCall("download_model", "");
+  auto variant = MakeVariant("test-model");
+  variant.Download();
+  EXPECT_EQ(1, core_.GetCallCount("download_model"));
+}
+
+TEST_F(ModelVariantTest, Download_WithCallback) {
+  core_.OnCall("get_cached_models", R"([])");
+  core_.OnCall("download_model",
+               [](std::string_view, const std::string*, NativeCallbackFn callback, void* userData) -> std::string {
+                 // Simulate calling the progress callback
+                 if (callback && userData) {
+                   std::string progress = "50";
callback(progress.data(), static_cast(progress.size()), userData); + } + return ""; + }); + + auto variant = MakeVariant("test-model"); + float lastProgress = -1.0f; + variant.Download([&](float pct) { lastProgress = pct; }); + EXPECT_NEAR(50.0f, lastProgress, 0.01f); +} + +TEST_F(ModelVariantTest, RemoveFromCache_CallsCore) { + core_.OnCall("remove_cached_model", ""); + auto variant = MakeVariant("test-model"); + variant.RemoveFromCache(); + EXPECT_EQ(1, core_.GetCallCount("remove_cached_model")); +} + +TEST_F(ModelVariantTest, RemoveFromCache_ThrowsOnError) { + core_.OnCallThrow("remove_cached_model", "remove failed"); + auto variant = MakeVariant("test-model"); + EXPECT_THROW(variant.RemoveFromCache(), Exception); +} + +TEST_F(ModelVariantTest, GetPath_CallsCore) { + core_.OnCall("get_model_path", R"(C:\models\test)"); + auto variant = MakeVariant("test-model"); + const auto& path = variant.GetPath(); + EXPECT_EQ(std::filesystem::path(R"(C:\models\test)"), path); +} + +TEST_F(ModelVariantTest, GetPath_CachesResult) { + core_.OnCall("get_model_path", R"(C:\models\test)"); + auto variant = MakeVariant("test-model"); + variant.GetPath(); + variant.GetPath(); + // Should only call once due to caching + EXPECT_EQ(1, core_.GetCallCount("get_model_path")); +} + +class ModelTest : public ::testing::Test { +protected: + MockCore core_; + NullLogger logger_; + + Model MakeModel() { return Factory::CreateModel(&core_, &logger_); } + + ModelVariant MakeVariant(std::string name = "test-model", std::string alias = "test-alias", uint32_t version = 1) { + return Factory::CreateModelVariant(&core_, Factory::MakeModelInfo(name, alias, version), &logger_); + } + + /// Helper: create a Model with one variant and selectedVariant_ set. + Model MakeModelWithVariant(const std::string& name = "test-model", const std::string& alias = "test-alias") { + auto model = MakeModel(); + Factory::AddVariantToModel(model, MakeVariant(name, alias, 1)); + Factory::SelectFirstVariant(model); + return model; + } +}; + +TEST_F(ModelTest, SelectedVariant_ThrowsWhenEmpty) { + auto model = MakeModel(); + EXPECT_THROW(model.GetId(), Exception); +} + +TEST_F(ModelTest, AddVariant_AndSelect) { + auto model = MakeModel(); + Factory::AddVariantToModel(model, MakeVariant("v1", "alias", 1)); + Factory::SelectFirstVariant(model); + + EXPECT_EQ("v1:1", model.GetId()); + EXPECT_EQ("alias", model.GetAlias()); +} + +TEST_F(ModelTest, GetAllModelVariants) { + auto model = MakeModel(); + Factory::AddVariantToModel(model, MakeVariant("v1", "alias", 1)); + Factory::AddVariantToModel(model, MakeVariant("v2", "alias", 2)); + Factory::SelectFirstVariant(model); + + auto variants = model.GetAllModelVariants(); + EXPECT_EQ(2u, variants.size()); +} + +TEST_F(ModelTest, SelectVariant) { + auto model = MakeModel(); + Factory::AddVariantToModel(model, MakeVariant("v1", "alias", 1)); + Factory::AddVariantToModel(model, MakeVariant("v2", "alias", 2)); + Factory::SelectFirstVariant(model); + + const auto& v2 = model.GetAllModelVariants()[1]; + model.SelectVariant(v2); + EXPECT_EQ("v2:2", model.GetId()); +} + +TEST_F(ModelTest, SelectVariant_NotFound_Throws) { + auto model = MakeModel(); + Factory::AddVariantToModel(model, MakeVariant("v1", "alias", 1)); + Factory::SelectFirstVariant(model); + + auto external = MakeVariant("external", "ext-alias", 1); + EXPECT_THROW(model.SelectVariant(external), Exception); +} + +TEST_F(ModelTest, SelectVariant_ByIdFromExternalInstance) { + auto model = MakeModel(); + Factory::AddVariantToModel(model, MakeVariant("v1", 
"alias", 1)); + Factory::AddVariantToModel(model, MakeVariant("v2", "alias", 2)); + Factory::SelectFirstVariant(model); + + // Simulate a variant obtained externally (e.g. from Catalog::GetModelVariant) + // with the same id as v2 but a different object instance. + auto externalV2 = MakeVariant("v2", "alias", 2); + model.SelectVariant(externalV2); + EXPECT_EQ("v2:2", model.GetId()); +} + +TEST_F(ModelTest, DelegationMethods) { + // Test that Model delegates to SelectedVariant + core_.OnCall("list_loaded_models", R"(["test-model:1"])"); + core_.OnCall("get_cached_models", R"(["test-model:1"])"); + core_.OnCall("load_model", ""); + core_.OnCall("unload_model", ""); + core_.OnCall("download_model", ""); + core_.OnCall("get_model_path", R"(C:\test)"); + + auto model = MakeModelWithVariant("test-model", "alias"); + + EXPECT_TRUE(model.IsLoaded()); + EXPECT_TRUE(model.IsCached()); + model.Load(); + model.Unload(); + model.Download(); + EXPECT_EQ(std::filesystem::path(R"(C:\test)"), model.GetPath()); +} diff --git a/sdk/cpp/test/parser_and_types_test.cpp b/sdk/cpp/test/parser_and_types_test.cpp new file mode 100644 index 00000000..681e912f --- /dev/null +++ b/sdk/cpp/test/parser_and_types_test.cpp @@ -0,0 +1,417 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include + +#include "mock_core.h" +#include "mock_object_factory.h" +#include "parser.h" +#include "foundry_local_exception.h" +#include "core_interop_request.h" + +#include + +using namespace foundry_local; +using namespace foundry_local::Testing; + +class ParserTest : public ::testing::Test { +protected: + static nlohmann::json MinimalModelJson() { + return nlohmann::json{{"id", "model-1:1"}, {"name", "model-1"}, {"version", 1}, + {"alias", "my-model"}, {"providerType", "onnx"}, {"uri", "https://example.com/model"}, + {"modelType", "text"}, {"cached", false}, {"createdAt", 1700000000}}; + } +}; + +TEST_F(ParserTest, ParseDeviceType_CPU) { + EXPECT_EQ(DeviceType::CPU, ParsingUtils::parse_device_type("CPU")); +} + +TEST_F(ParserTest, ParseDeviceType_GPU) { + EXPECT_EQ(DeviceType::GPU, ParsingUtils::parse_device_type("GPU")); +} + +TEST_F(ParserTest, ParseDeviceType_NPU) { + EXPECT_EQ(DeviceType::NPU, ParsingUtils::parse_device_type("NPU")); +} + +TEST_F(ParserTest, ParseDeviceType_Unknown) { + EXPECT_EQ(DeviceType::Invalid, ParsingUtils::parse_device_type("FPGA")); +} + +TEST_F(ParserTest, ParseFinishReason_Stop) { + EXPECT_EQ(FinishReason::Stop, ParsingUtils::parse_finish_reason("stop")); +} + +TEST_F(ParserTest, ParseFinishReason_Length) { + EXPECT_EQ(FinishReason::Length, ParsingUtils::parse_finish_reason("length")); +} + +TEST_F(ParserTest, ParseFinishReason_ToolCalls) { + EXPECT_EQ(FinishReason::ToolCalls, ParsingUtils::parse_finish_reason("tool_calls")); +} + +TEST_F(ParserTest, ParseFinishReason_ContentFilter) { + EXPECT_EQ(FinishReason::ContentFilter, ParsingUtils::parse_finish_reason("content_filter")); +} + +TEST_F(ParserTest, ParseFinishReason_None) { + EXPECT_EQ(FinishReason::None, ParsingUtils::parse_finish_reason("unknown_value")); +} + +TEST_F(ParserTest, GetStringOrEmpty_Present) { + nlohmann::json j = {{"key", "value"}}; + EXPECT_EQ("value", ParsingUtils::get_string_or_empty(j, "key")); +} + +TEST_F(ParserTest, GetStringOrEmpty_Missing) { + nlohmann::json j = {{"other", "value"}}; + EXPECT_EQ("", ParsingUtils::get_string_or_empty(j, "key")); +} + +TEST_F(ParserTest, GetStringOrEmpty_NonString) { + nlohmann::json j = {{"key", 42}}; + EXPECT_EQ("", 
ParsingUtils::get_string_or_empty(j, "key")); +} + +TEST_F(ParserTest, GetOptString_Present) { + nlohmann::json j = {{"key", "hello"}}; + auto result = ParsingUtils::get_opt_string(j, "key"); + ASSERT_TRUE(result.has_value()); + EXPECT_EQ("hello", *result); +} + +TEST_F(ParserTest, GetOptString_Null) { + nlohmann::json j = {{"key", nullptr}}; + EXPECT_FALSE(ParsingUtils::get_opt_string(j, "key").has_value()); +} + +TEST_F(ParserTest, GetOptString_Missing) { + nlohmann::json j = {{"other", "v"}}; + EXPECT_FALSE(ParsingUtils::get_opt_string(j, "key").has_value()); +} + +TEST_F(ParserTest, GetOptInt_Present) { + nlohmann::json j = {{"key", 42}}; + auto result = ParsingUtils::get_opt_int(j, "key"); + ASSERT_TRUE(result.has_value()); + EXPECT_EQ(42, *result); +} + +TEST_F(ParserTest, GetOptInt_Missing) { + nlohmann::json j = {}; + EXPECT_FALSE(ParsingUtils::get_opt_int(j, "key").has_value()); +} + +TEST_F(ParserTest, GetOptBool_Present) { + nlohmann::json j = {{"key", true}}; + auto result = ParsingUtils::get_opt_bool(j, "key"); + ASSERT_TRUE(result.has_value()); + EXPECT_TRUE(*result); +} + +TEST_F(ParserTest, GetOptBool_Missing) { + nlohmann::json j = {}; + EXPECT_FALSE(ParsingUtils::get_opt_bool(j, "key").has_value()); +} + +TEST_F(ParserTest, ParseRuntime) { + nlohmann::json j = {{"deviceType", "GPU"}, {"executionProvider", "DML"}}; + Runtime r = j.get<Runtime>(); + EXPECT_EQ(DeviceType::GPU, r.device_type); + EXPECT_EQ("DML", r.execution_provider); +} + +TEST_F(ParserTest, ParsePromptTemplate) { + nlohmann::json j = {{"system", "sys"}, {"user", "usr"}, {"assistant", "asst"}, {"prompt", "p"}}; + PromptTemplate pt = j.get<PromptTemplate>(); + EXPECT_EQ("sys", pt.system); + EXPECT_EQ("usr", pt.user); + EXPECT_EQ("asst", pt.assistant); + EXPECT_EQ("p", pt.prompt); +} + +TEST_F(ParserTest, ParsePromptTemplate_MissingFields) { + nlohmann::json j = {{"system", "sys"}}; + PromptTemplate pt = j.get<PromptTemplate>(); + EXPECT_EQ("sys", pt.system); + EXPECT_EQ("", pt.user); + EXPECT_EQ("", pt.assistant); + EXPECT_EQ("", pt.prompt); +} + +TEST_F(ParserTest, ParseModelInfo_Minimal) { + auto j = MinimalModelJson(); + ModelInfo info = j.get<ModelInfo>(); + EXPECT_EQ("model-1:1", info.id); + EXPECT_EQ("model-1", info.name); + EXPECT_EQ(1u, info.version); + EXPECT_EQ("my-model", info.alias); + EXPECT_EQ("onnx", info.provider_type); + EXPECT_EQ("https://example.com/model", info.uri); + EXPECT_EQ("text", info.model_type); + EXPECT_FALSE(info.cached); + EXPECT_EQ(1700000000, info.created_at_unix); + EXPECT_FALSE(info.display_name.has_value()); + EXPECT_FALSE(info.publisher.has_value()); + EXPECT_FALSE(info.runtime.has_value()); + EXPECT_FALSE(info.prompt_template.has_value()); + EXPECT_FALSE(info.model_settings.has_value()); +} + +TEST_F(ParserTest, ParseModelInfo_WithOptionals) { + auto j = MinimalModelJson(); + j["displayName"] = "My Model"; + j["publisher"] = "TestPublisher"; + j["license"] = "MIT"; + j["fileSizeMb"] = 512; + j["supportsToolCalling"] = true; + j["maxOutputTokens"] = 4096; + j["runtime"] = {{"deviceType", "CPU"}, {"executionProvider", "ORT"}}; + + ModelInfo info = j.get<ModelInfo>(); + ASSERT_TRUE(info.display_name.has_value()); + EXPECT_EQ("My Model", *info.display_name); + ASSERT_TRUE(info.publisher.has_value()); + EXPECT_EQ("TestPublisher", *info.publisher); + ASSERT_TRUE(info.license.has_value()); + EXPECT_EQ("MIT", *info.license); + ASSERT_TRUE(info.file_size_mb.has_value()); + EXPECT_EQ(512u, *info.file_size_mb); + ASSERT_TRUE(info.supports_tool_calling.has_value()); + EXPECT_TRUE(*info.supports_tool_calling); +
ASSERT_TRUE(info.max_output_tokens.has_value()); + EXPECT_EQ(4096, *info.max_output_tokens); + ASSERT_TRUE(info.runtime.has_value()); + EXPECT_EQ(DeviceType::CPU, info.runtime->device_type); + EXPECT_EQ("ORT", info.runtime->execution_provider); +} + +TEST_F(ParserTest, ParseModelSettings) { + nlohmann::json j = {{"parameters", {{{"name", "p1"}, {"value", "v1"}}, {{"name", "p2"}}}}}; + ModelSettings ms = j.get<ModelSettings>(); + ASSERT_EQ(2u, ms.parameters.size()); + EXPECT_EQ("p1", ms.parameters[0].name); + ASSERT_TRUE(ms.parameters[0].value.has_value()); + EXPECT_EQ("v1", *ms.parameters[0].value); + EXPECT_EQ("p2", ms.parameters[1].name); + EXPECT_FALSE(ms.parameters[1].value.has_value()); +} + +TEST_F(ParserTest, ParseChatMessage) { + nlohmann::json j = {{"role", "user"}, {"content", "hello"}}; + ChatMessage msg = j.get<ChatMessage>(); + EXPECT_EQ("user", msg.role); + EXPECT_EQ("hello", msg.content); + EXPECT_TRUE(msg.tool_calls.empty()); + EXPECT_FALSE(msg.tool_call_id.has_value()); +} + +TEST_F(ParserTest, ParseChatMessage_WithToolCalls) { + nlohmann::json j = {{"role", "assistant"}, + {"content", "I'll call a tool."}, + {"tool_calls", + {{{"id", "call_abc123"}, + {"type", "function"}, + {"function", {{"name", "get_weather"}, {"arguments", "{\"city\": \"Seattle\"}"}}}}}}}; + ChatMessage msg = j.get<ChatMessage>(); + EXPECT_EQ("assistant", msg.role); + ASSERT_EQ(1u, msg.tool_calls.size()); + EXPECT_EQ("call_abc123", msg.tool_calls[0].id); + EXPECT_EQ("function", msg.tool_calls[0].type); + ASSERT_TRUE(msg.tool_calls[0].function_call.has_value()); + EXPECT_EQ("get_weather", msg.tool_calls[0].function_call->name); + EXPECT_EQ("{\"city\": \"Seattle\"}", msg.tool_calls[0].function_call->arguments); +} + +TEST_F(ParserTest, ParseChatMessage_WithToolCallId) { + nlohmann::json j = {{"role", "tool"}, {"content", "72 degrees and sunny"}, {"tool_call_id", "call_abc123"}}; + ChatMessage msg = j.get<ChatMessage>(); + EXPECT_EQ("tool", msg.role); + EXPECT_EQ("72 degrees and sunny", msg.content); + ASSERT_TRUE(msg.tool_call_id.has_value()); + EXPECT_EQ("call_abc123", *msg.tool_call_id); +} + +TEST_F(ParserTest, ParseFunctionCall) { + nlohmann::json j = {{"name", "multiply"}, {"arguments", "{\"a\": 1, \"b\": 2}"}}; + FunctionCall fc = j.get<FunctionCall>(); + EXPECT_EQ("multiply", fc.name); + EXPECT_EQ("{\"a\": 1, \"b\": 2}", fc.arguments); +} + +TEST_F(ParserTest, ParseFunctionCall_ObjectArguments) { + nlohmann::json j = {{"name", "add"}, {"arguments", {{"x", 10}}}}; + FunctionCall fc = j.get<FunctionCall>(); + EXPECT_EQ("add", fc.name); + EXPECT_EQ("{\"x\":10}", fc.arguments); +} + +TEST_F(ParserTest, ParseToolCall) { + nlohmann::json j = {{"id", "call_1"}, + {"type", "function"}, + {"function", {{"name", "search"}, {"arguments", "{\"query\": \"test\"}"}}}}; + ToolCall tc = j.get<ToolCall>(); + EXPECT_EQ("call_1", tc.id); + EXPECT_EQ("function", tc.type); + ASSERT_TRUE(tc.function_call.has_value()); + EXPECT_EQ("search", tc.function_call->name); +} + +TEST_F(ParserTest, SerializeToolDefinition) { + ToolDefinition tool; + tool.type = "function"; + tool.function.name = "get_weather"; + tool.function.description = "Get the current weather"; + tool.function.parameters = PropertyDefinition{"object", std::nullopt, + std::unordered_map<std::string, PropertyDefinition>{ + {"location", PropertyDefinition{"string", "The city name"}}}, + std::vector<std::string>{"location"}}; + + nlohmann::json j; + to_json(j, tool); + + EXPECT_EQ("function", j["type"].get<std::string>()); + EXPECT_EQ("get_weather", j["function"]["name"].get<std::string>()); + EXPECT_EQ("Get the current weather", j["function"]["description"].get<std::string>()); + EXPECT_EQ("object",
j["function"]["parameters"]["type"].get()); + ASSERT_TRUE(j["function"]["parameters"]["properties"].contains("location")); + EXPECT_EQ("string", j["function"]["parameters"]["properties"]["location"]["type"].get()); + ASSERT_EQ(1u, j["function"]["parameters"]["required"].size()); + EXPECT_EQ("location", j["function"]["parameters"]["required"][0].get()); +} + +TEST_F(ParserTest, SerializeToolDefinition_MinimalFunction) { + ToolDefinition tool; + tool.function.name = "noop"; + + nlohmann::json j; + to_json(j, tool); + + EXPECT_EQ("function", j["type"].get()); + EXPECT_EQ("noop", j["function"]["name"].get()); + EXPECT_FALSE(j["function"].contains("description")); + EXPECT_FALSE(j["function"].contains("parameters")); +} + +TEST_F(ParserTest, ToolChoiceToString) { + EXPECT_EQ("auto", ParsingUtils::tool_choice_to_string(ToolChoiceKind::Auto)); + EXPECT_EQ("none", ParsingUtils::tool_choice_to_string(ToolChoiceKind::None)); + EXPECT_EQ("required", ParsingUtils::tool_choice_to_string(ToolChoiceKind::Required)); +} + +TEST_F(ParserTest, ParseChatChoice_NonStreaming) { + nlohmann::json j = { + {"index", 0}, {"finish_reason", "stop"}, {"message", {{"role", "assistant"}, {"content", "Hi there!"}}}}; + ChatChoice c = j.get(); + EXPECT_EQ(0, c.index); + EXPECT_EQ(FinishReason::Stop, c.finish_reason); + ASSERT_TRUE(c.message.has_value()); + EXPECT_EQ("assistant", c.message->role); + EXPECT_EQ("Hi there!", c.message->content); + EXPECT_FALSE(c.delta.has_value()); +} + +TEST_F(ParserTest, ParseChatChoice_Streaming) { + nlohmann::json j = { + {"index", 0}, {"finish_reason", nullptr}, {"delta", {{"role", "assistant"}, {"content", "Hi"}}}}; + ChatChoice c = j.get(); + EXPECT_EQ(FinishReason::None, c.finish_reason); + EXPECT_FALSE(c.message.has_value()); + ASSERT_TRUE(c.delta.has_value()); + EXPECT_EQ("Hi", c.delta->content); +} + +TEST_F(ParserTest, ParseChatCompletionCreateResponse) { + nlohmann::json j = { + {"created", 1700000000}, + {"id", "chatcmpl-123"}, + {"IsDelta", false}, + {"Successful", true}, + {"HttpStatusCode", 200}, + {"choices", + {{{"index", 0}, {"finish_reason", "stop"}, {"message", {{"role", "assistant"}, {"content", "Hello!"}}}}}}}; + ChatCompletionCreateResponse r = j.get(); + EXPECT_EQ(1700000000, r.created); + EXPECT_EQ("chatcmpl-123", r.id); + EXPECT_FALSE(r.is_delta); + EXPECT_TRUE(r.successful); + EXPECT_EQ(200, r.http_status_code); + ASSERT_EQ(1u, r.choices.size()); + EXPECT_EQ("Hello!", r.choices[0].message->content); +} + +TEST(ChatCompletionCreateResponseTest, GetObject_NonDelta) { + ChatCompletionCreateResponse r; + r.is_delta = false; + EXPECT_STREQ("chat.completion", r.GetObject()); +} + +TEST(ChatCompletionCreateResponseTest, GetObject_Delta) { + ChatCompletionCreateResponse r; + r.is_delta = true; + EXPECT_STREQ("chat.completion.chunk", r.GetObject()); +} + +TEST(ChatCompletionCreateResponseTest, GetCreatedAtIso_Zero) { + ChatCompletionCreateResponse r; + r.created = 0; + EXPECT_EQ("", r.GetCreatedAtIso()); +} + +TEST(ChatCompletionCreateResponseTest, GetCreatedAtIso_ValidTimestamp) { + ChatCompletionCreateResponse r; + r.created = 1700000000; // 2023-11-14T22:13:20Z + std::string iso = r.GetCreatedAtIso(); + EXPECT_FALSE(iso.empty()); + EXPECT_EQ('Z', iso.back()); + EXPECT_NE(std::string::npos, iso.find("2023")); +} + +// ============================================================================= +// CoreInteropRequest tests +// ============================================================================= + +TEST(CoreInteropRequestTest, Command) { + CoreInteropRequest 
req("test_command"); + EXPECT_EQ("test_command", req.Command()); +} + +TEST(CoreInteropRequestTest, ToJson_NoParams) { + CoreInteropRequest req("cmd"); + std::string json = req.ToJson(); + auto parsed = nlohmann::json::parse(json); + EXPECT_FALSE(parsed.contains("Params")); +} + +TEST(CoreInteropRequestTest, ToJson_WithParams) { + CoreInteropRequest req("cmd"); + req.AddParam("key1", "value1"); + req.AddParam("key2", "value2"); + std::string json = req.ToJson(); + auto parsed = nlohmann::json::parse(json); + ASSERT_TRUE(parsed.contains("Params")); + EXPECT_EQ("value1", parsed["Params"]["key1"].get()); + EXPECT_EQ("value2", parsed["Params"]["key2"].get()); +} + +TEST(CoreInteropRequestTest, AddParam_Chaining) { + CoreInteropRequest req("cmd"); + auto& ref = req.AddParam("a", "1").AddParam("b", "2"); + EXPECT_EQ(&req, &ref); +} + +// ============================================================================= +// Exception tests +// ============================================================================= + +TEST(ExceptionTest, MessageOnly) { + Exception ex("test error"); + EXPECT_STREQ("test error", ex.what()); +} + +TEST(ExceptionTest, MessageAndLogger) { + NullLogger logger; + Exception ex("logged error", logger); + EXPECT_STREQ("logged error", ex.what()); +} \ No newline at end of file diff --git a/sdk/cpp/test/testdata/empty_models_list.json b/sdk/cpp/test/testdata/empty_models_list.json new file mode 100644 index 00000000..fe51488c --- /dev/null +++ b/sdk/cpp/test/testdata/empty_models_list.json @@ -0,0 +1 @@ +[] diff --git a/sdk/cpp/test/testdata/malformed_models_list.json b/sdk/cpp/test/testdata/malformed_models_list.json new file mode 100644 index 00000000..a04360f5 --- /dev/null +++ b/sdk/cpp/test/testdata/malformed_models_list.json @@ -0,0 +1 @@ +{this is not valid json[} diff --git a/sdk/cpp/test/testdata/missing_name_field_models_list.json b/sdk/cpp/test/testdata/missing_name_field_models_list.json new file mode 100644 index 00000000..da1e9465 --- /dev/null +++ b/sdk/cpp/test/testdata/missing_name_field_models_list.json @@ -0,0 +1,12 @@ +[ + { + "id": "model-missing-name:1", + "version": 1, + "alias": "test", + "providerType": "onnx", + "uri": "https://example.com/model", + "modelType": "text", + "cached": false, + "createdAt": 0 + } +] diff --git a/sdk/cpp/test/testdata/mixed_openai_and_local.json b/sdk/cpp/test/testdata/mixed_openai_and_local.json new file mode 100644 index 00000000..9d8de80b --- /dev/null +++ b/sdk/cpp/test/testdata/mixed_openai_and_local.json @@ -0,0 +1,35 @@ +[ + { + "id": "openai-gpt4:1", + "name": "openai-gpt4", + "version": 1, + "alias": "openai-gpt4", + "providerType": "openai", + "uri": "https://example.com/openai-gpt4", + "modelType": "text", + "cached": false, + "createdAt": 0 + }, + { + "id": "openai-whisper:1", + "name": "openai-whisper", + "version": 1, + "alias": "openai-whisper", + "providerType": "openai", + "uri": "https://example.com/openai-whisper", + "modelType": "audio", + "cached": false, + "createdAt": 0 + }, + { + "id": "local-phi-4:1", + "name": "local-phi-4", + "version": 1, + "alias": "phi-4", + "providerType": "onnx", + "uri": "https://example.com/phi-4", + "modelType": "text", + "cached": false, + "createdAt": 1700000000 + } +] diff --git a/sdk/cpp/test/testdata/real_models_list.json b/sdk/cpp/test/testdata/real_models_list.json new file mode 100644 index 00000000..284d3a1a --- /dev/null +++ b/sdk/cpp/test/testdata/real_models_list.json @@ -0,0 +1,88 @@ +[ + { + "id": "Phi-4-generic-gpu:1", + "name": "Phi-4-generic-gpu", + 
"version": 1, + "alias": "phi-4", + "displayName": "Phi-4 (GPU)", + "providerType": "onnx", + "uri": "https://example.com/phi-4-gpu", + "modelType": "text", + "publisher": "Microsoft", + "license": "MIT", + "fileSizeMb": 8192, + "supportsToolCalling": true, + "maxOutputTokens": 4096, + "cached": false, + "createdAt": 1700000000, + "runtime": { + "deviceType": "GPU", + "executionProvider": "DML" + }, + "promptTemplate": { + "system": "<|system|>", + "user": "<|user|>", + "assistant": "<|assistant|>", + "prompt": "<|prompt|>" + } + }, + { + "id": "Phi-4-generic-cpu:1", + "name": "Phi-4-generic-cpu", + "version": 1, + "alias": "phi-4", + "displayName": "Phi-4 (CPU)", + "providerType": "onnx", + "uri": "https://example.com/phi-4-cpu", + "modelType": "text", + "publisher": "Microsoft", + "license": "MIT", + "fileSizeMb": 4096, + "supportsToolCalling": false, + "maxOutputTokens": 2048, + "cached": false, + "createdAt": 1700000000, + "runtime": { + "deviceType": "CPU", + "executionProvider": "ORT" + } + }, + { + "id": "Mistral-7b-v0.2-generic-gpu:1", + "name": "Mistral-7b-v0.2-generic-gpu", + "version": 1, + "alias": "mistral-7b-v0.2", + "displayName": "Mistral 7B v0.2 (GPU)", + "providerType": "onnx", + "uri": "https://example.com/mistral-gpu", + "modelType": "text", + "publisher": "Mistral AI", + "license": "Apache-2.0", + "fileSizeMb": 14000, + "cached": false, + "createdAt": 1700100000, + "runtime": { + "deviceType": "GPU", + "executionProvider": "DML" + } + }, + { + "id": "Mistral-7b-v0.2-generic-cpu:1", + "name": "Mistral-7b-v0.2-generic-cpu", + "version": 1, + "alias": "mistral-7b-v0.2", + "displayName": "Mistral 7B v0.2 (CPU)", + "providerType": "onnx", + "uri": "https://example.com/mistral-cpu", + "modelType": "text", + "publisher": "Mistral AI", + "license": "Apache-2.0", + "fileSizeMb": 7000, + "cached": false, + "createdAt": 1700100000, + "runtime": { + "deviceType": "CPU", + "executionProvider": "ORT" + } + } +] diff --git a/sdk/cpp/test/testdata/single_cached_model.json b/sdk/cpp/test/testdata/single_cached_model.json new file mode 100644 index 00000000..76efa8e7 --- /dev/null +++ b/sdk/cpp/test/testdata/single_cached_model.json @@ -0,0 +1 @@ +["multi-v1-cpu:1"] diff --git a/sdk/cpp/test/testdata/three_variants_one_model.json b/sdk/cpp/test/testdata/three_variants_one_model.json new file mode 100644 index 00000000..fad0555d --- /dev/null +++ b/sdk/cpp/test/testdata/three_variants_one_model.json @@ -0,0 +1,41 @@ +[ + { + "id": "multi-v1-gpu:1", + "name": "multi-v1-gpu", + "version": 1, + "alias": "multi-model", + "displayName": "Multi Model v1 GPU", + "providerType": "onnx", + "uri": "https://example.com/multi-v1-gpu", + "modelType": "text", + "cached": false, + "createdAt": 1700000000, + "runtime": { "deviceType": "GPU", "executionProvider": "DML" } + }, + { + "id": "multi-v1-cpu:1", + "name": "multi-v1-cpu", + "version": 1, + "alias": "multi-model", + "displayName": "Multi Model v1 CPU", + "providerType": "onnx", + "uri": "https://example.com/multi-v1-cpu", + "modelType": "text", + "cached": true, + "createdAt": 1700000000, + "runtime": { "deviceType": "CPU", "executionProvider": "ORT" } + }, + { + "id": "multi-v1-npu:1", + "name": "multi-v1-npu", + "version": 1, + "alias": "multi-model", + "displayName": "Multi Model v1 NPU", + "providerType": "onnx", + "uri": "https://example.com/multi-v1-npu", + "modelType": "text", + "cached": false, + "createdAt": 1700000000, + "runtime": { "deviceType": "NPU", "executionProvider": "QNN" } + } +] diff --git 
a/sdk/cpp/test/testdata/valid_cached_models.json b/sdk/cpp/test/testdata/valid_cached_models.json new file mode 100644 index 00000000..2b144174 --- /dev/null +++ b/sdk/cpp/test/testdata/valid_cached_models.json @@ -0,0 +1 @@ +["Phi-4-generic-gpu:1", "Phi-4-generic-cpu:1"] diff --git a/sdk/cpp/test/testdata/valid_loaded_models.json b/sdk/cpp/test/testdata/valid_loaded_models.json new file mode 100644 index 00000000..4d2ef328 --- /dev/null +++ b/sdk/cpp/test/testdata/valid_loaded_models.json @@ -0,0 +1 @@ +["Phi-4-generic-gpu:1"] diff --git a/sdk/cpp/triplets/x64-windows-static-md.cmake b/sdk/cpp/triplets/x64-windows-static-md.cmake new file mode 100644 index 00000000..63d6cde2 --- /dev/null +++ b/sdk/cpp/triplets/x64-windows-static-md.cmake @@ -0,0 +1,3 @@ +set(VCPKG_TARGET_ARCHITECTURE x64) +set(VCPKG_CRT_LINKAGE dynamic) +set(VCPKG_LIBRARY_LINKAGE static) diff --git a/sdk/cpp/vcpkg-configuration.json b/sdk/cpp/vcpkg-configuration.json new file mode 100644 index 00000000..a5253fb7 --- /dev/null +++ b/sdk/cpp/vcpkg-configuration.json @@ -0,0 +1,6 @@ +{ + "default-registry": { + "kind": "builtin", + "baseline": "a9f0cd0345fb29cd227d802f1fd1917c28f8e5a3" + } +} diff --git a/sdk/cpp/vcpkg.json b/sdk/cpp/vcpkg.json new file mode 100644 index 00000000..ec08c349 --- /dev/null +++ b/sdk/cpp/vcpkg.json @@ -0,0 +1,10 @@ +{ + "name": "cppsdk", + "version-string": "0.1.0", + "dependencies": [ + "nlohmann-json", + "wil", + "ms-gsl", + "gtest" + ] +} From d891f921ef703e91b7d79c89ddd197af045d6c4b Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Mon, 13 Apr 2026 21:49:33 -0700 Subject: [PATCH 40/83] mega pipeline improvements (#592) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Unified Packaging Pipeline ## Overview Enables the packaging pipeline to build Core and all 4 SDKs in a single run — SDKs reference the just-built Core directly from pipeline artifacts instead of requiring Core to be published to a feed first. Also reduces the pipeline from 21 stages to 10, saving ~20 minutes per run. ## Changes ### Centralized Version Computation A `compute_version` stage produces a `version-info` artifact with `sdkVersion.txt` (semver), `pyVersion.txt` (PEP 440), and `flcVersion.txt` (NuGet). All downstream stages read from this single source — no timestamp drift between standard and WinML builds. ### FLC/ORT/ORT-GenAI Dependency Version Infrastructure Each SDK reads its native dependency versions (FLC Core, ORT, GenAI) from a JSON file at build time. The pipeline generates this file during FLC packaging with the exact versions that were just built, so SDK builds resolve the correct (unpublished) FLC without hitting a feed. Standard and WinML each get their own file (`deps_versions.json` / `deps_versions_winml.json`) with identical key structure, allowing both paths to build fully in parallel; an illustrative example of the file is shown below. ### Combined Build/Test/Packaging Stages Merged 10 separate test/packaging stages into the build templates. Pipeline: 21 → 10 stages.
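For reference, the dependency file described above has the following shape. This is an illustrative sketch only: the keys mirror what `package-core-steps.yml` writes later in this patch, but the version values here are placeholders rather than pinned releases.

```
{
  "foundry-local-core": {
    "nuget": "0.9.0-dev-20260413",
    "python": "0.9.0.dev20260413"
  },
  "onnxruntime": {
    "version": "1.24.4"
  },
  "onnxruntime-genai": {
    "version": "0.13.1"
  }
}
```

Standard builds read `sdk/deps_versions.json` and WinML builds read `sdk/deps_versions_winml.json`; since the key structure is identical, each SDK build script only has to pick the file matching its `isWinML` flag.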
### Bug Fixes - Removes dead code like nightly version support in C#/JS --------- Co-authored-by: Prathik Rao --- .github/workflows/build-rust-steps.yml | 7 + .pipelines/foundry-local-packaging.yml | 412 +++++------------- .pipelines/templates/build-cs-steps.yml | 59 ++- .pipelines/templates/build-js-steps.yml | 95 +++- .pipelines/templates/build-python-steps.yml | 53 ++- .pipelines/templates/build-rust-steps.yml | 71 ++- .pipelines/templates/package-core-steps.yml | 47 ++ .pipelines/templates/test-cs-steps.yml | 118 ----- .pipelines/templates/test-js-steps.yml | 123 ------ .pipelines/templates/test-python-steps.yml | 137 ------ .pipelines/templates/test-rust-steps.yml | 160 ------- .../templates/update-deps-versions-steps.yml | 41 ++ sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 11 +- sdk/deps_versions.json | 12 + sdk/deps_versions_winml.json | 12 + sdk/js/script/install-standard.cjs | 14 +- sdk/js/script/install-utils.cjs | 47 +- sdk/js/script/install-winml.cjs | 14 +- sdk/js/script/pack.cjs | 27 +- sdk/js/script/preinstall.cjs | 22 +- sdk/python/build_backend.py | 71 ++- sdk/python/requirements-base.txt | 3 + sdk/rust/Cargo.toml | 3 +- sdk/rust/build.rs | 165 +++++-- 24 files changed, 769 insertions(+), 955 deletions(-) delete mode 100644 .pipelines/templates/test-cs-steps.yml delete mode 100644 .pipelines/templates/test-js-steps.yml delete mode 100644 .pipelines/templates/test-python-steps.yml delete mode 100644 .pipelines/templates/test-rust-steps.yml create mode 100644 .pipelines/templates/update-deps-versions-steps.yml create mode 100644 sdk/deps_versions.json create mode 100644 sdk/deps_versions_winml.json create mode 100644 sdk/python/requirements-base.txt diff --git a/.github/workflows/build-rust-steps.yml b/.github/workflows/build-rust-steps.yml index f007b7ee..810b6c1e 100644 --- a/.github/workflows/build-rust-steps.yml +++ b/.github/workflows/build-rust-steps.yml @@ -58,6 +58,13 @@ jobs: Write-Host "Removed .cargo/config.toml crates-io redirect" } + # Copy deps_versions.json into the crate directory so cargo package + # can include it and build.rs can find it during verify.
+ - name: Copy deps_versions.json for crate packaging + shell: pwsh + working-directory: ${{ github.workspace }} + run: Copy-Item sdk/deps_versions.json sdk/rust/deps_versions.json + - name: Checkout test-data-shared from Azure DevOps if: ${{ inputs.run-integration-tests }} shell: pwsh diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index cb5766c0..d90a15e7 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -127,13 +127,13 @@ extends: Write-Host "Python version: $pyVersion" Write-Host "FLC version: $flcVersion" - # ── Build & Test FLC ── + # ── Build FLC ── - stage: build_core - displayName: 'Build & Test FLC' + displayName: 'Build Core' dependsOn: compute_version jobs: - job: flc_win_x64 - displayName: 'FLC win-x64' + displayName: 'Core win-x64' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -153,7 +153,7 @@ extends: platform: x64 - job: flc_win_arm64 - displayName: 'FLC win-arm64' + displayName: 'Core win-arm64' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -171,7 +171,7 @@ extends: platform: arm64 - job: flc_linux_x64 - displayName: 'FLC linux-x64' + displayName: 'Core linux-x64' pool: name: onnxruntime-Ubuntu2404-AMD-CPU os: linux @@ -189,7 +189,7 @@ extends: platform: x64 - job: flc_osx_arm64 - displayName: 'FLC osx-arm64' + displayName: 'Core osx-arm64' pool: name: Azure Pipelines vmImage: 'macOS-15' @@ -207,13 +207,13 @@ extends: flavor: osx-arm64 platform: arm64 - # ── Package FLC ── - - stage: package_core - displayName: 'Package FLC' - dependsOn: build_core - jobs: - job: package_flc - displayName: 'Package FLC' + displayName: 'Package Core' + dependsOn: + - flc_win_x64 + - flc_win_arm64 + - flc_linux_x64 + - flc_osx_arm64 pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -229,6 +229,9 @@ extends: - output: pipelineArtifact artifactName: 'flc-wheels' targetPath: '$(Build.ArtifactStagingDirectory)/flc-wheels' + - output: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Build.ArtifactStagingDirectory)/deps-versions' steps: - checkout: neutron-server clean: true @@ -282,7 +285,8 @@ extends: # ── Build C# SDK ── - stage: build_cs displayName: 'Build C# SDK' - dependsOn: package_core + dependsOn: + - build_core jobs: - job: cs_sdk displayName: 'Build' @@ -297,6 +301,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-nuget' targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' outputs: - output: pipelineArtifact artifactName: 'cs-sdk' @@ -313,11 +320,13 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: false flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' # ── Build JS SDK ── - stage: build_js displayName: 'Build JS SDK' - dependsOn: package_core + dependsOn: + - build_core jobs: - job: js_sdk displayName: 'Build' @@ -332,6 +341,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-nuget' targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' outputs: - output: pipelineArtifact artifactName: 'js-sdk' @@ -348,11 +360,13 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: false flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' # ── Build Python SDK ── 
- stage: build_python displayName: 'Build Python SDK' - dependsOn: package_core + dependsOn: + - build_core jobs: - job: python_sdk displayName: 'Build' @@ -367,6 +381,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-wheels' targetPath: '$(Pipeline.Workspace)/flc-wheels' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' outputs: - output: pipelineArtifact artifactName: 'python-sdk' @@ -383,11 +400,13 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: false flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' # ── Build Rust SDK ── - stage: build_rust displayName: 'Build Rust SDK' - dependsOn: package_core + dependsOn: + - build_core jobs: - job: rust_sdk displayName: 'Build' @@ -402,6 +421,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-nuget' targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' outputs: - output: pipelineArtifact artifactName: 'rust-sdk' @@ -418,133 +440,15 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: false flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' - # ── Test C# SDK (win-x64) ── - - stage: test_cs - displayName: 'Test C# SDK' - dependsOn: build_cs - jobs: - - job: test_cs_win_x64 - displayName: 'Test C# (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-nuget' - targetPath: '$(Pipeline.Workspace)/flc-nuget' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-cs-steps.yml@self - parameters: - version: ${{ parameters.version }} - isWinML: false - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' - - # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. - # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - - # ── Test JS SDK (win-x64) ── - - stage: test_js - displayName: 'Test JS SDK' - dependsOn: build_js - jobs: - - job: test_js_win_x64 - displayName: 'Test JS (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-nuget' - targetPath: '$(Pipeline.Workspace)/flc-nuget' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-js-steps.yml@self - parameters: - version: ${{ parameters.version }} - isWinML: false - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' - - # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. - # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. 
- - # ── Test Python SDK (win-x64) ── - - stage: test_python - displayName: 'Test Python SDK' - dependsOn: build_python - jobs: - - job: test_python_win_x64 - displayName: 'Test Python (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-wheels' - targetPath: '$(Pipeline.Workspace)/flc-wheels' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-python-steps.yml@self - parameters: - version: ${{ parameters.version }} - isWinML: false - flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' - - # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. - # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - - # ── Test Rust SDK (win-x64) ── - - stage: test_rust - displayName: 'Test Rust SDK' - dependsOn: build_rust - jobs: - - job: test_rust_win_x64 - displayName: 'Test Rust (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-nuget' - targetPath: '$(Pipeline.Workspace)/flc-nuget' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-rust-steps.yml@self - parameters: - isWinML: false - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' - - # TODO: Add macOS (osx-arm64) test job when a macOS ARM64 pool is available. - # TODO: Add Linux (linux-x64) test job when Linux onnxruntime dependency is stabilized. - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - - # ── Build & Test FLC (WinML) ── + # ── Build FLC (WinML) ── - stage: build_core_winml - displayName: 'Build & Test FLC WinML' + displayName: 'Build Core (WinML)' dependsOn: compute_version jobs: - job: flc_winml_win_x64 - displayName: 'FLC win-x64 (WinML)' + displayName: 'Core win-x64 (WinML)' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -565,7 +469,7 @@ extends: isWinML: true - job: flc_winml_win_arm64 - displayName: 'FLC win-arm64 (WinML)' + displayName: 'Core win-arm64 (WinML)' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -583,13 +487,11 @@ extends: platform: arm64 isWinML: true - # ── Package FLC (WinML) ── - - stage: package_core_winml - displayName: 'Package FLC WinML' - dependsOn: build_core_winml - jobs: - job: package_flc_winml - displayName: 'Package FLC (WinML)' + displayName: 'Package Core (WinML)' + dependsOn: + - flc_winml_win_x64 + - flc_winml_win_arm64 pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -605,6 +507,9 @@ extends: - output: pipelineArtifact artifactName: 'flc-wheels-winml' targetPath: '$(Build.ArtifactStagingDirectory)/flc-wheels' + - output: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: '$(Build.ArtifactStagingDirectory)/deps-versions' steps: - checkout: neutron-server clean: true @@ -643,8 +548,9 @@ extends: # ── Build C# SDK (WinML) ── - stage: build_cs_winml - displayName: 'Build C# SDK WinML' - dependsOn: package_core_winml + displayName: 'Build C# SDK (WinML)' + dependsOn: + - build_core_winml jobs: - job: cs_sdk_winml displayName: 'Build' @@ -659,6 +565,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-nuget-winml' targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + - input: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: 
'$(Pipeline.Workspace)/deps-versions-winml' outputs: - output: pipelineArtifact artifactName: 'cs-sdk-winml' @@ -675,12 +584,14 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: true flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' outputDir: '$(Build.ArtifactStagingDirectory)/cs-sdk-winml' # ── Build JS SDK (WinML) ── - stage: build_js_winml - displayName: 'Build JS SDK WinML' - dependsOn: package_core_winml + displayName: 'Build JS SDK (WinML)' + dependsOn: + - build_core_winml jobs: - job: js_sdk_winml displayName: 'Build' @@ -695,6 +606,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-nuget-winml' targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + - input: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: '$(Pipeline.Workspace)/deps-versions-winml' outputs: - output: pipelineArtifact artifactName: 'js-sdk-winml' @@ -711,11 +625,13 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: true flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' # ── Build Python SDK (WinML) ── - stage: build_python_winml - displayName: 'Build Python SDK WinML' - dependsOn: package_core_winml + displayName: 'Build Python SDK (WinML)' + dependsOn: + - build_core_winml jobs: - job: python_sdk_winml displayName: 'Build' @@ -730,6 +646,9 @@ extends: - input: pipelineArtifact artifactName: 'flc-wheels-winml' targetPath: '$(Pipeline.Workspace)/flc-wheels-winml' + - input: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: '$(Pipeline.Workspace)/deps-versions-winml' outputs: - output: pipelineArtifact artifactName: 'python-sdk-winml' @@ -746,152 +665,49 @@ extends: prereleaseId: ${{ parameters.prereleaseId }} isWinML: true flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels-winml' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' outputDir: '$(Build.ArtifactStagingDirectory)/python-sdk-winml' - # ── Build Rust SDK (WinML) ── - - stage: build_rust_winml - displayName: 'Build Rust SDK WinML' - dependsOn: package_core_winml - jobs: - - job: rust_sdk_winml - displayName: 'Build' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'version-info' - targetPath: '$(Pipeline.Workspace)/version-info' - - input: pipelineArtifact - artifactName: 'flc-nuget-winml' - targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' - outputs: - - output: pipelineArtifact - artifactName: 'rust-sdk-winml' - targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/build-rust-steps.yml@self - parameters: - version: ${{ parameters.version }} - isRelease: ${{ parameters.isRelease }} - prereleaseId: ${{ parameters.prereleaseId }} - isWinML: true - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' - outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' - - # ── Test C# SDK WinML (win-x64) ── - - stage: test_cs_winml - displayName: 'Test C# SDK WinML' - dependsOn: build_cs_winml - jobs: - - job: test_cs_winml_win_x64 - displayName: 'Test C# WinML (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-nuget-winml' - targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - 
lfs: true - - template: .pipelines/templates/test-cs-steps.yml@self - parameters: - version: ${{ parameters.version }} - isWinML: true - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' - - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - - # ── Test JS SDK WinML (win-x64) ── - - stage: test_js_winml - displayName: 'Test JS SDK WinML' - dependsOn: build_js_winml - jobs: - - job: test_js_winml_win_x64 - displayName: 'Test JS WinML (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-nuget-winml' - targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-js-steps.yml@self - parameters: - version: ${{ parameters.version }} - isWinML: true - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' - - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - - # ── Test Python SDK WinML (win-x64) ── - - stage: test_python_winml - displayName: 'Test Python SDK WinML' - dependsOn: build_python_winml - jobs: - - job: test_python_winml_win_x64 - displayName: 'Test Python WinML (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-wheels-winml' - targetPath: '$(Pipeline.Workspace)/flc-wheels-winml' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-python-steps.yml@self - parameters: - version: ${{ parameters.version }} - isWinML: true - flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels-winml' - - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - - # ── Test Rust SDK WinML (win-x64) ── - - stage: test_rust_winml - displayName: 'Test Rust SDK WinML' - dependsOn: build_rust_winml - jobs: - - job: test_rust_winml_win_x64 - displayName: 'Test Rust WinML (win-x64)' - pool: - name: onnxruntime-Win-CPU-2022 - os: windows - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-nuget-winml' - targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-rust-steps.yml@self - parameters: - isWinML: true - flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' - - # TODO: Add Windows ARM64 (win-arm64) test job when a Windows ARM64 pool is available. - + # Rust SDK has one package with different install options for standard vs WinML, + # so we only publish once under the standard stage and skip the WinML stage. Leaving + # it as a commented block in case we decide to publish a separate Rust WinML package in the future.
+ # # ── Build Rust SDK (WinML) ── + # - stage: build_rust_winml + # displayName: 'Build Rust SDK (WinML)' + # dependsOn: + # - build_core_winml + # jobs: + # - job: rust_sdk_winml + # displayName: 'Build' + # pool: + # name: onnxruntime-Win-CPU-2022 + # os: windows + # templateContext: + # inputs: + # - input: pipelineArtifact + # artifactName: 'version-info' + # targetPath: '$(Pipeline.Workspace)/version-info' + # - input: pipelineArtifact + # artifactName: 'flc-nuget-winml' + # targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + # - input: pipelineArtifact + # artifactName: 'deps-versions-winml' + # targetPath: '$(Pipeline.Workspace)/deps-versions-winml' + # outputs: + # - output: pipelineArtifact + # artifactName: 'rust-sdk-winml' + # targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' + # steps: + # - checkout: self + # clean: true + # - checkout: test-data-shared + # lfs: true + # - template: .pipelines/templates/build-rust-steps.yml@self + # parameters: + # version: ${{ parameters.version }} + # isRelease: ${{ parameters.isRelease }} + # prereleaseId: ${{ parameters.prereleaseId }} + # isWinML: true + # flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + # depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' + # outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' \ No newline at end of file diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml index 38f5b8bf..5d8f67c1 100644 --- a/.pipelines/templates/build-cs-steps.yml +++ b/.pipelines/templates/build-cs-steps.yml @@ -20,6 +20,10 @@ parameters: - name: prereleaseId type: string default: '' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' steps: # Set paths for multi-repo checkout - task: PowerShell@2 @@ -48,6 +52,13 @@ steps: Write-Host "Package version: $v" Write-Host "##vso[task.setvariable variable=packageVersion]$v" +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + # List downloaded artifact for debugging - task: PowerShell@2 displayName: 'List downloaded FLC artifact' @@ -74,14 +85,9 @@ steps: "@ - # Determine the FLC version from the .nupkg filename + # Point the local NuGet feed at the directory that actually contains the .nupkg $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } - $flcVer = $nupkg.BaseName -replace '^Microsoft\.AI\.Foundry\.Local\.Core(\.WinML)?\.', '' - Write-Host "##vso[task.setvariable variable=resolvedFlcVersion]$flcVer" - Write-Host "Resolved FLC version: $flcVer" - - # Point the local NuGet feed at the directory that actually contains the .nupkg $flcFeedDir = $nupkg.DirectoryName $nugetConfig = $nugetConfig -replace [regex]::Escape("${{ parameters.flcNugetDir }}"), $flcFeedDir $configPath = "$(Build.ArtifactStagingDirectory)/NuGet.config" @@ -182,3 +188,44 @@ steps: signConfigType: inlineSignParams inlineOperation: | [{"keyCode":"CP-401405","operationSetCode":"NuGetSign","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"},{"keyCode":"CP-401405","operationSetCode":"NuGetVerify","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"}] + +# ── Tests ── +- ${{ if eq(parameters.isWinML, true) }}: + - 
task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + & $installerPath --quiet --force + if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } + errorActionPreference: 'stop' + +- task: PowerShell@2 + displayName: 'Restore & build tests' + inputs: + targetType: inline + script: | + dotnet restore "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --configfile "$(customNugetConfig)" ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + + dotnet build "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --no-restore --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run SDK tests' + inputs: + targetType: inline + script: | + dotnet test "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --no-build --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/build-js-steps.yml b/.pipelines/templates/build-js-steps.yml index 3aa2908d..ca42fea1 100644 --- a/.pipelines/templates/build-js-steps.yml +++ b/.pipelines/templates/build-js-steps.yml @@ -17,6 +17,10 @@ parameters: - name: prereleaseId type: string default: '' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' steps: # Set paths for multi-repo checkout - task: PowerShell@2 @@ -45,7 +49,14 @@ steps: inputs: versionSpec: '20.x' -# Read version from the version-info artifact produced by compute_version stage. +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + +# Compute version - task: PowerShell@2 displayName: 'Set package version' inputs: @@ -55,13 +66,66 @@ steps: Write-Host "Package version: $v" Write-Host "##vso[task.setvariable variable=packageVersion]$v" -# Install dependencies including native binaries (FLC, ORT, GenAI) from NuGet feeds +# Install JS dependencies. When a pipeline-built FLC artifact is provided, +# use --ignore-scripts to skip the native binary download (which would 404 +# on the unpublished FLC package), then extract FLC from the local artifact +# and run the install script manually to fetch ORT/GenAI from public feeds. 
- task: Npm@1 - displayName: 'npm install' + displayName: 'npm install (skip native downloads)' inputs: command: custom workingDir: $(repoRoot)/sdk/js - customCommand: 'install' + customCommand: 'install --ignore-scripts' + +- task: PowerShell@2 + displayName: 'Extract FLC from pipeline-built artifact' + inputs: + targetType: inline + script: | + $os = 'win32' + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + $platformKey = "$os-$arch" + $rid = if ($arch -eq 'arm64') { 'win-arm64' } else { 'win-x64' } + + if ($IsLinux) { + $os = 'linux' + $platformKey = "$os-$arch" + $rid = "linux-$arch" + } elseif ($IsMacOS) { + $os = 'darwin' + $platformKey = "$os-$arch" + $rid = "osx-$arch" + } + + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + # Place FLC binary so the install script skips downloading it + $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + $nativeDir = "$extractDir/runtimes/$rid/native" + if (Test-Path $nativeDir) { + Get-ChildItem $nativeDir -File | ForEach-Object { + Copy-Item $_.FullName -Destination "$destDir/$($_.Name)" -Force + Write-Host "Placed $($_.Name) from pipeline artifact" + } + } else { + Write-Warning "No native binaries found at $nativeDir for RID $rid" + } + +- task: PowerShell@2 + displayName: 'Run native binary install (ORT + GenAI)' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/js" + node script/preinstall.cjs + node script/install-standard.cjs # Overwrite the FLC native binary with the one we just built - task: PowerShell@2 @@ -149,3 +213,26 @@ steps: $destDir = "$(Build.ArtifactStagingDirectory)/js-sdk" New-Item -ItemType Directory -Path $destDir -Force | Out-Null Copy-Item "$(repoRoot)/sdk/js/*.tgz" "$destDir/" + +# ── Tests ── +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + & $installerPath --quiet --force + if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } + errorActionPreference: 'stop' + +- task: Npm@1 + displayName: 'npm test' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'test' + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index a8658772..5ada9cb6 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -20,6 +20,10 @@ parameters: - name: prereleaseId type: string default: '' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' steps: # Set paths for multi-repo checkout - task: PowerShell@2 @@ -37,6 +41,13 @@ steps: inputs: versionSpec: '3.12' +# Load dependency 
versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + # List downloaded FLC wheels for debugging - task: PowerShell@2 displayName: 'List downloaded FLC wheels' @@ -103,12 +114,19 @@ steps: Write-Warning "No FLC wheel found matching $filter in ${{ parameters.flcWheelsDir }}" } -- ${{ if eq(parameters.isWinML, true) }}: - - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.1 - displayName: 'Install ORT native packages (WinML)' -- ${{ else }}: - - script: pip install onnxruntime-core==1.24.4 onnxruntime-genai-core==0.13.1 - displayName: 'Install ORT native packages' +- task: PowerShell@2 + displayName: 'Install ORT native packages' + inputs: + targetType: inline + script: | + $isWinML = "${{ parameters.isWinML }}" -eq "True" + $fileName = if ($isWinML) { "deps_versions_winml.json" } else { "deps_versions.json" } + $deps = Get-Content "$(repoRoot)/sdk/$fileName" -Raw | ConvertFrom-Json + $ortVer = $deps.onnxruntime.version + $genaiVer = $deps.'onnxruntime-genai'.version + Write-Host "Installing onnxruntime-core==$ortVer onnxruntime-genai-core==$genaiVer" + pip install "onnxruntime-core==$ortVer" "onnxruntime-genai-core==$genaiVer" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" displayName: 'Install pure python dependencies' @@ -147,3 +165,26 @@ steps: Copy-Item "$(repoRoot)/sdk/python/dist/*" "$destDir/" Write-Host "Staged wheels:" Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } + +# ── Tests ── +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + & $installerPath --quiet --force + if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } + errorActionPreference: 'stop' + +- script: pip install coverage "pytest>=7.0.0" "pytest-timeout>=2.1.0" + displayName: 'Install test dependencies' + +- script: python -m pytest test/ -v + displayName: 'Run tests' + workingDirectory: $(repoRoot)/sdk/python + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/build-rust-steps.yml b/.pipelines/templates/build-rust-steps.yml index ed3161e5..c0489f4f 100644 --- a/.pipelines/templates/build-rust-steps.yml +++ b/.pipelines/templates/build-rust-steps.yml @@ -20,6 +20,10 @@ parameters: type: string default: '$(Build.ArtifactStagingDirectory)/rust-sdk' displayName: 'Path to directory for the packaged crate' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' steps: # Set paths for multi-repo checkout - task: PowerShell@2 @@ -57,6 +61,24 @@ steps: Write-Host "Contents of ${{ parameters.flcNugetDir }}:" Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + +# Copy both deps_versions JSON files into the crate directory so cargo +# package includes them and build.rs can find the right one at
build time +# since there is only 1 package for both rust artifacts. +- task: PowerShell@2 + displayName: 'Copy deps_versions for crate packaging' + inputs: + targetType: inline + script: | + Copy-Item "$(repoRoot)/sdk/deps_versions.json" "$(repoRoot)/sdk/rust/deps_versions.json" -Force + Copy-Item "$(repoRoot)/sdk/deps_versions_winml.json" "$(repoRoot)/sdk/rust/deps_versions_winml.json" -Force + # Extract FLC native binaries from the pipeline-built .nupkg so that # build.rs finds them already present and skips downloading from the feed. - task: PowerShell@2 @@ -90,7 +112,9 @@ steps: $flcNativeDir = "$(Build.ArtifactStagingDirectory)/flc-native-rust" New-Item -ItemType Directory -Path $flcNativeDir -Force | Out-Null Get-ChildItem $nativeDir -File | Copy-Item -Destination $flcNativeDir -Force - Write-Host "##vso[task.setvariable variable=flcNativeDir]$flcNativeDir" + # Set FOUNDRY_NATIVE_OVERRIDE_DIR so build.rs copies these into OUT_DIR + # instead of trying to download the unpublished FLC Core from the feed. + Write-Host "##vso[task.setvariable variable=FOUNDRY_NATIVE_OVERRIDE_DIR]$flcNativeDir" Write-Host "Extracted FLC native binaries to $flcNativeDir`:" Get-ChildItem $flcNativeDir | ForEach-Object { Write-Host " $($_.Name)" } @@ -154,28 +178,6 @@ steps: Invoke-Expression "cargo build $features" if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } -# Overwrite the FLC core binary in cargo's OUT_DIR with the pipeline-built -# version so that integration tests use the freshly-built FLC. build.rs -# sets FOUNDRY_NATIVE_DIR to OUT_DIR, which the SDK checks at runtime. -- task: PowerShell@2 - displayName: 'Overwrite FLC binary with pipeline-built version' - inputs: - targetType: inline - script: | - # Find cargo's OUT_DIR for the foundry-local-sdk build script - $outDir = Get-ChildItem "$(repoRoot)/sdk/rust/target/debug/build" -Directory -Filter "foundry-local-sdk-*" -Recurse | - Where-Object { Test-Path "$($_.FullName)/out" } | - ForEach-Object { "$($_.FullName)/out" } | - Select-Object -First 1 - if (-not $outDir) { throw "Could not find cargo OUT_DIR for foundry-local-sdk" } - Write-Host "Cargo OUT_DIR: $outDir" - - # Copy pipeline-built FLC native binaries over the downloaded ones - Get-ChildItem "$(flcNativeDir)" -File -Filter "Microsoft.AI.Foundry.Local.Core.*" | ForEach-Object { - Copy-Item $_.FullName -Destination "$outDir/$($_.Name)" -Force - Write-Host "Overwrote $($_.Name) with pipeline-built version" - } - # --allow-dirty allows packaging with uncommitted changes (build.rs modifies generated files) - task: PowerShell@2 displayName: 'Package crate' @@ -198,3 +200,26 @@ steps: Copy-Item "$(repoRoot)/sdk/rust/target/package/*.crate" "$destDir/" Write-Host "Staged crates:" Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } + +# ── Tests ── +- task: PowerShell@2 + displayName: 'Run unit tests' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo test --lib $features" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run integration tests' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo test --tests $features -- --include-ignored --test-threads=1 --nocapture" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + env: + TF_BUILD: 'true' diff 
--git a/.pipelines/templates/package-core-steps.yml b/.pipelines/templates/package-core-steps.yml index 01697085..fdd54c28 100644 --- a/.pipelines/templates/package-core-steps.yml +++ b/.pipelines/templates/package-core-steps.yml @@ -246,3 +246,50 @@ steps: Write-Host "`nAll wheels:" Get-ChildItem $stagingDir -Filter "*.whl" | ForEach-Object { Write-Host " $($_.Name)" } + +# Write partial deps_versions.json for this variant. The merge_deps_versions +# stage combines standard + WinML partials into a single complete artifact. +- task: PowerShell@2 + displayName: 'Write deps_versions.json artifact' + inputs: + targetType: inline + script: | + $nsRoot = "$(nsRoot)" + [xml]$propsXml = Get-Content "$nsRoot/Directory.Packages.props" + $pg = $propsXml.Project.PropertyGroup + + $isWinML = "${{ parameters.isWinML }}" -eq "True" + + # Compute PEP 440 version from the NuGet flcVersion + $parts = "$(flcVersion)" -split '-' + $pyVer = if ($parts.Count -ge 3 -and $parts[1] -eq 'dev') { "$($parts[0]).dev$($parts[2])" } + elseif ($parts.Count -eq 2) { "$($parts[0])$($parts[1])" } + else { $parts[0] } + + # Both standard and WinML write a deps_versions.json with identical key + # structure. The pipeline produces separate artifacts (deps-versions-standard + # / deps-versions-winml) so SDK stages pick the right one via isWinML. + if ($isWinML) { + $deps = @{ + 'foundry-local-core' = @{ nuget = "$(flcVersion)"; python = $pyVer } + onnxruntime = @{ version = [string]$pg.OnnxRuntimeFoundryVersionForWinML } + 'onnxruntime-genai' = @{ version = [string]$pg.OnnxRuntimeGenAIFoundryVersion } + } + } else { + $deps = @{ + 'foundry-local-core' = @{ nuget = "$(flcVersion)"; python = $pyVer } + onnxruntime = @{ version = [string]$pg.OnnxRuntimeFoundryVersion } + 'onnxruntime-genai' = @{ version = [string]$pg.OnnxRuntimeGenAIFoundryVersion } + } + } + + # WinML artifact is named deps_versions_winml.json to match repo convention. + $fileName = if ($isWinML) { "deps_versions_winml.json" } else { "deps_versions.json" } + $json = $deps | ConvertTo-Json -Depth 3 + Write-Host "${fileName}:" + Write-Host $json + + $outDir = "$(Build.ArtifactStagingDirectory)/deps-versions" + New-Item -ItemType Directory -Path $outDir -Force | Out-Null + [System.IO.File]::WriteAllText("$outDir/$fileName", $json, [System.Text.UTF8Encoding]::new($false)) + Write-Host "Wrote $fileName to $outDir" diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml deleted file mode 100644 index 32ce661c..00000000 --- a/.pipelines/templates/test-cs-steps.yml +++ /dev/null @@ -1,118 +0,0 @@ -# Lightweight test-only steps for the C# SDK. -# Builds from source and runs tests — no signing or NuGet packing. 
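The 'Write deps_versions.json artifact' step above derives a PEP 440 Python version from the NuGet-style `flcVersion` by splitting on `-`. A minimal Python sketch of that mapping, for reference only; the function name `nuget_to_pep440` is illustrative and not part of the pipeline:

```
def nuget_to_pep440(nuget_version: str) -> str:
    """Mirror the pipeline's NuGet -> PEP 440 conversion (sketch)."""
    parts = nuget_version.split("-")
    if len(parts) >= 3 and parts[1] == "dev":
        # "0.9.0-dev-202603310538-f6efa8d3" -> "0.9.0.dev202603310538"
        return f"{parts[0]}.dev{parts[2]}"
    if len(parts) == 2:
        # "0.9.0-rc3" -> "0.9.0rc3"
        return parts[0] + parts[1]
    return parts[0]

assert nuget_to_pep440("0.9.0-dev-202603310538-f6efa8d3") == "0.9.0.dev202603310538"
assert nuget_to_pep440("0.9.0-rc3") == "0.9.0rc3"
assert nuget_to_pep440("1.0.0") == "1.0.0"
```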
-parameters: -- name: version - type: string -- name: isWinML - type: boolean - default: false -- name: flcNugetDir - type: string - displayName: 'Path to directory containing the FLC .nupkg' - -steps: -- task: PowerShell@2 - displayName: 'Set source paths' - inputs: - targetType: inline - script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" - $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" - Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" - Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" - Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - -- task: UseDotNet@2 - displayName: 'Use .NET 9 SDK' - inputs: - packageType: sdk - version: '9.0.x' - -- task: PowerShell@2 - displayName: 'List downloaded FLC artifact' - inputs: - targetType: inline - script: | - Write-Host "Contents of ${{ parameters.flcNugetDir }}:" - Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } - -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - - Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - - Write-Host "Installing Windows App SDK Runtime..." - & $installerPath --quiet --force - - if ($LASTEXITCODE -ne 0) { - Write-Error "Installation failed with exit code $LASTEXITCODE" - exit 1 - } - - Write-Host "Windows App SDK Runtime installed successfully." - errorActionPreference: 'stop' - -- task: PowerShell@2 - displayName: 'Create NuGet.config with local FLC feed' - inputs: - targetType: inline - script: | - $nugetConfig = @" - - - - - - - - - - "@ - $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 - if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } - $flcVer = $nupkg.BaseName -replace '^Microsoft\.AI\.Foundry\.Local\.Core(\.WinML)?\.', '' - Write-Host "##vso[task.setvariable variable=resolvedFlcVersion]$flcVer" - - $flcFeedDir = $nupkg.DirectoryName - $nugetConfig = $nugetConfig -replace [regex]::Escape("${{ parameters.flcNugetDir }}"), $flcFeedDir - $configPath = "$(Build.ArtifactStagingDirectory)/NuGet.config" - Set-Content -Path $configPath -Value $nugetConfig - Write-Host "##vso[task.setvariable variable=customNugetConfig]$configPath" - -- task: NuGetAuthenticate@1 - displayName: 'Authenticate NuGet feeds' - -- task: PowerShell@2 - displayName: 'Restore & build tests' - inputs: - targetType: inline - script: | - dotnet restore "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` - --configfile "$(customNugetConfig)" ` - /p:UseWinML=${{ parameters.isWinML }} ` - /p:FoundryLocalCoreVersion=$(resolvedFlcVersion) - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - - dotnet build "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` - --no-restore --configuration Release ` - /p:UseWinML=${{ parameters.isWinML }} - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - -- task: PowerShell@2 - displayName: 'Run SDK tests' - inputs: - targetType: inline - script: | - dotnet test 
"$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` - --no-build --configuration Release ` - /p:UseWinML=${{ parameters.isWinML }} - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml deleted file mode 100644 index 70e2a16b..00000000 --- a/.pipelines/templates/test-js-steps.yml +++ /dev/null @@ -1,123 +0,0 @@ -# Lightweight test-only steps for the JS SDK. -# Builds from source and runs tests — no npm pack or artifact staging. -parameters: -- name: version - type: string -- name: isWinML - type: boolean - default: false -- name: flcNugetDir - type: string - displayName: 'Path to directory containing the FLC .nupkg' - -steps: -- task: PowerShell@2 - displayName: 'Set source paths' - inputs: - targetType: inline - script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" - $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" - Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" - Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" - Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - - Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - - Write-Host "Installing Windows App SDK Runtime..." - & $installerPath --quiet --force - - if ($LASTEXITCODE -ne 0) { - Write-Error "Installation failed with exit code $LASTEXITCODE" - exit 1 - } - - Write-Host "Windows App SDK Runtime installed successfully." 
- errorActionPreference: 'stop' - -- task: PowerShell@2 - displayName: 'List downloaded FLC artifact' - inputs: - targetType: inline - script: | - Write-Host "Contents of ${{ parameters.flcNugetDir }}:" - Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } - -- task: NodeTool@0 - displayName: 'Use Node.js 20' - inputs: - versionSpec: '20.x' - -- task: Npm@1 - displayName: 'npm install' - inputs: - command: custom - workingDir: $(repoRoot)/sdk/js - customCommand: 'install' - -# Overwrite the FLC native binary with the pipeline-built one -- task: PowerShell@2 - displayName: 'Overwrite FLC with pipeline-built binary' - inputs: - targetType: inline - script: | - $os = 'win32' - $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } - $platformKey = "$os-$arch" - $rid = if ($arch -eq 'arm64') { 'win-arm64' } else { 'win-x64' } - - if ($IsLinux) { - $os = 'linux' - $platformKey = "$os-$arch" - $rid = "linux-$arch" - } elseif ($IsMacOS) { - $os = 'darwin' - $platformKey = "$os-$arch" - $rid = "osx-$arch" - } - - $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 - if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } - - $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract" - $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") - Copy-Item $nupkg.FullName $zip -Force - Expand-Archive -Path $zip -DestinationPath $extractDir -Force - - $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" - New-Item -ItemType Directory -Path $destDir -Force | Out-Null - $nativeDir = "$extractDir/runtimes/$rid/native" - if (Test-Path $nativeDir) { - Get-ChildItem $nativeDir -File | ForEach-Object { - Copy-Item $_.FullName -Destination "$destDir/$($_.Name)" -Force - Write-Host "Overwrote $($_.Name) with pipeline-built version" - } - } else { - Write-Warning "No native binaries found at $nativeDir for RID $rid" - } - -- task: Npm@1 - displayName: 'npm build' - inputs: - command: custom - workingDir: $(repoRoot)/sdk/js - customCommand: 'run build' - -- task: Npm@1 - displayName: 'npm test' - inputs: - command: custom - workingDir: $(repoRoot)/sdk/js - customCommand: 'test' - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml deleted file mode 100644 index c177efde..00000000 --- a/.pipelines/templates/test-python-steps.yml +++ /dev/null @@ -1,137 +0,0 @@ -# Lightweight test-only steps for the Python SDK. -# Builds from source and runs tests — no artifact staging. 
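The JS test steps above map the current machine to both an npm platform key (`win32-x64`, `linux-arm64`, ...) and a NuGet RID (`win-x64`, `osx-arm64`, ...). A Python sketch of the same branching, under the assumption that the probe runs on the machine being classified:

```
import platform
import sys

def node_platform_key_and_rid():
    """Return (npm platform key, NuGet RID) following the template's branches."""
    arch = "arm64" if platform.machine().lower() in ("arm64", "aarch64") else "x64"
    if sys.platform.startswith("linux"):
        os_name, rid = "linux", f"linux-{arch}"
    elif sys.platform == "darwin":
        os_name, rid = "darwin", f"osx-{arch}"
    else:
        os_name, rid = "win32", f"win-{arch}"
    return f"{os_name}-{arch}", rid
```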
-parameters: -- name: version - type: string -- name: isWinML - type: boolean - default: false -- name: flcWheelsDir - type: string - displayName: 'Path to directory containing the FLC wheels' - -steps: -- task: PowerShell@2 - displayName: 'Set source paths' - inputs: - targetType: inline - script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" - $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" - Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" - Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" - Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - - Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - - Write-Host "Installing Windows App SDK Runtime..." - & $installerPath --quiet --force - - if ($LASTEXITCODE -ne 0) { - Write-Error "Installation failed with exit code $LASTEXITCODE" - exit 1 - } - - Write-Host "Windows App SDK Runtime installed successfully." - errorActionPreference: 'stop' - -- task: UsePythonVersion@0 - displayName: 'Use Python 3.12' - inputs: - versionSpec: '3.12' - -- task: PowerShell@2 - displayName: 'List downloaded FLC wheels' - condition: and(succeeded(), ne('${{ parameters.flcWheelsDir }}', '')) - inputs: - targetType: inline - script: | - Write-Host "Contents of ${{ parameters.flcWheelsDir }}:" - Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } - -- task: PowerShell@2 - displayName: 'Configure pip for Azure Artifacts' - inputs: - targetType: inline - script: | - pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ - pip config set global.extra-index-url https://pypi.org/simple/ - pip config set global.pre true - -- script: python -m pip install build - displayName: 'Install build tool' - -- task: PowerShell@2 - displayName: 'Set SDK version' - inputs: - targetType: inline - script: | - Set-Content -Path "$(repoRoot)/sdk/python/src/version.py" -Value '__version__ = "${{ parameters.version }}"' - -- task: PowerShell@2 - displayName: 'Pre-install pipeline-built FLC wheel' - condition: and(succeeded(), ne('${{ parameters.flcWheelsDir }}', '')) - inputs: - targetType: inline - script: | - # Determine platform wheel tag for the current machine - $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'amd64' } - if ($IsLinux) { $platTag = "manylinux*x86_64" } - elseif ($IsMacOS) { $platTag = "macosx*$arch" } - else { $platTag = "win_$arch" } - - $filter = if ("${{ parameters.isWinML }}" -eq "True") { "foundry_local_core_winml*$platTag.whl" } else { "foundry_local_core-*$platTag.whl" } - $wheel = Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse -Filter $filter | Select-Object -First 1 - if ($wheel) { - Write-Host "Installing pipeline-built FLC wheel: $($wheel.FullName)" - pip install $($wheel.FullName) - } else { - Write-Warning "No FLC wheel found matching $filter" - } - -- ${{ if eq(parameters.isWinML, true) }}: - - script: pip install onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.1 - displayName: 'Install 
ORT native packages (WinML)' -- ${{ else }}: - - script: pip install onnxruntime-core==1.24.4 onnxruntime-genai-core==0.13.1 - displayName: 'Install ORT native packages' - -- script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" - displayName: 'Install pure python dependencies' - -- ${{ if not(parameters.isWinML) }}: - - script: python -m build --wheel --outdir dist/ - displayName: 'Build wheel' - workingDirectory: $(repoRoot)/sdk/python - -- ${{ if parameters.isWinML }}: - - script: python -m build --wheel -C winml=true --outdir dist/ - displayName: 'Build wheel (WinML)' - workingDirectory: $(repoRoot)/sdk/python - -- task: PowerShell@2 - displayName: 'Install built wheel' - inputs: - targetType: inline - script: | - $wheel = (Get-ChildItem "$(repoRoot)/sdk/python/dist/*.whl" | Select-Object -First 1).FullName - pip install --no-deps $wheel - -- script: pip install coverage pytest>=7.0.0 pytest-timeout>=2.1.0 - displayName: 'Install test dependencies' - -- script: python -m pytest test/ -v - displayName: 'Run tests' - workingDirectory: $(repoRoot)/sdk/python - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/test-rust-steps.yml b/.pipelines/templates/test-rust-steps.yml deleted file mode 100644 index 40b36a23..00000000 --- a/.pipelines/templates/test-rust-steps.yml +++ /dev/null @@ -1,160 +0,0 @@ -# Lightweight test-only steps for the Rust SDK. -# Builds from source and runs tests — no cargo package or artifact staging. -parameters: -- name: isWinML - type: boolean - default: false -- name: flcNugetDir - type: string - displayName: 'Path to directory containing the FLC .nupkg' - -steps: -- task: PowerShell@2 - displayName: 'Set source paths' - inputs: - targetType: inline - script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" - $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" - Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" - Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" - Write-Host "##vso[task.setvariable variable=FOUNDRY_TESTING_MODE]1" - -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - - Write-Host "Downloading Windows App SDK Runtime installer from $installerUrl..." - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - - Write-Host "Installing Windows App SDK Runtime..." - & $installerPath --quiet --force - - if ($LASTEXITCODE -ne 0) { - Write-Error "Installation failed with exit code $LASTEXITCODE" - exit 1 - } - - Write-Host "Windows App SDK Runtime installed successfully." 
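The 'Pre-install pipeline-built FLC wheel' step above selects a wheel by globbing on a platform tag (`win_amd64`, `manylinux*x86_64`, `macosx*arm64`) and a variant-specific name prefix. A Python sketch of that selection, assuming the same tag strings; `find_flc_wheel` is an illustrative name:

```
import fnmatch
import os

def find_flc_wheel(wheel_dir, winml, plat_tag):
    """Pick the first FLC wheel matching the platform tag (sketch of the
    template's Get-ChildItem -Recurse -Filter logic)."""
    pattern = (f"foundry_local_core_winml*{plat_tag}.whl" if winml
               else f"foundry_local_core-*{plat_tag}.whl")
    for root, _dirs, files in os.walk(wheel_dir):
        for name in sorted(files):
            if fnmatch.fnmatch(name, pattern):
                return os.path.join(root, name)
    return None
```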
- errorActionPreference: 'stop' - -- task: PowerShell@2 - displayName: 'List downloaded FLC artifact' - inputs: - targetType: inline - script: | - Write-Host "Contents of ${{ parameters.flcNugetDir }}:" - Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse | ForEach-Object { Write-Host $_.FullName } - -# Extract FLC native binaries from the pipeline-built .nupkg -- task: PowerShell@2 - displayName: 'Extract FLC native binaries' - inputs: - targetType: inline - script: | - $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 - if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } - - $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract-rust" - $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") - Copy-Item $nupkg.FullName $zip -Force - Expand-Archive -Path $zip -DestinationPath $extractDir -Force - - $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } - if ($IsLinux) { - $rid = "linux-$arch" - } elseif ($IsMacOS) { - $rid = "osx-$arch" - } else { - $rid = "win-$arch" - } - - $nativeDir = "$extractDir/runtimes/$rid/native" - if (-not (Test-Path $nativeDir)) { throw "No native binaries found at $nativeDir for RID $rid" } - - $flcNativeDir = "$(Build.ArtifactStagingDirectory)/flc-native-rust" - New-Item -ItemType Directory -Path $flcNativeDir -Force | Out-Null - Get-ChildItem $nativeDir -File | Copy-Item -Destination $flcNativeDir -Force - Write-Host "##vso[task.setvariable variable=flcNativeDir]$flcNativeDir" - Write-Host "Extracted FLC native binaries for $rid" - -- task: PowerShell@2 - displayName: 'Install Rust toolchain' - inputs: - targetType: inline - script: | - if ($IsWindows -or (-not $IsLinux -and -not $IsMacOS)) { - Invoke-WebRequest -Uri https://win.rustup.rs/x86_64 -OutFile rustup-init.exe - .\rustup-init.exe -y --default-toolchain stable --profile minimal -c clippy,rustfmt - Remove-Item rustup-init.exe - $cargoPath = "$env:USERPROFILE\.cargo\bin" - } else { - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal -c clippy,rustfmt - $cargoPath = "$env:HOME/.cargo/bin" - } - Write-Host "##vso[task.prependpath]$cargoPath" - -- task: PowerShell@2 - displayName: 'Use crates.io directly' - inputs: - targetType: inline - script: | - $configPath = "$(repoRoot)/sdk/rust/.cargo/config.toml" - if (Test-Path $configPath) { - Remove-Item $configPath - Write-Host "Removed .cargo/config.toml crates-io redirect" - } - -- task: PowerShell@2 - displayName: 'Build' - inputs: - targetType: inline - script: | - Set-Location "$(repoRoot)/sdk/rust" - $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } - Invoke-Expression "cargo build $features" - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - -# Overwrite FLC binary with pipeline-built version -- task: PowerShell@2 - displayName: 'Overwrite FLC binary with pipeline-built version' - inputs: - targetType: inline - script: | - $outDir = Get-ChildItem "$(repoRoot)/sdk/rust/target/debug/build" -Directory -Filter "foundry-local-sdk-*" -Recurse | - Where-Object { Test-Path "$($_.FullName)/out" } | - ForEach-Object { "$($_.FullName)/out" } | - Select-Object -First 1 - if (-not $outDir) { throw "Could not find cargo OUT_DIR for foundry-local-sdk" } - - Get-ChildItem "$(flcNativeDir)" -File -Filter "Microsoft.AI.Foundry.Local.Core.*" | 
ForEach-Object { - Copy-Item $_.FullName -Destination "$outDir/$($_.Name)" -Force - Write-Host "Overwrote $($_.Name) with pipeline-built version" - } - -- task: PowerShell@2 - displayName: 'Run unit tests' - inputs: - targetType: inline - script: | - Set-Location "$(repoRoot)/sdk/rust" - $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } - Invoke-Expression "cargo test --lib $features" - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - -- task: PowerShell@2 - displayName: 'Run integration tests' - inputs: - targetType: inline - script: | - Set-Location "$(repoRoot)/sdk/rust" - $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } - Invoke-Expression "cargo test --tests $features -- --include-ignored --test-threads=1 --nocapture" - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/update-deps-versions-steps.yml b/.pipelines/templates/update-deps-versions-steps.yml new file mode 100644 index 00000000..9d489ab7 --- /dev/null +++ b/.pipelines/templates/update-deps-versions-steps.yml @@ -0,0 +1,41 @@ +# Shared template to update deps_versions.json / deps_versions_winml.json +# from pipeline artifacts. Both files use identical key structure — the +# isWinML parameter determines which file gets overwritten. +parameters: +- name: repoRoot + type: string + default: '$(repoRoot)' +- name: artifactDir + type: string + default: '' + displayName: 'Path to artifact directory containing pipeline-generated deps_versions JSON' +- name: isWinML + type: boolean + default: false + +steps: +- task: PowerShell@2 + displayName: 'Update deps_versions from pipeline artifact' + inputs: + targetType: inline + script: | + $isWinML = "${{ parameters.isWinML }}" -eq "True" + $fileName = if ($isWinML) { "deps_versions_winml.json" } else { "deps_versions.json" } + $repoJson = "${{ parameters.repoRoot }}/sdk/$fileName" + $artifactDir = "${{ parameters.artifactDir }}" + + if ($artifactDir -eq '' -or -not (Test-Path "$artifactDir/$fileName")) { + throw "Pipeline-built $fileName not found in artifact directory: $artifactDir" + } + + Copy-Item "$artifactDir/$fileName" $repoJson -Force + Write-Host "Updated repo $fileName from pipeline artifact at $artifactDir" + + $deps = Get-Content $repoJson -Raw | ConvertFrom-Json + + # Log resolved versions for debugging + Write-Host "Dependency versions from ${fileName}:" + Write-Host " FLC Core (NuGet): $($deps.'foundry-local-core'.nuget)" + Write-Host " FLC Core (Python): $($deps.'foundry-local-core'.python)" + Write-Host " OnnxRuntime: $($deps.onnxruntime.version)" + Write-Host " GenAI: $($deps.'onnxruntime-genai'.version)" diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj index 26d74ff6..df8fc2cf 100644 --- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj +++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj @@ -98,10 +98,15 @@ - + + <_DepsVersionsPath Condition="'$(UseWinML)' == 'true'">$(MSBuildThisFileDirectory)..\..\deps_versions_winml.json + <_DepsVersionsPath Condition="'$(UseWinML)' != 'true'">$(MSBuildThisFileDirectory)..\..\deps_versions.json + <_DepsVersionsJson>$([System.IO.File]::ReadAllText('$(_DepsVersionsPath)')) $(FoundryLocalCoreVersion) - 0.9.0-dev-202603310538-f6efa8d3 - 0.9.0-dev-202603310538-f6efa8d3 + $([System.Text.RegularExpressions.Regex]::Match('$(_DepsVersionsJson)', '"nuget"\s*:\s*"([^"]+)"').Groups[1].Value) + 
$([System.Text.RegularExpressions.Regex]::Match('$(_DepsVersionsJson)', '"nuget"\s*:\s*"([^"]+)"').Groups[1].Value) + True diff --git a/sdk/deps_versions.json b/sdk/deps_versions.json new file mode 100644 index 00000000..1ecd6e6f --- /dev/null +++ b/sdk/deps_versions.json @@ -0,0 +1,12 @@ +{ + "foundry-local-core": { + "nuget": "0.9.0-dev-202603310538-f6efa8d3", + "python": "0.9.0.dev20260327060216" + }, + "onnxruntime": { + "version": "1.24.4" + }, + "onnxruntime-genai": { + "version": "0.13.1" + } +} diff --git a/sdk/deps_versions_winml.json b/sdk/deps_versions_winml.json new file mode 100644 index 00000000..dd17833a --- /dev/null +++ b/sdk/deps_versions_winml.json @@ -0,0 +1,12 @@ +{ + "foundry-local-core": { + "nuget": "0.9.0-dev-202603310538-f6efa8d3", + "python": "0.9.0.dev20260331004032" + }, + "onnxruntime": { + "version": "1.23.2.3" + }, + "onnxruntime-genai": { + "version": "0.13.1" + } +} diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs index 6901766d..19ceacfb 100644 --- a/sdk/js/script/install-standard.cjs +++ b/sdk/js/script/install-standard.cjs @@ -5,13 +5,21 @@ 'use strict'; +const fs = require('fs'); const os = require('os'); +const path = require('path'); const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); +// deps_versions.json lives at the package root when published, or at sdk/ in the repo. +const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions.json')) + ? path.resolve(__dirname, '..', 'deps_versions.json') + : path.resolve(__dirname, '..', '..', 'deps_versions.json'); +const deps = require(depsPath); + const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, - { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.24.4', feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.1', feed: NUGET_FEED }, + { name: 'Microsoft.AI.Foundry.Local.Core', version: deps['foundry-local-core'].nuget, feed: ORT_NIGHTLY_FEED }, + { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version, feed: NUGET_FEED }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai'].version, feed: NUGET_FEED }, ]; (async () => { diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs index 090a25e3..aa74f4d5 100644 --- a/sdk/js/script/install-utils.cjs +++ b/sdk/js/script/install-utils.cjs @@ -106,10 +106,29 @@ async function getBaseAddress(feedUrl) { return baseAddress.endsWith('/') ? baseAddress : baseAddress + '/'; } -async function installPackage(artifact, tempDir, binDir) { +async function installPackage(artifact, tempDir, binDir, skipIfPresent) { const pkgName = artifact.name; const pkgVer = artifact.version; + // Skip download if this package's main native binary is already present + // (e.g. pre-populated by CI from a locally-built artifact). + // Callers pass skipIfPresent=false when overriding (e.g. WinML over standard). + if (skipIfPresent) { + const prefix = os.platform() === 'win32' ? 
'' : 'lib'; + let expectedFile; + if (pkgName.includes('Foundry.Local.Core')) { + expectedFile = `Microsoft.AI.Foundry.Local.Core${EXT}`; + } else if (pkgName.includes('OnnxRuntimeGenAI')) { + expectedFile = `${prefix}onnxruntime-genai${EXT}`; + } else if (pkgName.includes('OnnxRuntime')) { + expectedFile = `${prefix}onnxruntime${EXT}`; + } + if (expectedFile && fs.existsSync(path.join(binDir, expectedFile))) { + console.log(` ${pkgName}: already present, skipping download.`); + return; + } + } + const baseAddress = await getBaseAddress(artifact.feed); const nameLower = pkgName.toLowerCase(); const verLower = pkgVer.toLowerCase(); @@ -136,14 +155,16 @@ async function installPackage(artifact, tempDir, binDir) { console.warn(` No files found for RID ${RID} in ${pkgName}.`); } - // Update platform package.json version for Core packages + // Overwrite FLC platform package.json so require.resolve can find the package if (pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { const pkgJsonPath = path.join(binDir, 'package.json'); - if (fs.existsSync(pkgJsonPath)) { - const pkgJson = JSON.parse(fs.readFileSync(pkgJsonPath, 'utf8')); - pkgJson.version = pkgVer; - fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgJson, null, 2)); - } + const pkgContent = { + name: `@foundry-local-core/${platformKey}`, + version: pkgVer, + description: `Native binaries for Foundry Local SDK (${platformKey})`, + private: true + }; + fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2)); } } @@ -153,13 +174,11 @@ async function runInstall(artifacts, options) { return; } - const force = options && options.force; const binDir = (options && options.binDir) || BIN_DIR; - - if (!force && fs.existsSync(binDir) && REQUIRED_FILES.every(f => fs.existsSync(path.join(binDir, f)))) { - console.log(`[foundry-local] Native libraries already installed.`); - return; - } + // When a custom binDir is provided (e.g. WinML overriding standard), + // don't skip packages whose output files already exist — we need to + // overwrite them with the variant's binaries. + const skipIfPresent = !(options && options.binDir); console.log(`[foundry-local] Installing native libraries for ${RID}...`); fs.mkdirSync(binDir, { recursive: true }); @@ -167,7 +186,7 @@ async function runInstall(artifacts, options) { const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'foundry-install-')); try { for (const artifact of artifacts) { - await installPackage(artifact, tempDir, binDir); + await installPackage(artifact, tempDir, binDir, skipIfPresent); } console.log('[foundry-local] Installation complete.'); } finally { diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index efa2041c..72f07b95 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -10,18 +10,26 @@ 'use strict'; +const fs = require('fs'); const path = require('path'); const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); +// WinML uses its own deps_versions_winml.json with the same key structure +// as the standard deps_versions.json — no variant-specific keys needed. +// deps_versions_winml.json lives at the package root when published, or at sdk/ in the repo. +const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions_winml.json')) + ? 
path.resolve(__dirname, '..', 'deps_versions_winml.json') + : path.resolve(__dirname, '..', '..', 'deps_versions_winml.json'); +const deps = require(depsPath); // Resolve foundry-local-sdk's binary directory const sdkRoot = path.dirname(require.resolve('foundry-local-sdk/package.json')); const platformKey = `${process.platform}-${process.arch}`; const binDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey); const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: '0.9.0-dev-202603310538-f6efa8d3', feed: ORT_NIGHTLY_FEED }, - { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: '1.23.2.3', feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: '0.13.1', feed: NUGET_FEED }, + { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: deps['foundry-local-core']['nuget'], feed: ORT_NIGHTLY_FEED }, + { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version, feed: NUGET_FEED }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai']['version'], feed: NUGET_FEED }, ]; (async () => { diff --git a/sdk/js/script/pack.cjs b/sdk/js/script/pack.cjs index 79a00828..f550043e 100644 --- a/sdk/js/script/pack.cjs +++ b/sdk/js/script/pack.cjs @@ -15,6 +15,15 @@ const pkgPath = path.join(__dirname, '..', 'package.json'); const original = fs.readFileSync(pkgPath, 'utf8'); const isWinML = process.argv[2] === 'winml'; +// deps_versions.json lives in the parent sdk/ directory; copy it into the +// JS package root so that npm pack includes it in the tarball. +const pkgRoot = path.join(__dirname, '..'); +const depsSource = path.join(pkgRoot, '..', 'deps_versions.json'); +const depsDest = path.join(pkgRoot, 'deps_versions.json'); +const depsWinmlSource = path.join(pkgRoot, '..', 'deps_versions_winml.json'); +const depsWinmlDest = path.join(pkgRoot, 'deps_versions_winml.json'); +const copiedFiles = []; + try { const pkg = JSON.parse(original); if (isWinML) { @@ -25,16 +34,28 @@ try { pkg.dependencies = { 'foundry-local-sdk': pkg.version }; pkg.scripts = { install: 'node script/install-winml.cjs' }; // No dist/ or preinstall needed — the standard SDK provides the JS code - pkg.files = ['script/install-winml.cjs', 'script/install-utils.cjs']; + pkg.files = ['script/install-winml.cjs', 'script/install-utils.cjs', 'deps_versions_winml.json']; delete pkg.main; delete pkg.types; delete pkg.optionalDependencies; + if (fs.existsSync(depsWinmlSource) && !fs.existsSync(depsWinmlDest)) { + fs.copyFileSync(depsWinmlSource, depsWinmlDest); + copiedFiles.push(depsWinmlDest); + } } else { - pkg.files = ['dist', 'script/install-standard.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs']; + pkg.files = ['dist', 'script/install-standard.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs', 'deps_versions.json']; + if (fs.existsSync(depsSource) && !fs.existsSync(depsDest)) { + fs.copyFileSync(depsSource, depsDest); + copiedFiles.push(depsDest); + } } fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2)); - execSync('npm pack', { cwd: path.join(__dirname, '..'), stdio: 'inherit' }); + execSync('npm pack', { cwd: pkgRoot, stdio: 'inherit' }); } finally { // Always restore original package.json fs.writeFileSync(pkgPath, original); + // Clean up copied deps_versions files + for (const f of copiedFiles) { + if (fs.existsSync(f)) fs.unlinkSync(f); + } } diff --git a/sdk/js/script/preinstall.cjs b/sdk/js/script/preinstall.cjs index 8cd953d2..99e805d7 100644 --- a/sdk/js/script/preinstall.cjs +++ 
b/sdk/js/script/preinstall.cjs @@ -35,18 +35,16 @@ for (const platform of ALL_PLATFORMS) { } const pkgJsonPath = path.join(dir, 'package.json'); - if (!fs.existsSync(pkgJsonPath)) { - const pkgContent = { - name: `@foundry-local-core/${platform.key}`, - version: "0.0.0", // Placeholder version, will be replaced during script/install-utils.cjs (installPackage()) - description: `Native binaries for Foundry Local SDK (${platform.key})`, - os: [platform.os], - cpu: [platform.cpu], - private: true - }; - fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2)); - console.log(` Created skeleton for ${platform.key}`); - } + const pkgContent = { + name: `@foundry-local-core/${platform.key}`, + version: "0.0.0", + description: `Native binaries for Foundry Local SDK (${platform.key})`, + os: [platform.os], + cpu: [platform.cpu], + private: true + }; + fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2)); + console.log(` Created skeleton for ${platform.key}`); } console.log('[foundry-local] Preinstall complete.'); diff --git a/sdk/python/build_backend.py b/sdk/python/build_backend.py index 1bdf6cbb..57e96286 100644 --- a/sdk/python/build_backend.py +++ b/sdk/python/build_backend.py @@ -30,8 +30,8 @@ from __future__ import annotations import contextlib +import json import os -import shutil from collections.abc import Generator from pathlib import Path @@ -44,13 +44,52 @@ _PROJECT_ROOT = Path(__file__).parent _PYPROJECT = _PROJECT_ROOT / "pyproject.toml" _REQUIREMENTS = _PROJECT_ROOT / "requirements.txt" -_REQUIREMENTS_WINML = _PROJECT_ROOT / "requirements-winml.txt" +_REQUIREMENTS_BASE = _PROJECT_ROOT / "requirements-base.txt" # The exact string in pyproject.toml to patch for the WinML variant. _STANDARD_NAME = 'name = "foundry-local-sdk"' _WINML_NAME = 'name = "foundry-local-sdk-winml"' +# --------------------------------------------------------------------------- +# Requirements generation from deps_versions.json +# --------------------------------------------------------------------------- + + +def _load_deps_versions(*, winml: bool) -> dict: + """Load the appropriate deps_versions JSON file. + + Standard and WinML each have their own file with identical key structure, + so callers never need variant-specific key names. 
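Both variants of the versions file share the key structure shown in the new sdk/deps_versions.json later in this patch. A quick way to inspect it, assuming the repo layout; `utf-8-sig` tolerates a PowerShell-written BOM:

```
import json
from pathlib import Path

deps = json.loads(Path("sdk/deps_versions.json").read_text(encoding="utf-8-sig"))
# Keys are identical across the standard and WinML variants:
print(deps["foundry-local-core"]["nuget"])    # NuGet version, e.g. 0.9.0-dev-...
print(deps["foundry-local-core"]["python"])   # PEP 440 version for wheels
print(deps["onnxruntime"]["version"])
print(deps["onnxruntime-genai"]["version"])
```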
+ """ + filename = "deps_versions_winml.json" if winml else "deps_versions.json" + filepath = _PROJECT_ROOT.parent / filename + with open(filepath, encoding="utf-8-sig") as f: + return json.load(f) + + +def _generate_requirements(*, winml: bool) -> str: + """Generate requirements.txt content from base deps + deps_versions.json.""" + base = _REQUIREMENTS_BASE.read_text(encoding="utf-8").rstrip("\n") + deps = _load_deps_versions(winml=winml) + + if winml: + requirement_lines = [ + f"foundry-local-core-winml=={deps['foundry-local-core']['python']}", + f"onnxruntime-core=={deps['onnxruntime']['version']}", + f"onnxruntime-genai-core=={deps['onnxruntime-genai']['version']}", + ] + else: + requirement_lines = [ + f"foundry-local-core=={deps['foundry-local-core']['python']}", + f"""onnxruntime-gpu=={deps['onnxruntime']['version']}; platform_system == "Linux" """.rstrip(), + f"""onnxruntime-core=={deps['onnxruntime']['version']}; platform_system != "Linux" """.rstrip(), + f"""onnxruntime-genai-cuda=={deps['onnxruntime-genai']['version']}; platform_system == "Linux" """.rstrip(), + f"""onnxruntime-genai-core=={deps['onnxruntime-genai']['version']}; platform_system != "Linux" """.rstrip(), + ] + return f"{base}\n" + "\n".join(requirement_lines) + "\n" + + # --------------------------------------------------------------------------- # Variant detection # --------------------------------------------------------------------------- @@ -74,13 +113,12 @@ def _is_winml(config_settings: dict | None) -> bool: @contextlib.contextmanager def _patch_for_winml() -> Generator[None, None, None]: - """Temporarily patch ``pyproject.toml`` and ``requirements.txt`` for WinML. + """Temporarily patch ``pyproject.toml`` and generate ``requirements.txt`` for WinML. - Both files are restored to their original content in the ``finally`` - block, even if the build raises an exception. + ``pyproject.toml`` is restored in the ``finally`` block. + ``requirements.txt`` is left in place (generated from deps_versions.json). """ pyproject_original = _PYPROJECT.read_text(encoding="utf-8") - requirements_original = _REQUIREMENTS.read_text(encoding="utf-8") try: # Patch package name (simple string replacement — no TOML writer needed) patched_pyproject = pyproject_original.replace(_STANDARD_NAME, _WINML_NAME, 1) @@ -90,21 +128,24 @@ def _patch_for_winml() -> Generator[None, None, None]: "WinML name patch failed." 
) _PYPROJECT.write_text(patched_pyproject, encoding="utf-8") - - # Swap requirements.txt with the WinML variant - shutil.copy2(_REQUIREMENTS_WINML, _REQUIREMENTS) - + _REQUIREMENTS.write_text(_generate_requirements(winml=True), encoding="utf-8") yield finally: _PYPROJECT.write_text(pyproject_original, encoding="utf-8") - _REQUIREMENTS.write_text(requirements_original, encoding="utf-8") + + +@contextlib.contextmanager +def _patch_standard_deps() -> Generator[None, None, None]: + """Generate ``requirements.txt`` from base deps + ``deps_versions.json``.""" + _REQUIREMENTS.write_text(_generate_requirements(winml=False), encoding="utf-8") + yield def _apply_patches(config_settings: dict | None): """Return a context manager that applies the appropriate patches.""" if _is_winml(config_settings): return _patch_for_winml() - return contextlib.nullcontext() + return _patch_standard_deps() # --------------------------------------------------------------------------- @@ -148,7 +189,5 @@ def get_requires_for_build_sdist(config_settings=None): def build_sdist(sdist_directory, config_settings=None): - if _is_winml(config_settings): - with _patch_for_winml(): - return _sb.build_sdist(sdist_directory, config_settings) - return _sb.build_sdist(sdist_directory, config_settings) + with _apply_patches(config_settings): + return _sb.build_sdist(sdist_directory, config_settings) diff --git a/sdk/python/requirements-base.txt b/sdk/python/requirements-base.txt new file mode 100644 index 00000000..dfc8d718 --- /dev/null +++ b/sdk/python/requirements-base.txt @@ -0,0 +1,3 @@ +pydantic>=2.0.0 +requests>=2.32.4 +openai>=2.24.0 diff --git a/sdk/rust/Cargo.toml b/sdk/rust/Cargo.toml index 2a6292b7..af6a64f2 100644 --- a/sdk/rust/Cargo.toml +++ b/sdk/rust/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "foundry-local-sdk" -version = "0.1.0" +version = "1.0.0" edition = "2021" license = "MIT" readme = "README.md" @@ -8,6 +8,7 @@ description = "Local AI model inference powered by the Foundry Local Core engine homepage = "https://www.foundrylocal.ai/" repository = "https://github.com/microsoft/Foundry-Local" documentation = "https://github.com/microsoft/Foundry-Local/blob/main/sdk/rust/docs/api.md" +include = ["src/**", "build.rs", "Cargo.toml", "README.md", "LICENSE", "deps_versions.json", "deps_versions_winml.json"] [features] default = [] diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs index 999bca3d..7daf7a73 100644 --- a/sdk/rust/build.rs +++ b/sdk/rust/build.rs @@ -7,11 +7,64 @@ const NUGET_FEED: &str = "https://api.nuget.org/v3/index.json"; const ORT_NIGHTLY_FEED: &str = "https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json"; -const CORE_VERSION: &str = "0.9.0.8-rc3"; -const ORT_VERSION: &str = "1.24.4"; -const GENAI_VERSION: &str = "0.13.1"; +/// Versions loaded from deps_versions.json (or deps_versions_winml.json). +/// Both files share the same key structure — the build script picks the +/// right file based on the winml cargo feature. +struct DepsVersions { + core: String, + ort: String, + genai: String, +} + +fn load_deps_versions() -> DepsVersions { + let winml = env::var("CARGO_FEATURE_WINML").is_ok(); + let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap_or_default(); + let manifest_path = Path::new(&manifest_dir); + + // Standard and WinML each have their own file with identical key structure. 
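The build.rs change above resolves the versions file in two places, the crate root for a packaged crate and the parent sdk/ directory for the repo layout, and strips a UTF-8 BOM before parsing. A Python sketch of the same resolution; `load_deps_versions` here is an illustrative stand-in, not the Rust function:

```
import json
from pathlib import Path

def load_deps_versions(manifest_dir: Path, winml: bool) -> dict:
    """Look in the crate root first (packaged crate), then in the parent
    sdk/ directory (repo layout); tolerate a PowerShell-written BOM."""
    name = "deps_versions_winml.json" if winml else "deps_versions.json"
    path = manifest_dir / name
    if not path.exists():
        path = manifest_dir.parent / name
    text = path.read_text(encoding="utf-8-sig")  # utf-8-sig drops the BOM
    return json.loads(text)
```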
+ let filename = if winml { + "deps_versions_winml.json" + } else { + "deps_versions.json" + }; + + // Check manifest dir first (packaged crate), then parent (repo layout) + let json_path = if manifest_path.join(filename).exists() { + manifest_path.join(filename) + } else { + manifest_path.join("..").join(filename) + }; + + // Tell Cargo to rebuild if the versions file changes + println!( + "cargo:rerun-if-changed={}", + json_path + .canonicalize() + .unwrap_or(json_path.clone()) + .display() + ); -const WINML_ORT_VERSION: &str = "1.23.2.3"; + let content = fs::read_to_string(&json_path).expect("Failed to read deps_versions.json"); + // Strip UTF-8 BOM if present (PowerShell may write files with BOM) + let stripped_content = content.strip_prefix('\u{FEFF}').unwrap_or(&content); + let val: serde_json::Value = + serde_json::from_str(stripped_content).expect("Failed to parse deps_versions.json"); + + let s = |obj: &serde_json::Value, key: &str| -> String { + obj.get(key) + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string() + }; + let flc = &val["foundry-local-core"]; + let ort = &val["onnxruntime"]; + let genai = &val["onnxruntime-genai"]; + DepsVersions { + core: s(flc, "nuget"), + ort: s(ort, "version"), + genai: s(genai, "version"), + } +} struct NuGetPackage { name: &'static str, @@ -43,6 +96,7 @@ fn native_lib_extension() -> &'static str { fn get_packages(rid: &str) -> Vec { let winml = env::var("CARGO_FEATURE_WINML").is_ok(); let is_linux = rid.starts_with("linux"); + let deps = load_deps_versions(); // Use pinned versions directly — dynamic resolution via resolve_latest_version // is unreliable (feed returns versions in unexpected order, and some old versions @@ -53,44 +107,44 @@ fn get_packages(rid: &str) -> Vec { if winml { packages.push(NuGetPackage { name: "Microsoft.AI.Foundry.Local.Core.WinML", - version: CORE_VERSION.to_string(), + version: deps.core.clone(), feed_url: ORT_NIGHTLY_FEED, }); packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntime.Foundry", - version: WINML_ORT_VERSION.to_string(), + version: deps.ort.clone(), feed_url: NUGET_FEED, }); packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry", - version: GENAI_VERSION.to_string(), - feed_url: NUGET_FEED, + version: deps.genai.clone(), + feed_url: ORT_NIGHTLY_FEED, }); } else { packages.push(NuGetPackage { name: "Microsoft.AI.Foundry.Local.Core", - version: CORE_VERSION.to_string(), + version: deps.core.clone(), feed_url: ORT_NIGHTLY_FEED, }); if is_linux { packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntime.Gpu.Linux", - version: ORT_VERSION.to_string(), + version: deps.ort.clone(), feed_url: NUGET_FEED, }); } else { packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntime.Foundry", - version: ORT_VERSION.to_string(), + version: deps.ort.clone(), feed_url: NUGET_FEED, }); } packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry", - version: GENAI_VERSION.to_string(), - feed_url: NUGET_FEED, + version: deps.genai.clone(), + feed_url: ORT_NIGHTLY_FEED, }); } @@ -133,7 +187,33 @@ fn resolve_base_address(feed_url: &str) -> Result { } /// Download a .nupkg and extract native libraries for the given RID into `out_dir`. +/// Skips download if native files from this package are already present. fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result<(), String> { + // Skip if this package's main native library is already in out_dir + // (e.g. pre-populated from FOUNDRY_NATIVE_OVERRIDE_DIR). 
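The skip-if-present check above keys off each package's main native library name, with a `lib` prefix on non-Windows platforms. A Python sketch of the naming rule; note that the GenAI check must run before the plain OnnxRuntime check, since both names contain "OnnxRuntime":

```
import sys

def expected_native_file(pkg_name: str):
    """Main native library name per package, mirroring the skip check (sketch)."""
    if sys.platform == "win32":
        prefix, ext = "", "dll"
    elif sys.platform == "darwin":
        prefix, ext = "lib", "dylib"
    else:
        prefix, ext = "lib", "so"
    if "Foundry.Local.Core" in pkg_name:
        return f"Microsoft.AI.Foundry.Local.Core.{ext}"
    if "OnnxRuntimeGenAI" in pkg_name:       # must precede the OnnxRuntime case
        return f"{prefix}onnxruntime-genai.{ext}"
    if "OnnxRuntime" in pkg_name:
        return f"{prefix}onnxruntime.{ext}"
    return None
```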
+ let ext = native_lib_extension(); + let prefix = if env::consts::OS == "windows" { + "" + } else { + "lib" + }; + let expected_file = if pkg.name.contains("Foundry.Local.Core") { + format!("Microsoft.AI.Foundry.Local.Core.{ext}") + } else if pkg.name.contains("OnnxRuntimeGenAI") { + format!("{prefix}onnxruntime-genai.{ext}") + } else if pkg.name.contains("OnnxRuntime") { + format!("{prefix}onnxruntime.{ext}") + } else { + String::new() + }; + if !expected_file.is_empty() && out_dir.join(&expected_file).exists() { + println!( + "cargo:warning={} already present, skipping download.", + pkg.name + ); + return Ok(()); + } + let base_address = resolve_base_address(pkg.feed_url)?; let lower_name = pkg.name.to_lowercase(); let lower_version = pkg.version.to_lowercase(); @@ -212,19 +292,26 @@ fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result Ok(()) } -/// Check whether the core native library is already present in `out_dir`. +/// Check whether all required native libraries are already present in `out_dir`. fn libs_already_present(out_dir: &Path) -> bool { - let core_lib = match env::consts::OS { - "windows" => "Microsoft.AI.Foundry.Local.Core.dll", - "linux" => "Microsoft.AI.Foundry.Local.Core.so", - "macos" => "Microsoft.AI.Foundry.Local.Core.dylib", - _ => return false, + let ext = native_lib_extension(); + let prefix = if env::consts::OS == "windows" { + "" + } else { + "lib" }; - out_dir.join(core_lib).exists() + let required = [ + format!("Microsoft.AI.Foundry.Local.Core.{ext}"), + format!("{prefix}onnxruntime.{ext}"), + format!("{prefix}onnxruntime-genai.{ext}"), + ]; + required.iter().all(|f| out_dir.join(f).exists()) } fn main() { println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-env-changed=FOUNDRY_NATIVE_OVERRIDE_DIR"); + println!("cargo:rerun-if-env-changed=CARGO_FEATURE_WINML"); let out_dir = PathBuf::from(env::var("OUT_DIR").expect("OUT_DIR not set")); @@ -240,7 +327,29 @@ fn main() { } }; - // Skip download if libraries already exist + // If FOUNDRY_NATIVE_OVERRIDE_DIR is set (e.g. by CI), copy all native + // libraries from that directory into OUT_DIR. This pre-populates FLC Core + // binaries that aren't published to a feed yet. The download loop below + // will then only fetch packages whose files are still missing (ORT, GenAI). 
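The override mechanism described above pre-populates OUT_DIR from FOUNDRY_NATIVE_OVERRIDE_DIR so only missing packages get downloaded. A minimal Python sketch of that copy loop; `apply_native_override` is an illustrative name:

```
import os
import shutil
from pathlib import Path

def apply_native_override(out_dir: Path, ext: str) -> None:
    """Copy pre-built native libraries from FOUNDRY_NATIVE_OVERRIDE_DIR into
    the build output dir, so the download loop only fetches what is missing."""
    override = os.environ.get("FOUNDRY_NATIVE_OVERRIDE_DIR")
    if not override or not Path(override).is_dir():
        return
    for entry in Path(override).iterdir():
        if entry.is_file() and entry.suffix == f".{ext}":
            shutil.copy2(entry, out_dir / entry.name)
            print(f"copied {entry.name} from override dir")
```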
+ if let Ok(override_dir) = env::var("FOUNDRY_NATIVE_OVERRIDE_DIR") { + let src = Path::new(&override_dir); + if src.is_dir() { + let ext = native_lib_extension(); + for entry in fs::read_dir(src).expect("Failed to read FOUNDRY_NATIVE_OVERRIDE_DIR") { + let path = entry.expect("Failed to read dir entry").path(); + if path.extension().and_then(|e| e.to_str()) == Some(ext) { + let dest = out_dir.join(path.file_name().unwrap()); + fs::copy(&path, &dest).expect("Failed to copy native lib from override dir"); + println!( + "cargo:warning=Copied {} from override dir", + path.file_name().unwrap().to_string_lossy() + ); + } + } + } + } + + // Skip all downloads if every required library is already present if libs_already_present(&out_dir) { println!("cargo:warning=Native libraries already present in OUT_DIR, skipping download."); println!("cargo:rustc-link-search=native={}", out_dir.display()); @@ -252,16 +361,22 @@ fn main() { let packages = get_packages(rid); + let mut download_failed = false; for pkg in &packages { if let Err(e) = download_and_extract(pkg, rid, &out_dir) { println!("cargo:warning=Error downloading {}: {e}", pkg.name); - println!("cargo:warning=Build will continue, but runtime loading may fail."); - println!( - "cargo:warning=You can manually place native libraries in the output directory." - ); + download_failed = true; } } + if download_failed && !libs_already_present(&out_dir) { + panic!( + "One or more native library downloads failed and required libraries are missing. \ + You can manually place native libraries in the output directory: {}", + out_dir.display() + ); + } + println!("cargo:rustc-link-search=native={}", out_dir.display()); println!("cargo:rustc-env=FOUNDRY_NATIVE_DIR={}", out_dir.display()); From bc8f27de98bbfea6b858793d7ab19ceec3859ab3 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Tue, 14 Apr 2026 17:21:57 -0700 Subject: [PATCH 41/83] pipeline fixes (#631) - Change dependency source from ORT-Nightly to AIFoundryLocal_PublicPackages (AIFoundryLocal_PublicPackages has upstreams so we can keep up with new dependency version updates) - Removes commit id checkout for test-data-shared, now tests run from main --------- Co-authored-by: Prathik Rao --- .github/workflows/build-cs-steps.yml | 35 +++++++-------------- .github/workflows/build-js-steps.yml | 13 -------- .github/workflows/build-python-steps.yml | 12 +------ .github/workflows/build-rust-steps.yml | 14 --------- .pipelines/templates/build-core-steps.yml | 6 ++-- .pipelines/templates/build-cs-steps.yml | 2 +- .pipelines/templates/build-python-steps.yml | 11 +++++-- sdk/cs/NuGet.config | 2 +- sdk/js/.npmrc | 2 +- 9 files changed, 26 insertions(+), 71 deletions(-) diff --git a/.github/workflows/build-cs-steps.yml b/.github/workflows/build-cs-steps.yml index cf680d49..937f728f 100644 --- a/.github/workflows/build-cs-steps.yml +++ b/.github/workflows/build-cs-steps.yml @@ -39,26 +39,26 @@ jobs: with: dotnet-version: '10.0.x' env: - NUGET_AUTH_TOKEN: ${{ secrets.AZURE_DEVOPS_PAT }} + NUGET_AUTH_TOKEN: ${{ secrets.AI_FOUNDRY_LOCAL_PAT }} - name: Generate temporary NuGet.config run: | - # The repo-level NuGet.config cleared all sources and only included ORT-Nightly. - # We generate a temporary one with both nuget.org and ORT-Nightly. - # We provide credentials to allow the ORT-Nightly feed to pull from its upstreams. + # The repo-level NuGet.config cleared all sources and only included AIFoundryLocal_PublicPackages. + # We generate a temporary one with both nuget.org and AIFoundryLocal_PublicPackages. 
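The build.rs change above replaces the old warn-and-continue path with a hard failure when a download fails and required libraries remain missing. In outline, as a Python sketch with illustrative names:

```
from pathlib import Path

def ensure_required_libs(out_dir: Path, required: list, download_failed: bool) -> None:
    """Fail only when a download failed AND a required library is still
    missing; pre-populated libraries keep the build green (sketch)."""
    missing = [f for f in required if not (out_dir / f).exists()]
    if download_failed and missing:
        raise RuntimeError(
            f"native library downloads failed and {missing} are missing; "
            f"place them manually in {out_dir}"
        )
```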
+ # We provide credentials to allow the AIFoundryLocal_PublicPackages feed to pull from its upstreams. $xml = @" - + - + - - + + "@ @@ -95,24 +95,11 @@ jobs: Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - name: Checkout specific commit in test-data-shared - shell: pwsh - working-directory: ${{ github.workspace }}/../test-data-shared - run: | - Write-Host "Current directory: $(Get-Location)" - git checkout 231f820fe285145b7ea4a449b112c1228ce66a41 - if ($LASTEXITCODE -ne 0) { - Write-Error "Git checkout failed." - exit 1 - } - Write-Host "`nDirectory contents:" - Get-ChildItem -Recurse -Depth 2 | ForEach-Object { Write-Host " $($_.FullName)" } - - name: Run Foundry Local Core tests run: | - # dotnet test sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj --verbosity normal /p:UseWinML=${{ inputs.useWinML }} /p:FoundryLocalCoreVersion="*-*" - # Use the temporary config file for test restore as well. - dotnet test sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj --verbosity normal /p:UseWinML=${{ inputs.useWinML }} + # Restore test project with authenticated config, then run tests without restoring again. + dotnet restore sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj /p:UseWinML=${{ inputs.useWinML }} --configfile sdk/cs/NuGet.temp.config + dotnet test sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj --no-restore --verbosity normal /p:UseWinML=${{ inputs.useWinML }} - name: Pack NuGet package shell: pwsh diff --git a/.github/workflows/build-js-steps.yml b/.github/workflows/build-js-steps.yml index 55f3ebf8..28111de3 100644 --- a/.github/workflows/build-js-steps.yml +++ b/.github/workflows/build-js-steps.yml @@ -71,19 +71,6 @@ jobs: Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - name: Checkout specific commit in test-data-shared - shell: pwsh - working-directory: ${{ github.workspace }}/../test-data-shared - run: | - Write-Host "Current directory: $(Get-Location)" - git checkout 231f820fe285145b7ea4a449b112c1228ce66a41 - if ($LASTEXITCODE -ne 0) { - Write-Error "Git checkout failed." - exit 1 - } - Write-Host "`nDirectory contents:" - Get-ChildItem -Recurse -Depth 2 | ForEach-Object { Write-Host " $($_.FullName)" } - # The .npmrc points to an Azure Artifacts feed for CFS compliance. # Remove it in CI so npm uses the public registry directly. - name: Remove .npmrc (use public registry) diff --git a/.github/workflows/build-python-steps.yml b/.github/workflows/build-python-steps.yml index dc180bb4..d74f97b2 100644 --- a/.github/workflows/build-python-steps.yml +++ b/.github/workflows/build-python-steps.yml @@ -48,23 +48,13 @@ jobs: Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - name: Checkout specific commit in test-data-shared - shell: pwsh - working-directory: ${{ github.workspace }}/../test-data-shared - run: | - git checkout 231f820fe285145b7ea4a449b112c1228ce66a41 - if ($LASTEXITCODE -ne 0) { - Write-Error "Git checkout failed." 
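The Python workflow above embeds a PAT in the pip index URL for the AIFoundryLocal_PublicPackages feed. A Python sketch of how that URL is assembled, assuming the token reaches the job as an environment variable named after the secret:

```
import os

# Assumption: the PAT is exposed as AI_FOUNDRY_LOCAL_PAT (the workflow
# injects it from GitHub secrets).
pat = os.environ["AI_FOUNDRY_LOCAL_PAT"]
index_url = (
    "https://az:" + pat + "@pkgs.dev.azure.com/aiinfra/AIFoundryLocal/"
    "_packaging/AIFoundryLocal_PublicPackages/pypi/simple/"
)
print(index_url)  # the value handed to `pip config set global.index-url`
```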
- exit 1 - } - - name: Install build tool run: | python -m pip install build - name: Configure pip for Azure Artifacts run: | - pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.index-url https://az:${{ secrets.AI_FOUNDRY_LOCAL_PAT }}@pkgs.dev.azure.com/aiinfra/AIFoundryLocal/_packaging/AIFoundryLocal_PublicPackages/pypi/simple/ pip config set global.extra-index-url https://pypi.org/simple/ pip config set global.pre true diff --git a/.github/workflows/build-rust-steps.yml b/.github/workflows/build-rust-steps.yml index 810b6c1e..75d86b20 100644 --- a/.github/workflows/build-rust-steps.yml +++ b/.github/workflows/build-rust-steps.yml @@ -82,20 +82,6 @@ jobs: Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - name: Checkout specific commit in test-data-shared - if: ${{ inputs.run-integration-tests }} - shell: pwsh - working-directory: ${{ github.workspace }}/../test-data-shared - run: | - Write-Host "Current directory: $(Get-Location)" - git checkout 231f820fe285145b7ea4a449b112c1228ce66a41 - if ($LASTEXITCODE -ne 0) { - Write-Error "Git checkout failed." - exit 1 - } - Write-Host "`nDirectory contents:" - Get-ChildItem -Recurse -Depth 2 | ForEach-Object { Write-Host " $($_.FullName)" } - - name: Check formatting run: cargo fmt --all -- --check diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml index 3803ccf0..1b80ec71 100644 --- a/.pipelines/templates/build-core-steps.yml +++ b/.pipelines/templates/build-core-steps.yml @@ -44,10 +44,10 @@ steps: - + - + @@ -57,7 +57,7 @@ steps: "@ Set-Content -Path "$(nsRoot)/nuget.config" -Value $nugetConfig - Write-Host "Updated nuget.config to use nuget.org, ORT-Nightly, and Neutron with mappings" + Write-Host "Updated nuget.config to use nuget.org, AIFoundryLocal_PublicPackages, and Neutron with mappings" - ${{ if eq(parameters.isWinML, true) }}: - task: DotNetCoreCLI@2 diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml index 5d8f67c1..6c2c8d97 100644 --- a/.pipelines/templates/build-cs-steps.yml +++ b/.pipelines/templates/build-cs-steps.yml @@ -81,7 +81,7 @@ steps: - + "@ diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index 5ada9cb6..f52e069f 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -68,13 +68,18 @@ steps: Write-Host "Package version: $v" Write-Host "##vso[task.setvariable variable=packageVersion]$v" -# Configure pip to use ORT-Nightly feed (plus PyPI as fallback) +# Configure pip to use AIFoundryLocal_PublicPackages feed (plus PyPI as fallback) +- task: PipAuthenticate@1 + displayName: 'Authenticate pip with Azure Artifacts' + inputs: + artifactFeeds: 'AIFoundryLocal/AIFoundryLocal_PublicPackages' + - task: PowerShell@2 - displayName: 'Configure pip for Azure Artifacts' + displayName: 'Configure pip index URLs' inputs: targetType: inline script: | - pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/AIFoundryLocal/_packaging/AIFoundryLocal_PublicPackages/pypi/simple/ pip config set global.extra-index-url https://pypi.org/simple/ pip config set global.pre true diff --git a/sdk/cs/NuGet.config b/sdk/cs/NuGet.config index 420497e9..29505d6d 100644 --- a/sdk/cs/NuGet.config +++ 
b/sdk/cs/NuGet.config
@@ -2,6 +2,6 @@
 
-
+
 
diff --git a/sdk/js/.npmrc b/sdk/js/.npmrc
index 114ea2a4..7418403b 100644
--- a/sdk/js/.npmrc
+++ b/sdk/js/.npmrc
@@ -1,2 +1,2 @@
-registry=https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/npm/registry/
+registry=https://pkgs.dev.azure.com/aiinfra/AIFoundryLocal/_packaging/AIFoundryLocal_PublicPackages/npm/registry/
 always-auth=true

From 3b3614fa28dcc699bec037da05084e777ca42421 Mon Sep 17 00:00:00 2001
From: Baiju Meswani
Date: Wed, 15 Apr 2026 12:24:12 -0700
Subject: [PATCH 42/83] Replace JS SDK koffi interop with Node-API addon and
 ship prebuilt binaries (#633)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Replace the JavaScript SDK’s `koffi`-based FFI layer with a native Node-API
addon and package prebuilt `.node` binaries with the npm package. This removes
the large `koffi` dependency, keeps the existing JS SDK surface area unchanged,
and preserves cross-platform support through CI-built prebuilts.

What changed?

- Added a native Node-API addon for JS ↔ Foundry Local Core interop:
  - `loadLibrary`
  - `executeCommand`
  - `executeCommandWithBinary`
  - `executeCommandStreaming`
- Rewrote `CoreInterop.ts` to use the addon instead of `koffi`
- Removed `koffi` from the JS SDK dependencies
- Added `build:native` flow for SDK contributors
- Switched npm packaging to ship prebuilt addons from `prebuilds/<platform>-<arch>/`
- Updated pack/install scripts to support the new packaging model
- Added CI/CD steps to build platform-specific addons and include them in the
  packaged JS SDK
- Updated JS SDK README with contributor build instructions
---
 .github/workflows/build-js-steps.yml            |  18 +
 .../workflows/samples-integration-test.yml      |  24 +
 .gitignore                                      |   1 +
 .pipelines/foundry-local-packaging.yml          | 196 ++++-
 .pipelines/templates/build-js-addon-steps.yml   |  45 +
 samples/cs/nuget.config                         |  10 -
 sdk/js/README.md                                |  32 +
 sdk/js/native/binding.gyp                       |  30 +
 sdk/js/native/foundry_local_napi.c              | 822 ++++++++++++++++++
 sdk/js/package.json                             |   6 +-
 sdk/js/script/copy-addon.cjs                    |  24 +
 sdk/js/script/pack.cjs                          |   2 +-
 sdk/js/src/detail/coreInterop.ts                | 211 ++---
 13 files changed, 1246 insertions(+), 175 deletions(-)
 create mode 100644 .pipelines/templates/build-js-addon-steps.yml
 create mode 100644 sdk/js/native/binding.gyp
 create mode 100644 sdk/js/native/foundry_local_napi.c
 create mode 100644 sdk/js/script/copy-addon.cjs

diff --git a/.github/workflows/build-js-steps.yml b/.github/workflows/build-js-steps.yml
index 28111de3..a869477d 100644
--- a/.github/workflows/build-js-steps.yml
+++ b/.github/workflows/build-js-steps.yml
@@ -34,6 +34,24 @@ jobs:
         with:
           node-version: '20.x'
 
+      - name: Setup Python (for node-gyp)
+        uses: actions/setup-python@v5
+        with:
+          python-version: '3.x'
+
+      - name: Build Node-API addon
+        shell: pwsh
+        working-directory: sdk/js
+        run: |
+          npm install --no-save node-gyp node-api-headers --registry https://registry.npmjs.org
+          Set-Location native
+          npx node-gyp rebuild
+          $platformKey = node -e 'process.stdout.write(process.platform + "-" + process.arch)'
+          $destDir = "../prebuilds/$platformKey"
+          New-Item -ItemType Directory -Path $destDir -Force | Out-Null
+          Copy-Item "build/Release/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force
+          Write-Host "Built addon for $platformKey -> $destDir/foundry_local_napi.node"
+
       # needed to download Foundry Local Core from Azure Artifacts
       - name: Setup .NET SDK for NuGet authentication
         uses: actions/setup-dotnet@v5
diff --git
a/.github/workflows/samples-integration-test.yml b/.github/workflows/samples-integration-test.yml index c844ca12..ebac905e 100644 --- a/.github/workflows/samples-integration-test.yml +++ b/.github/workflows/samples-integration-test.yml @@ -100,11 +100,35 @@ jobs: with: node-version: '20.x' + - name: Setup Python (for node-gyp) + uses: actions/setup-python@v5 + with: + python-version: '3.x' + - name: Setup .NET SDK for NuGet authentication uses: actions/setup-dotnet@v5 with: dotnet-version: '10.0.x' + - name: Remove .npmrc (use public registry) + working-directory: sdk/js + shell: pwsh + run: | + if (Test-Path .npmrc) { Remove-Item .npmrc -Force; Write-Host "Removed .npmrc" } + + - name: Build Node-API addon + shell: pwsh + working-directory: sdk/js + run: | + npm install --no-save node-gyp node-api-headers --registry https://registry.npmjs.org + Set-Location native + npx node-gyp rebuild + $platformKey = node -e 'process.stdout.write(process.platform + "-" + process.arch)' + $destDir = "../prebuilds/$platformKey" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "build/Release/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force + Write-Host "Built addon for $platformKey -> $destDir/foundry_local_napi.node" + - name: Build SDK from source working-directory: sdk/js run: | diff --git a/.gitignore b/.gitignore index 552012ec..c5859ed2 100644 --- a/.gitignore +++ b/.gitignore @@ -23,6 +23,7 @@ __pycache__/ node_modules/ packages/ package-lock.json +prebuilds/ # Rust build targets target/ diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index d90a15e7..9ff902cc 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -322,11 +322,120 @@ extends: flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + # ── Build JS Node-API Addon (all platforms) ── + - stage: build_js_addon + displayName: 'Build JS Addon' + dependsOn: [] + jobs: + - job: js_addon_win_x64 + displayName: 'Addon win32-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'js-addon-win32-x64' + targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' + steps: + - checkout: self + clean: true + - template: .pipelines/templates/build-js-addon-steps.yml@self + parameters: + repoRoot: $(Build.SourcesDirectory) + - task: PowerShell@2 + displayName: 'Stage addon artifact' + inputs: + targetType: inline + script: | + $destDir = "$(Build.ArtifactStagingDirectory)/js-addon" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "$(Build.SourcesDirectory)/sdk/js/prebuilds/win32-x64/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force + + - job: js_addon_win_arm64 + displayName: 'Addon win32-arm64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'js-addon-win32-arm64' + targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' + steps: + - checkout: self + clean: true + - template: .pipelines/templates/build-js-addon-steps.yml@self + parameters: + repoRoot: $(Build.SourcesDirectory) + targetArch: arm64 + - task: PowerShell@2 + displayName: 'Stage addon artifact' + inputs: + targetType: inline + script: | + $destDir = "$(Build.ArtifactStagingDirectory)/js-addon" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item 
"$(Build.SourcesDirectory)/sdk/js/prebuilds/win32-arm64/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force + + - job: js_addon_linux_x64 + displayName: 'Addon linux-x64' + pool: + name: onnxruntime-Ubuntu2404-AMD-CPU + os: linux + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'js-addon-linux-x64' + targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' + steps: + - checkout: self + clean: true + - template: .pipelines/templates/build-js-addon-steps.yml@self + parameters: + repoRoot: $(Build.SourcesDirectory) + - task: PowerShell@2 + displayName: 'Stage addon artifact' + inputs: + targetType: inline + script: | + $destDir = "$(Build.ArtifactStagingDirectory)/js-addon" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "$(Build.SourcesDirectory)/sdk/js/prebuilds/linux-x64/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force + + - job: js_addon_osx_arm64 + displayName: 'Addon osx-arm64' + pool: + name: Azure Pipelines + vmImage: 'macOS-15' + os: macOS + templateContext: + outputs: + - output: pipelineArtifact + artifactName: 'js-addon-darwin-arm64' + targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' + steps: + - checkout: self + clean: true + - template: .pipelines/templates/build-js-addon-steps.yml@self + parameters: + repoRoot: $(Build.SourcesDirectory) + targetArch: arm64 + - task: PowerShell@2 + displayName: 'Stage addon artifact' + inputs: + targetType: inline + script: | + $destDir = "$(Build.ArtifactStagingDirectory)/js-addon" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "$(Build.SourcesDirectory)/sdk/js/prebuilds/darwin-arm64/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force + # ── Build JS SDK ── - stage: build_js displayName: 'Build JS SDK' dependsOn: - build_core + - build_js_addon jobs: - job: js_sdk displayName: 'Build' @@ -353,6 +462,48 @@ extends: clean: true - checkout: test-data-shared lfs: true + + # Download prebuilt Node-API addons for all platforms + - task: DownloadPipelineArtifact@2 + displayName: 'Download addon (win32-x64)' + inputs: + buildType: current + artifactName: 'js-addon-win32-x64' + targetPath: '$(Pipeline.Workspace)/js-addon-win32-x64' + - task: DownloadPipelineArtifact@2 + displayName: 'Download addon (win32-arm64)' + inputs: + buildType: current + artifactName: 'js-addon-win32-arm64' + targetPath: '$(Pipeline.Workspace)/js-addon-win32-arm64' + - task: DownloadPipelineArtifact@2 + displayName: 'Download addon (linux-x64)' + inputs: + buildType: current + artifactName: 'js-addon-linux-x64' + targetPath: '$(Pipeline.Workspace)/js-addon-linux-x64' + - task: DownloadPipelineArtifact@2 + displayName: 'Download addon (osx-arm64)' + inputs: + buildType: current + artifactName: 'js-addon-darwin-arm64' + targetPath: '$(Pipeline.Workspace)/js-addon-darwin-arm64' + + - task: PowerShell@2 + displayName: 'Place prebuilt addons' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $prebuildsDir = "$repoRoot/sdk/js/prebuilds" + foreach ($platform in @('win32-x64','win32-arm64','linux-x64','darwin-arm64')) { + $src = "$(Pipeline.Workspace)/js-addon-$platform/foundry_local_napi.node" + $destDir = "$prebuildsDir/$platform" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item $src "$destDir/foundry_local_napi.node" -Force + Write-Host "Placed addon for $platform ($($(Get-Item $src).Length) bytes)" + } + - template: .pipelines/templates/build-js-steps.yml@self 
        parameters:
          version: ${{ parameters.version }}
@@ -592,6 +743,7 @@ extends:
     displayName: 'Build JS SDK (WinML)'
     dependsOn:
     - build_core_winml
+    - build_js_addon
     jobs:
     - job: js_sdk_winml
       displayName: 'Build'
@@ -618,6 +770,48 @@ extends:
           clean: true
         - checkout: test-data-shared
           lfs: true
+
+        # Download prebuilt Node-API addons for all platforms
+        - task: DownloadPipelineArtifact@2
+          displayName: 'Download addon (win32-x64)'
+          inputs:
+            buildType: current
+            artifactName: 'js-addon-win32-x64'
+            targetPath: '$(Pipeline.Workspace)/js-addon-win32-x64'
+        - task: DownloadPipelineArtifact@2
+          displayName: 'Download addon (win32-arm64)'
+          inputs:
+            buildType: current
+            artifactName: 'js-addon-win32-arm64'
+            targetPath: '$(Pipeline.Workspace)/js-addon-win32-arm64'
+        - task: DownloadPipelineArtifact@2
+          displayName: 'Download addon (linux-x64)'
+          inputs:
+            buildType: current
+            artifactName: 'js-addon-linux-x64'
+            targetPath: '$(Pipeline.Workspace)/js-addon-linux-x64'
+        - task: DownloadPipelineArtifact@2
+          displayName: 'Download addon (osx-arm64)'
+          inputs:
+            buildType: current
+            artifactName: 'js-addon-darwin-arm64'
+            targetPath: '$(Pipeline.Workspace)/js-addon-darwin-arm64'
+
+        - task: PowerShell@2
+          displayName: 'Place prebuilt addons'
+          inputs:
+            targetType: inline
+            script: |
+              $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local"
+              $prebuildsDir = "$repoRoot/sdk/js/prebuilds"
+              foreach ($platform in @('win32-x64','win32-arm64','linux-x64','darwin-arm64')) {
+                $src = "$(Pipeline.Workspace)/js-addon-$platform/foundry_local_napi.node"
+                $destDir = "$prebuildsDir/$platform"
+                New-Item -ItemType Directory -Path $destDir -Force | Out-Null
+                Copy-Item $src "$destDir/foundry_local_napi.node" -Force
+                Write-Host "Placed addon for $platform ($($(Get-Item $src).Length) bytes)"
+              }
+
       - template: .pipelines/templates/build-js-steps.yml@self
         parameters:
           version: ${{ parameters.version }}
@@ -710,4 +904,4 @@ extends:
      #     isWinML: true
      #     flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml'
      #     depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml'
-     #     outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml'
\ No newline at end of file
+     #     outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml'
diff --git a/.pipelines/templates/build-js-addon-steps.yml b/.pipelines/templates/build-js-addon-steps.yml
new file mode 100644
index 00000000..dce0f7fc
--- /dev/null
+++ b/.pipelines/templates/build-js-addon-steps.yml
@@ -0,0 +1,45 @@
+# Builds the Node-API native addon for the current platform.
+# Produces a foundry_local_napi.node binary under prebuilds/<platform>-<arch>/.
+parameters:
+- name: repoRoot
+  type: string
+- name: targetArch
+  type: string
+  default: ''
+  displayName: 'Target architecture for cross-compilation (e.g. arm64). Empty = native.'
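+# Example invocation (this matches how the packaging pipeline's addon jobs
+# above consume the template; omit targetArch to build for the host
+# architecture):
+#   - template: .pipelines/templates/build-js-addon-steps.yml@self
+#     parameters:
+#       repoRoot: $(Build.SourcesDirectory)
+#       targetArch: arm64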
+ +steps: +- task: NodeTool@0 + displayName: 'Use Node.js 20' + inputs: + versionSpec: '20.x' + +- task: PowerShell@2 + displayName: 'Install node-gyp and node-api-headers' + inputs: + targetType: inline + pwsh: true + script: | + Set-Location "${{ parameters.repoRoot }}/sdk/js" + npm install --no-save node-gyp node-api-headers --registry https://registry.npmjs.org + +- task: PowerShell@2 + displayName: 'Build Node-API addon' + inputs: + targetType: inline + pwsh: true + script: | + Set-Location "${{ parameters.repoRoot }}/sdk/js/native" + $archFlag = "${{ parameters.targetArch }}" + if ($archFlag -ne '') { + npx node-gyp rebuild --arch=$archFlag + $platformKey = (node -e 'process.stdout.write(process.platform)') + "-$archFlag" + } else { + npx node-gyp rebuild + $platformKey = node -e 'process.stdout.write(process.platform + "-" + process.arch)' + } + $destDir = "${{ parameters.repoRoot }}/sdk/js/prebuilds/$platformKey" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + Copy-Item "build/Release/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force + Write-Host "Built addon for $platformKey -> $destDir/foundry_local_napi.node" + Get-Item "$destDir/foundry_local_napi.node" | ForEach-Object { Write-Host " Size: $($_.Length) bytes" } diff --git a/samples/cs/nuget.config b/samples/cs/nuget.config index 0eb64ca1..3a9f6b32 100644 --- a/samples/cs/nuget.config +++ b/samples/cs/nuget.config @@ -3,15 +3,5 @@ - - - - - - - - - - \ No newline at end of file diff --git a/sdk/js/README.md b/sdk/js/README.md index 13d50442..b2fe31dd 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -261,6 +261,38 @@ Auto-generated class documentation lives in [`docs/classes/`](docs/classes/): - [AudioClient](docs/classes/AudioClient.md) — Audio transcription (sync and streaming) - [ModelLoadManager](docs/classes/ModelLoadManager.md) — Low-level model loading management +## Contributing: Building from Source + +### Prerequisites + +- **Node.js 20+** +- **Python 3.x** — required by `node-gyp` for compiling the native addon +- **C/C++ toolchain**: + - **Windows**: Visual Studio Build Tools (the "Desktop development with C++" workload) + - **Linux**: `build-essential` (`apt install build-essential`) + - **macOS**: Xcode Command Line Tools (`xcode-select --install`) + +### Build Steps + +```bash +# 1. Install JS dependencies (also downloads native core binaries) +npm install + +# 2. Build the Node-API native addon (compiles C code and copies to prebuilds/) +npm run build:native + +# 3. Build the TypeScript source +npm run build + +# 4. Run tests +npm test + +# 5. Pack the SDK into a .tgz (includes prebuilt addon for your platform) +npm run pack +``` + +> **Note:** `npm run build:native` compiles the addon only for your current platform. The published npm package includes prebuilt addons for all supported platforms (win32-x64, win32-arm64, linux-x64, darwin-arm64), which are compiled in CI. 
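+
+### Verifying the Native Build
+
+To sanity-check a locally compiled addon, you can load it directly and list its exports. This is a minimal sketch (`check-addon.cjs` is a hypothetical throwaway file, not part of the SDK); it assumes the default `prebuilds/` layout produced by `npm run build:native`:
+
+```js
+// check-addon.cjs — hypothetical helper; run from sdk/js with `node check-addon.cjs`.
+// The addon registers exactly four interop functions (see native/foundry_local_napi.c).
+const path = require('path');
+const key = `${process.platform}-${process.arch}`;
+const addon = require(path.join(__dirname, 'prebuilds', key, 'foundry_local_napi.node'));
+console.log(Object.keys(addon));
+// -> [ 'loadLibrary', 'executeCommand', 'executeCommandWithBinary', 'executeCommandStreaming' ]
+```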
+ ## Running Tests ```bash diff --git a/sdk/js/native/binding.gyp b/sdk/js/native/binding.gyp new file mode 100644 index 00000000..e8aed098 --- /dev/null +++ b/sdk/js/native/binding.gyp @@ -0,0 +1,30 @@ +{ + "targets": [ + { + "target_name": "foundry_local_napi", + "sources": ["foundry_local_napi.c"], + "include_dirs": [ + " +#include +#include +#include +#include + +/* ── Platform-specific dynamic loading ─────────────────────────────────── */ + +#ifdef _WIN32 + #define WIN32_LEAN_AND_MEAN + #include + typedef HMODULE lib_handle_t; + #define LIB_OPEN(path) LoadLibraryA(path) + #define LIB_SYM(handle, sym) GetProcAddress(handle, sym) + #define LIB_CLOSE(handle) FreeLibrary(handle) +#else + #include + typedef void* lib_handle_t; + #define LIB_OPEN(path) dlopen(path, RTLD_NOW | RTLD_LOCAL) + #define LIB_SYM(handle, sym) dlsym(handle, sym) + #define LIB_CLOSE(handle) dlclose(handle) +#endif + +/* ── Native core structs (must match C# / Rust definitions) ───────────── */ + +typedef struct { + const char* Command; + int32_t CommandLength; + const char* Data; + int32_t DataLength; +} RequestBuffer; + +typedef struct { + void* Data; + int32_t DataLength; + void* Error; + int32_t ErrorLength; +} ResponseBuffer; + +typedef struct { + const char* Command; + int32_t CommandLength; + const char* Data; + int32_t DataLength; + const void* BinaryData; + int32_t BinaryDataLength; +} StreamingRequestBuffer; + +typedef int32_t (*CallbackFn)(const void* data, int32_t length, void* userData); + +/* ── Native function pointer types ────────────────────────────────────── */ + +typedef void (*ExecuteCommandFn)( + const RequestBuffer* request, + ResponseBuffer* response +); + +typedef void (*ExecuteCommandWithCallbackFn)( + const RequestBuffer* request, + ResponseBuffer* response, + CallbackFn callback, + void* userData +); + +typedef void (*ExecuteCommandWithBinaryFn)( + const StreamingRequestBuffer* request, + ResponseBuffer* response +); + +/* ── Module state ─────────────────────────────────────────────────────── */ + +static lib_handle_t g_core_lib = NULL; +static lib_handle_t* g_dep_libs = NULL; +static size_t g_dep_lib_count = 0; + +static ExecuteCommandFn g_execute_command = NULL; +static ExecuteCommandWithCallbackFn g_execute_command_with_callback = NULL; +static ExecuteCommandWithBinaryFn g_execute_command_with_binary = NULL; + +/* ── Platform-specific memory deallocation ────────────────────────────── */ + +/* + * The .NET native core allocates response buffers with Marshal.AllocHGlobal: + * - Unix: malloc → free with free() + * - Windows: LocalAlloc → free with LocalFree() + */ +static void free_native_buffer(void* ptr) { + if (!ptr) return; +#ifdef _WIN32 + LocalFree(ptr); +#else + free(ptr); +#endif +} + +/* ── Helper: throw JS error from napi_status ──────────────────────────── */ + +#define NAPI_CALL(env, call) \ + do { \ + napi_status _status = (call); \ + if (_status != napi_ok) { \ + const napi_extended_error_info* _err_info = NULL; \ + napi_get_last_error_info((env), &_err_info); \ + const char* _msg = (_err_info && _err_info->error_message) \ + ? 
_err_info->error_message \ + : "Unknown N-API error"; \ + napi_throw_error((env), NULL, _msg); \ + return NULL; \ + } \ + } while (0) + +/* Maximum string length we accept (guard against size_t → int32_t overflow) */ +#define MAX_STRING_LENGTH ((size_t)INT32_MAX) + +/* ── Helper: validate string length fits in int32_t ───────────────────── */ + +static int check_string_length(napi_env env, size_t len, const char* param_name) { + if (len > MAX_STRING_LENGTH) { + char msg[128]; + snprintf(msg, sizeof(msg), "%s exceeds maximum length (2GB)", param_name); + napi_throw_error(env, NULL, msg); + return 0; /* failure */ + } + return 1; /* ok */ +} + +/* ── Helper: create a JS Error object and reject a deferred promise ──── */ + +static void reject_with_error(napi_env env, napi_deferred deferred, + const char* message) { + napi_value err_msg, err_obj; + napi_create_string_utf8(env, message, NAPI_AUTO_LENGTH, &err_msg); + napi_create_error(env, NULL, err_msg, &err_obj); + napi_reject_deferred(env, deferred, err_obj); +} + +/* ── Helper: clean up loaded libraries on error ───────────────────────── */ + +static void cleanup_loaded_libs(void) { + if (g_core_lib) { + LIB_CLOSE(g_core_lib); + g_core_lib = NULL; + } + for (size_t i = 0; i < g_dep_lib_count; i++) { + if (g_dep_libs[i]) LIB_CLOSE(g_dep_libs[i]); + } + free(g_dep_libs); + g_dep_libs = NULL; + g_dep_lib_count = 0; + g_execute_command = NULL; + g_execute_command_with_callback = NULL; + g_execute_command_with_binary = NULL; +} + +/* ── Helper: extract response and free native buffers ─────────────────── */ + +static napi_value handle_response(napi_env env, const char* command, + ResponseBuffer* res) { + napi_value result; + + if (res->Error && res->ErrorLength > 0) { + char* msg = (char*)malloc(res->ErrorLength + 64); + if (msg) { + snprintf(msg, res->ErrorLength + 64, "Command '%s' failed: %.*s", + command, res->ErrorLength, (const char*)res->Error); + napi_throw_error(env, NULL, msg); + free(msg); + } else { + napi_throw_error(env, NULL, "Command failed (out of memory for error)"); + } + free_native_buffer(res->Data); + free_native_buffer(res->Error); + return NULL; + } + + napi_status st; + if (res->Data && res->DataLength > 0) { + st = napi_create_string_utf8(env, (const char*)res->Data, + res->DataLength, &result); + } else { + st = napi_create_string_utf8(env, "", 0, &result); + } + + free_native_buffer(res->Data); + free_native_buffer(res->Error); + + if (st != napi_ok) { + napi_throw_error(env, NULL, "Failed to create response string"); + return NULL; + } + + return result; +} + +/* ── loadLibrary(corePath, depPaths?) 
─────────────────────────────────── */ + +static napi_value napi_load_library(napi_env env, napi_callback_info info) { + size_t argc = 2; + napi_value argv[2]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, argv, NULL, NULL)); + + if (argc < 1) { + napi_throw_error(env, NULL, "loadLibrary requires at least 1 argument (corePath)"); + return NULL; + } + + /* Close previously loaded libraries if any */ + cleanup_loaded_libs(); + + /* Load dependency libraries first (e.g., onnxruntime on Windows) */ + if (argc >= 2) { + napi_valuetype vt; + NAPI_CALL(env, napi_typeof(env, argv[1], &vt)); + + if (vt != napi_undefined && vt != napi_null) { + bool is_array = false; + NAPI_CALL(env, napi_is_array(env, argv[1], &is_array)); + if (!is_array) { + napi_throw_type_error(env, NULL, "depPaths must be an array of strings"); + return NULL; + } + + uint32_t dep_count = 0; + NAPI_CALL(env, napi_get_array_length(env, argv[1], &dep_count)); + + if (dep_count > 0) { + g_dep_libs = (lib_handle_t*)calloc(dep_count, sizeof(lib_handle_t)); + if (!g_dep_libs) { + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + g_dep_lib_count = dep_count; + + for (uint32_t i = 0; i < dep_count; i++) { + napi_value elem; + NAPI_CALL(env, napi_get_element(env, argv[1], i, &elem)); + + size_t len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, elem, NULL, 0, &len)); + char* dep_path = (char*)malloc(len + 1); + if (!dep_path) { + cleanup_loaded_libs(); + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + NAPI_CALL(env, napi_get_value_string_utf8(env, elem, dep_path, len + 1, &len)); + + g_dep_libs[i] = LIB_OPEN(dep_path); + if (!g_dep_libs[i]) { + char err_msg[512]; + snprintf(err_msg, sizeof(err_msg), + "Failed to load dependency library: %s", dep_path); + free(dep_path); + cleanup_loaded_libs(); + napi_throw_error(env, NULL, err_msg); + return NULL; + } + free(dep_path); + } + } + } + } + + /* Load the core library */ + size_t core_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], NULL, 0, &core_len)); + char* core_path = (char*)malloc(core_len + 1); + if (!core_path) { + cleanup_loaded_libs(); + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], core_path, core_len + 1, &core_len)); + + g_core_lib = LIB_OPEN(core_path); + if (!g_core_lib) { + char err_msg[512]; + snprintf(err_msg, sizeof(err_msg), + "Failed to load core library: %s", core_path); + free(core_path); + cleanup_loaded_libs(); + napi_throw_error(env, NULL, err_msg); + return NULL; + } + free(core_path); + + /* Resolve function pointers */ + g_execute_command = (ExecuteCommandFn)LIB_SYM(g_core_lib, "execute_command"); + if (!g_execute_command) { + cleanup_loaded_libs(); + napi_throw_error(env, NULL, "Failed to resolve 'execute_command' symbol"); + return NULL; + } + + g_execute_command_with_callback = (ExecuteCommandWithCallbackFn)LIB_SYM( + g_core_lib, "execute_command_with_callback"); + if (!g_execute_command_with_callback) { + cleanup_loaded_libs(); + napi_throw_error(env, NULL, "Failed to resolve 'execute_command_with_callback' symbol"); + return NULL; + } + + g_execute_command_with_binary = (ExecuteCommandWithBinaryFn)LIB_SYM( + g_core_lib, "execute_command_with_binary"); + if (!g_execute_command_with_binary) { + cleanup_loaded_libs(); + napi_throw_error(env, NULL, "Failed to resolve 'execute_command_with_binary' symbol"); + return NULL; + } + + napi_value undefined; + NAPI_CALL(env, napi_get_undefined(env, &undefined)); + return 
undefined; +} + +/* ── executeCommand(command, dataJson) ────────────────────────────────── */ + +static napi_value napi_execute_command(napi_env env, napi_callback_info info) { + if (!g_execute_command) { + napi_throw_error(env, NULL, "Native library not loaded. Call loadLibrary() first."); + return NULL; + } + + size_t argc = 2; + napi_value argv[2]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, argv, NULL, NULL)); + + if (argc < 2) { + napi_throw_error(env, NULL, "executeCommand requires 2 arguments (command, dataJson)"); + return NULL; + } + + /* Extract command string */ + size_t cmd_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], NULL, 0, &cmd_len)); + if (!check_string_length(env, cmd_len, "command")) return NULL; + char* cmd = (char*)malloc(cmd_len + 1); + if (!cmd) { napi_throw_error(env, NULL, "Out of memory"); return NULL; } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], cmd, cmd_len + 1, &cmd_len)); + + /* Extract data JSON string */ + size_t data_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], NULL, 0, &data_len)); + if (!check_string_length(env, data_len, "dataJson")) { free(cmd); return NULL; } + char* data = (char*)malloc(data_len + 1); + if (!data) { free(cmd); napi_throw_error(env, NULL, "Out of memory"); return NULL; } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], data, data_len + 1, &data_len)); + + RequestBuffer req = { + .Command = cmd, + .CommandLength = (int32_t)cmd_len, + .Data = data, + .DataLength = (int32_t)data_len + }; + ResponseBuffer res = { NULL, 0, NULL, 0 }; + + g_execute_command(&req, &res); + + napi_value result = handle_response(env, cmd, &res); + + free(cmd); + free(data); + return result; +} + +/* ── executeCommandWithBinary(command, dataJson, binaryBuffer) ────────── */ + +static napi_value napi_execute_command_with_binary(napi_env env, + napi_callback_info info) { + if (!g_execute_command_with_binary) { + napi_throw_error(env, NULL, "Native library not loaded. 
Call loadLibrary() first."); + return NULL; + } + + size_t argc = 3; + napi_value argv[3]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, argv, NULL, NULL)); + + if (argc < 3) { + napi_throw_error(env, NULL, + "executeCommandWithBinary requires 3 arguments (command, dataJson, binaryBuffer)"); + return NULL; + } + + /* Extract command string */ + size_t cmd_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], NULL, 0, &cmd_len)); + if (!check_string_length(env, cmd_len, "command")) return NULL; + char* cmd = (char*)malloc(cmd_len + 1); + if (!cmd) { napi_throw_error(env, NULL, "Out of memory"); return NULL; } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], cmd, cmd_len + 1, &cmd_len)); + + /* Extract data JSON string */ + size_t data_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], NULL, 0, &data_len)); + if (!check_string_length(env, data_len, "dataJson")) { free(cmd); return NULL; } + char* data = (char*)malloc(data_len + 1); + if (!data) { free(cmd); napi_throw_error(env, NULL, "Out of memory"); return NULL; } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], data, data_len + 1, &data_len)); + + /* Extract binary buffer */ + void* bin_data = NULL; + size_t bin_len = 0; + bool is_buffer = false; + NAPI_CALL(env, napi_is_buffer(env, argv[2], &is_buffer)); + + if (is_buffer) { + NAPI_CALL(env, napi_get_buffer_info(env, argv[2], &bin_data, &bin_len)); + } else { + bool is_typedarray = false; + NAPI_CALL(env, napi_is_typedarray(env, argv[2], &is_typedarray)); + if (is_typedarray) { + napi_typedarray_type type; + size_t length; + void* arr_data; + napi_value arr_buf; + size_t offset; + NAPI_CALL(env, napi_get_typedarray_info(env, argv[2], &type, &length, + &arr_data, &arr_buf, &offset)); + if (type != napi_uint8_array) { + free(cmd); + free(data); + napi_throw_type_error(env, NULL, + "binaryBuffer must be a Buffer or Uint8Array"); + return NULL; + } + bin_data = arr_data; + bin_len = length; + } else { + free(cmd); + free(data); + napi_throw_type_error(env, NULL, + "binaryBuffer must be a Buffer or Uint8Array"); + return NULL; + } + } + + if (!check_string_length(env, bin_len, "binaryBuffer")) { + free(cmd); free(data); return NULL; + } + + StreamingRequestBuffer req = { + .Command = cmd, + .CommandLength = (int32_t)cmd_len, + .Data = data, + .DataLength = (int32_t)data_len, + .BinaryData = bin_data, + .BinaryDataLength = (int32_t)bin_len + }; + ResponseBuffer res = { NULL, 0, NULL, 0 }; + + g_execute_command_with_binary(&req, &res); + + napi_value result = handle_response(env, cmd, &res); + + free(cmd); + free(data); + return result; +} + +/* ── Streaming async work data ────────────────────────────────────────── */ + +/* Chunk data passed from the native callback to the JS thread. + Carries both the data pointer and its length so we avoid strlen(). */ +typedef struct StreamingWorkData StreamingWorkData; +typedef struct { + char* data; + size_t length; + StreamingWorkData* work_data; /* back-pointer for cancellation */ +} ChunkData; + +struct StreamingWorkData { + /* Input (owned, freed after work completes) */ + char* command; + size_t command_length; + char* data; + size_t data_length; + + /* Threadsafe function for streaming callback */ + napi_threadsafe_function tsfn; + + /* Set by the JS thread when the callback throws an exception. + Checked by the native trampoline to cancel the stream. 
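+     Declared volatile because it is written on the JS thread and read
+     from the core library's worker thread between chunks.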
*/ + volatile int should_cancel; + + /* Output from native call */ + ResponseBuffer response; + + /* Promise */ + napi_deferred deferred; + napi_async_work work; +}; + +/* Called on the JS thread when the native callback fires */ +static void streaming_call_js(napi_env env, napi_value js_callback, + void* context, void* data) { + if (!env || !data) return; + + ChunkData* chunk = (ChunkData*)data; + + napi_value argv[1]; + napi_value global; + napi_status status; + + status = napi_create_string_utf8(env, chunk->data, chunk->length, &argv[0]); + StreamingWorkData* work_data = chunk->work_data; + free(chunk->data); + free(chunk); + + if (status != napi_ok) return; + + status = napi_get_global(env, &global); + if (status != napi_ok) return; + + napi_value result; + status = napi_call_function(env, global, js_callback, 1, argv, &result); + + /* If the JS callback threw, clear the exception and signal cancellation + to the native trampoline so the stream stops promptly. */ + if (status == napi_pending_exception) { + napi_value exception; + napi_get_and_clear_last_exception(env, &exception); + if (work_data) { + work_data->should_cancel = 1; + } + } +} + +/* Native callback trampoline invoked by the core library (possibly from + a worker thread). Copies chunk data and dispatches to the JS thread + via threadsafe function. Returns 0 to continue, 1 to cancel. */ +static int32_t streaming_native_callback(const void* data, int32_t length, + void* userData) { + StreamingWorkData* work_data = (StreamingWorkData*)userData; + if (!work_data || !work_data->tsfn || !data || length <= 0) { + return 0; /* continue even on unexpected state */ + } + + /* Check if the JS callback requested cancellation */ + if (work_data->should_cancel) { + return 1; /* cancel */ + } + + /* Heap-copy the chunk so it survives until the JS thread picks it up */ + ChunkData* chunk = (ChunkData*)malloc(sizeof(ChunkData)); + if (!chunk) return 1; /* cancel on OOM */ + chunk->data = (char*)malloc((size_t)length); + if (!chunk->data) { free(chunk); return 1; } + memcpy(chunk->data, data, (size_t)length); + chunk->length = (size_t)length; + chunk->work_data = work_data; + + napi_status status = napi_call_threadsafe_function( + work_data->tsfn, chunk, napi_tsfn_blocking); + if (status != napi_ok) { + free(chunk->data); + free(chunk); + return 1; /* cancel */ + } + + return 0; /* continue */ +} + +/* Runs on the libuv worker thread – must NOT call napi_* (except tsfn) */ +static void streaming_execute(napi_env env, void* data) { + StreamingWorkData* work_data = (StreamingWorkData*)data; + + RequestBuffer req = { + .Command = work_data->command, + .CommandLength = (int32_t)work_data->command_length, + .Data = work_data->data, + .DataLength = (int32_t)work_data->data_length + }; + + work_data->response.Data = NULL; + work_data->response.DataLength = 0; + work_data->response.Error = NULL; + work_data->response.ErrorLength = 0; + + g_execute_command_with_callback( + &req, &work_data->response, + streaming_native_callback, work_data); +} + +/* Runs on the JS main thread after streaming_execute completes */ +static void streaming_complete(napi_env env, napi_status status, void* data) { + StreamingWorkData* work_data = (StreamingWorkData*)data; + + /* Release the threadsafe function */ + napi_release_threadsafe_function(work_data->tsfn, napi_tsfn_release); + + if (status == napi_cancelled) { + reject_with_error(env, work_data->deferred, "Async work cancelled"); + } else if (work_data->response.Error && work_data->response.ErrorLength > 0) { + 
/* Build error message */ + int32_t elen = work_data->response.ErrorLength; + size_t msg_size = (size_t)elen + 128; + char* msg = (char*)malloc(msg_size); + if (msg) { + snprintf(msg, msg_size, "Command '%s' failed: %.*s", + work_data->command, elen, + (const char*)work_data->response.Error); + reject_with_error(env, work_data->deferred, msg); + free(msg); + } else { + reject_with_error(env, work_data->deferred, "Command failed (OOM)"); + } + } else { + napi_value result; + if (work_data->response.Data && work_data->response.DataLength > 0) { + napi_create_string_utf8(env, + (const char*)work_data->response.Data, + work_data->response.DataLength, &result); + } else { + napi_create_string_utf8(env, "", 0, &result); + } + napi_resolve_deferred(env, work_data->deferred, result); + } + + /* Free native response buffers */ + free_native_buffer(work_data->response.Data); + free_native_buffer(work_data->response.Error); + + /* Free work data */ + napi_delete_async_work(env, work_data->work); + free(work_data->command); + free(work_data->data); + free(work_data); +} + +/* ── Streaming setup helpers ──────────────────────────────────────────── */ + +/* Free a partially-initialized StreamingWorkData. If the tsfn was created, + release it before freeing. */ +static void streaming_cleanup(StreamingWorkData* work_data, bool has_tsfn) { + if (has_tsfn) { + napi_release_threadsafe_function(work_data->tsfn, napi_tsfn_release); + } + free(work_data->command); + free(work_data->data); + free(work_data); +} + +/* Create the promise, threadsafe function, async work, and queue it. + On success, sets *out_promise and returns true (work_data ownership + transfers to streaming_complete). On failure, cleans up work_data, + throws a JS error, and returns false. */ +static bool streaming_setup(napi_env env, napi_value js_callback, + StreamingWorkData* work_data, + napi_value* out_promise) { + napi_status st; + + st = napi_create_promise(env, &work_data->deferred, out_promise); + if (st != napi_ok) { + streaming_cleanup(work_data, false); + napi_throw_error(env, NULL, "Failed to create streaming promise"); + return false; + } + + napi_value resource_name; + st = napi_create_string_utf8(env, "foundry_streaming_cb", + NAPI_AUTO_LENGTH, &resource_name); + if (st != napi_ok) { + streaming_cleanup(work_data, false); + napi_throw_error(env, NULL, "Failed to create streaming operation"); + return false; + } + + st = napi_create_threadsafe_function( + env, js_callback, NULL, resource_name, + 0, /* max_queue_size: 0 = unlimited */ + 1, /* initial_thread_count */ + NULL, /* thread_finalize_data */ + NULL, /* thread_finalize_cb */ + NULL, /* context */ + streaming_call_js, + &work_data->tsfn); + if (st != napi_ok) { + streaming_cleanup(work_data, false); + napi_throw_error(env, NULL, "Failed to create streaming callback"); + return false; + } + + napi_value work_name; + st = napi_create_string_utf8(env, "foundry_streaming_work", + NAPI_AUTO_LENGTH, &work_name); + if (st != napi_ok) { + streaming_cleanup(work_data, true); + napi_throw_error(env, NULL, "Failed to create streaming operation"); + return false; + } + + st = napi_create_async_work(env, NULL, work_name, + streaming_execute, + streaming_complete, + work_data, + &work_data->work); + if (st != napi_ok) { + streaming_cleanup(work_data, true); + napi_throw_error(env, NULL, "Failed to create streaming async work"); + return false; + } + + st = napi_queue_async_work(env, work_data->work); + if (st != napi_ok) { + napi_delete_async_work(env, work_data->work); + 
streaming_cleanup(work_data, true); + napi_throw_error(env, NULL, "Failed to queue streaming work"); + return false; + } + + return true; +} + +/* ── executeCommandStreaming(command, dataJson, callback) → Promise ───── */ + +static napi_value napi_execute_command_streaming(napi_env env, + napi_callback_info info) { + if (!g_execute_command_with_callback) { + napi_throw_error(env, NULL, "Native library not loaded. Call loadLibrary() first."); + return NULL; + } + + size_t argc = 3; + napi_value argv[3]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, argv, NULL, NULL)); + + if (argc < 3) { + napi_throw_error(env, NULL, + "executeCommandStreaming requires 3 arguments (command, dataJson, callback)"); + return NULL; + } + + /* Verify callback is a function */ + napi_valuetype cb_type; + NAPI_CALL(env, napi_typeof(env, argv[2], &cb_type)); + if (cb_type != napi_function) { + napi_throw_type_error(env, NULL, "Third argument must be a function"); + return NULL; + } + + /* Extract command string */ + size_t cmd_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], NULL, 0, &cmd_len)); + if (!check_string_length(env, cmd_len, "command")) return NULL; + char* cmd = (char*)malloc(cmd_len + 1); + if (!cmd) { napi_throw_error(env, NULL, "Out of memory"); return NULL; } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], cmd, cmd_len + 1, &cmd_len)); + + /* Extract data JSON string */ + size_t data_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], NULL, 0, &data_len)); + if (!check_string_length(env, data_len, "dataJson")) { free(cmd); return NULL; } + char* data_str = (char*)malloc(data_len + 1); + if (!data_str) { + free(cmd); + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], data_str, data_len + 1, &data_len)); + + /* Allocate work data */ + StreamingWorkData* work_data = (StreamingWorkData*)calloc(1, sizeof(StreamingWorkData)); + if (!work_data) { + free(cmd); + free(data_str); + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + work_data->command = cmd; + work_data->command_length = cmd_len; + work_data->data = data_str; + work_data->data_length = data_len; + + /* Setup phase: use manual status checks instead of NAPI_CALL so we can + clean up work_data on failure. Once async work is queued successfully, + streaming_complete owns all cleanup. 
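+     The returned promise resolves with the call's final response string
+     (or rejects with the reported error) once streaming_complete runs.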
*/ + napi_value promise = NULL; + if (!streaming_setup(env, argv[2], work_data, &promise)) { + return NULL; + } + + return promise; +} + +/* ── Module initialization ────────────────────────────────────────────── */ + +static napi_value init(napi_env env, napi_value exports) { + napi_property_descriptor props[] = { + { "loadLibrary", NULL, napi_load_library, NULL, NULL, NULL, + napi_default, NULL }, + { "executeCommand", NULL, napi_execute_command, NULL, NULL, NULL, + napi_default, NULL }, + { "executeCommandWithBinary", NULL, napi_execute_command_with_binary, + NULL, NULL, NULL, napi_default, NULL }, + { "executeCommandStreaming", NULL, napi_execute_command_streaming, + NULL, NULL, NULL, napi_default, NULL }, + }; + + NAPI_CALL(env, napi_define_properties(env, exports, + sizeof(props) / sizeof(props[0]), props)); + + return exports; +} + +NAPI_MODULE(NODE_GYP_MODULE_NAME, init) diff --git a/sdk/js/package.json b/sdk/js/package.json index 6e4acf50..408036b1 100644 --- a/sdk/js/package.json +++ b/sdk/js/package.json @@ -7,6 +7,7 @@ "type": "module", "files": [ "dist", + "prebuilds", "script/install-standard.cjs", "script/install-winml.cjs", "script/install-utils.cjs", @@ -15,6 +16,7 @@ ], "scripts": { "build": "tsc -p tsconfig.build.json", + "build:native": "cd native && node-gyp rebuild && node ../script/copy-addon.cjs", "docs": "typedoc", "example": "tsx examples/chat-completion.ts", "install": "node script/install-standard.cjs", @@ -24,7 +26,6 @@ "test": "mocha --import=tsx test/**/*.test.ts" }, "dependencies": { - "koffi": "^2.9.0", "adm-zip": "^0.5.16" }, "devDependencies": { @@ -33,6 +34,7 @@ "@types/node": "^24.10.1", "chai": "^6.2.1", "mocha": "^11.7.5", + "node-api-headers": "^1.8.0", "tsx": "^4.7.0", "typedoc": "^0.28.15", "typedoc-plugin-markdown": "^4.2.0", @@ -45,4 +47,4 @@ }, "author": "", "license": "ISC" -} \ No newline at end of file +} diff --git a/sdk/js/script/copy-addon.cjs b/sdk/js/script/copy-addon.cjs new file mode 100644 index 00000000..6c31fc9d --- /dev/null +++ b/sdk/js/script/copy-addon.cjs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Copies the locally-built Node-API addon into the prebuilds directory +// so that CoreInterop can find it at runtime during development. + +'use strict'; + +const fs = require('fs'); +const path = require('path'); + +const platformKey = `${process.platform}-${process.arch}`; +const source = path.join(__dirname, '..', 'native', 'build', 'Release', 'foundry_local_napi.node'); +const destDir = path.join(__dirname, '..', 'prebuilds', platformKey); +const dest = path.join(destDir, 'foundry_local_napi.node'); + +if (!fs.existsSync(source)) { + console.warn(`[copy-addon] Addon not found at ${source}. 
Run 'npm run build:native' first.`); + process.exit(1); +} + +fs.mkdirSync(destDir, { recursive: true }); +fs.copyFileSync(source, dest); +console.log(`[copy-addon] Copied addon to ${dest}`); diff --git a/sdk/js/script/pack.cjs b/sdk/js/script/pack.cjs index f550043e..f57ab63e 100644 --- a/sdk/js/script/pack.cjs +++ b/sdk/js/script/pack.cjs @@ -43,7 +43,7 @@ try { copiedFiles.push(depsWinmlDest); } } else { - pkg.files = ['dist', 'script/install-standard.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs', 'deps_versions.json']; + pkg.files = ['dist', 'prebuilds', 'script/install-standard.cjs', 'script/install-utils.cjs', 'script/preinstall.cjs', 'deps_versions.json']; if (fs.existsSync(depsSource) && !fs.existsSync(depsDest)) { fs.copyFileSync(depsSource, depsDest); copiedFiles.push(depsDest); diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts index 6a0bc6b4..ece88e8d 100644 --- a/sdk/js/src/detail/coreInterop.ts +++ b/sdk/js/src/detail/coreInterop.ts @@ -1,43 +1,50 @@ -import koffi from 'koffi'; import path from 'path'; import fs from 'fs'; +import { createRequire } from 'module'; import { fileURLToPath } from 'url'; import { Configuration } from '../configuration.js'; -koffi.struct('RequestBuffer', { - Command: 'char*', - CommandLength: 'int32_t', - Data: 'char*', - DataLength: 'int32_t', -}); - -koffi.struct('ResponseBuffer', { - Data: 'void*', - DataLength: 'int32_t', - Error: 'void*', - ErrorLength: 'int32_t', -}); - -// Extended request struct for binary data (audio streaming) -koffi.struct('StreamingRequestBuffer', { - Command: 'char*', - CommandLength: 'int32_t', - Data: 'char*', // JSON params - DataLength: 'int32_t', - BinaryData: 'void*', // raw PCM audio bytes - BinaryDataLength: 'int32_t', -}); - -const CallbackType = koffi.proto('int32_t CallbackType(void *data, int32_t length, void *userData)'); - const __filename = fileURLToPath(import.meta.url); const __dirname = path.dirname(__filename); +// Load the prebuilt Node-API addon +const require = createRequire(import.meta.url); + +interface NativeAddon { + loadLibrary(corePath: string, depPaths?: string[]): void; + executeCommand(command: string, dataJson: string): string; + executeCommandWithBinary(command: string, dataJson: string, binaryBuffer: Buffer): string; + executeCommandStreaming(command: string, dataJson: string, callback: (chunk: string) => void): Promise; +} + +function loadAddon(): NativeAddon { + const platform = process.platform; + const arch = process.arch; + const platformKey = `${platform}-${arch}`; + + // The prebuilt addon ships inside the SDK package under prebuilds// + const sdkRoot = path.resolve(__dirname, '..', '..'); + const prebuiltPath = path.join(sdkRoot, 'prebuilds', platformKey, 'foundry_local_napi.node'); + + if (fs.existsSync(prebuiltPath)) { + return require(prebuiltPath) as NativeAddon; + } + + // Fallback: development builds from node-gyp (sdk contributors) + const devPath = path.join(sdkRoot, 'native', 'build', 'Release', 'foundry_local_napi.node'); + if (fs.existsSync(devPath)) { + return require(devPath) as NativeAddon; + } + + throw new Error( + `Could not find foundry_local_napi.node for platform ${platformKey}. ` + + `Searched: ${prebuiltPath}, ${devPath}. 
` + + `Please ensure the SDK was installed correctly or run 'npm run build:native' to compile from source.` + ); +} + export class CoreInterop { - private lib: any; - private execute_command: any; - private execute_command_with_callback: any; - private execute_command_with_binary: any = null; + private addon: NativeAddon; private static _getLibraryExtension(): string { const platform = process.platform; @@ -78,11 +85,9 @@ export class CoreInterop { return null; } - private _toBytes(str: string): Uint8Array { - return new TextEncoder().encode(str); - } - constructor(config: Configuration) { + this.addon = loadAddon(); + const corePath = config.params['FoundryLocalCorePath'] || CoreInterop._resolveDefaultCorePath(config); if (!corePath) { @@ -93,50 +98,20 @@ export class CoreInterop { const ext = CoreInterop._getLibraryExtension(); // On Windows, explicitly load dependencies to work around DLL resolution challenges + const depPaths: string[] = []; if (process.platform === 'win32') { - koffi.load(path.join(coreDir, `onnxruntime${ext}`)); - koffi.load(path.join(coreDir, `onnxruntime-genai${ext}`)); - process.env.PATH = `${coreDir};${process.env.PATH}`; + depPaths.push(path.join(coreDir, `onnxruntime${ext}`)); + depPaths.push(path.join(coreDir, `onnxruntime-genai${ext}`)); + const currentPath = process.env.PATH ?? ''; + process.env.PATH = currentPath ? `${coreDir};${currentPath}` : coreDir; } - this.lib = koffi.load(corePath); - this.execute_command = this.lib.func('void execute_command(RequestBuffer *request, _Inout_ ResponseBuffer *response)'); - this.execute_command_with_callback = this.lib.func('void execute_command_with_callback(RequestBuffer *request, _Inout_ ResponseBuffer *response, CallbackType *callback, void *userData)'); - this.execute_command_with_binary = this.lib.func('void execute_command_with_binary(StreamingRequestBuffer *request, _Inout_ ResponseBuffer *response)'); + this.addon.loadLibrary(corePath, depPaths.length > 0 ? depPaths : undefined); } public executeCommand(command: string, params?: any): string { - const cmdBuf = koffi.alloc('char', command.length + 1); - koffi.encode(cmdBuf, 'char', command, command.length + 1); - const dataStr = params ? JSON.stringify(params) : ''; - const dataBytes = this._toBytes(dataStr); - const dataBuf = koffi.alloc('char', dataBytes.length + 1); - koffi.encode(dataBuf, 'char', dataStr, dataBytes.length + 1); - - const req = { - Command: koffi.address(cmdBuf), - CommandLength: command.length, - Data: koffi.address(dataBuf), - DataLength: dataBytes.length - }; - const res = { Data: 0, DataLength: 0, Error: 0, ErrorLength: 0 }; - - this.execute_command(req, res); - - try { - if (res.Error) { - const errorMsg = koffi.decode(res.Error, 'char', res.ErrorLength); - throw new Error(`Command '${command}' failed: ${errorMsg}`); - } - - return res.Data ? koffi.decode(res.Data, 'char', res.DataLength) : ""; - } finally { - // Free the heap-allocated response strings using koffi.free() - // Docs: https://koffi.dev/pointers/#disposable-types - if (res.Data) koffi.free(res.Data); - if (res.Error) koffi.free(res.Error); - } + return this.addon.executeCommand(command, dataStr); } /** @@ -145,100 +120,14 @@ export class CoreInterop { * both JSON params and raw binary data via StreamingRequestBuffer. */ public executeCommandWithBinary(command: string, params: any, binaryData: Uint8Array): string { - const cmdBuf = koffi.alloc('char', command.length + 1); - koffi.encode(cmdBuf, 'char', command, command.length + 1); - const dataStr = params ? 
JSON.stringify(params) : ''; - const dataBytes = this._toBytes(dataStr); - const dataBuf = koffi.alloc('char', dataBytes.length + 1); - koffi.encode(dataBuf, 'char', dataStr, dataBytes.length + 1); - - // For binary data, use a Node.js Buffer which allocates stable external memory - // that won't be moved by V8's garbage collector during the FFI call. - const binLength = binaryData.length; - const binBuf = Buffer.from(binaryData); - - // Use koffi.as to pass Buffer directly as a typed pointer - const binTypedPtr = koffi.as(binBuf, 'void *'); - - const req = { - Command: koffi.address(cmdBuf), - CommandLength: command.length, - Data: koffi.address(dataBuf), - DataLength: dataBytes.length, - BinaryData: binTypedPtr, - BinaryDataLength: binLength - }; - const res = { Data: 0, DataLength: 0, Error: 0, ErrorLength: 0 }; - - this.execute_command_with_binary(req, res); - - try { - if (res.Error) { - const errorMsg = koffi.decode(res.Error, 'char', res.ErrorLength); - throw new Error(`Command '${command}' failed: ${errorMsg}`); - } - - return res.Data ? koffi.decode(res.Data, 'char', res.DataLength) : ""; - } finally { - if (res.Data) koffi.free(res.Data); - if (res.Error) koffi.free(res.Error); - } + const binBuf = Buffer.from(binaryData.buffer, binaryData.byteOffset, binaryData.byteLength); + return this.addon.executeCommandWithBinary(command, dataStr, binBuf); } public executeCommandStreaming(command: string, params: any, callback: (chunk: string) => void): Promise { - const cmdBuf = koffi.alloc('char', command.length + 1); - koffi.encode(cmdBuf, 'char', command, command.length + 1); - const dataStr = params ? JSON.stringify(params) : ''; - const dataBytes = this._toBytes(dataStr); - const dataBuf = koffi.alloc('char', dataBytes.length + 1); - koffi.encode(dataBuf, 'char', dataStr, dataBytes.length + 1); - - const cb = koffi.register((data: any, length: number, userData: any) => { - try { - const chunk = koffi.decode(data, 'char', length); - callback(chunk); - return 0; // continue - } catch { - return 1; // cancel on error - } - }, koffi.pointer(CallbackType)); - - return new Promise((resolve, reject) => { - const req = { - Command: koffi.address(cmdBuf), - CommandLength: command.length, - Data: koffi.address(dataBuf), - DataLength: dataBytes.length - }; - const res = { Data: 0, DataLength: 0, Error: 0, ErrorLength: 0 }; - - this.execute_command_with_callback.async(req, res, cb, null, (err: any) => { - koffi.unregister(cb); - koffi.free(cmdBuf); - koffi.free(dataBuf); - - if (err) { - reject(err); - return; - } - - try { - if (res.Error) { - const errorMsg = koffi.decode(res.Error, 'char', res.ErrorLength); - reject(new Error(`Command '${command}' failed: ${errorMsg}`)); - } else { - const responseData = res.Data ? koffi.decode(res.Data, 'char', res.DataLength) : ''; - resolve(responseData); - } - } finally { - // Free the heap-allocated response strings using koffi.free() - if (res.Data) koffi.free(res.Data); - if (res.Error) koffi.free(res.Error); - } - }); - }); + return this.addon.executeCommandStreaming(command, dataStr, callback); } } From 2d2f4dce0c0bbc0774a5b0f9284bafc04e483954 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Thu, 16 Apr 2026 21:37:45 -0700 Subject: [PATCH 43/83] expands tests to run on osx-arm64 and linux-x64 (#638) Adds mac/linux test coverage for FL Core & SDK and retires github actions macos tests. NOTE: The Linux JS test job is currently disabled due to intermittent SSL errors when running get_model_list. This issue is under investigation. 
--------- Co-authored-by: Prathik Rao --- .github/workflows/build-cs-steps.yml | 151 ----- .github/workflows/build-js-steps.yml | 136 ----- .github/workflows/build-python-steps.yml | 100 ---- .github/workflows/build-rust-steps.yml | 117 ---- .github/workflows/foundry-local-sdk-build.yml | 45 -- .pipelines/foundry-local-packaging.yml | 529 ++++++++++++++++-- .pipelines/templates/build-core-steps.yml | 2 +- .pipelines/templates/build-cs-steps.yml | 40 -- .pipelines/templates/build-js-steps.yml | 22 - .pipelines/templates/build-python-steps.yml | 31 +- .pipelines/templates/build-rust-steps.yml | 22 - .pipelines/templates/test-cs-steps.yml | 106 ++++ .pipelines/templates/test-js-steps.yml | 142 +++++ .pipelines/templates/test-python-steps.yml | 133 +++++ .pipelines/templates/test-rust-steps.yml | 130 +++++ sdk/js/test/detail/modelLoadManager.test.ts | 2 + 16 files changed, 999 insertions(+), 709 deletions(-) delete mode 100644 .github/workflows/build-cs-steps.yml delete mode 100644 .github/workflows/build-js-steps.yml delete mode 100644 .github/workflows/build-python-steps.yml delete mode 100644 .github/workflows/build-rust-steps.yml delete mode 100644 .github/workflows/foundry-local-sdk-build.yml create mode 100644 .pipelines/templates/test-cs-steps.yml create mode 100644 .pipelines/templates/test-js-steps.yml create mode 100644 .pipelines/templates/test-python-steps.yml create mode 100644 .pipelines/templates/test-rust-steps.yml diff --git a/.github/workflows/build-cs-steps.yml b/.github/workflows/build-cs-steps.yml deleted file mode 100644 index 937f728f..00000000 --- a/.github/workflows/build-cs-steps.yml +++ /dev/null @@ -1,151 +0,0 @@ -name: Build C# SDK - -on: - workflow_call: - inputs: - version: - required: true - type: string - useWinML: - required: false - type: boolean - default: false - buildConfiguration: - required: false - type: string - default: 'Debug' # or 'Release' - platform: - required: false - type: string - default: 'windows' # or 'macos' or 'ubuntu' - -permissions: - contents: read - -jobs: - build: - runs-on: ${{ inputs.platform }}-latest - env: - buildConfiguration: 'Debug' - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - clean: true - - - name: Setup .NET 10 SDK - uses: actions/setup-dotnet@v5 - with: - dotnet-version: '10.0.x' - env: - NUGET_AUTH_TOKEN: ${{ secrets.AI_FOUNDRY_LOCAL_PAT }} - - - name: Generate temporary NuGet.config - run: | - # The repo-level NuGet.config cleared all sources and only included AIFoundryLocal_PublicPackages. - # We generate a temporary one with both nuget.org and AIFoundryLocal_PublicPackages. - # We provide credentials to allow the AIFoundryLocal_PublicPackages feed to pull from its upstreams. - $xml = @" - - - - - - - - - - - - - - - "@ - Set-Content -Path sdk/cs/NuGet.temp.config -Value $xml - shell: pwsh - - # TODO: once the nightly packaging is fixed, add back the commented out lines with /p:FoundryLocalCoreVersion="*-*" - # /p:FoundryLocalCoreVersion="*-*" to always use nightly version of Foundry Local Core - - name: Restore dependencies - run: | - # Clear the local NuGet cache to avoid bad metadata or corrupted package states. - dotnet nuget locals all --clear - # Restore using the temporary config file with credentials. 
- dotnet restore sdk/cs/src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=${{ inputs.useWinML }} --configfile sdk/cs/NuGet.temp.config - - - name: Build solution - run: | - dotnet build sdk/cs/src/Microsoft.AI.Foundry.Local.csproj --no-restore --configuration ${{ inputs.buildConfiguration }} /p:UseWinML=${{ inputs.useWinML }} - - # need to use direct git commands to clone from Azure DevOps instead of actions/checkout - - name: Checkout test-data-shared from Azure DevOps - shell: pwsh - working-directory: ${{ github.workspace }}/.. - run: | - $pat = "${{ secrets.AZURE_DEVOPS_PAT }}" - $encodedPat = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes(":$pat")) - - # Configure git to use the PAT - git config --global http.https://dev.azure.com.extraheader "AUTHORIZATION: Basic $encodedPat" - - # Clone with LFS to parent directory - git lfs install - git clone --depth 1 https://dev.azure.com/microsoft/windows.ai.toolkit/_git/test-data-shared test-data-shared - - Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - - name: Run Foundry Local Core tests - run: | - # Restore test project with authenticated config, then run tests without restoring again. - dotnet restore sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj /p:UseWinML=${{ inputs.useWinML }} --configfile sdk/cs/NuGet.temp.config - dotnet test sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj --no-restore --verbosity normal /p:UseWinML=${{ inputs.useWinML }} - - - name: Pack NuGet package - shell: pwsh - run: | - $projectPath = "sdk/cs/src/Microsoft.AI.Foundry.Local.csproj" - $outputDir = "sdk/cs/bin" - $version = "${{ inputs.version }}" - $config = "${{ inputs.buildConfiguration }}" - $useWinML = "${{ inputs.useWinML }}" - # $coreVersion = "${{ env.FOUNDRY_CORE_VERSION }}" - - # Always mark as prerelease since we use nightly core - if (-not $version.Contains("dev")) { - $version = "$version-dev" - } - - Write-Host "Packing project: $projectPath" - Write-Host "Output directory: $outputDir" - Write-Host "Version: $version" - Write-Host "Configuration: $config" - Write-Host "UseWinML: $useWinML" - # Write-Host "FoundryLocalCoreVersion: $coreVersion" - - # & dotnet pack $projectPath --no-build --configuration $config --output $outputDir /p:PackageVersion=$version /p:UseWinML=$useWinML /p:FoundryLocalCoreVersion="*-*" /p:IncludeSymbols=true /p:SymbolPackageFormat=snupkg --verbosity normal - & dotnet pack $projectPath --no-build --configuration $config --output $outputDir /p:PackageVersion=$version /p:UseWinML=$useWinML /p:IncludeSymbols=true /p:SymbolPackageFormat=snupkg --verbosity normal - - if ($LASTEXITCODE -ne 0) { - Write-Error "dotnet pack failed with exit code $LASTEXITCODE" - exit $LASTEXITCODE - } - - Write-Host "Pack completed successfully" - Write-Host "Generated packages:" - Get-ChildItem -Path $outputDir -Filter "*.nupkg" | ForEach-Object { Write-Host " $($_.Name)" } - Get-ChildItem -Path $outputDir -Filter "*.snupkg" | ForEach-Object { Write-Host " $($_.Name)" } - - - name: Upload NuGet packages - uses: actions/upload-artifact@v4 - with: - name: cs-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }} - path: | - sdk/cs/bin/*.nupkg - sdk/cs/bin/*.snupkg - - - name: Upload flcore logs - uses: actions/upload-artifact@v4 - with: - name: cs-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }}-logs - path: sdk/cs/logs/** \ No newline at end of file diff --git a/.github/workflows/build-js-steps.yml 
b/.github/workflows/build-js-steps.yml deleted file mode 100644 index a869477d..00000000 --- a/.github/workflows/build-js-steps.yml +++ /dev/null @@ -1,136 +0,0 @@ -name: Build JS SDK - -on: - workflow_call: - inputs: - version: - required: true - type: string - useWinML: - required: false - type: boolean - default: false - platform: - required: false - type: string - default: 'windows' # or 'macos' or 'ubuntu' - -permissions: - contents: read - -jobs: - build: - # https://github.com/actions/runner-images?tab=readme-ov-file#available-images - runs-on: ${{ inputs.platform }}-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - clean: true - - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: '20.x' - - - name: Setup Python (for node-gyp) - uses: actions/setup-python@v5 - with: - python-version: '3.x' - - - name: Build Node-API addon - shell: pwsh - working-directory: sdk/js - run: | - npm install --no-save node-gyp node-api-headers --registry https://registry.npmjs.org - Set-Location native - npx node-gyp rebuild - $platformKey = node -e 'process.stdout.write(process.platform + "-" + process.arch)' - $destDir = "../prebuilds/$platformKey" - New-Item -ItemType Directory -Path $destDir -Force | Out-Null - Copy-Item "build/Release/foundry_local_napi.node" "$destDir/foundry_local_napi.node" -Force - Write-Host "Built addon for $platformKey -> $destDir/foundry_local_napi.node" - - # needed to download Foundry Local Core from Azure Artifacts - - name: Setup .NET SDK for NuGet authentication - uses: actions/setup-dotnet@v5 - with: - dotnet-version: '9.0.x' - env: - NUGET_AUTH_TOKEN: ${{ secrets.AZURE_DEVOPS_PAT }} - - - name: Format version for JS - shell: pwsh - run: | - # Release: 0.9.0.41 -> 0.9.0-41 - $version = "${{ inputs.version }}" - $versionParts = $version -split '\.' - $baseVersion = ($versionParts[0..2]) -join '.' - $buildNumber = $versionParts[3] - $version = "$baseVersion-$buildNumber" - Write-Host "Modified version for JS: $version" - Write-Host "ProjectVersion=$version" >> $env:GITHUB_ENV - - # need to use direct git commands to clone from Azure DevOps instead of actions/checkout - - name: Checkout test-data-shared from Azure DevOps - shell: pwsh - working-directory: ${{ github.workspace }}/.. - run: | - $pat = "${{ secrets.AZURE_DEVOPS_PAT }}" - $encodedPat = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes(":$pat")) - - # Configure git to use the PAT - git config --global http.https://dev.azure.com.extraheader "AUTHORIZATION: Basic $encodedPat" - - # Clone with LFS to parent directory - git lfs install - git clone --depth 1 https://dev.azure.com/microsoft/windows.ai.toolkit/_git/test-data-shared test-data-shared - - Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - # The .npmrc points to an Azure Artifacts feed for CFS compliance. - # Remove it in CI so npm uses the public registry directly. 
- - name: Remove .npmrc (use public registry) - shell: pwsh - working-directory: sdk/js - run: | - if (Test-Path .npmrc) { Remove-Item .npmrc -Force; Write-Host "Removed .npmrc" } - - - name: npm install - working-directory: sdk/js - run: npm install - - - name: Set package version - working-directory: sdk/js - run: npm version ${{ env.ProjectVersion }} --no-git-tag-version --allow-same-version - - - name: Run tests - working-directory: sdk/js - run: npm test - - - name: Build package - working-directory: sdk/js - run: npm run build - - - name: Pack npm package (WinML) - if: ${{ inputs.useWinML == true }} - working-directory: sdk/js - run: npm run pack:winml - - - name: Pack npm package (Standard) - if: ${{ inputs.useWinML == false }} - working-directory: sdk/js - run: npm run pack - - - name: Upload npm packages - uses: actions/upload-artifact@v4 - with: - name: js-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }} - path: sdk/js/*.tgz - - - name: Upload flcore logs - uses: actions/upload-artifact@v4 - with: - name: js-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }}-logs - path: sdk/js/logs/** \ No newline at end of file diff --git a/.github/workflows/build-python-steps.yml b/.github/workflows/build-python-steps.yml deleted file mode 100644 index d74f97b2..00000000 --- a/.github/workflows/build-python-steps.yml +++ /dev/null @@ -1,100 +0,0 @@ -name: Build Python SDK - -on: - workflow_call: - inputs: - version: - required: true - type: string - useWinML: - required: false - type: boolean - default: false - platform: - required: false - type: string - default: 'windows' - -permissions: - contents: read - -jobs: - build: - runs-on: ${{ inputs.platform }}-latest - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - clean: true - - - name: Setup Python - uses: actions/setup-python@v5 - with: - python-version: '3.12' - - # Clone test-data-shared from Azure DevOps (models for integration tests) - - name: Checkout test-data-shared from Azure DevOps - shell: pwsh - working-directory: ${{ github.workspace }}/.. 
- run: | - $pat = "${{ secrets.AZURE_DEVOPS_PAT }}" - $encodedPat = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes(":$pat")) - - git config --global http.https://dev.azure.com.extraheader "AUTHORIZATION: Basic $encodedPat" - - git lfs install - git clone --depth 1 https://dev.azure.com/microsoft/windows.ai.toolkit/_git/test-data-shared test-data-shared - - Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - - name: Install build tool - run: | - python -m pip install build - - - name: Configure pip for Azure Artifacts - run: | - pip config set global.index-url https://az:${{ secrets.AI_FOUNDRY_LOCAL_PAT }}@pkgs.dev.azure.com/aiinfra/AIFoundryLocal/_packaging/AIFoundryLocal_PublicPackages/pypi/simple/ - pip config set global.extra-index-url https://pypi.org/simple/ - pip config set global.pre true - - - name: Set package version - working-directory: sdk/python - run: echo '__version__ = "${{ inputs.version }}"' > src/version.py - - - name: Build wheel (Cross-Platform) - if: ${{ inputs.useWinML == false }} - working-directory: sdk/python - run: python -m build --wheel --outdir dist/ - - - name: Build wheel (WinML) - if: ${{ inputs.useWinML == true }} - working-directory: sdk/python - run: python -m build --wheel -C winml=true --outdir dist/ - - - name: Install built wheel - working-directory: sdk/python - shell: pwsh - run: | - $wheel = (Get-ChildItem dist/*.whl | Select-Object -First 1).FullName - pip install $wheel - - - name: Install test dependencies - run: pip install coverage pytest>=7.0.0 pytest-timeout>=2.1.0 - - - name: Run tests - working-directory: sdk/python - run: python -m pytest test/ -v - - - name: Upload Python packages - uses: actions/upload-artifact@v4 - with: - name: python-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }} - path: sdk/python/dist/* - - - name: Upload flcore logs - uses: actions/upload-artifact@v4 - if: always() - with: - name: python-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }}-logs - path: sdk/python/logs/** diff --git a/.github/workflows/build-rust-steps.yml b/.github/workflows/build-rust-steps.yml deleted file mode 100644 index 75d86b20..00000000 --- a/.github/workflows/build-rust-steps.yml +++ /dev/null @@ -1,117 +0,0 @@ -name: Build Rust SDK - -on: - workflow_call: - inputs: - platform: - required: false - type: string - default: 'ubuntu' # or 'windows' or 'macos' - useWinML: - required: false - type: boolean - default: false - run-integration-tests: - required: false - type: boolean - default: true - -permissions: - contents: read - -jobs: - build: - runs-on: ${{ inputs.platform }}-latest - - defaults: - run: - working-directory: sdk/rust - - env: - CARGO_FEATURES: ${{ inputs.useWinML && '--features winml,nightly' || '--features nightly' }} - - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - clean: true - - - name: Install Rust toolchain - uses: dtolnay/rust-toolchain@stable - with: - components: clippy, rustfmt - - - name: Cache cargo dependencies - uses: Swatinem/rust-cache@v2 - with: - workspaces: sdk/rust -> target - - # The .cargo/config.toml redirects crates-io to an Azure Artifacts feed - # for CFS compliance. Remove the redirect in CI so cargo can fetch from - # crates.io directly without Azure DevOps auth. 
- - name: Use crates.io directly - shell: pwsh - working-directory: sdk/rust - run: | - if (Test-Path .cargo/config.toml) { - Remove-Item .cargo/config.toml - Write-Host "Removed .cargo/config.toml crates-io redirect" - } - - # Copy deps_versions.json into the crate directory so cargo package - # can include it and build.rs can find it during verify. - - name: Copy deps_versions.json for crate packaging - shell: pwsh - working-directory: ${{ github.workspace }} - run: Copy-Item sdk/deps_versions.json sdk/rust/deps_versions.json - - - name: Checkout test-data-shared from Azure DevOps - if: ${{ inputs.run-integration-tests }} - shell: pwsh - working-directory: ${{ github.workspace }}/.. - run: | - $pat = "${{ secrets.AZURE_DEVOPS_PAT }}" - $encodedPat = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes(":$pat")) - - # Configure git to use the PAT - git config --global http.https://dev.azure.com.extraheader "AUTHORIZATION: Basic $encodedPat" - - # Clone with LFS to parent directory - git lfs install - git clone --depth 1 https://dev.azure.com/microsoft/windows.ai.toolkit/_git/test-data-shared test-data-shared - - Write-Host "Clone completed successfully to ${{ github.workspace }}/../test-data-shared" - - - name: Check formatting - run: cargo fmt --all -- --check - - # Run Clippy - Rust's official linter for catching common mistakes, enforcing idioms, and improving code quality - - name: Run clippy - run: cargo clippy --all-targets ${{ env.CARGO_FEATURES }} -- -D warnings - - - name: Build - run: cargo build ${{ env.CARGO_FEATURES }} - - - name: Run unit tests - run: cargo test --lib ${{ env.CARGO_FEATURES }} - - - name: Run integration tests - if: ${{ inputs.run-integration-tests }} - run: cargo test --tests ${{ env.CARGO_FEATURES }} -- --include-ignored --test-threads=1 --nocapture - - # --allow-dirty allows publishing with uncommitted changes, needed because the build process modifies generated files - - name: Package crate - run: cargo package ${{ env.CARGO_FEATURES }} --allow-dirty - - - name: Upload SDK artifact - uses: actions/upload-artifact@v4 - with: - name: rust-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }} - path: sdk/rust/target/package/*.crate - - - name: Upload flcore logs - uses: actions/upload-artifact@v4 - if: always() - with: - name: rust-sdk-${{ inputs.platform }}${{ inputs.useWinML == true && '-winml' || '' }}-logs - path: sdk/rust/logs/** diff --git a/.github/workflows/foundry-local-sdk-build.yml b/.github/workflows/foundry-local-sdk-build.yml deleted file mode 100644 index 07ae4d68..00000000 --- a/.github/workflows/foundry-local-sdk-build.yml +++ /dev/null @@ -1,45 +0,0 @@ -name: SDK Build - -on: - pull_request: - paths: - - 'sdk/**' - - '.github/workflows/**' - push: - paths: - - 'sdk/**' - - '.github/workflows/**' - branches: - - main - workflow_dispatch: - -permissions: - contents: read - -jobs: - # Windows build/test moved to .pipelines/foundry-local-packaging.yml and runs in ADO - # MacOS ARM64 not supported in ADO, need to use GitHub Actions - build-cs-macos: - uses: ./.github/workflows/build-cs-steps.yml - with: - version: '0.9.0.${{ github.run_number }}' - platform: 'macos' - secrets: inherit - build-js-macos: - uses: ./.github/workflows/build-js-steps.yml - with: - version: '0.9.0.${{ github.run_number }}' - platform: 'macos' - secrets: inherit - build-python-macos: - uses: ./.github/workflows/build-python-steps.yml - with: - version: '0.9.0.${{ github.run_number }}' - platform: 'macos' - secrets: inherit - build-rust-macos: 
- uses: ./.github/workflows/build-rust-steps.yml - with: - platform: 'macos' - run-integration-tests: true - secrets: inherit \ No newline at end of file diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index 9ff902cc..f4d49405 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -133,7 +133,7 @@ extends: dependsOn: compute_version jobs: - job: flc_win_x64 - displayName: 'Core win-x64' + displayName: 'win-x64' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -153,7 +153,7 @@ extends: platform: x64 - job: flc_win_arm64 - displayName: 'Core win-arm64' + displayName: 'win-arm64' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -171,7 +171,7 @@ extends: platform: arm64 - job: flc_linux_x64 - displayName: 'Core linux-x64' + displayName: 'linux-x64' pool: name: onnxruntime-Ubuntu2404-AMD-CPU os: linux @@ -183,17 +183,20 @@ extends: steps: - checkout: neutron-server clean: true + - checkout: test-data-shared + lfs: true - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: linux-x64 platform: x64 - job: flc_osx_arm64 - displayName: 'Core osx-arm64' + displayName: 'osx-arm64' pool: - name: Azure Pipelines - vmImage: 'macOS-15' + name: AcesShared os: macOS + demands: + - ImageOverride -equals ACES_VM_SharedPool_Sequoia templateContext: outputs: - output: pipelineArtifact @@ -593,13 +596,390 @@ extends: flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + # The Rust SDK ships as a single package with different install options for standard vs WinML, + # so we only publish once under the standard stage and skip the WinML stage. Leaving + # it as a commented block in case we decide to publish a separate Rust WinML package in the future.
+ # # ── Build Rust SDK (WinML) ── + # - stage: build_rust_winml + # displayName: 'Build Rust SDK (WinML)' + # dependsOn: + # - build_core_winml + # jobs: + # - job: rust_sdk_winml + # displayName: 'Build' + # pool: + # name: onnxruntime-Win-CPU-2022 + # os: windows + # templateContext: + # inputs: + # - input: pipelineArtifact + # artifactName: 'version-info' + # targetPath: '$(Pipeline.Workspace)/version-info' + # - input: pipelineArtifact + # artifactName: 'flc-nuget-winml' + # targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + # - input: pipelineArtifact + # artifactName: 'deps-versions-winml' + # targetPath: '$(Pipeline.Workspace)/deps-versions-winml' + # outputs: + # - output: pipelineArtifact + # artifactName: 'rust-sdk-winml' + # targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' + # steps: + # - checkout: self + # clean: true + # - checkout: test-data-shared + # lfs: true + # - template: .pipelines/templates/build-rust-steps.yml@self + # parameters: + # version: ${{ parameters.version }} + # isRelease: ${{ parameters.isRelease }} + # prereleaseId: ${{ parameters.prereleaseId }} + # isWinML: true + # flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + # depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' + # outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' + + # ── Test C# SDK ── + - stage: test_cs + displayName: 'Test C#' + dependsOn: build_cs + jobs: + - job: test_cs_win_x64 + displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-cs-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_cs_linux_x64 + displayName: 'linux-x64' + pool: + name: onnxruntime-Ubuntu2404-AMD-CPU + os: linux + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-cs-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_cs_osx_arm64 + displayName: 'osx-arm64' + pool: + name: AcesShared + os: macOS + demands: + - ImageOverride -equals ACES_VM_SharedPool_Sequoia + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-cs-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + # ── Test JS SDK ── + - stage: test_js + displayName: 'Test JS' + dependsOn: build_js + 
jobs: + - job: test_js_win_x64 + displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-js-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + # The Linux JS test job is currently disabled due to intermittent SSL errors when running get_model_list. This issue is under investigation. + # Error: Command 'get_model_list' failed: Error: System.Net.Http.HttpRequestException: An error occurred while sending the request. + # ---> System.IO.IOException: The decryption operation failed, see inner exception. + # ---> Interop+OpenSsl+SslException: Decrypt failed with OpenSSL error - SSL_ERROR_SSL. + # ---> System.Security.Cryptography.CryptographicException: Error occurred during a cryptographic operation. + # - job: test_js_linux_x64 + # displayName: 'linux-x64' + # pool: + # name: onnxruntime-Ubuntu2404-AMD-CPU + # os: linux + # templateContext: + # inputs: + # - input: pipelineArtifact + # artifactName: 'flc-nuget' + # targetPath: '$(Pipeline.Workspace)/flc-nuget' + # - input: pipelineArtifact + # artifactName: 'deps-versions-standard' + # targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + # steps: + # - checkout: self + # clean: true + # - checkout: test-data-shared + # lfs: true + # - template: .pipelines/templates/test-js-steps.yml@self + # parameters: + # isWinML: false + # flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + # depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_js_osx_arm64 + displayName: 'osx-arm64' + pool: + name: AcesShared + os: macOS + demands: + - ImageOverride -equals ACES_VM_SharedPool_Sequoia + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-js-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + # ── Test Python SDK ── + - stage: test_python + displayName: 'Test Python' + dependsOn: build_python + jobs: + - job: test_python_win_x64 + displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: '$(Pipeline.Workspace)/flc-wheels' + - input: pipelineArtifact + artifactName: 'python-sdk' + targetPath: '$(Pipeline.Workspace)/python-sdk' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + isWinML: false + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' + depsVersionsDir: 
'$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_python_linux_x64 + displayName: 'linux-x64' + pool: + name: onnxruntime-Ubuntu2404-AMD-CPU + os: linux + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: '$(Pipeline.Workspace)/flc-wheels' + - input: pipelineArtifact + artifactName: 'python-sdk' + targetPath: '$(Pipeline.Workspace)/python-sdk' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + isWinML: false + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_python_osx_arm64 + displayName: 'osx-arm64' + pool: + name: AcesShared + os: macOS + demands: + - ImageOverride -equals ACES_VM_SharedPool_Sequoia + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: '$(Pipeline.Workspace)/flc-wheels' + - input: pipelineArtifact + artifactName: 'python-sdk' + targetPath: '$(Pipeline.Workspace)/python-sdk' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + isWinML: false + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + # ── Test Rust SDK ── + - stage: test_rust + displayName: 'Test Rust' + dependsOn: build_rust + jobs: + - job: test_rust_win_x64 + displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-rust-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_rust_linux_x64 + displayName: 'linux-x64' + pool: + name: onnxruntime-Ubuntu2404-AMD-CPU + os: linux + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-rust-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + + - job: test_rust_osx_arm64 + displayName: 'osx-arm64' + pool: + name: AcesShared + os: macOS + demands: + - ImageOverride -equals ACES_VM_SharedPool_Sequoia + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget' + targetPath: '$(Pipeline.Workspace)/flc-nuget' + - input: pipelineArtifact + artifactName: 
'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-rust-steps.yml@self + parameters: + isWinML: false + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + # ── Build FLC (WinML) ── - stage: build_core_winml displayName: 'Build Core (WinML)' dependsOn: compute_version jobs: - job: flc_winml_win_x64 - displayName: 'Core win-x64 (WinML)' + displayName: 'win-x64 (WinML)' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -620,7 +1000,7 @@ extends: isWinML: true - job: flc_winml_win_arm64 - displayName: 'Core win-arm64 (WinML)' + displayName: 'win-arm64 (WinML)' pool: name: onnxruntime-Win-CPU-2022 os: windows @@ -862,46 +1242,93 @@ extends: depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' outputDir: '$(Build.ArtifactStagingDirectory)/python-sdk-winml' - # Rust SDK has one package with different install options for standard vs WinML, - # so we only publish once under the standard stage and skip the WinML stage. Leaving - # it as a commented block incase we decide to publish separate Rust WinML package in the future. - # # ── Build Rust SDK (WinML) ── - # - stage: build_rust_winml - # displayName: 'Build Rust SDK (WinML)' - # dependsOn: - # - build_core_winml - # jobs: - # - job: rust_sdk_winml - # displayName: 'Build' - # pool: - # name: onnxruntime-Win-CPU-2022 - # os: windows - # templateContext: - # inputs: - # - input: pipelineArtifact - # artifactName: 'version-info' - # targetPath: '$(Pipeline.Workspace)/version-info' - # - input: pipelineArtifact - # artifactName: 'flc-nuget-winml' - # targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' - # - input: pipelineArtifact - # artifactName: 'deps-versions-winml' - # targetPath: '$(Pipeline.Workspace)/deps-versions-winml' - # outputs: - # - output: pipelineArtifact - # artifactName: 'rust-sdk-winml' - # targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' - # steps: - # - checkout: self - # clean: true - # - checkout: test-data-shared - # lfs: true - # - template: .pipelines/templates/build-rust-steps.yml@self - # parameters: - # version: ${{ parameters.version }} - # isRelease: ${{ parameters.isRelease }} - # prereleaseId: ${{ parameters.prereleaseId }} - # isWinML: true - # flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' - # depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' - # outputDir: '$(Build.ArtifactStagingDirectory)/rust-sdk-winml' + # ── Test C# SDK (WinML) ── + - stage: test_cs_winml + displayName: 'Test C# (WinML)' + dependsOn: build_cs_winml + jobs: + - job: test_cs_winml_win_x64 + displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + - input: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: '$(Pipeline.Workspace)/deps-versions-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-cs-steps.yml@self + parameters: + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' + + # ── Test JS SDK (WinML) ── + - stage: test_js_winml + displayName: 'Test JS (WinML)' + dependsOn: build_js_winml + jobs: + - job: test_js_winml_win_x64 + 
displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-nuget-winml' + targetPath: '$(Pipeline.Workspace)/flc-nuget-winml' + - input: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: '$(Pipeline.Workspace)/deps-versions-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-js-steps.yml@self + parameters: + isWinML: true + flcNugetDir: '$(Pipeline.Workspace)/flc-nuget-winml' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' + + # ── Test Python SDK (WinML) ── + - stage: test_python_winml + displayName: 'Test Python (WinML)' + dependsOn: build_python_winml + jobs: + - job: test_python_winml_win_x64 + displayName: 'win-x64' + pool: + name: onnxruntime-Win-CPU-2022 + os: windows + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels-winml' + targetPath: '$(Pipeline.Workspace)/flc-wheels-winml' + - input: pipelineArtifact + artifactName: 'python-sdk-winml' + targetPath: '$(Pipeline.Workspace)/python-sdk-winml' + - input: pipelineArtifact + artifactName: 'deps-versions-winml' + targetPath: '$(Pipeline.Workspace)/deps-versions-winml' + steps: + - checkout: self + clean: true + - checkout: test-data-shared + lfs: true + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + isWinML: true + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels-winml' + sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk-winml' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-winml' diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml index 1b80ec71..974673fa 100644 --- a/.pipelines/templates/build-core-steps.yml +++ b/.pipelines/templates/build-core-steps.yml @@ -126,7 +126,7 @@ steps: projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release' - - ${{ if eq(parameters.flavor, 'win-x64') }}: + - ${{ if or(eq(parameters.flavor, 'win-x64'), eq(parameters.flavor, 'linux-x64'), eq(parameters.flavor, 'osx-x64')) }}: - task: DotNetCoreCLI@2 displayName: 'Restore FLC Tests ${{ parameters.flavor }}' inputs: diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml index 6c2c8d97..7f77647f 100644 --- a/.pipelines/templates/build-cs-steps.yml +++ b/.pipelines/templates/build-cs-steps.yml @@ -189,43 +189,3 @@ steps: inlineOperation: | [{"keyCode":"CP-401405","operationSetCode":"NuGetSign","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"},{"keyCode":"CP-401405","operationSetCode":"NuGetVerify","parameters":[],"toolName":"sign","toolVersion":"6.2.9304.0"}] -# ── Tests ── -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - & $installerPath --quiet --force - if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } - errorActionPreference: 'stop' - -- task: PowerShell@2 - displayName: 'Restore & build tests' - inputs: - targetType: inline - script: | - dotnet restore 
"$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` - --configfile "$(customNugetConfig)" ` - /p:UseWinML=${{ parameters.isWinML }} - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - - dotnet build "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` - --no-restore --configuration Release ` - /p:UseWinML=${{ parameters.isWinML }} - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - -- task: PowerShell@2 - displayName: 'Run SDK tests' - inputs: - targetType: inline - script: | - dotnet test "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` - --no-build --configuration Release ` - /p:UseWinML=${{ parameters.isWinML }} - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/build-js-steps.yml b/.pipelines/templates/build-js-steps.yml index ca42fea1..a081930f 100644 --- a/.pipelines/templates/build-js-steps.yml +++ b/.pipelines/templates/build-js-steps.yml @@ -214,25 +214,3 @@ steps: New-Item -ItemType Directory -Path $destDir -Force | Out-Null Copy-Item "$(repoRoot)/sdk/js/*.tgz" "$destDir/" -# ── Tests ── -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - & $installerPath --quiet --force - if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } - errorActionPreference: 'stop' - -- task: Npm@1 - displayName: 'npm test' - inputs: - command: custom - workingDir: $(repoRoot)/sdk/js - customCommand: 'test' - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index f52e069f..c4c62825 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -129,8 +129,13 @@ steps: $deps = Get-Content "$(repoRoot)/sdk/$fileName" -Raw | ConvertFrom-Json $ortVer = $deps.onnxruntime.version $genaiVer = $deps.'onnxruntime-genai'.version - Write-Host "Installing onnxruntime-core==$ortVer onnxruntime-genai-core==$genaiVer" - pip install "onnxruntime-core==$ortVer" "onnxruntime-genai-core==$genaiVer" + if ($IsLinux) { + Write-Host "Installing onnxruntime-gpu==$ortVer onnxruntime-genai-cuda==$genaiVer (Linux)" + pip install "onnxruntime-gpu==$ortVer" "onnxruntime-genai-cuda==$genaiVer" + } else { + Write-Host "Installing onnxruntime-core==$ortVer onnxruntime-genai-core==$genaiVer" + pip install "onnxruntime-core==$ortVer" "onnxruntime-genai-core==$genaiVer" + } if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" @@ -171,25 +176,3 @@ steps: Write-Host "Staged wheels:" Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } -# ── Tests ── -- ${{ if eq(parameters.isWinML, true) }}: - - task: PowerShell@2 - displayName: 'Install Windows App SDK Runtime' - inputs: - targetType: 'inline' - script: | - $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" - $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" - Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath - & $installerPath --quiet --force - if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } 
- errorActionPreference: 'stop' - -- script: pip install coverage pytest>=7.0.0 pytest-timeout>=2.1.0 - displayName: 'Install test dependencies' - -- script: python -m pytest test/ -v - displayName: 'Run tests' - workingDirectory: $(repoRoot)/sdk/python - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/build-rust-steps.yml b/.pipelines/templates/build-rust-steps.yml index c0489f4f..e466b10f 100644 --- a/.pipelines/templates/build-rust-steps.yml +++ b/.pipelines/templates/build-rust-steps.yml @@ -201,25 +201,3 @@ steps: Write-Host "Staged crates:" Get-ChildItem $destDir | ForEach-Object { Write-Host " $($_.Name)" } -# ── Tests ── -- task: PowerShell@2 - displayName: 'Run unit tests' - inputs: - targetType: inline - script: | - Set-Location "$(repoRoot)/sdk/rust" - $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } - Invoke-Expression "cargo test --lib $features" - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - -- task: PowerShell@2 - displayName: 'Run integration tests' - inputs: - targetType: inline - script: | - Set-Location "$(repoRoot)/sdk/rust" - $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } - Invoke-Expression "cargo test --tests $features -- --include-ignored --test-threads=1 --nocapture" - if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - env: - TF_BUILD: 'true' diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml new file mode 100644 index 00000000..43ba8331 --- /dev/null +++ b/.pipelines/templates/test-cs-steps.yml @@ -0,0 +1,106 @@ +# Steps to test the C# SDK. +# Expects the SDK to be already built and the NuGet package available. +parameters: +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + displayName: 'Path to directory containing the FLC .nupkg' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- task: UseDotNet@2 + displayName: 'Use .NET 9 SDK' + inputs: + packageType: sdk + version: '9.0.x' + +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + +# Create a temporary NuGet.config that includes the local FLC feed +- task: PowerShell@2 + displayName: 'Create NuGet.config with local FLC feed' + inputs: + targetType: inline + script: | + $nugetConfig = @" + + + + + + + + + + "@ + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + $flcFeedDir = $nupkg.DirectoryName + $nugetConfig = $nugetConfig -replace [regex]::Escape("${{ parameters.flcNugetDir }}"), $flcFeedDir + $configPath = "$(Build.ArtifactStagingDirectory)/NuGet.config" + Set-Content -Path $configPath -Value $nugetConfig + Write-Host "##vso[task.setvariable variable=customNugetConfig]$configPath" + Write-Host "Local FLC feed directory: $flcFeedDir" + +- task: 
NuGetAuthenticate@1 + displayName: 'Authenticate NuGet feeds' + +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + & $installerPath --quiet --force + if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } + errorActionPreference: 'stop' + +- task: PowerShell@2 + displayName: 'Restore & build tests' + inputs: + targetType: inline + script: | + dotnet restore "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --configfile "$(customNugetConfig)" ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + + dotnet build "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --no-restore --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run SDK tests' + inputs: + targetType: inline + script: | + dotnet test "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` + --no-build --configuration Release ` + /p:UseWinML=${{ parameters.isWinML }} + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml new file mode 100644 index 00000000..0f72f2a5 --- /dev/null +++ b/.pipelines/templates/test-js-steps.yml @@ -0,0 +1,142 @@ +# Steps to test the JS SDK. +# Expects the SDK to be already built via build-js-steps.yml. +parameters: +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + default: '' + displayName: 'Path to directory containing the FLC .nupkg' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- task: NodeTool@0 + displayName: 'Use Node.js 20' + inputs: + versionSpec: '20.x' + +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + +# Install JS dependencies with native binaries +- task: Npm@1 + displayName: 'npm install (skip native downloads)' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'install --ignore-scripts' + +# esbuild (transitive dep via tsx) requires a platform-specific binary package. +# --ignore-scripts prevents it from installing automatically, so do it explicitly for Linux. 
+- task: Npm@1 + displayName: 'Install esbuild platform binary (Linux)' + condition: and(succeeded(), eq(variables['Agent.OS'], 'Linux')) + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'install @esbuild/linux-x64 --no-save' + +- task: PowerShell@2 + displayName: 'Extract FLC from pipeline-built artifact' + condition: and(succeeded(), ne('${{ parameters.flcNugetDir }}', '')) + inputs: + targetType: inline + script: | + $os = 'win32' + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + $platformKey = "$os-$arch" + $rid = if ($arch -eq 'arm64') { 'win-arm64' } else { 'win-x64' } + + if ($IsLinux) { + $os = 'linux' + $platformKey = "$os-$arch" + $rid = "linux-$arch" + } elseif ($IsMacOS) { + $os = 'darwin' + $platformKey = "$os-$arch" + $rid = "osx-$arch" + } + + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + New-Item -ItemType Directory -Path $destDir -Force | Out-Null + $nativeDir = "$extractDir/runtimes/$rid/native" + if (Test-Path $nativeDir) { + Get-ChildItem $nativeDir -File | ForEach-Object { + Copy-Item $_.FullName -Destination "$destDir/$($_.Name)" -Force + Write-Host "Placed $($_.Name) from pipeline artifact" + } + } else { + Write-Warning "No native binaries found at $nativeDir for RID $rid" + } + +- task: PowerShell@2 + displayName: 'Run native binary install (ORT + GenAI)' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/js" + node script/preinstall.cjs + node script/install-standard.cjs + +# Build the Node-API native addon +- task: Npm@1 + displayName: 'npm build node-api addon' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'run build:native' + +- task: Npm@1 + displayName: 'npm build' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'run build' + +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + & $installerPath --quiet --force + if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } + errorActionPreference: 'stop' + +- task: Npm@1 + displayName: 'npm test' + inputs: + command: custom + workingDir: $(repoRoot)/sdk/js + customCommand: 'test' + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml new file mode 100644 index 00000000..00ac621b --- /dev/null +++ b/.pipelines/templates/test-python-steps.yml @@ -0,0 +1,133 @@ +# Steps to test the Python SDK. +# Expects the SDK wheel to be already built via build-python-steps.yml. 
+parameters: +- name: isWinML + type: boolean + default: false +- name: flcWheelsDir + type: string + displayName: 'Path to directory containing the FLC wheels' +- name: sdkWheelsDir + type: string + displayName: 'Path to directory containing the built SDK wheel' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +- task: PowerShell@2 + displayName: 'Detect Python architecture' + inputs: + targetType: inline + script: | + # UsePythonVersion defaults to x64 but arm64 agents only have arm64 Python. + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'ARM64' } else { 'x64' } + Write-Host "##vso[task.setvariable variable=PythonArch]$arch" + Write-Host "Detected Python architecture: $arch" + +- task: UsePythonVersion@0 + displayName: 'Use Python 3.12' + inputs: + versionSpec: '3.12' + architecture: '$(PythonArch)' + +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + +# Configure pip to use ORT-Nightly feed (plus PyPI as fallback) +- task: PowerShell@2 + displayName: 'Configure pip for Azure Artifacts' + inputs: + targetType: inline + script: | + pip config set global.index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + pip config set global.extra-index-url https://pypi.org/simple/ + pip config set global.pre true + +# Install the FLC wheel from the pipeline +- task: PowerShell@2 + displayName: 'Install pipeline-built FLC wheel' + inputs: + targetType: inline + script: | + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'amd64' } + if ($IsLinux) { $platTag = "manylinux*x86_64" } + elseif ($IsMacOS) { $platTag = "macosx*$arch" } + else { $platTag = "win_$arch" } + + $filter = if ("${{ parameters.isWinML }}" -eq "True") { "foundry_local_core_winml*$platTag.whl" } else { "foundry_local_core-*$platTag.whl" } + $wheel = Get-ChildItem "${{ parameters.flcWheelsDir }}" -Recurse -Filter $filter | Select-Object -First 1 + if ($wheel) { + Write-Host "Installing pipeline-built FLC wheel: $($wheel.FullName)" + pip install $($wheel.FullName) + } else { + Write-Warning "No FLC wheel found matching $filter in ${{ parameters.flcWheelsDir }}" + } + +- task: PowerShell@2 + displayName: 'Install ORT native packages' + inputs: + targetType: inline + script: | + $isWinML = "${{ parameters.isWinML }}" -eq "True" + $fileName = if ($isWinML) { "deps_versions_winml.json" } else { "deps_versions.json" } + $deps = Get-Content "$(repoRoot)/sdk/$fileName" -Raw | ConvertFrom-Json + $ortVer = $deps.onnxruntime.version + $genaiVer = $deps.'onnxruntime-genai'.version + if ($IsLinux) { + Write-Host "Installing onnxruntime-gpu==$ortVer onnxruntime-genai-cuda==$genaiVer (Linux)" + pip install "onnxruntime-gpu==$ortVer" "onnxruntime-genai-cuda==$genaiVer" + } else { + Write-Host "Installing onnxruntime-core==$ortVer onnxruntime-genai-core==$genaiVer" + pip install "onnxruntime-core==$ortVer" 
"onnxruntime-genai-core==$genaiVer" + } + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- script: pip install "pydantic>=2.0.0" "requests>=2.32.4" "openai>=2.24.0" + displayName: 'Install pure python dependencies' + +# Install the built SDK wheel +- task: PowerShell@2 + displayName: 'Install SDK wheel' + inputs: + targetType: inline + script: | + $wheel = (Get-ChildItem "${{ parameters.sdkWheelsDir }}/*.whl" | Select-Object -First 1).FullName + pip install --no-deps $wheel + +- ${{ if eq(parameters.isWinML, true) }}: + - task: PowerShell@2 + displayName: 'Install Windows App SDK Runtime' + inputs: + targetType: 'inline' + script: | + $installerUrl = "https://aka.ms/windowsappsdk/1.8/latest/windowsappruntimeinstall-x64.exe" + $installerPath = "$env:TEMP\windowsappruntimeinstall.exe" + Invoke-WebRequest -Uri $installerUrl -OutFile $installerPath + & $installerPath --quiet --force + if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } + errorActionPreference: 'stop' + +- script: pip install coverage pytest>=7.0.0 pytest-timeout>=2.1.0 + displayName: 'Install test dependencies' + +- script: python -m pytest test/ -v + displayName: 'Run tests' + workingDirectory: $(repoRoot)/sdk/python + env: + TF_BUILD: 'true' diff --git a/.pipelines/templates/test-rust-steps.yml b/.pipelines/templates/test-rust-steps.yml new file mode 100644 index 00000000..4a7b1220 --- /dev/null +++ b/.pipelines/templates/test-rust-steps.yml @@ -0,0 +1,130 @@ +# Steps to test the Rust SDK. +# Expects the SDK to be already built via build-rust-steps.yml. +parameters: +- name: isWinML + type: boolean + default: false +- name: flcNugetDir + type: string + displayName: 'Path to directory containing the FLC .nupkg' +- name: depsVersionsDir + type: string + default: '' + displayName: 'Path to deps-versions artifact directory' + +steps: +- task: PowerShell@2 + displayName: 'Set source paths' + inputs: + targetType: inline + script: | + $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" + Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" + Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" + +# Load dependency versions from deps_versions.json +- template: update-deps-versions-steps.yml + parameters: + repoRoot: $(repoRoot) + artifactDir: ${{ parameters.depsVersionsDir }} + isWinML: ${{ parameters.isWinML }} + +# Extract FLC native binaries from the pipeline-built .nupkg +- task: PowerShell@2 + displayName: 'Extract FLC native binaries' + inputs: + targetType: inline + script: | + $nupkg = Get-ChildItem "${{ parameters.flcNugetDir }}" -Recurse -Filter "Microsoft.AI.Foundry.Local.Core*.nupkg" -Exclude "*.snupkg" | Select-Object -First 1 + if (-not $nupkg) { throw "No FLC .nupkg found in ${{ parameters.flcNugetDir }}" } + + $extractDir = "$(Build.ArtifactStagingDirectory)/flc-extract-rust" + $zip = [System.IO.Path]::ChangeExtension($nupkg.FullName, ".zip") + Copy-Item $nupkg.FullName $zip -Force + Expand-Archive -Path $zip -DestinationPath $extractDir -Force + + $arch = if ([System.Runtime.InteropServices.RuntimeInformation]::OSArchitecture -eq 'Arm64') { 'arm64' } else { 'x64' } + if ($IsLinux) { $rid = "linux-$arch" } + elseif ($IsMacOS) { $rid = "osx-$arch" } + else { $rid = "win-$arch" } + + $nativeDir = "$extractDir/runtimes/$rid/native" + if (-not (Test-Path $nativeDir)) { throw "No native binaries found at $nativeDir for RID $rid" } + + $flcNativeDir = "$(Build.ArtifactStagingDirectory)/flc-native-rust" + 
New-Item -ItemType Directory -Path $flcNativeDir -Force | Out-Null + Get-ChildItem $nativeDir -File | Copy-Item -Destination $flcNativeDir -Force + Write-Host "##vso[task.setvariable variable=FOUNDRY_NATIVE_OVERRIDE_DIR]$flcNativeDir" + Write-Host "Extracted FLC native binaries to $flcNativeDir" + +# Copy deps_versions files +- task: PowerShell@2 + displayName: 'Copy deps_versions for Rust' + inputs: + targetType: inline + script: | + Copy-Item "$(repoRoot)/sdk/deps_versions.json" "$(repoRoot)/sdk/rust/deps_versions.json" -Force + Copy-Item "$(repoRoot)/sdk/deps_versions_winml.json" "$(repoRoot)/sdk/rust/deps_versions_winml.json" -Force + +# Install Rust toolchain +- task: PowerShell@2 + displayName: 'Install Rust toolchain' + inputs: + targetType: inline + script: | + if ($IsWindows -or (-not $IsLinux -and -not $IsMacOS)) { + Invoke-WebRequest -Uri https://win.rustup.rs/x86_64 -OutFile rustup-init.exe + .\rustup-init.exe -y --default-toolchain stable --profile minimal -c clippy,rustfmt + Remove-Item rustup-init.exe + $cargoPath = "$env:USERPROFILE\.cargo\bin" + } else { + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable --profile minimal -c clippy,rustfmt + $cargoPath = "$env:HOME/.cargo/bin" + } + Write-Host "##vso[task.prependpath]$cargoPath" + +# Remove .cargo/config.toml crates-io redirect +- task: PowerShell@2 + displayName: 'Use crates.io directly' + inputs: + targetType: inline + script: | + $configPath = "$(repoRoot)/sdk/rust/.cargo/config.toml" + if (Test-Path $configPath) { + Remove-Item $configPath + Write-Host "Removed .cargo/config.toml crates-io redirect" + } + +# Build before testing +- task: PowerShell@2 + displayName: 'Build' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo build $features" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run unit tests' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo test --lib $features" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + +- task: PowerShell@2 + displayName: 'Run integration tests' + inputs: + targetType: inline + script: | + Set-Location "$(repoRoot)/sdk/rust" + $features = if ("${{ parameters.isWinML }}" -eq "True") { "--features winml" } else { "" } + Invoke-Expression "cargo test --tests $features -- --include-ignored --test-threads=1 --nocapture" + if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } + env: + TF_BUILD: 'true' diff --git a/sdk/js/test/detail/modelLoadManager.test.ts b/sdk/js/test/detail/modelLoadManager.test.ts index 32d46f1d..475417b7 100644 --- a/sdk/js/test/detail/modelLoadManager.test.ts +++ b/sdk/js/test/detail/modelLoadManager.test.ts @@ -9,6 +9,8 @@ describe('ModelLoadManager', function() { let serviceUrl: string; before(async function() { + // The catalog network fetch can be slow on macOS CI agents, exceeding mocha's default 2s timeout + this.timeout(30000); managerInstance = getTestManager(); // Access private coreInterop using any cast coreInterop = (managerInstance as any).coreInterop; From e42e5f69e0baa9f32e7aed6ac18b4962b0631a39 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Fri, 17 Apr 2026 10:40:49 -0700 Subject: [PATCH 44/83] change sdk to pull from public sources (#647) --- sdk/cs/NuGet.config | 
7 ------- sdk/deps_versions.json | 4 ++-- sdk/deps_versions_winml.json | 4 ++-- sdk/js/.npmrc | 2 -- sdk/js/script/install-standard.cjs | 4 ++-- sdk/js/script/install-utils.cjs | 3 +-- sdk/js/script/install-winml.cjs | 4 ++-- sdk/rust/build.rs | 17 +++++------------ 8 files changed, 14 insertions(+), 31 deletions(-) delete mode 100644 sdk/cs/NuGet.config delete mode 100644 sdk/js/.npmrc diff --git a/sdk/cs/NuGet.config b/sdk/cs/NuGet.config deleted file mode 100644 index 29505d6d..00000000 --- a/sdk/cs/NuGet.config +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - diff --git a/sdk/deps_versions.json b/sdk/deps_versions.json index 1ecd6e6f..5fec13c8 100644 --- a/sdk/deps_versions.json +++ b/sdk/deps_versions.json @@ -1,7 +1,7 @@ { "foundry-local-core": { - "nuget": "0.9.0-dev-202603310538-f6efa8d3", - "python": "0.9.0.dev20260327060216" + "nuget": "1.0.0", + "python": "1.0.0" }, "onnxruntime": { "version": "1.24.4" diff --git a/sdk/deps_versions_winml.json b/sdk/deps_versions_winml.json index dd17833a..385767d5 100644 --- a/sdk/deps_versions_winml.json +++ b/sdk/deps_versions_winml.json @@ -1,7 +1,7 @@ { "foundry-local-core": { - "nuget": "0.9.0-dev-202603310538-f6efa8d3", - "python": "0.9.0.dev20260331004032" + "nuget": "1.0.0", + "python": "1.0.0" }, "onnxruntime": { "version": "1.23.2.3" diff --git a/sdk/js/.npmrc b/sdk/js/.npmrc deleted file mode 100644 index 7418403b..00000000 --- a/sdk/js/.npmrc +++ /dev/null @@ -1,2 +0,0 @@ -registry=https://pkgs.dev.azure.com/aiinfra/AIFoundryLocal/_packaging/AIFoundryLocal_PublicPackages/npm/registry/ -always-auth=true diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs index 19ceacfb..e32160f6 100644 --- a/sdk/js/script/install-standard.cjs +++ b/sdk/js/script/install-standard.cjs @@ -8,7 +8,7 @@ const fs = require('fs'); const os = require('os'); const path = require('path'); -const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); +const { NUGET_FEED, runInstall } = require('./install-utils.cjs'); // deps_versions.json lives at the package root when published, or at sdk/ in the repo. const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions.json')) @@ -17,7 +17,7 @@ const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions.json const deps = require(depsPath); const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core', version: deps['foundry-local-core'].nuget, feed: ORT_NIGHTLY_FEED }, + { name: 'Microsoft.AI.Foundry.Local.Core', version: deps['foundry-local-core'].nuget, feed: NUGET_FEED }, { name: os.platform() === 'linux' ? 
'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version, feed: NUGET_FEED }, { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai'].version, feed: NUGET_FEED }, ]; diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs index aa74f4d5..1338c961 100644 --- a/sdk/js/script/install-utils.cjs +++ b/sdk/js/script/install-utils.cjs @@ -31,7 +31,6 @@ const REQUIRED_FILES = [ ]; const NUGET_FEED = 'https://api.nuget.org/v3/index.json'; -const ORT_NIGHTLY_FEED = 'https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json'; // --- Download helpers --- @@ -194,4 +193,4 @@ async function runInstall(artifacts, options) { } } -module.exports = { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall }; +module.exports = { NUGET_FEED, runInstall }; diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index 72f07b95..1aba9d02 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -12,7 +12,7 @@ const fs = require('fs'); const path = require('path'); -const { NUGET_FEED, ORT_NIGHTLY_FEED, runInstall } = require('./install-utils.cjs'); +const { NUGET_FEED, runInstall } = require('./install-utils.cjs'); // WinML uses its own deps_versions_winml.json with the same key structure // as the standard deps_versions.json — no variant-specific keys needed. @@ -27,7 +27,7 @@ const platformKey = `${process.platform}-${process.arch}`; const binDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey); const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: deps['foundry-local-core']['nuget'], feed: ORT_NIGHTLY_FEED }, + { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: deps['foundry-local-core']['nuget'], feed: NUGET_FEED }, { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version, feed: NUGET_FEED }, { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai']['version'], feed: NUGET_FEED }, ]; diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs index 7daf7a73..67b18305 100644 --- a/sdk/rust/build.rs +++ b/sdk/rust/build.rs @@ -4,8 +4,6 @@ use std::io::{self, Read}; use std::path::{Path, PathBuf}; const NUGET_FEED: &str = "https://api.nuget.org/v3/index.json"; -const ORT_NIGHTLY_FEED: &str = - "https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json"; /// Versions loaded from deps_versions.json (or deps_versions_winml.json). 
/// Both files share the same key structure — the build script picks the @@ -108,7 +106,7 @@ fn get_packages(rid: &str) -> Vec<NuGetPackage> { packages.push(NuGetPackage { name: "Microsoft.AI.Foundry.Local.Core.WinML", version: deps.core.clone(), - feed_url: ORT_NIGHTLY_FEED, + feed_url: NUGET_FEED, }); packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntime.Foundry", @@ -118,13 +116,13 @@ fn get_packages(rid: &str) -> Vec<NuGetPackage> { packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry", version: deps.genai.clone(), - feed_url: ORT_NIGHTLY_FEED, + feed_url: NUGET_FEED, }); } else { packages.push(NuGetPackage { name: "Microsoft.AI.Foundry.Local.Core", version: deps.core.clone(), - feed_url: ORT_NIGHTLY_FEED, + feed_url: NUGET_FEED, }); if is_linux { @@ -144,7 +142,7 @@ fn get_packages(rid: &str) -> Vec<NuGetPackage> { packages.push(NuGetPackage { name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry", version: deps.genai.clone(), - feed_url: ORT_NIGHTLY_FEED, + feed_url: NUGET_FEED, }); } @@ -221,14 +219,9 @@ fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result format!("{base_address}{lower_name}/{lower_version}/{lower_name}.{lower_version}.nupkg"); println!( - "cargo:warning=Downloading {name} {ver} from {feed}", + "cargo:warning=Downloading {name} {ver} from NuGet.org", name = pkg.name, ver = pkg.version, - feed = if pkg.feed_url == NUGET_FEED { - "NuGet.org" - } else { - "ORT-Nightly" - }, ); let mut response = ureq::get(&url)
From a0fed245d0dddeca143f18628d09da4c746eb7d2 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Sat, 18 Apr 2026 19:45:39 -0700 Subject: [PATCH 45/83] pipeline fixes for main with expanded tests (#648) Skips Foundry Local Core tests on osx-arm64. Embeddings tests are flaky and there appears to be a genai issue on macOS. ` Microsoft.ML.OnnxRuntimeGenAI.OnnxRuntimeGenAIException: Exception during initialization: filesystem error: in file_size: No such file or directory ["/Users/cloudtest/vss/_work/1/s/test-data-shared/openai-whisper-tiny-generic-cpu-2/cpu-/model.onnx.data"]` Explicitly installs git-lfs on macOS CI agents (it is not pre-installed). Skips the Linux Python test stage because the onnxruntime-gpu and onnxruntime-genai-cuda binaries cannot be found. Overrides RuntimeIdentifiers in the test project so the correct .NET runtime is fetched for the platform, preventing a corrupt-directory error.
Leverages NUGET_PACKAGES env var to enable per-build isolated nupkg siloing to prevent concurrent access issues --------- Co-authored-by: Prathik Rao --- .pipelines/foundry-local-packaging.yml | 75 +++++++++++++++-------- .pipelines/templates/build-core-steps.yml | 3 +- .pipelines/templates/test-cs-steps.yml | 23 ++++++- 3 files changed, 71 insertions(+), 30 deletions(-) diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index f4d49405..d639d350 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -165,6 +165,8 @@ extends: steps: - checkout: neutron-server clean: true + - checkout: test-data-shared + lfs: true - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: win-arm64 @@ -203,8 +205,13 @@ extends: artifactName: 'flc-osx-arm64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: + # AcesShared macOS agents don't have git-lfs pre-installed + - script: brew install git-lfs && git lfs install + displayName: 'Install Git LFS' - checkout: neutron-server clean: true + - checkout: test-data-shared + lfs: true - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: osx-arm64 @@ -709,6 +716,9 @@ extends: artifactName: 'deps-versions-standard' targetPath: '$(Pipeline.Workspace)/deps-versions-standard' steps: + # AcesShared macOS agents don't have git-lfs pre-installed + - script: brew install git-lfs && git lfs install + displayName: 'Install Git LFS' - checkout: self clean: true - checkout: test-data-shared @@ -793,6 +803,9 @@ extends: artifactName: 'deps-versions-standard' targetPath: '$(Pipeline.Workspace)/deps-versions-standard' steps: + # AcesShared macOS agents don't have git-lfs pre-installed + - script: brew install git-lfs && git lfs install + displayName: 'Install Git LFS' - checkout: self clean: true - checkout: test-data-shared @@ -836,33 +849,35 @@ extends: sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' - - job: test_python_linux_x64 - displayName: 'linux-x64' - pool: - name: onnxruntime-Ubuntu2404-AMD-CPU - os: linux - templateContext: - inputs: - - input: pipelineArtifact - artifactName: 'flc-wheels' - targetPath: '$(Pipeline.Workspace)/flc-wheels' - - input: pipelineArtifact - artifactName: 'python-sdk' - targetPath: '$(Pipeline.Workspace)/python-sdk' - - input: pipelineArtifact - artifactName: 'deps-versions-standard' - targetPath: '$(Pipeline.Workspace)/deps-versions-standard' - steps: - - checkout: self - clean: true - - checkout: test-data-shared - lfs: true - - template: .pipelines/templates/test-python-steps.yml@self - parameters: - isWinML: false - flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' - sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' - depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + # Linux Python tests are disabled due to native dependency resolution issues + # (onnxruntime-gpu / onnxruntime-genai-cuda not found correctly on CI agents). 
+ # - job: test_python_linux_x64 + # displayName: 'linux-x64' + # pool: + # name: onnxruntime-Ubuntu2404-AMD-CPU + # os: linux + # templateContext: + # inputs: + # - input: pipelineArtifact + # artifactName: 'flc-wheels' + # targetPath: '$(Pipeline.Workspace)/flc-wheels' + # - input: pipelineArtifact + # artifactName: 'python-sdk' + # targetPath: '$(Pipeline.Workspace)/python-sdk' + # - input: pipelineArtifact + # artifactName: 'deps-versions-standard' + # targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + # steps: + # - checkout: self + # clean: true + # - checkout: test-data-shared + # lfs: true + # - template: .pipelines/templates/test-python-steps.yml@self + # parameters: + # isWinML: false + # flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + # sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' + # depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' - job: test_python_osx_arm64 displayName: 'osx-arm64' @@ -883,6 +898,9 @@ extends: artifactName: 'deps-versions-standard' targetPath: '$(Pipeline.Workspace)/deps-versions-standard' steps: + # AcesShared macOS agents don't have git-lfs pre-installed + - script: brew install git-lfs && git lfs install + displayName: 'Install Git LFS' - checkout: self clean: true - checkout: test-data-shared @@ -963,6 +981,9 @@ extends: artifactName: 'deps-versions-standard' targetPath: '$(Pipeline.Workspace)/deps-versions-standard' steps: + # AcesShared macOS agents don't have git-lfs pre-installed + - script: brew install git-lfs && git lfs install + displayName: 'Install Git LFS' - checkout: self clean: true - checkout: test-data-shared diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml index 974673fa..d24f41bc 100644 --- a/.pipelines/templates/build-core-steps.yml +++ b/.pipelines/templates/build-core-steps.yml @@ -126,7 +126,8 @@ steps: projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release' - - ${{ if or(eq(parameters.flavor, 'win-x64'), eq(parameters.flavor, 'linux-x64'), eq(parameters.flavor, 'osx-x64')) }}: + # FLC tests on osx-arm64 are flaky in CI, will investigate separately. Skip for now since the main goal of this job is to produce the AOT binary. + - ${{ if or(eq(parameters.flavor, 'win-x64'), eq(parameters.flavor, 'linux-x64')) }}: - task: DotNetCoreCLI@2 displayName: 'Restore FLC Tests ${{ parameters.flavor }}' inputs: diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml index 43ba8331..773e9000 100644 --- a/.pipelines/templates/test-cs-steps.yml +++ b/.pipelines/templates/test-cs-steps.yml @@ -78,19 +78,38 @@ steps: if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } errorActionPreference: 'stop' +# Use a per-build NuGet packages directory to avoid corruption and file-locking +# issues from the shared global cache on reused agents. 
+# https://learn.microsoft.com/en-us/nuget/reference/cli-reference/cli-ref-environment-variables +- task: PowerShell@2 + displayName: 'Set isolated NuGet packages path' + inputs: + targetType: inline + script: | + $pkgDir = "$(Build.BinariesDirectory)/nuget-packages" + New-Item -ItemType Directory -Force -Path $pkgDir | Out-Null + Write-Host "##vso[task.setvariable variable=NUGET_PACKAGES]$pkgDir" + Write-Host "NuGet packages directory: $pkgDir" + - task: PowerShell@2 displayName: 'Restore & build tests' inputs: targetType: inline script: | + $rid = dotnet msbuild "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" -getProperty:NETCoreSdkRuntimeIdentifier + if ($LASTEXITCODE -ne 0 -or -not $rid) { throw "Failed to determine RuntimeIdentifier" } + Write-Host "Restoring for RuntimeIdentifier: $rid" + dotnet restore "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` --configfile "$(customNugetConfig)" ` - /p:UseWinML=${{ parameters.isWinML }} + /p:UseWinML=${{ parameters.isWinML }} ` + /p:RuntimeIdentifiers=$rid if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } dotnet build "$(repoRoot)/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj" ` --no-restore --configuration Release ` - /p:UseWinML=${{ parameters.isWinML }} + /p:UseWinML=${{ parameters.isWinML }} ` + /p:RuntimeIdentifiers=$rid if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } - task: PowerShell@2 From 419cffe0a8e6e60d0a2338810e8b358430022c97 Mon Sep 17 00:00:00 2001 From: Baiju Meswani Date: Mon, 20 Apr 2026 13:07:56 -0700 Subject: [PATCH 46/83] Add license files to rust and sdk packages (#649) --- sdk/js/LICENSE.txt | 21 +++++++++++++++++++++ sdk/js/package.json | 6 +++--- sdk/rust/Cargo.toml | 2 +- sdk/rust/LICENSE.txt | 21 +++++++++++++++++++++ 4 files changed, 46 insertions(+), 4 deletions(-) create mode 100644 sdk/js/LICENSE.txt create mode 100644 sdk/rust/LICENSE.txt diff --git a/sdk/js/LICENSE.txt b/sdk/js/LICENSE.txt new file mode 100644 index 00000000..48bc6bb4 --- /dev/null +++ b/sdk/js/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/sdk/js/package.json b/sdk/js/package.json index 408036b1..9ebd0ce4 100644 --- a/sdk/js/package.json +++ b/sdk/js/package.json @@ -1,6 +1,6 @@ { "name": "foundry-local-sdk", - "version": "0.9.0", + "version": "1.0.0", "description": "Foundry Local JavaScript SDK", "main": "dist/index.js", "types": "dist/index.d.ts", @@ -46,5 +46,5 @@ "test": "test" }, "author": "", - "license": "ISC" -} + "license": "MIT" +} \ No newline at end of file
diff --git a/sdk/rust/Cargo.toml b/sdk/rust/Cargo.toml index af6a64f2..92675da2 100644 --- a/sdk/rust/Cargo.toml +++ b/sdk/rust/Cargo.toml @@ -8,7 +8,7 @@ description = "Local AI model inference powered by the Foundry Local Core engine homepage = "https://www.foundrylocal.ai/" repository = "https://github.com/microsoft/Foundry-Local" documentation = "https://github.com/microsoft/Foundry-Local/blob/main/sdk/rust/docs/api.md" -include = ["src/**", "build.rs", "Cargo.toml", "README.md", "LICENSE", "deps_versions.json", "deps_versions_winml.json"] +include = ["src/**", "build.rs", "Cargo.toml", "README.md", "LICENSE.txt", "deps_versions.json", "deps_versions_winml.json"] [features] default = []
diff --git a/sdk/rust/LICENSE.txt b/sdk/rust/LICENSE.txt new file mode 100644 index 00000000..48bc6bb4 --- /dev/null +++ b/sdk/rust/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
From 32a449104f2f039edd21caa8081eb871a480b45f Mon Sep 17 00:00:00 2001 From: Baiju Meswani Date: Mon, 20 Apr 2026 13:09:41 -0700 Subject: [PATCH 47/83] Remove node_modules from flc installation folder (#650) Native libraries were stored under `node_modules/@foundry-local-core/<platformKey>/` inside the SDK package. npm v7+ treats nested `node_modules` as part of the managed dependency tree, so any subsequent `npm install` (e.g. npm install koffi) would prune the folder as "extraneous", deleting the downloaded binaries.
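For illustration, a minimal TypeScript sketch of the resolution change (the old/new paths mirror the diff below; using `sdkRoot` as the installed package root is an assumption):

```ts
import * as os from 'os';
import * as path from 'path';

// Illustrative sketch only — mirrors the lookup this PR moves into
// coreInterop.ts; `sdkRoot` is an assumed stand-in for the package root.
const platformKey = `${os.platform()}-${os.arch()}`;
const sdkRoot = path.resolve(__dirname, '..', '..');

// Before: nested node_modules, which npm v7+ prunes as "extraneous"
const oldDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey);

// After: a plain directory inside the package root that npm leaves alone
const newDir = path.join(sdkRoot, 'foundry-local-core', platformKey);
console.log(`moved native binaries: ${oldDir} -> ${newDir}`);
```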
Moves the binary storage location from node_modules/@foundry-local-core/ to foundry-local-core/ --- .pipelines/templates/build-js-steps.yml | 4 ++-- .pipelines/templates/test-js-steps.yml | 2 +- sdk/js/script/install-utils.cjs | 7 +++---- sdk/js/script/install-winml.cjs | 2 +- sdk/js/script/preinstall.cjs | 2 +- sdk/js/src/detail/coreInterop.ts | 5 +++-- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/.pipelines/templates/build-js-steps.yml b/.pipelines/templates/build-js-steps.yml index a081930f..9da8acd8 100644 --- a/.pipelines/templates/build-js-steps.yml +++ b/.pipelines/templates/build-js-steps.yml @@ -106,7 +106,7 @@ steps: Expand-Archive -Path $zip -DestinationPath $extractDir -Force # Place FLC binary so the install script skips downloading it - $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + $destDir = "$(repoRoot)/sdk/js/foundry-local-core/$platformKey" New-Item -ItemType Directory -Path $destDir -Force | Out-Null $nativeDir = "$extractDir/runtimes/$rid/native" if (Test-Path $nativeDir) { @@ -160,7 +160,7 @@ steps: Expand-Archive -Path $zip -DestinationPath $extractDir -Force # Overwrite FLC binary in the npm-installed location - $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + $destDir = "$(repoRoot)/sdk/js/foundry-local-core/$platformKey" New-Item -ItemType Directory -Path $destDir -Force | Out-Null $nativeDir = "$extractDir/runtimes/$rid/native" if (Test-Path $nativeDir) { diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml index 0f72f2a5..955086e2 100644 --- a/.pipelines/templates/test-js-steps.yml +++ b/.pipelines/templates/test-js-steps.yml @@ -83,7 +83,7 @@ steps: Copy-Item $nupkg.FullName $zip -Force Expand-Archive -Path $zip -DestinationPath $extractDir -Force - $destDir = "$(repoRoot)/sdk/js/node_modules/@foundry-local-core/$platformKey" + $destDir = "$(repoRoot)/sdk/js/foundry-local-core/$platformKey" New-Item -ItemType Directory -Path $destDir -Force | Out-Null $nativeDir = "$extractDir/runtimes/$rid/native" if (Test-Path $nativeDir) { diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs index 1338c961..01b14d1a 100644 --- a/sdk/js/script/install-utils.cjs +++ b/sdk/js/script/install-utils.cjs @@ -19,9 +19,8 @@ const PLATFORM_MAP = { }; const platformKey = `${os.platform()}-${os.arch()}`; const RID = PLATFORM_MAP[platformKey]; -// Install binaries into node_modules/@foundry-local-core/ so they -// are shared across foundry-local-sdk and foundry-local-sdk-winml. -const BIN_DIR = path.join(__dirname, '..', 'node_modules', '@foundry-local-core', platformKey); +// Install binaries into foundry-local-core/ inside the package root. +const BIN_DIR = path.join(__dirname, '..', 'foundry-local-core', platformKey); const EXT = os.platform() === 'win32' ? '.dll' : os.platform() === 'darwin' ? 
'.dylib' : '.so'; const REQUIRED_FILES = [ @@ -154,7 +153,7 @@ async function installPackage(artifact, tempDir, binDir, skipIfPresent) { console.warn(` No files found for RID ${RID} in ${pkgName}.`); } - // Overwrite FLC platform package.json so require.resolve can find the package + // Write a metadata package.json with version info for diagnostics if (pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { const pkgJsonPath = path.join(binDir, 'package.json'); const pkgContent = {
diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index 1aba9d02..4276c740 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -24,7 +24,7 @@ const deps = require(depsPath); // Resolve foundry-local-sdk's binary directory const sdkRoot = path.dirname(require.resolve('foundry-local-sdk/package.json')); const platformKey = `${process.platform}-${process.arch}`; -const binDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey); +const binDir = path.join(sdkRoot, 'foundry-local-core', platformKey); const ARTIFACTS = [ { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: deps['foundry-local-core']['nuget'], feed: NUGET_FEED },
diff --git a/sdk/js/script/preinstall.cjs b/sdk/js/script/preinstall.cjs index 99e805d7..49f14bd5 100644 --- a/sdk/js/script/preinstall.cjs +++ b/sdk/js/script/preinstall.cjs @@ -25,7 +25,7 @@ const ALL_PLATFORMS = Object.keys(optionalDependencies) }; }); -const packagesRoot = path.join(__dirname, '..', 'node_modules', '@foundry-local-core'); +const packagesRoot = path.join(__dirname, '..', 'foundry-local-core'); for (const platform of ALL_PLATFORMS) { const dir = path.join(packagesRoot, platform.key);
diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts index ece88e8d..72df7e26 100644 --- a/sdk/js/src/detail/coreInterop.ts +++ b/sdk/js/src/detail/coreInterop.ts @@ -59,10 +59,11 @@ export class CoreInterop { const arch = process.arch; const platformKey = `${platform}-${arch}`; - // Resolve the platform package directory at node_modules/@foundry-local-core/, + // Resolve the native binary directory at foundry-local-core/, // the shared location where install scripts place the native binaries. + const sdkRoot = path.resolve(__dirname, '..', '..'); - const packageDir = path.join(sdkRoot, 'node_modules', '@foundry-local-core', platformKey); + const packageDir = path.join(sdkRoot, 'foundry-local-core', platformKey); const ext = CoreInterop._getLibraryExtension(); const corePath = path.join(packageDir, `Microsoft.AI.Foundry.Local.Core${ext}`);
From ea84d90ee98fd313b470e9ffd108c2958fcb24fd Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Mon, 20 Apr 2026 14:32:52 -0700 Subject: [PATCH 48/83] replaces PAT-based service connection with service principal to access windows.ai.toolkit from AIFoundryLocal (#641) Removes reliance on PATs, as they will no longer be allowed after April 30th.
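As a rough TypeScript sketch of what the new `checkout-steps.yml` template does (the `az`/`git` invocations and the Azure DevOps resource ID follow the template in the diff below; wrapping them in Node, and the example repo name, are illustrative assumptions):

```ts
import { execFileSync } from 'child_process';

// Illustrative only — mirrors checkout-steps.yml. The resource ID is the
// Azure DevOps resource used by the template; the repo name is an example.
const token = execFileSync('az', [
  'account', 'get-access-token',
  '--resource', '499b84ac-1321-427f-aa17-267ca6975798',
  '--query', 'accessToken', '-o', 'tsv',
]).toString().trim();

// Embed the bearer token in the remote URL, as the template does
const repoUrl =
  `https://oauth:${token}@dev.azure.com/microsoft/windows.ai.toolkit/_git/test-data-shared`;
execFileSync('git', ['clone', '--depth', '1', repoUrl]);
```

Background on the PAT deprecation: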
https://eng.ms/docs/coreai/devdiv/one-engineering-system-1es/1es-docs/1es-security-configuration/azdo-disable-pats/overview --------- Co-authored-by: Prathik Rao --- .pipelines/foundry-local-packaging.yml | 212 +++++++++++--------- .pipelines/templates/build-core-steps.yml | 9 +- .pipelines/templates/build-cs-steps.yml | 7 +- .pipelines/templates/build-js-steps.yml | 7 +- .pipelines/templates/build-python-steps.yml | 7 +- .pipelines/templates/build-rust-steps.yml | 7 +- .pipelines/templates/checkout-steps.yml | 74 +++++++ .pipelines/templates/package-core-steps.yml | 2 +- .pipelines/templates/test-cs-steps.yml | 7 +- .pipelines/templates/test-js-steps.yml | 8 +- .pipelines/templates/test-python-steps.yml | 8 +- .pipelines/templates/test-rust-steps.yml | 8 +- sdk/js/test/testUtils.ts | 7 +- sdk/python/test/conftest.py | 9 +- sdk/rust/tests/integration/common/mod.rs | 10 +- 15 files changed, 268 insertions(+), 114 deletions(-) create mode 100644 .pipelines/templates/checkout-steps.yml diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index d639d350..c44e373d 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -36,17 +36,6 @@ variables: resources: repositories: - - repository: neutron-server - type: git - name: windows.ai.toolkit/neutron-server - endpoint: AIFoundryLocal-WindowsAIToolkit-SC - ref: refs/heads/${{ parameters.neutronServerBranch }} - - repository: test-data-shared - type: git - name: windows.ai.toolkit/test-data-shared - endpoint: AIFoundryLocal-WindowsAIToolkit-SC - lfs: true - ref: refs/heads/main - repository: 1ESPipelineTemplates type: git name: 1ESPipelineTemplates/1ESPipelineTemplates @@ -65,10 +54,6 @@ extends: binskim: break: false scanOutputDirectoryOnly: true - sourceRepositoriesToScan: - include: - - repository: neutron-server - - repository: test-data-shared stages: # ── Compute Version ── # A single version string is computed once and shared across all stages. 
@@ -143,10 +128,13 @@ extends: artifactName: 'flc-win-x64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: - - checkout: neutron-server - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: win-x64 @@ -163,10 +151,13 @@ extends: artifactName: 'flc-win-arm64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: - - checkout: neutron-server - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: win-arm64 @@ -183,10 +174,13 @@ extends: artifactName: 'flc-linux-x64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: - - checkout: neutron-server - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: linux-x64 @@ -205,13 +199,16 @@ extends: artifactName: 'flc-osx-arm64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared # AcesShared macOS agents don't have git-lfs pre-installed - script: brew install git-lfs && git lfs install displayName: 'Install Git LFS' - - checkout: neutron-server - clean: true - - checkout: test-data-shared - lfs: true - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: osx-arm64 @@ -243,8 +240,10 @@ extends: artifactName: 'deps-versions-standard' targetPath: '$(Build.ArtifactStagingDirectory)/deps-versions' steps: - - checkout: neutron-server - clean: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} - task: DownloadPipelineArtifact@2 inputs: buildType: current @@ -320,9 +319,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/cs-sdk' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-cs-steps.yml@self parameters: version: ${{ parameters.version }} @@ -349,7 +348,6 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' steps: - checkout: self - clean: true - template: .pipelines/templates/build-js-addon-steps.yml@self parameters: repoRoot: $(Build.SourcesDirectory) @@ -374,7 +372,6 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' steps: - checkout: self - clean: true - template: .pipelines/templates/build-js-addon-steps.yml@self 
parameters: repoRoot: $(Build.SourcesDirectory) @@ -400,7 +397,6 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' steps: - checkout: self - clean: true - template: .pipelines/templates/build-js-addon-steps.yml@self parameters: repoRoot: $(Build.SourcesDirectory) @@ -426,7 +422,6 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/js-addon' steps: - checkout: self - clean: true - template: .pipelines/templates/build-js-addon-steps.yml@self parameters: repoRoot: $(Build.SourcesDirectory) @@ -469,9 +464,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/js-sdk' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared # Download prebuilt Node-API addons for all platforms - task: DownloadPipelineArtifact@2 @@ -504,7 +499,8 @@ extends: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { $repoRoot = $multiCheckout } else { $repoRoot = "$(Build.SourcesDirectory)" } $prebuildsDir = "$repoRoot/sdk/js/prebuilds" foreach ($platform in @('win32-x64','win32-arm64','linux-x64','darwin-arm64')) { $src = "$(Pipeline.Workspace)/js-addon-$platform/foundry_local_napi.node" @@ -551,9 +547,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/python-sdk' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-python-steps.yml@self parameters: version: ${{ parameters.version }} @@ -591,9 +587,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/rust-sdk' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-rust-steps.yml@self parameters: version: ${{ parameters.version }} @@ -668,8 +664,10 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared + basePath: '$(Agent.BuildDirectory)' - template: .pipelines/templates/test-cs-steps.yml@self parameters: isWinML: false @@ -692,8 +690,10 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared + basePath: '$(Agent.BuildDirectory)' - template: .pipelines/templates/test-cs-steps.yml@self parameters: isWinML: false @@ -721,8 +721,10 @@ extends: displayName: 'Install Git LFS' - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared + basePath: '$(Agent.BuildDirectory)' - template: .pipelines/templates/test-cs-steps.yml@self parameters: isWinML: false @@ -750,8 +752,9 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-js-steps.yml@self parameters: isWinML: false @@ -808,8 +811,9 @@ extends: displayName: 'Install Git LFS' - checkout: self clean: true - - checkout: 
test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-js-steps.yml@self parameters: isWinML: false @@ -840,8 +844,9 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-python-steps.yml@self parameters: isWinML: false @@ -903,8 +908,9 @@ extends: displayName: 'Install Git LFS' - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-python-steps.yml@self parameters: isWinML: false @@ -933,8 +939,9 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-rust-steps.yml@self parameters: isWinML: false @@ -957,8 +964,9 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-rust-steps.yml@self parameters: isWinML: false @@ -986,8 +994,9 @@ extends: displayName: 'Install Git LFS' - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-rust-steps.yml@self parameters: isWinML: false @@ -1010,10 +1019,13 @@ extends: artifactName: 'flc-winml-win-x64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: - - checkout: neutron-server - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: win-x64 @@ -1031,8 +1043,13 @@ extends: artifactName: 'flc-winml-win-arm64' targetPath: '$(Build.ArtifactStagingDirectory)/native' steps: - - checkout: neutron-server - clean: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-core-steps.yml@self parameters: flavor: win-arm64 @@ -1063,8 +1080,10 @@ extends: artifactName: 'deps-versions-winml' targetPath: '$(Build.ArtifactStagingDirectory)/deps-versions' steps: - - checkout: neutron-server - clean: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: neutron-server + ref: refs/heads/${{ parameters.neutronServerBranch }} - task: DownloadPipelineArtifact@2 inputs: buildType: current @@ -1126,9 +1145,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/cs-sdk-winml' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-cs-steps.yml@self 
parameters: version: ${{ parameters.version }} @@ -1168,9 +1187,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/js-sdk' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared # Download prebuilt Node-API addons for all platforms - task: DownloadPipelineArtifact@2 @@ -1203,7 +1222,8 @@ extends: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { $repoRoot = $multiCheckout } else { $repoRoot = "$(Build.SourcesDirectory)" } $prebuildsDir = "$repoRoot/sdk/js/prebuilds" foreach ($platform in @('win32-x64','win32-arm64','linux-x64','darwin-arm64')) { $src = "$(Pipeline.Workspace)/js-addon-$platform/foundry_local_napi.node" @@ -1250,9 +1270,9 @@ extends: targetPath: '$(Build.ArtifactStagingDirectory)/python-sdk-winml' steps: - checkout: self - clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/build-python-steps.yml@self parameters: version: ${{ parameters.version }} @@ -1284,8 +1304,10 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared + basePath: '$(Agent.BuildDirectory)' - template: .pipelines/templates/test-cs-steps.yml@self parameters: isWinML: true @@ -1313,8 +1335,9 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-js-steps.yml@self parameters: isWinML: true @@ -1345,8 +1368,9 @@ extends: steps: - checkout: self clean: true - - checkout: test-data-shared - lfs: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared - template: .pipelines/templates/test-python-steps.yml@self parameters: isWinML: true diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml index d24f41bc..c21e0b92 100644 --- a/.pipelines/templates/build-core-steps.yml +++ b/.pipelines/templates/build-core-steps.yml @@ -16,15 +16,8 @@ steps: inputs: targetType: inline script: | - # Multi-checkout places repos in subdirectories; single checkout places contents at root - $multiCheckout = "$(Build.SourcesDirectory)/neutron-server" - if (Test-Path $multiCheckout) { - $nsRoot = $multiCheckout - } else { - $nsRoot = "$(Build.SourcesDirectory)" - } + $nsRoot = "$(Build.SourcesDirectory)/neutron-server" Write-Host "##vso[task.setvariable variable=nsRoot]$nsRoot" - Write-Host "neutron-server root: $nsRoot" - task: UseDotNet@2 displayName: 'Use .NET SDK from global.json' diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml index 7f77647f..bb1a4fb9 100644 --- a/.pipelines/templates/build-cs-steps.yml +++ b/.pipelines/templates/build-cs-steps.yml @@ -31,7 +31,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = 
"$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" diff --git a/.pipelines/templates/build-js-steps.yml b/.pipelines/templates/build-js-steps.yml index 9da8acd8..1e4e329e 100644 --- a/.pipelines/templates/build-js-steps.yml +++ b/.pipelines/templates/build-js-steps.yml @@ -28,7 +28,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" diff --git a/.pipelines/templates/build-python-steps.yml b/.pipelines/templates/build-python-steps.yml index c4c62825..da0de572 100644 --- a/.pipelines/templates/build-python-steps.yml +++ b/.pipelines/templates/build-python-steps.yml @@ -31,7 +31,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" diff --git a/.pipelines/templates/build-rust-steps.yml b/.pipelines/templates/build-rust-steps.yml index e466b10f..d0b03eb2 100644 --- a/.pipelines/templates/build-rust-steps.yml +++ b/.pipelines/templates/build-rust-steps.yml @@ -31,7 +31,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" diff --git a/.pipelines/templates/checkout-steps.yml b/.pipelines/templates/checkout-steps.yml new file mode 100644 index 00000000..601eacf5 --- /dev/null +++ b/.pipelines/templates/checkout-steps.yml @@ -0,0 +1,74 @@ +# Clones an Azure DevOps Git repo using a service connection. +# Replaces `checkout: ` for repos that cannot use PAT-based endpoints. +# +# The repo is cloned into $(Build.SourcesDirectory)/ to match the +# directory layout that multi-checkout produces. 
+parameters: +- name: repoName + type: string + displayName: 'Repository name (also used as the checkout directory name)' +- name: ref + type: string + default: 'refs/heads/main' + displayName: 'Git ref to checkout (branch, tag, or commit SHA)' +- name: basePath + type: string + default: '$(Build.SourcesDirectory)' + displayName: 'Base directory to clone into (repo will be at basePath/repoName)' + +steps: +- task: AzureCLI@2 + displayName: 'Checkout ${{ parameters.repoName }}' + inputs: + azureSubscription: 'FoundryLocalCore-SP' + scriptType: pscore + scriptLocation: inlineScript + inlineScript: | + $ErrorActionPreference = 'Stop' + + $repoDir = "${{ parameters.basePath }}/${{ parameters.repoName }}" + $ref = "${{ parameters.ref }}" + + # Obtain bearer token scoped to Azure DevOps + $token = az account get-access-token --resource 499b84ac-1321-427f-aa17-267ca6975798 --query accessToken -o tsv + if ($LASTEXITCODE -ne 0 -or -not $token) { throw "Failed to obtain access token" } + + # Embed the token directly in the remote URL for reliable auth + $repoUrl = "https://oauth:${token}@dev.azure.com/microsoft/windows.ai.toolkit/_git/${{ parameters.repoName }}" + + # Always clean to avoid conflicts with stale state from agent reuse + if (Test-Path $repoDir) { + Write-Host "Cleaning existing directory: $repoDir" + Remove-Item -Recurse -Force $repoDir + } + + # Create and init + New-Item -ItemType Directory -Path $repoDir -Force | Out-Null + Push-Location $repoDir + git init + git remote add origin $repoUrl + + # Fetch — use shallow clone for test-data-shared (no NBGV), full clone for others + $branch = $ref -replace '^refs/heads/', '' + $repoName = '${{ parameters.repoName }}' + if ($repoName -eq 'test-data-shared') { + git fetch origin $branch --depth=1 + } else { + git fetch origin $branch + } + if ($LASTEXITCODE -ne 0) { throw "git fetch failed for ${{ parameters.repoName }}" } + + # Checkout + git checkout "origin/$branch" -B $branch + if ($LASTEXITCODE -ne 0) { throw "git checkout failed for ${{ parameters.repoName }} at ref $ref" } + + # LFS — enable for test-data-shared (contains LFS-tracked test data) + if ($repoName -eq 'test-data-shared') { + Write-Host "Pulling LFS objects..." 
+ git lfs install --local + git lfs pull + if ($LASTEXITCODE -ne 0) { throw "git lfs pull failed for ${{ parameters.repoName }}" } + } + + Pop-Location + Write-Host "Checked out ${{ parameters.repoName }} at $(git -C $repoDir rev-parse HEAD)" diff --git a/.pipelines/templates/package-core-steps.yml b/.pipelines/templates/package-core-steps.yml index fdd54c28..e00a6316 100644 --- a/.pipelines/templates/package-core-steps.yml +++ b/.pipelines/templates/package-core-steps.yml @@ -23,7 +23,7 @@ steps: inputs: targetType: inline script: | - $nsRoot = "$(Build.SourcesDirectory)" + $nsRoot = "$(Build.SourcesDirectory)/neutron-server" Write-Host "##vso[task.setvariable variable=nsRoot]$nsRoot" - task: PowerShell@2 diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml index 773e9000..0b107b21 100644 --- a/.pipelines/templates/test-cs-steps.yml +++ b/.pipelines/templates/test-cs-steps.yml @@ -18,7 +18,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" diff --git a/.pipelines/templates/test-js-steps.yml b/.pipelines/templates/test-js-steps.yml index 955086e2..e0ea3a34 100644 --- a/.pipelines/templates/test-js-steps.yml +++ b/.pipelines/templates/test-js-steps.yml @@ -19,7 +19,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" @@ -140,3 +145,4 @@ steps: customCommand: 'test' env: TF_BUILD: 'true' + FOUNDRY_TEST_DATA_DIR: $(testDataDir) diff --git a/.pipelines/templates/test-python-steps.yml b/.pipelines/templates/test-python-steps.yml index 00ac621b..37bfa5a8 100644 --- a/.pipelines/templates/test-python-steps.yml +++ b/.pipelines/templates/test-python-steps.yml @@ -21,7 +21,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = $multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" @@ -131,3 +136,4 @@ steps: workingDirectory: $(repoRoot)/sdk/python env: TF_BUILD: 'true' + FOUNDRY_TEST_DATA_DIR: $(testDataDir) diff --git a/.pipelines/templates/test-rust-steps.yml b/.pipelines/templates/test-rust-steps.yml index 4a7b1220..4d382b1d 100644 --- a/.pipelines/templates/test-rust-steps.yml +++ b/.pipelines/templates/test-rust-steps.yml @@ -18,7 +18,12 @@ steps: inputs: targetType: inline script: | - $repoRoot = "$(Build.SourcesDirectory)/Foundry-Local" + $multiCheckout = "$(Build.SourcesDirectory)/Foundry-Local" + if (Test-Path $multiCheckout) { + $repoRoot = 
$multiCheckout + } else { + $repoRoot = "$(Build.SourcesDirectory)" + } $testDataDir = "$(Build.SourcesDirectory)/test-data-shared" Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" @@ -128,3 +133,4 @@ steps: if ($LASTEXITCODE -ne 0) { exit $LASTEXITCODE } env: TF_BUILD: 'true' + FOUNDRY_TEST_DATA_DIR: $(testDataDir) diff --git a/sdk/js/test/testUtils.ts b/sdk/js/test/testUtils.ts index 62cf7968..39fbed71 100644 --- a/sdk/js/test/testUtils.ts +++ b/sdk/js/test/testUtils.ts @@ -14,7 +14,12 @@ function getGitRepoRoot(): string { } function getTestDataSharedPath(): string { - // Try to find test-data-shared relative to the git repo root + // Use FOUNDRY_TEST_DATA_DIR env var if set (CI), otherwise look for + // test-data-shared as a sibling of the git repo root (local dev). + const envPath = process.env.FOUNDRY_TEST_DATA_DIR; + if (envPath && fs.existsSync(envPath)) { + return envPath; + } const repoRoot = getGitRepoRoot(); const testDataSharedPath = path.join(path.dirname(repoRoot), 'test-data-shared'); return testDataSharedPath; diff --git a/sdk/python/test/conftest.py b/sdk/python/test/conftest.py index b7e22c97..1cb85704 100644 --- a/sdk/python/test/conftest.py +++ b/sdk/python/test/conftest.py @@ -40,7 +40,14 @@ def get_git_repo_root() -> Path: def get_test_data_shared_path() -> str: - """Return absolute path to the test-data-shared folder (sibling of the repo root).""" + """Return absolute path to the test-data-shared folder. + + Uses FOUNDRY_TEST_DATA_DIR env var if set (CI), otherwise falls back + to looking for test-data-shared as a sibling of the repo root. + """ + env_path = os.environ.get("FOUNDRY_TEST_DATA_DIR") + if env_path and os.path.isdir(env_path): + return env_path repo_root = get_git_repo_root() return str(repo_root.parent / "test-data-shared") diff --git a/sdk/rust/tests/integration/common/mod.rs b/sdk/rust/tests/integration/common/mod.rs index b0ca1a77..d657310c 100644 --- a/sdk/rust/tests/integration/common/mod.rs +++ b/sdk/rust/tests/integration/common/mod.rs @@ -44,8 +44,16 @@ pub fn get_git_repo_root() -> PathBuf { } } -/// Path to the shared test-data directory that lives alongside the repo root. +/// Path to the shared test-data directory. +/// Uses FOUNDRY_TEST_DATA_DIR env var if set (CI), otherwise falls back +/// to looking for test-data-shared as a sibling of the repo root. 
pub fn get_test_data_shared_path() -> PathBuf { + if let Ok(env_path) = std::env::var("FOUNDRY_TEST_DATA_DIR") { + let p = PathBuf::from(&env_path); + if p.is_dir() { + return p; + } + } let repo_root = get_git_repo_root(); repo_root .parent() From 86baa8e9df34f5a438f475239b29749f0695512f Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Mon, 20 Apr 2026 22:07:19 -0700 Subject: [PATCH 49/83] adds back python linux sdk tests (#654) Co-authored-by: Prathik Rao --- .pipelines/foundry-local-packaging.yml | 58 +++++++++++++------------- sdk/python/src/detail/utils.py | 2 +- 2 files changed, 30 insertions(+), 30 deletions(-) diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index c44e373d..6c3b65c4 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -854,35 +854,35 @@ extends: sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' - # Linux Python tests are disabled due to native dependency resolution issues - # (onnxruntime-gpu / onnxruntime-genai-cuda not found correctly on CI agents). - # - job: test_python_linux_x64 - # displayName: 'linux-x64' - # pool: - # name: onnxruntime-Ubuntu2404-AMD-CPU - # os: linux - # templateContext: - # inputs: - # - input: pipelineArtifact - # artifactName: 'flc-wheels' - # targetPath: '$(Pipeline.Workspace)/flc-wheels' - # - input: pipelineArtifact - # artifactName: 'python-sdk' - # targetPath: '$(Pipeline.Workspace)/python-sdk' - # - input: pipelineArtifact - # artifactName: 'deps-versions-standard' - # targetPath: '$(Pipeline.Workspace)/deps-versions-standard' - # steps: - # - checkout: self - # clean: true - # - checkout: test-data-shared - # lfs: true - # - template: .pipelines/templates/test-python-steps.yml@self - # parameters: - # isWinML: false - # flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' - # sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' - # depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' + - job: test_python_linux_x64 + displayName: 'linux-x64' + pool: + name: onnxruntime-Ubuntu2404-AMD-CPU + os: linux + templateContext: + inputs: + - input: pipelineArtifact + artifactName: 'flc-wheels' + targetPath: '$(Pipeline.Workspace)/flc-wheels' + - input: pipelineArtifact + artifactName: 'python-sdk' + targetPath: '$(Pipeline.Workspace)/python-sdk' + - input: pipelineArtifact + artifactName: 'deps-versions-standard' + targetPath: '$(Pipeline.Workspace)/deps-versions-standard' + steps: + - checkout: self + clean: true + - template: .pipelines/templates/checkout-steps.yml@self + parameters: + repoName: test-data-shared + basePath: '$(Agent.BuildDirectory)' + - template: .pipelines/templates/test-python-steps.yml@self + parameters: + isWinML: false + flcWheelsDir: '$(Pipeline.Workspace)/flc-wheels' + sdkWheelsDir: '$(Pipeline.Workspace)/python-sdk' + depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' - job: test_python_osx_arm64 displayName: 'osx-arm64' diff --git a/sdk/python/src/detail/utils.py b/sdk/python/src/detail/utils.py index 5780cfc9..4f37123f 100644 --- a/sdk/python/src/detail/utils.py +++ b/sdk/python/src/detail/utils.py @@ -89,7 +89,7 @@ def _find_file_in_package(package_name: str, filename: str) -> Path | None: # Quick checks for well-known sub-directories first for candidate_dir in (pkg_root, pkg_root / "capi", pkg_root / "native", pkg_root / "lib", pkg_root / "bin"): - candidates = list(candidate_dir.glob(f"*{filename}*")) + candidates = [p for p in 
candidate_dir.glob(f"*{filename}*") if not p.name.endswith(".dbg")] if candidates: return candidates[0] From 8ce6b3a98033695cbe4fea9e652bddabbc6c29ed Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Tue, 21 Apr 2026 10:18:16 -0700 Subject: [PATCH 50/83] adds nuget env vars to combat flaky test stages (#656) Co-authored-by: Prathik Rao Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .pipelines/templates/test-cs-steps.yml | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml index 0b107b21..c1378b04 100644 --- a/.pipelines/templates/test-cs-steps.yml +++ b/.pipelines/templates/test-cs-steps.yml @@ -83,18 +83,31 @@ steps: if ($LASTEXITCODE -ne 0) { throw "Windows App SDK Runtime install failed" } errorActionPreference: 'stop' -# Use a per-build NuGet packages directory to avoid corruption and file-locking -# issues from the shared global cache on reused agents. +# Per-job NuGet isolation to prevent "Central Directory corrupt" / file-locking +# errors when multiple C# test jobs (regular + WinML) run concurrently on the +# same reused agent. Keyed by $(System.JobId); cleaned on each run. # https://learn.microsoft.com/en-us/nuget/reference/cli-reference/cli-ref-environment-variables - task: PowerShell@2 displayName: 'Set isolated NuGet packages path' inputs: targetType: inline script: | - $pkgDir = "$(Build.BinariesDirectory)/nuget-packages" - New-Item -ItemType Directory -Force -Path $pkgDir | Out-Null + $buildDir = "$(Build.BinariesDirectory)/nuget-isolated-$(System.JobId)" + # Clean any leftover state from previous runs on this agent + if (Test-Path $buildDir) { + Remove-Item -Recurse -Force $buildDir + } + $pkgDir = "$buildDir/packages" + $httpCacheDir = "$buildDir/http-cache" + $pluginsCacheDir = "$buildDir/plugins-cache" + foreach ($d in @($pkgDir, $httpCacheDir, $pluginsCacheDir)) { + New-Item -ItemType Directory -Force -Path $d | Out-Null + } + Write-Host "##vso[task.setvariable variable=NUGET_PACKAGES]$pkgDir" - Write-Host "NuGet packages directory: $pkgDir" + Write-Host "##vso[task.setvariable variable=NUGET_HTTP_CACHE_PATH]$httpCacheDir" + Write-Host "##vso[task.setvariable variable=NUGET_PLUGINS_CACHE_PATH]$pluginsCacheDir" + Write-Host "NuGet isolation directory: $buildDir" - task: PowerShell@2 displayName: 'Restore & build tests' From 989396bd8703e0e4678f84e9302e091bbcf08bb9 Mon Sep 17 00:00:00 2001 From: "microsoft-github-policy-service[bot]" <77245923+microsoft-github-policy-service[bot]@users.noreply.github.com> Date: Tue, 21 Apr 2026 10:19:04 -0700 Subject: [PATCH 51/83] Auto-generated baselines by 1ES Pipeline Templates (#658) Co-authored-by: microsoft-github-policy-service[bot] <77245923+microsoft-github-policy-service[bot]@users.noreply.github.com> --- .../1espt/PipelineAutobaseliningConfig.yml | 17 ++++++++++ .config/guardian/.gdnbaselines | 33 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 .config/1espt/PipelineAutobaseliningConfig.yml create mode 100644 .config/guardian/.gdnbaselines diff --git a/.config/1espt/PipelineAutobaseliningConfig.yml b/.config/1espt/PipelineAutobaseliningConfig.yml new file mode 100644 index 00000000..bfbb3214 --- /dev/null +++ b/.config/1espt/PipelineAutobaseliningConfig.yml @@ -0,0 +1,17 @@ +## DO NOT MODIFY THIS FILE MANUALLY. This is part of auto-baselining from 1ES Pipeline Templates. Go to [https://aka.ms/1espt-autobaselining] for more details. 
+ +pipelines: + 2192: + retail: + binary: + binskim: + lastModifiedDate: 2026-04-21 + spotbugs: + lastModifiedDate: 2026-04-21 + source: + eslint: + lastModifiedDate: 2026-04-21 + psscriptanalyzer: + lastModifiedDate: 2026-04-21 + armory: + lastModifiedDate: 2026-04-21 diff --git a/.config/guardian/.gdnbaselines b/.config/guardian/.gdnbaselines new file mode 100644 index 00000000..24273b90 --- /dev/null +++ b/.config/guardian/.gdnbaselines @@ -0,0 +1,33 @@ +{ + "properties": { + "helpUri": "https://eng.ms/docs/microsoft-security/security/azure-security/cloudai-security-fundamentals-engineering/security-integration/guardian-wiki/microsoft-guardian/general/baselines" + }, + "version": "1.0.0", + "baselines": { + "default": { + "name": "default", + "createdDate": "2026-04-21 05:29:03Z", + "lastUpdatedDate": "2026-04-21 05:29:03Z" + } + }, + "results": { + "69b647712f85ffe7b40ad90ffc86e9ff9fac0abb44d922c42bcecab81e4ccec2": { + "signature": "69b647712f85ffe7b40ad90ffc86e9ff9fac0abb44d922c42bcecab81e4ccec2", + "alternativeSignatures": [ + "ebb5b86a3fed512da35d013ab87c994656fef3061f5b28396da9c0f8e8e1f513", + "54a3900e69221d9330006bc7a493a75b00e864d977cc5ec729d1d2efd836de02", + "468342a92ed62a2d43a75e78f8fe8b6683f9f8c1c289b1436043319a5f94d3ed" + ], + "target": "native/Microsoft.AI.Foundry.Local.Core.dll", + "uriBaseId": "file:///E:/_work/1/a/", + "memberOf": [ + "default" + ], + "tool": "binskim", + "ruleId": "BA2008", + "createdDate": "2026-04-21 05:29:03Z", + "expirationDate": "2026-10-08 06:22:33Z", + "justification": "This error is baselined with an expiration date of 180 days from 2026-04-21 06:22:33Z" + } + } +} \ No newline at end of file From 03fef37e3cba9a8c1eb0fb6c4c6a68f44358e8e4 Mon Sep 17 00:00:00 2001 From: Raja Phanindra Chava Date: Wed, 22 Apr 2026 08:43:06 -0700 Subject: [PATCH 52/83] Added embedding API to C#, JS, Python and Rust SDKs (#639) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Adds embedding support to all four Foundry Local SDKs (C#, JavaScript, Python, Rust), enabling text embedding generation through the `OpenAIEmbeddingClient` via the FoundryLocalCore native interop layer. Supports both single and batch input. ## Changes ### C# SDK - **`OpenAIEmbeddingClient`** — Client with `GenerateEmbeddingAsync(string)` for single input and `GenerateEmbeddingsAsync(IEnumerable)` for batch. - **`EmbeddingRequestResponseTypes.cs`** — Request DTO extending Betalgo's `EmbeddingCreateRequest` with `FromUserInput()` factory for both single and batch. Response deserialization with null-check and error handling. - **`IModel.GetEmbeddingClientAsync()`** — New interface method, implemented in `Model` and `ModelVariant`. - **`JsonSerializationContext`** — Registered `EmbeddingCreateRequestExtended` and `EmbeddingCreateResponse` for AOT. ### JavaScript SDK - **`EmbeddingClient`** — Client with `generateEmbedding(string)` and `generateEmbeddings(string[])`. Shared `executeRequest()` for both paths. - **`IModel.createEmbeddingClient()`** — Factory method in interface, `Model`, and `ModelVariant`. - Exported from `index.ts`. ### Python SDK - **`EmbeddingClient`** — Client with `generate_embedding(str)` and `generate_embeddings(List[str])`. Uses OpenAI SDK types (`EmbeddingCreateParams` for request, `CreateEmbeddingResponse` for response). Patches server response to add missing `object` and `usage` fields required by the OpenAI SDK type. - **`IModel.get_embedding_client()`** — Abstract method, implemented in `Model` and `ModelVariant`. 
- Exported from `openai/__init__.py`. ### Rust SDK - **`EmbeddingClient`** — Client with `generate_embedding(&str)` and `generate_embeddings(&[&str])`. Uses `async_openai::types::embeddings::CreateEmbeddingResponse` as return type. Patches server response for missing `object` and `usage` fields. - **`Model.create_embedding_client()`** — Factory method in `Model` and `ModelVariant`. - Added `"embedding-types"` feature to `async-openai` dependency. ### Tests All SDKs include tests for: - Basic embedding generation (1024 dimensions, correct response structure) - L2 normalization (norm ≈ 1.0, values within [-1, 1]) - Different inputs produce different embeddings (cosine similarity < 0.99) - Same input produces identical embeddings (determinism) - Batch embedding (multiple inputs → multiple results with correct indices) - Batch results match single-input results - Input validation (empty input, empty list) - Known golden values (C# only) ### Samples New `embeddings` sample in each SDK (`samples/{cs,js,python,rust}/embeddings/`) demonstrating: - SDK initialization and model setup - Single embedding generation with dimension output - Batch embedding generation with multiple inputs ### Documentation - All four SDK READMEs updated with embeddings feature documentation and usage examples. - C# API docs (`index.md`, `imodel.md`, `model.md`, `modelvariant.md`, `openaiembeddingclient.md`). - JS docs (`README.md` class index). - Rust API docs (`docs/api.md` with `EmbeddingClient`, `EmbeddingResponse`, `EmbeddingData` reference). - Python API reference table updated. ## Test plan - [x] C# SDK tests pass - [x] Python SDK tests pass - [x] JS SDK tests pass - [x] Rust SDK tests pass - [x] All existing chat/audio tests unaffected ## Dependencies This PR depends on the FoundryLocalCore (neutron-server) PR that adds the `"embeddings"` NativeInterop command, `/v1/embeddings` endpoint, and batch support: https://microsoft.visualstudio.com/windows.ai.toolkit/_git/neutron-server/pullrequest/15212502 --------- Co-authored-by: Raja Phanindra Chava Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .gitignore | 1 + samples/README.md | 10 +- samples/cs/README.md | 1 + samples/cs/embeddings/Embeddings.csproj | 48 +++ samples/cs/embeddings/Program.cs | 74 +++++ samples/js/README.md | 1 + samples/js/embeddings/app.js | 73 +++++ samples/js/embeddings/package.json | 15 + samples/python/README.md | 1 + samples/python/embeddings/requirements.txt | 2 + samples/python/embeddings/src/app.py | 61 ++++ samples/rust/Cargo.toml | 1 + samples/rust/README.md | 1 + samples/rust/embeddings/Cargo.toml | 12 + samples/rust/embeddings/src/main.rs | 88 ++++++ sdk/cs/README.md | 19 ++ sdk/cs/docs/api/index.md | 2 + .../api/microsoft.ai.foundry.local.imodel.md | 18 ++ .../api/microsoft.ai.foundry.local.model.md | 14 + ...microsoft.ai.foundry.local.modelvariant.md | 14 + ....ai.foundry.local.openaiembeddingclient.md | 57 ++++ sdk/cs/src/Detail/JsonSerializationContext.cs | 2 + sdk/cs/src/Detail/Model.cs | 5 + sdk/cs/src/Detail/ModelVariant.cs | 17 + sdk/cs/src/IModel.cs | 7 + sdk/cs/src/OpenAI/EmbeddingClient.cs | 102 ++++++ .../OpenAI/EmbeddingRequestResponseTypes.cs | 74 +++++ .../EmbeddingClientTests.cs | 273 ++++++++++++++++ sdk/cs/test/FoundryLocal.Tests/Utils.cs | 22 ++ sdk/js/README.md | 23 ++ sdk/js/docs/README.md | 1 + sdk/js/src/detail/model.ts | 9 + sdk/js/src/detail/modelVariant.ts | 9 + sdk/js/src/imodel.ts | 2 + sdk/js/src/index.ts | 1 + sdk/js/src/openai/embeddingClient.ts | 86 +++++ 
sdk/js/test/openai/embeddingClient.test.ts | 295 ++++++++++++++++++ sdk/js/test/testUtils.ts | 1 + sdk/python/README.md | 24 ++ sdk/python/src/detail/model.py | 5 + sdk/python/src/detail/model_variant.py | 7 +- sdk/python/src/imodel.py | 9 + sdk/python/src/openai/__init__.py | 3 +- sdk/python/src/openai/embedding_client.py | 107 +++++++ sdk/python/test/conftest.py | 1 + .../test/openai/test_embedding_client.py | 202 ++++++++++++ sdk/rust/Cargo.toml | 2 +- sdk/rust/README.md | 22 ++ sdk/rust/docs/api.md | 30 ++ sdk/rust/src/detail/model.rs | 6 + sdk/rust/src/detail/model_variant.rs | 5 + sdk/rust/src/openai/embedding_client.rs | 100 ++++++ sdk/rust/src/openai/mod.rs | 2 + sdk/rust/tests/integration/common/mod.rs | 3 + .../integration/embedding_client_test.rs | 223 +++++++++++++ sdk/rust/tests/integration/main.rs | 1 + 56 files changed, 2186 insertions(+), 8 deletions(-) create mode 100644 samples/cs/embeddings/Embeddings.csproj create mode 100644 samples/cs/embeddings/Program.cs create mode 100644 samples/js/embeddings/app.js create mode 100644 samples/js/embeddings/package.json create mode 100644 samples/python/embeddings/requirements.txt create mode 100644 samples/python/embeddings/src/app.py create mode 100644 samples/rust/embeddings/Cargo.toml create mode 100644 samples/rust/embeddings/src/main.rs create mode 100644 sdk/cs/docs/api/microsoft.ai.foundry.local.openaiembeddingclient.md create mode 100644 sdk/cs/src/OpenAI/EmbeddingClient.cs create mode 100644 sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs create mode 100644 sdk/js/src/openai/embeddingClient.ts create mode 100644 sdk/js/test/openai/embeddingClient.test.ts create mode 100644 sdk/python/src/openai/embedding_client.py create mode 100644 sdk/python/test/openai/test_embedding_client.py create mode 100644 sdk/rust/src/openai/embedding_client.rs create mode 100644 sdk/rust/tests/integration/embedding_client_test.rs diff --git a/.gitignore b/.gitignore index c5859ed2..594d5e0c 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ test.ipynb bin/ obj/ .vs/ +.vscode/ # build, distribute, and bins build/ diff --git a/samples/README.md b/samples/README.md index 93f3bd57..bcac6bf3 100644 --- a/samples/README.md +++ b/samples/README.md @@ -1,6 +1,6 @@ # Foundry Local Samples -Explore complete working examples that demonstrate how to use Foundry Local — an end-to-end local AI solution that runs entirely on-device. These samples cover chat completions, audio transcription, tool calling, LangChain integration, and more. +Explore complete working examples that demonstrate how to use Foundry Local — an end-to-end local AI solution that runs entirely on-device. These samples cover chat completions, embeddings, audio transcription, tool calling, LangChain integration, and more. > **New to Foundry Local?** Check out the [main README](../README.md) for an overview and quickstart, or visit the [Foundry Local documentation](https://learn.microsoft.com/azure/foundry-local/) on Microsoft Learn. @@ -8,7 +8,7 @@ Explore complete working examples that demonstrate how to use Foundry Local — | Language | Samples | Description | |----------|---------|-------------| -| [**C#**](cs/) | 12 | .NET SDK samples including native chat, audio transcription, tool calling, model management, web server, and tutorials. Uses WinML on Windows for hardware acceleration. 
| -| [**JavaScript**](js/) | 12 | Node.js SDK samples including native chat, audio transcription, Electron desktop app, Copilot SDK integration, LangChain, tool calling, web server, and tutorials. | -| [**Python**](python/) | 9 | Python samples using the OpenAI-compatible API, including chat, audio transcription, LangChain integration, tool calling, web server, and tutorials. | -| [**Rust**](rust/) | 8 | Rust SDK samples including native chat, audio transcription, tool calling, web server, and tutorials. | +| [**C#**](cs/) | 13 | .NET SDK samples including native chat, embeddings, audio transcription, tool calling, model management, web server, and tutorials. Uses WinML on Windows for hardware acceleration. | +| [**JavaScript**](js/) | 13 | Node.js SDK samples including native chat, embeddings, audio transcription, Electron desktop app, Copilot SDK integration, LangChain, tool calling, web server, and tutorials. | +| [**Python**](python/) | 10 | Python samples using the OpenAI-compatible API, including chat, embeddings, audio transcription, LangChain integration, tool calling, web server, and tutorials. | +| [**Rust**](rust/) | 9 | Rust SDK samples including native chat, embeddings, audio transcription, tool calling, web server, and tutorials. | diff --git a/samples/cs/README.md b/samples/cs/README.md index 367c432e..ad10a3c6 100644 --- a/samples/cs/README.md +++ b/samples/cs/README.md @@ -12,6 +12,7 @@ Both packages provide the same APIs, so the same source code works on all platfo | Sample | Description | |---|---| | [native-chat-completions](native-chat-completions/) | Initialize the SDK, download a model, and run chat completions. | +| [embeddings](embeddings/) | Generate single and batch text embeddings using the Foundry Local SDK. | | [audio-transcription-example](audio-transcription-example/) | Transcribe audio files using the Foundry Local SDK. | | [foundry-local-web-server](foundry-local-web-server/) | Set up a local OpenAI-compliant web server. | | [tool-calling-foundry-local-sdk](tool-calling-foundry-local-sdk/) | Use tool calling with native chat completions. | diff --git a/samples/cs/embeddings/Embeddings.csproj b/samples/cs/embeddings/Embeddings.csproj new file mode 100644 index 00000000..4d948c56 --- /dev/null +++ b/samples/cs/embeddings/Embeddings.csproj @@ -0,0 +1,48 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/embeddings/Program.cs b/samples/cs/embeddings/Program.cs new file mode 100644 index 00000000..348bc346 --- /dev/null +++ b/samples/cs/embeddings/Program.cs @@ -0,0 +1,74 @@ +// +// +using Microsoft.AI.Foundry.Local; +// + +// +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +// Initialize the singleton instance. +await FoundryLocalManager.CreateAsync(config, Utils.GetAppLogger()); +var mgr = FoundryLocalManager.Instance; +// + +// +// Get the model catalog +var catalog = await mgr.GetCatalogAsync(); + +// Get an embedding model +var model = await catalog.GetModelAsync("qwen3-0.6b-embedding") ?? 
throw new Exception("Embedding model not found"); + +// Download the model (the method skips download if already cached) +await model.DownloadAsync(progress => +{ + Console.Write($"\rDownloading model: {progress:F2}%"); + if (progress >= 100f) + { + Console.WriteLine(); + } +}); + +// Load the model +Console.Write($"Loading model {model.Id}..."); +await model.LoadAsync(); +Console.WriteLine("done."); +// + +// +// Get an embedding client +var embeddingClient = await model.GetEmbeddingClientAsync(); + +// Generate a single embedding +Console.WriteLine("\n--- Single Embedding ---"); +var response = await embeddingClient.GenerateEmbeddingAsync("The quick brown fox jumps over the lazy dog"); +var embedding = response.Data[0].Embedding; +Console.WriteLine($"Dimensions: {embedding.Count}"); +Console.WriteLine($"First 5 values: [{string.Join(", ", embedding.Take(5).Select(v => v.ToString("F6")))}]"); +// + +// +// Generate embeddings for multiple inputs +Console.WriteLine("\n--- Batch Embeddings ---"); +var batchResponse = await embeddingClient.GenerateEmbeddingsAsync([ + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris", + "Rust is a systems programming language" +]); + +Console.WriteLine($"Number of embeddings: {batchResponse.Data.Count}"); +for (var i = 0; i < batchResponse.Data.Count; i++) +{ + Console.WriteLine($" [{i}] Dimensions: {batchResponse.Data[i].Embedding.Count}"); +} +// + +// +// Tidy up - unload the model +await model.UnloadAsync(); +Console.WriteLine("\nModel unloaded."); +// +// diff --git a/samples/js/README.md b/samples/js/README.md index 28f1e7e7..d334555c 100644 --- a/samples/js/README.md +++ b/samples/js/README.md @@ -11,6 +11,7 @@ These samples demonstrate how to use the Foundry Local JavaScript SDK (`foundry- | Sample | Description | |--------|-------------| | [native-chat-completions](native-chat-completions/) | Initialize the SDK, download a model, and run non-streaming and streaming chat completions. | +| [embeddings](embeddings/) | Generate single and batch text embeddings using the Foundry Local SDK. | | [audio-transcription-example](audio-transcription-example/) | Transcribe audio files using the Whisper model with streaming output. | | [chat-and-audio-foundry-local](chat-and-audio-foundry-local/) | Unified sample demonstrating both chat and audio transcription in one application. | | [electron-chat-application](electron-chat-application/) | Full-featured Electron desktop chat app with voice transcription and model management. | diff --git a/samples/js/embeddings/app.js b/samples/js/embeddings/app.js new file mode 100644 index 00000000..ea6ff185 --- /dev/null +++ b/samples/js/embeddings/app.js @@ -0,0 +1,73 @@ +// +// +import { FoundryLocalManager } from 'foundry-local-sdk'; +// + +// Initialize the Foundry Local SDK +console.log('Initializing Foundry Local SDK...'); + +// +const manager = FoundryLocalManager.create({ + appName: 'foundry_local_samples', + logLevel: 'info' +}); +// +console.log('✓ SDK initialized successfully'); + +// +// Get an embedding model +const modelAlias = 'qwen3-0.6b-embedding'; +const model = await manager.catalog.getModel(modelAlias); + +// Download the model +console.log(`\nDownloading model ${modelAlias}...`); +await model.download((progress) => { + process.stdout.write(`\rDownloading... 
${progress.toFixed(2)}%`); +}); +console.log('\n✓ Model downloaded'); + +// Load the model +console.log(`\nLoading model ${modelAlias}...`); +await model.load(); +console.log('✓ Model loaded'); +// + +// +// Create embedding client +console.log('\nCreating embedding client...'); +const embeddingClient = model.createEmbeddingClient(); +console.log('✓ Embedding client created'); + +// Generate a single embedding +console.log('\n--- Single Embedding ---'); +const response = await embeddingClient.generateEmbedding( + 'The quick brown fox jumps over the lazy dog' +); + +const embedding = response.data[0].embedding; +console.log(`Dimensions: ${embedding.length}`); +console.log(`First 5 values: [${embedding.slice(0, 5).map(v => v.toFixed(6)).join(', ')}]`); +// + +// +// Generate embeddings for multiple inputs +console.log('\n--- Batch Embeddings ---'); +const batchResponse = await embeddingClient.generateEmbeddings([ + 'Machine learning is a subset of artificial intelligence', + 'The capital of France is Paris', + 'Rust is a systems programming language' +]); + +console.log(`Number of embeddings: ${batchResponse.data.length}`); +for (let i = 0; i < batchResponse.data.length; i++) { + console.log(` [${i}] Dimensions: ${batchResponse.data[i].embedding.length}`); +} +// + +// +// Unload the model +console.log('\nUnloading model...'); +await model.unload(); +console.log('✓ Model unloaded'); +// +// diff --git a/samples/js/embeddings/package.json b/samples/js/embeddings/package.json new file mode 100644 index 00000000..8353cb65 --- /dev/null +++ b/samples/js/embeddings/package.json @@ -0,0 +1,15 @@ +{ + "name": "embeddings", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "scripts": { + "start": "node app.js" + }, + "dependencies": { + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "foundry-local-sdk-winml": "latest" + } +} diff --git a/samples/python/README.md b/samples/python/README.md index 391cf123..7262f012 100644 --- a/samples/python/README.md +++ b/samples/python/README.md @@ -11,6 +11,7 @@ These samples demonstrate how to use Foundry Local with Python. | Sample | Description | |--------|-------------| | [native-chat-completions](native-chat-completions/) | Initialize the SDK, start the local service, and run streaming chat completions. | +| [embeddings](embeddings/) | Generate single and batch text embeddings using the Foundry Local SDK. | | [audio-transcription](audio-transcription/) | Transcribe audio files using the Whisper model. | | [web-server](web-server/) | Start a local OpenAI-compatible web server and call it with the OpenAI Python SDK. | | [tool-calling](tool-calling/) | Tool calling with custom function definitions (get_weather, calculate). 
| diff --git a/samples/python/embeddings/requirements.txt b/samples/python/embeddings/requirements.txt new file mode 100644 index 00000000..7602a48b --- /dev/null +++ b/samples/python/embeddings/requirements.txt @@ -0,0 +1,2 @@ +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" diff --git a/samples/python/embeddings/src/app.py b/samples/python/embeddings/src/app.py new file mode 100644 index 00000000..30ade4b2 --- /dev/null +++ b/samples/python/embeddings/src/app.py @@ -0,0 +1,61 @@ +# +# +from foundry_local_sdk import Configuration, FoundryLocalManager +# + + +def main(): + # + # Initialize the Foundry Local SDK + config = Configuration(app_name="foundry_local_samples") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + + # Select and load an embedding model from the catalog + model = manager.catalog.get_model("qwen3-0.6b-embedding") + model.download( + lambda progress: print( + f"\rDownloading model: {progress:.2f}%", + end="", + flush=True, + ) + ) + print() + model.load() + print("Model loaded and ready.") + + # Get an embedding client + client = model.get_embedding_client() + # + + # + # Generate a single embedding + print("\n--- Single Embedding ---") + response = client.generate_embedding("The quick brown fox jumps over the lazy dog") + embedding = response.data[0].embedding + print(f"Dimensions: {len(embedding)}") + print(f"First 5 values: {embedding[:5]}") + # + + # + # Generate embeddings for multiple inputs + print("\n--- Batch Embeddings ---") + batch_response = client.generate_embeddings([ + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris", + "Rust is a systems programming language", + ]) + + print(f"Number of embeddings: {len(batch_response.data)}") + for i, data in enumerate(batch_response.data): + print(f" [{i}] Dimensions: {len(data.embedding)}") + # + + # Clean up + model.unload() + print("\nModel unloaded.") + + +if __name__ == "__main__": + main() +# diff --git a/samples/rust/Cargo.toml b/samples/rust/Cargo.toml index 42d1293f..7be551ea 100644 --- a/samples/rust/Cargo.toml +++ b/samples/rust/Cargo.toml @@ -4,6 +4,7 @@ members = [ "tool-calling-foundry-local", "native-chat-completions", "audio-transcription-example", + "embeddings", "tutorial-chat-assistant", "tutorial-document-summarizer", "tutorial-tool-calling", diff --git a/samples/rust/README.md b/samples/rust/README.md index f2ca4f52..71a66873 100644 --- a/samples/rust/README.md +++ b/samples/rust/README.md @@ -11,6 +11,7 @@ These samples demonstrate how to use the Rust binding for Foundry Local. | Sample | Description | |--------|-------------| | [native-chat-completions](native-chat-completions/) | Non-streaming and streaming chat completions using the native chat client. | +| [embeddings](embeddings/) | Generate single and batch text embeddings using the native embedding client. | | [audio-transcription-example](audio-transcription-example/) | Audio transcription (non-streaming and streaming) using the Whisper model. | | [foundry-local-webserver](foundry-local-webserver/) | Start a local OpenAI-compatible web server and call it with a standard HTTP client. | | [tool-calling-foundry-local](tool-calling-foundry-local/) | Tool calling with streaming responses, multi-turn conversation, and local tool execution. 
| diff --git a/samples/rust/embeddings/Cargo.toml b/samples/rust/embeddings/Cargo.toml new file mode 100644 index 00000000..ebaa21be --- /dev/null +++ b/samples/rust/embeddings/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "embeddings" +version = "0.1.0" +edition = "2021" +description = "Native SDK embeddings (single and batch) using the Foundry Local Rust SDK" + +[dependencies] +foundry-local-sdk = { path = "../../../sdk/rust" } +tokio = { version = "1", features = ["rt-multi-thread", "macros"] } + +[target.'cfg(windows)'.dependencies] +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } diff --git a/samples/rust/embeddings/src/main.rs b/samples/rust/embeddings/src/main.rs new file mode 100644 index 00000000..9b5550f0 --- /dev/null +++ b/samples/rust/embeddings/src/main.rs @@ -0,0 +1,88 @@ +// +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// +use foundry_local_sdk::{FoundryLocalConfig, FoundryLocalManager}; +// + +const ALIAS: &str = "qwen3-0.6b-embedding"; + +#[tokio::main] +async fn main() -> Result<(), Box> { + println!("Native Embeddings"); + println!("=================\n"); + + // ── 1. Initialise the manager ──────────────────────────────────────── + // + let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; + // + + // ── 2. Pick a model and ensure it is downloaded ───────────────────── + // + let model = manager.catalog().get_model(ALIAS).await?; + println!("Model: {} (id: {})", model.alias(), model.id()); + + if !model.is_cached().await? { + println!("Downloading model..."); + model + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); + std::io::Write::flush(&mut std::io::stdout()).ok(); + })) + .await?; + println!(); + } + + println!("Loading model..."); + model.load().await?; + println!("✓ Model loaded\n"); + // + + // ── 3. Create an embedding client ─────────────────────────────────── + // + let client = model.create_embedding_client(); + // + + // ── 4. Single embedding ───────────────────────────────────────────── + // + println!("--- Single Embedding ---"); + let response = client + .generate_embedding("The quick brown fox jumps over the lazy dog") + .await?; + + let embedding = &response.data[0].embedding; + println!("Dimensions: {}", embedding.len()); + println!( + "First 5 values: {:?}", + &embedding[..5] + ); + // + + // ── 5. Batch embeddings ───────────────────────────────────────────── + // + println!("\n--- Batch Embeddings ---"); + let batch_response = client + .generate_embeddings(&[ + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris", + "Rust is a systems programming language", + ]) + .await?; + + println!("Number of embeddings: {}", batch_response.data.len()); + for (i, data) in batch_response.data.iter().enumerate() { + println!(" [{i}] Dimensions: {}", data.embedding.len()); + } + // + + // ── 6. 
Unload the model ───────────────────────────────────────────── + // + println!("\nUnloading model..."); + model.unload().await?; + println!("Done."); + // + + Ok(()) +} +// diff --git a/sdk/cs/README.md b/sdk/cs/README.md index 20580e65..8547434d 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -7,6 +7,7 @@ The Foundry Local C# SDK provides a .NET interface for running AI models locally - **Model catalog** — browse and search all available models; filter by cached or loaded state - **Lifecycle management** — download, load, unload, and remove models programmatically - **Chat completions** — synchronous and `IAsyncEnumerable` streaming via OpenAI-compatible types +- **Embeddings** — generate text embeddings via OpenAI-compatible API - **Audio transcription** — transcribe audio files with streaming support - **Download progress** — wire up an `Action` callback for real-time download percentage - **Model variants** — select specific hardware/quantization variants per model alias @@ -246,6 +247,24 @@ chatClient.Settings.TopP = 0.9f; chatClient.Settings.FrequencyPenalty = 0.5f; ``` +### Embeddings + +```csharp +var embeddingClient = await model.GetEmbeddingClientAsync(); + +// Single input +var response = await embeddingClient.GenerateEmbeddingAsync("The quick brown fox jumps over the lazy dog"); +var embedding = response.Data[0].Embedding; // List +Console.WriteLine($"Dimensions: {embedding.Count}"); + +// Batch input +var batchResponse = await embeddingClient.GenerateEmbeddingsAsync([ + "The quick brown fox", + "The capital of France is Paris" +]); +// batchResponse.Data[0].Embedding, batchResponse.Data[1].Embedding +``` + ### Audio Transcription ```csharp diff --git a/sdk/cs/docs/api/index.md b/sdk/cs/docs/api/index.md index 4d084f87..c83e0a43 100644 --- a/sdk/cs/docs/api/index.md +++ b/sdk/cs/docs/api/index.md @@ -30,6 +30,8 @@ [OpenAIChatClient](./microsoft.ai.foundry.local.openaichatclient.md) +[OpenAIEmbeddingClient](./microsoft.ai.foundry.local.openaiembeddingclient.md) + [Parameter](./microsoft.ai.foundry.local.parameter.md) [PromptTemplate](./microsoft.ai.foundry.local.prompttemplate.md) diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md index 861386a8..95185abe 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md @@ -208,6 +208,24 @@ Optional cancellation token. [Task<OpenAIAudioClient>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
 OpenAI.AudioClient
 
+### **GetEmbeddingClientAsync(Nullable&lt;CancellationToken&gt;)**
+
+Get an OpenAI API based EmbeddingClient
+
+```csharp
+Task<OpenAIEmbeddingClient> GetEmbeddingClientAsync(Nullable<CancellationToken> ct)
+```
+
+#### Parameters
+
+`ct` [Nullable&lt;CancellationToken&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)<br>
+Optional cancellation token.
+
+#### Returns
+
+[Task&lt;OpenAIEmbeddingClient&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
+OpenAI.EmbeddingClient
+
 ### **SelectVariant(IModel)**
 
 Select a model variant from [IModel.Variants](./microsoft.ai.foundry.local.imodel.md#variants) to use for [IModel](./microsoft.ai.foundry.local.imodel.md) operations.
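For orientation, a minimal sketch of how `SelectVariant` composes with the new `GetEmbeddingClientAsync` (illustrative only, not part of the patch; assumes a `catalog` from `GetCatalogAsync` and the `qwen3-0.6b-embedding` alias used by this PR's samples and tests):

```csharp
// Pin a concrete variant from IModel.Variants before requesting clients.
// The "generic-cpu" id fragment mirrors the variant naming in the tests.
var model = await catalog.GetModelAsync("qwen3-0.6b-embedding")
    ?? throw new Exception("Embedding model not found");
var cpuVariant = model.Variants.First(v => v.Id.Contains("generic-cpu"));
model.SelectVariant(cpuVariant);

await model.LoadAsync();
var embeddingClient = await model.GetEmbeddingClientAsync();
```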
diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md
index 23cd67a3..c6eac5f2 100644
--- a/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md
+++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md
@@ -176,6 +176,20 @@ public Task<OpenAIAudioClient> GetAudioClientAsync(Nullable<CancellationToken> c
 
 [Task&lt;OpenAIAudioClient&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
 
+### **GetEmbeddingClientAsync(Nullable&lt;CancellationToken&gt;)**
+
+```csharp
+public Task<OpenAIEmbeddingClient> GetEmbeddingClientAsync(Nullable<CancellationToken> ct)
+```
+
+#### Parameters
+
+`ct` [Nullable&lt;CancellationToken&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)<br>
+
+#### Returns
+
+[Task&lt;OpenAIEmbeddingClient&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
+
 ### **UnloadAsync(Nullable&lt;CancellationToken&gt;)**
 
 ```csharp
diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.modelvariant.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.modelvariant.md
index 1f674511..cc2b20a6 100644
--- a/sdk/cs/docs/api/microsoft.ai.foundry.local.modelvariant.md
+++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.modelvariant.md
@@ -181,3 +181,17 @@ public Task<OpenAIAudioClient> GetAudioClientAsync(Nullable<CancellationToken> c
 #### Returns
 
 [Task&lt;OpenAIAudioClient&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
+
+### **GetEmbeddingClientAsync(Nullable&lt;CancellationToken&gt;)**
+
+```csharp
+public Task<OpenAIEmbeddingClient> GetEmbeddingClientAsync(Nullable<CancellationToken> ct)
+```
+
+#### Parameters
+
+`ct` [Nullable&lt;CancellationToken&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)<br>
+
+#### Returns
+
+[Task&lt;OpenAIEmbeddingClient&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
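The three pages above document signatures only, so a short usage sketch may help (illustrative, not part of the patch; it relies on a property the new test suites assert, namely that returned vectors are L2-normalized, which makes a plain dot product equal to cosine similarity):

```csharp
// Assumes an initialized FoundryLocalManager and a loaded embedding model,
// as in samples/cs/embeddings/Program.cs later in this diff; needs System.Linq.
var embeddingClient = await model.GetEmbeddingClientAsync();

var a = (await embeddingClient.GenerateEmbeddingAsync("How do I reset my password?")).Data[0].Embedding;
var b = (await embeddingClient.GenerateEmbeddingAsync("Steps to recover a forgotten password")).Data[0].Embedding;

// Unit-length vectors per the normalization tests: dot product == cosine similarity.
var similarity = a.Zip(b, (x, y) => x * y).Sum();
Console.WriteLine($"Cosine similarity: {similarity:F4}");
```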
diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiembeddingclient.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiembeddingclient.md
new file mode 100644
index 00000000..745413f5
--- /dev/null
+++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiembeddingclient.md
@@ -0,0 +1,57 @@
+# OpenAIEmbeddingClient
+
+Namespace: Microsoft.AI.Foundry.Local
+
+Embedding Client that uses the OpenAI API.
+ Implemented using Betalgo.Ranul.OpenAI SDK types.
+
+```csharp
+public class OpenAIEmbeddingClient
+```
+
+Inheritance [Object](https://docs.microsoft.com/en-us/dotnet/api/system.object) → [OpenAIEmbeddingClient](./microsoft.ai.foundry.local.openaiembeddingclient.md)<br>
+Attributes [NullableContextAttribute](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.compilerservices.nullablecontextattribute), [NullableAttribute](https://docs.microsoft.com/en-us/dotnet/api/system.runtime.compilerservices.nullableattribute)
+
+## Methods
+
+### **GenerateEmbeddingAsync(String, Nullable&lt;CancellationToken&gt;)**
+
+Generate embeddings for the given input text.
+
+```csharp
+public Task<EmbeddingCreateResponse> GenerateEmbeddingAsync(string input, Nullable<CancellationToken> ct)
+```
+
+#### Parameters
+
+`input` [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)<br>
+The text to generate embeddings for.
+
+`ct` [Nullable&lt;CancellationToken&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)<br>
+Optional cancellation token.
+
+#### Returns
+
+[Task&lt;EmbeddingCreateResponse&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
+Embedding response containing the embedding vector.
+
+### **GenerateEmbeddingsAsync(IEnumerable&lt;String&gt;, Nullable&lt;CancellationToken&gt;)**
+
+Generate embeddings for multiple input texts in a single request.
+
+```csharp
+public Task<EmbeddingCreateResponse> GenerateEmbeddingsAsync(IEnumerable<string> inputs, Nullable<CancellationToken> ct)
+```
+
+#### Parameters
+
+`inputs` [IEnumerable&lt;String&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)<br>
+The texts to generate embeddings for.
+
+`ct` [Nullable&lt;CancellationToken&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)<br>
+Optional cancellation token.
+
+#### Returns
+
+[Task&lt;EmbeddingCreateResponse&gt;](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)<br>
+Embedding response containing one embedding vector per input.
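The batch contract documented above is order-preserving: `Data[i].Index == i`, as the new `EmbeddingClientTests` assert. A small sketch (illustrative, not part of the patch; assumes an `embeddingClient` from `GetEmbeddingClientAsync`):

```csharp
var inputs = new[] { "first sentence", "second sentence", "third sentence" };
var batch = await embeddingClient.GenerateEmbeddingsAsync(inputs);

foreach (var item in batch.Data)
{
    // Index maps each embedding back to its input position.
    Console.WriteLine($"[{item.Index}] \"{inputs[item.Index]}\" -> {item.Embedding.Count} dims");
}
```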
diff --git a/sdk/cs/src/Detail/JsonSerializationContext.cs b/sdk/cs/src/Detail/JsonSerializationContext.cs
index 37cc81ac..0fe5e677 100644
--- a/sdk/cs/src/Detail/JsonSerializationContext.cs
+++ b/sdk/cs/src/Detail/JsonSerializationContext.cs
@@ -23,6 +23,8 @@ namespace Microsoft.AI.Foundry.Local.Detail;
 [JsonSerializable(typeof(ChatCompletionCreateResponse))]
 [JsonSerializable(typeof(AudioCreateTranscriptionRequest))]
 [JsonSerializable(typeof(AudioCreateTranscriptionResponse))]
+[JsonSerializable(typeof(EmbeddingCreateRequestExtended))]
+[JsonSerializable(typeof(EmbeddingCreateResponse))]
 [JsonSerializable(typeof(string[]))] // list loaded or cached models
 [JsonSerializable(typeof(EpInfo[]))]
 [JsonSerializable(typeof(EpDownloadResult))]
diff --git a/sdk/cs/src/Detail/Model.cs b/sdk/cs/src/Detail/Model.cs
index c4d96057..03e9321b 100644
--- a/sdk/cs/src/Detail/Model.cs
+++ b/sdk/cs/src/Detail/Model.cs
@@ -99,6 +99,11 @@ public async Task<OpenAIAudioClient> GetAudioClientAsync(CancellationToken? ct =
         return await SelectedVariant.GetAudioClientAsync(ct).ConfigureAwait(false);
     }
 
+    public async Task<OpenAIEmbeddingClient> GetEmbeddingClientAsync(CancellationToken? ct = null)
+    {
+        return await SelectedVariant.GetEmbeddingClientAsync(ct).ConfigureAwait(false);
+    }
+
     public async Task UnloadAsync(CancellationToken? ct = null)
     {
         await SelectedVariant.UnloadAsync(ct).ConfigureAwait(false);
diff --git a/sdk/cs/src/Detail/ModelVariant.cs b/sdk/cs/src/Detail/ModelVariant.cs
index 9f2deaba..250c601a 100644
--- a/sdk/cs/src/Detail/ModelVariant.cs
+++ b/sdk/cs/src/Detail/ModelVariant.cs
@@ -102,6 +102,13 @@ public async Task<OpenAIAudioClient> GetAudioClientAsync(CancellationToken? ct =
             .ConfigureAwait(false);
     }
 
+    public async Task<OpenAIEmbeddingClient> GetEmbeddingClientAsync(CancellationToken? ct = null)
+    {
+        return await Utils.CallWithExceptionHandling(() => GetEmbeddingClientImplAsync(ct),
+            "Error getting embedding client for model", _logger)
+            .ConfigureAwait(false);
+    }
+
     private async Task<bool> IsLoadedImplAsync(CancellationToken? ct = null)
     {
         var loadedModels = await _modelLoadManager.ListLoadedModelsAsync(ct).ConfigureAwait(false);
@@ -193,6 +200,16 @@ private async Task<OpenAIAudioClient> GetAudioClientImplAsync(CancellationToken?
         return new OpenAIAudioClient(Id);
     }
 
+    private async Task<OpenAIEmbeddingClient> GetEmbeddingClientImplAsync(CancellationToken? ct = null)
+    {
+        if (!await IsLoadedAsync(ct))
+        {
+            throw new FoundryLocalException($"Model {Id} is not loaded. Call LoadAsync first.");
+        }
+
+        return new OpenAIEmbeddingClient(Id);
+    }
+
     public void SelectVariant(IModel variant)
     {
         throw new FoundryLocalException(
diff --git a/sdk/cs/src/IModel.cs b/sdk/cs/src/IModel.cs
index a27f3a3d..37249782 100644
--- a/sdk/cs/src/IModel.cs
+++ b/sdk/cs/src/IModel.cs
@@ -70,6 +70,13 @@ Task DownloadAsync(Action<float>? downloadProgress = null,
     /// <returns>OpenAI.AudioClient</returns>
     Task<OpenAIAudioClient> GetAudioClientAsync(CancellationToken? ct = null);
 
+    /// <summary>
+    /// Get an OpenAI API based EmbeddingClient
+    /// </summary>
+    /// <param name="ct">Optional cancellation token.</param>
+    /// <returns>OpenAI.EmbeddingClient</returns>
+    Task<OpenAIEmbeddingClient> GetEmbeddingClientAsync(CancellationToken? ct = null);
+
     /// <summary>
     /// Variants of the model that are available. Variants of the model are optimized for different devices.
     /// </summary>
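`ModelVariant.GetEmbeddingClientImplAsync` above makes the load-first contract explicit: requesting a client for an unloaded model throws `FoundryLocalException`. A defensive call pattern (illustrative, not part of the patch; `IsLoadedAsync` is used the same way in the tests):

```csharp
// Load on demand before asking for an embedding client.
if (!await model.IsLoadedAsync())
{
    await model.LoadAsync();
}
var embeddingClient = await model.GetEmbeddingClientAsync();
```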
diff --git a/sdk/cs/src/OpenAI/EmbeddingClient.cs b/sdk/cs/src/OpenAI/EmbeddingClient.cs
new file mode 100644
index 00000000..91877f47
--- /dev/null
+++ b/sdk/cs/src/OpenAI/EmbeddingClient.cs
@@ -0,0 +1,102 @@
+// --------------------------------------------------------------------------------------------------------------------
+//
+// Copyright (c) Microsoft. All rights reserved.
+//
+// --------------------------------------------------------------------------------------------------------------------
+
+namespace Microsoft.AI.Foundry.Local;
+
+using Betalgo.Ranul.OpenAI.ObjectModels.ResponseModels;
+
+using Microsoft.AI.Foundry.Local.Detail;
+using Microsoft.AI.Foundry.Local.OpenAI;
+using Microsoft.Extensions.Logging;
+
+/// <summary>
+/// Embedding Client that uses the OpenAI API.
+/// Implemented using Betalgo.Ranul.OpenAI SDK types.
+/// </summary>
+public class OpenAIEmbeddingClient
+{
+    private readonly string _modelId;
+
+    private readonly ICoreInterop _coreInterop = FoundryLocalManager.Instance.CoreInterop;
+    private readonly ILogger _logger = FoundryLocalManager.Instance.Logger;
+
+    internal OpenAIEmbeddingClient(string modelId)
+    {
+        _modelId = modelId;
+    }
+
+    /// <summary>
+    /// Generate embeddings for the given input text.
+    /// </summary>
+    /// <param name="input">The text to generate embeddings for.</param>
+    /// <param name="ct">Optional cancellation token.</param>
+    /// <returns>Embedding response containing the embedding vector.</returns>
+    public async Task<EmbeddingCreateResponse> GenerateEmbeddingAsync(string input,
+        CancellationToken? ct = null)
+    {
+        return await Utils.CallWithExceptionHandling(
+            () => GenerateEmbeddingImplAsync(input, ct),
+            "Error during embedding generation.", _logger).ConfigureAwait(false);
+    }
+
+    /// <summary>
+    /// Generate embeddings for multiple input texts in a single request.
+    /// </summary>
+    /// <param name="inputs">The texts to generate embeddings for.</param>
+    /// <param name="ct">Optional cancellation token.</param>
+    /// <returns>Embedding response containing one embedding vector per input.</returns>
+    public async Task<EmbeddingCreateResponse> GenerateEmbeddingsAsync(IEnumerable<string> inputs,
+        CancellationToken? ct = null)
+    {
+        return await Utils.CallWithExceptionHandling(
+            () => GenerateEmbeddingsImplAsync(inputs, ct),
+            "Error during batch embedding generation.", _logger).ConfigureAwait(false);
+    }
+
+    private async Task<EmbeddingCreateResponse> GenerateEmbeddingImplAsync(string input,
+        CancellationToken? ct)
+    {
+        if (string.IsNullOrWhiteSpace(input))
+        {
+            throw new ArgumentException("Input must be a non-empty string.", nameof(input));
+        }
+
+        var embeddingRequest = EmbeddingCreateRequestExtended.FromUserInput(_modelId, input);
+        var embeddingRequestJson = embeddingRequest.ToJson();
+
+        var request = new CoreInteropRequest { Params = new() { { "OpenAICreateRequest", embeddingRequestJson } } };
+        var response = await _coreInterop.ExecuteCommandAsync("embeddings", request,
+            ct ?? CancellationToken.None).ConfigureAwait(false);
+
+        return response.ToEmbeddingResponse(_logger);
+    }
+
+    private async Task<EmbeddingCreateResponse> GenerateEmbeddingsImplAsync(IEnumerable<string> inputs,
+        CancellationToken? ct)
+    {
+        if (inputs == null || !inputs.Any())
+        {
+            throw new ArgumentException("Inputs must be a non-empty array of strings.", nameof(inputs));
+        }
+
+        foreach (var input in inputs)
+        {
+            if (string.IsNullOrWhiteSpace(input))
+            {
+                throw new ArgumentException("Each input must be a non-empty string.", nameof(inputs));
+            }
+        }
+
+        var embeddingRequest = EmbeddingCreateRequestExtended.FromUserInput(_modelId, inputs);
+        var embeddingRequestJson = embeddingRequest.ToJson();
+
+        var request = new CoreInteropRequest { Params = new() { { "OpenAICreateRequest", embeddingRequestJson } } };
+        var response = await _coreInterop.ExecuteCommandAsync("embeddings", request,
+            ct ??
CancellationToken.None).ConfigureAwait(false); + + return response.ToEmbeddingResponse(_logger); + } +} diff --git a/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs b/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs new file mode 100644 index 00000000..0939fbaa --- /dev/null +++ b/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs @@ -0,0 +1,74 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.OpenAI; + +using System.Text.Json; + +using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; +using Betalgo.Ranul.OpenAI.ObjectModels.ResponseModels; + +using Microsoft.AI.Foundry.Local.Detail; +using Microsoft.Extensions.Logging; + +// https://platform.openai.com/docs/api-reference/embeddings/create +internal record EmbeddingCreateRequestExtended : EmbeddingCreateRequest +{ + internal static EmbeddingCreateRequestExtended FromUserInput(string modelId, string input) + { + return new EmbeddingCreateRequestExtended + { + Model = modelId, + Input = input, + }; + } + + internal static EmbeddingCreateRequestExtended FromUserInput(string modelId, IEnumerable inputs) + { + return new EmbeddingCreateRequestExtended + { + Model = modelId, + InputAsList = inputs.ToList(), + }; + } +} + +internal static class EmbeddingRequestResponseExtensions +{ + internal static string ToJson(this EmbeddingCreateRequestExtended request) + { + return JsonSerializer.Serialize(request, JsonSerializationContext.Default.EmbeddingCreateRequestExtended); + } + + internal static EmbeddingCreateResponse ToEmbeddingResponse(this ICoreInterop.Response response, ILogger logger) + { + if (response.Error != null) + { + logger.LogError("Error from embeddings: {Error}", response.Error); + throw new FoundryLocalException($"Error from embeddings command: {response.Error}"); + } + + if (string.IsNullOrWhiteSpace(response.Data)) + { + logger.LogError("Embeddings command returned no data"); + throw new FoundryLocalException("Embeddings command returned null or empty response data"); + } + + return response.Data.ToEmbeddingResponse(logger); + } + + internal static EmbeddingCreateResponse ToEmbeddingResponse(this string responseData, ILogger logger) + { + var output = JsonSerializer.Deserialize(responseData, JsonSerializationContext.Default.EmbeddingCreateResponse); + if (output == null) + { + logger.LogError("Failed to deserialize embedding response: {ResponseData}", responseData); + throw new JsonException("Failed to deserialize EmbeddingCreateResponse"); + } + + return output; + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs b/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs new file mode 100644 index 00000000..a5123cb0 --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs @@ -0,0 +1,273 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.Tests; + +using System.Threading.Tasks; + +internal sealed class EmbeddingClientTests +{ + private static IModel? 
model; + + [Before(Class)] + public static async Task Setup() + { + var manager = FoundryLocalManager.Instance; // initialized by Utils + var catalog = await manager.GetCatalogAsync(); + + // Reduce max_length in the embedding model's genai_config.json to avoid OOM + // when allocating the KV cache. Embedding models only need a single forward pass + // so a large max_length is unnecessary. + Utils.PatchModelMaxLength("qwen3-0.6b-embedding-generic-cpu-1", "v1"); + + // Load the specific cached model variant directly + var model = await catalog.GetModelVariantAsync("qwen3-0.6b-embedding-generic-cpu:1").ConfigureAwait(false); + await Assert.That(model).IsNotNull(); + + await model!.LoadAsync().ConfigureAwait(false); + await Assert.That(await model.IsLoadedAsync()).IsTrue(); + + EmbeddingClientTests.model = model; + } + + [After(Class)] + public static async Task Cleanup() + { + if (model != null && await model.IsLoadedAsync()) + { + await model.UnloadAsync().ConfigureAwait(false); + } + } + + [Test] + public async Task Embedding_BasicRequest_Succeeds() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var response = await embeddingClient.GenerateEmbeddingAsync("The quick brown fox jumps over the lazy dog") + .ConfigureAwait(false); + + await Assert.That(response).IsNotNull(); + await Assert.That(response.Model).IsEqualTo("qwen3-0.6b-embedding-generic-cpu:1"); + await Assert.That(response.Data).IsNotNull().And.IsNotEmpty(); + await Assert.That(response.Data[0].Embedding).IsNotNull(); + await Assert.That(response.Data[0].Embedding.Count).IsEqualTo(1024); + await Assert.That(response.Data[0].Index).IsEqualTo(0); + + Console.WriteLine($"Embedding dimension: {response.Data[0].Embedding.Count}"); + Console.WriteLine($"First value: {response.Data[0].Embedding[0]}"); + Console.WriteLine($"Last value: {response.Data[0].Embedding[1023]}"); + } + + [Test] + public async Task Embedding_IsNormalized() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var inputs = new[] + { + "The quick brown fox jumps over the lazy dog", + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris" + }; + + foreach (var input in inputs) + { + var response = await embeddingClient.GenerateEmbeddingAsync(input).ConfigureAwait(false); + + await Assert.That(response).IsNotNull(); + await Assert.That(response.Data).IsNotNull().And.IsNotEmpty(); + + var embedding = response.Data[0].Embedding; + + await Assert.That(embedding.Count).IsEqualTo(1024); + + // Verify L2 norm is approximately 1.0 + double norm = 0; + foreach (var val in embedding) + { + norm += val * val; + } + + norm = Math.Sqrt(norm); + await Assert.That(norm).IsGreaterThanOrEqualTo(0.99); + await Assert.That(norm).IsLessThanOrEqualTo(1.01); + + // All values should be within [-1, 1] for a normalized vector + foreach (var val in embedding) + { + await Assert.That(val).IsGreaterThanOrEqualTo(-1.0); + await Assert.That(val).IsLessThanOrEqualTo(1.0); + } + } + } + + [Test] + public async Task Embedding_DifferentInputs_ProduceDifferentEmbeddings() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var response1 = await embeddingClient.GenerateEmbeddingAsync("The quick brown fox").ConfigureAwait(false); + var response2 = await embeddingClient.GenerateEmbeddingAsync("The capital of France is Paris").ConfigureAwait(false); + + 
await Assert.That(response1).IsNotNull(); + await Assert.That(response2).IsNotNull(); + await Assert.That(response1.Data).IsNotNull().And.IsNotEmpty(); + await Assert.That(response2.Data).IsNotNull().And.IsNotEmpty(); + + // Same dimensionality + await Assert.That(response1.Data[0].Embedding.Count) + .IsEqualTo(response2.Data[0].Embedding.Count); + + // But different values (cosine similarity should not be 1.0) + double dot = 0; + for (int i = 0; i < response1.Data[0].Embedding.Count; i++) + { + dot += response1.Data[0].Embedding[i] * response2.Data[0].Embedding[i]; + } + + await Assert.That(dot).IsLessThan(0.99); + } + + [Test] + public async Task Embedding_SameInput_ProducesSameEmbedding() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var input = "Deterministic embedding test"; + + var response1 = await embeddingClient.GenerateEmbeddingAsync(input).ConfigureAwait(false); + var response2 = await embeddingClient.GenerateEmbeddingAsync(input).ConfigureAwait(false); + + await Assert.That(response1).IsNotNull(); + await Assert.That(response2).IsNotNull(); + await Assert.That(response1.Data).IsNotNull().And.IsNotEmpty(); + await Assert.That(response2.Data).IsNotNull().And.IsNotEmpty(); + + await Assert.That(response1.Data[0].Embedding.Count) + .IsEqualTo(response2.Data[0].Embedding.Count); + + for (int i = 0; i < response1.Data[0].Embedding.Count; i++) + { + await Assert.That(response1.Data[0].Embedding[i]) + .IsEqualTo(response2.Data[0].Embedding[i]); + } + } + + [Test] + public async Task Embedding_KnownValues_CapitalOfFrance() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var response = await embeddingClient.GenerateEmbeddingAsync("The capital of France is Paris") + .ConfigureAwait(false); + await Assert.That(response).IsNotNull(); + await Assert.That(response.Data).IsNotNull().And.IsNotEmpty(); + var embedding = response.Data[0].Embedding; + + await Assert.That(embedding.Count).IsEqualTo(1024); + + // Use tolerance for float32 model outputs which may vary across hardware + const double tolerance = 1e-3; + await Assert.That(Math.Abs(embedding[0] - (-0.02815740555524826))).IsLessThanOrEqualTo(tolerance); + await Assert.That(Math.Abs(embedding[1023] - (-0.00887922290712595))).IsLessThanOrEqualTo(tolerance); + } + + [Test] + public async Task Embedding_EmptyInput_ThrowsException() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + await Assert.That(async () => await embeddingClient.GenerateEmbeddingAsync("").ConfigureAwait(false)) + .ThrowsException(); + } + + [Test] + public async Task Embedding_EmptyBatch_ThrowsException() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + await Assert.That(async () => await embeddingClient.GenerateEmbeddingsAsync(Array.Empty()).ConfigureAwait(false)) + .ThrowsException(); + } + + [Test] + public async Task Embedding_Batch_ReturnsMultipleEmbeddings() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var response = await embeddingClient.GenerateEmbeddingsAsync([ + "The quick brown fox jumps over the lazy dog", + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris" + ]).ConfigureAwait(false); + + await Assert.That(response).IsNotNull(); + await 
Assert.That(response.Data).IsNotNull().And.IsNotEmpty(); + await Assert.That(response.Data.Count).IsEqualTo(3); + + for (var i = 0; i < 3; i++) + { + await Assert.That(response.Data[i].Index).IsEqualTo(i); + await Assert.That(response.Data[i].Embedding.Count).IsEqualTo(1024); + } + } + + [Test] + public async Task Embedding_Batch_EachEmbeddingIsNormalized() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var response = await embeddingClient.GenerateEmbeddingsAsync([ + "Hello world", + "Goodbye world" + ]).ConfigureAwait(false); + + await Assert.That(response.Data.Count).IsEqualTo(2); + + foreach (var data in response.Data) + { + double norm = 0; + foreach (var val in data.Embedding) + { + norm += val * val; + } + + norm = Math.Sqrt(norm); + await Assert.That(norm).IsGreaterThanOrEqualTo(0.99); + await Assert.That(norm).IsLessThanOrEqualTo(1.01); + } + } + + [Test] + public async Task Embedding_Batch_MatchesSingleInputResults() + { + var embeddingClient = await model!.GetEmbeddingClientAsync(); + await Assert.That(embeddingClient).IsNotNull(); + + var input = "The capital of France is Paris"; + + var singleResponse = await embeddingClient.GenerateEmbeddingAsync(input).ConfigureAwait(false); + var batchResponse = await embeddingClient.GenerateEmbeddingsAsync([input]).ConfigureAwait(false); + + await Assert.That(batchResponse.Data.Count).IsEqualTo(1); + + for (var i = 0; i < singleResponse.Data[0].Embedding.Count; i++) + { + await Assert.That(batchResponse.Data[0].Embedding[i]) + .IsEqualTo(singleResponse.Data[0].Embedding[i]); + } + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/Utils.cs b/sdk/cs/test/FoundryLocal.Tests/Utils.cs index 9611d0d4..a289011b 100644 --- a/sdk/cs/test/FoundryLocal.Tests/Utils.cs +++ b/sdk/cs/test/FoundryLocal.Tests/Utils.cs @@ -451,4 +451,26 @@ private static string GetRepoRoot() throw new InvalidOperationException("Could not find git repository root from test file location"); } + + /// + /// Patches max_length in a cached model's genai_config.json to a small value. + /// ORT GenAI allocates a KV cache sized by max_length; the default (32768) can cause + /// OOM when multiple models are loaded. Embedding models only need a single forward pass + /// so a small max_length is sufficient. 
+ /// + internal static void PatchModelMaxLength(string modelDirName, string variantSubDir, int newMaxLength = 512) + { + var repoRoot = new DirectoryInfo(GetRepoRoot()); + var configPath = Path.Combine(repoRoot.Parent!.FullName, "test-data-shared", + modelDirName, variantSubDir, "genai_config.json"); + + if (!File.Exists(configPath)) return; + + var json = File.ReadAllText(configPath); + if (json.Contains("\"max_length\": 32768")) + { + json = json.Replace("\"max_length\": 32768", $"\"max_length\": {newMaxLength}"); + File.WriteAllText(configPath, json); + } + } } diff --git a/sdk/js/README.md b/sdk/js/README.md index b2fe31dd..ff1ac542 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -8,6 +8,7 @@ The Foundry Local JS SDK provides a JavaScript/TypeScript interface for running - **Model catalog** — Browse and discover available models, check what's cached or loaded - **Automatic model management** — Download, load, unload, and remove models from cache - **Chat completions** — OpenAI-compatible chat API with both synchronous and streaming responses +- **Embeddings** — Generate text embeddings via OpenAI-compatible API - **Audio transcription** — Transcribe audio files locally with streaming support - **Multi-variant models** — Models can have multiple variants (e.g., different quantizations) with automatic selection of the best cached variant - **Embedded web service** — Start a local HTTP service for OpenAI-compatible API access @@ -204,6 +205,28 @@ for await (const chunk of chatClient.completeStreamingChat( } ``` +### Embeddings + +Generate text embeddings using the `EmbeddingClient`: + +```typescript +const embeddingClient = model.createEmbeddingClient(); + +// Single input +const response = await embeddingClient.generateEmbedding( + 'The quick brown fox jumps over the lazy dog' +); +const embedding = response.data[0].embedding; // number[] +console.log(`Dimensions: ${embedding.length}`); + +// Batch input +const batchResponse = await embeddingClient.generateEmbeddings([ + 'The quick brown fox', + 'The capital of France is Paris' +]); +// batchResponse.data[0].embedding, batchResponse.data[1].embedding +``` + ### Audio Transcription Transcribe audio files locally using the `AudioClient`: diff --git a/sdk/js/docs/README.md b/sdk/js/docs/README.md index b0167b4d..8be2e1e4 100644 --- a/sdk/js/docs/README.md +++ b/sdk/js/docs/README.md @@ -20,6 +20,7 @@ - [Catalog](classes/Catalog.md) - [ChatClient](classes/ChatClient.md) - [ChatClientSettings](classes/ChatClientSettings.md) +- [EmbeddingClient](classes/EmbeddingClient.md) - [FoundryLocalManager](classes/FoundryLocalManager.md) - [Model](classes/Model.md) - [ModelLoadManager](classes/ModelLoadManager.md) diff --git a/sdk/js/src/detail/model.ts b/sdk/js/src/detail/model.ts index 46245ee5..c1ee0d5f 100644 --- a/sdk/js/src/detail/model.ts +++ b/sdk/js/src/detail/model.ts @@ -1,6 +1,7 @@ import { ModelVariant } from './modelVariant.js'; import { ChatClient } from '../openai/chatClient.js'; import { AudioClient } from '../openai/audioClient.js'; +import { EmbeddingClient } from '../openai/embeddingClient.js'; import { ResponsesClient } from '../openai/responsesClient.js'; import { LiveAudioTranscriptionSession } from '../openai/liveAudioTranscriptionClient.js'; import { IModel } from '../imodel.js'; @@ -177,6 +178,14 @@ export class Model implements IModel { return this.selectedVariant.createAudioClient(); } + /** + * Creates an EmbeddingClient for generating text embeddings with the model. + * @returns An EmbeddingClient instance. 
+ */ + public createEmbeddingClient(): EmbeddingClient { + return this.selectedVariant.createEmbeddingClient(); + } + /** * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. * @returns A LiveAudioTranscriptionSession instance. diff --git a/sdk/js/src/detail/modelVariant.ts b/sdk/js/src/detail/modelVariant.ts index d1c1e20c..43484bac 100644 --- a/sdk/js/src/detail/modelVariant.ts +++ b/sdk/js/src/detail/modelVariant.ts @@ -3,6 +3,7 @@ import { ModelLoadManager } from './modelLoadManager.js'; import { ModelInfo } from '../types.js'; import { ChatClient } from '../openai/chatClient.js'; import { AudioClient } from '../openai/audioClient.js'; +import { EmbeddingClient } from '../openai/embeddingClient.js'; import { LiveAudioTranscriptionSession } from '../openai/liveAudioTranscriptionClient.js'; import { ResponsesClient } from '../openai/responsesClient.js'; import { IModel } from '../imodel.js'; @@ -170,6 +171,14 @@ export class ModelVariant implements IModel { return new AudioClient(this._modelInfo.id, this.coreInterop); } + /** + * Creates an EmbeddingClient for generating text embeddings with the model. + * @returns An EmbeddingClient instance. + */ + public createEmbeddingClient(): EmbeddingClient { + return new EmbeddingClient(this._modelInfo.id, this.coreInterop); + } + /** * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. * @returns A LiveAudioTranscriptionSession instance. diff --git a/sdk/js/src/imodel.ts b/sdk/js/src/imodel.ts index 7a2f5a2c..8f9bd0c1 100644 --- a/sdk/js/src/imodel.ts +++ b/sdk/js/src/imodel.ts @@ -1,5 +1,6 @@ import { ChatClient } from './openai/chatClient.js'; import { AudioClient } from './openai/audioClient.js'; +import { EmbeddingClient } from './openai/embeddingClient.js'; import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; import { ResponsesClient } from './openai/responsesClient.js'; import { ModelInfo } from './types.js'; @@ -25,6 +26,7 @@ export interface IModel { createChatClient(): ChatClient; createAudioClient(): AudioClient; + createEmbeddingClient(): EmbeddingClient; /** * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. diff --git a/sdk/js/src/index.ts b/sdk/js/src/index.ts index 42b498c3..bc27293b 100644 --- a/sdk/js/src/index.ts +++ b/sdk/js/src/index.ts @@ -8,6 +8,7 @@ export { ModelVariant } from './detail/modelVariant.js'; export type { IModel } from './imodel.js'; export { ChatClient, ChatClientSettings } from './openai/chatClient.js'; export { AudioClient, AudioClientSettings } from './openai/audioClient.js'; +export { EmbeddingClient } from './openai/embeddingClient.js'; export { LiveAudioTranscriptionSession, LiveAudioTranscriptionOptions } from './openai/liveAudioTranscriptionClient.js'; export type { LiveAudioTranscriptionResponse, TranscriptionContentPart } from './openai/liveAudioTranscriptionTypes.js'; export { ResponsesClient, ResponsesClientSettings, getOutputText } from './openai/responsesClient.js'; diff --git a/sdk/js/src/openai/embeddingClient.ts b/sdk/js/src/openai/embeddingClient.ts new file mode 100644 index 00000000..ab415e0f --- /dev/null +++ b/sdk/js/src/openai/embeddingClient.ts @@ -0,0 +1,86 @@ +import { CoreInterop } from '../detail/coreInterop.js'; + +/** + * Client for generating text embeddings with a loaded model. + * Follows the OpenAI Embeddings API structure. 
+ */
+export class EmbeddingClient {
+  private modelId: string;
+  private coreInterop: CoreInterop;
+
+  /**
+   * @internal
+   * Restricted to internal use because CoreInterop is an internal implementation detail.
+   * Users should create clients via the Model.createEmbeddingClient() factory method.
+   */
+  constructor(modelId: string, coreInterop: CoreInterop) {
+    this.modelId = modelId;
+    this.coreInterop = coreInterop;
+  }
+
+  /**
+   * Validates that the input text is a non-empty string.
+   * @internal
+   */
+  private validateInput(input: string): void {
+    if (typeof input !== 'string' || input.trim() === '') {
+      throw new Error('Input must be a non-empty string.');
+    }
+  }
+
+  /**
+   * Validates that the inputs array is non-empty and all elements are non-empty strings.
+   * @internal
+   */
+  private validateInputs(inputs: string[]): void {
+    if (!inputs || !Array.isArray(inputs) || inputs.length === 0) {
+      throw new Error('Inputs must be a non-empty array of strings.');
+    }
+    for (const input of inputs) {
+      this.validateInput(input);
+    }
+  }
+
+  /**
+   * Sends an embedding request and parses the response.
+   * @internal
+   */
+  private executeRequest(input: string | string[]): any {
+    const request = {
+      model: this.modelId,
+      input,
+    };
+
+    try {
+      const response = this.coreInterop.executeCommand('embeddings', {
+        Params: { OpenAICreateRequest: JSON.stringify(request) }
+      });
+      return JSON.parse(response);
+    } catch (error: any) {
+      throw new Error(
+        `Embedding generation failed for model '${this.modelId}': ${error instanceof Error ? error.message : String(error)}`,
+        { cause: error }
+      );
+    }
+  }
+
+  /**
+   * Generates embeddings for the given input text.
+   * @param input - The text to generate embeddings for.
+   * @returns The embedding response containing the embedding vector.
+   */
+  public async generateEmbedding(input: string): Promise<any> {
+    this.validateInput(input);
+    return this.executeRequest(input);
+  }
+
+  /**
+   * Generates embeddings for multiple input texts in a single request.
+   * @param inputs - The texts to generate embeddings for.
+   * @returns The embedding response containing one embedding vector per input.
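+   * @example
+   * // Sketch: batch entries come back indexed in input order (the sample
+   * // strings are placeholders).
+   * const res = await client.generateEmbeddings(['first text', 'second text']);
+   * console.log(res.data[1].index); // 1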
+   */
+  public async generateEmbeddings(inputs: string[]): Promise<any> {
+    this.validateInputs(inputs);
+    return this.executeRequest(inputs);
+  }
+}
diff --git a/sdk/js/test/openai/embeddingClient.test.ts b/sdk/js/test/openai/embeddingClient.test.ts
new file mode 100644
index 00000000..96824964
--- /dev/null
+++ b/sdk/js/test/openai/embeddingClient.test.ts
@@ -0,0 +1,295 @@
+import { describe, it } from 'mocha';
+import { expect } from 'chai';
+import { getTestManager, EMBEDDING_MODEL_ALIAS } from '../testUtils.js';
+
+describe('Embedding Client Tests', () => {
+
+  it('should generate embedding', async function() {
+    this.timeout(30000);
+    const manager = getTestManager();
+    const catalog = manager.catalog;
+
+    const cachedModels = await catalog.getCachedModels();
+    expect(cachedModels.length).to.be.greaterThan(0);
+
+    const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS);
+    expect(cachedVariant, 'qwen3-0.6b-embedding-generic-cpu should be cached').to.not.be.undefined;
+
+    const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS);
+    expect(model).to.not.be.undefined;
+    if (!cachedVariant) return;
+
+    model.selectVariant(cachedVariant);
+    await model.load();
+
+    try {
+      const embeddingClient = model.createEmbeddingClient();
+      expect(embeddingClient).to.not.be.undefined;
+
+      const response = await embeddingClient.generateEmbedding(
+        'The quick brown fox jumps over the lazy dog'
+      );
+
+      expect(response).to.not.be.undefined;
+      expect(response.data).to.be.an('array').with.length.greaterThan(0);
+      expect(response.data[0].embedding).to.be.an('array');
+      expect(response.data[0].embedding.length).to.equal(1024);
+      expect(response.data[0].index).to.equal(0);
+
+      console.log(`Embedding dimension: ${response.data[0].embedding.length}`);
+    } finally {
+      await model.unload();
+    }
+  });
+
+  it('should generate normalized embedding', async function() {
+    this.timeout(30000);
+    const manager = getTestManager();
+    const catalog = manager.catalog;
+
+    const cachedModels = await catalog.getCachedModels();
+    const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS);
+    if (!cachedVariant) { this.skip(); return; }
+
+    const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS);
+    model.selectVariant(cachedVariant);
+    await model.load();
+
+    try {
+      const embeddingClient = model.createEmbeddingClient();
+      const response = await embeddingClient.generateEmbedding(
+        'Machine learning is a subset of artificial intelligence'
+      );
+
+      const embedding = response.data[0].embedding;
+      expect(embedding.length).to.equal(1024);
+
+      // Verify L2 norm is approximately 1.0
+      let norm = 0;
+      for (const val of embedding) {
+        norm += val * val;
+      }
+      norm = Math.sqrt(norm);
+      expect(norm).to.be.greaterThan(0.99);
+      expect(norm).to.be.lessThan(1.01);
+    } finally {
+      await model.unload();
+    }
+  });
+
+  it('should produce different embeddings for different inputs', async function() {
+    this.timeout(30000);
+    const manager = getTestManager();
+    const catalog = manager.catalog;
+
+    const cachedModels = await catalog.getCachedModels();
+    const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS);
+    if (!cachedVariant) { this.skip(); return; }
+
+    const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS);
+    model.selectVariant(cachedVariant);
+    await model.load();
+
+    try {
+      const embeddingClient = model.createEmbeddingClient();
+
+      const response1 = await embeddingClient.generateEmbedding('The quick brown fox');
+      const response2 = await
embeddingClient.generateEmbedding('The capital of France is Paris'); + + expect(response1.data[0].embedding.length).to.equal(response2.data[0].embedding.length); + + // Cosine similarity should not be 1.0 + let dot = 0, norm1 = 0, norm2 = 0; + for (let i = 0; i < response1.data[0].embedding.length; i++) { + const v1 = response1.data[0].embedding[i]; + const v2 = response2.data[0].embedding[i]; + dot += v1 * v2; + norm1 += v1 * v1; + norm2 += v2 * v2; + } + const cosineSimilarity = dot / (Math.sqrt(norm1) * Math.sqrt(norm2)); + expect(cosineSimilarity).to.be.lessThan(0.99); + } finally { + await model.unload(); + } + }); + + it('should produce same embedding for same input', async function() { + this.timeout(30000); + const manager = getTestManager(); + const catalog = manager.catalog; + + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS); + if (!cachedVariant) { this.skip(); return; } + + const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS); + model.selectVariant(cachedVariant); + await model.load(); + + try { + const embeddingClient = model.createEmbeddingClient(); + + const response1 = await embeddingClient.generateEmbedding('Deterministic embedding test'); + const response2 = await embeddingClient.generateEmbedding('Deterministic embedding test'); + + for (let i = 0; i < response1.data[0].embedding.length; i++) { + expect(response1.data[0].embedding[i]).to.equal(response2.data[0].embedding[i]); + } + } finally { + await model.unload(); + } + }); + + it('should throw for empty input', async function() { + this.timeout(30000); + const manager = getTestManager(); + const catalog = manager.catalog; + + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS); + if (!cachedVariant) { this.skip(); return; } + + const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS); + model.selectVariant(cachedVariant); + await model.load(); + + try { + const embeddingClient = model.createEmbeddingClient(); + try { + await embeddingClient.generateEmbedding(''); + expect.fail('Expected an error for empty input'); + } catch (e: any) { + expect(e.message).to.include('non-empty'); + } + } finally { + await model.unload(); + } + }); + + it('should throw for empty batch', async function() { + this.timeout(30000); + const manager = getTestManager(); + const catalog = manager.catalog; + + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS); + if (!cachedVariant) { this.skip(); return; } + + const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS); + model.selectVariant(cachedVariant); + await model.load(); + + try { + const embeddingClient = model.createEmbeddingClient(); + try { + await embeddingClient.generateEmbeddings([]); + expect.fail('Expected an error for empty batch'); + } catch (e: any) { + expect(e.message).to.include('non-empty'); + } + } finally { + await model.unload(); + } + }); + + it('should generate batch embeddings', async function() { + this.timeout(30000); + const manager = getTestManager(); + const catalog = manager.catalog; + + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS); + if (!cachedVariant) { this.skip(); return; } + + const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS); + model.selectVariant(cachedVariant); + await model.load(); + + 
try { + const embeddingClient = model.createEmbeddingClient(); + + const response = await embeddingClient.generateEmbeddings([ + 'The quick brown fox jumps over the lazy dog', + 'Machine learning is a subset of artificial intelligence', + 'The capital of France is Paris' + ]); + + expect(response).to.not.be.undefined; + expect(response.data).to.be.an('array').with.length(3); + + for (let i = 0; i < 3; i++) { + expect(response.data[i].index).to.equal(i); + expect(response.data[i].embedding.length).to.equal(1024); + } + } finally { + await model.unload(); + } + }); + + it('should produce normalized batch embeddings', async function() { + this.timeout(30000); + const manager = getTestManager(); + const catalog = manager.catalog; + + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS); + if (!cachedVariant) { this.skip(); return; } + + const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS); + model.selectVariant(cachedVariant); + await model.load(); + + try { + const embeddingClient = model.createEmbeddingClient(); + + const response = await embeddingClient.generateEmbeddings([ + 'Hello world', + 'Goodbye world' + ]); + + expect(response.data.length).to.equal(2); + + for (const data of response.data) { + let norm = 0; + for (const val of data.embedding) { + norm += val * val; + } + norm = Math.sqrt(norm); + expect(norm).to.be.greaterThan(0.99); + expect(norm).to.be.lessThan(1.01); + } + } finally { + await model.unload(); + } + }); + + it('should match single and batch results', async function() { + this.timeout(30000); + const manager = getTestManager(); + const catalog = manager.catalog; + + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === EMBEDDING_MODEL_ALIAS); + if (!cachedVariant) { this.skip(); return; } + + const model = await catalog.getModel(EMBEDDING_MODEL_ALIAS); + model.selectVariant(cachedVariant); + await model.load(); + + try { + const embeddingClient = model.createEmbeddingClient(); + + const singleResponse = await embeddingClient.generateEmbedding('The capital of France is Paris'); + const batchResponse = await embeddingClient.generateEmbeddings(['The capital of France is Paris']); + + expect(batchResponse.data.length).to.equal(1); + + for (let i = 0; i < singleResponse.data[0].embedding.length; i++) { + expect(batchResponse.data[0].embedding[i]).to.equal(singleResponse.data[0].embedding[i]); + } + } finally { + await model.unload(); + } + }); +}); diff --git a/sdk/js/test/testUtils.ts b/sdk/js/test/testUtils.ts index 39fbed71..7cac6b29 100644 --- a/sdk/js/test/testUtils.ts +++ b/sdk/js/test/testUtils.ts @@ -44,6 +44,7 @@ export const TEST_CONFIG: FoundryLocalConfig = { }; export const TEST_MODEL_ALIAS = 'qwen2.5-0.5b'; +export const EMBEDDING_MODEL_ALIAS = 'qwen3-0.6b-embedding-generic-cpu'; export function getTestManager() { return FoundryLocalManager.create(TEST_CONFIG); diff --git a/sdk/python/README.md b/sdk/python/README.md index dbdef1f8..2a121411 100644 --- a/sdk/python/README.md +++ b/sdk/python/README.md @@ -8,6 +8,7 @@ The Foundry Local Python SDK provides a Python interface for interacting with lo - **Model Management** – download, cache, load, and unload models - **Chat Completions** – OpenAI-compatible chat API (non-streaming and streaming) - **Tool Calling** – function-calling support with chat completions +- **Embeddings** – generate text embeddings via OpenAI-compatible API - **Audio Transcription** – 
Whisper-based speech-to-text (non-streaming and streaming) - **Built-in Web Service** – optional HTTP endpoint for multi-process scenarios - **Native Performance** – ctypes FFI to AOT-compiled Foundry Local Core @@ -240,6 +241,28 @@ for chunk in client.complete_streaming_chat(messages): model.unload() ``` +### Embeddings + +Generate text embeddings using the `EmbeddingClient`: + +```python +embedding_client = model.get_embedding_client() + +# Single input +response = embedding_client.generate_embedding( + "The quick brown fox jumps over the lazy dog" +) +embedding = response.data[0].embedding # List[float] +print(f"Dimensions: {len(embedding)}") + +# Batch input +batch_response = embedding_client.generate_embeddings([ + "The quick brown fox", + "The capital of France is Paris" +]) +# batch_response.data[0].embedding, batch_response.data[1].embedding +``` + ### Web Service (Optional) Start a built-in HTTP server for multi-process access. @@ -271,6 +294,7 @@ manager.stop_web_service() | Class | Description | |---|---| | `ChatClient` | Chat completions (non-streaming and streaming) with tool calling | +| `EmbeddingClient` | Text embedding generation via OpenAI-compatible API | | `AudioClient` | Audio transcription (non-streaming and streaming) | ### Internal / Detail diff --git a/sdk/python/src/detail/model.py b/sdk/python/src/detail/model.py index 189920b1..6d60b7a2 100644 --- a/sdk/python/src/detail/model.py +++ b/sdk/python/src/detail/model.py @@ -10,6 +10,7 @@ from ..imodel import IModel from ..openai.chat_client import ChatClient from ..openai.audio_client import AudioClient +from ..openai.embedding_client import EmbeddingClient from .model_variant import ModelVariant from ..exception import FoundryLocalException from .core_interop import CoreInterop @@ -141,3 +142,7 @@ def get_chat_client(self) -> ChatClient: def get_audio_client(self) -> AudioClient: """Get an audio client for the currently selected variant.""" return self._selected_variant.get_audio_client() + + def get_embedding_client(self) -> EmbeddingClient: + """Get an embedding client for the currently selected variant.""" + return self._selected_variant.get_embedding_client() diff --git a/sdk/python/src/detail/model_variant.py b/sdk/python/src/detail/model_variant.py index a5ac02d4..76efb05c 100644 --- a/sdk/python/src/detail/model_variant.py +++ b/sdk/python/src/detail/model_variant.py @@ -16,6 +16,7 @@ from .model_load_manager import ModelLoadManager from ..openai.audio_client import AudioClient from ..openai.chat_client import ChatClient +from ..openai.embedding_client import EmbeddingClient logger = logging.getLogger(__name__) @@ -169,4 +170,8 @@ def get_chat_client(self) -> ChatClient: def get_audio_client(self) -> AudioClient: """Create an OpenAI-compatible ``AudioClient`` for this variant.""" - return AudioClient(self.id, self._core_interop) \ No newline at end of file + return AudioClient(self.id, self._core_interop) + + def get_embedding_client(self) -> EmbeddingClient: + """Create an OpenAI-compatible ``EmbeddingClient`` for this variant.""" + return EmbeddingClient(self.id, self._core_interop) diff --git a/sdk/python/src/imodel.py b/sdk/python/src/imodel.py index 8237aeb4..f723e514 100644 --- a/sdk/python/src/imodel.py +++ b/sdk/python/src/imodel.py @@ -9,6 +9,7 @@ from .openai.chat_client import ChatClient from .openai.audio_client import AudioClient +from .openai.embedding_client import EmbeddingClient from .detail.model_data_types import ModelInfo class IModel(ABC): @@ -127,6 +128,14 @@ def get_audio_client(self) 
-> AudioClient: """ pass + @abstractmethod + def get_embedding_client(self) -> 'EmbeddingClient': + """ + Get an OpenAI API based EmbeddingClient. + :return: EmbeddingClient instance. + """ + pass + @property @abstractmethod def variants(self) -> List['IModel']: diff --git a/sdk/python/src/openai/__init__.py b/sdk/python/src/openai/__init__.py index e445ba1d..bec5d68b 100644 --- a/sdk/python/src/openai/__init__.py +++ b/sdk/python/src/openai/__init__.py @@ -6,5 +6,6 @@ from .chat_client import ChatClient, ChatClientSettings from .audio_client import AudioClient +from .embedding_client import EmbeddingClient -__all__ = ["AudioClient", "ChatClient", "ChatClientSettings"] +__all__ = ["AudioClient", "ChatClient", "ChatClientSettings", "EmbeddingClient"] diff --git a/sdk/python/src/openai/embedding_client.py b/sdk/python/src/openai/embedding_client.py new file mode 100644 index 00000000..89a3b8e5 --- /dev/null +++ b/sdk/python/src/openai/embedding_client.py @@ -0,0 +1,107 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- + +from __future__ import annotations + +import json +import logging +from typing import List, Union + +from ..detail.core_interop import CoreInterop, InteropRequest +from ..exception import FoundryLocalException + +from openai.types import CreateEmbeddingResponse +from openai.types.embedding_create_params import EmbeddingCreateParams + +logger = logging.getLogger(__name__) + + +class EmbeddingClient: + """OpenAI-compatible embedding client backed by Foundry Local Core. + + Attributes: + model_id: The ID of the loaded embedding model variant. + """ + + def __init__(self, model_id: str, core_interop: CoreInterop): + self.model_id = model_id + self._core_interop = core_interop + + @staticmethod + def _validate_input(input_text: str) -> None: + """Validate that the input is a non-empty string.""" + if not isinstance(input_text, str) or input_text.strip() == "": + raise ValueError("Input must be a non-empty string.") + + def _create_request_json(self, input_value: Union[str, List[str]]) -> str: + """Build the JSON payload for the ``embeddings`` native command.""" + request: dict = { + "model": self.model_id, + "input": input_value, + } + + embedding_request = EmbeddingCreateParams(request) + + return json.dumps(embedding_request) + + def _execute_embedding_request(self, input_value: Union[str, List[str]]) -> CreateEmbeddingResponse: + """Send an embedding request and parse the response.""" + request_json = self._create_request_json(input_value) + request = InteropRequest(params={"OpenAICreateRequest": request_json}) + + response = self._core_interop.execute_command("embeddings", request) + if response.error is not None: + raise FoundryLocalException( + f"Embedding generation failed for model '{self.model_id}': {response.error}" + ) + + data = json.loads(response.data) + + # Add fields required by the OpenAI SDK type that the server doesn't return + for item in data.get("data", []): + if "object" not in item: + item["object"] = "embedding" + + if "usage" not in data: + data["usage"] = {"prompt_tokens": 0, "total_tokens": 0} + + return CreateEmbeddingResponse.model_validate(data) + + def generate_embedding(self, input_text: str) -> CreateEmbeddingResponse: + """Generate embeddings for a single input text. + + Args: + input_text: The text to generate embeddings for. 
+ + Returns: + A ``CreateEmbeddingResponse`` containing the embedding vector. + + Raises: + ValueError: If *input_text* is not a non-empty string. + FoundryLocalException: If the underlying native embeddings command fails. + """ + self._validate_input(input_text) + return self._execute_embedding_request(input_text) + + def generate_embeddings(self, inputs: List[str]) -> CreateEmbeddingResponse: + """Generate embeddings for multiple input texts in a single request. + + Args: + inputs: The texts to generate embeddings for. + + Returns: + A ``CreateEmbeddingResponse`` containing one embedding vector per input. + + Raises: + ValueError: If *inputs* is empty or contains empty strings. + FoundryLocalException: If the underlying native embeddings command fails. + """ + if not inputs or len(inputs) == 0: + raise ValueError("Inputs must be a non-empty list of strings.") + + for text in inputs: + self._validate_input(text) + + return self._execute_embedding_request(inputs) diff --git a/sdk/python/test/conftest.py b/sdk/python/test/conftest.py index 1cb85704..dc76a237 100644 --- a/sdk/python/test/conftest.py +++ b/sdk/python/test/conftest.py @@ -26,6 +26,7 @@ TEST_MODEL_ALIAS = "qwen2.5-0.5b" AUDIO_MODEL_ALIAS = "whisper-tiny" +EMBEDDING_MODEL_ALIAS = "qwen3-0.6b-embedding-generic-cpu" def get_git_repo_root() -> Path: """Walk upward from __file__ until we find a .git directory.""" diff --git a/sdk/python/test/openai/test_embedding_client.py b/sdk/python/test/openai/test_embedding_client.py new file mode 100644 index 00000000..69e9648d --- /dev/null +++ b/sdk/python/test/openai/test_embedding_client.py @@ -0,0 +1,202 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+# -------------------------------------------------------------------------- +"""Tests for EmbeddingClient – mirrors EmbeddingClientTests.cs.""" + +from __future__ import annotations + +import math + +import pytest + +from ..conftest import EMBEDDING_MODEL_ALIAS + + +def _get_loaded_embedding_model(catalog): + """Helper: ensure the embedding model is selected, loaded, and return Model.""" + cached = catalog.get_cached_models() + assert len(cached) > 0 + + cached_variant = next((m for m in cached if m.alias == EMBEDDING_MODEL_ALIAS), None) + assert cached_variant is not None, f"{EMBEDDING_MODEL_ALIAS} should be cached" + + model = catalog.get_model(EMBEDDING_MODEL_ALIAS) + assert model is not None + + model.select_variant(cached_variant) + model.load() + return model + + +class TestEmbeddingClient: + """Embedding Client Tests.""" + + def test_should_generate_embedding(self, catalog): + """Basic embedding generation.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + assert embedding_client is not None + + response = embedding_client.generate_embedding( + "The quick brown fox jumps over the lazy dog" + ) + + assert response is not None + assert response.model is not None + assert len(response.data) == 1 + assert response.data[0].index == 0 + assert len(response.data[0].embedding) == 1024 + + print(f"Embedding dimension: {len(response.data[0].embedding)}") + print(f"First value: {response.data[0].embedding[0]}") + print(f"Last value: {response.data[0].embedding[-1]}") + finally: + model.unload() + + def test_should_generate_normalized_embedding(self, catalog): + """Verify L2 norm is approximately 1.0.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + inputs = [ + "The quick brown fox jumps over the lazy dog", + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris", + ] + + for input_text in inputs: + response = embedding_client.generate_embedding(input_text) + embedding = response.data[0].embedding + + assert len(embedding) == 1024 + + norm = math.sqrt(sum(v * v for v in embedding)) + assert 0.99 <= norm <= 1.01, f"L2 norm {norm} not approximately 1.0" + + for val in embedding: + assert -1.0 <= val <= 1.0 + finally: + model.unload() + + def test_should_produce_different_embeddings_for_different_inputs(self, catalog): + """Different inputs should produce different embeddings.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + response1 = embedding_client.generate_embedding("The quick brown fox") + response2 = embedding_client.generate_embedding("The capital of France is Paris") + + emb1 = response1.data[0].embedding + emb2 = response2.data[0].embedding + + assert len(emb1) == len(emb2) + + # Cosine similarity should not be 1.0 + dot = sum(a * b for a, b in zip(emb1, emb2)) + norm1 = math.sqrt(sum(a * a for a in emb1)) + norm2 = math.sqrt(sum(b * b for b in emb2)) + cosine_similarity = dot / (norm1 * norm2) + assert cosine_similarity < 0.99 + finally: + model.unload() + + def test_should_produce_same_embedding_for_same_input(self, catalog): + """Same input should produce identical embeddings.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + response1 = embedding_client.generate_embedding("Deterministic embedding test") + response2 = embedding_client.generate_embedding("Deterministic embedding test") + + emb1 = 
response1.data[0].embedding + emb2 = response2.data[0].embedding + + for i in range(len(emb1)): + assert emb1[i] == emb2[i] + finally: + model.unload() + + def test_should_raise_for_empty_input(self, catalog): + """Empty input should raise ValueError.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + with pytest.raises(ValueError): + embedding_client.generate_embedding("") + finally: + model.unload() + + def test_batch_should_return_multiple_embeddings(self, catalog): + """Batch request should return one embedding per input.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + response = embedding_client.generate_embeddings([ + "The quick brown fox jumps over the lazy dog", + "Machine learning is a subset of artificial intelligence", + "The capital of France is Paris", + ]) + + assert response is not None + assert len(response.data) == 3 + + for i, data in enumerate(response.data): + assert data.index == i + assert len(data.embedding) == 1024 + finally: + model.unload() + + def test_batch_each_embedding_is_normalized(self, catalog): + """Each embedding in a batch should be L2-normalized.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + response = embedding_client.generate_embeddings([ + "Hello world", + "Goodbye world", + ]) + + assert len(response.data) == 2 + + for data in response.data: + norm = math.sqrt(sum(v * v for v in data.embedding)) + assert 0.99 <= norm <= 1.01, f"L2 norm {norm} not approximately 1.0" + finally: + model.unload() + + def test_batch_matches_single_input_results(self, catalog): + """Batch result should match single-input result for the same text.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + input_text = "The capital of France is Paris" + + single_response = embedding_client.generate_embedding(input_text) + batch_response = embedding_client.generate_embeddings([input_text]) + + assert len(batch_response.data) == 1 + + for i in range(len(single_response.data[0].embedding)): + assert batch_response.data[0].embedding[i] == single_response.data[0].embedding[i] + finally: + model.unload() + + def test_batch_should_raise_for_empty_list(self, catalog): + """Empty list should raise ValueError.""" + model = _get_loaded_embedding_model(catalog) + try: + embedding_client = model.get_embedding_client() + + with pytest.raises(ValueError): + embedding_client.generate_embeddings([]) + finally: + model.unload() diff --git a/sdk/rust/Cargo.toml b/sdk/rust/Cargo.toml index 92675da2..7ec7823a 100644 --- a/sdk/rust/Cargo.toml +++ b/sdk/rust/Cargo.toml @@ -25,7 +25,7 @@ tokio-stream = "0.1" futures-core = "0.3" reqwest = { version = "0.12", features = ["json"] } urlencoding = "2" -async-openai = { version = "0.33", default-features = false, features = ["chat-completion-types"] } +async-openai = { version = "0.33", default-features = false, features = ["chat-completion-types", "embedding-types"] } [build-dependencies] ureq = "3" diff --git a/sdk/rust/README.md b/sdk/rust/README.md index 08f9c279..ce97a7dd 100644 --- a/sdk/rust/README.md +++ b/sdk/rust/README.md @@ -8,6 +8,7 @@ The Foundry Local Rust SDK provides an async Rust interface for running AI model - **Model catalog** — Browse and discover available models; check what's cached or loaded - **Automatic model management** — Download, load, unload, and remove models from cache - **Chat 
completions** — OpenAI-compatible chat API with both non-streaming and streaming responses
+- **Embeddings** — Generate text embeddings via OpenAI-compatible API
 - **Audio transcription** — Transcribe audio files locally with streaming support
 - **Tool calling** — Function/tool calling with streaming, multi-turn conversation support
 - **Response format control** — Text, JSON, JSON Schema, and Lark grammar constrained output
@@ -353,6 +354,27 @@ let client = model.create_chat_client()
     .response_format(ChatResponseFormat::LarkGrammar(grammar.to_string()));
 ```
 
+### Embeddings
+
+Generate text embeddings using the `EmbeddingClient`:
+
+```rust
+let embedding_client = model.create_embedding_client();
+
+// Single input
+let response = embedding_client
+    .generate_embedding("The quick brown fox jumps over the lazy dog")
+    .await?;
+let embedding = &response.data[0].embedding; // Vec<f32>
+println!("Dimensions: {}", embedding.len());
+
+// Batch input
+let batch_response = embedding_client
+    .generate_embeddings(&["The quick brown fox", "The capital of France is Paris"])
+    .await?;
+// batch_response.data[0].embedding, batch_response.data[1].embedding
+```
+
 ### Audio Transcription
 
 Transcribe audio files locally using the `AudioClient`:
diff --git a/sdk/rust/docs/api.md b/sdk/rust/docs/api.md
index abfec76f..8dcb0c29 100644
--- a/sdk/rust/docs/api.md
+++ b/sdk/rust/docs/api.md
@@ -15,6 +15,8 @@
 - [OpenAI Clients](#openai-clients)
   - [ChatClient](#chatclient)
   - [ChatCompletionStream](#chatcompletionstream)
+  - [EmbeddingClient](#embeddingclient)
+  - [EmbeddingResponse](#embeddingresponse)
   - [AudioClient](#audioclient)
   - [AudioTranscriptionStream](#audiotranscriptionstream)
   - [AudioTranscriptionResponse](#audiotranscriptionresponse)
@@ -214,6 +216,34 @@ A stream of `CreateChatCompletionStreamResponse` chunks. Use with `StreamExt::ne
 
 ---
 
+### EmbeddingClient
+
+OpenAI-compatible embedding generation backed by a local model.
+
+| Method | Description |
+|---|---|
+| `new(model_id, core)` | *(internal)* Create a new client |
+| `generate_embedding(input: &str) -> Result<CreateEmbeddingResponse>` | Generate embedding for a single input |
+| `generate_embeddings(inputs: &[&str]) -> Result<CreateEmbeddingResponse>` | Generate embeddings for multiple inputs |
+
+Returns `async_openai::types::embeddings::CreateEmbeddingResponse`:
+
+| Field | Type | Description |
+|---|---|---|
+| `model` | `String` | Model used for generation |
+| `object` | `String` | Object type (always `"list"`) |
+| `data` | `Vec<Embedding>` | List of embedding results |
+| `usage` | `Usage` | Token usage information |
+
+Each `Embedding` in `data`:
+
+| Field | Type | Description |
+|---|---|---|
+| `index` | `u32` | Index of this embedding in the batch |
+| `embedding` | `Vec<f32>` | The embedding vector (float32) |
+
+---
+
 ### AudioClient
 
 OpenAI-compatible audio transcription backed by a local model.
diff --git a/sdk/rust/src/detail/model.rs b/sdk/rust/src/detail/model.rs
index 3a87a1c3..08288aee 100644
--- a/sdk/rust/src/detail/model.rs
+++ b/sdk/rust/src/detail/model.rs
@@ -14,6 +14,7 @@ use super::model_variant::ModelVariant;
 use crate::error::{FoundryLocalError, Result};
 use crate::openai::AudioClient;
 use crate::openai::ChatClient;
+use crate::openai::EmbeddingClient;
 use crate::types::ModelInfo;
 
 /// The public model type.
@@ -242,6 +243,11 @@ impl Model {
         self.selected_variant().create_audio_client()
     }
 
+    /// Create an [`EmbeddingClient`] bound to the (selected) variant.
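+    ///
+    /// A minimal usage sketch (assumes the selected variant is a loaded
+    /// embedding model and that the crate's error type converts via `?`):
+    ///
+    /// ```no_run
+    /// # async fn demo(model: foundry_local_sdk::Model) -> Result<(), Box<dyn std::error::Error>> {
+    /// let client = model.create_embedding_client();
+    /// let response = client.generate_embedding("hello world").await?;
+    /// println!("dimensions: {}", response.data[0].embedding.len());
+    /// # Ok(())
+    /// # }
+    /// ```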
+    pub fn create_embedding_client(&self) -> EmbeddingClient {
+        self.selected_variant().create_embedding_client()
+    }
+
     /// Available variants of this model.
     ///
     /// For a single-variant model (e.g. from
diff --git a/sdk/rust/src/detail/model_variant.rs b/sdk/rust/src/detail/model_variant.rs
index ca1a83c7..1f8ce7d5 100644
--- a/sdk/rust/src/detail/model_variant.rs
+++ b/sdk/rust/src/detail/model_variant.rs
@@ -15,6 +15,7 @@ use crate::catalog::CacheInvalidator;
 use crate::error::Result;
 use crate::openai::AudioClient;
 use crate::openai::ChatClient;
+use crate::openai::EmbeddingClient;
 use crate::types::ModelInfo;
 
 /// Represents one specific variant of a model (a particular id within an alias
@@ -148,4 +149,8 @@ impl ModelVariant {
     pub(crate) fn create_audio_client(&self) -> AudioClient {
         AudioClient::new(&self.info.id, Arc::clone(&self.core))
     }
+
+    pub(crate) fn create_embedding_client(&self) -> EmbeddingClient {
+        EmbeddingClient::new(&self.info.id, Arc::clone(&self.core))
+    }
 }
diff --git a/sdk/rust/src/openai/embedding_client.rs b/sdk/rust/src/openai/embedding_client.rs
new file mode 100644
index 00000000..5de080a0
--- /dev/null
+++ b/sdk/rust/src/openai/embedding_client.rs
@@ -0,0 +1,100 @@
+//! OpenAI-compatible embedding client.
+
+use std::sync::Arc;
+
+use async_openai::types::embeddings::CreateEmbeddingResponse;
+use serde_json::{json, Value};
+
+use crate::detail::core_interop::CoreInterop;
+use crate::error::{FoundryLocalError, Result};
+
+/// Client for OpenAI-compatible embedding generation backed by a local model.
+pub struct EmbeddingClient {
+    model_id: String,
+    core: Arc<CoreInterop>,
+}
+
+impl EmbeddingClient {
+    pub(crate) fn new(model_id: &str, core: Arc<CoreInterop>) -> Self {
+        Self {
+            model_id: model_id.to_owned(),
+            core,
+        }
+    }
+
+    /// Generate embeddings for a single input text.
+    pub async fn generate_embedding(&self, input: &str) -> Result<CreateEmbeddingResponse> {
+        Self::validate_input(input)?;
+        let request = self.build_request(json!(input));
+        self.execute_request(request).await
+    }
+
+    /// Generate embeddings for multiple input texts in a single request.
+    pub async fn generate_embeddings(&self, inputs: &[&str]) -> Result<CreateEmbeddingResponse> {
+        if inputs.is_empty() {
+            return Err(FoundryLocalError::Validation {
+                reason: "inputs must be a non-empty array".into(),
+            });
+        }
+        for input in inputs {
+            Self::validate_input(input)?;
+        }
+        let request = self.build_request(json!(inputs));
+        self.execute_request(request).await
+    }
+
+    async fn execute_request(&self, request: Value) -> Result<CreateEmbeddingResponse> {
+        let params = json!({
+            "Params": {
+                "OpenAICreateRequest": serde_json::to_string(&request)?
+ } + }); + + let raw = self + .core + .execute_command_async("embeddings".into(), Some(params)) + .await?; + + // Patch the response to add fields required by async_openai types + // that the server doesn't return (object on each item, usage) + let mut response_value: Value = serde_json::from_str(&raw)?; + if let Some(data) = response_value + .get_mut("data") + .and_then(|d| d.as_array_mut()) + { + for item in data { + if item.get("object").is_none() { + item.as_object_mut() + .map(|m| m.insert("object".into(), json!("embedding"))); + } + } + } + if response_value.get("usage").is_none() { + response_value.as_object_mut().map(|m| { + m.insert( + "usage".into(), + json!({"prompt_tokens": 0, "total_tokens": 0}), + ) + }); + } + + let parsed: CreateEmbeddingResponse = serde_json::from_value(response_value)?; + Ok(parsed) + } + + fn build_request(&self, input: Value) -> Value { + json!({ + "model": self.model_id, + "input": input, + }) + } + + fn validate_input(input: &str) -> Result<()> { + if input.trim().is_empty() { + return Err(FoundryLocalError::Validation { + reason: "input must be a non-empty string".into(), + }); + } + Ok(()) + } +} diff --git a/sdk/rust/src/openai/mod.rs b/sdk/rust/src/openai/mod.rs index c3d4a645..5c17a0df 100644 --- a/sdk/rust/src/openai/mod.rs +++ b/sdk/rust/src/openai/mod.rs @@ -1,5 +1,6 @@ mod audio_client; mod chat_client; +mod embedding_client; mod json_stream; pub use self::audio_client::{ @@ -7,4 +8,5 @@ pub use self::audio_client::{ TranscriptionSegment, TranscriptionWord, }; pub use self::chat_client::{ChatClient, ChatClientSettings, ChatCompletionStream}; +pub use self::embedding_client::EmbeddingClient; pub use self::json_stream::JsonStream; diff --git a/sdk/rust/tests/integration/common/mod.rs b/sdk/rust/tests/integration/common/mod.rs index d657310c..4e65e4ea 100644 --- a/sdk/rust/tests/integration/common/mod.rs +++ b/sdk/rust/tests/integration/common/mod.rs @@ -14,6 +14,9 @@ pub const TEST_MODEL_ALIAS: &str = "qwen2.5-0.5b"; /// Default model alias used for audio-transcription integration tests. pub const WHISPER_MODEL_ALIAS: &str = "whisper-tiny"; +/// Default model alias used for embedding integration tests. +pub const EMBEDDING_MODEL_ALIAS: &str = "qwen3-0.6b-embedding-generic-cpu"; + /// Expected transcription text fragment for the shared audio test file. pub const EXPECTED_TRANSCRIPTION_TEXT: &str = " And lots of times you need to give people more than one link at a time"; diff --git a/sdk/rust/tests/integration/embedding_client_test.rs b/sdk/rust/tests/integration/embedding_client_test.rs new file mode 100644 index 00000000..0f577329 --- /dev/null +++ b/sdk/rust/tests/integration/embedding_client_test.rs @@ -0,0 +1,223 @@ +//! Integration tests for EmbeddingClient. 
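+//!
+//! These tests assume the `qwen3-0.6b-embedding-generic-cpu` variant
+//! (`common::EMBEDDING_MODEL_ALIAS`) is available locally: each test loads the
+//! model through `setup_embedding_client`, exercises the client, and unloads
+//! the model before returning.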
+
+use std::sync::Arc;
+
+use foundry_local_sdk::openai::EmbeddingClient;
+use foundry_local_sdk::Model;
+
+use crate::common;
+
+async fn setup_embedding_client() -> (EmbeddingClient, Arc<Model>) {
+    let manager = common::get_test_manager();
+    let catalog = manager.catalog();
+
+    let model = catalog
+        .get_model(common::EMBEDDING_MODEL_ALIAS)
+        .await
+        .expect("embedding model should exist in catalog");
+
+    model.load().await.expect("model should load successfully");
+
+    let client = model.create_embedding_client();
+    (client, model)
+}
+
+#[tokio::test]
+async fn should_generate_embedding() {
+    let (client, model) = setup_embedding_client().await;
+
+    let response = client
+        .generate_embedding("The quick brown fox jumps over the lazy dog")
+        .await
+        .expect("embedding should succeed");
+
+    assert_eq!(response.data.len(), 1);
+    assert_eq!(response.data[0].index, 0);
+    assert_eq!(response.data[0].embedding.len(), 1024);
+
+    println!("Embedding dimension: {}", response.data[0].embedding.len());
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_generate_normalized_embedding() {
+    let (client, model) = setup_embedding_client().await;
+
+    let inputs = [
+        "The quick brown fox jumps over the lazy dog",
+        "Machine learning is a subset of artificial intelligence",
+        "The capital of France is Paris",
+    ];
+
+    for input in &inputs {
+        let response = client
+            .generate_embedding(input)
+            .await
+            .expect("embedding should succeed");
+
+        let embedding = &response.data[0].embedding;
+        assert_eq!(embedding.len(), 1024);
+
+        // Verify L2 norm is approximately 1.0
+        let norm: f32 = embedding.iter().map(|v| v * v).sum::<f32>().sqrt();
+        assert!(
+            (0.99_f32..=1.01_f32).contains(&norm),
+            "L2 norm {norm} not approximately 1.0"
+        );
+
+        for val in embedding {
+            assert!(
+                (-1.0_f32..=1.0_f32).contains(val),
+                "value {val} outside [-1, 1]"
+            );
+        }
+    }
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_produce_different_embeddings_for_different_inputs() {
+    let (client, model) = setup_embedding_client().await;
+
+    let response1 = client
+        .generate_embedding("The quick brown fox")
+        .await
+        .expect("embedding should succeed");
+
+    let response2 = client
+        .generate_embedding("The capital of France is Paris")
+        .await
+        .expect("embedding should succeed");
+
+    let emb1 = &response1.data[0].embedding;
+    let emb2 = &response2.data[0].embedding;
+
+    assert_eq!(emb1.len(), emb2.len());
+
+    // Cosine similarity should not be 1.0
+    let dot: f32 = emb1.iter().zip(emb2.iter()).map(|(a, b)| a * b).sum();
+    let norm1: f32 = emb1.iter().map(|v| v * v).sum::<f32>().sqrt();
+    let norm2: f32 = emb2.iter().map(|v| v * v).sum::<f32>().sqrt();
+    let cosine_similarity = dot / (norm1 * norm2);
+    assert!(
+        cosine_similarity < 0.99_f32,
+        "cosine similarity {cosine_similarity} should be < 0.99"
+    );
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_produce_same_embedding_for_same_input() {
+    let (client, model) = setup_embedding_client().await;
+
+    let response1 = client
+        .generate_embedding("Deterministic embedding test")
+        .await
+        .expect("embedding should succeed");
+
+    let response2 = client
+        .generate_embedding("Deterministic embedding test")
+        .await
+        .expect("embedding should succeed");
+
+    let emb1 = &response1.data[0].embedding;
+    let emb2 = &response2.data[0].embedding;
+
+    for (i, (a, b)) in emb1.iter().zip(emb2.iter()).enumerate() {
+        assert_eq!(a, b, "mismatch at index {i}");
+    }
+
+
    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_throw_for_empty_input() {
+    let (client, model) = setup_embedding_client().await;
+
+    let result = client.generate_embedding("").await;
+    assert!(result.is_err(), "empty input should return an error");
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_throw_for_empty_batch() {
+    let (client, model) = setup_embedding_client().await;
+
+    let result = client.generate_embeddings(&[]).await;
+    assert!(result.is_err(), "empty batch should return an error");
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_generate_batch_embeddings() {
+    let (client, model) = setup_embedding_client().await;
+
+    let response = client
+        .generate_embeddings(&[
+            "The quick brown fox jumps over the lazy dog",
+            "Machine learning is a subset of artificial intelligence",
+            "The capital of France is Paris",
+        ])
+        .await
+        .expect("batch embedding should succeed");
+
+    assert_eq!(response.data.len(), 3);
+    for (i, data) in response.data.iter().enumerate() {
+        assert_eq!(data.index, i as u32);
+        assert_eq!(data.embedding.len(), 1024);
+    }
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_generate_normalized_batch_embeddings() {
+    let (client, model) = setup_embedding_client().await;
+
+    let response = client
+        .generate_embeddings(&["Hello world", "Goodbye world"])
+        .await
+        .expect("batch embedding should succeed");
+
+    assert_eq!(response.data.len(), 2);
+    for data in &response.data {
+        let norm: f32 = data.embedding.iter().map(|v| v * v).sum::<f32>().sqrt();
+        assert!(
+            (0.99_f32..=1.01_f32).contains(&norm),
+            "L2 norm {norm} not approximately 1.0"
+        );
+    }
+
+    model.unload().await.expect("unload should succeed");
+}
+
+#[tokio::test]
+async fn should_match_single_and_batch_results() {
+    let (client, model) = setup_embedding_client().await;
+
+    let single = client
+        .generate_embedding("The capital of France is Paris")
+        .await
+        .expect("single embedding should succeed");
+
+    let batch = client
+        .generate_embeddings(&["The capital of France is Paris"])
+        .await
+        .expect("batch embedding should succeed");
+
+    assert_eq!(batch.data.len(), 1);
+    for (a, b) in single.data[0]
+        .embedding
+        .iter()
+        .zip(batch.data[0].embedding.iter())
+    {
+        assert_eq!(a, b);
+    }
+
+    model.unload().await.expect("unload should succeed");
+}
diff --git a/sdk/rust/tests/integration/main.rs b/sdk/rust/tests/integration/main.rs
index 04de9a23..c63956f3 100644
--- a/sdk/rust/tests/integration/main.rs
+++ b/sdk/rust/tests/integration/main.rs
@@ -11,6 +11,7 @@ mod common;
 mod audio_client_test;
 mod catalog_test;
 mod chat_client_test;
+mod embedding_client_test;
 mod manager_test;
 mod model_test;
 mod web_service_test;

From 413925d269482c959284c469e0769866490f88f4 Mon Sep 17 00:00:00 2001
From: Baiju Meswani
Date: Wed, 22 Apr 2026 13:19:17 -0700
Subject: [PATCH 53/83] Avoid standard installation when WinML installation is in flight (#662)

Cherry-pick of https://github.com/microsoft/Foundry-Local/pull/618/ to main
---
 sdk/js/script/install-standard.cjs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs
index e32160f6..87c5b1ac 100644
--- a/sdk/js/script/install-standard.cjs
+++ b/sdk/js/script/install-standard.cjs
@@ -8,6 +8,16 @@
 const fs = require('fs');
 const os = require('os');
 const path = require('path');
+
+// If
foundry-local-sdk-winml is also being installed, skip the standard binary +// download entirely — the winml install script will handle all binary provisioning. +// npm extracts all packages before running lifecycle scripts, so this check is reliable. +const winmlPkgJson = path.join(__dirname, '..', '..', 'foundry-local-sdk-winml', 'package.json'); +if (fs.existsSync(winmlPkgJson)) { + console.log('[foundry-local] foundry-local-sdk-winml detected. Deferring binary install to winml variant.'); + process.exit(0); +} + const { NUGET_FEED, runInstall } = require('./install-utils.cjs'); // deps_versions.json lives at the package root when published, or at sdk/ in the repo. From 088f844f3d478edfa3add86ce8c5b62e63183fe4 Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Wed, 22 Apr 2026 13:59:59 -0700 Subject: [PATCH 54/83] bump dev version from 1.0.0 -> 1.1.0 (#663) Co-authored-by: Prathik Rao --- .pipelines/foundry-local-packaging.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index 6c3b65c4..bf05607f 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -17,7 +17,7 @@ parameters: - name: version displayName: 'Package version' type: string - default: '1.0.0' + default: '1.1.0' - name: prereleaseId displayName: 'Pre-release identifier (e.g. rc1, beta).' type: string From ab1236336928aa7763fca87dadb62d0b06c00807 Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Thu, 23 Apr 2026 21:56:43 +0000 Subject: [PATCH 55/83] Ship TypeScript declarations with foundry-local-sdk (#666) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `package.json` advertised `"types": "dist/index.d.ts"`, but the build never emitted declarations — consumers importing `foundry-local-sdk` from TypeScript got no IntelliSense or type checking. ### Changes - **`sdk/js/tsconfig.build.json`** — enable `declaration: true` and `sourceMap: true`. Scoped to the build tsconfig so tests/examples are unaffected. `declarationMap` is intentionally omitted to keep `src/` out of the tarball. - **`sdk/js/README.md`** — add a short "TypeScript support" section so the bundled typings are discoverable. No changes needed in `package.json` or `script/pack.cjs`: `dist/` is already in `files`, so the 16 emitted `.d.ts` files (`index`, `catalog`, `foundryLocalManager`, `configuration`, `imodel`, `types`, `openai/*`, `detail/*`) flow into the tarball automatically. The `foundry-local-sdk-winml` variant continues to inherit types transitively via its `foundry-local-sdk` dependency. ```jsonc // sdk/js/tsconfig.build.json "compilerOptions": { "rootDir": "./src", "outDir": "./dist", "declaration": true, "sourceMap": true } ``` `@internal`-tagged exports (`Model`, `ModelVariant`, `CoreInterop`, `Configuration`) are left in the emitted declarations to keep the public surface unchanged from what the `.js` already exposes; `stripInternal` can be layered on as a follow-up if tightening the advertised API is desired. 
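For illustration, a TypeScript consumer can now lean on the bundled typings directly. A rough sketch (the model alias and empty config literal are placeholders):

```typescript
import { FoundryLocalManager, EmbeddingClient } from 'foundry-local-sdk';

const manager = FoundryLocalManager.create({});                  // config shape is type-checked
const model = await manager.catalog.getModel('qwen2.5-0.5b');    // typed Model, not any
const embeddings: EmbeddingClient = model.createEmbeddingClient();
```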
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: baijumeswani <12852605+baijumeswani@users.noreply.github.com> --- sdk/js/README.md | 6 ++++++ sdk/js/tsconfig.build.json | 4 +++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/sdk/js/README.md b/sdk/js/README.md index ff1ac542..26471cc8 100644 --- a/sdk/js/README.md +++ b/sdk/js/README.md @@ -21,6 +21,12 @@ The Foundry Local JS SDK provides a JavaScript/TypeScript interface for running npm install foundry-local-sdk ``` +## TypeScript support + +The package is authored in TypeScript and ships with bundled type declarations (`.d.ts` files) alongside the compiled JavaScript. No `@types/foundry-local-sdk` package or manual ambient declarations are needed. + +Importing from `foundry-local-sdk` in a TypeScript project gives you full type information and IntelliSense for every public API, including `FoundryLocalManager`, `Catalog`, `ChatClient`, `AudioClient`, `EmbeddingClient`, `ResponsesClient`, `LiveAudioTranscriptionSession`, and all of their associated option and response types. + ## WinML: Automatic Hardware Acceleration (Windows) On Windows, install the WinML package to enable automatic execution provider management. The SDK will automatically discover, download, and register hardware-specific execution providers (e.g., Qualcomm QNN for NPU acceleration) via the Windows App Runtime — no manual driver or EP setup required. diff --git a/sdk/js/tsconfig.build.json b/sdk/js/tsconfig.build.json index 4ebb99dd..7c985b52 100644 --- a/sdk/js/tsconfig.build.json +++ b/sdk/js/tsconfig.build.json @@ -2,7 +2,9 @@ "extends": "./tsconfig.json", "compilerOptions": { "rootDir": "./src", - "outDir": "./dist" + "outDir": "./dist", + "declaration": true, + "sourceMap": true }, "include": ["src/**/*"], "exclude": ["node_modules", "test", "examples"] From 857aa2242b7bb1c7411d1a8d932875464b6839ee Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Thu, 23 Apr 2026 16:20:06 -0700 Subject: [PATCH 56/83] adds support for netstandard2.0 (#629) https://github.com/microsoft/Foundry-Local/issues/628 --------- Co-authored-by: Prathik Rao Co-authored-by: Jesse Squire --- .pipelines/templates/build-cs-steps.yml | 8 +- .pipelines/templates/test-cs-steps.yml | 8 +- sdk/cs/src/Configuration.cs | 6 +- sdk/cs/src/Detail/CoreInterop.Modern.cs | 116 ++++++++ sdk/cs/src/Detail/CoreInterop.NetStandard.cs | 95 +++++++ sdk/cs/src/Detail/CoreInterop.WinML.cs | 24 ++ sdk/cs/src/Detail/CoreInterop.cs | 224 +++++---------- sdk/cs/src/Detail/ICoreInterop.cs | 10 +- sdk/cs/src/Detail/ModelLoadManager.cs | 6 +- sdk/cs/src/FoundryLocalException.cs | 3 - sdk/cs/src/FoundryLocalManager.cs | 2 +- sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 262 +++++++++--------- .../OpenAI/LiveAudioTranscriptionClient.cs | 2 +- sdk/cs/src/Utils.cs | 7 +- .../FoundryLocal.Tests/AudioClientTests.cs | 3 +- .../ChatCompletionsTests.cs | 1 + .../FoundryLocal.Tests/ConfigurationTests.cs | 195 +++++++++++++ .../CoreInteropUtilTests.cs | 163 +++++++++++ sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs | 1 + .../ExceptionHandlingTests.cs | 134 +++++++++ .../FoundryLocalExceptionTests.cs | 71 +++++ .../FoundryLocalManagerTest.cs | 1 + .../LiveAudioTranscriptionTests.cs | 8 +- .../Microsoft.AI.Foundry.Local.Tests.csproj | 26 +- .../OperatingSystemConverter.cs | 11 +- .../SkipUnlessIntegrationAttribute.cs | 20 ++ .../SkipUnlessIntegrationTests.cs | 34 +++ sdk/cs/test/FoundryLocal.Tests/Utils.cs | 74 +++-- 28 files changed, 1161 insertions(+), 354 deletions(-) 
create mode 100644 sdk/cs/src/Detail/CoreInterop.Modern.cs create mode 100644 sdk/cs/src/Detail/CoreInterop.NetStandard.cs create mode 100644 sdk/cs/src/Detail/CoreInterop.WinML.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/ConfigurationTests.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/CoreInteropUtilTests.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/ExceptionHandlingTests.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/FoundryLocalExceptionTests.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationAttribute.cs create mode 100644 sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationTests.cs diff --git a/.pipelines/templates/build-cs-steps.yml b/.pipelines/templates/build-cs-steps.yml index bb1a4fb9..8d289353 100644 --- a/.pipelines/templates/build-cs-steps.yml +++ b/.pipelines/templates/build-cs-steps.yml @@ -41,11 +41,13 @@ steps: Write-Host "##vso[task.setvariable variable=repoRoot]$repoRoot" Write-Host "##vso[task.setvariable variable=testDataDir]$testDataDir" +# Using the latest SDK does not prevent us from targeting other platforms. +# It ensures that we've got the latest tooling, fixes, and performance enhancements. - task: UseDotNet@2 - displayName: 'Use .NET 9 SDK' + displayName: 'Use .NET 10 SDK' inputs: packageType: sdk - version: '9.0.x' + version: '10.0.x' # Read version from the version-info artifact produced by compute_version stage. - task: PowerShell@2 @@ -132,7 +134,7 @@ steps: targetType: inline script: | $base = "$(repoRoot)/sdk/cs/src/bin/Release" - # The SDK targets net9.0 (standard) or net9.0-windows10.0.26100.0 (WinML). + # The SDK targets netstandard2.0 and any compatible runtime. # Find whichever TFM directory was produced by the build. $tfmDir = Get-ChildItem $base -Directory | Select-Object -First 1 if (-not $tfmDir) { throw "No target framework directory found under $base" } diff --git a/.pipelines/templates/test-cs-steps.yml b/.pipelines/templates/test-cs-steps.yml index c1378b04..605b36cf 100644 --- a/.pipelines/templates/test-cs-steps.yml +++ b/.pipelines/templates/test-cs-steps.yml @@ -34,6 +34,12 @@ steps: packageType: sdk version: '9.0.x' +- task: UseDotNet@2 + displayName: 'Install .NET 8 runtime' + inputs: + packageType: runtime + version: '8.0.x' + # Load dependency versions from deps_versions.json - template: update-deps-versions-steps.yml parameters: @@ -54,7 +60,7 @@ steps: - + "@ diff --git a/sdk/cs/src/Configuration.cs b/sdk/cs/src/Configuration.cs index 4634dcbc..e04d5c3b 100644 --- a/sdk/cs/src/Configuration.cs +++ b/sdk/cs/src/Configuration.cs @@ -109,17 +109,17 @@ internal Dictionary AsDictionary() if (!string.IsNullOrEmpty(AppDataDir)) { - configValues.Add("AppDataDir", AppDataDir); + configValues.Add("AppDataDir", AppDataDir!); } if (!string.IsNullOrEmpty(ModelCacheDir)) { - configValues.Add("ModelCacheDir", ModelCacheDir); + configValues.Add("ModelCacheDir", ModelCacheDir!); } if (!string.IsNullOrEmpty(LogsDir)) { - configValues.Add("LogsDir", LogsDir); + configValues.Add("LogsDir", LogsDir!); } if (Web != null) diff --git a/sdk/cs/src/Detail/CoreInterop.Modern.cs b/sdk/cs/src/Detail/CoreInterop.Modern.cs new file mode 100644 index 00000000..1774c0d3 --- /dev/null +++ b/sdk/cs/src/Detail/CoreInterop.Modern.cs @@ -0,0 +1,116 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. 
+// +// -------------------------------------------------------------------------------------------------------------------- + +// Modern .NET (net7.0+) native library loading and source-generated P/Invoke declarations. + +#if NET7_0_OR_GREATER + +namespace Microsoft.AI.Foundry.Local.Detail; + +using System.Diagnostics; +using System.IO; +using System.Runtime.InteropServices; + +using static Microsoft.AI.Foundry.Local.Detail.ICoreInterop; + +internal partial class CoreInterop +{ + [LibraryImport(LibraryName, EntryPoint = "execute_command")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreExecuteCommand(RequestBuffer* request, ResponseBuffer* response); + + [LibraryImport(LibraryName, EntryPoint = "execute_command_with_callback")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreExecuteCommandWithCallback(RequestBuffer* nativeRequest, + ResponseBuffer* nativeResponse, + nint callbackPtr, + nint userData); + + [LibraryImport(LibraryName, EntryPoint = "execute_command_with_binary")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreExecuteCommandWithBinary(StreamingRequestBuffer* nativeRequest, + ResponseBuffer* nativeResponse); + + [LibraryImport(LibraryName, EntryPoint = "audio_stream_start")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreAudioStreamStart(RequestBuffer* request, ResponseBuffer* response); + + [LibraryImport(LibraryName, EntryPoint = "audio_stream_push")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreAudioStreamPush(StreamingRequestBuffer* request, ResponseBuffer* response); + + [LibraryImport(LibraryName, EntryPoint = "audio_stream_stop")] + [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] + private static unsafe partial void CoreAudioStreamStop(RequestBuffer* request, ResponseBuffer* response); + + private static bool TryLoadNativeLibrary(string path, out IntPtr handle) + { + return NativeLibrary.TryLoad(path, out handle); + } + + static partial void InitializeNativeLibraryResolver() + { + NativeLibrary.SetDllImportResolver(typeof(CoreInterop).Assembly, (libraryName, assembly, searchPath) => + { + if (libraryName == LibraryName) + { + Debug.WriteLine($"Resolving {libraryName}. BaseDirectory: {AppContext.BaseDirectory}"); + + // Check if this build is platform specific. In that case all files are flattened + // in the one directory and there's no need to look in runtimes/-/native. + // e.g. `dotnet publish -r win-x64` copies all dependencies into the publish output folder. + var libraryPath = Path.Combine(AppContext.BaseDirectory, AddLibraryExtension(LibraryName)); + if (File.Exists(libraryPath)) + { + if (NativeLibrary.TryLoad(libraryPath, out var handle)) + { + Debug.WriteLine($"Loaded native library from: {libraryPath}"); + + if (IsWindows) + { + LoadOrtDllsIfInSameDir(AppContext.BaseDirectory); + } + + return handle; + } + } + + // TODO: figure out what is required on Android and iOS + // The nuget has an AAR and xcframework respectively so we need to determine what files are where + // after a build. + var os = IsWindows ? "win" : + IsLinux ? "linux" : + IsMacOS ? 
"osx" : + throw new PlatformNotSupportedException(); + + var arch = RuntimeInformation.OSArchitecture.ToString().ToLowerInvariant(); + var runtimePath = Path.Combine(AppContext.BaseDirectory, "runtimes", $"{os}-{arch}", "native"); + libraryPath = Path.Combine(runtimePath, AddLibraryExtension(LibraryName)); + + Debug.WriteLine($"Looking for native library at: {libraryPath}"); + + if (File.Exists(libraryPath)) + { + if (NativeLibrary.TryLoad(libraryPath, out var handle)) + { + Debug.WriteLine($"Loaded native library from: {libraryPath}"); + + if (IsWindows) + { + LoadOrtDllsIfInSameDir(runtimePath); + } + + return handle; + } + } + } + + return IntPtr.Zero; + }); + } +} + +#endif diff --git a/sdk/cs/src/Detail/CoreInterop.NetStandard.cs b/sdk/cs/src/Detail/CoreInterop.NetStandard.cs new file mode 100644 index 00000000..b96a258b --- /dev/null +++ b/sdk/cs/src/Detail/CoreInterop.NetStandard.cs @@ -0,0 +1,95 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +// Legacy native library loading and runtime-generated P/Invoke declarations for +// .NET Framework 4.6.2+ (Windows only). LoadLibraryW pre-loads the native DLL +// so [DllImport] can resolve it by name from the process module table. + +#if !NET7_0_OR_GREATER + +namespace Microsoft.AI.Foundry.Local.Detail; + +using System.Diagnostics; +using System.IO; +using System.Runtime.InteropServices; + +using static Microsoft.AI.Foundry.Local.Detail.ICoreInterop; + +internal partial class CoreInterop +{ + [DllImport(LibraryName, EntryPoint = "execute_command", CallingConvention = CallingConvention.Cdecl)] + private static unsafe extern void CoreExecuteCommand(RequestBuffer* request, ResponseBuffer* response); + + [DllImport(LibraryName, EntryPoint = "execute_command_with_callback", CallingConvention = CallingConvention.Cdecl)] + private static unsafe extern void CoreExecuteCommandWithCallback(RequestBuffer* nativeRequest, + ResponseBuffer* nativeResponse, + nint callbackPtr, + nint userData); + + [DllImport(LibraryName, EntryPoint = "execute_command_with_binary", CallingConvention = CallingConvention.Cdecl)] + private static unsafe extern void CoreExecuteCommandWithBinary(StreamingRequestBuffer* nativeRequest, + ResponseBuffer* nativeResponse); + + [DllImport(LibraryName, EntryPoint = "audio_stream_start", CallingConvention = CallingConvention.Cdecl)] + private static unsafe extern void CoreAudioStreamStart(RequestBuffer* request, ResponseBuffer* response); + + [DllImport(LibraryName, EntryPoint = "audio_stream_push", CallingConvention = CallingConvention.Cdecl)] + private static unsafe extern void CoreAudioStreamPush(StreamingRequestBuffer* request, ResponseBuffer* response); + + [DllImport(LibraryName, EntryPoint = "audio_stream_stop", CallingConvention = CallingConvention.Cdecl)] + private static unsafe extern void CoreAudioStreamStop(RequestBuffer* request, ResponseBuffer* response); + + [DllImport("kernel32", SetLastError = true, CharSet = CharSet.Unicode)] + private static extern IntPtr LoadLibraryW(string path); + + private static bool TryLoadNativeLibrary(string path, out IntPtr handle) + { + handle = LoadLibraryW(path); + return handle != IntPtr.Zero; + } + + static partial void InitializeNativeLibraryResolver() + { + if (!IsWindows) + { + throw new PlatformNotSupportedException( + "The 
netstandard2.0 build is only supported on .NET Framework 4.6.2+ (Windows). " + + "Use the net8.0 build for cross-platform support."); + } + + // Pre-load the native library using the same path probing as the net8.0 resolver. + // Once loaded, [DllImport] finds it by name in the process module table. + var libraryPath = Path.Combine(AppContext.BaseDirectory, AddLibraryExtension(LibraryName)); + Debug.WriteLine($"Resolving {LibraryName}. BaseDirectory: {AppContext.BaseDirectory}"); + + if (File.Exists(libraryPath)) + { + if (TryLoadNativeLibrary(libraryPath, out _)) + { + Debug.WriteLine($"Loaded native library from: {libraryPath}"); + LoadOrtDllsIfInSameDir(AppContext.BaseDirectory); + return; + } + } + + var arch = RuntimeInformation.OSArchitecture.ToString().ToLowerInvariant(); + var runtimePath = Path.Combine(AppContext.BaseDirectory, "runtimes", $"win-{arch}", "native"); + libraryPath = Path.Combine(runtimePath, AddLibraryExtension(LibraryName)); + + Debug.WriteLine($"Looking for native library at: {libraryPath}"); + + if (File.Exists(libraryPath)) + { + if (TryLoadNativeLibrary(libraryPath, out _)) + { + Debug.WriteLine($"Loaded native library from: {libraryPath}"); + LoadOrtDllsIfInSameDir(runtimePath); + } + } + } +} + +#endif diff --git a/sdk/cs/src/Detail/CoreInterop.WinML.cs b/sdk/cs/src/Detail/CoreInterop.WinML.cs new file mode 100644 index 00000000..1a9ebd13 --- /dev/null +++ b/sdk/cs/src/Detail/CoreInterop.WinML.cs @@ -0,0 +1,24 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +// WinML build variant: injects Bootstrap parameter for Windows App Runtime initialization. + +#if IS_WINML + +namespace Microsoft.AI.Foundry.Local.Detail; + +internal partial class CoreInterop +{ + partial void PrepareWinMLBootstrap(CoreInteropRequest request) + { + if (!request.Params.ContainsKey("Bootstrap")) + { + request.Params["Bootstrap"] = "true"; + } + } +} + +#endif diff --git a/sdk/cs/src/Detail/CoreInterop.cs b/sdk/cs/src/Detail/CoreInterop.cs index b88f5597..7239a48e 100644 --- a/sdk/cs/src/Detail/CoreInterop.cs +++ b/sdk/cs/src/Detail/CoreInterop.cs @@ -1,4 +1,4 @@ -// -------------------------------------------------------------------------------------------------------------------- +// -------------------------------------------------------------------------------------------------------------------- // // Copyright (c) Microsoft. All rights reserved. // @@ -17,121 +17,47 @@ internal partial class CoreInterop : ICoreInterop { // TODO: Android and iOS may need special handling. See ORT C# NativeMethods.shared.cs internal const string LibraryName = "Microsoft.AI.Foundry.Local.Core"; + private readonly ILogger _logger; - private static string AddLibraryExtension(string name) => - RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? $"{name}.dll" : - RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ? $"{name}.so" : - RuntimeInformation.IsOSPlatform(OSPlatform.OSX) ? 
$"{name}.dylib" : - throw new PlatformNotSupportedException(); +#if NET5_0_OR_GREATER + private static readonly bool IsWindows = OperatingSystem.IsWindows(); + private static readonly bool IsLinux = OperatingSystem.IsLinux(); + private static readonly bool IsMacOS = OperatingSystem.IsMacOS(); +#else + private static readonly bool IsWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows); + private static readonly bool IsLinux = RuntimeInformation.IsOSPlatform(OSPlatform.Linux); + private static readonly bool IsMacOS = RuntimeInformation.IsOSPlatform(OSPlatform.OSX); +#endif private static IntPtr genaiLibHandle = IntPtr.Zero; private static IntPtr ortLibHandle = IntPtr.Zero; + private static readonly NativeCallbackFn handleCallbackDelegate = HandleCallback; - // we need to manually load ORT and ORT GenAI dlls on Windows to ensure - // a) we're using the libraries we think we are - // b) that dependencies are resolved correctly as the dlls may not be in the default load path. - // it's a 'Try' as we can't do anything else if it fails as the dlls may be available somewhere else. - private static void LoadOrtDllsIfInSameDir(string path) - { - var genaiLibName = AddLibraryExtension("onnxruntime-genai"); - var ortLibName = AddLibraryExtension("onnxruntime"); - var genaiPath = Path.Combine(path, genaiLibName); - var ortPath = Path.Combine(path, ortLibName); - - // need to load ORT first as the winml GenAI library redirects and tries to load a winml onnxruntime.dll, - // which will not have the EPs we expect/require. if/when we don't bundle our own onnxruntime.dll we need to - // revisit this. - var loadedOrt = NativeLibrary.TryLoad(ortPath, out ortLibHandle); - var loadedGenAI = NativeLibrary.TryLoad(genaiPath, out genaiLibHandle); + [UnmanagedFunctionPointer(CallingConvention.Cdecl)] + private unsafe delegate void ExecuteCommandDelegate(RequestBuffer* req, ResponseBuffer* resp); -#if DEBUG - Console.WriteLine($"Loaded ORT:{loadedOrt} handle={ortLibHandle}"); - Console.WriteLine($"Loaded GenAI: {loadedGenAI} handle={genaiLibHandle}"); -#endif + internal class CallbackHelper + { + public CallbackFn Callback { get; } + public Exception? Exception { get; set; } + public CallbackHelper(CallbackFn callback) + { + Callback = callback ?? throw new ArgumentNullException(nameof(callback)); + } } static CoreInterop() { - NativeLibrary.SetDllImportResolver(typeof(CoreInterop).Assembly, (libraryName, assembly, searchPath) => - { - if (libraryName == LibraryName) - { -#if DEBUG - Console.WriteLine($"Resolving {libraryName}. BaseDirectory: {AppContext.BaseDirectory}"); -#endif - var isWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows); - - // check if this build is platform specific. in that case all files are flattened in the one directory - // and there's no need to look in runtimes/-/native. - // e.g. `dotnet publish -r win-x64` copies all the dependencies into the publish output folder. - var libraryPath = Path.Combine(AppContext.BaseDirectory, AddLibraryExtension(LibraryName)); - if (File.Exists(libraryPath)) - { - if (NativeLibrary.TryLoad(libraryPath, out var handle)) - { -#if DEBUG - Console.WriteLine($"Loaded native library from: {libraryPath}"); -#endif - if (isWindows) - { - LoadOrtDllsIfInSameDir(AppContext.BaseDirectory); - } - - return handle; - } - } - - // TODO: figure out what is required on Android and iOS - // The nuget has an AAR and xcframework respectively so we need to determine what files are where - // after a build. 
- var os = RuntimeInformation.IsOSPlatform(OSPlatform.Windows) ? "win" : - RuntimeInformation.IsOSPlatform(OSPlatform.Linux) ? "linux" : - RuntimeInformation.IsOSPlatform(OSPlatform.OSX) ? "osx" : - throw new PlatformNotSupportedException(); - - var arch = RuntimeInformation.OSArchitecture.ToString().ToLowerInvariant(); - var runtimePath = Path.Combine(AppContext.BaseDirectory, "runtimes", $"{os}-{arch}", "native"); - libraryPath = Path.Combine(runtimePath, AddLibraryExtension(LibraryName)); - -#if DEBUG - Console.WriteLine($"Looking for native library at: {libraryPath}"); -#endif - if (File.Exists(libraryPath)) - { - if (NativeLibrary.TryLoad(libraryPath, out var handle)) - { -#if DEBUG - Console.WriteLine($"Loaded native library from: {libraryPath}"); -#endif - if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) - { - LoadOrtDllsIfInSameDir(runtimePath); - } - - return handle; - } - } - } - - return IntPtr.Zero; - }); + InitializeNativeLibraryResolver(); } internal CoreInterop(Configuration config, ILogger logger) { - _logger = logger ?? throw new ArgumentNullException(nameof(logger)); var request = new CoreInteropRequest { Params = config.AsDictionary() }; - -#if IS_WINML - // WinML builds require bootstrapping the Windows App Runtime - if (!request.Params.ContainsKey("Bootstrap")) - { - request.Params["Bootstrap"] = "true"; - } -#endif + PrepareWinMLBootstrap(request); var response = ExecuteCommand("initialize", request); @@ -145,62 +71,43 @@ internal CoreInterop(Configuration config, ILogger logger) } } - // For testing. Skips the 'initialize' command so assumes this has been done previously. + /// For testing. Skips the 'initialize' command so assumes this has been done previously. internal CoreInterop(ILogger logger) { _logger = logger ?? throw new ArgumentNullException(nameof(logger)); } + // Implemented in CoreInterop.NetStandard.cs and CoreInterop.Modern.cs. + static partial void InitializeNativeLibraryResolver(); - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - private unsafe delegate void ExecuteCommandDelegate(RequestBuffer* req, ResponseBuffer* resp); + // Implemented in CoreInterop.WinML.cs when IS_WINML is defined; otherwise a no-op. 
+ partial void PrepareWinMLBootstrap(CoreInteropRequest request); - // Import the function from the AOT-compiled library - [LibraryImport(LibraryName, EntryPoint = "execute_command")] - [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] - private static unsafe partial void CoreExecuteCommand(RequestBuffer* request, ResponseBuffer* response); - - [LibraryImport(LibraryName, EntryPoint = "execute_command_with_callback")] - [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] - private static unsafe partial void CoreExecuteCommandWithCallback(RequestBuffer* nativeRequest, - ResponseBuffer* nativeResponse, - nint callbackPtr, // NativeCallbackFn pointer - nint userData); - - [LibraryImport(LibraryName, EntryPoint = "execute_command_with_binary")] - [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] - private static unsafe partial void CoreExecuteCommandWithBinary(StreamingRequestBuffer* nativeRequest, - ResponseBuffer* nativeResponse); - - // --- Audio streaming P/Invoke imports (kept for future dedicated entry points) --- - - [LibraryImport(LibraryName, EntryPoint = "audio_stream_start")] - [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] - private static unsafe partial void CoreAudioStreamStart( - RequestBuffer* request, - ResponseBuffer* response); - - [LibraryImport(LibraryName, EntryPoint = "audio_stream_push")] - [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] - private static unsafe partial void CoreAudioStreamPush( - StreamingRequestBuffer* request, - ResponseBuffer* response); - - [LibraryImport(LibraryName, EntryPoint = "audio_stream_stop")] - [UnmanagedCallConv(CallConvs = new[] { typeof(System.Runtime.CompilerServices.CallConvCdecl) })] - private static unsafe partial void CoreAudioStreamStop( - RequestBuffer* request, - ResponseBuffer* response); - - // helper to capture exceptions in callbacks - internal class CallbackHelper + private static string AddLibraryExtension(string name) => + IsWindows ? $"{name}.dll" : + IsLinux ? $"{name}.so" : + IsMacOS ? $"{name}.dylib" : + throw new PlatformNotSupportedException(); + + // We need to manually load ORT and ORT GenAI dlls on Windows to ensure + // a) we're using the libraries we think we are + // b) that dependencies are resolved correctly as the dlls may not be in the default load path. + // It's a 'Try' as we can't do anything else if it fails as the dlls may be available somewhere else. + private static void LoadOrtDllsIfInSameDir(string path) { - public CallbackFn Callback { get; } - public Exception? Exception { get; set; } // keep the first only. most likely it will be the same issue in all - public CallbackHelper(CallbackFn callback) - { - Callback = callback ?? throw new ArgumentNullException(nameof(callback)); - } + var genaiLibName = AddLibraryExtension("onnxruntime-genai"); + var ortLibName = AddLibraryExtension("onnxruntime"); + var genaiPath = Path.Combine(path, genaiLibName); + var ortPath = Path.Combine(path, ortLibName); + + // Need to load ORT first as the winml GenAI library redirects and tries to load a winml onnxruntime.dll, + // which will not have the EPs we expect/require. If/when we don't bundle our own onnxruntime.dll we need to + // revisit this. 
+ var loadedOrt = TryLoadNativeLibrary(ortPath, out ortLibHandle); + var loadedGenAI = TryLoadNativeLibrary(genaiPath, out genaiLibHandle); + + Debug.WriteLine($"Loaded ORT:{loadedOrt} handle={ortLibHandle}"); + Debug.WriteLine($"Loaded GenAI: {loadedGenAI} handle={genaiLibHandle}"); } private static int HandleCallback(nint data, int length, nint callbackHelper) @@ -242,16 +149,12 @@ private static int HandleCallback(nint data, int length, nint callbackHelper) } } - private static readonly NativeCallbackFn handleCallbackDelegate = HandleCallback; - - public Response ExecuteCommandImpl(string commandName, string? commandInput, CallbackFn? callback = null) { try { byte[] commandBytes = System.Text.Encoding.UTF8.GetBytes(commandName); - // Allocate unmanaged memory for the command bytes IntPtr commandPtr = Marshal.AllocHGlobal(commandBytes.Length); Marshal.Copy(commandBytes, 0, commandPtr, commandBytes.Length); @@ -265,7 +168,6 @@ public Response ExecuteCommandImpl(string commandName, string? commandInput, Marshal.Copy(inputBytes, 0, inputPtr.Value, inputBytes.Length); } - // Prepare request var request = new RequestBuffer { Command = commandPtr, @@ -303,7 +205,6 @@ public Response ExecuteCommandImpl(string commandName, string? commandInput, } else { - // Pin request/response on the stack unsafe { CoreExecuteCommand(&request, &response); @@ -323,7 +224,7 @@ public Response ExecuteCommandImpl(string commandName, string? commandInput, if (response.Error != IntPtr.Zero && response.ErrorLength > 0) { - result.Error = Marshal.PtrToStringUTF8(response.Error, response.ErrorLength)!; + result.Error = PtrToStringUtf8(response.Error, response.ErrorLength); _logger.LogDebug($"Input:{commandInput ?? "null"}"); _logger.LogDebug($"Command: {commandName} Error: {result.Error}"); } @@ -391,7 +292,7 @@ private Response MarshalResponse(ResponseBuffer response) if (response.Error != IntPtr.Zero && response.ErrorLength > 0) { - result.Error = Marshal.PtrToStringUTF8(response.Error, response.ErrorLength)!; + result.Error = PtrToStringUtf8(response.Error, response.ErrorLength); } Marshal.FreeHGlobal(response.Data); @@ -400,11 +301,6 @@ private Response MarshalResponse(ResponseBuffer response) return result; } - // --- Audio streaming managed implementations --- - // Route through the existing execute_command / execute_command_with_binary entry points. - // The Core handles audio_stream_start / audio_stream_stop as command cases in ExecuteCommandManaged, - // and audio_stream_push as a command case in ExecuteCommandWithBinaryManaged. - public Response StartAudioStream(CoreInteropRequest request) { return ExecuteCommand("audio_stream_start", request); @@ -465,4 +361,14 @@ public Response StopAudioStream(CoreInteropRequest request) return ExecuteCommand("audio_stream_stop", request); } + private static string PtrToStringUtf8(IntPtr ptr, int length) + { +#if NETSTANDARD2_0 + byte[] buffer = new byte[length]; + Marshal.Copy(ptr, buffer, 0, length); + return System.Text.Encoding.UTF8.GetString(buffer); +#else + return Marshal.PtrToStringUTF8(ptr, length)!; +#endif + } } diff --git a/sdk/cs/src/Detail/ICoreInterop.cs b/sdk/cs/src/Detail/ICoreInterop.cs index 74e2a8ad..5fe02204 100644 --- a/sdk/cs/src/Detail/ICoreInterop.cs +++ b/sdk/cs/src/Detail/ICoreInterop.cs @@ -19,10 +19,10 @@ internal record Response internal string? 
Error; } - public delegate void CallbackFn(string callbackData); + internal delegate void CallbackFn(string callbackData); [StructLayout(LayoutKind.Sequential)] - protected unsafe struct RequestBuffer + internal unsafe struct RequestBuffer { public nint Command; public int CommandLength; @@ -31,7 +31,7 @@ protected unsafe struct RequestBuffer } [StructLayout(LayoutKind.Sequential)] - protected unsafe struct ResponseBuffer + internal unsafe struct ResponseBuffer { public nint Data; public int DataLength; @@ -42,7 +42,7 @@ protected unsafe struct ResponseBuffer // native callback function signature // Return: 0 = continue, 1 = cancel [UnmanagedFunctionPointer(CallingConvention.Cdecl)] - protected unsafe delegate int NativeCallbackFn(nint data, int length, nint userData); + internal unsafe delegate int NativeCallbackFn(nint data, int length, nint userData); Response ExecuteCommand(string commandName, CoreInteropRequest? commandInput = null); Response ExecuteCommandWithCallback(string commandName, CoreInteropRequest? commandInput, CallbackFn callback); @@ -56,7 +56,7 @@ Task ExecuteCommandWithCallbackAsync(string commandName, CoreInteropRe // --- Audio streaming session support --- [StructLayout(LayoutKind.Sequential)] - protected unsafe struct StreamingRequestBuffer + internal unsafe struct StreamingRequestBuffer { public nint Command; public int CommandLength; diff --git a/sdk/cs/src/Detail/ModelLoadManager.cs b/sdk/cs/src/Detail/ModelLoadManager.cs index a157ff78..76b48539 100644 --- a/sdk/cs/src/Detail/ModelLoadManager.cs +++ b/sdk/cs/src/Detail/ModelLoadManager.cs @@ -109,7 +109,7 @@ private async Task WebListLoadedModelAsync(CancellationToken? ct = nul $"{response.ReasonPhrase}"); } - var content = await response.Content.ReadAsStringAsync(ct ?? CancellationToken.None).ConfigureAwait(false); + var content = await response.Content.ReadAsStringAsync().ConfigureAwait(false); _logger.LogDebug("Loaded models json from {WebService}: {Data}", _externalServiceUrl, content); var typeInfo = JsonSerializationContext.Default.StringArray; var modelList = JsonSerializer.Deserialize(content, typeInfo); @@ -138,7 +138,7 @@ private async Task WebLoadModelAsync(string modelId, CancellationToken? ct = nul $"{response.ReasonPhrase}"); } - var content = await response.Content.ReadAsStringAsync(ct ?? CancellationToken.None).ConfigureAwait(false); + var content = await response.Content.ReadAsStringAsync().ConfigureAwait(false); _logger.LogInformation("Model {ModelId} loaded successfully from {WebService}: {Message}", modelId, _externalServiceUrl, content); } @@ -156,7 +156,7 @@ private async Task WebUnloadModelAsync(string modelId, CancellationToken? ct = n $"{response.ReasonPhrase}"); } - var content = await response.Content.ReadAsStringAsync(ct ?? 
CancellationToken.None).ConfigureAwait(false); + var content = await response.Content.ReadAsStringAsync().ConfigureAwait(false); _logger.LogInformation("Model {ModelId} unloaded successfully from {WebService}: {Message}", modelId, _externalServiceUrl, content); } diff --git a/sdk/cs/src/FoundryLocalException.cs b/sdk/cs/src/FoundryLocalException.cs index d6e606c9..dae5ef04 100644 --- a/sdk/cs/src/FoundryLocalException.cs +++ b/sdk/cs/src/FoundryLocalException.cs @@ -6,7 +6,6 @@ namespace Microsoft.AI.Foundry.Local; using System; -using System.Diagnostics; using Microsoft.Extensions.Logging; @@ -22,14 +21,12 @@ public FoundryLocalException(string message, Exception innerException) : base(me internal FoundryLocalException(string message, ILogger logger) : base(message) { - Debug.Assert(logger != null); logger.LogError(message); } internal FoundryLocalException(string message, Exception innerException, ILogger logger) : base(message, innerException) { - Debug.Assert(logger != null); logger.LogError(innerException, message); } } diff --git a/sdk/cs/src/FoundryLocalManager.cs b/sdk/cs/src/FoundryLocalManager.cs index 10b51285..b014850f 100644 --- a/sdk/cs/src/FoundryLocalManager.cs +++ b/sdk/cs/src/FoundryLocalManager.cs @@ -286,7 +286,7 @@ private EpInfo[] DiscoverEpsImpl() return Array.Empty(); } - return JsonSerializer.Deserialize(data, JsonSerializationContext.Default.EpInfoArray) + return JsonSerializer.Deserialize(data!, JsonSerializationContext.Default.EpInfoArray) ?? Array.Empty(); } diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj index df8fc2cf..384b4415 100644 --- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj +++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj @@ -1,134 +1,132 @@  - - Microsoft AI Foundry Local - Microsoft Foundry Local SDK - Microsoft - Microsoft Corporation - © Microsoft Corporation. All rights reserved. 
- LICENSE.txt - https://github.com/microsoft/Foundry-Local - Microsoft AI Foundry Local SDK for .NET - Microsoft AI Foundry SDK - README.md - https://github.com/microsoft/Foundry-Local - git - - net9.0 - win-x64;win-arm64;linux-x64;linux-arm64;osx-arm64 - - true - False - enable - True - True - enable - - - true - snupkg - true - - - false - win-x64;win-arm64 - - - - - $([System.DateTime]::Now.ToString("yyyyMMddHHmmss")) - 0.5.0-dev.local.$(BuildTimestamp) - - - - true - true - true - - - $(DefineConstants);IS_WINDOWS - $(DefineConstants);IS_OSX - $(DefineConstants);IS_LINUX - - - - - - - - - - - - - - - - - - - - - - - - - - - Microsoft AI Foundry Local for WinML - Microsoft Foundry Local SDK for WinML - Microsoft.AI.Foundry.Local.WinML - Microsoft.AI.Foundry.Local.WinML - $(DefineConstants);IS_WINML - net9.0-windows10.0.26100.0 - win-x64;win-arm64 - - 10.0.17763.0 - - - $(NoWarn);CsWinRT1028 - - - - - <_DepsVersionsPath Condition="'$(UseWinML)' == 'true'">$(MSBuildThisFileDirectory)..\..\deps_versions_winml.json - <_DepsVersionsPath Condition="'$(UseWinML)' != 'true'">$(MSBuildThisFileDirectory)..\..\deps_versions.json - <_DepsVersionsJson>$([System.IO.File]::ReadAllText('$(_DepsVersionsPath)')) - $(FoundryLocalCoreVersion) - $([System.Text.RegularExpressions.Regex]::Match('$(_DepsVersionsJson)', '"nuget"\s*:\s*"([^"]+)"').Groups[1].Value) - $([System.Text.RegularExpressions.Regex]::Match('$(_DepsVersionsJson)', '"nuget"\s*:\s*"([^"]+)"').Groups[1].Value) - - - - True - - - True - - - - - $(NoWarn);NU1604 - - - - - - - - - - + + Microsoft AI Foundry Local + Microsoft Foundry Local SDK + Microsoft + Microsoft Corporation + © Microsoft Corporation. All rights reserved. + + LICENSE.txt + https://github.com/microsoft/Foundry-Local + Microsoft AI Foundry Local SDK for .NET + Microsoft AI Foundry SDK + README.md + https://github.com/microsoft/Foundry-Local + git + + net8.0;netstandard2.0 + true + enable + True + enable + latest + + true + true + + + $(NoWarn);NU1604 + + + false + + + + + + net8.0-windows10.0.26100.0 + win-x64;win-arm64 + 10.0.17763.0 + + true + $(DefineConstants);IS_WINML + + Microsoft AI Foundry Local for WinML + Microsoft Foundry Local SDK for WinML + Microsoft.AI.Foundry.Local.WinML + Microsoft.AI.Foundry.Local.WinML + + + $(NoWarn);CsWinRT1028 + + + + + $([System.DateTime]::Now.ToString("yyyyMMddHHmmss")) + 0.5.0-dev.local.$(BuildTimestamp) + + + + + + + + + + + + + + + + + + + + + + <_DepsVersionsPath Condition="'$(UseWinML)' == 'true'">$(MSBuildThisFileDirectory)..\..\deps_versions_winml.json + <_DepsVersionsJson>$([System.IO.File]::ReadAllText('$(_DepsVersionsPath)')) + $([System.Text.RegularExpressions.Regex]::Match('$(_DepsVersionsJson)', '"nuget"\s*:\s*"([^"]+)"').Groups[1].Value) + + + + <_DepsVersionsPath>$(MSBuildThisFileDirectory)..\..\deps_versions.json + <_DepsVersionsJson>$([System.IO.File]::ReadAllText('$(_DepsVersionsPath)')) + $([System.Text.RegularExpressions.Regex]::Match('$(_DepsVersionsJson)', '"nuget"\s*:\s*"([^"]+)"').Groups[1].Value) + + + + + + + + + + + + + + + + + + + + + + + diff --git a/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs b/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs index 6da4d076..5e9d1d48 100644 --- a/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs +++ b/sdk/cs/src/OpenAI/LiveAudioTranscriptionClient.cs @@ -217,7 +217,7 @@ private async Task PushLoopAsync(CancellationToken ct) { try { - var transcription = LiveAudioTranscriptionResponse.FromJson(response.Data); + var transcription = 
LiveAudioTranscriptionResponse.FromJson(response.Data!); if (!string.IsNullOrEmpty(transcription.Content?[0]?.Text)) { _outputChannel?.Writer.TryWrite(transcription); diff --git a/sdk/cs/src/Utils.cs b/sdk/cs/src/Utils.cs index 8300a967..09338497 100644 --- a/sdk/cs/src/Utils.cs +++ b/sdk/cs/src/Utils.cs @@ -41,12 +41,9 @@ internal static T CallWithExceptionHandling(Func func, string errorMsg, IL } // we ignore OperationCanceledException to allow proper cancellation propagation // this also covers TaskCanceledException since it derives from OperationCanceledException - catch (Exception ex) when (ex is not OperationCanceledException) + catch (Exception ex) + when (ex is not OperationCanceledException and not FoundryLocalException) { - if (ex is FoundryLocalException) - { - throw; - } throw new FoundryLocalException(errorMsg, ex, logger); } } diff --git a/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs b/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs index 5c4cc8d6..0a048811 100644 --- a/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/AudioClientTests.cs @@ -10,6 +10,7 @@ namespace Microsoft.AI.Foundry.Local.Tests; using System.Threading.Tasks; +[SkipUnlessIntegration] internal sealed class AudioClientTests { private static IModel? model; @@ -22,7 +23,7 @@ public static async Task Setup() var model = await catalog.GetModelAsync("whisper-tiny").ConfigureAwait(false); await Assert.That(model).IsNotNull(); - await model.LoadAsync().ConfigureAwait(false); + await model!.LoadAsync().ConfigureAwait(false); await Assert.That(await model.IsLoadedAsync()).IsTrue(); AudioClientTests.model = model; diff --git a/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs b/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs index 7e70c683..21578147 100644 --- a/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/ChatCompletionsTests.cs @@ -13,6 +13,7 @@ namespace Microsoft.AI.Foundry.Local.Tests; using Betalgo.Ranul.OpenAI.ObjectModels.ResponseModels; using Betalgo.Ranul.OpenAI.ObjectModels.SharedModels; +[SkipUnlessIntegration] internal sealed class ChatCompletionsTests { private static IModel? model; diff --git a/sdk/cs/test/FoundryLocal.Tests/ConfigurationTests.cs b/sdk/cs/test/FoundryLocal.Tests/ConfigurationTests.cs new file mode 100644 index 00000000..fd69c337 --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/ConfigurationTests.cs @@ -0,0 +1,195 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. 
+// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.Tests; + +using System.Collections.Generic; + +internal sealed class ConfigurationTests +{ + [Test] + public async Task AsDictionary_RequiredAppName_IsIncluded() + { + var config = new Configuration { AppName = "TestApp" }; + var dict = config.AsDictionary(); + + await Assert.That(dict["AppName"]).IsEqualTo("TestApp"); + } + + [Test] + public async Task AsDictionary_DefaultLogLevel_IsWarning() + { + var config = new Configuration { AppName = "TestApp" }; + var dict = config.AsDictionary(); + + await Assert.That(dict["LogLevel"]).IsEqualTo("Warning"); + } + + [Test] + public async Task AsDictionary_CustomLogLevel_IsMapped() + { + var config = new Configuration { AppName = "TestApp", LogLevel = LogLevel.Debug }; + var dict = config.AsDictionary(); + + await Assert.That(dict["LogLevel"]).IsEqualTo("Debug"); + } + + [Test] + public async Task AsDictionary_NullAppName_Throws() + { + var config = new Configuration { AppName = null! }; + + await Assert.That(() => config.AsDictionary()).Throws(); + } + + [Test] + public async Task AsDictionary_EmptyAppName_Throws() + { + var config = new Configuration { AppName = string.Empty }; + + await Assert.That(() => config.AsDictionary()).Throws(); + } + + [Test] + public async Task AsDictionary_OptionalModelCacheDir_IncludedWhenSet() + { + var config = new Configuration { AppName = "TestApp", ModelCacheDir = "/tmp/models" }; + var dict = config.AsDictionary(); + + await Assert.That(dict).ContainsKey("ModelCacheDir"); + await Assert.That(dict["ModelCacheDir"]).IsEqualTo("/tmp/models"); + } + + [Test] + public async Task AsDictionary_OptionalModelCacheDir_OmittedWhenNull() + { + var config = new Configuration { AppName = "TestApp" }; + var dict = config.AsDictionary(); + + await Assert.That(dict.ContainsKey("ModelCacheDir")).IsFalse(); + } + + [Test] + public async Task AsDictionary_OptionalLogsDir_IncludedWhenSet() + { + var config = new Configuration { AppName = "TestApp", LogsDir = "/tmp/logs" }; + var dict = config.AsDictionary(); + + await Assert.That(dict["LogsDir"]).IsEqualTo("/tmp/logs"); + } + + [Test] + public async Task AsDictionary_OptionalAppDataDir_IncludedWhenSet() + { + var config = new Configuration { AppName = "TestApp", AppDataDir = "/tmp/appdata" }; + var dict = config.AsDictionary(); + + await Assert.That(dict["AppDataDir"]).IsEqualTo("/tmp/appdata"); + } + + [Test] + public async Task AsDictionary_WebServiceUrls_IncludedWhenSet() + { + var config = new Configuration + { + AppName = "TestApp", + Web = new Configuration.WebService { Urls = "http://localhost:5000" } + }; + var dict = config.AsDictionary(); + + await Assert.That(dict["WebServiceUrls"]).IsEqualTo("http://localhost:5000"); + } + + [Test] + public async Task AsDictionary_WebServiceUrls_OmittedWhenNull() + { + var config = new Configuration + { + AppName = "TestApp", + Web = new Configuration.WebService() + }; + var dict = config.AsDictionary(); + + await Assert.That(dict.ContainsKey("WebServiceUrls")).IsFalse(); + } + + [Test] + public async Task AsDictionary_AdditionalSettings_AreMerged() + { + var config = new Configuration + { + AppName = "TestApp", + AdditionalSettings = new Dictionary + { + { "CustomKey", "CustomValue" }, + { "AnotherKey", "AnotherValue" } + } + }; + var dict = config.AsDictionary(); + + await Assert.That(dict["CustomKey"]).IsEqualTo("CustomValue"); + await 
Assert.That(dict["AnotherKey"]).IsEqualTo("AnotherValue"); + } + + [Test] + public async Task AsDictionary_AdditionalSettings_EmptyKeysSkipped() + { + var config = new Configuration + { + AppName = "TestApp", + AdditionalSettings = new Dictionary + { + { "ValidKey", "Value" } + } + }; + var dict = config.AsDictionary(); + + // Should have AppName, LogLevel, ValidKey — no empty keys + await Assert.That(dict).HasCount().EqualTo(3); + } + + [Test] + public async Task AsDictionary_AdditionalSettings_CanOverrideBuiltInKeys() + { + var config = new Configuration + { + AppName = "TestApp", + AdditionalSettings = new Dictionary + { + { "LogLevel", "OverriddenValue" } + } + }; + var dict = config.AsDictionary(); + + // AdditionalSettings uses indexer so it overwrites + await Assert.That(dict["LogLevel"]).IsEqualTo("OverriddenValue"); + } + + [Test] + public async Task Validate_ValidConfig_DoesNotThrow() + { + var config = new Configuration { AppName = "TestApp" }; + + // Should complete without throwing + await Assert.That(() => config.Validate()).ThrowsNothing(); + } + + [Test] + public async Task Validate_EmptyAppName_Throws() + { + var config = new Configuration { AppName = string.Empty }; + + await Assert.That(() => config.Validate()).Throws(); + } + + [Test] + public async Task Validate_InvalidCharsInAppName_Throws() + { + var config = new Configuration { AppName = "invalid/name" }; + + await Assert.That(() => config.Validate()).Throws(); + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/CoreInteropUtilTests.cs b/sdk/cs/test/FoundryLocal.Tests/CoreInteropUtilTests.cs new file mode 100644 index 00000000..0c9052c0 --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/CoreInteropUtilTests.cs @@ -0,0 +1,163 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.Tests; + +using System; +using System.Reflection; +using System.Runtime.InteropServices; + +using Microsoft.AI.Foundry.Local.Detail; + +internal sealed class CoreInteropUtilTests +{ + private static readonly Type CoreInteropType = typeof(CoreInterop); + + [Test] + public async Task AddLibraryExtension_ReturnsCorrectExtensionForCurrentOS() + { + var method = CoreInteropType.GetMethod( + "AddLibraryExtension", + BindingFlags.NonPublic | BindingFlags.Static); + + await Assert.That(method).IsNotNull(); + + var result = (string)method!.Invoke(null, new object[] { "TestLib" })!; + + if (Utils.IsWindows) + { + await Assert.That(result).IsEqualTo("TestLib.dll"); + } + else if (Utils.IsLinux) + { + await Assert.That(result).IsEqualTo("TestLib.so"); + } + else if (Utils.IsMacOS) + { + await Assert.That(result).IsEqualTo("TestLib.dylib"); + } + } + + [Test] + public async Task PtrToStringUtf8_RoundTripsUtf8String() + { + var method = CoreInteropType.GetMethod( + "PtrToStringUtf8", + BindingFlags.NonPublic | BindingFlags.Static); + + await Assert.That(method).IsNotNull(); + + var testString = "Hello, 世界! 
🌍"; + var bytes = System.Text.Encoding.UTF8.GetBytes(testString); + var ptr = Marshal.AllocHGlobal(bytes.Length); + + try + { + Marshal.Copy(bytes, 0, ptr, bytes.Length); + var result = (string)method!.Invoke(null, new object[] { ptr, bytes.Length })!; + + await Assert.That(result).IsEqualTo(testString); + } + finally + { + Marshal.FreeHGlobal(ptr); + } + } + + [Test] + public async Task PtrToStringUtf8_EmptyString_ReturnsEmpty() + { + var method = CoreInteropType.GetMethod( + "PtrToStringUtf8", + BindingFlags.NonPublic | BindingFlags.Static); + + await Assert.That(method).IsNotNull(); + + var bytes = System.Text.Encoding.UTF8.GetBytes(string.Empty); + var ptr = Marshal.AllocHGlobal(1); // allocate at least 1 byte + + try + { + var result = (string)method!.Invoke(null, new object[] { ptr, 0 })!; + + await Assert.That(result).IsEqualTo(string.Empty); + } + finally + { + Marshal.FreeHGlobal(ptr); + } + } + + [Test] + public async Task CallbackHelper_NullCallback_ThrowsArgumentNullException() + { + await Assert.That(() => new CoreInterop.CallbackHelper(null!)) + .Throws(); + } + + [Test] + public async Task CallbackHelper_ValidCallback_StoresCallback() + { + ICoreInterop.CallbackFn fn = _ => { }; + var helper = new CoreInterop.CallbackHelper(fn); + + await Assert.That(helper.Callback).IsSameReferenceAs(fn); + await Assert.That(helper.Exception).IsNull(); + } + + [Test] + public async Task CallbackHelper_Exception_CanBeSetAndRetrieved() + { + ICoreInterop.CallbackFn fn = _ => { }; + var helper = new CoreInterop.CallbackHelper(fn); + var ex = new InvalidOperationException("test"); + + helper.Exception = ex; + + await Assert.That(helper.Exception).IsSameReferenceAs(ex); + } + + [Test] + public async Task OsPlatformHelpers_AtLeastOneIsTrue() + { + var isWindows = (bool)CoreInteropType + .GetField("IsWindows", BindingFlags.NonPublic | BindingFlags.Static)! + .GetValue(null)!; + var isLinux = (bool)CoreInteropType + .GetField("IsLinux", BindingFlags.NonPublic | BindingFlags.Static)! + .GetValue(null)!; + var isMacOS = (bool)CoreInteropType + .GetField("IsMacOS", BindingFlags.NonPublic | BindingFlags.Static)! + .GetValue(null)!; + + await Assert.That(isWindows || isLinux || isMacOS).IsTrue(); + } + + [Test] + public async Task OsPlatformHelpers_ConsistentWithExpectedValues() + { + var isWindows = (bool)CoreInteropType + .GetField("IsWindows", BindingFlags.NonPublic | BindingFlags.Static)! + .GetValue(null)!; + var isLinux = (bool)CoreInteropType + .GetField("IsLinux", BindingFlags.NonPublic | BindingFlags.Static)! + .GetValue(null)!; + var isMacOS = (bool)CoreInteropType + .GetField("IsMacOS", BindingFlags.NonPublic | BindingFlags.Static)! + .GetValue(null)!; + + await Assert.That(isWindows).IsEqualTo(Utils.IsWindows); + await Assert.That(isLinux).IsEqualTo(Utils.IsLinux); + await Assert.That(isMacOS).IsEqualTo(Utils.IsMacOS); + } + + [Test] + public async Task LibraryName_IsExpectedValue() + { + var name = CoreInterop.LibraryName; + await Assert.That(name).IsEqualTo("Microsoft.AI.Foundry.Local.Core"); + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs b/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs index 56c70769..1b4019ba 100644 --- a/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs +++ b/sdk/cs/test/FoundryLocal.Tests/EndToEnd.cs @@ -8,6 +8,7 @@ namespace Microsoft.AI.Foundry.Local.Tests; using System; using System.Threading.Tasks; +[SkipUnlessIntegration] internal sealed class EndToEnd { // end-to-end using real catalog. run manually as a standalone test as it alters the model cache. 
diff --git a/sdk/cs/test/FoundryLocal.Tests/ExceptionHandlingTests.cs b/sdk/cs/test/FoundryLocal.Tests/ExceptionHandlingTests.cs
new file mode 100644
index 00000000..206724bc
--- /dev/null
+++ b/sdk/cs/test/FoundryLocal.Tests/ExceptionHandlingTests.cs
@@ -0,0 +1,134 @@
+// --------------------------------------------------------------------------------------------------------------------
+//
+// Copyright (c) Microsoft. All rights reserved.
+//
+// --------------------------------------------------------------------------------------------------------------------
+
+namespace Microsoft.AI.Foundry.Local.Tests;
+
+using System;
+using System.Threading.Tasks;
+
+using Microsoft.Extensions.Logging;
+
+using Moq;
+
+using SdkUtils = Microsoft.AI.Foundry.Local.Utils;
+
+internal sealed class ExceptionHandlingTests
+{
+    private readonly Mock<ILogger> _mockLogger = new();
+
+    [Test]
+    public async Task CallWithExceptionHandling_SuccessfulFunc_ReturnsResult()
+    {
+        var result = SdkUtils.CallWithExceptionHandling(() => 42, "error msg", _mockLogger.Object);
+
+        await Assert.That(result).IsEqualTo(42);
+    }
+
+    [Test]
+    public async Task CallWithExceptionHandling_GenericException_WrapsInFoundryLocalException()
+    {
+        var original = new InvalidOperationException("boom");
+
+        await Assert.That(() =>
+            SdkUtils.CallWithExceptionHandling(
+                () => throw original,
+                "wrapper message",
+                _mockLogger.Object))
+            .Throws<FoundryLocalException>()
+            .WithMessage("wrapper message");
+    }
+
+    [Test]
+    public async Task CallWithExceptionHandling_FoundryLocalException_RethrowsDirectly()
+    {
+        var original = new FoundryLocalException("direct error");
+
+        try
+        {
+            SdkUtils.CallWithExceptionHandling(
+                () => throw original,
+                "should not wrap",
+                _mockLogger.Object);
+        }
+        catch (FoundryLocalException ex)
+        {
+            // Should be the SAME exception, not a new wrapper
+            await Assert.That(ex).IsSameReferenceAs(original);
+            return;
+        }
+
+        throw new Exception("Expected FoundryLocalException was not thrown");
+    }
+
+    [Test]
+    public async Task CallWithExceptionHandling_OperationCanceledException_PropagatesUnchanged()
+    {
+        var original = new OperationCanceledException("cancelled");
+
+        try
+        {
+            SdkUtils.CallWithExceptionHandling(
+                () => throw original,
+                "should not wrap",
+                _mockLogger.Object);
+        }
+        catch (OperationCanceledException ex)
+        {
+            await Assert.That(ex).IsSameReferenceAs(original);
+            return;
+        }
+
+        throw new Exception("Expected OperationCanceledException was not thrown");
+    }
+
+    [Test]
+    public async Task CallWithExceptionHandling_TaskCanceledException_PropagatesUnchanged()
+    {
+        var original = new TaskCanceledException("task cancelled");
+
+        try
+        {
+            SdkUtils.CallWithExceptionHandling(
+                () => throw original,
+                "should not wrap",
+                _mockLogger.Object);
+        }
+        catch (TaskCanceledException ex)
+        {
+            await Assert.That(ex).IsSameReferenceAs(original);
+            return;
+        }
+
+        throw new Exception("Expected TaskCanceledException was not thrown");
+    }
+
+    [Test]
+    public Task CallWithExceptionHandling_GenericException_LogsViaLogger()
+    {
+        try
+        {
+            SdkUtils.CallWithExceptionHandling(
+                () => throw new InvalidOperationException("boom"),
+                "error context",
+                _mockLogger.Object);
+        }
+        catch (FoundryLocalException)
+        {
+            // Expected — now verify the logger was called
+        }
+
+        _mockLogger.Verify(
+            l => l.Log(
+                LogLevel.Error,
+                It.IsAny<EventId>(),
+                It.Is<It.IsAnyType>((v, _) => v.ToString()!.Contains("error context")),
+                It.IsAny<Exception>(),
+                It.IsAny<Func<It.IsAnyType, Exception?, string>>()),
+            Times.Once);
+
+        return Task.CompletedTask;
+    }
+}
diff --git a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalExceptionTests.cs b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalExceptionTests.cs
new file mode 100644
index 00000000..5f084de1
--- /dev/null
+++ b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalExceptionTests.cs
@@ -0,0 +1,71 @@
+// --------------------------------------------------------------------------------------------------------------------
+//
+// Copyright (c) Microsoft. All rights reserved.
+//
+// --------------------------------------------------------------------------------------------------------------------
+
+namespace Microsoft.AI.Foundry.Local.Tests;
+
+using Microsoft.Extensions.Logging;
+
+using Moq;
+
+internal sealed class FoundryLocalExceptionTests
+{
+    [Test]
+    public async Task PublicCtor_MessageOnly_SetsMessage()
+    {
+        var ex = new FoundryLocalException("test error");
+
+        await Assert.That(ex.Message).IsEqualTo("test error");
+        await Assert.That(ex.InnerException).IsNull();
+    }
+
+    [Test]
+    public async Task PublicCtor_WithInnerException_PropagatesBoth()
+    {
+        var inner = new InvalidOperationException("inner");
+        var ex = new FoundryLocalException("outer", inner);
+
+        await Assert.That(ex.Message).IsEqualTo("outer");
+        await Assert.That(ex.InnerException).IsSameReferenceAs(inner);
+    }
+
+    [Test]
+    public async Task InternalCtor_WithLogger_LogsError()
+    {
+        var mockLogger = new Mock<ILogger>();
+        var ex = new FoundryLocalException("logged error", mockLogger.Object);
+
+        await Assert.That(ex.Message).IsEqualTo("logged error");
+
+        mockLogger.Verify(
+            l => l.Log(
+                LogLevel.Error,
+                It.IsAny<EventId>(),
+                It.Is<It.IsAnyType>((v, _) => v.ToString()!.Contains("logged error")),
+                null,
+                It.IsAny<Func<It.IsAnyType, Exception?, string>>()),
+            Times.Once);
+    }
+
+    [Test]
+    public async Task InternalCtor_WithInnerExceptionAndLogger_LogsWithInner()
+    {
+        var mockLogger = new Mock<ILogger>();
+        var inner = new InvalidOperationException("inner cause");
+        var ex = new FoundryLocalException("logged outer", inner, mockLogger.Object);
+
+        await Assert.That(ex.Message).IsEqualTo("logged outer");
+        await Assert.That(ex.InnerException).IsSameReferenceAs(inner);
+
+        mockLogger.Verify(
+            l => l.Log(
+                LogLevel.Error,
+                It.IsAny<EventId>(),
+                It.Is<It.IsAnyType>((v, _) => v.ToString()!.Contains("logged outer")),
+                inner,
+                It.IsAny<Func<It.IsAnyType, Exception?, string>>()),
+            Times.Once);
+    }
+}
diff --git a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs
index cd7e7793..2aa29d18 100644
--- a/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs
+++ b/sdk/cs/test/FoundryLocal.Tests/FoundryLocalManagerTest.cs
@@ -11,6 +11,7 @@ namespace Microsoft.AI.Foundry.Local.Tests;
 using Microsoft.AI.Foundry.Local;
 using Microsoft.AI.Foundry.Local.Detail;
 
+[SkipUnlessIntegration]
 public class FoundryLocalManagerTests
 {
     [Test]
diff --git a/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs b/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs
index 2bc39d68..3c56fc51 100644
--- a/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs
+++ b/sdk/cs/test/FoundryLocal.Tests/LiveAudioTranscriptionTests.cs
@@ -133,6 +133,7 @@ public async Task CoreErrorResponse_TryParse_TransientError()
     // --- Session state guard tests ---
 
     [Test]
+    [SkipUnlessIntegration]
     public async Task AppendAsync_BeforeStart_Throws()
     {
         await using var session = new LiveAudioTranscriptionSession("test-model");
@@ -152,6 +153,7 @@ public async Task AppendAsync_BeforeStart_Throws()
     }
 
     [Test]
+    [SkipUnlessIntegration]
     public async Task GetTranscriptionStream_BeforeStart_Throws()
     {
         await using var session = new LiveAudioTranscriptionSession("test-model");
@@ -175,13 +177,9 @@ public async Task
GetTranscriptionStream_BeforeStart_Throws() // --- E2E streaming test with synthetic PCM audio --- [Test] + [SkipUnlessIntegration] public async Task LiveStreaming_E2E_WithSyntheticPCM_ReturnsValidResponse() { - // Skip if FoundryLocalManager is not initialized (no Core DLL / no models) - if (!FoundryLocalManager.IsInitialized) - { - return; - } var manager = FoundryLocalManager.Instance; var catalog = await manager.GetCatalogAsync(); diff --git a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj index fe0dfcd2..5280da42 100644 --- a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj +++ b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj @@ -1,27 +1,33 @@  - net9.0 + + net462;net8.0 + net8.0 enable enable - false - true + latest false - false - - $(NETCoreSdkRuntimeIdentifier) + + + win-x64 - net9.0-windows10.0.26100.0 + net8.0-windows10.0.26100.0; 10.0.17763.0 None + $(NETCoreSdkRuntimeIdentifier) @@ -42,9 +48,9 @@ runtime; build; native; contentfiles; analyzers; buildtransitive all - - - + + + diff --git a/sdk/cs/test/FoundryLocal.Tests/OperatingSystemConverter.cs b/sdk/cs/test/FoundryLocal.Tests/OperatingSystemConverter.cs index ad81942f..b661f9b0 100644 --- a/sdk/cs/test/FoundryLocal.Tests/OperatingSystemConverter.cs +++ b/sdk/cs/test/FoundryLocal.Tests/OperatingSystemConverter.cs @@ -4,14 +4,23 @@ // // -------------------------------------------------------------------------------------------------------------------- +using System.Runtime.InteropServices; + public static class OperatingSystemConverter { public static string ToJson(string s) { - if (!OperatingSystem.IsWindows()) + if (!IsWindows()) { s = s.Replace("\r\n", "\n"); } return s; } + + private static bool IsWindows() => +#if NET5_0_OR_GREATER + OperatingSystem.IsWindows(); +#else + RuntimeInformation.IsOSPlatform(OSPlatform.Windows); +#endif } diff --git a/sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationAttribute.cs b/sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationAttribute.cs new file mode 100644 index 00000000..7125c765 --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationAttribute.cs @@ -0,0 +1,20 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local.Tests; + +using TUnit.Core; + +using System.Threading.Tasks; + +public class SkipUnlessIntegrationAttribute() + : SkipAttribute("Integration test infrastructure not available. See LOCAL_MODEL_TESTING.md for setup instructions.") +{ + public override Task ShouldSkip(TestRegisteredContext context) + { + return Task.FromResult(!Utils.IntegrationTestsAvailable); + } +} diff --git a/sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationTests.cs b/sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationTests.cs new file mode 100644 index 00000000..1d882bdb --- /dev/null +++ b/sdk/cs/test/FoundryLocal.Tests/SkipUnlessIntegrationTests.cs @@ -0,0 +1,34 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. 
+//
+// --------------------------------------------------------------------------------------------------------------------
+
+namespace Microsoft.AI.Foundry.Local.Tests;
+
+using System.Threading.Tasks;
+
+internal sealed class SkipUnlessIntegrationTests
+{
+    [Test]
+    public async Task ShouldSkip_ReturnsTrue_WhenIntegrationTestsNotAvailable()
+    {
+        // IntegrationTestsAvailable is set during assembly init.
+        // Without test-data-shared, it will be false and ShouldSkip returns true.
+        // With test-data-shared, it will be true and ShouldSkip returns false.
+        // Either way, it should be the inverse of IntegrationTestsAvailable.
+
+        var attr = new SkipUnlessIntegrationAttribute();
+        var shouldSkip = await attr.ShouldSkip(null!);
+
+        await Assert.That(shouldSkip).IsEqualTo(!Utils.IntegrationTestsAvailable);
+    }
+
+    [Test]
+    public async Task SkipReason_ContainsSetupInstructions()
+    {
+        var attr = new SkipUnlessIntegrationAttribute();
+
+        await Assert.That(attr.Reason).Contains("LOCAL_MODEL_TESTING.md");
+    }
+}
diff --git a/sdk/cs/test/FoundryLocal.Tests/Utils.cs b/sdk/cs/test/FoundryLocal.Tests/Utils.cs
index a289011b..f8969853 100644
--- a/sdk/cs/test/FoundryLocal.Tests/Utils.cs
+++ b/sdk/cs/test/FoundryLocal.Tests/Utils.cs
@@ -9,14 +9,13 @@ namespace Microsoft.AI.Foundry.Local.Tests;
 using System;
 using System.Collections.Generic;
 using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
 using System.Text.Json;
 
 using Microsoft.AI.Foundry.Local.Detail;
 using Microsoft.Extensions.Configuration;
 using Microsoft.Extensions.Logging;
 
-using Microsoft.VisualStudio.TestPlatform.TestHost;
-
 using Moq;
 
 internal static class Utils
@@ -46,7 +45,7 @@ public static void AssemblyInit(AssemblyHookContext _)
                 .SetMinimumLevel(LogLevel.Debug);
         });
 
-        ILogger logger = loggerFactory.CreateLogger();
+        ILogger logger = loggerFactory.CreateLogger("FoundryLocal.Tests");
 
         // Read configuration from appsettings.Test.json
         logger.LogDebug("Reading configuration from appsettings.Test.json");
@@ -70,35 +69,68 @@ public static void AssemblyInit(AssemblyHookContext _)
             testDataSharedPath = Path.GetFullPath(Path.Combine(GetRepoRoot(), "..", testModelCacheDirName));
         }
 
-        logger.LogInformation("Using test model cache directory: {testDataSharedPath}", testDataSharedPath);
+        logger.LogInformation("Using test model cache directory: {TestDataSharedPath}", testDataSharedPath);
 
         if (!Directory.Exists(testDataSharedPath))
         {
-            // need to ensure there's a user visible error when running in VS.
-            logger.LogCritical($"Test model cache directory does not exist: {testDataSharedPath}");
-            throw new DirectoryNotFoundException($"Test model cache directory does not exist: {testDataSharedPath}");
-
+            var message = $"WARNING: Test model cache directory does not exist: {testDataSharedPath}\n" +
+                          "Integration tests will be skipped. See LOCAL_MODEL_TESTING.md for setup instructions.";
+            logger.LogWarning(
+                "Test model cache directory does not exist: {Path}. " +
+                "Integration tests will be skipped. See LOCAL_MODEL_TESTING.md for setup instructions.",
+                testDataSharedPath);
+            Console.Error.WriteLine(message);
+            IntegrationTestsAvailable = false;
+            return;
         }
 
-        var config = new Configuration
+        try
         {
-            AppName = "FoundryLocalSdkTest",
-            LogLevel = Local.LogLevel.Debug,
-            Web = new Configuration.WebService
+            var config = new Configuration
             {
-                Urls = "http://127.0.0.1:0"
-            },
-            ModelCacheDir = testDataSharedPath,
-            LogsDir = Path.Combine(GetRepoRoot(), "sdk", "cs", "logs")
-        };
+                AppName = "FoundryLocalSdkTest",
+                LogLevel = Local.LogLevel.Debug,
+                Web = new Configuration.WebService
+                {
+                    Urls = "http://127.0.0.1:0"
+                },
+                ModelCacheDir = testDataSharedPath,
+                LogsDir = Path.Combine(GetRepoRoot(), "sdk", "cs", "logs")
+            };
 
-        // Initialize the singleton instance.
-        FoundryLocalManager.CreateAsync(config, logger).GetAwaiter().GetResult();
+            // Initialize the singleton instance.
+            FoundryLocalManager.CreateAsync(config, logger).GetAwaiter().GetResult();
 
-        // standalone instance for testing individual components that skips the 'initialize' command
-        CoreInterop = new CoreInterop(logger);
+            // standalone instance for testing individual components that skips the 'initialize' command
+            CoreInterop = new CoreInterop(logger);
+            IntegrationTestsAvailable = true;
+        }
+        catch (Exception ex)
+        {
+            logger.LogWarning(
+                ex,
+                "Failed to initialize integration test infrastructure. " +
+                "Integration tests will be skipped. See LOCAL_MODEL_TESTING.md for setup instructions.");
+            Console.Error.WriteLine(
+                "WARNING: Failed to initialize integration test infrastructure. " +
+                "Integration tests will be skipped. See LOCAL_MODEL_TESTING.md for setup instructions.\n" +
+                ex.Message);
+            IntegrationTestsAvailable = false;
+        }
     }
 
+    internal static bool IntegrationTestsAvailable { get; private set; }
+
+#if NET5_0_OR_GREATER
+    internal static readonly bool IsWindows = OperatingSystem.IsWindows();
+    internal static readonly bool IsLinux = OperatingSystem.IsLinux();
+    internal static readonly bool IsMacOS = OperatingSystem.IsMacOS();
+#else
+    internal static readonly bool IsWindows = RuntimeInformation.IsOSPlatform(OSPlatform.Windows);
+    internal static readonly bool IsLinux = RuntimeInformation.IsOSPlatform(OSPlatform.Linux);
+    internal static readonly bool IsMacOS = RuntimeInformation.IsOSPlatform(OSPlatform.OSX);
+#endif
+
     internal static ICoreInterop CoreInterop { get; private set; } = default!;
 
     internal static Mock<ILogger> CreateCapturingLoggerMock(List<string> sink)

From 0579d4293a7d23de2fd74393cd7a1a4a0ca2377c Mon Sep 17 00:00:00 2001
From: Prathik Rao
Date: Fri, 24 Apr 2026 15:05:34 -0700
Subject: [PATCH 57/83] fixes netstandard2.0 bug in embeddings request response types (#675)

```
E:\_work\1\s\sdk\cs\src\OpenAI\EmbeddingRequestResponseTypes.cs(60,16): error CS8604: Possible null reference argument for parameter 'responseData' in 'EmbeddingCreateResponse EmbeddingRequestResponseExtensions.ToEmbeddingResponse(string responseData, ILogger logger)'. [E:\_work\1\s\sdk\cs\src\Microsoft.AI.Foundry.Local.csproj::TargetFramework=netstandard2.0]
```

Under netstandard2.0, string.IsNullOrWhiteSpace carries no [NotNullWhen(false)] annotation, so the null check does not narrow the nullable type. The argument has to be response.Data! to suppress the error; the check above already guarantees the value is neither null nor whitespace.
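For illustration, a minimal self-contained sketch of the narrowing difference (the names below are illustrative, not the SDK source):

```csharp
#nullable enable
using System;

static class NarrowingDemo
{
    static string Demo(string? data)
    {
        if (string.IsNullOrWhiteSpace(data))
        {
            throw new InvalidOperationException("null or empty response data");
        }

        // net8.0: IsNullOrWhiteSpace is annotated [NotNullWhen(false)], so flow
        // analysis already treats 'data' as non-null here and the '!' is redundant.
        // netstandard2.0: the reference assemblies predate nullable annotations,
        // so 'data' is still considered possibly null and passing it without
        // the '!' raises CS8604.
        return Use(data!);
    }

    static string Use(string value) => value.ToUpperInvariant();
}
```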
Co-authored-by: Prathik Rao --- sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs | 2 +- sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs b/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs index 0939fbaa..b8db7543 100644 --- a/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs +++ b/sdk/cs/src/OpenAI/EmbeddingRequestResponseTypes.cs @@ -57,7 +57,7 @@ internal static EmbeddingCreateResponse ToEmbeddingResponse(this ICoreInterop.Re throw new FoundryLocalException("Embeddings command returned null or empty response data"); } - return response.Data.ToEmbeddingResponse(logger); + return response.Data!.ToEmbeddingResponse(logger); } internal static EmbeddingCreateResponse ToEmbeddingResponse(this string responseData, ILogger logger) diff --git a/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs b/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs index a5123cb0..bed3a8ea 100644 --- a/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/EmbeddingClientTests.cs @@ -8,6 +8,7 @@ namespace Microsoft.AI.Foundry.Local.Tests; using System.Threading.Tasks; +[SkipUnlessIntegration] internal sealed class EmbeddingClientTests { private static IModel? model; From c641118e5048e28ac1a8c133fb3a8d0bead48fc1 Mon Sep 17 00:00:00 2001 From: "Nat Kershaw (MSFT)" Date: Fri, 24 Apr 2026 16:45:47 -0700 Subject: [PATCH 58/83] Add sections for SDK and CLI (#667) Clarify license terms for CLI and SDK --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- LICENSE | 35 +++++++++++++++++++++++++++++++---- README.md | 5 ++++- licenses/deepseek.md | 11 ----------- licenses/mistral.md | 1 - licenses/phi.md | 9 --------- 5 files changed, 35 insertions(+), 26 deletions(-) delete mode 100644 licenses/deepseek.md delete mode 100644 licenses/mistral.md delete mode 100644 licenses/phi.md diff --git a/LICENSE b/LICENSE index 9d947464..2bd4b6fe 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,34 @@ -MICROSOFT SOFTWARE LICENSE TERMS - -FOUNDRY LOCAL - +FOUNDRY LOCAL LICENSE TERMS +=========================== + +FOUNDRY LOCAL SDK +----------------- + +MIT License + +Copyright (c) Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +FOUNDRY LOCAL CLI +----------------- + These license terms are an agreement between you and Microsoft Corporation (or based on where you live, one of its affiliates). They apply to the software named above. 
The terms also apply to any Microsoft services or updates for the software, except to the extent those have additional terms. IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. diff --git a/README.md b/README.md index 881bb656..ae720698 100644 --- a/README.md +++ b/README.md @@ -233,4 +233,7 @@ Foundry Local supports Windows, macOS (Apple silicon), and Linux. ## ⚖️ License -Foundry Local is licensed under the Microsoft Software License Terms. For more details, read the [LICENSE](LICENSE) file. +Foundry Local SDK is licensed under the MIT license. For more details, see the [LICENSE](LICENSE) file. +Foundry Local CLI is licensed under the Microsoft Software License Terms. For more details, read the [LICENSE](LICENSE) file. + +Individual models made available for use with Foundry Local are subject to each model's license terms, notices, and use restrictions. Refer to the model's documentation or download/listing page for the applicable terms before using or redistributing a model. diff --git a/licenses/deepseek.md b/licenses/deepseek.md deleted file mode 100644 index ad5aba62..00000000 --- a/licenses/deepseek.md +++ /dev/null @@ -1,11 +0,0 @@ -The DeepSeek R1 model is provided as a First Party Consumption Service and is not an Azure product. Your use of the DeepSeek R1 model is subject to the following license terms and must comply with the Acceptable Use Policy for Microsoft Online Services and the Microsoft Enterprise AI Services Code of Conduct. - -Copyright (c) 2023 DeepSeek - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/licenses/mistral.md b/licenses/mistral.md deleted file mode 100644 index 619ef4ec..00000000 --- a/licenses/mistral.md +++ /dev/null @@ -1 +0,0 @@ -This model is provided under the License Terms available at https://mistral.ai/terms-of-use/. \ No newline at end of file diff --git a/licenses/phi.md b/licenses/phi.md deleted file mode 100644 index 829ab086..00000000 --- a/licenses/phi.md +++ /dev/null @@ -1,9 +0,0 @@ -Microsoft. Copyright (c) Microsoft Corporation.
- -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED AS IS, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file From 984892ecefaab069f0af9a78fce1f6aa4e463f95 Mon Sep 17 00:00:00 2001 From: Rui Ren Date: Fri, 24 Apr 2026 18:48:30 -0700 Subject: [PATCH 59/83] Add Nemotron-ASR streaming inference to C++ SDK (#655) ## Add Nemotron-ASR streaming inference to C++ SDK ### Description Adds real-time audio streaming support to the Foundry Local C++ SDK, enabling live microphone-to-text transcription via ONNX Runtime GenAI's StreamingProcessor API (Nemotron ASR). This is the C++ port of C# PR https://github.com/microsoft/Foundry-Local/pull/485 with full feature parity. The existing AudioClient only supports file-based transcription. This PR introduces LiveAudioTranscriptionSession that accepts continuous PCM audio chunks (e.g., from a microphone) and returns partial/final transcription results as a synchronous generator.
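As a quick orientation for reviewers, here is a minimal consumption sketch of the new API (names taken from the headers in this diff; `audioClient`, `ReadMicChunk`, and `TranscribeLive` are assumed stand-ins for a real client and a 16 kHz/mono/16-bit PCM source):

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <vector>

#include "foundry_local.h"

// Drain a microphone-like source through the streaming session.
void TranscribeLive(foundry_local::OpenAIAudioClient& audioClient,
                    bool (*ReadMicChunk)(std::vector<uint8_t>&)) {
    auto session = audioClient.CreateLiveTranscriptionSession();
    session->Settings().sample_rate = 16000;  // defaults shown for clarity
    session->Settings().channels = 1;
    session->Settings().language = "en";
    session->Start();

    std::vector<uint8_t> chunk;
    bool open = true;
    while (open && ReadMicChunk(chunk)) {
        session->Append(chunk.data(), chunk.size());

        foundry_local::LiveAudioTranscriptionResponse result;
        switch (session->TryGetNext(result, std::chrono::milliseconds(100))) {
        case foundry_local::TranscriptionStatus::Result:
            std::cout << result.text << (result.is_final ? "\n" : "\r");
            break;
        case foundry_local::TranscriptionStatus::Timeout:
            break;  // nothing yet; keep feeding audio
        default:
            open = false;  // Closed or Error; stop the loop
            break;
        }
    }
    session->Stop();  // may surface one last final result via TryGetNext
}
```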
--------- Co-authored-by: ruiren_microsoft Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/cpp/CMakeLists.txt | 3 + sdk/cpp/include/foundry_local.h | 2 + sdk/cpp/include/openai/openai_audio_client.h | 6 + .../include/openai/openai_live_audio_client.h | 101 ++++++ .../include/openai/openai_live_audio_types.h | 50 +++ sdk/cpp/src/core.h | 50 ++- sdk/cpp/src/flcore_native.h | 11 + sdk/cpp/src/foundry_local_internal_core.h | 5 + sdk/cpp/src/openai_audio_client.cpp | 6 + sdk/cpp/src/openai_live_audio_client.cpp | 276 +++++++++++++++ sdk/cpp/src/openai_live_audio_types.cpp | 83 +++++ sdk/cpp/src/thread_safe_queue.h | 143 ++++++++ sdk/cpp/test/live_audio_test.cpp | 334 ++++++++++++++++++ sdk/cpp/test/mock_core.h | 13 + 14 files changed, 1082 insertions(+), 1 deletion(-) create mode 100644 sdk/cpp/include/openai/openai_live_audio_client.h create mode 100644 sdk/cpp/include/openai/openai_live_audio_types.h create mode 100644 sdk/cpp/src/openai_live_audio_client.cpp create mode 100644 sdk/cpp/src/openai_live_audio_types.cpp create mode 100644 sdk/cpp/src/thread_safe_queue.h create mode 100644 sdk/cpp/test/live_audio_test.cpp diff --git a/sdk/cpp/CMakeLists.txt b/sdk/cpp/CMakeLists.txt index 7e32b7fb..41f12c27 100644 --- a/sdk/cpp/CMakeLists.txt +++ b/sdk/cpp/CMakeLists.txt @@ -54,6 +54,8 @@ add_library(CppSdk STATIC src/catalog.cpp src/openai_chat_client.cpp src/openai_audio_client.cpp + src/openai_live_audio_types.cpp + src/openai_live_audio_client.cpp src/foundry_local_manager.cpp ) @@ -91,6 +93,7 @@ if (BUILD_TESTING) test/model_variant_test.cpp test/catalog_test.cpp test/client_test.cpp + test/live_audio_test.cpp ) target_include_directories(CppSdkTests diff --git a/sdk/cpp/include/foundry_local.h b/sdk/cpp/include/foundry_local.h index c16337e1..01b8b98d 100644 --- a/sdk/cpp/include/foundry_local.h +++ b/sdk/cpp/include/foundry_local.h @@ -16,3 +16,5 @@ #include "openai/openai_tool_types.h" #include "openai/openai_chat_client.h" #include "openai/openai_audio_client.h" +#include "openai/openai_live_audio_types.h" +#include "openai/openai_live_audio_client.h" diff --git a/sdk/cpp/include/openai/openai_audio_client.h b/sdk/cpp/include/openai/openai_audio_client.h index ac1ce719..c58fad1c 100644 --- a/sdk/cpp/include/openai/openai_audio_client.h +++ b/sdk/cpp/include/openai/openai_audio_client.h @@ -7,6 +7,7 @@ #include #include #include +#include #include @@ -22,6 +23,8 @@ namespace foundry_local { std::string text; }; + class LiveAudioTranscriptionSession; + class OpenAIAudioClient final { public: explicit OpenAIAudioClient(const IModel& model); @@ -34,6 +37,9 @@ namespace foundry_local { using StreamCallback = std::function; void TranscribeAudioStreaming(const std::filesystem::path& audioFilePath, const StreamCallback& onChunk) const; + /// Create a new live audio transcription session for streaming PCM audio. + std::unique_ptr CreateLiveTranscriptionSession() const; + private: OpenAIAudioClient(gsl::not_null core, std::string_view modelId, gsl::not_null logger); diff --git a/sdk/cpp/include/openai/openai_live_audio_client.h b/sdk/cpp/include/openai/openai_live_audio_client.h new file mode 100644 index 00000000..c65a2d63 --- /dev/null +++ b/sdk/cpp/include/openai/openai_live_audio_client.h @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "openai_live_audio_types.h" + +namespace foundry_local::Internal { + struct IFoundryLocalCore; + template <typename T> class ThreadSafeQueue; +} // namespace foundry_local::Internal + +namespace foundry_local { + class ILogger; + + class LiveAudioTranscriptionSession final { + public: + LiveAudioTranscriptionSession(gsl::not_null<Internal::IFoundryLocalCore*> core, + std::string modelId, + gsl::not_null<ILogger*> logger); + ~LiveAudioTranscriptionSession() noexcept; + + // Non-copyable, non-movable + LiveAudioTranscriptionSession(const LiveAudioTranscriptionSession&) = delete; + LiveAudioTranscriptionSession& operator=(const LiveAudioTranscriptionSession&) = delete; + LiveAudioTranscriptionSession(LiveAudioTranscriptionSession&&) = delete; + LiveAudioTranscriptionSession& operator=(LiveAudioTranscriptionSession&&) = delete; + + /// Mutable settings reference; only effective before Start(). + LiveAudioTranscriptionOptions& Settings() { return settings_; } + /// Read-only settings reference. + const LiveAudioTranscriptionOptions& Settings() const { return settings_; } + /// Settings that were active when Start() was called. + const LiveAudioTranscriptionOptions& ActiveSettings() const { return activeSettings_; } + + /// Begin the streaming session. Must be called before Append/TryAppend. + void Start(); + + /// Enqueue PCM audio data. Blocks if the push queue is full. + void Append(const uint8_t* pcmData, size_t length); + + /// Try to get the next transcription result within the given timeout. + TranscriptionStatus TryGetNext(LiveAudioTranscriptionResponse& result, + std::chrono::milliseconds timeout = std::chrono::seconds(5)); + + /// Signal the end of audio input and stop the session. + void Stop(); + + /// Returns the error message if the session is in an error state. + std::string GetErrorMessage() const; + + /// Returns true if the session has been started. + bool IsStarted() const; + + /// Returns true if the session has been stopped. + bool IsStopped() const; + + private: + enum class SessionState { + Created, + Starting, + Started, + Stopped + }; + + void PushWorkerLoop(); + void StopInternal(std::unique_lock<std::mutex>& lock); + + gsl::not_null<Internal::IFoundryLocalCore*> core_; + std::string modelId_; + gsl::not_null<ILogger*> logger_; + + LiveAudioTranscriptionOptions settings_; + LiveAudioTranscriptionOptions activeSettings_; + + mutable std::mutex mutex_; + SessionState state_ = SessionState::Created; + std::string sessionHandle_; + + using AudioChunk = std::vector<uint8_t>; + std::unique_ptr<Internal::ThreadSafeQueue<AudioChunk>> pushQueue_; + std::unique_ptr<Internal::ThreadSafeQueue<LiveAudioTranscriptionResponse>> resultQueue_; + + std::thread pushThread_; + std::string errorMessage_; + LiveAudioTranscriptionResponse finalResult_; + bool hasFinalResult_ = false; + }; + +} // namespace foundry_local diff --git a/sdk/cpp/include/openai/openai_live_audio_types.h b/sdk/cpp/include/openai/openai_live_audio_types.h new file mode 100644 index 00000000..d7d31f12 --- /dev/null +++ b/sdk/cpp/include/openai/openai_live_audio_types.h @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +#pragma once + +#include <optional> +#include <string> +#include <vector> + +namespace foundry_local { + + struct ContentPart { + std::string text; + std::string transcript; + }; + + struct LiveAudioTranscriptionResponse { + std::string text; + bool is_final = false; + std::optional<double> start_time; + std::optional<double> end_time; + std::vector<ContentPart> content; + + static LiveAudioTranscriptionResponse FromJson(const std::string& json); + }; + + struct LiveAudioTranscriptionOptions { + int sample_rate = 16000; + int channels = 1; + int bits_per_sample = 16; + std::optional<std::string> language; + int push_queue_capacity = 100; + }; + + struct CoreErrorResponse { + std::string code; + std::string message; + bool is_transient = false; + + static std::optional<CoreErrorResponse> TryParse(const std::string& error_string); + }; + + enum class TranscriptionStatus { + Result, + Timeout, + Closed, + Error + }; + +} // namespace foundry_local diff --git a/sdk/cpp/src/core.h b/sdk/cpp/src/core.h index 10feee5b..cc37ce9e 100644 --- a/sdk/cpp/src/core.h +++ b/sdk/cpp/src/core.h @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // -// Core DLL interop loads Microsoft.AI.Foundry.Local.Core.dll at runtime. +// Core DLL interop — loads Microsoft.AI.Foundry.Local.Core.dll at runtime. // Internal header, not part of the public API. #pragma once @@ -46,6 +46,7 @@ namespace foundry_local { module_.reset(); execCmd_ = nullptr; execCbCmd_ = nullptr; + execBinaryCmd_ = nullptr; freeResCmd_ = nullptr; } @@ -91,10 +92,55 @@ namespace foundry_local { return result; } + CoreResponse callWithBinary(std::string_view command, ILogger& logger, + const std::string* dataArgument, + const uint8_t* binaryData, size_t binaryDataLength) const override { + if (!module_ || !execBinaryCmd_ || !freeResCmd_) { + throw Exception("Core is not loaded. Cannot call command: " + std::string(command), logger); + } + + StreamingRequestBuffer request{}; + request.Command = command.empty() ?
nullptr : command.data(); + request.CommandLength = static_cast<int32_t>(command.size()); + + if (dataArgument && !dataArgument->empty()) { + request.Data = dataArgument->data(); + request.DataLength = static_cast<int32_t>(dataArgument->size()); + } + + if (binaryData && binaryDataLength > 0) { + if (binaryDataLength > static_cast<size_t>(INT32_MAX)) { + throw Exception("Binary data length exceeds maximum supported size (INT32_MAX).", logger); + } + request.BinaryData = binaryData; + request.BinaryDataLength = static_cast<int32_t>(binaryDataLength); + } + + ResponseBuffer response{}; + auto safeDeleter = [fn = freeResCmd_](ResponseBuffer* buf) { + if (fn) + fn(buf); + }; + std::unique_ptr<ResponseBuffer, decltype(safeDeleter)> responseGuard(&response, safeDeleter); + + execBinaryCmd_(&request, &response); + + CoreResponse result; + if (response.Error && response.ErrorLength > 0) { + result.error.assign(static_cast<const char*>(response.Error), response.ErrorLength); + return result; + } + if (response.Data && response.DataLength > 0) { + result.data.assign(static_cast<const char*>(response.Data), response.DataLength); + } + return result; + } + private: wil::unique_hmodule module_; execute_command_fn execCmd_{}; execute_command_with_callback_fn execCbCmd_{}; + execute_command_with_binary_fn execBinaryCmd_{}; free_response_fn freeResCmd_{}; void LoadFromPath(const std::filesystem::path& path) { @@ -105,6 +151,8 @@ execCmd_ = reinterpret_cast<execute_command_fn>(RequireProc(m.get(), "execute_command")); execCbCmd_ = reinterpret_cast<execute_command_with_callback_fn>( RequireProc(m.get(), "execute_command_with_callback")); + execBinaryCmd_ = reinterpret_cast<execute_command_with_binary_fn>( + RequireProc(m.get(), "execute_command_with_binary")); freeResCmd_ = reinterpret_cast<free_response_fn>(RequireProc(m.get(), "free_response")); module_ = std::move(m); diff --git a/sdk/cpp/src/flcore_native.h b/sdk/cpp/src/flcore_native.h index b0778116..d87baa09 100644 --- a/sdk/cpp/src/flcore_native.h +++ b/sdk/cpp/src/flcore_native.h @@ -26,14 +26,25 @@ extern "C" // Callback signature: void(*)(void* data, int length, void* userData) using UserCallbackFn = void(__cdecl*)(void*, int32_t, void*); + struct StreamingRequestBuffer { + const void* Command; + int32_t CommandLength; + const void* Data; + int32_t DataLength; + const void* BinaryData; + int32_t BinaryDataLength; + }; + // Exported function pointer types using execute_command_fn = void(__cdecl*)(RequestBuffer*, ResponseBuffer*); using execute_command_with_callback_fn = void(__cdecl*)(RequestBuffer*, ResponseBuffer*, void* /*callback*/, void* /*userData*/); + using execute_command_with_binary_fn = void(__cdecl*)(StreamingRequestBuffer*, ResponseBuffer*); using free_response_fn = void(__cdecl*)(ResponseBuffer*); static_assert(std::is_standard_layout<RequestBuffer>::value, "RequestBuffer must be standard layout"); static_assert(std::is_standard_layout<ResponseBuffer>::value, "ResponseBuffer must be standard layout"); + static_assert(std::is_standard_layout<StreamingRequestBuffer>::value, "StreamingRequestBuffer must be standard layout"); #pragma pack(pop) } diff --git a/sdk/cpp/src/foundry_local_internal_core.h b/sdk/cpp/src/foundry_local_internal_core.h index 1e5af79d..f6c2af77 100644 --- a/sdk/cpp/src/foundry_local_internal_core.h +++ b/sdk/cpp/src/foundry_local_internal_core.h @@ -31,6 +31,11 @@ namespace foundry_local { virtual CoreResponse call(std::string_view command, ILogger& logger, const std::string* dataArgument = nullptr, NativeCallbackFn callback = nullptr, void* data = nullptr) const = 0; + + virtual CoreResponse callWithBinary(std::string_view command, ILogger& logger, + const std::string* dataArgument, + const uint8_t* binaryData, size_t binaryDataLength) const
= 0; + + virtual void unload() = 0; }; diff --git a/sdk/cpp/src/openai_audio_client.cpp b/sdk/cpp/src/openai_audio_client.cpp index d4409d1f..42b1c6a6 100644 --- a/sdk/cpp/src/openai_audio_client.cpp +++ b/sdk/cpp/src/openai_audio_client.cpp @@ -16,6 +16,8 @@ #include "core_helpers.h" #include "logger.h" +#include "openai/openai_live_audio_client.h" + namespace foundry_local { OpenAIAudioClient::OpenAIAudioClient(gsl::not_null<Internal::IFoundryLocalCore*> core, std::string_view modelId, @@ -67,4 +69,8 @@ } } + std::unique_ptr<LiveAudioTranscriptionSession> OpenAIAudioClient::CreateLiveTranscriptionSession() const { + return std::make_unique<LiveAudioTranscriptionSession>(core_, modelId_, logger_); + } + } // namespace foundry_local diff --git a/sdk/cpp/src/openai_live_audio_client.cpp b/sdk/cpp/src/openai_live_audio_client.cpp new file mode 100644 index 00000000..51ea8be6 --- /dev/null +++ b/sdk/cpp/src/openai_live_audio_client.cpp @@ -0,0 +1,276 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#include +#include +#include +#include +#include +#include + +#include + +#include "openai/openai_live_audio_client.h" +#include "openai/openai_live_audio_types.h" +#include "foundry_local_internal_core.h" +#include "foundry_local_exception.h" +#include "core_interop_request.h" +#include "thread_safe_queue.h" +#include "logger.h" + +namespace foundry_local { + + LiveAudioTranscriptionSession::LiveAudioTranscriptionSession( + gsl::not_null<Internal::IFoundryLocalCore*> core, + std::string modelId, + gsl::not_null<ILogger*> logger) + : core_(core), modelId_(std::move(modelId)), logger_(logger) {} + + LiveAudioTranscriptionSession::~LiveAudioTranscriptionSession() noexcept { + try { + std::unique_lock lock(mutex_); + if (state_ == SessionState::Started) { + StopInternal(lock); + } + } + catch (...) { + // Suppress exceptions in destructor + } + } + + void LiveAudioTranscriptionSession::Start() { + std::unique_lock lock(mutex_); + if (state_ != SessionState::Created) { + throw Exception("Session has already been started.", *logger_); + } + + // Transition to Starting state before releasing lock for FFI call + state_ = SessionState::Starting; + activeSettings_ = settings_; + + // Validate queue capacity early + if (activeSettings_.push_queue_capacity <= 0) { + state_ = SessionState::Created; + throw Exception("push_queue_capacity must be greater than 0.", *logger_); + } + + // Build the start command + CoreInteropRequest req("audio_stream_start"); + req.AddParam("Model", modelId_); + req.AddParam("SampleRate", std::to_string(activeSettings_.sample_rate)); + req.AddParam("Channels", std::to_string(activeSettings_.channels)); + req.AddParam("BitsPerSample", std::to_string(activeSettings_.bits_per_sample)); + if (activeSettings_.language.has_value()) { + req.AddParam("Language", activeSettings_.language.value()); + } + std::string json = req.ToJson(); + + // Release lock during FFI call to avoid holding mutex across boundary + lock.unlock(); + + auto response = core_->call(req.Command(), *logger_, &json); + + lock.lock(); + + if (response.HasError()) { + state_ = SessionState::Created; + throw Exception("Failed to start audio stream: " + response.error, *logger_); + } + + sessionHandle_ = std::move(response.data); + if (sessionHandle_.empty()) { + state_ = SessionState::Created; + throw Exception("audio_stream_start returned an empty session handle.", *logger_); + } + + // Validate queue capacity + const size_t queueCapacity = static_cast<size_t>(activeSettings_.push_queue_capacity); + + // Create the queues + pushQueue_ = std::make_unique<Internal::ThreadSafeQueue<AudioChunk>>(queueCapacity); +
resultQueue_ = std::make_unique<Internal::ThreadSafeQueue<LiveAudioTranscriptionResponse>>(queueCapacity); + + state_ = SessionState::Started; + + // Start the push worker thread + pushThread_ = std::thread([this] { PushWorkerLoop(); }); + } + + void LiveAudioTranscriptionSession::Append(const uint8_t* pcmData, size_t length) { + { + std::lock_guard lock(mutex_); + if (state_ != SessionState::Started) { + throw Exception( + state_ == SessionState::Stopped + ? "Session has already been stopped." + : "Session is not started. Call Start() first.", + *logger_); + } + } + + AudioChunk chunk(pcmData, pcmData + length); + if (!pushQueue_->Push(std::move(chunk))) { + throw Exception("Failed to enqueue audio data: session is closed.", *logger_); + } + } + + TranscriptionStatus LiveAudioTranscriptionSession::TryGetNext(LiveAudioTranscriptionResponse& result, + std::chrono::milliseconds timeout) { + { + std::lock_guard lock(mutex_); + if (state_ != SessionState::Started && state_ != SessionState::Stopped) { + throw Exception("Session is not started. Call Start() first.", *logger_); + } + } + + auto status = resultQueue_->TryPop(result, timeout); + switch (status) { + case Internal::DequeueStatus::Item: + return TranscriptionStatus::Result; + case Internal::DequeueStatus::Timeout: + return TranscriptionStatus::Timeout; + case Internal::DequeueStatus::Closed: { + // Return the final result from Stop() if available + std::lock_guard lock(mutex_); + if (hasFinalResult_) { + result = std::move(finalResult_); + hasFinalResult_ = false; + return TranscriptionStatus::Result; + } + return TranscriptionStatus::Closed; + } + case Internal::DequeueStatus::Error: + return TranscriptionStatus::Error; + default: + return TranscriptionStatus::Error; + } + } + + void LiveAudioTranscriptionSession::Stop() { + std::unique_lock lock(mutex_); + if (state_ != SessionState::Started) { + return; + } + StopInternal(lock); + } + + void LiveAudioTranscriptionSession::StopInternal(std::unique_lock<std::mutex>& lock) { + state_ = SessionState::Stopped; + std::string handle = sessionHandle_; + + // Close the push queue to signal the worker thread to finish + if (pushQueue_) { + pushQueue_->Close(); + } + + // Close the result queue to unblock any blocked Push() in the worker thread, + // preventing a deadlock when joining below.
+ if (resultQueue_) { + resultQueue_->Close(); + } + + lock.unlock(); + + // Wait for the push thread to finish (safe now — worker is unblocked) + if (pushThread_.joinable()) { + pushThread_.join(); + } + + // Send stop command to core + CoreInteropRequest req("audio_stream_stop"); + req.AddParam("SessionHandle", handle); + std::string json = req.ToJson(); + + auto response = core_->call(req.Command(), *logger_, &json); + + // Store the final result or error for retrieval via TryGetNext + if (response.HasError()) { + if (resultQueue_) { + resultQueue_->CloseWithError("audio_stream_stop failed: " + response.error); + } + } + else if (!response.data.empty()) { + try { + finalResult_ = LiveAudioTranscriptionResponse::FromJson(response.data); + hasFinalResult_ = true; + } + catch (const std::exception& e) { + logger_->Log(LogLevel::Warning, + std::string("Failed to parse final transcription response: ") + e.what()); + } + } + + lock.lock(); + } + + void LiveAudioTranscriptionSession::PushWorkerLoop() { + AudioChunk chunk; + while (true) { + auto status = pushQueue_->Pop(chunk); + if (status != Internal::DequeueStatus::Item) { + break; + } + + std::string handle; + { + std::lock_guard lock(mutex_); + handle = sessionHandle_; + } + + CoreInteropRequest req("audio_stream_push"); + req.AddParam("SessionHandle", handle); + std::string json = req.ToJson(); + + auto response = core_->callWithBinary(req.Command(), *logger_, &json, + chunk.data(), chunk.size()); + + if (response.HasError()) { + auto coreError = CoreErrorResponse::TryParse(response.error); + std::string msg = + (coreError.has_value() && !coreError->message.empty()) + ? coreError->message + : response.error; + + logger_->Log(LogLevel::Error, "audio_stream_push failed: " + msg); + pushQueue_->Close(); + resultQueue_->CloseWithError(msg); + + std::lock_guard lock(mutex_); + errorMessage_ = std::move(msg); + return; + } + + // Parse the response as a transcription result if there is data + if (!response.data.empty()) { + try { + auto result = LiveAudioTranscriptionResponse::FromJson(response.data); + if (!resultQueue_->TryPush(std::move(result))) { + logger_->Log( + LogLevel::Warning, + "Dropping transcription result because the result queue is full."); + } + } + catch (const std::exception& e) { + logger_->Log(LogLevel::Warning, + std::string("Failed to parse transcription response: ") + e.what()); + } + } + } + } + + std::string LiveAudioTranscriptionSession::GetErrorMessage() const { + std::lock_guard lock(mutex_); + return errorMessage_; + } + + bool LiveAudioTranscriptionSession::IsStarted() const { + std::lock_guard lock(mutex_); + return state_ == SessionState::Started; + } + + bool LiveAudioTranscriptionSession::IsStopped() const { + std::lock_guard lock(mutex_); + return state_ == SessionState::Stopped; + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/openai_live_audio_types.cpp b/sdk/cpp/src/openai_live_audio_types.cpp new file mode 100644 index 00000000..f781a992 --- /dev/null +++ b/sdk/cpp/src/openai_live_audio_types.cpp @@ -0,0 +1,83 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +#include +#include + +#include + +#include "openai/openai_live_audio_types.h" + +namespace foundry_local { + + LiveAudioTranscriptionResponse LiveAudioTranscriptionResponse::FromJson(const std::string& json) { + auto j = nlohmann::json::parse(json); + LiveAudioTranscriptionResponse response; + + if (j.contains("text") && j["text"].is_string()) { + response.text = j["text"].get<std::string>(); + } + + if (j.contains("is_final") && j["is_final"].is_boolean()) { + response.is_final = j["is_final"].get<bool>(); + } + else if (j.contains("isFinal") && j["isFinal"].is_boolean()) { + response.is_final = j["isFinal"].get<bool>(); + } + + if (j.contains("start_time") && j["start_time"].is_number()) { + response.start_time = j["start_time"].get<double>(); + } + else if (j.contains("startTime") && j["startTime"].is_number()) { + response.start_time = j["startTime"].get<double>(); + } + + if (j.contains("end_time") && j["end_time"].is_number()) { + response.end_time = j["end_time"].get<double>(); + } + else if (j.contains("endTime") && j["endTime"].is_number()) { + response.end_time = j["endTime"].get<double>(); + } + + if (j.contains("content") && j["content"].is_array()) { + for (const auto& item : j["content"]) { + ContentPart part; + if (item.contains("text") && item["text"].is_string()) { + part.text = item["text"].get<std::string>(); + } + if (item.contains("transcript") && item["transcript"].is_string()) { + part.transcript = item["transcript"].get<std::string>(); + } + response.content.push_back(std::move(part)); + } + } + + return response; + } + + std::optional<CoreErrorResponse> CoreErrorResponse::TryParse(const std::string& error_string) { + try { + auto j = nlohmann::json::parse(error_string); + CoreErrorResponse response; + + if (j.contains("code") && j["code"].is_string()) { + response.code = j["code"].get<std::string>(); + } + if (j.contains("message") && j["message"].is_string()) { + response.message = j["message"].get<std::string>(); + } + if (j.contains("is_transient") && j["is_transient"].is_boolean()) { + response.is_transient = j["is_transient"].get<bool>(); + } + else if (j.contains("isTransient") && j["isTransient"].is_boolean()) { + response.is_transient = j["isTransient"].get<bool>(); + } + + return response; + } + catch (const nlohmann::json::exception&) { + return std::nullopt; + } + } + +} // namespace foundry_local diff --git a/sdk/cpp/src/thread_safe_queue.h b/sdk/cpp/src/thread_safe_queue.h new file mode 100644 index 00000000..c6ea7446 --- /dev/null +++ b/sdk/cpp/src/thread_safe_queue.h @@ -0,0 +1,143 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace foundry_local::Internal { + + enum class DequeueStatus { + Item, + Timeout, + Closed, + Error + }; + + /// A bounded, thread-safe queue with graceful close/error semantics. + template <typename T> class ThreadSafeQueue final { + public: + explicit ThreadSafeQueue(size_t capacity) : capacity_(capacity) {} + + /// Blocking push. Waits until space is available or the queue is closed. + /// Returns true if the item was enqueued, false if the queue was closed. + bool Push(T item) { + std::unique_lock lock(mutex_); + notFull_.wait(lock, [this] { return queue_.size() < capacity_ || closed_; }); + if (closed_) { + return false; + } + queue_.push(std::move(item)); + notEmpty_.notify_one(); + return true; + } + + /// Non-blocking push. Returns true if the item was enqueued.
+ bool TryPush(T item) { + std::lock_guard lock(mutex_); + if (closed_ || queue_.size() >= capacity_) { + return false; + } + queue_.push(std::move(item)); + notEmpty_.notify_one(); + return true; + } + + /// Timed push. Returns true if the item was enqueued within the timeout. + bool TryPushFor(T item, std::chrono::milliseconds timeout) { + std::unique_lock lock(mutex_); + if (!notFull_.wait_for(lock, timeout, [this] { return queue_.size() < capacity_ || closed_; })) { + return false; + } + if (closed_) { + return false; + } + queue_.push(std::move(item)); + notEmpty_.notify_one(); + return true; + } + + /// Blocking pop. Waits until an item is available or the queue is closed/errored. + DequeueStatus Pop(T& item) { + std::unique_lock lock(mutex_); + notEmpty_.wait(lock, [this] { return !queue_.empty() || closed_ || hasError_; }); + if (hasError_ && queue_.empty()) { + return DequeueStatus::Error; + } + if (queue_.empty()) { + return DequeueStatus::Closed; + } + item = std::move(queue_.front()); + queue_.pop(); + notFull_.notify_one(); + return DequeueStatus::Item; + } + + /// Timed pop. Returns the dequeue status. + DequeueStatus TryPop(T& item, std::chrono::milliseconds timeout) { + std::unique_lock lock(mutex_); + if (!notEmpty_.wait_for(lock, timeout, [this] { return !queue_.empty() || closed_ || hasError_; })) { + return DequeueStatus::Timeout; + } + if (hasError_ && queue_.empty()) { + return DequeueStatus::Error; + } + if (queue_.empty()) { + return DequeueStatus::Closed; + } + item = std::move(queue_.front()); + queue_.pop(); + notFull_.notify_one(); + return DequeueStatus::Item; + } + + /// Close the queue gracefully. No more items can be pushed. + void Close() { + std::lock_guard lock(mutex_); + closed_ = true; + notEmpty_.notify_all(); + notFull_.notify_all(); + } + + /// Close the queue with an error message. + void CloseWithError(std::string errorMessage) { + std::lock_guard lock(mutex_); + closed_ = true; + hasError_ = true; + errorMessage_ = std::move(errorMessage); + notEmpty_.notify_all(); + notFull_.notify_all(); + } + + bool IsClosed() const { + std::lock_guard lock(mutex_); + return closed_; + } + + bool HasError() const { + std::lock_guard lock(mutex_); + return hasError_; + } + + std::string GetErrorMessage() const { + std::lock_guard lock(mutex_); + return errorMessage_; + } + + private: + const size_t capacity_; + std::queue<T> queue_; + mutable std::mutex mutex_; + std::condition_variable notEmpty_; + std::condition_variable notFull_; + bool closed_ = false; + bool hasError_ = false; + std::string errorMessage_; + }; + +} // namespace foundry_local::Internal diff --git a/sdk/cpp/test/live_audio_test.cpp b/sdk/cpp/test/live_audio_test.cpp new file mode 100644 index 00000000..c6fc10b4 --- /dev/null +++ b/sdk/cpp/test/live_audio_test.cpp @@ -0,0 +1,334 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License.
+ +#include + +#include "mock_core.h" +#include "mock_object_factory.h" +#include "foundry_local_exception.h" + +#include "openai/openai_live_audio_types.h" +#include "openai/openai_live_audio_client.h" + +#include + +#include +#include +#include +#include + +using namespace foundry_local; +using namespace foundry_local::Testing; + +// --------------------------------------------------------------------------- +// LiveAudioTranscriptionResponse parsing tests +// --------------------------------------------------------------------------- + +TEST(LiveAudioTypesTest, FromJson_BasicResponse) { + nlohmann::json j = { + {"text", "hello world"}, + {"is_final", true}, + {"start_time", 0.5}, + {"end_time", 1.5}}; + + auto resp = LiveAudioTranscriptionResponse::FromJson(j.dump()); + EXPECT_EQ("hello world", resp.text); + EXPECT_TRUE(resp.is_final); + ASSERT_TRUE(resp.start_time.has_value()); + EXPECT_DOUBLE_EQ(0.5, resp.start_time.value()); + ASSERT_TRUE(resp.end_time.has_value()); + EXPECT_DOUBLE_EQ(1.5, resp.end_time.value()); +} + +TEST(LiveAudioTypesTest, FromJson_CamelCaseFields) { + nlohmann::json j = { + {"text", "test"}, + {"isFinal", false}, + {"startTime", 1.0}, + {"endTime", 2.0}}; + + auto resp = LiveAudioTranscriptionResponse::FromJson(j.dump()); + EXPECT_EQ("test", resp.text); + EXPECT_FALSE(resp.is_final); + ASSERT_TRUE(resp.start_time.has_value()); + EXPECT_DOUBLE_EQ(1.0, resp.start_time.value()); +} + +TEST(LiveAudioTypesTest, FromJson_WithContent) { + nlohmann::json j = { + {"text", "hello"}, + {"is_final", true}, + {"content", {{{"text", "hi"}, {"transcript", "hi there"}}}}}; + + auto resp = LiveAudioTranscriptionResponse::FromJson(j.dump()); + ASSERT_EQ(1u, resp.content.size()); + EXPECT_EQ("hi", resp.content[0].text); + EXPECT_EQ("hi there", resp.content[0].transcript); +} + +TEST(LiveAudioTypesTest, FromJson_EmptyJson) { + auto resp = LiveAudioTranscriptionResponse::FromJson("{}"); + EXPECT_TRUE(resp.text.empty()); + EXPECT_FALSE(resp.is_final); + EXPECT_FALSE(resp.start_time.has_value()); + EXPECT_FALSE(resp.end_time.has_value()); + EXPECT_TRUE(resp.content.empty()); +} + +TEST(LiveAudioTypesTest, CoreErrorResponse_TryParse_Valid) { + nlohmann::json j = { + {"code", "RATE_LIMITED"}, + {"message", "Too many requests"}, + {"is_transient", true}}; + + auto result = CoreErrorResponse::TryParse(j.dump()); + ASSERT_TRUE(result.has_value()); + EXPECT_EQ("RATE_LIMITED", result->code); + EXPECT_EQ("Too many requests", result->message); + EXPECT_TRUE(result->is_transient); +} + +TEST(LiveAudioTypesTest, CoreErrorResponse_TryParse_Invalid) { + auto result = CoreErrorResponse::TryParse("not json"); + EXPECT_FALSE(result.has_value()); +} + +// --------------------------------------------------------------------------- +// LiveAudioTranscriptionSession tests +// --------------------------------------------------------------------------- + +class LiveAudioSessionTest : public ::testing::Test { +protected: + MockCore core_; + NullLogger logger_; + + void SetUpStartHandlers(const std::string& sessionHandle = "session-123") { + core_.OnCall("audio_stream_start", sessionHandle); + } + + void SetUpPushHandler(const std::string& responseJson = "") { + core_.OnCall("audio_stream_push", + [responseJson](std::string_view, const std::string*, NativeCallbackFn, void*) { + return responseJson; + }); + } + + void SetUpStopHandler() { + core_.OnCall("audio_stream_stop", ""); + } + + void SetUpAllHandlers(const std::string& pushResponse = "") { + SetUpStartHandlers(); + SetUpPushHandler(pushResponse); + 
SetUpStopHandler(); + } +}; + +TEST_F(LiveAudioSessionTest, ConstructorDefaults) { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + EXPECT_FALSE(session.IsStarted()); + EXPECT_FALSE(session.IsStopped()); + EXPECT_EQ(16000, session.Settings().sample_rate); + EXPECT_EQ(1, session.Settings().channels); + EXPECT_EQ(16, session.Settings().bits_per_sample); +} + +TEST_F(LiveAudioSessionTest, SettingsCanBeModifiedBeforeStart) { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Settings().sample_rate = 44100; + session.Settings().channels = 2; + session.Settings().language = "en"; + + EXPECT_EQ(44100, session.Settings().sample_rate); + EXPECT_EQ(2, session.Settings().channels); + EXPECT_EQ("en", session.Settings().language.value()); +} + +TEST_F(LiveAudioSessionTest, Start_Success) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + + EXPECT_TRUE(session.IsStarted()); + EXPECT_FALSE(session.IsStopped()); + EXPECT_EQ(16000, session.ActiveSettings().sample_rate); + + session.Stop(); + EXPECT_TRUE(session.IsStopped()); +} + +TEST_F(LiveAudioSessionTest, Start_WithCustomSettings) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Settings().sample_rate = 44100; + session.Settings().language = "fr"; + session.Start(); + + EXPECT_EQ(44100, session.ActiveSettings().sample_rate); + EXPECT_EQ("fr", session.ActiveSettings().language.value()); + + // Verify the request included our settings + auto lastArg = core_.GetLastDataArg("audio_stream_start"); + auto parsed = nlohmann::json::parse(lastArg); + EXPECT_EQ("44100", parsed["Params"]["SampleRate"].get<std::string>()); + EXPECT_EQ("fr", parsed["Params"]["Language"].get<std::string>()); + + session.Stop(); +} + +TEST_F(LiveAudioSessionTest, Start_Failure) { + core_.OnCallThrow("audio_stream_start", "Connection refused"); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + EXPECT_THROW(session.Start(), Exception); + EXPECT_FALSE(session.IsStarted()); +} + +TEST_F(LiveAudioSessionTest, Start_EmptyHandle) { + core_.OnCall("audio_stream_start", ""); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + EXPECT_THROW(session.Start(), Exception); + EXPECT_FALSE(session.IsStarted()); +} + +TEST_F(LiveAudioSessionTest, DoubleStartThrows) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + EXPECT_THROW(session.Start(), Exception); + + session.Stop(); +} + +TEST_F(LiveAudioSessionTest, AppendBeforeStartThrows) { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + std::vector<uint8_t> data = {0, 1, 2, 3}; + EXPECT_THROW(session.Append(data.data(), data.size()), Exception); +} + +TEST_F(LiveAudioSessionTest, AppendAfterStopThrows) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + session.Stop(); + std::vector<uint8_t> data = {0, 1, 2, 3}; + EXPECT_THROW(session.Append(data.data(), data.size()), Exception); +} + +TEST_F(LiveAudioSessionTest, Start_InvalidCapacityThrows) { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Settings().push_queue_capacity = 0; + EXPECT_THROW(session.Start(), Exception); +} + +TEST_F(LiveAudioSessionTest, StopParseFinalResponse) { + SetUpStartHandlers(); + SetUpPushHandler(); + + // audio_stream_stop returns a final transcription result +
nlohmann::json finalResponse = { + {"text", "final result"}, + {"is_final", true}}; + core_.OnCall("audio_stream_stop", finalResponse.dump()); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + session.Stop(); + + // The final result should be retrievable from the result queue + LiveAudioTranscriptionResponse result; + auto status = session.TryGetNext(result, std::chrono::milliseconds(100)); + EXPECT_EQ(TranscriptionStatus::Result, status); + EXPECT_EQ("final result", result.text); + EXPECT_TRUE(result.is_final); +} + +TEST_F(LiveAudioSessionTest, AppendAndGetResult) { + nlohmann::json pushResponse = { + {"text", "hello"}, + {"is_final", false}}; + SetUpAllHandlers(pushResponse.dump()); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + + // Append some data + std::vector<uint8_t> data(320, 0); + session.Append(data.data(), data.size()); + + // Try to get a result + LiveAudioTranscriptionResponse result; + auto status = session.TryGetNext(result, std::chrono::seconds(2)); + + if (status == TranscriptionStatus::Result) { + EXPECT_EQ("hello", result.text); + EXPECT_FALSE(result.is_final); + } + + session.Stop(); +} + +TEST_F(LiveAudioSessionTest, StopSendsCommand) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + session.Stop(); + + EXPECT_EQ(1, core_.GetCallCount("audio_stream_stop")); + + auto lastArg = core_.GetLastDataArg("audio_stream_stop"); + auto parsed = nlohmann::json::parse(lastArg); + EXPECT_EQ("session-123", parsed["Params"]["SessionHandle"].get<std::string>()); +} + +TEST_F(LiveAudioSessionTest, StopWhenNotStartedIsNoop) { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Stop(); // Should not throw + EXPECT_EQ(0, core_.GetCallCount("audio_stream_stop")); +} + +TEST_F(LiveAudioSessionTest, DoubleStopIsNoop) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + session.Stop(); + session.Stop(); // Should not throw or send a second command + EXPECT_EQ(1, core_.GetCallCount("audio_stream_stop")); +} + +TEST_F(LiveAudioSessionTest, DestructorStopsSession) { + SetUpAllHandlers(); + + { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + // Destructor should call Stop + } + + EXPECT_EQ(1, core_.GetCallCount("audio_stream_stop")); +} + +TEST_F(LiveAudioSessionTest, TryGetNextTimeout) { + SetUpAllHandlers(); + + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + session.Start(); + + LiveAudioTranscriptionResponse result; + auto status = session.TryGetNext(result, std::chrono::milliseconds(50)); + EXPECT_EQ(TranscriptionStatus::Timeout, status); + + session.Stop(); +} + +TEST_F(LiveAudioSessionTest, GetErrorMessage_NoError) { + LiveAudioTranscriptionSession session(&core_, "whisper-model", &logger_); + EXPECT_TRUE(session.GetErrorMessage().empty()); +} diff --git a/sdk/cpp/test/mock_core.h b/sdk/cpp/test/mock_core.h index f89af91a..e7b5f84c 100644 --- a/sdk/cpp/test/mock_core.h +++ b/sdk/cpp/test/mock_core.h @@ -81,6 +81,13 @@ namespace foundry_local::Testing { return resp; } + CoreResponse callWithBinary(std::string_view command, ILogger& logger, + const std::string* dataArgument, + const uint8_t* /*binaryData*/, size_t /*binaryDataLength*/) const override { + // Route through regular call() for testing + return call(command, logger, dataArgument); + } + void unload()
override {} private: @@ -147,6 +154,12 @@ namespace foundry_local::Testing { return resp; } + CoreResponse callWithBinary(std::string_view command, ILogger& logger, + const std::string* dataArgument, + const uint8_t* /*binaryData*/, size_t /*binaryDataLength*/) const override { + return call(command, logger, dataArgument); + } + void unload() override {} private: From 99b091f017a1966251d5215a282587eac8375434 Mon Sep 17 00:00:00 2001 From: Rui Ren Date: Fri, 24 Apr 2026 22:33:41 -0700 Subject: [PATCH 60/83] Add Nemotron-ASR streaming inference to Rust SDK (#613) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Add Nemotron-ASR streaming inference to Rust SDK ### Description Ports the C# live audio transcription feature ([PR #485](https://github.com/microsoft/Foundry-Local/pull/485)) to the Rust SDK with full API parity. The existing `AudioClient` only supports file-based transcription. This PR introduces `LiveAudioTranscriptionSession` that accepts continuous PCM audio chunks (e.g., from a microphone) and returns partial/final transcription results as an async stream. ### What's included **New files** - `sdk/rust/src/openai/live_audio_client.rs` — Streaming session with `start()`, `append()`, `get_transcription_stream()`, `stop()`, plus types, cancellation support, and unit tests - `sdk/rust/tests/integration/live_audio_test.rs` — E2E integration test with synthetic PCM audio - `samples/rust/live-audio-transcription-example/` — Full sample with real microphone capture (cpal) and resampling **Modified files** - `sdk/rust/src/detail/core_interop.rs` — Added `StreamingRequestBuffer` FFI struct and `execute_command_with_binary()` for binary audio data - `sdk/rust/src/openai/audio_client.rs` — Added `create_live_transcription_session()` factory method - `sdk/rust/src/detail/model.rs`, `model_variant.rs` — Wired factory method to `Model` - `sdk/rust/src/openai/mod.rs`, `src/lib.rs` — Module registration and public exports - `sdk/rust/Cargo.toml` — Added `tokio-util` dependency for `CancellationToken` ### API surface ```rust let audio_client = model.create_audio_client(); let mut session = audio_client.create_live_transcription_session(); session.settings.sample_rate = 16000; session.settings.channels = 1; session.settings.language = Some("en".into()); session.start(None).await?; // Push audio from microphone callback session.append(&pcm_bytes, None).await?; // Read results as async stream use tokio_stream::StreamExt; let mut stream = session.get_transcription_stream()?; while let Some(result) = stream.next().await { let result = result?; println!("{}", result.content[0].text); } session.stop(None).await?; ``` ### C# API parity | C# | Rust | Status | |----|------|--------| | `CreateLiveTranscriptionSession()` | `create_live_transcription_session()` | ✅ | | `StartAsync(CancellationToken)` | `start(Option<CancellationToken>)` | ✅ | | `AppendAsync(ReadOnlyMemory<byte>, CancellationToken)` | `append(&[u8], Option<CancellationToken>)` | ✅ | | `GetTranscriptionStream(CancellationToken)` | `get_transcription_stream()` | ✅ | | `StopAsync(CancellationToken)` + cancel-safe cleanup | `stop(Option<CancellationToken>)` + cancel-safe cleanup | ✅ | | `IAsyncDisposable.DisposeAsync()` | `Drop` with best-effort native stop | ✅ | | `LiveAudioTranscriptionResponse.Content[0].Text` | `response.content[0].text` | ✅ | | `LiveAudioTranscriptionResponse.Content[0].Transcript` | `response.content[0].transcript` | ✅ | | `LiveAudioTranscriptionResponse.IsFinal` | `response.is_final` | ✅ | | `LiveAudioTranscriptionResponse.StartTime/EndTime` |
`response.start_time` / `response.end_time` | ✅ | | `LiveAudioTranscriptionOptions` (SampleRate, Channels, BitsPerSample, Language, PushQueueCapacity) | `LiveAudioTranscriptionOptions` (sample_rate, channels, bits_per_sample, language, push_queue_capacity) | ✅ | | `CoreErrorResponse.TryParse()` | `CoreErrorResponse::try_parse()` | ✅ | | Native commands: `audio_stream_start`, `audio_stream_push`, `audio_stream_stop` | Same commands via `execute_command` / `execute_command_with_binary` | ✅ | ### Design highlights - **CancellationToken support** — `start/append/stop` accept `Option` via `tokio_util::sync::CancellationToken` - **Cancel-safe stop** — `stop()` always performs native `audio_stream_stop` even if token fires, preventing native session leaks (matches C# `StopAsync` pattern) - **Response envelope** — `LiveAudioTranscriptionResponse` uses `content: Vec` matching C#'s `ConversationItem.Content[0].Text/Transcript` - **Bounded push queue** — Backpressure via bounded channel (capacity=100); prevents unbounded memory growth - **Push loop on blocking thread** — `execute_command_with_binary` FFI calls run on `spawn_blocking`, keeping async runtime free - **Settings freeze** — Audio format settings are cloned at `start()` and immutable during the session - **Drop safety** — Best-effort synchronous `audio_stream_stop` in `Drop` to prevent native session leaks - **FFI null pointer safety** — Empty binary slices use `std::ptr::null()` to avoid dangling pointer across FFI boundary ### Verified working - ✅ SDK build succeeds (0 errors, 0 clippy warnings) - ✅ 13 unit tests passing (JSON deserialization, settings defaults, error parsing, content envelope) - ✅ E2E pipeline: Microphone (48kHz/2ch/F32) → Resample (16kHz/mono/16-bit) → SDK → Core.dll → onnxruntime-genai.dll → nemotron model - ✅ Synthetic audio test: 30 chunks (96KB PCM) pushed with clean session lifecycle - ✅ Live microphone test: real-time capture, session start/stop, no native errors ### Stats - **14 files changed**, **1,329 additions**, **2 deletions** --------- Co-authored-by: ruiren_microsoft Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/rust/Cargo.toml | 1 + sdk/rust/src/detail/core_interop.rs | 84 +++ sdk/rust/src/lib.rs | 6 +- sdk/rust/src/openai/audio_client.rs | 10 + sdk/rust/src/openai/live_audio_client.rs | 698 ++++++++++++++++++ sdk/rust/src/openai/mod.rs | 5 + sdk/rust/tests/integration/live_audio_test.rs | 117 +++ sdk/rust/tests/integration/main.rs | 1 + 8 files changed, 920 insertions(+), 2 deletions(-) create mode 100644 sdk/rust/src/openai/live_audio_client.rs create mode 100644 sdk/rust/tests/integration/live_audio_test.rs diff --git a/sdk/rust/Cargo.toml b/sdk/rust/Cargo.toml index 7ec7823a..94794697 100644 --- a/sdk/rust/Cargo.toml +++ b/sdk/rust/Cargo.toml @@ -22,6 +22,7 @@ serde_json = "1" thiserror = "2" tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] } tokio-stream = "0.1" +tokio-util = "0.7" futures-core = "0.3" reqwest = { version = "0.12", features = ["json"] } urlencoding = "2" diff --git a/sdk/rust/src/detail/core_interop.rs b/sdk/rust/src/detail/core_interop.rs index 43884d7f..0d17fe62 100644 --- a/sdk/rust/src/detail/core_interop.rs +++ b/sdk/rust/src/detail/core_interop.rs @@ -48,6 +48,19 @@ impl ResponseBuffer { } } +/// Request buffer with binary payload for `execute_command_with_binary`. +/// +/// Used for audio streaming — carries both JSON params and raw PCM bytes. 
+#[repr(C)] +struct StreamingRequestBuffer { + command: *const i8, + command_length: i32, + data: *const i8, + data_length: i32, + binary_data: *const u8, + binary_data_length: i32, +} + /// Signature for `execute_command`. type ExecuteCommandFn = unsafe extern "C" fn(*const RequestBuffer, *mut ResponseBuffer); @@ -63,6 +76,10 @@ type ExecuteCommandWithCallbackFn = unsafe extern "C" fn( *mut std::ffi::c_void, ); +/// Signature for `execute_command_with_binary`. +type ExecuteCommandWithBinaryFn = + unsafe extern "C" fn(*const StreamingRequestBuffer, *mut ResponseBuffer); + // ── Library name helpers ───────────────────────────────────────────────────── #[cfg(target_os = "windows")] @@ -237,6 +254,8 @@ pub(crate) struct CoreInterop { CallbackFn, *mut std::ffi::c_void, ), + execute_command_with_binary: + Option<ExecuteCommandWithBinaryFn>, } impl std::fmt::Debug for CoreInterop { @@ -307,12 +326,22 @@ *sym }; + // SAFETY: Same as above — symbol must match `ExecuteCommandWithBinaryFn`. + // Optional: older native cores may not export this symbol (used for audio streaming). + let execute_command_with_binary: Option<ExecuteCommandWithBinaryFn> = unsafe { + library + .get::<ExecuteCommandWithBinaryFn>(b"execute_command_with_binary\0") + .ok() + .map(|sym| *sym) + }; + Ok(Self { _library: library, #[cfg(target_os = "windows")] _dependency_libs, execute_command, execute_command_with_callback, + execute_command_with_binary, }) } @@ -354,6 +383,61 @@ Self::process_response(response) } + /// Execute a command with an additional binary payload. + /// + /// Used for audio streaming — `binary_data` carries raw PCM bytes + /// alongside the JSON parameters. + pub fn execute_command_with_binary( + &self, + command: &str, + params: Option<&Value>, + binary_data: &[u8], + ) -> Result { + let native_fn = self.execute_command_with_binary.ok_or_else(|| { + FoundryLocalError::CommandExecution { + reason: "execute_command_with_binary is not supported by this native core \ + (symbol not found)" + .into(), + } + })?; + + let cmd = CString::new(command).map_err(|e| FoundryLocalError::CommandExecution { + reason: format!("Invalid command string: {e}"), + })?; + + let data_json = match params { + Some(v) => serde_json::to_string(v)?, + None => String::new(), + }; + let data_cstr = + CString::new(data_json.as_str()).map_err(|e| FoundryLocalError::CommandExecution { + reason: format!("Invalid data string: {e}"), + })?; + + let request = StreamingRequestBuffer { + command: cmd.as_ptr(), + command_length: cmd.as_bytes().len() as i32, + data: data_cstr.as_ptr(), + data_length: data_cstr.as_bytes().len() as i32, + binary_data: if binary_data.is_empty() { + std::ptr::null() + } else { + binary_data.as_ptr() + }, + binary_data_length: binary_data.len() as i32, + }; + + let mut response = ResponseBuffer::new(); + + // SAFETY: `request` fields point into `cmd`, `data_cstr`, and + // `binary_data` which are all alive for the duration of this call. + unsafe { + (native_fn)(&request, &mut response); + } + + Self::process_response(response) + } + /// Execute a command that streams results back via `callback`. /// /// Each chunk delivered by the native library is decoded as UTF-8 and diff --git a/sdk/rust/src/lib.rs b/sdk/rust/src/lib.rs index 872a875c..9fb4bb85 100644 --- a/sdk/rust/src/lib.rs +++ b/sdk/rust/src/lib.rs @@ -31,8 +31,10 @@ pub use async_openai::types::chat::{ // Re-export OpenAI response types for convenience.
pub use crate::openai::{ - AudioTranscriptionResponse, AudioTranscriptionStream, ChatCompletionStream, - TranscriptionSegment, TranscriptionWord, + AudioTranscriptionResponse, AudioTranscriptionStream, ChatCompletionStream, ContentPart, + CoreErrorResponse, LiveAudioTranscriptionOptions, LiveAudioTranscriptionResponse, + LiveAudioTranscriptionSession, LiveAudioTranscriptionStream, TranscriptionSegment, + TranscriptionWord, }; pub use async_openai::types::chat::{ ChatChoice, ChatChoiceStream, ChatCompletionMessageToolCall, diff --git a/sdk/rust/src/openai/audio_client.rs b/sdk/rust/src/openai/audio_client.rs index 0319da38..cc1813d0 100644 --- a/sdk/rust/src/openai/audio_client.rs +++ b/sdk/rust/src/openai/audio_client.rs @@ -9,6 +9,7 @@ use crate::detail::core_interop::CoreInterop; use crate::error::{FoundryLocalError, Result}; use super::json_stream::JsonStream; +use super::live_audio_client::LiveAudioTranscriptionSession; /// A segment of a transcription, as returned by the OpenAI-compatible API. #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] @@ -196,6 +197,15 @@ impl AudioClient { Ok(AudioTranscriptionStream::new(rx)) } + /// Create a [`LiveAudioTranscriptionSession`] for real-time audio + /// streaming transcription. + /// + /// Configure the session's [`settings`](LiveAudioTranscriptionSession::settings) + /// before calling [`start`](LiveAudioTranscriptionSession::start). + pub fn create_live_transcription_session(&self) -> LiveAudioTranscriptionSession { + LiveAudioTranscriptionSession::new(&self.model_id, Arc::clone(&self.core)) + } + fn validate_path(path: &str) -> Result<()> { if path.trim().is_empty() { return Err(FoundryLocalError::Validation { diff --git a/sdk/rust/src/openai/live_audio_client.rs b/sdk/rust/src/openai/live_audio_client.rs new file mode 100644 index 00000000..8b285a96 --- /dev/null +++ b/sdk/rust/src/openai/live_audio_client.rs @@ -0,0 +1,698 @@ +//! Live audio transcription streaming session. +//! +//! Provides real-time audio streaming ASR (Automatic Speech Recognition). +//! Audio data from a microphone (or other source) is pushed in as PCM chunks +//! and transcription results are returned as an async [`Stream`](futures_core::Stream). +//! +//! # Example +//! +//! ```ignore +//! let audio_client = model.create_audio_client(); +//! let mut session = audio_client.create_live_transcription_session(); +//! session.settings.sample_rate = 16000; +//! session.settings.channels = 1; +//! session.settings.language = Some("en".into()); +//! +//! session.start(None).await?; +//! +//! // Push audio from microphone callback +//! session.append(&pcm_bytes, None).await?; +//! +//! // Read results as async stream +//! use tokio_stream::StreamExt; +//! let mut stream = session.get_transcription_stream().await?; +//! while let Some(result) = stream.next().await { +//! let result = result?; +//! print!("{}", result.content[0].text); +//! } +//! +//! session.stop(None).await?; +//! ``` + +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use serde_json::json; +use tokio_util::sync::CancellationToken; + +use crate::detail::core_interop::CoreInterop; +use crate::error::{FoundryLocalError, Result}; + +// ── Types ──────────────────────────────────────────────────────────────────── + +/// Audio format settings for a live transcription session. +/// +/// Must be configured before calling [`LiveAudioTranscriptionSession::start`]. +/// Settings are frozen once the session starts. 
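+///
+/// A minimal configuration sketch (the values shown are the defaults plus an
+/// illustrative language hint):
+///
+/// ```ignore
+/// let mut session = audio_client.create_live_transcription_session();
+/// session.settings.sample_rate = 16000;  // PCM sample rate in Hz
+/// session.settings.channels = 1;         // mono
+/// session.settings.bits_per_sample = 16; // 16-bit signed samples
+/// session.settings.language = Some("en".into());
+/// ```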
+#[derive(Debug, Clone)]
+pub struct LiveAudioTranscriptionOptions {
+    /// PCM sample rate in Hz. Default: 16000.
+    pub sample_rate: u32,
+    /// Number of audio channels. Default: 1 (mono).
+    pub channels: u32,
+    /// Number of bits per audio sample. Default: 16.
+    pub bits_per_sample: u32,
+    /// Optional BCP-47 language hint (e.g., `"en"`, `"zh"`).
+    pub language: Option<String>,
+    /// Maximum number of audio chunks buffered in the internal push queue.
+    /// If the queue is full, [`LiveAudioTranscriptionSession::append`] will
+    /// wait asynchronously.
+    /// Default: 100 (~3 seconds of audio at typical chunk sizes).
+    pub push_queue_capacity: usize,
+}
+
+impl Default for LiveAudioTranscriptionOptions {
+    fn default() -> Self {
+        Self {
+            sample_rate: 16000,
+            channels: 1,
+            bits_per_sample: 16,
+            language: None,
+            push_queue_capacity: 100,
+        }
+    }
+}
+
+/// Internal raw deserialization target matching the native core's JSON format.
+#[derive(Debug, Clone, serde::Deserialize)]
+struct LiveAudioTranscriptionRaw {
+    #[serde(default)]
+    is_final: bool,
+    #[serde(default)]
+    text: String,
+    start_time: Option<f64>,
+    end_time: Option<f64>,
+}
+
+/// A content part within a [`LiveAudioTranscriptionResponse`].
+///
+/// Mirrors the C# `ContentPart` shape from the OpenAI Realtime API so that
+/// callers can access `result.content[0].text` or `result.content[0].transcript`
+/// consistently across SDKs.
+#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+pub struct ContentPart {
+    /// The transcribed text.
+    pub text: String,
+    /// Same as `text` — provided for OpenAI Realtime API compatibility.
+    pub transcript: String,
+}
+
+/// Transcription result from a live audio streaming session.
+///
+/// Shaped to match the C# `LiveAudioTranscriptionResponse : ConversationItem`
+/// so that callers access text via `result.content[0].text` or
+/// `result.content[0].transcript`.
+#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
+pub struct LiveAudioTranscriptionResponse {
+    /// Content parts — typically a single element. Access text via
+    /// `result.content[0].text` or `result.content[0].transcript`.
+    pub content: Vec<ContentPart>,
+    /// Whether this is a final or partial (interim) result.
+    /// Nemotron models always return `true`; other models may return `false`
+    /// for interim hypotheses that will be replaced by a subsequent final result.
+    pub is_final: bool,
+    /// Start time offset of this segment in the audio stream (seconds).
+    pub start_time: Option<f64>,
+    /// End time offset of this segment in the audio stream (seconds).
+    pub end_time: Option<f64>,
+}
+
+impl LiveAudioTranscriptionResponse {
+    /// Parse a transcription response from the native core's JSON format.
+    pub fn from_json(json: &str) -> Result<Self> {
+        serde_json::from_str::<LiveAudioTranscriptionRaw>(json)
+            .map(Self::from_raw)
+            .map_err(FoundryLocalError::from)
+    }
+
+    fn from_raw(raw: LiveAudioTranscriptionRaw) -> Self {
+        Self {
+            content: vec![ContentPart {
+                transcript: raw.text.clone(),
+                text: raw.text,
+            }],
+            is_final: raw.is_final,
+            start_time: raw.start_time,
+            end_time: raw.end_time,
+        }
+    }
+}
+
+/// Structured error response from the native core.
+#[derive(Debug, Clone, serde::Deserialize)]
+pub struct CoreErrorResponse {
+    /// Error code (e.g. `"ASR_SESSION_NOT_FOUND"`).
+    pub code: String,
+    /// Human-readable error message.
+    pub message: String,
+    /// Whether this error is transient (retryable).
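+    /// Transient errors (for example, a busy model) may succeed if the
+    /// caller retries; non-transient errors should be treated as fatal.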
+    #[serde(rename = "isTransient", default)]
+    pub is_transient: bool,
+}
+
+impl CoreErrorResponse {
+    /// Attempt to parse a native error string as structured JSON.
+    /// Returns `None` if the error is not valid JSON or doesn't match the schema.
+    pub fn try_parse(error_string: &str) -> Option<Self> {
+        serde_json::from_str(error_string).ok()
+    }
+}
+
+// ── Stream type ──────────────────────────────────────────────────────────────
+
+/// An async stream of [`LiveAudioTranscriptionResponse`] items.
+///
+/// Returned by [`LiveAudioTranscriptionSession::get_transcription_stream`].
+/// Implements [`futures_core::Stream`].
+pub struct LiveAudioTranscriptionStream {
+    rx: tokio::sync::mpsc::UnboundedReceiver<Result<LiveAudioTranscriptionResponse>>,
+}
+
+impl futures_core::Stream for LiveAudioTranscriptionStream {
+    type Item = Result<LiveAudioTranscriptionResponse>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        self.rx.poll_recv(cx)
+    }
+}
+
+// ── Session state ────────────────────────────────────────────────────────────
+
+struct SessionState {
+    session_handle: Option<String>,
+    started: bool,
+    stopped: bool,
+    push_tx: Option<tokio::sync::mpsc::Sender<Vec<u8>>>,
+    output_tx: Option<tokio::sync::mpsc::UnboundedSender<Result<LiveAudioTranscriptionResponse>>>,
+    output_rx: Option<tokio::sync::mpsc::UnboundedReceiver<Result<LiveAudioTranscriptionResponse>>>,
+    push_loop_handle: Option<tokio::task::JoinHandle<()>>,
+}
+
+impl SessionState {
+    fn new() -> Self {
+        Self {
+            session_handle: None,
+            started: false,
+            stopped: false,
+            push_tx: None,
+            output_tx: None,
+            output_rx: None,
+            push_loop_handle: None,
+        }
+    }
+}
+
+// ── Session ──────────────────────────────────────────────────────────────────
+
+/// Session for real-time audio streaming ASR (Automatic Speech Recognition).
+///
+/// Audio data from a microphone (or other source) is pushed in as PCM chunks
+/// via [`append`](Self::append), and transcription results are returned as an
+/// async [`Stream`](futures_core::Stream) via
+/// [`get_transcription_stream`](Self::get_transcription_stream).
+///
+/// Created via [`AudioClient::create_live_transcription_session`](super::AudioClient::create_live_transcription_session).
+///
+/// # Thread safety
+///
+/// [`append`](Self::append) can be called from any thread (including
+/// high-frequency audio callbacks). Pushes are internally serialized via a
+/// bounded channel to prevent unbounded memory growth and ensure ordering.
+///
+/// # Cancellation
+///
+/// All lifecycle methods accept an optional [`CancellationToken`]. Pass `None`
+/// to use the default (no cancellation).
+pub struct LiveAudioTranscriptionSession {
+    model_id: String,
+    core: Arc<CoreInterop>,
+    /// Audio format settings. Must be configured before calling [`start`](Self::start).
+    /// Settings are frozen once the session starts.
+    pub settings: LiveAudioTranscriptionOptions,
+    state: tokio::sync::Mutex<SessionState>,
+}
+
+impl LiveAudioTranscriptionSession {
+    pub(crate) fn new(model_id: &str, core: Arc<CoreInterop>) -> Self {
+        Self {
+            model_id: model_id.to_owned(),
+            core,
+            settings: LiveAudioTranscriptionOptions::default(),
+            state: tokio::sync::Mutex::new(SessionState::new()),
+        }
+    }
+
+    /// Start a real-time audio streaming session.
+    ///
+    /// Must be called before [`append`](Self::append) or
+    /// [`get_transcription_stream`](Self::get_transcription_stream).
+    /// Settings are frozen after this call.
+    ///
+    /// # Cancellation
+    ///
+    /// Pass a [`CancellationToken`] to abort the start operation. If
+    /// cancelled, any native session that was created is cleaned up
+    /// automatically.
+    pub async fn start(&self, ct: Option<CancellationToken>) -> Result<()> {
+        let mut state = self.state.lock().await;
+
+        if state.started {
+            return Err(FoundryLocalError::Validation {
+                reason: "Streaming session already started. Call stop() first.".into(),
+            });
+        }
+
+        let active_settings = self.settings.clone();
+
+        let (output_tx, output_rx) =
+            tokio::sync::mpsc::unbounded_channel::<Result<LiveAudioTranscriptionResponse>>();
+        let (push_tx, push_rx) =
+            tokio::sync::mpsc::channel::<Vec<u8>>(active_settings.push_queue_capacity);
+
+        let request = self.build_start_request(&active_settings);
+
+        let core = Arc::clone(&self.core);
+        let start_future = tokio::task::spawn_blocking(move || {
+            core.execute_command("audio_stream_start", Some(&request))
+        });
+
+        let session_handle = self.await_start(start_future, ct).await?;
+
+        if session_handle.is_empty() {
+            return Err(FoundryLocalError::CommandExecution {
+                reason: "Native core did not return a session handle.".into(),
+            });
+        }
+
+        let push_loop_core = Arc::clone(&self.core);
+        let push_loop_output_tx = output_tx.clone();
+        let handle_clone = session_handle.clone();
+        let push_loop_handle = tokio::task::spawn_blocking(move || {
+            Self::push_loop(push_loop_core, handle_clone, push_rx, push_loop_output_tx);
+        });
+
+        state.session_handle = Some(session_handle);
+        state.started = true;
+        state.stopped = false;
+        state.push_tx = Some(push_tx);
+        state.output_tx = Some(output_tx);
+        state.output_rx = Some(output_rx);
+        state.push_loop_handle = Some(push_loop_handle);
+
+        Ok(())
+    }
+
+    /// Push a chunk of raw PCM audio data to the streaming session.
+    ///
+    /// Can be called from any async context (including high-frequency audio
+    /// callbacks when wrapped). Chunks are internally queued and serialized to
+    /// the native core.
+    ///
+    /// The data is copied internally so the caller can reuse the buffer.
+    ///
+    /// # Cancellation
+    ///
+    /// Pass a [`CancellationToken`] to abort if the push queue is full
+    /// (backpressure). The audio chunk will not be queued if cancelled.
+    pub async fn append(&self, pcm_data: &[u8], ct: Option<CancellationToken>) -> Result<()> {
+        // Clone the sender while holding the lock, then drop the lock before
+        // awaiting the send. This prevents deadlock when the bounded push
+        // queue is full — stop() can still acquire the lock to close the
+        // channel and unblock the send.
+        let tx = {
+            let state = self.state.lock().await;
+
+            if !state.started || state.stopped {
+                return Err(FoundryLocalError::Validation {
+                    reason: "No active streaming session. Call start() first.".into(),
+                });
+            }
+
+            state
+                .push_tx
+                .clone()
+                .ok_or_else(|| FoundryLocalError::Internal {
+                    reason: "Push channel not available — session may be in an invalid state"
+                        .into(),
+                })?
+        };
+
+        let data = pcm_data.to_vec();
+
+        if let Some(token) = &ct {
+            tokio::select! {
+                result = tx.send(data) => {
+                    result.map_err(|_| FoundryLocalError::CommandExecution {
+                        reason: "Push channel closed — session has been stopped".into(),
+                    })
+                }
+                _ = token.cancelled() => {
+                    Err(FoundryLocalError::CommandExecution {
+                        reason: "Append cancelled".into(),
+                    })
+                }
+            }
+        } else {
+            tx.send(data)
+                .await
+                .map_err(|_| FoundryLocalError::CommandExecution {
+                    reason: "Push channel closed — session has been stopped".into(),
+                })
+        }
+    }
+
+    /// Get the async stream of transcription results.
+    ///
+    /// Results arrive as the native ASR engine processes audio data.
+    /// Can only be called once per session (the receiver is moved out).
+    pub async fn get_transcription_stream(&self) -> Result<LiveAudioTranscriptionStream> {
+        let mut state = self.state.lock().await;
+
+        let rx = state
+            .output_rx
+            .take()
+            .ok_or_else(|| FoundryLocalError::Validation {
+                reason: "No active streaming session, or stream already taken. \
+                         Call start() first and only call get_transcription_stream() once."
+                    .into(),
+            })?;
+
+        Ok(LiveAudioTranscriptionStream { rx })
+    }
+
+    /// Signal end-of-audio and stop the streaming session.
+    ///
+    /// Any remaining buffered audio in the push queue will be drained to the
+    /// native core first. Final results are delivered through the transcription
+    /// stream before it completes.
+    ///
+    /// # Cancellation safety
+    ///
+    /// Even if the provided [`CancellationToken`] fires, the native session
+    /// stop is always completed to avoid native session leaks (matching the C#
+    /// `StopAsync` cancellation-safe pattern).
+    pub async fn stop(&self, ct: Option<CancellationToken>) -> Result<()> {
+        let mut state = self.state.lock().await;
+
+        if !state.started || state.stopped {
+            return Ok(());
+        }
+
+        state.stopped = true;
+
+        self.drain_push_loop(&mut state).await;
+        let stop_result = self.stop_native_session(&state, ct).await;
+        Self::write_final_result(&stop_result, &state);
+        self.finalize_state(&mut state);
+
+        stop_result?;
+        Ok(())
+    }
+
+    // ── Private helpers ──────────────────────────────────────────────────
+
+    /// Build the JSON request for `audio_stream_start`.
+    fn build_start_request(&self, settings: &LiveAudioTranscriptionOptions) -> serde_json::Value {
+        let mut params = json!({
+            "Model": self.model_id,
+            "SampleRate": settings.sample_rate.to_string(),
+            "Channels": settings.channels.to_string(),
+            "BitsPerSample": settings.bits_per_sample.to_string(),
+        });
+        if let Some(ref lang) = settings.language {
+            params["Language"] = json!(lang);
+        }
+        json!({ "Params": params })
+    }
+
+    /// Await the start future with cancellation safety. If cancelled, any
+    /// native session that was already created is cleaned up via
+    /// `audio_stream_stop`.
+    async fn await_start(
+        &self,
+        start_future: tokio::task::JoinHandle<Result<String>>,
+        ct: Option<CancellationToken>,
+    ) -> Result<String> {
+        // Always await the start future — we cannot drop it because the
+        // spawn_blocking task may create a native session that would leak.
+        let join_result = start_future
+            .await
+            .map_err(|e| FoundryLocalError::CommandExecution {
+                reason: format!("Start audio stream task join error: {e}"),
+            })?;
+
+        // If a cancellation token was provided and is already cancelled,
+        // clean up any native session that was created and return an error.
+        if let Some(token) = ct {
+            if token.is_cancelled() {
+                if let Ok(ref handle) = join_result {
+                    if !handle.is_empty() {
+                        let params = json!({
+                            "Params": { "SessionHandle": handle }
+                        });
+                        let _ = self
+                            .core
+                            .execute_command("audio_stream_stop", Some(&params));
+                    }
+                }
+                return Err(FoundryLocalError::CommandExecution {
+                    reason: "Start cancelled".into(),
+                });
+            }
+        }
+
+        join_result
+    }
+
+    /// Close the push channel and wait for the push loop to drain.
+    async fn drain_push_loop(&self, state: &mut SessionState) {
+        state.push_tx.take();
+        if let Some(handle) = state.push_loop_handle.take() {
+            let _ = handle.await;
+        }
+    }
+
+    /// Tell the native core to stop the audio stream session. Always completes
+    /// even if the cancellation token fires.
+    async fn stop_native_session(
+        &self,
+        state: &SessionState,
+        _ct: Option<CancellationToken>,
+    ) -> Result<String> {
+        let session_handle = state
+            .session_handle
+            .as_ref()
+            .ok_or_else(|| FoundryLocalError::Internal {
+                reason: "Session handle missing during stop".into(),
+            })?
+            .clone();
+
+        let params = json!({ "Params": { "SessionHandle": session_handle } });
+        let core = Arc::clone(&self.core);
+
+        // Always await the native stop to completion regardless of cancellation.
+        // This prevents double-stop and native session leaks.
+        tokio::task::spawn_blocking(move || {
+            core.execute_command("audio_stream_stop", Some(&params))
+        })
+        .await
+        .map_err(|e| FoundryLocalError::CommandExecution {
+            reason: format!("Stop audio stream task join error: {e}"),
+        })?
+    }
+
+    /// Write a final transcription result from a stop response into the output channel.
+    fn write_final_result(stop_result: &Result<String>, state: &SessionState) {
+        let _ = stop_result
+            .as_ref()
+            .ok()
+            .filter(|d| !d.is_empty())
+            .and_then(|d| serde_json::from_str::<LiveAudioTranscriptionRaw>(d).ok())
+            .filter(|r| !r.text.is_empty())
+            .and_then(|raw| {
+                state.output_tx.as_ref().map(|tx| {
+                    let _ = tx.send(Ok(LiveAudioTranscriptionResponse::from_raw(raw)));
+                })
+            });
+    }
+
+    /// Clean up session state after stop.
+    fn finalize_state(&self, state: &mut SessionState) {
+        state.output_tx.take();
+        state.session_handle = None;
+        state.started = false;
+    }
+
+    /// Internal push loop — runs entirely on a blocking thread.
+    ///
+    /// Drains the push queue and sends chunks to the native core one at a time.
+    /// Terminates the session on any native error.
+    fn push_loop(
+        core: Arc<CoreInterop>,
+        session_handle: String,
+        mut push_rx: tokio::sync::mpsc::Receiver<Vec<u8>>,
+        output_tx: tokio::sync::mpsc::UnboundedSender<Result<LiveAudioTranscriptionResponse>>,
+    ) {
+        while let Some(audio_data) = push_rx.blocking_recv() {
+            let params = json!({
+                "Params": { "SessionHandle": &session_handle }
+            });
+
+            let data = match core.execute_command_with_binary(
+                "audio_stream_push",
+                Some(&params),
+                &audio_data,
+            ) {
+                Ok(d) => d,
+                Err(e) => {
+                    let code = match &e {
+                        FoundryLocalError::CommandExecution { reason } => {
+                            CoreErrorResponse::try_parse(reason)
+                                .map(|ei| ei.code)
+                                .unwrap_or_else(|| "UNKNOWN".into())
+                        }
+                        _ => "UNKNOWN".into(),
+                    };
+                    let _ = output_tx.send(Err(FoundryLocalError::CommandExecution {
+                        reason: format!("Push failed (code={code}): {e}"),
+                    }));
+                    // Fatal push failures are terminal for the transcription stream.
+                    // Drop the sender and return so the stream completes.
+                    drop(output_tx);
+                    return;
+                }
+            };
+
+            if let Ok(raw) = serde_json::from_str::<LiveAudioTranscriptionRaw>(&data) {
+                if !raw.text.is_empty() {
+                    let _ = output_tx.send(Ok(LiveAudioTranscriptionResponse::from_raw(raw)));
+                }
+            }
+        }
+    }
+}
+
+// ── Drop impl ────────────────────────────────────────────────────────────────
+
+impl Drop for LiveAudioTranscriptionSession {
+    fn drop(&mut self) {
+        if let Ok(mut state) = self.state.try_lock() {
+            state.push_tx.take();
+            state.output_tx.take();
+
+            if state.started && !state.stopped {
+                if let Some(ref handle) = state.session_handle {
+                    let params = serde_json::json!({
+                        "Params": { "SessionHandle": handle }
+                    });
+                    let _ = self
+                        .core
+                        .execute_command("audio_stream_stop", Some(&params));
+                }
+                state.session_handle = None;
+                state.started = false;
+                state.stopped = true;
+            }
+        }
+    }
+}
+
+// ── Tests ────────────────────────────────────────────────────────────────────
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn from_json_parses_text_and_is_final() {
+        let json = r#"{"is_final":true,"text":"hello world","start_time":null,"end_time":null}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+
+        assert_eq!(result.content.len(), 1);
+        assert_eq!(result.content[0].text, "hello world");
+        assert_eq!(result.content[0].transcript, "hello world");
+        assert!(result.is_final);
+    }
+
+    #[test]
+    fn from_json_maps_timing_fields() {
+        let json = r#"{"is_final":false,"text":"partial","start_time":1.5,"end_time":3.0}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+
+        assert_eq!(result.content[0].text, "partial");
+        assert!(!result.is_final);
+        assert_eq!(result.start_time, Some(1.5));
+        assert_eq!(result.end_time, Some(3.0));
+    }
+
+    #[test]
+    fn from_json_empty_text_parses_successfully() {
+        let json = r#"{"is_final":true,"text":"","start_time":null,"end_time":null}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+
+        assert_eq!(result.content[0].text, "");
+        assert!(result.is_final);
+    }
+
+    #[test]
+    fn from_json_only_start_time_sets_start_time() {
+        let json = r#"{"is_final":true,"text":"word","start_time":2.0,"end_time":null}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+
+        assert_eq!(result.start_time, Some(2.0));
+        assert_eq!(result.end_time, None);
+        assert_eq!(result.content[0].text, "word");
+    }
+
+    #[test]
+    fn from_json_invalid_json_returns_error() {
+        let result = LiveAudioTranscriptionResponse::from_json("not valid json");
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn from_json_content_has_text_and_transcript() {
+        let json = r#"{"is_final":true,"text":"test","start_time":null,"end_time":null}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+
+        assert_eq!(result.content[0].text, "test");
+        assert_eq!(result.content[0].transcript, "test");
+    }
+
+    #[test]
+    fn options_default_values() {
+        let options = LiveAudioTranscriptionOptions::default();
+
+        assert_eq!(options.sample_rate, 16000);
+        assert_eq!(options.channels, 1);
+        assert_eq!(options.bits_per_sample, 16);
+        assert_eq!(options.language, None);
+        assert_eq!(options.push_queue_capacity, 100);
+    }
+
+    #[test]
+    fn core_error_response_try_parse_valid_json() {
+        let json =
+            r#"{"code":"ASR_SESSION_NOT_FOUND","message":"Session not found","isTransient":false}"#;
+        let error = CoreErrorResponse::try_parse(json).unwrap();
+
+        assert_eq!(error.code, "ASR_SESSION_NOT_FOUND");
+        assert_eq!(error.message, "Session not found");
+        assert!(!error.is_transient);
+    }
+
+    #[test]
+    fn core_error_response_try_parse_invalid_json_returns_none() {
+        let result = CoreErrorResponse::try_parse("not json");
+        assert!(result.is_none());
+    }
+
+    #[test]
+    fn core_error_response_try_parse_transient_error() {
+        let json = r#"{"code":"BUSY","message":"Model busy","isTransient":true}"#;
+        let error = CoreErrorResponse::try_parse(json).unwrap();
+
+        assert!(error.is_transient);
+    }
+}

diff --git a/sdk/rust/src/openai/mod.rs b/sdk/rust/src/openai/mod.rs
index 5c17a0df..ae0f1996 100644
--- a/sdk/rust/src/openai/mod.rs
+++ b/sdk/rust/src/openai/mod.rs
@@ -2,6 +2,7 @@ mod audio_client;
 mod chat_client;
 mod embedding_client;
 mod json_stream;
+mod live_audio_client;
 
 pub use self::audio_client::{
     AudioClient, AudioClientSettings, AudioTranscriptionResponse, AudioTranscriptionStream,
@@ -10,3 +11,7 @@ pub use self::audio_client::{
 pub use self::chat_client::{ChatClient, ChatClientSettings, ChatCompletionStream};
 pub use self::embedding_client::EmbeddingClient;
 pub use self::json_stream::JsonStream;
+pub use self::live_audio_client::{
+    ContentPart, CoreErrorResponse, LiveAudioTranscriptionOptions, LiveAudioTranscriptionResponse,
+    LiveAudioTranscriptionSession, LiveAudioTranscriptionStream,
+};

diff --git a/sdk/rust/tests/integration/live_audio_test.rs b/sdk/rust/tests/integration/live_audio_test.rs
new file mode 100644
index 00000000..4961d83b
--- /dev/null
+++ b/sdk/rust/tests/integration/live_audio_test.rs
@@ -0,0 +1,117 @@
+use super::common;
+use std::sync::Arc;
+use tokio_stream::StreamExt;
+
+/// Generate synthetic PCM audio (440Hz sine wave, 16kHz, 16-bit mono).
+fn generate_sine_wave_pcm(sample_rate: i32, duration_seconds: i32, frequency: f64) -> Vec<u8> {
+    let total_samples = (sample_rate * duration_seconds) as usize;
+    let mut pcm_bytes = vec![0u8; total_samples * 2]; // 16-bit = 2 bytes per sample
+
+    for i in 0..total_samples {
+        let t = i as f64 / sample_rate as f64;
+        let sample =
+            (i16::MAX as f64 * 0.5 * (2.0 * std::f64::consts::PI * frequency * t).sin()) as i16;
+        pcm_bytes[i * 2] = (sample & 0xFF) as u8;
+        pcm_bytes[i * 2 + 1] = ((sample >> 8) & 0xFF) as u8;
+    }
+
+    pcm_bytes
+}
+
+// --- E2E streaming test with synthetic PCM audio ---
+
+#[tokio::test]
+async fn live_streaming_e2e_with_synthetic_pcm_returns_valid_response() {
+    let manager = common::get_test_manager();
+    let catalog = manager.catalog();
+
+    // Try to get a nemotron or whisper model for audio streaming
+    let model = match catalog.get_model("nemotron").await {
+        Ok(m) => m,
+        Err(_) => match catalog.get_model(common::WHISPER_MODEL_ALIAS).await {
+            Ok(m) => m,
+            Err(_) => {
+                eprintln!("Skipping E2E test: no audio model available");
+                return;
+            }
+        },
+    };
+
+    if !model.is_cached().await.unwrap_or(false) {
+        eprintln!("Skipping E2E test: model not cached");
+        return;
+    }
+
+    model.load().await.expect("model.load() failed");
+
+    let audio_client = model.create_audio_client();
+    let session = audio_client.create_live_transcription_session();
+
+    // Verify default settings
+    assert_eq!(session.settings.sample_rate, 16000);
+    assert_eq!(session.settings.channels, 1);
+    assert_eq!(session.settings.bits_per_sample, 16);
+
+    if let Err(e) = session.start(None).await {
+        eprintln!("Skipping E2E test: could not start session: {e}");
+        model.unload().await.ok();
+        return;
+    }
+
+    // Start collecting results in background (must start before pushing audio)
+    let mut stream = session
+        .get_transcription_stream()
+        .await
+        .expect("get_transcription_stream failed");
+
+    let results = Arc::new(tokio::sync::Mutex::new(Vec::new()));
+    let stream_error: Arc<tokio::sync::Mutex<Option<String>>> =
+        Arc::new(tokio::sync::Mutex::new(None));
+    let results_clone = Arc::clone(&results);
+    let error_clone = Arc::clone(&stream_error);
+    let read_task = tokio::spawn(async move {
+        while let Some(result) = stream.next().await {
+            match result {
+                Ok(r) => results_clone.lock().await.push(r),
+                Err(e) => {
+                    *error_clone.lock().await = Some(format!("{e}"));
+                    break;
+                }
+            }
+        }
+    });
+
+    // Generate ~2 seconds of synthetic PCM audio (440Hz sine wave)
+    let pcm_bytes = generate_sine_wave_pcm(16000, 2, 440.0);
+
+    // Push audio in chunks (100ms each, matching typical mic callback size)
+    let chunk_size = 16000 / 10 * 2; // 100ms of 16-bit audio = 3200 bytes
+    for offset in (0..pcm_bytes.len()).step_by(chunk_size) {
+        let end = std::cmp::min(offset + chunk_size, pcm_bytes.len());
+        session
+            .append(&pcm_bytes[offset..end], None)
+            .await
+            .expect("append failed");
+    }
+
+    // Stop session to flush remaining audio and complete the stream
+    session.stop(None).await.expect("stop failed");
+    read_task.await.expect("read task failed");
+
+    // Verify no stream errors occurred
+    assert!(
+        stream_error.lock().await.is_none(),
+        "Stream produced an error: {:?}",
+        stream_error.lock().await
+    );
+
+    // Verify response attributes — synthetic audio may or may not produce text,
+    // but the response objects should be properly structured (C#-compatible envelope)
+    let results = results.lock().await;
+    for result in results.iter() {
+        assert!(!result.content.is_empty(), "content must not be empty");
+        assert_eq!(result.content[0].text, result.content[0].transcript);
+    }
+
+    model.unload().await.expect("model.unload() failed");
+}

diff --git a/sdk/rust/tests/integration/main.rs b/sdk/rust/tests/integration/main.rs
index c63956f3..05576000 100644
--- a/sdk/rust/tests/integration/main.rs
+++ b/sdk/rust/tests/integration/main.rs
@@ -12,6 +12,7 @@ mod audio_client_test;
 mod catalog_test;
 mod chat_client_test;
 mod embedding_client_test;
+mod live_audio_test;
 mod manager_test;
 mod model_test;
 mod web_service_test;

From ec94cba4a87effab6f9ecc0996bb9204aa11675b Mon Sep 17 00:00:00 2001
From: Baiju Meswani
Date: Fri, 24 Apr 2026 22:38:41 -0700
Subject: [PATCH 61/83] Add ORT-Nightly as fallback for nuget package source at
 installation time (#664)

The JS and Rust SDK installers download three native NuGet packages
(Microsoft.AI.Foundry.Local.Core, Microsoft.ML.OnnxRuntime.Foundry,
Microsoft.ML.OnnxRuntimeGenAI.Foundry) from a single hard-coded feed
(api.nuget.org). Dev / pre-release versions are published to the public
ORT-Nightly Azure DevOps feed before they reach nuget.org, so any build
pinned to a dev version fails outright today.

Change

Both installers now try each feed in order and fall back to the next on
any failure:

1. https://api.nuget.org/v3/index.json (primary)
2.
https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json (fallback) --- sdk/js/script/install-standard.cjs | 8 +- sdk/js/script/install-utils.cjs | 99 ++++++++++++-------- sdk/js/script/install-winml.cjs | 8 +- sdk/rust/build.rs | 141 ++++++++++++++++++++--------- 4 files changed, 169 insertions(+), 87 deletions(-) diff --git a/sdk/js/script/install-standard.cjs b/sdk/js/script/install-standard.cjs index 87c5b1ac..9ffb2ba8 100644 --- a/sdk/js/script/install-standard.cjs +++ b/sdk/js/script/install-standard.cjs @@ -18,7 +18,7 @@ if (fs.existsSync(winmlPkgJson)) { process.exit(0); } -const { NUGET_FEED, runInstall } = require('./install-utils.cjs'); +const { runInstall } = require('./install-utils.cjs'); // deps_versions.json lives at the package root when published, or at sdk/ in the repo. const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions.json')) @@ -27,9 +27,9 @@ const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions.json const deps = require(depsPath); const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core', version: deps['foundry-local-core'].nuget, feed: NUGET_FEED }, - { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version, feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai'].version, feed: NUGET_FEED }, + { name: 'Microsoft.AI.Foundry.Local.Core', version: deps['foundry-local-core'].nuget }, + { name: os.platform() === 'linux' ? 'Microsoft.ML.OnnxRuntime.Gpu.Linux' : 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai'].version }, ]; (async () => { diff --git a/sdk/js/script/install-utils.cjs b/sdk/js/script/install-utils.cjs index 01b14d1a..14df434f 100644 --- a/sdk/js/script/install-utils.cjs +++ b/sdk/js/script/install-utils.cjs @@ -29,7 +29,15 @@ const REQUIRED_FILES = [ `${os.platform() === 'win32' ? '' : 'lib'}onnxruntime-genai${EXT}`, ]; -const NUGET_FEED = 'https://api.nuget.org/v3/index.json'; +// Feeds tried in order. Primary: nuget.org (stable releases). Fallback: +// the public ORT-Nightly Azure DevOps NuGet feed (where dev / pre-release +// builds of Foundry Local Core, ONNX Runtime and ONNX Runtime GenAI live +// before they reach nuget.org). If a download from a feed fails for any +// reason, the next feed is tried. 
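+// Both feeds allow anonymous reads, so no credentials are required here.
+// Order matters: stable releases resolve from nuget.org first, and the
+// fallback is only consulted when the requested version is missing there
+// or the primary feed is unreachable.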
+const FEEDS = [ + 'https://api.nuget.org/v3/index.json', + 'https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json', +]; // --- Download helpers --- @@ -127,43 +135,60 @@ async function installPackage(artifact, tempDir, binDir, skipIfPresent) { } } - const baseAddress = await getBaseAddress(artifact.feed); - const nameLower = pkgName.toLowerCase(); - const verLower = pkgVer.toLowerCase(); - const downloadUrl = `${baseAddress}${nameLower}/${verLower}/${nameLower}.${verLower}.nupkg`; - - const nupkgPath = path.join(tempDir, `${pkgName}.${pkgVer}.nupkg`); - console.log(` Downloading ${pkgName} ${pkgVer}...`); - await downloadFile(downloadUrl, nupkgPath); - - console.log(` Extracting...`); - const zip = new AdmZip(nupkgPath); - const targetPathPrefix = `runtimes/${RID}/native/`.toLowerCase(); - const entries = zip.getEntries().filter(e => { - const p = e.entryName.toLowerCase(); - return p.includes(targetPathPrefix) && p.endsWith(EXT); - }); - - if (entries.length > 0) { - entries.forEach(entry => { - zip.extractEntryTo(entry, binDir, false, true); - console.log(` Extracted ${entry.name}`); - }); - } else { - console.warn(` No files found for RID ${RID} in ${pkgName}.`); - } + // Try each configured feed in order; on failure fall back to the next. + let lastError; + for (let i = 0; i < FEEDS.length; i++) { + const feedUrl = FEEDS[i]; + const feedHost = new URL(feedUrl).host; + try { + const baseAddress = await getBaseAddress(feedUrl); + const nameLower = pkgName.toLowerCase(); + const verLower = pkgVer.toLowerCase(); + const downloadUrl = `${baseAddress}${nameLower}/${verLower}/${nameLower}.${verLower}.nupkg`; + + const nupkgPath = path.join(tempDir, `${pkgName}.${pkgVer}.nupkg`); + console.log(` Downloading ${pkgName} ${pkgVer} from ${feedHost}...`); + await downloadFile(downloadUrl, nupkgPath); + + console.log(` Extracting...`); + const zip = new AdmZip(nupkgPath); + const targetPathPrefix = `runtimes/${RID}/native/`.toLowerCase(); + const entries = zip.getEntries().filter(e => { + const p = e.entryName.toLowerCase(); + return p.includes(targetPathPrefix) && p.endsWith(EXT); + }); - // Write a metadata package.json with version info for diagnostics - if (pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { - const pkgJsonPath = path.join(binDir, 'package.json'); - const pkgContent = { - name: `@foundry-local-core/${platformKey}`, - version: pkgVer, - description: `Native binaries for Foundry Local SDK (${platformKey})`, - private: true - }; - fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2)); + if (entries.length > 0) { + entries.forEach(entry => { + zip.extractEntryTo(entry, binDir, false, true); + console.log(` Extracted ${entry.name}`); + }); + } else { + console.warn(` No files found for RID ${RID} in ${pkgName}.`); + } + + // Write a metadata package.json with version info for diagnostics + if (pkgName.startsWith('Microsoft.AI.Foundry.Local.Core')) { + const pkgJsonPath = path.join(binDir, 'package.json'); + const pkgContent = { + name: `@foundry-local-core/${platformKey}`, + version: pkgVer, + description: `Native binaries for Foundry Local SDK (${platformKey})`, + private: true + }; + fs.writeFileSync(pkgJsonPath, JSON.stringify(pkgContent, null, 2)); + } + return; + } catch (err) { + lastError = err; + const isLast = i === FEEDS.length - 1; + const reason = err instanceof Error ? 
err.message : String(err); + if (!isLast) { + console.warn(` ${pkgName} ${pkgVer}: download from ${feedHost} failed (${reason}); trying next feed...`); + } + } } + throw new Error(`Failed to download ${pkgName} ${pkgVer} from any configured feed (${FEEDS.map(f => new URL(f).host).join(', ')}): ${lastError instanceof Error ? lastError.message : lastError}`); } async function runInstall(artifacts, options) { @@ -192,4 +217,4 @@ async function runInstall(artifacts, options) { } } -module.exports = { NUGET_FEED, runInstall }; +module.exports = { runInstall }; diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index 4276c740..0de13503 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -12,7 +12,7 @@ const fs = require('fs'); const path = require('path'); -const { NUGET_FEED, runInstall } = require('./install-utils.cjs'); +const { runInstall } = require('./install-utils.cjs'); // WinML uses its own deps_versions_winml.json with the same key structure // as the standard deps_versions.json — no variant-specific keys needed. @@ -27,9 +27,9 @@ const platformKey = `${process.platform}-${process.arch}`; const binDir = path.join(sdkRoot, 'foundry-local-core', platformKey); const ARTIFACTS = [ - { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: deps['foundry-local-core']['nuget'], feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version, feed: NUGET_FEED }, - { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai']['version'], feed: NUGET_FEED }, + { name: 'Microsoft.AI.Foundry.Local.Core.WinML', version: deps['foundry-local-core']['nuget'] }, + { name: 'Microsoft.ML.OnnxRuntime.Foundry', version: deps.onnxruntime.version }, + { name: 'Microsoft.ML.OnnxRuntimeGenAI.Foundry', version: deps['onnxruntime-genai']['version'] }, ]; (async () => { diff --git a/sdk/rust/build.rs b/sdk/rust/build.rs index 67b18305..9209032b 100644 --- a/sdk/rust/build.rs +++ b/sdk/rust/build.rs @@ -1,9 +1,19 @@ +use std::collections::HashMap; use std::env; use std::fs; use std::io::{self, Read}; use std::path::{Path, PathBuf}; - -const NUGET_FEED: &str = "https://api.nuget.org/v3/index.json"; +use std::sync::Mutex; + +/// Feeds tried in order. Primary: nuget.org (stable releases). Fallback: +/// the public ORT-Nightly Azure DevOps NuGet feed (where dev / pre-release +/// builds of Foundry Local Core, ONNX Runtime and ONNX Runtime GenAI live +/// before they reach nuget.org). If a download from a feed fails for any +/// reason, the next feed is tried. +const FEEDS: &[&str] = &[ + "https://api.nuget.org/v3/index.json", + "https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/nuget/v3/index.json", +]; /// Versions loaded from deps_versions.json (or deps_versions_winml.json). 
 /// Both files share the same key structure — the build script picks the
@@ -67,7 +77,6 @@ fn load_deps_versions() -> DepsVersions {
 struct NuGetPackage {
     name: &'static str,
     version: String,
-    feed_url: &'static str,
 }
 
 fn get_rid() -> Option<&'static str> {
@@ -106,51 +115,56 @@ fn get_packages(rid: &str) -> Vec<NuGetPackage> {
         packages.push(NuGetPackage {
             name: "Microsoft.AI.Foundry.Local.Core.WinML",
             version: deps.core.clone(),
-            feed_url: NUGET_FEED,
         });
         packages.push(NuGetPackage {
             name: "Microsoft.ML.OnnxRuntime.Foundry",
             version: deps.ort.clone(),
-            feed_url: NUGET_FEED,
         });
         packages.push(NuGetPackage {
             name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry",
             version: deps.genai.clone(),
-            feed_url: NUGET_FEED,
         });
     } else {
         packages.push(NuGetPackage {
             name: "Microsoft.AI.Foundry.Local.Core",
             version: deps.core.clone(),
-            feed_url: NUGET_FEED,
         });
         if is_linux {
             packages.push(NuGetPackage {
                 name: "Microsoft.ML.OnnxRuntime.Gpu.Linux",
                 version: deps.ort.clone(),
-                feed_url: NUGET_FEED,
             });
         } else {
             packages.push(NuGetPackage {
                 name: "Microsoft.ML.OnnxRuntime.Foundry",
                 version: deps.ort.clone(),
-                feed_url: NUGET_FEED,
             });
         }
         packages.push(NuGetPackage {
             name: "Microsoft.ML.OnnxRuntimeGenAI.Foundry",
             version: deps.genai.clone(),
-            feed_url: NUGET_FEED,
         });
     }
 
     packages
 }
 
-/// Resolve the PackageBaseAddress from a NuGet v3 service index.
+/// Resolve the PackageBaseAddress from a NuGet v3 service index. The result
+/// is cached per feed URL so repeated calls within a single build (e.g. one
+/// per package, plus retries on fallback feeds) only hit the network once.
 fn resolve_base_address(feed_url: &str) -> Result<String, String> {
+    static BASE_ADDRESS_CACHE: Mutex<Option<HashMap<String, String>>> = Mutex::new(None);
+    {
+        let guard = BASE_ADDRESS_CACHE.lock().unwrap();
+        if let Some(map) = guard.as_ref() {
+            if let Some(cached) = map.get(feed_url) {
+                return Ok(cached.clone());
+            }
+        }
+    }
+
     let body: String = ureq::get(feed_url)
         .call()
         .map_err(|e| format!("Failed to fetch NuGet feed index at {feed_url}: {e}"))?
@@ -174,6 +188,10 @@ fn resolve_base_address(feed_url: &str) -> Result<String, String> {
             } else {
                 format!("{id}/")
             };
+            let mut guard = BASE_ADDRESS_CACHE.lock().unwrap();
+            guard
+                .get_or_insert_with(HashMap::new)
+                .insert(feed_url.to_string(), base.clone());
             return Ok(base);
         }
     }
@@ -184,49 +202,37 @@ fn resolve_base_address(feed_url: &str) -> Result<String, String> {
     ))
 }
 
-/// Download a .nupkg and extract native libraries for the given RID into `out_dir`.
-/// Skips download if native files from this package are already present.
-fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result<(), String> {
-    // Skip if this package's main native library is already in out_dir
-    // (e.g. pre-populated from FOUNDRY_NATIVE_OVERRIDE_DIR).
-    let ext = native_lib_extension();
-    let prefix = if env::consts::OS == "windows" {
-        ""
-    } else {
-        "lib"
-    };
-    let expected_file = if pkg.name.contains("Foundry.Local.Core") {
-        format!("Microsoft.AI.Foundry.Local.Core.{ext}")
-    } else if pkg.name.contains("OnnxRuntimeGenAI") {
-        format!("{prefix}onnxruntime-genai.{ext}")
-    } else if pkg.name.contains("OnnxRuntime") {
-        format!("{prefix}onnxruntime.{ext}")
-    } else {
-        String::new()
-    };
-    if !expected_file.is_empty() && out_dir.join(&expected_file).exists() {
-        println!(
-            "cargo:warning={} already present, skipping download.",
-            pkg.name
-        );
-        return Ok(());
-    }
-
-    let base_address = resolve_base_address(pkg.feed_url)?;
+/// Try to download and extract a single package from a specific feed.
Returns +/// `Ok(())` on success, `Err(reason)` on any failure (network, HTTP error, +/// zip parse error, etc.). +fn try_download_from_feed( + pkg: &NuGetPackage, + rid: &str, + out_dir: &Path, + feed_url: &str, +) -> Result<(), String> { + let base_address = resolve_base_address(feed_url)?; let lower_name = pkg.name.to_lowercase(); let lower_version = pkg.version.to_lowercase(); let url = format!("{base_address}{lower_name}/{lower_version}/{lower_name}.{lower_version}.nupkg"); + let feed_host = feed_url + .split("://") + .nth(1) + .and_then(|s| s.split('/').next()) + .unwrap_or(feed_url); + println!( - "cargo:warning=Downloading {name} {ver} from NuGet.org", + "cargo:warning=Downloading {name} {ver} from {host}", name = pkg.name, ver = pkg.version, + host = feed_host, ); let mut response = ureq::get(&url) .call() - .map_err(|e| format!("Failed to download {}: {e}", pkg.name))?; + .map_err(|e| format!("Failed to download {} from {feed_host}: {e}", pkg.name))?; let mut bytes = Vec::new(); response @@ -285,6 +291,57 @@ fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result Ok(()) } +/// Download a .nupkg and extract native libraries for the given RID into `out_dir`. +/// Skips download if native files from this package are already present. +/// Tries each configured feed in order; on failure falls back to the next. +fn download_and_extract(pkg: &NuGetPackage, rid: &str, out_dir: &Path) -> Result<(), String> { + // Skip if this package's main native library is already in out_dir + // (e.g. pre-populated from FOUNDRY_NATIVE_OVERRIDE_DIR). + let ext = native_lib_extension(); + let prefix = if env::consts::OS == "windows" { + "" + } else { + "lib" + }; + let expected_file = if pkg.name.contains("Foundry.Local.Core") { + format!("Microsoft.AI.Foundry.Local.Core.{ext}") + } else if pkg.name.contains("OnnxRuntimeGenAI") { + format!("{prefix}onnxruntime-genai.{ext}") + } else if pkg.name.contains("OnnxRuntime") { + format!("{prefix}onnxruntime.{ext}") + } else { + String::new() + }; + if !expected_file.is_empty() && out_dir.join(&expected_file).exists() { + println!( + "cargo:warning={} already present, skipping download.", + pkg.name + ); + return Ok(()); + } + + let mut last_error = String::new(); + for (i, feed_url) in FEEDS.iter().enumerate() { + match try_download_from_feed(pkg, rid, out_dir, feed_url) { + Ok(()) => return Ok(()), + Err(e) => { + let is_last = i == FEEDS.len() - 1; + if !is_last { + println!( + "cargo:warning={} {}: {e}; trying next feed...", + pkg.name, pkg.version + ); + } + last_error = e; + } + } + } + Err(format!( + "Failed to download {} {} from any configured feed: {last_error}", + pkg.name, pkg.version + )) +} + /// Check whether all required native libraries are already present in `out_dir`. fn libs_already_present(out_dir: &Path) -> bool { let ext = native_lib_extension(); From c94bb03bebbc4ec49461c7ecd453982f0105d796 Mon Sep 17 00:00:00 2001 From: Rui Ren Date: Sat, 25 Apr 2026 00:49:51 -0700 Subject: [PATCH 62/83] Add Nemotron-ASR streaming inference to Python SDK (#612) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Add Nemotron-ASR streaming inference to Python SDK ### Description Adds real-time audio streaming support to the Foundry Local Python SDK, enabling live microphone-to-text transcription via ONNX Runtime GenAI's StreamingProcessor API (Nemotron ASR). This is the Python port of C# PR #485 with full feature parity. The existing `AudioClient` only supports file-based transcription. 
This PR introduces `LiveAudioTranscriptionSession` that accepts
continuous PCM audio chunks (e.g., from a microphone) and returns
partial/final transcription results as a synchronous generator.

### What's included

**New files**
- `src/openai/live_audio_transcription_client.py` — Streaming session with `start()`, `append()`, `get_transcription_stream()`, `stop()`
- `src/openai/live_audio_transcription_types.py` — `LiveAudioTranscriptionResponse` (ConversationItem-shaped), `LiveAudioTranscriptionOptions`, `CoreErrorResponse`, `TranscriptionContentPart`
- `test/openai/test_live_audio_transcription.py` — 22 unit tests for deserialization, settings, state guards, streaming pipeline
- `test/openai/test_live_audio_transcription_e2e.py` — E2E test with real native DLLs and nemotron model
- `test/openai/conftest.py` — DLL preload for E2E tests
- `samples/python/live-audio-transcription/src/app.py` — Live microphone transcription demo

**Modified files**
- `src/openai/audio_client.py` — Added `create_live_transcription_session()` factory method
- `src/detail/core_interop.py` — Added `StreamingRequestBuffer` struct, `execute_command_with_binary()`, `start_audio_stream`, `push_audio_data`, `stop_audio_stream` methods, and `_load_dll_win()` for robust DLL loading on Windows
- `src/openai/__init__.py` — Exported new live transcription types
- `test/conftest.py` — Pre-load ORT/GenAI DLLs before brotli import to avoid Windows DLL search conflicts

### API surface

```python
audio_client = model.get_audio_client()
session = audio_client.create_live_transcription_session()
session.settings.sample_rate = 16000
session.settings.channels = 1
session.settings.language = "en"

session.start()

# Push audio from microphone callback (thread-safe)
session.append(pcm_bytes)

# Read results as synchronous generator
for result in session.get_transcription_stream():
    print(result.content[0].text)

session.stop()
```

### C# parity

| C# API | Python API | Notes |
|---|---|---|
| `CreateLiveTranscriptionSession()` | `create_live_transcription_session()` | ✅ |
| `StartAsync(ct)` | `start()` | Sync (matches Python SDK convention) |
| `AppendAsync(ReadOnlyMemory<byte>, ct)` | `append(bytes)` | Thread-safe, copies data |
| `GetTranscriptionStream()` | `get_transcription_stream()` | Generator (sync equivalent of IAsyncEnumerable) |
| `StopAsync(ct)` | `stop()` | Drains push queue, sends native stop, surfaces final result |
| `IAsyncDisposable` | Context manager (`with`) | Idiomatic Python equivalent |
| `LiveAudioTranscriptionOptions` | `LiveAudioTranscriptionOptions` | Same fields: sample_rate, channels, bits_per_sample, language, push_queue_capacity |
| `LiveAudioTranscriptionResponse` | `LiveAudioTranscriptionResponse` | ConversationItem-shaped: content[0].text/transcript, is_final, start_time, end_time |

### Design highlights

- **Output type alignment** — `LiveAudioTranscriptionResponse` uses the OpenAI Realtime `ConversationItem` shape (`content[0].text/transcript`) for forward compatibility
- **Internal push queue** — Bounded `queue.Queue` serializes audio pushes from any thread (safe for mic callbacks) with backpressure
- **Fail-fast on errors** — Push loop terminates immediately on any native error (no retry logic)
- **Settings freeze** — Audio format settings are snapshot-copied at `start()` and immutable during the session
- **Buffer copy** — `append()` copies input data to avoid issues with callers reusing buffers (e.g., PyAudio)
- **Routes through existing exports** — `start_audio_stream` and `stop_audio_stream` route through
`execute_command`; `push_audio_data` routes through `execute_command_with_binary` — no new native entry points required - **DLL loading fix** — Uses `LoadLibraryExW` with `LOAD_WITH_ALTERED_SEARCH_PATH` on Windows to prevent conflicts with stale system-level ORT DLLs ### Verified working - ✅ 22 unit tests passing (deserialization, settings, state guards, streaming pipeline with mocked core) - ✅ E2E test passing (SDK → Core.dll → onnxruntime-genai.dll → onnxruntime.dll with nemotron model) - ✅ Full session lifecycle: start → push synthetic PCM → stop → verify results - ✅ Existing tests unaffected --------- Co-authored-by: ruiren_microsoft Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/python/src/detail/core_interop.py | 87 ++++ sdk/python/src/openai/__init__.py | 19 +- sdk/python/src/openai/audio_client.py | 20 + .../openai/live_audio_transcription_client.py | 324 +++++++++++++++ .../openai/live_audio_transcription_types.py | 144 +++++++ .../openai/test_live_audio_transcription.py | 380 ++++++++++++++++++ 6 files changed, 973 insertions(+), 1 deletion(-) create mode 100644 sdk/python/src/openai/live_audio_transcription_client.py create mode 100644 sdk/python/src/openai/live_audio_transcription_types.py create mode 100644 sdk/python/test/openai/test_live_audio_transcription.py diff --git a/sdk/python/src/detail/core_interop.py b/sdk/python/src/detail/core_interop.py index 1cd53e33..f93b79f0 100644 --- a/sdk/python/src/detail/core_interop.py +++ b/sdk/python/src/detail/core_interop.py @@ -46,6 +46,23 @@ class RequestBuffer(ctypes.Structure): ] +class StreamingRequestBuffer(ctypes.Structure): + """ctypes Structure matching the native ``StreamingRequestBuffer`` C struct. + + Extends ``RequestBuffer`` with binary data fields for sending raw payloads + (e.g. PCM audio bytes) alongside JSON parameters. + """ + + _fields_ = [ + ("Command", ctypes.c_void_p), + ("CommandLength", ctypes.c_int), + ("Data", ctypes.c_void_p), + ("DataLength", ctypes.c_int), + ("BinaryData", ctypes.c_void_p), + ("BinaryDataLength", ctypes.c_int), + ] + + class ResponseBuffer(ctypes.Structure): """ctypes Structure matching the native ``ResponseBuffer`` C struct.""" @@ -173,6 +190,16 @@ def _initialize_native_libraries() -> 'NativeBinaryPaths': ctypes.c_void_p] # user_data lib.execute_command_with_callback.restype = None + # execute_command_with_binary is required for live audio streaming. + # Guard with try/except until Core packages with this symbol are released. + try: + lib.execute_command_with_binary.argtypes = [ctypes.POINTER(StreamingRequestBuffer), + ctypes.POINTER(ResponseBuffer)] + lib.execute_command_with_binary.restype = None + except AttributeError: + logger.debug("execute_command_with_binary not exported by Core — " + "live audio streaming will not be available until Core is updated") + return paths @staticmethod @@ -295,6 +322,66 @@ def execute_command_with_callback(self, command_name: str, command_input: Option response = self._execute_command(command_name, command_input, callback) return response + def execute_command_with_binary(self, command_name: str, + command_input: Optional[InteropRequest], + binary_data: bytes) -> Response: + """Execute a command with both JSON parameters and a raw binary payload. + + Used for operations like pushing PCM audio data alongside JSON metadata. + + Args: + command_name: The native command name (e.g. ``"audio_stream_push"``). + command_input: Optional request parameters (serialized as JSON). + binary_data: Raw binary payload (e.g. 
PCM audio bytes). + + Returns: + A ``Response`` with ``data`` on success or ``error`` on failure. + """ + logger.debug("Executing command with binary: %s Input: %s BinaryLen: %d", + command_name, command_input.params if command_input else None, len(binary_data)) + + cmd_ptr, cmd_len, cmd_buf = CoreInterop._to_c_buffer(command_name) + data_ptr, data_len, data_buf = CoreInterop._to_c_buffer( + command_input.to_json() if command_input else None + ) + + # Keep binary data alive for the duration of the native call + binary_buf = ctypes.create_string_buffer(binary_data) + binary_ptr = ctypes.cast(binary_buf, ctypes.c_void_p) + + req = StreamingRequestBuffer( + Command=cmd_ptr, CommandLength=cmd_len, + Data=data_ptr, DataLength=data_len, + BinaryData=binary_ptr, BinaryDataLength=len(binary_data), + ) + resp = ResponseBuffer() + lib = CoreInterop._flcore_library + + lib.execute_command_with_binary(ctypes.byref(req), ctypes.byref(resp)) + + req = None # Free Python reference to request + + response_str = ctypes.string_at(resp.Data, resp.DataLength).decode("utf-8") if resp.Data else None + error_str = ctypes.string_at(resp.Error, resp.ErrorLength).decode("utf-8") if resp.Error else None + + lib.free_response(resp) + + return Response(data=response_str, error=error_str) + + # --- Audio streaming session support --- + + def start_audio_stream(self, command_input: InteropRequest) -> Response: + """Start a real-time audio streaming session via ``audio_stream_start``.""" + return self.execute_command("audio_stream_start", command_input) + + def push_audio_data(self, command_input: InteropRequest, audio_data: bytes) -> Response: + """Push a chunk of raw PCM audio data via ``audio_stream_push``.""" + return self.execute_command_with_binary("audio_stream_push", command_input, audio_data) + + def stop_audio_stream(self, command_input: InteropRequest) -> Response: + """Stop a real-time audio streaming session via ``audio_stream_stop``.""" + return self.execute_command("audio_stream_stop", command_input) + def get_cached_model_ids(core_interop: CoreInterop) -> list[str]: """Get the list of models that have been downloaded and are cached.""" diff --git a/sdk/python/src/openai/__init__.py b/sdk/python/src/openai/__init__.py index bec5d68b..2fa51a6f 100644 --- a/sdk/python/src/openai/__init__.py +++ b/sdk/python/src/openai/__init__.py @@ -7,5 +7,22 @@ from .chat_client import ChatClient, ChatClientSettings from .audio_client import AudioClient from .embedding_client import EmbeddingClient +from .live_audio_transcription_client import LiveAudioTranscriptionSession +from .live_audio_transcription_types import ( + CoreErrorResponse, + LiveAudioTranscriptionOptions, + LiveAudioTranscriptionResponse, + TranscriptionContentPart, +) -__all__ = ["AudioClient", "ChatClient", "ChatClientSettings", "EmbeddingClient"] +__all__ = [ + "AudioClient", + "ChatClient", + "ChatClientSettings", + "CoreErrorResponse", + "EmbeddingClient", + "LiveAudioTranscriptionOptions", + "LiveAudioTranscriptionResponse", + "LiveAudioTranscriptionSession", + "TranscriptionContentPart", +] diff --git a/sdk/python/src/openai/audio_client.py b/sdk/python/src/openai/audio_client.py index 0858e4aa..575e9abf 100644 --- a/sdk/python/src/openai/audio_client.py +++ b/sdk/python/src/openai/audio_client.py @@ -14,6 +14,7 @@ from ..detail.core_interop import CoreInterop, InteropRequest from ..exception import FoundryLocalException +from .live_audio_transcription_client import LiveAudioTranscriptionSession logger = logging.getLogger(__name__) @@ -61,6 +62,25 
@@ def __init__(self, model_id: str, core_interop: CoreInterop): self.settings = AudioSettings() self._core_interop = core_interop + def create_live_transcription_session(self) -> LiveAudioTranscriptionSession: + """Create a real-time streaming transcription session. + + Audio data is pushed in as PCM chunks and transcription results are + returned as a synchronous generator. + + Returns: + A streaming session that should be stopped when done. + Supports use as a context manager:: + + with audio_client.create_live_transcription_session() as session: + session.settings.sample_rate = 16000 + session.start() + session.append(pcm_bytes) + for result in session.get_transcription_stream(): + print(result.content[0].text) + """ + return LiveAudioTranscriptionSession(self.model_id, self._core_interop) + @staticmethod def _validate_audio_file_path(audio_file_path: str) -> None: """Validate that the audio file path is a non-empty string.""" diff --git a/sdk/python/src/openai/live_audio_transcription_client.py b/sdk/python/src/openai/live_audio_transcription_client.py new file mode 100644 index 00000000..82277436 --- /dev/null +++ b/sdk/python/src/openai/live_audio_transcription_client.py @@ -0,0 +1,324 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Live audio transcription streaming session. + +Provides :class:`LiveAudioTranscriptionSession` — a push-based streaming +session for real-time audio-to-text transcription via ONNX Runtime GenAI. +""" + +from __future__ import annotations + +import logging +import queue +import threading +from typing import Generator, Optional + +from ..detail.core_interop import CoreInterop, InteropRequest +from ..exception import FoundryLocalException +from .live_audio_transcription_types import ( + CoreErrorResponse, + LiveAudioTranscriptionOptions, + LiveAudioTranscriptionResponse, +) + +logger = logging.getLogger(__name__) + +_SENTINEL = object() + + +class LiveAudioTranscriptionSession: + """Session for real-time audio streaming ASR (Automatic Speech Recognition). + + Audio data from a microphone (or other source) is pushed in as PCM chunks, + and transcription results are returned as a synchronous generator. + + Created via :meth:`AudioClient.create_live_transcription_session`. + + Thread safety + ------------- + :meth:`append` can be called from any thread (including high-frequency + audio callbacks). Pushes are internally serialized via a bounded queue + to prevent unbounded memory growth and ensure ordering. 
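+
+    Backpressure: the internal queue is bounded by ``push_queue_capacity``
+    (default 100); when it is full, :meth:`append` blocks until the native
+    core drains a chunk, which keeps memory use flat when audio arrives
+    faster than it can be transcribed.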
+ + Example:: + + session = audio_client.create_live_transcription_session() + session.settings.sample_rate = 16000 + session.settings.channels = 1 + session.settings.language = "en" + + session.start() + + # Push audio from a microphone callback (thread-safe) + session.append(pcm_bytes) + + # Read results as they arrive + for result in session.get_transcription_stream(): + print(result.content[0].text, end="", flush=True) + + session.stop() + """ + + def __init__(self, model_id: str, core_interop: CoreInterop): + self._model_id = model_id + self._core_interop = core_interop + + # Public settings — mutable until start() + self.settings = LiveAudioTranscriptionOptions() + + # Session state — protected by _lock + self._lock = threading.Lock() + self._session_handle: Optional[str] = None + self._started = False + self._stopped = False + + # Frozen settings snapshot + self._active_settings: Optional[LiveAudioTranscriptionOptions] = None + + # Output queue: push loop writes, user reads via get_transcription_stream + self._output_queue: Optional[queue.Queue] = None + + # Internal push queue: user writes audio chunks, background loop drains to native core + self._push_queue: Optional[queue.Queue] = None + self._push_thread: Optional[threading.Thread] = None + + def start(self) -> None: + """Start a real-time audio streaming session. + + Must be called before :meth:`append` or :meth:`get_transcription_stream`. + Settings are frozen after this call. + + Raises: + FoundryLocalException: If the session is already started or the + native core returns an error. + """ + with self._lock: + if self._started: + raise FoundryLocalException( + "Streaming session already started. Call stop() first." + ) + + # Freeze settings + self._active_settings = self.settings.snapshot() + + self._output_queue = queue.Queue() + self._push_queue = queue.Queue( + maxsize=self._active_settings.push_queue_capacity + ) + + request = InteropRequest( + params={ + "Model": self._model_id, + "SampleRate": str(self._active_settings.sample_rate), + "Channels": str(self._active_settings.channels), + "BitsPerSample": str(self._active_settings.bits_per_sample), + } + ) + + if self._active_settings.language is not None: + request.params["Language"] = self._active_settings.language + + response = self._core_interop.start_audio_stream(request) + + if response.error is not None: + raise FoundryLocalException( + f"Error starting audio stream session: {response.error}" + ) + + self._session_handle = response.data + if self._session_handle is None: + raise FoundryLocalException( + "Native core did not return a session handle." + ) + + self._started = True + self._stopped = False + + # Start the push loop thread (non-daemon so it blocks process + # exit until stop() is called — aligns with FL Core's no-daemon design) + self._push_thread = threading.Thread(target=self._push_loop, daemon=False) + self._push_thread.start() + + def append(self, pcm_data: bytes) -> None: + """Push a chunk of raw PCM audio data to the streaming session. + + Can be called from any thread (including audio device callbacks). + Chunks are internally queued and serialized to the native core. + + The data is copied to avoid issues if the caller reuses the buffer. + + Args: + pcm_data: Raw PCM audio bytes matching the configured format. + + Raises: + FoundryLocalException: If no active streaming session exists. 
+ """ + # Copy the data to avoid issues if the caller reuses the buffer + data_copy = bytes(pcm_data) + + with self._lock: + if not self._started or self._stopped: + raise FoundryLocalException( + "No active streaming session. Call start() first." + ) + + push_queue = self._push_queue + if push_queue is None: + raise FoundryLocalException( + "No active streaming session. Call start() first." + ) + + # put() blocks if the queue is full (backpressure). This prevents + # unbounded memory growth when the native core is slower than + # real-time. Capacity is configurable via push_queue_capacity. + # Performed outside the lock to avoid blocking stop() and other + # state transitions while waiting for queue space. + push_queue.put(data_copy) + + def get_transcription_stream( + self, + ) -> Generator[LiveAudioTranscriptionResponse, None, None]: + """Get the stream of transcription results. + + Results arrive as the native ASR engine processes audio data. + The generator completes when :meth:`stop` is called and all + remaining audio has been processed. + + Yields: + Transcription results as ``LiveAudioTranscriptionResponse`` objects. + + Raises: + FoundryLocalException: If no active streaming session exists, + or if the push loop encountered a fatal error. + """ + q = self._output_queue + if q is None: + raise FoundryLocalException( + "No active streaming session. Call start() first." + ) + + while True: + item = q.get() + if item is _SENTINEL: + break + if isinstance(item, Exception): + raise item + yield item + + def stop(self) -> None: + """Signal end-of-audio and stop the streaming session. + + Any remaining buffered audio in the push queue will be drained to + native core first. Final results are delivered through + :meth:`get_transcription_stream` before it completes. + """ + with self._lock: + if not self._started or self._stopped: + return # already stopped or never started + + self._stopped = True + + # 1. Signal push loop to finish (put sentinel) + self._push_queue.put(_SENTINEL) + + # 2. Wait for push loop to finish draining + if self._push_thread is not None: + self._push_thread.join() + + # 3. Tell native core to flush and finalize + request = InteropRequest(params={"SessionHandle": self._session_handle}) + response = self._core_interop.stop_audio_stream(request) + + # Parse final transcription from stop response + if response.data: + try: + final_result = LiveAudioTranscriptionResponse.from_json(response.data) + text = final_result.content[0].text if final_result.content else "" + if text: + self._output_queue.put(final_result) + except Exception as parse_ex: + logger.debug( + "Could not parse stop response as transcription result: %s", + parse_ex, + ) + + # 4. Complete the output queue + self._output_queue.put(_SENTINEL) + + # 5. Clean up — keep _output_queue intact so that + # get_transcription_stream() returns an empty stream (matching C#/JS + # behavior where the completed stream remains readable). + self._session_handle = None + self._started = False + + if response.error is not None: + raise FoundryLocalException( + f"Error stopping audio stream session: {response.error}" + ) + + def _push_loop(self) -> None: + """Internal loop that drains the push queue and sends chunks to native core. + + Terminates the session on any native error. 
+ """ + try: + while True: + audio_data = self._push_queue.get() + if audio_data is _SENTINEL: + break + + request = InteropRequest(params={"SessionHandle": self._session_handle}) + response = self._core_interop.push_audio_data(request, audio_data) + + if response.error is not None: + error_info = CoreErrorResponse.try_parse(response.error) + code = error_info.code if error_info else "UNKNOWN" + fatal_ex = FoundryLocalException( + f"Push failed (code={code}): {response.error}" + ) + logger.error( + "Terminating push loop due to push failure: %s", response.error + ) + self._output_queue.put(fatal_ex) + self._output_queue.put(_SENTINEL) + return + + # Parse transcription result from push response and surface it + if response.data: + try: + transcription = LiveAudioTranscriptionResponse.from_json( + response.data + ) + text = ( + transcription.content[0].text + if transcription.content + else "" + ) + if text: + self._output_queue.put(transcription) + except Exception as parse_ex: + # Non-fatal: log and continue + logger.debug( + "Could not parse push response as transcription: %s", + parse_ex, + ) + except Exception as ex: + logger.error("Push loop terminated with unexpected error: %s", ex) + fatal_ex = FoundryLocalException("Push loop terminated unexpectedly.") + fatal_ex.__cause__ = ex + self._output_queue.put(fatal_ex) + self._output_queue.put(_SENTINEL) + + # --- Context manager support --- + + def __enter__(self) -> LiveAudioTranscriptionSession: + return self + + def __exit__(self, exc_type, exc_val, exc_tb) -> None: + try: + if self._started and not self._stopped: + self.stop() + except Exception as ex: + logger.warning("Error during context manager cleanup: %s", ex) diff --git a/sdk/python/src/openai/live_audio_transcription_types.py b/sdk/python/src/openai/live_audio_transcription_types.py new file mode 100644 index 00000000..11ebbfae --- /dev/null +++ b/sdk/python/src/openai/live_audio_transcription_types.py @@ -0,0 +1,144 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Data types for live audio transcription streaming sessions.""" + +from __future__ import annotations + +import json +from dataclasses import dataclass, field +from typing import List, Optional + + +@dataclass +class TranscriptionContentPart: + """A content part within a live transcription response. + + Mirrors the OpenAI Realtime API ``ContentPart`` structure so that + ``result.content[0].text`` and ``result.content[0].transcript`` + both return the transcribed text. + + Attributes: + text: The transcribed text for this content part. + transcript: Alias for ``text`` (OpenAI Realtime API compatibility). + """ + + text: str = "" + transcript: str = "" + + +@dataclass +class LiveAudioTranscriptionResponse: + """Transcription result for real-time audio streaming sessions. + + Shaped like the OpenAI Realtime API ``ConversationItem`` so that + consumers can access text via ``result.content[0].text`` or + ``result.content[0].transcript``. + + Attributes: + content: List of transcription content parts. + is_final: Whether this is a final or partial (interim) result. + Nemotron models always return ``True``. + start_time: Start time offset of this segment in the audio stream (seconds). + end_time: End time offset of this segment in the audio stream (seconds). + id: Unique identifier for this result (if available). 
+ """ + + content: List[TranscriptionContentPart] = field(default_factory=list) + is_final: bool = True + start_time: Optional[float] = None + end_time: Optional[float] = None + id: Optional[str] = None + + @staticmethod + def from_json(json_str: str) -> LiveAudioTranscriptionResponse: + """Deserialize a native Core JSON response into a ``LiveAudioTranscriptionResponse``. + + The native JSON format uses flat fields (``text``, ``is_final``, + ``start_time``, ``end_time``). This method maps them into the + ``ConversationItem``-shaped structure with a ``content`` list. + + Args: + json_str: Raw JSON string from the native core. + + Returns: + A ``LiveAudioTranscriptionResponse`` instance. + + Raises: + json.JSONDecodeError: If *json_str* is not valid JSON. + Exception: If deserialization fails. + """ + raw = json.loads(json_str) + text = raw.get("text", "") + return LiveAudioTranscriptionResponse( + content=[TranscriptionContentPart(text=text, transcript=text)], + is_final=raw.get("is_final", True), + start_time=raw.get("start_time"), + end_time=raw.get("end_time"), + ) + + +@dataclass +class LiveAudioTranscriptionOptions: + """Audio format settings for a live transcription streaming session. + + Must be configured before calling :meth:`LiveAudioTranscriptionSession.start`. + Settings are frozen (snapshot-copied) once the session starts. + + Attributes: + sample_rate: PCM sample rate in Hz. Default: 16000. + channels: Number of audio channels. Default: 1 (mono). + bits_per_sample: Number of bits per audio sample. Default: 16. + language: Optional BCP-47 language hint (e.g. ``"en"``, ``"zh"``). + push_queue_capacity: Maximum number of audio chunks buffered in the + internal push queue. Default: 100 (~3 s at typical chunk sizes). + """ + + sample_rate: int = 16000 + channels: int = 1 + bits_per_sample: int = 16 + language: Optional[str] = None + push_queue_capacity: int = 100 + + def snapshot(self) -> LiveAudioTranscriptionOptions: + """Return a shallow copy of these settings (freeze pattern).""" + return LiveAudioTranscriptionOptions( + sample_rate=self.sample_rate, + channels=self.channels, + bits_per_sample=self.bits_per_sample, + language=self.language, + push_queue_capacity=self.push_queue_capacity, + ) + + +@dataclass +class CoreErrorResponse: + """Structured error response from the native core. + + Attributes: + code: Error code string (e.g. ``"ASR_SESSION_NOT_FOUND"``). + message: Human-readable error description. + is_transient: Whether the error is transient and may succeed on retry. + """ + + code: str = "" + message: str = "" + is_transient: bool = False + + @staticmethod + def try_parse(error_string: str) -> Optional[CoreErrorResponse]: + """Attempt to parse a native error string as structured JSON. + + Returns ``None`` if the error is not valid JSON or doesn't match + the expected schema, which should be treated as a permanent/unknown error. + """ + try: + raw = json.loads(error_string) + return CoreErrorResponse( + code=raw.get("code", ""), + message=raw.get("message", ""), + is_transient=raw.get("isTransient", False), + ) + except Exception: + return None diff --git a/sdk/python/test/openai/test_live_audio_transcription.py b/sdk/python/test/openai/test_live_audio_transcription.py new file mode 100644 index 00000000..e5964158 --- /dev/null +++ b/sdk/python/test/openai/test_live_audio_transcription.py @@ -0,0 +1,380 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. +# -------------------------------------------------------------------------- +"""Unit tests for live audio transcription — mirrors C# LiveAudioTranscriptionTests.cs. + +These tests cover: +- LiveAudioTranscriptionResponse.from_json deserialization +- LiveAudioTranscriptionOptions defaults and snapshot +- CoreErrorResponse.try_parse +- Session state guards (append/get_transcription_stream before start) +""" + +from __future__ import annotations + +import json +import threading +from unittest.mock import MagicMock + +import pytest + +from foundry_local_sdk.openai.live_audio_transcription_types import ( + CoreErrorResponse, + LiveAudioTranscriptionOptions, + LiveAudioTranscriptionResponse, + TranscriptionContentPart, +) +from foundry_local_sdk.openai.live_audio_transcription_client import ( + LiveAudioTranscriptionSession, +) +from foundry_local_sdk.detail.core_interop import CoreInterop, Response +from foundry_local_sdk.exception import FoundryLocalException + + +# --------------------------------------------------------------------------- +# LiveAudioTranscriptionResponse.from_json tests +# --------------------------------------------------------------------------- + + +class TestFromJson: + """LiveAudioTranscriptionResponse.from_json deserialization tests.""" + + def test_parses_text_and_is_final(self): + json_str = '{"is_final":true,"text":"hello world","start_time":null,"end_time":null}' + + result = LiveAudioTranscriptionResponse.from_json(json_str) + + assert result.content is not None + assert len(result.content) == 1 + assert result.content[0].text == "hello world" + assert result.content[0].transcript == "hello world" + assert result.is_final is True + + def test_maps_timing_fields(self): + json_str = '{"is_final":false,"text":"partial","start_time":1.5,"end_time":3.0}' + + result = LiveAudioTranscriptionResponse.from_json(json_str) + + assert result.content[0].text == "partial" + assert result.is_final is False + assert result.start_time == 1.5 + assert result.end_time == 3.0 + + def test_empty_text_parses_successfully(self): + json_str = '{"is_final":true,"text":"","start_time":null,"end_time":null}' + + result = LiveAudioTranscriptionResponse.from_json(json_str) + + assert result.content[0].text == "" + assert result.is_final is True + + def test_only_start_time_sets_start_time(self): + json_str = '{"is_final":true,"text":"word","start_time":2.0,"end_time":null}' + + result = LiveAudioTranscriptionResponse.from_json(json_str) + + assert result.start_time == 2.0 + assert result.end_time is None + assert result.content[0].text == "word" + + def test_invalid_json_throws(self): + with pytest.raises(Exception): + LiveAudioTranscriptionResponse.from_json("not valid json") + + def test_content_has_text_and_transcript(self): + json_str = '{"is_final":true,"text":"test","start_time":null,"end_time":null}' + + result = LiveAudioTranscriptionResponse.from_json(json_str) + + # Both Text and Transcript should have the same value + assert result.content[0].text == "test" + assert result.content[0].transcript == "test" + + def test_missing_fields_use_defaults(self): + json_str = '{}' + + result = LiveAudioTranscriptionResponse.from_json(json_str) + + assert result.content[0].text == "" + assert result.is_final is True + assert result.start_time is None + assert result.end_time is None + + +# --------------------------------------------------------------------------- +# LiveAudioTranscriptionOptions tests +# 
--------------------------------------------------------------------------- + + +class TestOptions: + """LiveAudioTranscriptionOptions tests.""" + + def test_default_values(self): + options = LiveAudioTranscriptionOptions() + + assert options.sample_rate == 16000 + assert options.channels == 1 + assert options.bits_per_sample == 16 + assert options.language is None + assert options.push_queue_capacity == 100 + + def test_snapshot_creates_independent_copy(self): + options = LiveAudioTranscriptionOptions(language="en") + snapshot = options.snapshot() + + # Modify original — snapshot should be unaffected + options.language = "zh" + options.sample_rate = 44100 + + assert snapshot.language == "en" + assert snapshot.sample_rate == 16000 + + +# --------------------------------------------------------------------------- +# CoreErrorResponse tests +# --------------------------------------------------------------------------- + + +class TestCoreErrorResponse: + """CoreErrorResponse.try_parse tests.""" + + def test_try_parse_valid_json(self): + json_str = '{"code":"ASR_SESSION_NOT_FOUND","message":"Session not found","isTransient":false}' + + error = CoreErrorResponse.try_parse(json_str) + + assert error is not None + assert error.code == "ASR_SESSION_NOT_FOUND" + assert error.message == "Session not found" + assert error.is_transient is False + + def test_try_parse_invalid_json_returns_none(self): + result = CoreErrorResponse.try_parse("not json") + assert result is None + + def test_try_parse_transient_error(self): + json_str = '{"code":"BUSY","message":"Model busy","isTransient":true}' + + error = CoreErrorResponse.try_parse(json_str) + + assert error is not None + assert error.is_transient is True + + +# --------------------------------------------------------------------------- +# Session state guard tests +# --------------------------------------------------------------------------- + + +class TestSessionStateGuards: + """Verify that append/get_transcription_stream raise before start.""" + + def _make_session(self) -> LiveAudioTranscriptionSession: + """Create a session with a mock CoreInterop (no native DLLs needed).""" + mock_interop = MagicMock(spec=CoreInterop) + return LiveAudioTranscriptionSession("test-model", mock_interop) + + def test_append_before_start_throws(self): + session = self._make_session() + data = b'\x00' * 100 + + with pytest.raises(FoundryLocalException): + session.append(data) + + def test_get_transcription_stream_before_start_throws(self): + session = self._make_session() + + with pytest.raises(FoundryLocalException): + # Attempt to iterate — should raise immediately + next(iter(session.get_transcription_stream())) + + def test_start_sets_started_flag(self): + session = self._make_session() + session._core_interop.start_audio_stream.return_value = Response( + data="handle-123", error=None + ) + session._core_interop.stop_audio_stream.return_value = Response( + data=None, error=None + ) + + session.start() + + assert session._started is True + assert session._session_handle == "handle-123" + + # Cleanup via public API + session.stop() + + def test_double_start_throws(self): + session = self._make_session() + session._core_interop.start_audio_stream.return_value = Response( + data="handle-123", error=None + ) + session._core_interop.stop_audio_stream.return_value = Response( + data=None, error=None + ) + + session.start() + + with pytest.raises(FoundryLocalException, match="already started"): + session.start() + + # Cleanup via public API + session.stop() + + def 
test_start_error_raises(self): + session = self._make_session() + session._core_interop.start_audio_stream.return_value = Response( + data=None, error="init failed" + ) + + with pytest.raises(FoundryLocalException, match="Error starting"): + session.start() + + def test_stop_without_start_is_noop(self): + session = self._make_session() + # Should not raise + session.stop() + + +# --------------------------------------------------------------------------- +# Session streaming integration test (mocked native core) +# --------------------------------------------------------------------------- + + +class TestSessionStreaming: + """Verify the full push → output pipeline with a mocked native core.""" + + def test_push_and_receive_transcription(self): + """Simulate pushing audio and receiving transcription results.""" + mock_interop = MagicMock(spec=CoreInterop) + + # start_audio_stream returns a handle + mock_interop.start_audio_stream.return_value = Response( + data="session-42", error=None + ) + + # push_audio_data returns a transcription result + push_response = json.dumps({ + "is_final": True, + "text": "hello world", + "start_time": 0.0, + "end_time": 1.5, + }) + mock_interop.push_audio_data.return_value = Response( + data=push_response, error=None + ) + + # stop_audio_stream returns empty (no final result) + mock_interop.stop_audio_stream.return_value = Response( + data=None, error=None + ) + + session = LiveAudioTranscriptionSession("test-model", mock_interop) + session.start() + + # Start reading results in background (must start before stop) + results = [] + + def read(): + for r in session.get_transcription_stream(): + results.append(r) + + reader = threading.Thread(target=read, daemon=True) + reader.start() + + # Push a chunk of audio + session.append(b'\x00' * 3200) + + # Stop to flush and complete + session.stop() + reader.join(timeout=5) + + assert len(results) == 1 + assert results[0].content[0].text == "hello world" + assert results[0].is_final is True + assert results[0].start_time == 0.0 + assert results[0].end_time == 1.5 + + def test_push_error_surfaces_as_exception(self): + """Verify that a native push error terminates the stream with an exception.""" + mock_interop = MagicMock(spec=CoreInterop) + + mock_interop.start_audio_stream.return_value = Response( + data="session-42", error=None + ) + mock_interop.push_audio_data.return_value = Response( + data=None, error='{"code":"ASR_ERROR","message":"decode failed","isTransient":false}' + ) + mock_interop.stop_audio_stream.return_value = Response( + data=None, error=None + ) + + session = LiveAudioTranscriptionSession("test-model", mock_interop) + session.start() + + try: + session.append(b'\x00' * 3200) + + with pytest.raises(FoundryLocalException, match="Push failed"): + for _ in session.get_transcription_stream(): + pass + finally: + # Cleanup: stop to join the push thread even if assertions fail + session.stop() + + def test_context_manager_calls_stop(self): + """Verify context manager calls stop on exit.""" + mock_interop = MagicMock(spec=CoreInterop) + mock_interop.start_audio_stream.return_value = Response( + data="session-42", error=None + ) + mock_interop.push_audio_data.return_value = Response( + data=None, error=None + ) + mock_interop.stop_audio_stream.return_value = Response( + data=None, error=None + ) + + with LiveAudioTranscriptionSession("test-model", mock_interop) as session: + session.start() + + # stop_audio_stream should have been called via context manager + 
mock_interop.stop_audio_stream.assert_called_once()
+
+    def test_stop_with_final_result(self):
+        """Verify that stop() parses and surfaces a final transcription result."""
+        mock_interop = MagicMock(spec=CoreInterop)
+        mock_interop.start_audio_stream.return_value = Response(
+            data="session-42", error=None
+        )
+        final_json = json.dumps({
+            "is_final": True,
+            "text": "final words",
+            "start_time": 5.0,
+            "end_time": 6.0,
+        })
+        mock_interop.stop_audio_stream.return_value = Response(
+            data=final_json, error=None
+        )
+
+        session = LiveAudioTranscriptionSession("test-model", mock_interop)
+        session.start()
+
+        # Start reading results in background (must start before stop)
+        results = []
+
+        def read():
+            for r in session.get_transcription_stream():
+                results.append(r)
+
+        reader = threading.Thread(target=read, daemon=True)
+        reader.start()
+
+        # No audio pushed — just stop to get final result
+        session.stop()
+        reader.join(timeout=5)
+
+        assert len(results) == 1
+        assert results[0].content[0].text == "final words"

From 573dbde9d52b990296b52ef9906785d048df4e58 Mon Sep 17 00:00:00 2001
From: Rui Ren
Date: Mon, 27 Apr 2026 13:16:20 -0700
Subject: [PATCH 63/83] Add `Nemotron` multi-language live-audio transcription samples (#672)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Summary

Add Nemotron live-audio transcription samples across JS, C#, Python, Rust, and C++ in their language-specific sample folders.

## What’s included

### JavaScript
- Updated `samples/js/live-audio-transcription/app.js`
- Synced to the final PR #588 behavior:
  - single-copy buffer handling in audio callback
  - improved queue/backpressure stability behavior retained

### C#
- Updated `samples/cs/live-audio-transcription/Program.cs`
- Uses spinner-based EP registration flow for consistency with other C# samples

### Python
- Added new sample:
  - `samples/python/live-audio-transcription/src/app.py`
  - `samples/python/live-audio-transcription/requirements.txt`
- Implements live microphone transcription with Nemotron (`create_live_transcription_session` pattern)

### Rust
- Added new sample:
  - `samples/rust/live-audio-transcription/src/main.rs`
  - `samples/rust/live-audio-transcription/Cargo.toml`
  - `samples/rust/live-audio-transcription/README.md`
- Added listing entry in `samples/rust/README.md`

### C++
- Added new sample:
  - `samples/cpp/live-audio-transcription/main.cpp`
  - `samples/cpp/live-audio-transcription/README.md`
- Sample is based on the live-audio C++ API surface introduced in PR #655

## Notes
- Only sample-related files are included.
- Unrelated local artifacts (e.g. `.tgz`, local temp folders) were intentionally excluded.
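For orientation, the session lifecycle that all five samples share reduces to four calls: `start()`, `append()`, the transcription stream, and `stop()`. A minimal sketch against the Python SDK surface added earlier in this series (synthetic silence stands in for microphone input; download/EP setup and error handling omitted):

```python
import threading

from foundry_local_sdk import Configuration, FoundryLocalManager

FoundryLocalManager.initialize(Configuration(app_name="foundry_local_samples"))
model = FoundryLocalManager.instance.catalog.get_model("nemotron-speech-streaming-en-0.6b")
model.load()  # assumes the model and execution providers are already downloaded

# Lifecycle: start() -> append() -> get_transcription_stream() -> stop()
session = model.get_audio_client().create_live_transcription_session()
session.settings.sample_rate = 16000  # must match the PCM you push

session.start()

# Read results on a background thread so append() never waits on printing
def read_results() -> None:
    for result in session.get_transcription_stream():
        print(result.content[0].text)

reader = threading.Thread(target=read_results)
reader.start()

session.append(b"\x00" * 3200)  # one 100 ms chunk of 16 kHz, 16-bit mono PCM
session.stop()                  # drains buffered audio, then closes the stream
reader.join()
```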
--------- Co-authored-by: ruiren_microsoft Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> Co-authored-by: samkemp --- .../cpp/live-audio-transcription/README.md | 28 ++ samples/cpp/live-audio-transcription/main.cpp | 269 +++++++++++++++ samples/cs/Directory.Packages.props | 4 +- .../LiveAudioTranscriptionExample.csproj | 55 ++++ .../LiveAudioTranscriptionExample.sln | 34 ++ .../cs/live-audio-transcription/Program.cs | 166 ++++++++++ samples/cs/live-audio-transcription/README.md | 65 ++++ samples/cs/nuget.config | 12 + samples/js/live-audio-transcription/README.md | 58 ++++ samples/js/live-audio-transcription/app.js | 196 +++++++++++ .../js/live-audio-transcription/package.json | 16 + .../python/live-audio-transcription/README.md | 68 ++++ .../live-audio-transcription/requirements.txt | 5 + .../live-audio-transcription/src/app.py | 174 ++++++++++ samples/rust/README.md | 3 +- .../rust/live-audio-transcription/Cargo.toml | 15 + .../rust/live-audio-transcription/README.md | 21 ++ .../rust/live-audio-transcription/src/main.rs | 308 ++++++++++++++++++ 18 files changed, 1494 insertions(+), 3 deletions(-) create mode 100644 samples/cpp/live-audio-transcription/README.md create mode 100644 samples/cpp/live-audio-transcription/main.cpp create mode 100644 samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj create mode 100644 samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.sln create mode 100644 samples/cs/live-audio-transcription/Program.cs create mode 100644 samples/cs/live-audio-transcription/README.md create mode 100644 samples/js/live-audio-transcription/README.md create mode 100644 samples/js/live-audio-transcription/app.js create mode 100644 samples/js/live-audio-transcription/package.json create mode 100644 samples/python/live-audio-transcription/README.md create mode 100644 samples/python/live-audio-transcription/requirements.txt create mode 100644 samples/python/live-audio-transcription/src/app.py create mode 100644 samples/rust/live-audio-transcription/Cargo.toml create mode 100644 samples/rust/live-audio-transcription/README.md create mode 100644 samples/rust/live-audio-transcription/src/main.rs diff --git a/samples/cpp/live-audio-transcription/README.md b/samples/cpp/live-audio-transcription/README.md new file mode 100644 index 00000000..a9fca977 --- /dev/null +++ b/samples/cpp/live-audio-transcription/README.md @@ -0,0 +1,28 @@ +# Live Audio Transcription Example (C++) + +Demonstrates real-time microphone-to-text using the Foundry Local C++ SDK. + +Uses [PortAudio](http://www.portaudio.com/) for cross-platform microphone capture +(the C/C++ equivalent of `naudiodon2` used by the JS sample). If PortAudio is not +available, falls back to synthetic PCM audio. 
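All of the samples' synthetic fallbacks generate the same signal: two seconds of a 440 Hz sine at half amplitude, encoded as 16-bit little-endian mono PCM at 16 kHz. A quick sketch of that encoding, shown in Python for brevity (the C++ `GenerateSineWavePcm` below implements the same math):

```python
import math
import struct

def sine_pcm(sample_rate: int = 16000, seconds: int = 2, freq_hz: float = 440.0) -> bytes:
    """440 Hz sine at half amplitude as 16-bit little-endian mono PCM."""
    samples = (
        int(32767 * 0.5 * math.sin(2 * math.pi * freq_hz * i / sample_rate))
        for i in range(sample_rate * seconds)
    )
    return b"".join(struct.pack("<h", s) for s in samples)  # "<h": little-endian int16
```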
+
+
+## Build
+
+```bash
+# With PortAudio (live microphone)
+g++ -std=c++20 -DHAS_PORTAUDIO main.cpp -lfoundry_local -lportaudio -o live-audio-transcription-example
+
+# Without PortAudio (synthetic audio only)
+g++ -std=c++20 main.cpp -lfoundry_local -o live-audio-transcription-example
+```
+
+## Run
+
+```bash
+# Live microphone (requires PortAudio)
+./live-audio-transcription-example
+
+# Synthetic 440Hz sine wave (no microphone needed)
+./live-audio-transcription-example --synth
+```
diff --git a/samples/cpp/live-audio-transcription/main.cpp b/samples/cpp/live-audio-transcription/main.cpp
new file mode 100644
index 00000000..1a3341e4
--- /dev/null
+++ b/samples/cpp/live-audio-transcription/main.cpp
@@ -0,0 +1,269 @@
+// Live Audio Transcription — Foundry Local C++ SDK Example
+//
+// Demonstrates real-time microphone-to-text using the C++ SDK.
+// Uses PortAudio for cross-platform mic capture (like naudiodon2 in the JS sample).
+// Falls back to synthetic PCM if PortAudio is unavailable.
+//
+// Requires: PortAudio (libportaudio), Foundry Local C++ SDK
+//
+// Usage: ./live-audio-transcription-example [--synth]
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <cmath>
+#include <csignal>
+#include <cstdint>
+#include <deque>
+#include <iostream>
+#include <mutex>
+#include <stdexcept>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "foundry_local.h"
+
+// PortAudio is optional — compile with -DHAS_PORTAUDIO and link -lportaudio
+// to enable live microphone capture.
+#ifdef HAS_PORTAUDIO
+#include <portaudio.h>
+#endif
+
+namespace {
+
+// Global flag for Ctrl+C graceful shutdown (mirrors JS process.on('SIGINT'))
+std::atomic<bool> g_running{true};
+
+void SignalHandler(int /*signum*/) {
+    g_running = false;
+}
+
+// Bounded audio queue (mirrors JS appendQueue with cap of 100)
+class AudioQueue {
+public:
+    void Push(std::vector<uint8_t> chunk) {
+        std::lock_guard<std::mutex> lock(mu_);
+        if (queue_.size() >= kMaxSize) {
+            queue_.pop_front();
+            if (!warnedDrop_) {
+                warnedDrop_ = true;
+                std::cerr << "Audio append queue overflow; dropping oldest chunk to keep stream alive."
+                          << std::endl;
+            }
+        }
+        queue_.push_back(std::move(chunk));
+    }
+
+    bool TryPop(std::vector<uint8_t>& out) {
+        std::lock_guard<std::mutex> lock(mu_);
+        if (queue_.empty()) return false;
+        out = std::move(queue_.front());
+        queue_.pop_front();
+        return true;
+    }
+
+private:
+    static constexpr size_t kMaxSize = 100;
+    std::deque<std::vector<uint8_t>> queue_;
+    std::mutex mu_;
+    bool warnedDrop_ = false;
+};
+
+std::vector<uint8_t> GenerateSineWavePcm(int sampleRate, int durationSeconds, double frequencyHz) {
+    const auto totalSamples = static_cast<size_t>(sampleRate * durationSeconds);
+    std::vector<uint8_t> pcm(totalSamples * 2, 0); // 16-bit mono, little-endian
+
+    for (size_t i = 0; i < totalSamples; ++i) {
+        const double t = static_cast<double>(i) / static_cast<double>(sampleRate);
+        const auto sample = static_cast<int16_t>(
+            static_cast<double>(INT16_MAX) * 0.5 * std::sin(2.0 * 3.14159265358979323846 * frequencyHz * t));
+        const auto encodedSample = static_cast<uint16_t>(sample);
+        pcm[i * 2] = static_cast<uint8_t>(encodedSample & 0xFF);
+        pcm[i * 2 + 1] = static_cast<uint8_t>((encodedSample >> 8) & 0xFF);
+    }
+    return pcm;
+}
+
+#ifdef HAS_PORTAUDIO
+// PortAudio callback — captures 16-bit mono PCM and pushes to the queue
+int PaCallback(const void* input, void* /*output*/,
+               unsigned long frameCount,
+               const PaStreamCallbackTimeInfo* /*timeInfo*/,
+               PaStreamCallbackFlags /*statusFlags*/,
+               void* userData) {
+    auto* queue = static_cast<AudioQueue*>(userData);
+    const auto* pcm = static_cast<const uint8_t*>(input);
+    const size_t byteCount = frameCount * 2; // 16-bit mono = 2 bytes per frame
+    std::vector<uint8_t> chunk(pcm, pcm + byteCount);
+    queue->Push(std::move(chunk));
+    return g_running ? paContinue : paComplete;
+}
+#endif
+
+} // namespace
+
+int main(int argc, char* argv[]) {
+    bool useSynth = false;
+    for (int i = 1; i < argc; ++i) {
+        if (std::string(argv[i]) == "--synth") useSynth = true;
+    }
+
+    // Install Ctrl+C handler (mirrors JS process.on('SIGINT'))
+    std::signal(SIGINT, SignalHandler);
+
+    try {
+        std::cout << "===========================================================" << std::endl;
+        std::cout << " Foundry Local -- Live Audio Transcription Demo (C++)" << std::endl;
+        std::cout << "===========================================================" << std::endl;
+        std::cout << std::endl;
+
+        foundry_local::Configuration config;
+        config.appName = "foundry_local_samples";
+
+        foundry_local::Manager::Create(config);
+        auto& manager = foundry_local::Manager::Instance();
+        manager.EnsureEpsDownloaded();
+
+        auto& catalog = manager.GetCatalog();
+        auto* model = catalog.GetModel("nemotron-speech-streaming-en-0.6b");
+        if (!model) {
+            throw std::runtime_error("Model \"nemotron-speech-streaming-en-0.6b\" not found in catalog");
+        }
+
+        std::cout << "Downloading model (if needed)..." << std::endl;
+        model->Download([](float pct) {
+            std::cout << "\rDownloading: " << pct << "% " << std::flush;
+        });
+        std::cout << std::endl;
+        std::cout << "Loading model..." << std::endl;
+        model->Load();
+        std::cout << "Model loaded" << std::endl;
+
+        // NOTE: CreateLiveTranscriptionSession() is not yet available in the C++ SDK.
+        // The audio client and session code below is forward-looking.
+        foundry_local::OpenAIAudioClient audioClient(*model);
+        auto session = audioClient.CreateLiveTranscriptionSession();
+
+        session->Settings().sample_rate = 16000;
+        session->Settings().channels = 1;
+        session->Settings().bits_per_sample = 16;
+        session->Settings().language = "en";
+        session->Start();
+        std::cout << "Session started" << std::endl;
+
+        // Read transcription results in a background thread (mirrors JS readPromise)
+        std::thread readThread([&session]() {
+            foundry_local::LiveAudioTranscriptionResponse result;
+            while (g_running) {
+                const auto status = session->TryGetNext(result, std::chrono::milliseconds(500));
+                if (status == foundry_local::TranscriptionStatus::Result) {
+                    if (result.is_final) {
+                        std::cout << "\n [FINAL] " << result.text << std::endl;
+                    } else if (!result.text.empty()) {
+                        std::cout << result.text << std::flush;
+                    }
+                } else if (status == foundry_local::TranscriptionStatus::Closed) {
+                    break;
+                } else if (status == foundry_local::TranscriptionStatus::Timeout) {
+                    continue;
+                } else {
+                    std::cerr << "Transcription stream error: " << session->GetErrorMessage() << std::endl;
+                    break;
+                }
+            }
+        });
+
+        // --- Microphone capture (mirrors JS naudiodon2 section) ---
+        // Uses PortAudio for cross-platform audio capture. If PortAudio is not
+        // available or --synth is passed, falls back to synthetic PCM.
+
+        bool micActive = false;
+
+#ifdef HAS_PORTAUDIO
+        PaStream* paStream = nullptr;
+        AudioQueue audioQueue;
+
+        if (!useSynth) {
+            PaError err = Pa_Initialize();
+            if (err == paNoError) {
+                PaStreamParameters inputParams{};
+                inputParams.device = Pa_GetDefaultInputDevice();
+                if (inputParams.device != paNoDevice) {
+                    inputParams.channelCount = 1;
+                    inputParams.sampleFormat = paInt16;
+                    inputParams.suggestedLatency =
+                        Pa_GetDeviceInfo(inputParams.device)->defaultLowInputLatency;
+                    inputParams.hostApiSpecificStreamInfo = nullptr;
+
+                    // framesPerBuffer=3200 matches JS framesPerBuffer setting
+                    err = Pa_OpenStream(&paStream, &inputParams, nullptr,
+                                        16000, 3200, paClipOff,
+                                        PaCallback, &audioQueue);
+                    if (err == paNoError) {
+                        err = Pa_StartStream(paStream);
+                    }
+                }
+
+                if (err == paNoError && paStream) {
+                    micActive = true;
+                    std::cout << std::endl;
+                    std::cout << "===========================================================" << std::endl;
+                    std::cout << " LIVE TRANSCRIPTION ACTIVE" << std::endl;
+                    std::cout << " Speak into your microphone." << std::endl;
+                    std::cout << " Press Ctrl+C to stop." << std::endl;
+                    std::cout << "===========================================================" << std::endl;
+                    std::cout << std::endl;
+
+                    // Pump audio from the queue to the session (mirrors JS pumpAudio)
+                    while (g_running) {
+                        std::vector<uint8_t> chunk;
+                        if (audioQueue.TryPop(chunk)) {
+                            session->Append(chunk.data(), chunk.size());
+                        } else {
+                            std::this_thread::sleep_for(std::chrono::milliseconds(10));
+                        }
+                    }
+
+                    Pa_StopStream(paStream);
+                    Pa_CloseStream(paStream);
+                } else {
+                    std::cerr << "Could not initialize microphone: "
+                              << Pa_GetErrorText(err) << std::endl;
+                    std::cerr << "Falling back to synthetic audio test..." << std::endl;
+                    std::cerr << std::endl;
+                }
+                Pa_Terminate();
+            }
+        }
+#endif
+
+        // Fallback: push synthetic PCM (440Hz sine wave) — mirrors JS catch block
+        if (!micActive) {
+            std::cout << "Pushing synthetic audio (440Hz sine, 2s)..."
+                      << std::endl;
+            const auto pcm = GenerateSineWavePcm(16000, 2, 440.0);
+            const size_t chunkSize = static_cast<size_t>(16000 / 10 * 2); // 100ms
+            for (size_t offset = 0; offset < pcm.size() && g_running; offset += chunkSize) {
+                const size_t len = std::min(chunkSize, pcm.size() - offset);
+                session->Append(pcm.data() + offset, len);
+                std::this_thread::sleep_for(std::chrono::milliseconds(100));
+            }
+            std::cout << "Synthetic audio pushed" << std::endl;
+
+            // Wait briefly for remaining transcription results
+            std::this_thread::sleep_for(std::chrono::seconds(3));
+        }
+
+        // Graceful shutdown (mirrors JS SIGINT handler)
+        std::cout << "\n\nStopping..." << std::endl;
+        session->Stop();
+        readThread.join();
+        model->Unload();
+        foundry_local::Manager::Destroy();
+        std::cout << "Done" << std::endl;
+        return 0;
+    } catch (const std::exception& ex) {
+        std::cerr << "Error: " << ex.what() << std::endl;
+        foundry_local::Manager::Destroy();
+        return 1;
+    }
+}
diff --git a/samples/cs/Directory.Packages.props b/samples/cs/Directory.Packages.props
index d799c4cd..77b68c4c 100644
--- a/samples/cs/Directory.Packages.props
+++ b/samples/cs/Directory.Packages.props
@@ -7,8 +7,8 @@
-
-
+
+
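Note the backpressure contrast between the sample and the SDK: the C++ and JS samples cap their capture queues at 100 chunks and drop the oldest entry on overflow, while the Python SDK's push queue blocks the caller instead. Callers who want the samples' drop-oldest behavior on top of the blocking Python `append()` could wrap it; a sketch, assuming `session` is a started `LiveAudioTranscriptionSession` (the wrapper class is illustrative, not part of the SDK):

```python
import collections
import threading

class DropOldestAppender:
    """Approximates the samples' drop-oldest policy over a blocking session.append()."""

    def __init__(self, session, capacity: int = 100):
        self._session = session
        # deque(maxlen=...) silently discards the oldest chunk when full
        self._chunks = collections.deque(maxlen=capacity)
        self._wake = threading.Event()
        self._running = True
        self._thread = threading.Thread(target=self._pump, daemon=True)
        self._thread.start()

    def append(self, pcm: bytes) -> None:
        self._chunks.append(bytes(pcm))  # copy, as the SDK itself does
        self._wake.set()

    def _pump(self) -> None:
        # Single consumer thread: the only place session.append() may block
        while self._running:
            self._wake.wait(timeout=0.1)
            self._wake.clear()
            while self._chunks:
                self._session.append(self._chunks.popleft())

    def close(self) -> None:
        # Note: chunks still queued when close() is called are not drained
        self._running = False
        self._thread.join()
```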
diff --git a/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj b/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj new file mode 100644 index 00000000..3d91b677 --- /dev/null +++ b/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj @@ -0,0 +1,55 @@ + + + + Exe + enable + enable + + + + + net9.0-windows10.0.26100 + false + ARM64;x64 + None + false + + + + + net9.0 + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.sln b/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.sln new file mode 100644 index 00000000..f8c88284 --- /dev/null +++ b/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.0.31903.59 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LiveAudioTranscriptionExample", "LiveAudioTranscriptionExample.csproj", "{A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Debug|x64 = Debug|x64 + Debug|x86 = Debug|x86 + Release|Any CPU = Release|Any CPU + Release|x64 = Release|x64 + Release|x86 = Release|x86 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x64.ActiveCfg = Debug|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x64.Build.0 = Debug|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x86.ActiveCfg = Debug|x86 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Debug|x86.Build.0 = Debug|x86 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|Any CPU.Build.0 = Release|Any CPU + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x64.ActiveCfg = Release|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x64.Build.0 = Release|x64 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x86.ActiveCfg = Release|x86 + {A2B3C4D5-E6F7-4A8B-9C0D-1E2F3A4B5C6D}.Release|x86.Build.0 = Release|x86 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/samples/cs/live-audio-transcription/Program.cs b/samples/cs/live-audio-transcription/Program.cs new file mode 100644 index 00000000..3771b2a0 --- /dev/null +++ b/samples/cs/live-audio-transcription/Program.cs @@ -0,0 +1,166 @@ +// Live Audio Transcription — Foundry Local SDK Example +// +// NAudio's WaveInEvent is Windows-only. On non-Windows platforms, the sample +// falls back to synthetic PCM audio. 
+ +using Microsoft.AI.Foundry.Local; +using NAudio.Wave; + +Console.WriteLine("==========================================================="); +Console.WriteLine(" Foundry Local -- Live Audio Transcription Demo"); +Console.WriteLine("==========================================================="); +Console.WriteLine(); + +var config = new Configuration +{ + AppName = "foundry_local_samples", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +await FoundryLocalManager.CreateAsync(config, Utils.GetAppLogger()); +var mgr = FoundryLocalManager.Instance; + +await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); + +var catalog = await mgr.GetCatalogAsync(); + +var model = await catalog.GetModelAsync("nemotron-speech-streaming-en-0.6b") ?? throw new Exception("Model \"nemotron-speech-streaming-en-0.6b\" not found in catalog"); + +await model.DownloadAsync(progress => +{ + Console.Write($"\rDownloading model: {progress:F2}%"); + if (progress >= 100f) + { + Console.WriteLine(); + } +}); + +Console.Write($"Loading model {model.Id}..."); +await model.LoadAsync(); +Console.WriteLine("done."); + +var audioClient = await model.GetAudioClientAsync(); +var session = audioClient.CreateLiveTranscriptionSession(); +session.Settings.SampleRate = 16000; // Default is 16000; shown here to match the NAudio WaveFormat below +session.Settings.Channels = 1; +session.Settings.Language = "en"; + +await session.StartAsync(); +Console.WriteLine(" Session started"); + +var readTask = Task.Run(async () => +{ + try + { + await foreach (var result in session.GetTranscriptionStream()) + { + var text = result.Content?[0]?.Text; + if (result.IsFinal) + { + Console.WriteLine(); + Console.WriteLine($" [FINAL] {text}"); + Console.Out.Flush(); + } + else if (!string.IsNullOrEmpty(text)) + { + Console.ForegroundColor = ConsoleColor.Cyan; + Console.Write(text); + Console.ResetColor(); + Console.Out.Flush(); + } + } + } + catch (OperationCanceledException) { } +}); + +bool useSynth = args.Contains("--synth"); + +// NAudio WaveInEvent is Windows-only. On other platforms, fall back to synthetic audio. +if (!useSynth && OperatingSystem.IsWindows()) +{ + using var waveIn = new WaveInEvent + { + WaveFormat = new WaveFormat(rate: 16000, bits: 16, channels: 1), + BufferMilliseconds = 100 + }; + + // Use a bounded channel to avoid unbounded fire-and-forget AppendAsync calls. + // NAudio's DataAvailable callback is synchronous, so we enqueue PCM chunks and + // await AppendAsync on a dedicated task to respect SDK backpressure. 
+    var audioChannel = System.Threading.Channels.Channel.CreateBounded<byte[]>(
+        new System.Threading.Channels.BoundedChannelOptions(50)
+        {
+            FullMode = System.Threading.Channels.BoundedChannelFullMode.DropOldest
+        });
+
+    var appendTask = Task.Run(async () =>
+    {
+        await foreach (var chunk in audioChannel.Reader.ReadAllAsync())
+        {
+            await session.AppendAsync(chunk);
+        }
+    });
+
+    waveIn.DataAvailable += (sender, e) =>
+    {
+        if (e.BytesRecorded > 0)
+        {
+            var buffer = new byte[e.BytesRecorded];
+            Buffer.BlockCopy(e.Buffer, 0, buffer, 0, e.BytesRecorded);
+            audioChannel.Writer.TryWrite(buffer);
+        }
+    };
+
+    Console.WriteLine();
+    Console.WriteLine("===========================================================");
+    Console.WriteLine(" LIVE TRANSCRIPTION ACTIVE");
+    Console.WriteLine(" Speak into your microphone.");
+    Console.WriteLine(" Transcription appears in real-time (cyan text).");
+    Console.WriteLine(" Press ENTER to stop recording.");
+    Console.WriteLine("===========================================================");
+    Console.WriteLine();
+
+    waveIn.StartRecording();
+    Console.ReadLine();
+    waveIn.StopRecording();
+
+    audioChannel.Writer.Complete();
+    await appendTask;
+}
+else
+{
+    if (!OperatingSystem.IsWindows() && !useSynth)
+    {
+        Console.WriteLine("NAudio mic capture is Windows-only. Falling back to synthetic audio...");
+    }
+
+    // Synthetic PCM fallback: 440Hz sine wave, 2 seconds
+    Console.WriteLine("Pushing synthetic audio (440Hz sine, 2s)...");
+    const int sampleRate = 16000;
+    const int duration = 2;
+    var totalSamples = sampleRate * duration;
+    var pcmBytes = new byte[totalSamples * 2];
+    for (int i = 0; i < totalSamples; i++)
+    {
+        double t = (double)i / sampleRate;
+        short sample = (short)(short.MaxValue * 0.5 * Math.Sin(2 * Math.PI * 440 * t));
+        pcmBytes[i * 2] = (byte)(sample & 0xFF);
+        pcmBytes[i * 2 + 1] = (byte)((sample >> 8) & 0xFF);
+    }
+
+    int chunkSize = (sampleRate / 10) * 2; // 100ms
+    for (int offset = 0; offset < pcmBytes.Length; offset += chunkSize)
+    {
+        int len = Math.Min(chunkSize, pcmBytes.Length - offset);
+        await session.AppendAsync(pcmBytes.AsMemory(offset, len));
+        await Task.Delay(100);
+    }
+
+    Console.WriteLine("✓ Synthetic audio pushed");
+    await Task.Delay(3000); // Wait for remaining transcription results
+}
+
+await session.StopAsync();
+await readTask;
+
+await model.UnloadAsync();
diff --git a/samples/cs/live-audio-transcription/README.md b/samples/cs/live-audio-transcription/README.md
new file mode 100644
index 00000000..9aa91e0b
--- /dev/null
+++ b/samples/cs/live-audio-transcription/README.md
@@ -0,0 +1,65 @@
+# Live Audio Transcription Example
+
+Real-time microphone-to-text transcription using the Foundry Local C# SDK with Nemotron ASR.
+
+## Prerequisites
+
+- [Foundry Local](https://github.com/microsoft/Foundry-Local) installed
+- .NET 9 SDK
+- A microphone (optional — falls back to synthetic audio on non-Windows or with `--synth`)
+
+## Setup
+
+```bash
+dotnet restore
+```
+
+> **Note:** Microphone capture uses [NAudio](https://github.com/naudio/NAudio) and is Windows-only. On other platforms, the sample falls back to synthetic audio for testing.
+
+## Run
+
+```bash
+dotnet run
+```
+
+Speak into your microphone. Transcription appears in real-time (cyan text). Press `ENTER` to stop recording.
+
+To force synthetic audio (e.g., for CI or non-Windows):
+
+```bash
+dotnet run -- --synth
+```
+
+## How it works
+
+1. Initializes the Foundry Local SDK and loads the Nemotron ASR model
+2.
Creates a `LiveAudioTranscriptionSession` with 16kHz/16-bit/mono PCM settings +3. Captures microphone audio via `NAudio.WaveInEvent` (or generates synthetic audio as fallback) +4. Pushes PCM chunks to the SDK via `session.AppendAsync()` through a bounded channel for backpressure +5. Reads transcription results via `await foreach (var result in session.GetTranscriptionStream())` +6. Access text via `result.Content[0].Text` (OpenAI Realtime ConversationItem pattern) + +## API + +```csharp +var audioClient = await model.GetAudioClientAsync(); +var session = audioClient.CreateLiveTranscriptionSession(); +session.Settings.SampleRate = 16000; +session.Settings.Channels = 1; +session.Settings.Language = "en"; + +await session.StartAsync(); + +// Push audio +await session.AppendAsync(pcmBytes); + +// Read results +await foreach (var result in session.GetTranscriptionStream()) +{ + Console.WriteLine(result.Content[0].Text); // transcribed text + Console.WriteLine(result.Content[0].Transcript); // alias (OpenAI compat) + Console.WriteLine(result.IsFinal); // true for final results +} + +await session.StopAsync(); +``` diff --git a/samples/cs/nuget.config b/samples/cs/nuget.config index 3a9f6b32..63954b2f 100644 --- a/samples/cs/nuget.config +++ b/samples/cs/nuget.config @@ -3,5 +3,17 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/samples/js/live-audio-transcription/README.md b/samples/js/live-audio-transcription/README.md new file mode 100644 index 00000000..dc7a3fa1 --- /dev/null +++ b/samples/js/live-audio-transcription/README.md @@ -0,0 +1,58 @@ +# Live Audio Transcription Example + +Real-time microphone-to-text transcription using the Foundry Local JS SDK with Nemotron ASR. + +## Prerequisites + +- [Foundry Local](https://github.com/microsoft/Foundry-Local) installed +- Node.js 18+ +- A microphone (optional — falls back to synthetic audio) + +## Setup + +```bash +npm install +``` + +> **Note:** `naudiodon2` is optional — provides cross-platform microphone capture. Without it, the example falls back to synthetic audio for testing. + +## Run + +```bash +node app.js +``` + +Speak into your microphone. Transcription appears in real-time. Press `Ctrl+C` to stop. + +## How it works + +1. Initializes the Foundry Local SDK and loads the Nemotron ASR model +2. Creates a `LiveAudioTranscriptionSession` with 16kHz/16-bit/mono PCM settings +3. Captures microphone audio via `naudiodon2` (or generates synthetic audio as fallback) +4. Pushes PCM chunks to the SDK via `session.append()` +5. Reads transcription results via `for await (const result of session.getTranscriptionStream())` +6. 
Access text via `result.content[0].text` (OpenAI Realtime ConversationItem pattern) + +## API + +```javascript +const audioClient = model.createAudioClient(); +const session = audioClient.createLiveTranscriptionSession(); +session.settings.sampleRate = 16000; +session.settings.channels = 1; +session.settings.language = 'en'; + +await session.start(); + +// Push audio +await session.append(pcmBytes); + +// Read results +for await (const result of session.getTranscriptionStream()) { + console.log(result.content[0].text); // transcribed text + console.log(result.content[0].transcript); // alias (OpenAI compat) + console.log(result.is_final); // true for final results +} + +await session.stop(); +``` diff --git a/samples/js/live-audio-transcription/app.js b/samples/js/live-audio-transcription/app.js new file mode 100644 index 00000000..60d583f0 --- /dev/null +++ b/samples/js/live-audio-transcription/app.js @@ -0,0 +1,196 @@ +// Live Audio Transcription Example — Foundry Local JS SDK +// +// Demonstrates real-time microphone-to-text using the JS SDK. +// Requires: npm install foundry-local-sdk naudiodon2 +// +// Usage: node app.js + +import { FoundryLocalManager } from 'foundry-local-sdk'; + +console.log('╔══════════════════════════════════════════════════════════╗'); +console.log('║ Foundry Local — Live Audio Transcription (JS SDK) ║'); +console.log('╚══════════════════════════════════════════════════════════╝'); +console.log(); + +// Initialize the Foundry Local SDK +console.log('Initializing Foundry Local SDK...'); +const manager = FoundryLocalManager.create({ + appName: 'foundry', + logLevel: 'info' +}); +console.log('✓ SDK initialized'); + +// Get and load the nemotron model +const modelAlias = 'nemotron-speech-streaming-en-0.6b'; +let model = await manager.catalog.getModel(modelAlias); +if (!model) { + console.error(`ERROR: Model "${modelAlias}" not found in catalog.`); + process.exit(1); +} + +console.log(`Found model: ${model.id}`); +console.log('Downloading model (if needed)...'); +await model.download((progress) => { + process.stdout.write(`\rDownloading... ${progress.toFixed(2)}%`); +}); +console.log('\n✓ Model downloaded'); + +console.log('Loading model...'); +await model.load(); +console.log('✓ Model loaded'); + +// Create live transcription session (same pattern as C# sample). +const audioClient = model.createAudioClient(); +const session = audioClient.createLiveTranscriptionSession(); + +session.settings.sampleRate = 16000; // Default is 16000; shown here for clarity +session.settings.channels = 1; +session.settings.bitsPerSample = 16; +session.settings.language = 'en'; + +console.log('Starting streaming session...'); +await session.start(); +console.log('✓ Session started'); + +// Read transcription results in background +const readPromise = (async () => { + try { + for await (const result of session.getTranscriptionStream()) { + const text = result.content?.[0]?.text; + if (!text) continue; + + // `is_final` is a transcript-state marker only. It should not stop the app. + if (result.is_final) { + process.stdout.write(`\n [FINAL] ${text}\n`); + } else { + process.stdout.write(text); + } + } + } catch (err) { + if (err.name !== 'AbortError') { + console.error('Stream error:', err.message); + } + } +})(); + +// --- Microphone capture --- +// This example uses naudiodon2 for cross-platform audio capture. +// Install with: npm install naudiodon2 +// +// If you prefer a different audio library, just push PCM bytes +// (16-bit signed LE, mono, 16kHz) via session.append(). 
+ +let audioInput; +try { + const { default: portAudio } = await import('naudiodon2'); + + audioInput = portAudio.AudioIO({ + inOptions: { + channelCount: session.settings.channels, + sampleFormat: session.settings.bitsPerSample === 16 + ? portAudio.SampleFormat16Bit + : portAudio.SampleFormat32Bit, + sampleRate: session.settings.sampleRate, + // Larger chunk size lowers callback frequency and reduces overflow risk. + framesPerBuffer: 3200, + // Allow deeper native queue during occasional event-loop stalls. + maxQueue: 64 + } + }); + + const appendQueue = []; + let pumping = false; + let warnedQueueDrop = false; + + const pumpAudio = async () => { + if (pumping) return; + pumping = true; + try { + while (appendQueue.length > 0) { + const pcm = appendQueue.shift(); + await session.append(pcm); + } + } catch (err) { + console.error('append error:', err.message); + } finally { + pumping = false; + // Handle race where new data arrived after loop exit. + if (appendQueue.length > 0) { + void pumpAudio(); + } + } + }; + + audioInput.on('data', (buffer) => { + // Single copy: slice the underlying ArrayBuffer to get an independent Uint8Array. + const copy = new Uint8Array(buffer.buffer, buffer.byteOffset, buffer.byteLength).slice(); + + // Keep a bounded queue to avoid unbounded memory growth. + if (appendQueue.length >= 100) { + appendQueue.shift(); + if (!warnedQueueDrop) { + warnedQueueDrop = true; + console.warn('Audio append queue overflow; dropping oldest chunk to keep stream alive.'); + } + } + + appendQueue.push(copy); + void pumpAudio(); + }); + + console.log(); + console.log('════════════════════════════════════════════════════════════'); + console.log(' LIVE TRANSCRIPTION ACTIVE'); + console.log(' Speak into your microphone.'); + console.log(' Press Ctrl+C to stop.'); + console.log('════════════════════════════════════════════════════════════'); + console.log(); + + audioInput.start(); +} catch (err) { + console.warn('⚠ Could not initialize microphone (naudiodon2 may not be installed).'); + console.warn(' Install with: npm install naudiodon2'); + console.warn(' Falling back to synthetic audio test...'); + console.warn(); + + // Fallback: push 2 seconds of synthetic PCM (440Hz sine wave) + const sampleRate = session.settings.sampleRate; + const duration = 2; + const totalSamples = sampleRate * duration; + const pcmBytes = new Uint8Array(totalSamples * 2); + for (let i = 0; i < totalSamples; i++) { + const t = i / sampleRate; + const sample = Math.round(32767 * 0.5 * Math.sin(2 * Math.PI * 440 * t)); + pcmBytes[i * 2] = sample & 0xFF; + pcmBytes[i * 2 + 1] = (sample >> 8) & 0xFF; + } + + // Push in 100ms chunks + const chunkSize = (sampleRate / 10) * 2; + for (let offset = 0; offset < pcmBytes.length; offset += chunkSize) { + const len = Math.min(chunkSize, pcmBytes.length - offset); + await session.append(pcmBytes.slice(offset, offset + len)); + } + + console.log('✓ Synthetic audio pushed'); + console.log('Waiting briefly for final transcription results...'); + await new Promise((resolve) => setTimeout(resolve, 3000)); + await session.stop(); + await readPromise; + await model.unload(); + console.log('✓ Done'); + process.exit(0); +} + +// Handle graceful shutdown +process.on('SIGINT', async () => { + console.log('\n\nStopping...'); + if (audioInput) { + audioInput.quit(); + } + await session.stop(); + await readPromise; + await model.unload(); + console.log('✓ Done'); + process.exit(0); +}); diff --git a/samples/js/live-audio-transcription/package.json 
b/samples/js/live-audio-transcription/package.json new file mode 100644 index 00000000..d3e9d4cf --- /dev/null +++ b/samples/js/live-audio-transcription/package.json @@ -0,0 +1,16 @@ +{ + "name": "live-audio-transcription-example", + "version": "1.0.0", + "type": "module", + "description": "Live audio transcription example using the Foundry Local JS SDK", + "main": "app.js", + "scripts": { + "start": "node app.js" + }, + "dependencies": { + "foundry-local-sdk": "latest" + }, + "optionalDependencies": { + "naudiodon2": "latest" + } +} diff --git a/samples/python/live-audio-transcription/README.md b/samples/python/live-audio-transcription/README.md new file mode 100644 index 00000000..0b89048e --- /dev/null +++ b/samples/python/live-audio-transcription/README.md @@ -0,0 +1,68 @@ +# Live Audio Transcription Example + +Real-time microphone-to-text transcription using the Foundry Local Python SDK with Nemotron ASR. + +## Prerequisites + +- [Foundry Local](https://github.com/microsoft/Foundry-Local) installed +- Python 3.9+ +- A microphone (optional — falls back to synthetic audio with `--synth` or if PyAudio is unavailable) + +## Setup + +```bash +pip install -r requirements.txt +``` + +> **Note:** `pyaudio` is **optional** — it provides cross-platform microphone capture. Without it, the example falls back to synthetic audio for testing. +> +> Install manually if needed: +> ```bash +> pip install pyaudio +> ``` + +## Run + +```bash +python src/app.py +``` + +Speak into your microphone. Transcription appears in real-time. Press `Ctrl+C` to stop. + +To force synthetic audio (e.g., for CI or when no microphone is available): + +```bash +python src/app.py --synth +``` + +## How it works + +1. Initializes the Foundry Local SDK and loads the Nemotron ASR model +2. Creates a `LiveAudioTranscriptionSession` with 16kHz/16-bit/mono PCM settings +3. Captures microphone audio via `pyaudio` (or generates synthetic audio as fallback) +4. Pushes PCM chunks to the SDK via `session.append()` +5. Reads transcription results in a background thread via `for result in session.get_transcription_stream()` +6. Access text via `result.content[0].text` (OpenAI Realtime ConversationItem pattern) + +## API + +```python +audio_client = model.get_audio_client() +session = audio_client.create_live_transcription_session() +session.settings.sample_rate = 16000 +session.settings.channels = 1 +session.settings.language = "en" + +session.start() + +# Push audio +session.append(pcm_bytes) + +# Read results (typically on a background thread) +for result in session.get_transcription_stream(): + print(result.content[0].text) # transcribed text + print(result.content[0].transcript) # alias (OpenAI compat) + print(result.is_final) # True for final results + +session.stop() +``` diff --git a/samples/python/live-audio-transcription/requirements.txt b/samples/python/live-audio-transcription/requirements.txt new file mode 100644 index 00000000..6677976f --- /dev/null +++ b/samples/python/live-audio-transcription/requirements.txt @@ -0,0 +1,5 @@ +foundry-local-sdk; sys_platform != "win32" +foundry-local-sdk-winml; sys_platform == "win32" +# pyaudio is optional — only needed for live microphone capture. +# Install manually: pip install pyaudio +# The sample falls back to synthetic audio if pyaudio is unavailable. 
diff --git a/samples/python/live-audio-transcription/src/app.py b/samples/python/live-audio-transcription/src/app.py new file mode 100644 index 00000000..083ebbdf --- /dev/null +++ b/samples/python/live-audio-transcription/src/app.py @@ -0,0 +1,174 @@ +# Live Audio Transcription — Foundry Local SDK Example (Python) +# +# Tries PyAudio mic capture first; falls back to synthetic PCM if unavailable. +# +# Usage: +# pip install -r requirements.txt +# python src/app.py # Live microphone +# python src/app.py --synth # Synthetic 440Hz sine wave + +import math +import signal +import struct +import sys +import threading +import time + +from foundry_local_sdk import Configuration, FoundryLocalManager + +use_synth = "--synth" in sys.argv + +print("===========================================================") +print(" Foundry Local -- Live Audio Transcription Demo (Python)") +print("===========================================================") +print() + +config = Configuration(app_name="foundry_local_samples") +FoundryLocalManager.initialize(config) +manager = FoundryLocalManager.instance + +manager.download_and_register_eps() + +model = manager.catalog.get_model("nemotron-speech-streaming-en-0.6b") +if model is None: + raise RuntimeError('Model "nemotron-speech-streaming-en-0.6b" not found in catalog') + +model.download( + lambda progress: print(f"\rDownloading model: {progress:.2f}%", end="", flush=True) +) +print() +print(f"Loading model {model.id}...", end="") +model.load() +print("done.") + +audio_client = model.get_audio_client() +session = audio_client.create_live_transcription_session() +session.settings.sample_rate = 16000 +session.settings.channels = 1 +session.settings.language = "en" + +session.start() +print("✓ Session started") + +# --- Background thread reads transcription results (mirrors JS readPromise) --- + +def read_results(): + for result in session.get_transcription_stream(): + text = result.content[0].text if result.content else "" + if result.is_final: + print() + print(f" [FINAL] {text}") + elif text: + print(text, end="", flush=True) + + +read_thread = threading.Thread(target=read_results, daemon=True) +read_thread.start() + +# --- Microphone capture (mirrors JS naudiodon2 / C++ PortAudio) --- +# Try PyAudio for mic input; fall back to synthetic PCM on failure. 
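+#
+# Any other PCM source works too. A minimal alternative sketch (hypothetical
+# file name, not part of this sample): stream a pre-recorded raw PCM file
+# instead of a microphone:
+#
+#     with open("speech-16k-mono.pcm", "rb") as f:
+#         while chunk := f.read(3200):  # ~100ms of 16kHz/16-bit mono audio
+#             session.append(chunk)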
+
+RATE = 16000
+CHANNELS = 1
+CHUNK = RATE // 10  # 100ms of audio = 1600 frames
+
+stop_event = threading.Event()
+mic_active = False
+pa = None
+stream = None
+
+if not use_synth:
+    try:
+        import pyaudio
+
+        pa = pyaudio.PyAudio()
+        stream = pa.open(
+            format=pyaudio.paInt16,
+            channels=CHANNELS,
+            rate=RATE,
+            input=True,
+            frames_per_buffer=CHUNK,
+        )
+        mic_active = True
+
+        print()
+        print("===========================================================")
+        print("  LIVE TRANSCRIPTION ACTIVE")
+        print("  Speak into your microphone.")
+        print("  Press Ctrl+C to stop.")
+        print("===========================================================")
+        print()
+
+        def capture_mic():
+            while not stop_event.is_set():
+                try:
+                    pcm_data = stream.read(CHUNK, exception_on_overflow=False)
+                    if pcm_data:
+                        session.append(pcm_data)
+                except Exception as e:
+                    print(f"\n[ERROR] Microphone capture failed: {e}")
+                    stop_event.set()
+                    break
+
+        capture_thread = threading.Thread(target=capture_mic, daemon=True)
+        capture_thread.start()
+
+    except Exception as e:
+        print(f"Could not initialize microphone: {e}")
+        print("Falling back to synthetic audio test...")
+        print()
+        mic_active = False
+        if stream:
+            stream.close()
+        if pa:
+            pa.terminate()
+        pa = None
+        stream = None
+
+# Fallback: push synthetic PCM (440Hz sine wave) — mirrors JS catch block
+if not mic_active:
+    print("Pushing synthetic audio (440Hz sine, 2s)...")
+    duration = 2
+    total_samples = RATE * duration
+    pcm_bytes = bytearray(total_samples * 2)
+    for i in range(total_samples):
+        t = i / RATE
+        sample = int(32767 * 0.5 * math.sin(2 * math.pi * 440 * t))
+        struct.pack_into("<h", pcm_bytes, i * 2, sample)
 > [!TIP]
-> Each sample's `Cargo.toml` uses `[target.'cfg(windows)'.dependencies]` to automatically enable the `winml` feature on Windows for broader hardware acceleration. On macOS and Linux, the standard SDK is used. No manual configuration needed.
\ No newline at end of file
+> Each sample's `Cargo.toml` uses `[target.'cfg(windows)'.dependencies]` to automatically enable the `winml` feature on Windows for broader hardware acceleration. On macOS and Linux, the standard SDK is used. No manual configuration needed.
diff --git a/samples/rust/live-audio-transcription/Cargo.toml b/samples/rust/live-audio-transcription/Cargo.toml
new file mode 100644
index 00000000..ca732456
--- /dev/null
+++ b/samples/rust/live-audio-transcription/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "live-audio-transcription-example"
+version = "0.1.0"
+edition = "2021"
+description = "Live audio transcription (streaming) example using the Foundry Local Rust SDK"
+
+[dependencies]
+foundry-local-sdk = { path = "../../../sdk/rust" }
+tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
+tokio-stream = "0.1"
+cpal = "0.15"
+ctrlc = "3"
+
+[target.'cfg(windows)'.dependencies]
+foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] }
diff --git a/samples/rust/live-audio-transcription/README.md b/samples/rust/live-audio-transcription/README.md
new file mode 100644
index 00000000..a9f90f3d
--- /dev/null
+++ b/samples/rust/live-audio-transcription/README.md
@@ -0,0 +1,21 @@
+# Live Audio Transcription Example (Rust)
+
+Demonstrates real-time microphone-to-text using the Foundry Local Rust SDK:
+
+**Microphone (CPAL) → SDK (FoundryLocalManager) → Core (NativeAOT DLL)**
+
+Uses [CPAL](https://crates.io/crates/cpal) for cross-platform microphone capture
+(the Rust equivalent of `naudiodon2` in JS / `PortAudio` in C++ / `PyAudio` in Python).
+If CPAL cannot open a microphone, the sample falls back to synthetic PCM audio.
+
+## Run (once the API is available)
+
+```bash
+cd samples/rust/live-audio-transcription
+
+# Live microphone (press Ctrl+C to stop)
+cargo run
+
+# Synthetic 440Hz sine wave (no microphone needed)
+cargo run -- --synth
+```
diff --git a/samples/rust/live-audio-transcription/src/main.rs b/samples/rust/live-audio-transcription/src/main.rs
new file mode 100644
index 00000000..b97e9418
--- /dev/null
+++ b/samples/rust/live-audio-transcription/src/main.rs
@@ -0,0 +1,308 @@
+// Live Audio Transcription — Foundry Local Rust SDK Example
+//
+// Tries CPAL mic capture first; falls back to synthetic PCM if unavailable.
+//
+// Usage:
+//   cargo run            # Live microphone (press Ctrl+C to stop)
+//   cargo run -- --synth # Synthetic 440Hz sine wave
+
+use std::env;
+use std::io::{self, Write};
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::Arc;
+
+use cpal::traits::{DeviceTrait, HostTrait, StreamTrait};
+use foundry_local_sdk::{FoundryLocalConfig, FoundryLocalManager};
+use tokio_stream::StreamExt;
+
+const ALIAS: &str = "nemotron-speech-streaming-en-0.6b";
+
+// Global flag for Ctrl+C graceful shutdown (mirrors JS process.on('SIGINT'))
+static RUNNING: AtomicBool = AtomicBool::new(true);
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let use_synth = env::args().any(|a| a == "--synth");
+
+    // Install Ctrl+C handler (mirrors JS SIGINT / C++ SignalHandler)
+    let running = Arc::new(AtomicBool::new(true));
+    let running_for_signal = running.clone();
+    ctrlc::set_handler(move || {
+        RUNNING.store(false, Ordering::SeqCst);
+        running_for_signal.store(false, Ordering::SeqCst);
+    })?;
+
+    println!("===========================================================");
+    println!("  Foundry Local -- Live Audio Transcription Demo (Rust)");
+    println!("===========================================================");
+    println!();
+
+    let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?;
+    let model = manager.catalog().get_model(ALIAS).await?;
+    println!("Model: {} (id: {})", model.alias(), model.id());
+
+    if !model.is_cached().await? {
+        println!("Downloading model...");
+        model
+            .download(Some(|progress: &str| {
+                print!("\r  {progress}%");
+                io::stdout().flush().ok();
+            }))
+            .await?;
+        println!();
+    }
+
+    println!("Loading model...");
+    model.load().await?;
+    println!("✓ Model loaded\n");
+
+    let audio_client = model.create_audio_client();
+    let session = Arc::new(audio_client.create_live_transcription_session());
+    session.start(None).await?;
+    println!("✓ Session started\n");
+
+    // --- Background task reads transcription results (mirrors JS readPromise) ---
+    let mut stream = session.get_transcription_stream().await?;
+    let read_task = tokio::spawn(async move {
+        while let Some(result) = stream.next().await {
+            match result {
+                Ok(r) => {
+                    if let Some(content) = r.content.first() {
+                        let text = &content.text;
+                        if r.is_final {
+                            println!();
+                            println!("  [FINAL] {text}");
+                        } else if !text.is_empty() {
+                            print!("{text}");
+                            io::stdout().flush().ok();
+                        }
+                    }
+                }
+                Err(e) => {
+                    eprintln!("\n[ERROR] Stream error: {e}");
+                    break;
+                }
+            }
+        }
+    });
+
+    // --- Microphone capture (mirrors JS naudiodon2 / C++ PortAudio / Python PyAudio) ---
+    // Try CPAL for mic input; fall back to synthetic PCM on failure.
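+    //
+    // Any other PCM source works too — the session only needs 16kHz/16-bit/mono
+    // little-endian bytes. A hypothetical file-based sketch (not part of this
+    // sample):
+    //
+    //     let pcm = std::fs::read("speech-16k-mono.pcm")?;
+    //     for chunk in pcm.chunks(3200) {          // ~100ms per chunk
+    //         session.append(chunk, None).await?;
+    //     }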
+
+    let mut mic_active = false;
+
+    if !use_synth {
+        match try_start_mic(&session, &running).await {
+            Ok(()) => {
+                mic_active = true;
+            }
+            Err(e) => {
+                eprintln!("Could not initialize microphone: {e}");
+                eprintln!("Falling back to synthetic audio test...\n");
+            }
+        }
+    }
+
+    // Fallback: push synthetic PCM (440Hz sine wave) — mirrors JS catch block
+    if !mic_active {
+        println!("Pushing synthetic audio (440Hz sine, 2s)...");
+        let pcm_data = generate_sine_wave_pcm(16000, 2, 440.0);
+        let chunk_size = 16000 / 10 * 2; // 100ms
+        let chunk_interval = std::time::Duration::from_millis(100);
+        for offset in (0..pcm_data.len()).step_by(chunk_size) {
+            if !running.load(Ordering::SeqCst) {
+                break;
+            }
+            let end = std::cmp::min(offset + chunk_size, pcm_data.len());
+            session.append(&pcm_data[offset..end], None).await?;
+            tokio::time::sleep(chunk_interval).await;
+        }
+        println!("✓ Synthetic audio pushed");
+
+        // Wait for remaining transcription results
+        tokio::time::sleep(std::time::Duration::from_secs(3)).await;
+    }
+
+    // Graceful shutdown (mirrors JS SIGINT handler)
+    println!("\n\nStopping...");
+    session.stop(None).await?;
+    read_task.await?;
+    model.unload().await?;
+    println!("✓ Done");
+    Ok(())
+}
+
+/// Try to open the default microphone with CPAL and forward PCM to the session.
+/// Blocks until Ctrl+C is pressed.
+async fn try_start_mic(
+    session: &Arc<LiveAudioTranscriptionSession>,
+    running: &Arc<AtomicBool>,
+) -> Result<(), Box<dyn std::error::Error>> {
+    let host = cpal::default_host();
+    let device = host
+        .default_input_device()
+        .ok_or("No input audio device available")?;
+    let default_config = device.default_input_config()?;
+    let device_rate = default_config.sample_rate().0;
+    let device_channels = default_config.channels();
+    let sample_format = default_config.sample_format();
+
+    let mic_config = cpal::StreamConfig {
+        channels: device_channels,
+        sample_rate: cpal::SampleRate(device_rate),
+        buffer_size: cpal::BufferSize::Default,
+    };
+
+    // Bounded channel (cap=100) mirrors JS appendQueue / C++ AudioQueue
+    let (audio_tx, mut audio_rx) = tokio::sync::mpsc::channel::<Vec<u8>>(100);
+    let err_fn = |err| eprintln!("Microphone stream error: {err}");
+
+    // CPAL may deliver f32, i16, or u16 depending on the device/host. Convert
+    // each supported sample format to f32 in [-1.0, 1.0] before resampling.
+    let input_stream = match sample_format {
+        cpal::SampleFormat::F32 => {
+            let tx = audio_tx.clone();
+            device.build_input_stream(
+                &mic_config,
+                move |data: &[f32], _: &cpal::InputCallbackInfo| {
+                    let bytes = convert_audio(data, device_channels, device_rate);
+                    if !bytes.is_empty() {
+                        let _ = tx.try_send(bytes);
+                    }
+                },
+                err_fn,
+                None,
+            )?
+        }
+        cpal::SampleFormat::I16 => {
+            let tx = audio_tx.clone();
+            device.build_input_stream(
+                &mic_config,
+                move |data: &[i16], _: &cpal::InputCallbackInfo| {
+                    let samples: Vec<f32> = data
+                        .iter()
+                        .map(|&s| s as f32 / i16::MAX as f32)
+                        .collect();
+                    let bytes = convert_audio(&samples, device_channels, device_rate);
+                    if !bytes.is_empty() {
+                        let _ = tx.try_send(bytes);
+                    }
+                },
+                err_fn,
+                None,
+            )?
+        }
+        cpal::SampleFormat::U16 => {
+            let tx = audio_tx.clone();
+            device.build_input_stream(
+                &mic_config,
+                move |data: &[u16], _: &cpal::InputCallbackInfo| {
+                    let samples: Vec<f32> = data
+                        .iter()
+                        .map(|&s| (s as f32 / u16::MAX as f32) * 2.0 - 1.0)
+                        .collect();
+                    let bytes = convert_audio(&samples, device_channels, device_rate);
+                    if !bytes.is_empty() {
+                        let _ = tx.try_send(bytes);
+                    }
+                },
+                err_fn,
+                None,
+            )?
+        }
+        other => {
+            return Err(format!("Unsupported input sample format: {other:?}").into());
+        }
+    };
+    drop(audio_tx);
+
+    input_stream.play()?;
+
+    println!("===========================================================");
+    println!("  LIVE TRANSCRIPTION ACTIVE");
+    println!("  Speak into your microphone.");
+    println!("  Press Ctrl+C to stop.");
+    println!("===========================================================");
+    println!();
+
+    // Pump audio from channel to session (mirrors JS pumpAudio / C++ pump loop)
+    let session_clone = Arc::clone(session);
+    let forward_task = tokio::spawn(async move {
+        while let Some(bytes) = audio_rx.recv().await {
+            if let Err(e) = session_clone.append(&bytes, None).await {
+                eprintln!("Append error: {e}");
+                break;
+            }
+        }
+    });
+
+    // Block until Ctrl+C
+    while running.load(Ordering::SeqCst) {
+        tokio::time::sleep(std::time::Duration::from_millis(100)).await;
+    }
+
+    drop(input_stream);
+    forward_task.await?;
+    Ok(())
+}
+
+fn convert_audio(data: &[f32], channels: u16, sample_rate: u32) -> Vec<u8> {
+    let mono: Vec<f32> = if channels > 1 {
+        data.chunks(channels as usize)
+            .map(|frame| frame.iter().sum::<f32>() / channels as f32)
+            .collect()
+    } else {
+        data.to_vec()
+    };
+
+    let resampled = if sample_rate != 16000 {
+        resample(&mono, sample_rate, 16000)
+    } else {
+        mono
+    };
+
+    let mut bytes = Vec::with_capacity(resampled.len() * 2);
+    for &s in &resampled {
+        let clamped = s.clamp(-1.0, 1.0);
+        let sample = (clamped * i16::MAX as f32) as i16;
+        bytes.extend_from_slice(&sample.to_le_bytes());
+    }
+    bytes
+}
+
+fn generate_sine_wave_pcm(sample_rate: i32, duration_seconds: i32, frequency: f64) -> Vec<u8> {
+    let total_samples = (sample_rate * duration_seconds) as usize;
+    let mut pcm_bytes = vec![0u8; total_samples * 2];
+
+    for i in 0..total_samples {
+        let t = i as f64 / sample_rate as f64;
+        let sample =
+            (i16::MAX as f64 * 0.5 * (2.0 * std::f64::consts::PI * frequency * t).sin()) as i16;
+        let bytes = sample.to_le_bytes();
+        pcm_bytes[i * 2] = bytes[0];
+        pcm_bytes[i * 2 + 1] = bytes[1];
+    }
+
+    pcm_bytes
+}
+
+fn resample(input: &[f32], from_rate: u32, to_rate: u32) -> Vec<f32> {
+    if from_rate == to_rate || input.is_empty() {
+        return input.to_vec();
+    }
+
+    let ratio = from_rate as f64 / to_rate as f64;
+    let out_len = (input.len() as f64 / ratio).ceil() as usize;
+    let mut output = Vec::with_capacity(out_len);
+
+    for i in 0..out_len {
+        let src_idx = i as f64 * ratio;
+        let idx = src_idx as usize;
+        let frac = src_idx - idx as f64;
+        let s0 = input[idx.min(input.len() - 1)];
+        let s1 = input[(idx + 1).min(input.len() - 1)];
+        output.push(s0 + (s1 - s0) * frac as f32);
+    }
+
+    output
+}

From 1a59b7bd12824567abda09a66e66a61e4a0c2ce8 Mon Sep 17 00:00:00 2001
From: Baiju Meswani
Date: Fri, 1 May 2026 11:56:51 -0700
Subject: [PATCH 64/83] Add a non-blocking initialization path to the JS SDK
 (#685)

The JS SDK's FoundryLocalManager.create() and ModelVariant.load() perform
synchronous native calls that block the Node.js event loop. This PR adds a
non-blocking path without breaking the existing GA APIs.

Changes

Native addon (sdk/js/native/foundry_local_napi.c)
Added a new executeCommandAsync entry point that runs execute_command on a
libuv worker thread and returns a Promise. This is distinct from the existing
executeCommandStreaming, which uses execute_command_with_callback and isn't
compatible with all commands.

CoreInterop
Exposed executeCommandAsync() that delegates to the new native entry point.
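For illustration only, a rough sketch of the two paths from application code
(not part of this change; `interop` stands for an already-constructed
CoreInterop, which is an internal SDK type):

```typescript
// Blocking (existing): ties up the event loop for the duration of the call.
const modelsJson = interop.executeCommand("get_model_list");

// Non-blocking (new): same command, executed on a libuv worker thread.
const modelsJsonAsync = await interop.executeCommandAsync("get_model_list");
```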
FoundryLocalManager - Kept create() as-is (blocking) for backward compatibility. - Added createAsync() that initializes via async native calls. - Added a pendingCreate guard so concurrent callers share the same in-flight initialization, matching the locking strategy in the C#, Rust, and Python SDKs. - create() throws a clear error if called while createAsync() is in flight to prevent double native initialization. ModelLoadManager load(), unload(), and listLoaded() now use executeCommandAsync for the core interop path. These methods already returned Promises, so no API change. ModelVariant.download() The no-progress path now uses executeCommandAsync. Already async, no API change. Catalog - getModels() and getCachedModels() now use executeCommandAsync for the native fetches. Already async, no API change. - updateModels() shares an in-flight refresh promise so concurrent callers don't redundantly refetch and rebuild the model maps. - The constructor accepts an optional catalogName so the manager can pass it in (avoiding a sync native call during async initialization), with a fallback to the existing sync lookup. Backward compatible. Backward compatibility No API breakages. All existing callers of create() and load() continue to work unchanged. --- sdk/js/native/foundry_local_napi.c | 175 +++++++++++++++++++++++++- sdk/js/src/catalog.ts | 20 ++- sdk/js/src/detail/coreInterop.ts | 10 ++ sdk/js/src/detail/modelLoadManager.ts | 6 +- sdk/js/src/detail/modelVariant.ts | 2 +- sdk/js/src/foundryLocalManager.ts | 63 +++++++++- sdk/js/test/catalog.test.ts | 8 +- 7 files changed, 265 insertions(+), 19 deletions(-) diff --git a/sdk/js/native/foundry_local_napi.c b/sdk/js/native/foundry_local_napi.c index bffebc4c..a947f10c 100644 --- a/sdk/js/native/foundry_local_napi.c +++ b/sdk/js/native/foundry_local_napi.c @@ -6,10 +6,11 @@ * * Replaces the koffi FFI bridge with a lightweight native addon that * dynamically loads the FoundryLocalCore shared library at runtime and - * exposes three JavaScript-callable functions: + * exposes the following JavaScript-callable functions: * * loadLibrary(corePath, depPaths?) 
– load native libs, resolve symbols * executeCommand(cmd, dataJson) – synchronous command execution + * executeCommandAsync(cmd, dataJson) – async command execution (Promise) * executeCommandWithBinary(cmd, dataJson, binaryBuf) – with binary payload * executeCommandStreaming(cmd, dataJson, callback) – async + streaming */ @@ -474,6 +475,176 @@ static napi_value napi_execute_command_with_binary(napi_env env, return result; } +/* ── Async (non-streaming) work data ──────────────────────────────────── */ + +typedef struct { + char* command; + size_t command_length; + char* data; + size_t data_length; + ResponseBuffer response; + napi_deferred deferred; + napi_async_work work; +} AsyncWorkData; + +/* Runs on the libuv worker thread */ +static void async_execute(napi_env env, void* data) { + AsyncWorkData* work_data = (AsyncWorkData*)data; + + RequestBuffer req = { + .Command = work_data->command, + .CommandLength = (int32_t)work_data->command_length, + .Data = work_data->data, + .DataLength = (int32_t)work_data->data_length + }; + + work_data->response.Data = NULL; + work_data->response.DataLength = 0; + work_data->response.Error = NULL; + work_data->response.ErrorLength = 0; + + g_execute_command(&req, &work_data->response); +} + +/* Runs on the JS main thread after async_execute completes */ +static void async_complete(napi_env env, napi_status status, void* data) { + AsyncWorkData* work_data = (AsyncWorkData*)data; + + if (status == napi_cancelled) { + reject_with_error(env, work_data->deferred, "Async work cancelled"); + } else if (status != napi_ok) { + char msg[128]; + snprintf(msg, sizeof(msg), + "Async work failed with N-API status %d", (int)status); + reject_with_error(env, work_data->deferred, msg); + } else if (work_data->response.Error && work_data->response.ErrorLength > 0) { + int32_t elen = work_data->response.ErrorLength; + size_t msg_size = (size_t)elen + 128; + char* msg = (char*)malloc(msg_size); + if (msg) { + snprintf(msg, msg_size, "Command '%s' failed: %.*s", + work_data->command, elen, + (const char*)work_data->response.Error); + reject_with_error(env, work_data->deferred, msg); + free(msg); + } else { + reject_with_error(env, work_data->deferred, "Command failed (OOM)"); + } + } else { + napi_value result; + napi_status st; + if (work_data->response.Data && work_data->response.DataLength > 0) { + st = napi_create_string_utf8(env, + (const char*)work_data->response.Data, + work_data->response.DataLength, &result); + } else { + st = napi_create_string_utf8(env, "", 0, &result); + } + if (st != napi_ok) { + reject_with_error(env, work_data->deferred, + "Failed to create response string"); + } else { + napi_resolve_deferred(env, work_data->deferred, result); + } + } + + free_native_buffer(work_data->response.Data); + free_native_buffer(work_data->response.Error); + napi_delete_async_work(env, work_data->work); + free(work_data->command); + free(work_data->data); + free(work_data); +} + +/* executeCommandAsync(command, dataJson) → Promise */ +static napi_value napi_execute_command_async(napi_env env, + napi_callback_info info) { + if (!g_execute_command) { + napi_throw_error(env, NULL, "Native library not loaded. 
Call loadLibrary() first."); + return NULL; + } + + size_t argc = 2; + napi_value argv[2]; + NAPI_CALL(env, napi_get_cb_info(env, info, &argc, argv, NULL, NULL)); + + if (argc < 2) { + napi_throw_error(env, NULL, + "executeCommandAsync requires 2 arguments (command, dataJson)"); + return NULL; + } + + /* Extract command string */ + size_t cmd_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], NULL, 0, &cmd_len)); + if (!check_string_length(env, cmd_len, "command")) return NULL; + char* cmd = (char*)malloc(cmd_len + 1); + if (!cmd) { napi_throw_error(env, NULL, "Out of memory"); return NULL; } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[0], cmd, cmd_len + 1, &cmd_len)); + + /* Extract data JSON string */ + size_t data_len = 0; + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], NULL, 0, &data_len)); + if (!check_string_length(env, data_len, "dataJson")) { free(cmd); return NULL; } + char* data_str = (char*)malloc(data_len + 1); + if (!data_str) { + free(cmd); + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + NAPI_CALL(env, napi_get_value_string_utf8(env, argv[1], data_str, data_len + 1, &data_len)); + + /* Allocate work data */ + AsyncWorkData* work_data = (AsyncWorkData*)calloc(1, sizeof(AsyncWorkData)); + if (!work_data) { + free(cmd); + free(data_str); + napi_throw_error(env, NULL, "Out of memory"); + return NULL; + } + work_data->command = cmd; + work_data->command_length = cmd_len; + work_data->data = data_str; + work_data->data_length = data_len; + + /* Create promise */ + napi_value promise; + napi_status st = napi_create_promise(env, &work_data->deferred, &promise); + if (st != napi_ok) { + free(cmd); free(data_str); free(work_data); + napi_throw_error(env, NULL, "Failed to create promise"); + return NULL; + } + + /* Create and queue async work */ + napi_value work_name; + st = napi_create_string_utf8(env, "foundry_async_cmd", NAPI_AUTO_LENGTH, &work_name); + if (st != napi_ok) { + free(cmd); free(data_str); free(work_data); + napi_throw_error(env, NULL, "Failed to create async work name"); + return NULL; + } + + st = napi_create_async_work(env, NULL, work_name, + async_execute, async_complete, + work_data, &work_data->work); + if (st != napi_ok) { + free(cmd); free(data_str); free(work_data); + napi_throw_error(env, NULL, "Failed to create async work"); + return NULL; + } + + st = napi_queue_async_work(env, work_data->work); + if (st != napi_ok) { + napi_delete_async_work(env, work_data->work); + free(cmd); free(data_str); free(work_data); + napi_throw_error(env, NULL, "Failed to queue async work"); + return NULL; + } + + return promise; +} + /* ── Streaming async work data ────────────────────────────────────────── */ /* Chunk data passed from the native callback to the JS thread. 
@@ -807,6 +978,8 @@ static napi_value init(napi_env env, napi_value exports) {
       napi_default, NULL },
     { "executeCommand", NULL, napi_execute_command, NULL, NULL, NULL,
       napi_default, NULL },
+    { "executeCommandAsync", NULL, napi_execute_command_async, NULL,
+      NULL, NULL, napi_default, NULL },
     { "executeCommandWithBinary", NULL, napi_execute_command_with_binary,
       NULL, NULL, NULL, napi_default, NULL },
     { "executeCommandStreaming", NULL, napi_execute_command_streaming,
diff --git a/sdk/js/src/catalog.ts b/sdk/js/src/catalog.ts
index d4331c38..c0d9d2b5 100644
--- a/sdk/js/src/catalog.ts
+++ b/sdk/js/src/catalog.ts
@@ -17,11 +17,12 @@ export class Catalog {
   private modelAliasToModel: Map<string, Model> = new Map();
   private modelIdToModelVariant: Map<string, ModelVariant> = new Map();
   private lastFetch: number = 0;
+  private updatePromise?: Promise<void>;

-  constructor(coreInterop: CoreInterop, modelLoadManager: ModelLoadManager) {
+  constructor(coreInterop: CoreInterop, modelLoadManager: ModelLoadManager, catalogName?: string) {
     this.coreInterop = coreInterop;
     this.modelLoadManager = modelLoadManager;
-    this._name = this.coreInterop.executeCommand("get_catalog_name");
+    this._name = catalogName ?? this.coreInterop.executeCommand("get_catalog_name");
   }

   /**
@@ -42,9 +43,16 @@ export class Catalog {
     if ((Date.now() - this.lastFetch) < 6 * 60 * 60 * 1000) { // 6 hours
       return;
     }
+    if (this.updatePromise) {
+      return this.updatePromise;
+    }
+    this.updatePromise = this.fetchAndPopulateModels()
+      .finally(() => { this.updatePromise = undefined; });
+    return this.updatePromise;
+  }

-    // Potential network call to fetch model list
-    const modelListJson = this.coreInterop.executeCommand("get_model_list");
+  private async fetchAndPopulateModels(): Promise<void> {
+    const modelListJson = await this.coreInterop.executeCommandAsync("get_model_list");
     let modelsInfo: ModelInfo[] = [];
     try {
       modelsInfo = JSON.parse(modelListJson);
@@ -59,7 +67,7 @@
     for (const info of modelsInfo) {
       const variant = new ModelVariant(info, this.coreInterop, this.modelLoadManager);
       let model = this.modelAliasToModel.get(info.alias);
-
+
       if (!model) {
         model = new Model(variant);
         this.modelAliasToModel.set(info.alias, model);
@@ -133,7 +141,7 @@
    */
   public async getCachedModels(): Promise<Model[]> {
     await this.updateModels();
-    const cachedModelListJson = this.coreInterop.executeCommand("get_cached_models");
+    const cachedModelListJson = await this.coreInterop.executeCommandAsync("get_cached_models");
     let cachedModelIds: string[] = [];
     try {
       cachedModelIds = JSON.parse(cachedModelListJson);
diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts
index 72df7e26..72013815 100644
--- a/sdk/js/src/detail/coreInterop.ts
+++ b/sdk/js/src/detail/coreInterop.ts
@@ -13,6 +13,7 @@ const require = createRequire(import.meta.url);
 interface NativeAddon {
   loadLibrary(corePath: string, depPaths?: string[]): void;
   executeCommand(command: string, dataJson: string): string;
+  executeCommandAsync(command: string, dataJson: string): Promise<string>;
   executeCommandWithBinary(command: string, dataJson: string, binaryBuffer: Buffer): string;
   executeCommandStreaming(command: string, dataJson: string, callback: (chunk: string) => void): Promise<void>;
 }
@@ -115,6 +116,15 @@ export class CoreInterop {
     return this.addon.executeCommand(command, dataStr);
   }

+  /**
+   * Asynchronously execute a native command without blocking the event loop.
+   * Runs the native call on a libuv worker thread.
+   */
+  public executeCommandAsync(command: string, params?: any): Promise<string> {
+    const dataStr = params ? JSON.stringify(params) : '';
+    return this.addon.executeCommandAsync(command, dataStr);
+  }
+
   /**
    * Execute a native command with binary data (e.g., audio PCM bytes).
    * Uses the execute_command_with_binary native entry point which accepts
diff --git a/sdk/js/src/detail/modelLoadManager.ts b/sdk/js/src/detail/modelLoadManager.ts
index e66f327d..423f4e42 100644
--- a/sdk/js/src/detail/modelLoadManager.ts
+++ b/sdk/js/src/detail/modelLoadManager.ts
@@ -37,7 +37,7 @@ export class ModelLoadManager {
       }
       return;
     }
-    this.coreInterop.executeCommand("load_model", { Params: { Model: modelId } });
+    await this.coreInterop.executeCommandAsync("load_model", { Params: { Model: modelId } });
   }

   /**
@@ -54,7 +54,7 @@
       }
       return;
     }
-    this.coreInterop.executeCommand("unload_model", { Params: { Model: modelId } });
+    await this.coreInterop.executeCommandAsync("unload_model", { Params: { Model: modelId } });
   }

   /**
@@ -72,7 +72,7 @@
       const list = await response.json();
       return list || [];
     }
-    const response = this.coreInterop.executeCommand("list_loaded_models");
+    const response = await this.coreInterop.executeCommandAsync("list_loaded_models");
     try {
       return JSON.parse(response);
     } catch (error) {
diff --git a/sdk/js/src/detail/modelVariant.ts b/sdk/js/src/detail/modelVariant.ts
index 43484bac..273ab719 100644
--- a/sdk/js/src/detail/modelVariant.ts
+++ b/sdk/js/src/detail/modelVariant.ts
@@ -112,7 +112,7 @@ export class ModelVariant implements IModel {
   public async download(progressCallback?: (progress: number) => void): Promise<void> {
     const request = { Params: { Model: this._modelInfo.id } };
     if (!progressCallback) {
-      this.coreInterop.executeCommand("download_model", request);
+      await this.coreInterop.executeCommandAsync("download_model", request);
     } else {
       await this.coreInterop.executeCommandStreaming("download_model", request, (chunk: string) => {
         const progress = parseFloat(chunk);
diff --git a/sdk/js/src/foundryLocalManager.ts b/sdk/js/src/foundryLocalManager.ts
index f22acdc0..f3224e65 100644
--- a/sdk/js/src/foundryLocalManager.ts
+++ b/sdk/js/src/foundryLocalManager.ts
@@ -11,22 +11,35 @@ import { EpInfo, EpDownloadResult } from './types.js';
  */
 export class FoundryLocalManager {
   private static instance: FoundryLocalManager;
+  private static pendingCreate?: Promise<FoundryLocalManager>;
   private config: Configuration;
   private coreInterop: CoreInterop;
   private _modelLoadManager: ModelLoadManager;
-  private _catalog: Catalog;
+  private _catalog!: Catalog;
   private _urls: string[] = [];

   private constructor(config: Configuration) {
     this.config = config;
     this.coreInterop = new CoreInterop(this.config);
-    this.coreInterop.executeCommand("initialize", { Params: this.config.params });
     this._modelLoadManager = new ModelLoadManager(this.coreInterop);
-    this._catalog = new Catalog(this.coreInterop, this._modelLoadManager);
+  }
+
+  private initializeSync(): void {
+    this.coreInterop.executeCommand("initialize", { Params: this.config.params });
+    const catalogName = this.coreInterop.executeCommand("get_catalog_name");
+    this._catalog = new Catalog(this.coreInterop, this._modelLoadManager, catalogName);
+  }
+
+  private async initializeAsync(): Promise<void> {
+    await this.coreInterop.executeCommandAsync("initialize", { Params: this.config.params });
+    const catalogName = await this.coreInterop.executeCommandAsync("get_catalog_name");
+    this._catalog = new Catalog(this.coreInterop,
+        this._modelLoadManager, catalogName);
+  }

   /**
    * Creates the FoundryLocalManager singleton with the provided configuration.
+   * Note: This method blocks the event loop during initialization.
+   * For non-blocking initialization, use {@link createAsync} instead.
    * @param config - The configuration settings for the SDK (plain object).
    * @returns The initialized FoundryLocalManager instance.
    * @example
@@ -39,12 +52,54 @@
    */
   public static create(config: FoundryLocalConfig): FoundryLocalManager {
     if (!FoundryLocalManager.instance) {
+      if (FoundryLocalManager.pendingCreate) {
+        throw new Error(
+          "FoundryLocalManager.createAsync() is in progress. " +
+          "Await that call instead of invoking create()."
+        );
+      }
       const internalConfig = new Configuration(config);
-      FoundryLocalManager.instance = new FoundryLocalManager(internalConfig);
+      const manager = new FoundryLocalManager(internalConfig);
+      manager.initializeSync();
+      FoundryLocalManager.instance = manager;
     }
     return FoundryLocalManager.instance;
   }

+  /**
+   * Creates the FoundryLocalManager singleton with the provided configuration.
+   * Native command execution is performed asynchronously to avoid blocking the
+   * event loop during initialization. Note that some synchronous setup (e.g.,
+   * loading the native addon) still occurs before the first await.
+   * @param config - The configuration settings for the SDK (plain object).
+   * @returns A promise that resolves to the initialized FoundryLocalManager instance.
+   * @example
+   * ```typescript
+   * const manager = await FoundryLocalManager.createAsync({
+   *   appName: 'MyApp',
+   *   logLevel: 'info'
+   * });
+   * ```
+   */
+  public static createAsync(config: FoundryLocalConfig): Promise<FoundryLocalManager> {
+    if (FoundryLocalManager.instance) {
+      return Promise.resolve(FoundryLocalManager.instance);
+    }
+    if (!FoundryLocalManager.pendingCreate) {
+      const internalConfig = new Configuration(config);
+      const manager = new FoundryLocalManager(internalConfig);
+      FoundryLocalManager.pendingCreate = manager.initializeAsync()
+        .then(() => {
+          FoundryLocalManager.instance = manager;
+          return manager;
+        })
+        .finally(() => {
+          FoundryLocalManager.pendingCreate = undefined;
+        });
+    }
+    return FoundryLocalManager.pendingCreate;
+  }
+
   /**
    * Gets the Catalog instance for discovering and managing models.
    * @returns The Catalog instance.
diff --git a/sdk/js/test/catalog.test.ts b/sdk/js/test/catalog.test.ts
index 8c320723..dd2ad3e1 100644
--- a/sdk/js/test/catalog.test.ts
+++ b/sdk/js/test/catalog.test.ts
@@ -155,9 +155,6 @@ describe('Catalog Tests', () => {

     const mockCoreInterop = {
       executeCommand(command: string): string {
-        if (command === 'get_catalog_name') {
-          return 'TestCatalog';
-        }
         if (command === 'get_model_list') {
           return JSON.stringify(testModelInfos);
         }
@@ -165,6 +162,9 @@
           return '[]';
         }
         throw new Error(`Unexpected command: ${command}`);
+      },
+      executeCommandAsync(command: string): Promise<string> {
+        return Promise.resolve(this.executeCommand(command));
       }
     } as any;

@@ -172,7 +172,7 @@
       listLoaded: async () => []
     } as any;

-    const catalog = new Catalog(mockCoreInterop, mockLoadManager);
+    const catalog = new Catalog(mockCoreInterop, mockLoadManager, 'TestCatalog');

     const model = await catalog.getModel('test-alias');
     expect(model).to.not.be.undefined;

From 897d3e7809df96f7c61b1ddf80822dff71fd375f Mon Sep 17 00:00:00 2001
From: Prathik Rao
Date: Fri, 1 May 2026 12:18:48 -0700
Subject: [PATCH 65/83] bump ort and ort-genai versions (#686)

Co-authored-by: Prathik Rao
---
 sdk/deps_versions.json            | 4 ++--
 sdk/deps_versions_winml.json      | 2 +-
 sdk/python/requirements-winml.txt | 4 ++--
 sdk/python/requirements.txt       | 8 ++++----
 4 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/sdk/deps_versions.json b/sdk/deps_versions.json
index 5fec13c8..4a7795af 100644
--- a/sdk/deps_versions.json
+++ b/sdk/deps_versions.json
@@ -4,9 +4,9 @@
     "python": "1.0.0"
   },
   "onnxruntime": {
-    "version": "1.24.4"
+    "version": "1.25.1"
   },
   "onnxruntime-genai": {
-    "version": "0.13.1"
+    "version": "0.13.2"
   }
 }
diff --git a/sdk/deps_versions_winml.json b/sdk/deps_versions_winml.json
index 385767d5..969d03fb 100644
--- a/sdk/deps_versions_winml.json
+++ b/sdk/deps_versions_winml.json
@@ -7,6 +7,6 @@
     "version": "1.23.2.3"
   },
   "onnxruntime-genai": {
-    "version": "0.13.1"
+    "version": "0.13.2"
   }
 }
diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt
index bee268a9..ac05c640 100644
--- a/sdk/python/requirements-winml.txt
+++ b/sdk/python/requirements-winml.txt
@@ -2,6 +2,6 @@ pydantic>=2.0.0
 requests>=2.32.4
 openai>=2.24.0
 # WinML native binary packages from the ORT-Nightly PyPI feed.
-foundry-local-core-winml==1.0.0rc1
+foundry-local-core-winml==1.0.0
 onnxruntime-core==1.23.2.3
-onnxruntime-genai-core==0.13.1
\ No newline at end of file
+onnxruntime-genai-core==0.13.2
\ No newline at end of file
diff --git a/sdk/python/requirements.txt b/sdk/python/requirements.txt
index 666a3721..92c98b54 100644
--- a/sdk/python/requirements.txt
+++ b/sdk/python/requirements.txt
@@ -3,7 +3,7 @@ requests>=2.32.4
 openai>=2.24.0
 # Standard native binary packages from the ORT-Nightly PyPI feed.
 foundry-local-core==1.0.0rc1
-onnxruntime-core==1.24.4; sys_platform != "linux"
-onnxruntime-gpu==1.24.4; sys_platform == "linux"
-onnxruntime-genai-core==0.13.1; sys_platform != "linux"
-onnxruntime-genai-cuda==0.13.1; sys_platform == "linux"
+onnxruntime-core==1.25.1; sys_platform != "linux"
+onnxruntime-gpu==1.25.1; sys_platform == "linux"
+onnxruntime-genai-core==0.13.2; sys_platform != "linux"
+onnxruntime-genai-cuda==0.13.2; sys_platform == "linux"

From 33346876c99bcf5d1664583ddbe4c66dcf339bc4 Mon Sep 17 00:00:00 2001
From: Rui Ren
Date: Fri, 1 May 2026 16:19:59 -0700
Subject: [PATCH 66/83] [Rust & Python] Add `id` field to
 LiveAudioTranscriptionResponse (#687)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Add id field to LiveAudioTranscriptionResponse for cross-SDK parity

### Summary

The native core emits an id field on each transcription result, but the Rust
SDK was silently dropping it during deserialization. JS, Python, and C# SDKs
all surface this field — Rust was the outlier. This PR closes that parity gap
with a minimal, additive change.

### Motivation

A cross-SDK parity audit (realtime_audio_sdk_analysis.md) flagged the missing
id as a 🔴 Critical issue. Without it, Rust callers cannot:

- Correlate streaming results back to the originating audio segment
- Deduplicate retransmitted/repeated results
- Build forward-compatible code against the OpenAI Realtime API contract
  (which uses id on `ConversationItem`)

---------

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 .../openai/live_audio_transcription_client.py | 61 +++++++++++++++++--
 .../openai/live_audio_transcription_types.py  |  1 +
 sdk/rust/src/openai/live_audio_client.rs      | 19 ++++++
 3 files changed, 76 insertions(+), 5 deletions(-)

diff --git a/sdk/python/src/openai/live_audio_transcription_client.py b/sdk/python/src/openai/live_audio_transcription_client.py
index 82277436..057c0770 100644
--- a/sdk/python/src/openai/live_audio_transcription_client.py
+++ b/sdk/python/src/openai/live_audio_transcription_client.py
@@ -6,6 +6,27 @@
 Provides :class:`LiveAudioTranscriptionSession` — a push-based streaming
 session for real-time audio-to-text transcription via ONNX Runtime GenAI.
+
+Error handling
+--------------
+All session operations raise :class:`FoundryLocalException` on failure.
+Common failure modes:
+
+- **Session lifecycle errors** — raised by :meth:`start` / :meth:`stop` /
+  :meth:`append` / :meth:`get_transcription_stream` when called in an
+  invalid state (e.g. calling ``start()`` twice, or ``append()`` before
+  ``start()``). Message contains ``"already started"`` /
+  ``"No active streaming session"``.
+- **Native core errors** — raised when the native Core returns an error
+  response (e.g. ``audio_stream_start`` fails). Message has the form
+  ``"Error starting/stopping audio stream session: <error>"``.
+- **Push loop fatal errors** — raised from inside
+  :meth:`get_transcription_stream` when a chunk push fails. Message has
+  the form ``"Push failed (code=<code>): <message>"`` where
+  ``<code>`` is parsed from :class:`CoreErrorResponse` (e.g.
+  ``ASR_SESSION_NOT_FOUND``, ``BUSY``, or ``UNKNOWN`` if the error is
+  unstructured). Once a push loop fatal error occurs, the session is
+  terminated and must be re-created.
 """

 from __future__ import annotations
@@ -91,8 +112,10 @@ def start(self) -> None:

         Settings are frozen after this call.

         Raises:
-            FoundryLocalException: If the session is already started or the
-                native core returns an error.
+            FoundryLocalException: If the session is already started
+                (message contains ``"already started"``), or if the native
+                core fails to start the stream (message has form
+                ``"Error starting audio stream session: <error>"``).
         """
         with self._lock:
             if self._started:
@@ -149,11 +172,19 @@ def append(self, pcm_data: bytes) -> None:

         The data is copied to avoid issues if the caller reuses the
         buffer.

+        If the internal push queue is full (capacity controlled by
+        :attr:`LiveAudioTranscriptionOptions.push_queue_capacity`, default
+        100), this method **blocks** until space is available
+        (backpressure). This prevents unbounded memory growth when the
+        native core falls behind real-time.
+
         Args:
             pcm_data: Raw PCM audio bytes matching the configured format.

         Raises:
-            FoundryLocalException: If no active streaming session exists.
+            FoundryLocalException: If the session is not active (not
+                started, or already stopped). Message contains
+                ``"No active streaming session"``.
         """
         # Copy the data to avoid issues if the caller reuses the buffer
         data_copy = bytes(pcm_data)
@@ -186,12 +217,22 @@ def get_transcription_stream(

         The generator completes when :meth:`stop` is called and all
         remaining audio has been processed.

+        After :meth:`stop` completes, calling this method again returns
+        an empty generator (the sentinel is still on the queue) — matching
+        the C# / JS SDK behavior.
+
         Yields:
             Transcription results as ``LiveAudioTranscriptionResponse``
             objects.

         Raises:
-            FoundryLocalException: If no active streaming session exists,
-                or if the push loop encountered a fatal error.
+            FoundryLocalException: If no active streaming session exists
+                (``start()`` was never called). Also raised from inside
+                the iterator if a push fails — message has form
+                ``"Push failed (code=<code>): <message>"`` where
+                ``<code>`` is parsed via
+                :meth:`CoreErrorResponse.try_parse` (e.g.
+                ``ASR_SESSION_NOT_FOUND``, ``BUSY``). Once raised, the
+                session is terminated.
         """
         q = self._output_queue
         if q is None:
@@ -213,6 +254,16 @@ def stop(self) -> None:

         Any remaining buffered audio in the push queue will be drained to
         native core first. Final results are delivered through
         :meth:`get_transcription_stream` before it completes.
+
+        Idempotent: calling ``stop()`` on a session that was never started
+        or has already been stopped is a no-op.
+
+        Raises:
+            FoundryLocalException: If the native core fails to stop the
+                stream cleanly. Message has form
+                ``"Error stopping audio stream session: <error>"``.
+                Note: the SDK still completes its local cleanup before
+                raising, so the session is left in a fully-stopped state.
         """
         with self._lock:
             if not self._started or self._stopped:
diff --git a/sdk/python/src/openai/live_audio_transcription_types.py b/sdk/python/src/openai/live_audio_transcription_types.py
index 11ebbfae..596c2e7b 100644
--- a/sdk/python/src/openai/live_audio_transcription_types.py
+++ b/sdk/python/src/openai/live_audio_transcription_types.py
@@ -76,6 +76,7 @@ def from_json(json_str: str) -> LiveAudioTranscriptionResponse:
         is_final=raw.get("is_final", True),
         start_time=raw.get("start_time"),
         end_time=raw.get("end_time"),
+        id=raw.get("id"),
     )

diff --git a/sdk/rust/src/openai/live_audio_client.rs b/sdk/rust/src/openai/live_audio_client.rs
index 8b285a96..38760390 100644
--- a/sdk/rust/src/openai/live_audio_client.rs
+++ b/sdk/rust/src/openai/live_audio_client.rs
@@ -83,6 +83,7 @@ struct LiveAudioTranscriptionRaw {
     text: String,
     start_time: Option<f64>,
     end_time: Option<f64>,
+    id: Option<String>,
 }

 /// A content part within a [`LiveAudioTranscriptionResponse`].
@@ -116,6 +117,8 @@ pub struct LiveAudioTranscriptionResponse {
     pub start_time: Option<f64>,
     /// End time offset of this segment in the audio stream (seconds).
     pub end_time: Option<f64>,
+    /// Unique identifier for this result (if available).
+    pub id: Option<String>,
 }

 impl LiveAudioTranscriptionResponse {
@@ -135,6 +138,7 @@
             is_final: raw.is_final,
             start_time: raw.start_time,
             end_time: raw.end_time,
+            id: raw.id,
         }
     }
 }
@@ -660,6 +664,21 @@ mod tests {
         assert_eq!(result.content[0].transcript, "test");
     }

+    #[test]
+    fn from_json_parses_id_when_present() {
+        let json =
+            r#"{"is_final":true,"text":"hi","id":"evt_123","start_time":null,"end_time":null}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+        assert_eq!(result.id.as_deref(), Some("evt_123"));
+    }
+
+    #[test]
+    fn from_json_id_defaults_to_none() {
+        let json = r#"{"is_final":true,"text":"hi","start_time":null,"end_time":null}"#;
+        let result = LiveAudioTranscriptionResponse::from_json(json).unwrap();
+        assert!(result.id.is_none());
+    }
+
     #[test]
     fn options_default_values() {
         let options = LiveAudioTranscriptionOptions::default();

From 7e9043fa9d61dfb8686e2d3c48472026e0cd6fab Mon Sep 17 00:00:00 2001
From: Rui Ren
Date: Fri, 1 May 2026 16:22:22 -0700
Subject: [PATCH 67/83] [C++] Fix `Nemotron-ASR` of LiveAudioTranscriptionResponse (#689)

## Summary

Adds the optional `id` field to the C++ `LiveAudioTranscriptionResponse` for
parity with the JS and Python SDKs, addressing feedback from the
multi-language SDK parity review.

## Why

The JS (`liveAudioTranscriptionTypes.ts`) and Python
(`live_audio_transcription_types.py`) SDKs both expose an optional `id` field
on each transcription result, parsed from the native Core's JSON response when
present. The C++ SDK was missing this field, creating a small parity gap.

## Changes

- `sdk/cpp/include/openai/openai_live_audio_types.h`: Add
  `std::optional<std::string> id;` to `LiveAudioTranscriptionResponse`.
- `sdk/cpp/src/openai_live_audio_types.cpp`: Parse `id` optionally in
  `FromJson` (no breakage when field is absent).
- `sdk/cpp/test/live_audio_test.cpp`: Add 2 new unit tests:
  - `FromJson_WithId` verifies `id` is parsed when present.
  - `FromJson_WithoutId` verifies `id` is `nullopt` when absent.
  - Updated `FromJson_BasicResponse` to assert `id` is `nullopt` by default.

## Testing

- Build: 0 errors, 0 warnings.
- All 154 unit tests passing (152 existing + 2 new).
- Backwards-compatible: existing JSON without `id` continues to parse
  correctly.
## Parity Status

| Field | C++ (before) | C++ (after) | JS | Python | C# | Rust |
|-------|--------------|-------------|----|--------|----|------|
| `id`  | ❌ | ✅ | ✅ | ✅ | ✅ (inherited) | ✅ |

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 .../include/openai/openai_live_audio_types.h |  1 +
 sdk/cpp/src/openai_live_audio_types.cpp      |  4 ++++
 sdk/cpp/test/live_audio_test.cpp             | 21 +++++++++++++++++++
 3 files changed, 26 insertions(+)

diff --git a/sdk/cpp/include/openai/openai_live_audio_types.h b/sdk/cpp/include/openai/openai_live_audio_types.h
index d7d31f12..056afcd0 100644
--- a/sdk/cpp/include/openai/openai_live_audio_types.h
+++ b/sdk/cpp/include/openai/openai_live_audio_types.h
@@ -19,6 +19,7 @@ namespace foundry_local {
   bool is_final = false;
   std::optional<double> start_time;
   std::optional<double> end_time;
+  std::optional<std::string> id;
   std::vector<ContentPart> content;

   static LiveAudioTranscriptionResponse FromJson(const std::string& json);
diff --git a/sdk/cpp/src/openai_live_audio_types.cpp b/sdk/cpp/src/openai_live_audio_types.cpp
index f781a992..df69e212 100644
--- a/sdk/cpp/src/openai_live_audio_types.cpp
+++ b/sdk/cpp/src/openai_live_audio_types.cpp
@@ -39,6 +39,10 @@ namespace foundry_local {
     response.end_time = j["endTime"].get<double>();
   }

+  if (j.contains("id") && j["id"].is_string()) {
+    response.id = j["id"].get<std::string>();
+  }
+
   if (j.contains("content") && j["content"].is_array()) {
     for (const auto& item : j["content"]) {
       ContentPart part;
diff --git a/sdk/cpp/test/live_audio_test.cpp b/sdk/cpp/test/live_audio_test.cpp
index c6fc10b4..08f44d6e 100644
--- a/sdk/cpp/test/live_audio_test.cpp
+++ b/sdk/cpp/test/live_audio_test.cpp
@@ -38,6 +38,27 @@ TEST(LiveAudioTypesTest, FromJson_BasicResponse) {
   EXPECT_DOUBLE_EQ(0.5, resp.start_time.value());
   ASSERT_TRUE(resp.end_time.has_value());
   EXPECT_DOUBLE_EQ(1.5, resp.end_time.value());
+  EXPECT_FALSE(resp.id.has_value());
+}
+
+TEST(LiveAudioTypesTest, FromJson_WithId) {
+  nlohmann::json j = {
+      {"text", "hello"},
+      {"is_final", true},
+      {"id", "result-abc-123"}};
+
+  auto resp = LiveAudioTranscriptionResponse::FromJson(j.dump());
+  ASSERT_TRUE(resp.id.has_value());
+  EXPECT_EQ("result-abc-123", resp.id.value());
+}
+
+TEST(LiveAudioTypesTest, FromJson_WithoutId) {
+  nlohmann::json j = {
+      {"text", "hello"},
+      {"is_final", true}};
+
+  auto resp = LiveAudioTranscriptionResponse::FromJson(j.dump());
+  EXPECT_FALSE(resp.id.has_value());
 }

 TEST(LiveAudioTypesTest, FromJson_CamelCaseFields) {

From 58bba9383c80a8ec0fc9416db106ec736da56589 Mon Sep 17 00:00:00 2001
From: Baiju Meswani
Date: Fri, 1 May 2026 16:51:30 -0700
Subject: [PATCH 68/83] Preload OpenSSL with RTLD_DEEPBIND on Linux to avoid
 clashing with Node's built-in copy (#691)

When the Foundry Local Core .so is loaded into Node on Linux, the first HTTPS
call from inside core (e.g. fetching the Azure catalog) crashes the process.
The crash is in EVP_KEYMGMT_is_a deep inside libcrypto.so.3.

Node statically links its own copy of OpenSSL and re-exports those symbols
globally (the node binary is linked with --export-dynamic). When core is
loaded later, the .NET cryptography PAL pulls in the system libcrypto.so.3
for SslStream / X509 verification. The system libcrypto gets loaded with the
dynamic linker's default flags, which means its own internal
function-to-function calls go through the global symbol scope. And Node's
same-named exports win the lookup.
Node's OpenSSL and the distro's libcrypto.so.3 don't agree on the layout of internal structs like EVP_KEYMGMT, so the first time anything chases one of those pointers we segfault. Fix: Before we load the core .so, dlopen libcrypto.so.3 (and libssl.so.3) ourselves with RTLD_DEEPBIND. That tells the loader to resolve libcrypto's undefined references against libcrypto's own scope first, so its internal calls stay inside libcrypto. Anything that asks for libcrypto.so.3 after that gets handed back our already-loaded, properly isolated handle. Falls back to libcrypto.so.1.1 / libssl.so.1.1 for older distros. Best-effort: if neither is present, we just skip and let the load continue. Linux + glibc only; macOS already isolates dylibs via two-level namespaces, and Windows isn't affected. --- .pipelines/foundry-local-packaging.yml | 53 +++++++++---------- sdk/js/native/foundry_local_napi.c | 72 ++++++++++++++++++++++++++ 2 files changed, 97 insertions(+), 28 deletions(-) diff --git a/.pipelines/foundry-local-packaging.yml b/.pipelines/foundry-local-packaging.yml index bf05607f..bc6d254e 100644 --- a/.pipelines/foundry-local-packaging.yml +++ b/.pipelines/foundry-local-packaging.yml @@ -761,34 +761,31 @@ extends: flcNugetDir: '$(Pipeline.Workspace)/flc-nuget' depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard' - # The Linux JS test job is currently disabled due to intermittent SSL errors when running get_model_list. This issue is under investigation. - # Error: Command 'get_model_list' failed: Error: System.Net.Http.HttpRequestException: An error occurred while sending the request. - # ---> System.IO.IOException: The decryption operation failed, see inner exception. - # ---> Interop+OpenSsl+SslException: Decrypt failed with OpenSSL error - SSL_ERROR_SSL. - # ---> System.Security.Cryptography.CryptographicException: Error occurred during a cryptographic operation. 
-  # - job: test_js_linux_x64
-  #   displayName: 'linux-x64'
-  #   pool:
-  #     name: onnxruntime-Ubuntu2404-AMD-CPU
-  #     os: linux
-  #   templateContext:
-  #     inputs:
-  #       - input: pipelineArtifact
-  #         artifactName: 'flc-nuget'
-  #         targetPath: '$(Pipeline.Workspace)/flc-nuget'
-  #       - input: pipelineArtifact
-  #         artifactName: 'deps-versions-standard'
-  #         targetPath: '$(Pipeline.Workspace)/deps-versions-standard'
-  #   steps:
-  #     - checkout: self
-  #       clean: true
-  #     - checkout: test-data-shared
-  #       lfs: true
-  #     - template: .pipelines/templates/test-js-steps.yml@self
-  #       parameters:
-  #         isWinML: false
-  #         flcNugetDir: '$(Pipeline.Workspace)/flc-nuget'
-  #         depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard'
+  - job: test_js_linux_x64
+    displayName: 'linux-x64'
+    pool:
+      name: onnxruntime-Ubuntu2404-AMD-CPU
+      os: linux
+    templateContext:
+      inputs:
+        - input: pipelineArtifact
+          artifactName: 'flc-nuget'
+          targetPath: '$(Pipeline.Workspace)/flc-nuget'
+        - input: pipelineArtifact
+          artifactName: 'deps-versions-standard'
+          targetPath: '$(Pipeline.Workspace)/deps-versions-standard'
+    steps:
+      - checkout: self
+        clean: true
+      - template: .pipelines/templates/checkout-steps.yml@self
+        parameters:
+          repoName: test-data-shared
+          basePath: '$(Agent.BuildDirectory)'
+      - template: .pipelines/templates/test-js-steps.yml@self
+        parameters:
+          isWinML: false
+          flcNugetDir: '$(Pipeline.Workspace)/flc-nuget'
+          depsVersionsDir: '$(Pipeline.Workspace)/deps-versions-standard'

   - job: test_js_osx_arm64
     displayName: 'osx-arm64'
diff --git a/sdk/js/native/foundry_local_napi.c b/sdk/js/native/foundry_local_napi.c
index a947f10c..d84b3f67 100644
--- a/sdk/js/native/foundry_local_napi.c
+++ b/sdk/js/native/foundry_local_napi.c
@@ -1,6 +1,13 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.

+/* Required for RTLD_DEEPBIND (a glibc extension) to be exposed by <dlfcn.h>.
+ * Must be defined before any system header is included. Harmless on non-glibc
+ * platforms. */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
 /**
  * Node-API C addon for the Foundry Local JS SDK.
  *
@@ -151,6 +158,67 @@ static void reject_with_error(napi_env env, napi_deferred deferred,
   napi_reject_deferred(env, deferred, err_obj);
 }

+/* ── Preload system OpenSSL with RTLD_DEEPBIND on Linux/glibc ─────────── */
+
+/*
+ * Why this exists:
+ *
+ * Node.js statically links its own copy of OpenSSL and exports those symbols
+ * globally (the Node binary is linked with --export-dynamic). When the
+ * NativeAOT-compiled core .so is later loaded, the .NET cryptography PAL pulls
+ * in the system libcrypto.so.3 / libssl.so.3 for HTTPS (SslStream, X509 chain
+ * validation, etc.). libcrypto is mapped with the loader's default flags, so
+ * its *own internal* function-to-function calls are bound through the global
+ * symbol scope. They resolve to Node's same-named static OpenSSL exports
+ * instead of to libcrypto's own functions. The two OpenSSL builds have
+ * incompatible internal struct layouts (e.g., EVP_KEYMGMT), and the process
+ * segfaults inside EVP_KEYMGMT_is_a / X509_verify_cert on the first HTTPS
+ * request.
+ *
+ * Fix: explicitly dlopen libcrypto (and libssl) ourselves, before anything
+ * else can pull them in, with RTLD_DEEPBIND. That flag tells the loader to
+ * bind libcrypto's undefined references against libcrypto's own scope first,
+ * so its internal calls stay inside libcrypto. 
Subsequent dlopen calls by the + * .NET PAL (or anything else) for the same soname return our already-loaded + * handle, preserving the isolation. + * + * Notes: + * - RTLD_DEEPBIND is a glibc extension. On macOS the dyld two-level + * namespace already prevents this kind of cross-library symbol clobber, + * so this is a no-op there. Windows uses LoadLibrary which is also + * unaffected. + * - Best-effort: if libcrypto isn't present at the expected sonames, we + * skip silently and let the original load proceed (it may still work in + * hosts that don't export conflicting OpenSSL symbols). + * - RTLD_DEEPBIND on the *core* .so by itself is not sufficient — that flag + * does not propagate to libraries loaded transitively after the core. + */ +static void preload_isolated_openssl(void) { +#if defined(__linux__) && defined(__GLIBC__) && defined(RTLD_DEEPBIND) + static lib_handle_t s_libcrypto = NULL; + static lib_handle_t s_libssl = NULL; + + if (s_libcrypto != NULL) { + return; + } + + const int flags = RTLD_NOW | RTLD_LOCAL | RTLD_DEEPBIND; + static const char* const crypto_sonames[] = { "libcrypto.so.3", "libcrypto.so.1.1", NULL }; + static const char* const ssl_sonames[] = { "libssl.so.3", "libssl.so.1.1", NULL }; + + /* libcrypto must be loaded before libssl (libssl depends on libcrypto). */ + for (size_t i = 0; crypto_sonames[i] != NULL && !s_libcrypto; i++) { + s_libcrypto = dlopen(crypto_sonames[i], flags); + } + if (!s_libcrypto) { + return; + } + for (size_t i = 0; ssl_sonames[i] != NULL && !s_libssl; i++) { + s_libssl = dlopen(ssl_sonames[i], flags); + } +#endif +} + /* ── Helper: clean up loaded libraries on error ───────────────────────── */ static void cleanup_loaded_libs(void) { @@ -224,6 +292,10 @@ static napi_value napi_load_library(napi_env env, napi_callback_info info) { /* Close previously loaded libraries if any */ cleanup_loaded_libs(); + /* Isolate libcrypto/libssl from Node's static OpenSSL symbols on Linux. + * No-op on other platforms. See preload_isolated_openssl() for details. */ + preload_isolated_openssl(); + /* Load dependency libraries first (e.g., onnxruntime on Windows) */ if (argc >= 2) { napi_valuetype vt; From 649afdabb88cff8562406cc53d44a2447b45aa9a Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Fri, 10 Apr 2026 19:12:49 -0500 Subject: [PATCH 69/83] Add WinML 2.0 EP verification samples for IHV testing Samples in Python, JavaScript, C#, and Rust that verify WinML 2.0 execution providers are discovered, downloaded, and registered correctly. Each sample runs streaming chat completions on a GPU model. 
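The essential flow each sample exercises fits in a few lines. This condensed sketch reuses the Python SDK surface from the sample added below (`FoundryLocalManager`, `download_and_register_eps`, the per-variant `download`/`load`/`get_chat_client`), so treat the exact names as belonging to that sample rather than to a stable public contract:

```python
from foundry_local_sdk import Configuration, FoundryLocalManager
from foundry_local_sdk.detail.model_data_types import DeviceType

FoundryLocalManager.initialize(Configuration(app_name="verify_winml_min"))
manager = FoundryLocalManager.instance

# EPs must be registered before accelerated models show up in the catalog.
manager.download_and_register_eps()

gpu = [v for m in manager.catalog.list_models() for v in m.variants
       if v.info.runtime and v.info.runtime.device_type == DeviceType.GPU]
assert gpu, "no GPU variants: EP registration likely failed"

model = gpu[0]
model.download()
model.load()
for chunk in model.get_chat_client().complete_streaming_chat(
        [{"role": "user", "content": "What is 2 + 2?"}]):
    if chunk.text:
        print(chunk.text, end="", flush=True)
```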
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/verify-winml/Program.cs | 245 ++++++++++++++++++ samples/cs/verify-winml/README.md | 16 ++ samples/cs/verify-winml/VerifyWinML.csproj | 25 ++ samples/js/verify-winml/README.md | 22 ++ samples/js/verify-winml/app.js | 201 +++++++++++++++ samples/js/verify-winml/package.json | 10 + samples/python/verify-winml/README.md | 30 +++ samples/python/verify-winml/requirements.txt | 2 + samples/python/verify-winml/src/app.py | 209 +++++++++++++++ samples/rust/verify-winml/Cargo.toml | 12 + samples/rust/verify-winml/README.md | 16 ++ samples/rust/verify-winml/src/main.rs | 257 +++++++++++++++++++ 12 files changed, 1045 insertions(+) create mode 100644 samples/cs/verify-winml/Program.cs create mode 100644 samples/cs/verify-winml/README.md create mode 100644 samples/cs/verify-winml/VerifyWinML.csproj create mode 100644 samples/js/verify-winml/README.md create mode 100644 samples/js/verify-winml/app.js create mode 100644 samples/js/verify-winml/package.json create mode 100644 samples/python/verify-winml/README.md create mode 100644 samples/python/verify-winml/requirements.txt create mode 100644 samples/python/verify-winml/src/app.py create mode 100644 samples/rust/verify-winml/Cargo.toml create mode 100644 samples/rust/verify-winml/README.md create mode 100644 samples/rust/verify-winml/src/main.rs diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs new file mode 100644 index 00000000..52e50c4c --- /dev/null +++ b/samples/cs/verify-winml/Program.cs @@ -0,0 +1,245 @@ +/// +/// Foundry Local SDK - WinML 2.0 EP Verification (C#) +/// +/// Verifies: +/// 1. WinML execution providers are discovered and registered +/// 2. GPU models appear in catalog after EP registration +/// 3. Streaming chat completions work on a WinML-accelerated model +/// 4. OpenAI SDK chat completions work against a WinML-loaded model +/// + +using Microsoft.AI.Foundry.Local; +using Microsoft.Extensions.Logging; +using OpenAI.Chat; + +const string PASS = "\x1b[92m[PASS]\x1b[0m"; +const string FAIL = "\x1b[91m[FAIL]\x1b[0m"; +const string INFO = "\x1b[94m[INFO]\x1b[0m"; + +var results = new List<(string Name, bool Passed)>(); + +void LogResult(string testName, bool passed, string detail = "") +{ + var status = passed ? PASS : FAIL; + var msg = string.IsNullOrEmpty(detail) ? $"{status} {testName}" : $"{status} {testName} - {detail}"; + Console.WriteLine(msg); + results.Add((testName, passed)); +} + +void PrintSeparator(string title) +{ + Console.WriteLine($"\n{new string('=', 60)}"); + Console.WriteLine($" {title}"); + Console.WriteLine($"{new string('=', 60)}\n"); +} + +bool IsWinmlEp(string name) +{ + var lower = name.ToLowerInvariant(); + return lower.Contains("winml") || lower.Contains("dml"); +} + +void PrintSummary() +{ + PrintSeparator("Summary"); + var passed = results.Count(r => r.Passed); + foreach (var (name, p) in results) + Console.WriteLine($" {(p ? "✓" : "✗")} {name}"); + Console.WriteLine($"\n {passed}/{results.Count} tests passed"); +} + +CancellationToken ct = CancellationToken.None; + +// ── 0. 
Initialize FoundryLocalManager ────────────────────── +PrintSeparator("Initialization"); +var config = new Configuration +{ + AppName = "verify_winml", + LogLevel = Microsoft.AI.Foundry.Local.LogLevel.Information +}; + +using var loggerFactory = LoggerFactory.Create(builder => + builder.SetMinimumLevel(Microsoft.Extensions.Logging.LogLevel.Information)); +var logger = loggerFactory.CreateLogger(); + +await FoundryLocalManager.CreateAsync(config, logger); +var mgr = FoundryLocalManager.Instance; +Console.WriteLine($"{INFO} FoundryLocalManager initialized."); + +// ── 1. Discover & Register EPs ──────────────────────────── +PrintSeparator("Step 1: Discover & Register Execution Providers"); +try +{ + var eps = await mgr.DiscoverEpsAsync(ct); + Console.WriteLine($"{INFO} Discovered {eps.Count} execution providers:"); + bool winmlFound = false; + foreach (var ep in eps) + { + var tag = IsWinmlEp(ep.Name) ? " ★ WinML" : ""; + Console.WriteLine($" - {ep.Name,-40} Registered: {ep.IsRegistered}{tag}"); + if (IsWinmlEp(ep.Name)) winmlFound = true; + } + LogResult("EP Discovery", true, $"{eps.Count} EP(s) found, WinML={(winmlFound ? "YES" : "NO")}"); +} +catch (Exception e) +{ + LogResult("EP Discovery", false, e.Message); +} + +try +{ + var epResult = await mgr.DownloadAndRegisterEpsAsync( + progress: (epName, percent) => + Console.Write($"\r Downloading {epName}: {percent:F1}%"), + ct: ct); + Console.WriteLine(); + Console.WriteLine($"{INFO} EP registration result: success={epResult.Success}, status={epResult.Status}"); + if (epResult.RegisteredEps?.Any() == true) + Console.WriteLine($" Registered: {string.Join(", ", epResult.RegisteredEps)}"); + if (epResult.FailedEps?.Any() == true) + Console.WriteLine($" Failed: {string.Join(", ", epResult.FailedEps)}"); + LogResult("EP Download & Registration", epResult.Success); +} +catch (Exception e) +{ + Console.WriteLine(); + LogResult("EP Download & Registration", false, e.Message); +} + +// ── 2. List Models & Find GPU/WinML Variants ─────────────── +PrintSeparator("Step 2: Model Catalog - GPU/WinML Models"); +var catalog = await mgr.GetCatalogAsync(); +var models = catalog.ListModels(); +Console.WriteLine($"{INFO} Total models in catalog: {models.Count}"); + +var gpuVariants = new List(); +var winmlVariants = new List(); + +foreach (var model in models) +{ + foreach (var variant in model.Variants) + { + var rt = variant.Info?.Runtime; + if (rt?.DeviceType == DeviceType.GPU) + { + gpuVariants.Add(variant); + if (IsWinmlEp(rt.ExecutionProvider ?? "")) + winmlVariants.Add(variant); + } + } +} + +Console.WriteLine($"{INFO} GPU model variants: {gpuVariants.Count}"); +foreach (var v in gpuVariants) +{ + var ep = v.Info?.Runtime?.ExecutionProvider ?? "?"; + Console.WriteLine($" - {v.Id,-50} EP: {ep}"); +} + +LogResult("Catalog - GPU models found", gpuVariants.Count > 0, $"{gpuVariants.Count} GPU variant(s)"); + +// Pick a GPU variant (prefer WinML, fall back to any GPU) +var chosen = winmlVariants.FirstOrDefault() ?? gpuVariants.FirstOrDefault(); +if (chosen == null) +{ + Console.WriteLine($"\n{FAIL} No GPU models available. Cannot proceed with inference tests."); + PrintSummary(); + return; +} + +var chosenEp = chosen.Info?.Runtime?.ExecutionProvider ?? "unknown"; +Console.WriteLine($"\n{INFO} Selected model: {chosen.Id} (EP: {chosenEp})"); + +// ── 3. 
Download & Load Model ────────────────────────────── +PrintSeparator("Step 3: Download & Load Model"); +try +{ + await chosen.DownloadAsync(progress => + Console.Write($"\r Downloading model: {progress:F1}%")); + Console.WriteLine(); + LogResult("Model Download", true); +} +catch (Exception e) +{ + Console.WriteLine(); + LogResult("Model Download", false, e.Message); + PrintSummary(); + return; +} + +try +{ + await chosen.LoadAsync(); + LogResult("Model Load", true, $"Loaded {chosen.Id}"); +} +catch (Exception e) +{ + LogResult("Model Load", false, e.Message); + PrintSummary(); + return; +} + +// ── 4. Streaming Chat Completions (Native SDK) ──────────── +PrintSeparator("Step 4: Streaming Chat Completions (Native)"); +try +{ + var chatClient = await chosen.GetChatClientAsync(); + var messages = new List + { + new() { Role = "system", Content = "You are a helpful assistant." }, + new() { Role = "user", Content = "What is 2 + 2? Reply with just the number." }, + }; + + var fullResponse = ""; + var start = DateTime.UtcNow; + await foreach (var chunk in chatClient.CompleteChatStreamingAsync(messages, ct)) + { + var content = chunk.Choices[0].Message.Content; + if (!string.IsNullOrEmpty(content)) + { + Console.Write(content); + Console.Out.Flush(); + fullResponse += content; + } + } + var elapsed = (DateTime.UtcNow - start).TotalSeconds; + Console.WriteLine(); + LogResult("Streaming Chat (Native)", fullResponse.Length > 0, + $"{fullResponse.Length} chars in {elapsed:F2}s"); +} +catch (Exception e) +{ + LogResult("Streaming Chat (Native)", false, e.Message); +} + +// ── 5. OpenAI SDK Chat Completions ──────────────────────── +PrintSeparator("Step 5: Chat Completions (OpenAI SDK)"); +try +{ + var oaiClient = new ChatClient( + model: chosen.Id, + credential: new System.ClientModel.ApiKeyCredential("not-needed"), + options: new OpenAI.OpenAIClientOptions { Endpoint = new Uri(mgr.Endpoint) } + ); + + var oaiMessages = new List + { + new SystemChatMessage("You are a helpful assistant."), + new UserChatMessage("Name three colors. Reply briefly."), + }; + + var response = await oaiClient.CompleteChatAsync(oaiMessages, cancellationToken: ct); + var content = response.Value.Content[0].Text ?? ""; + Console.WriteLine($" Response: {content[..Math.Min(content.Length, 200)]}"); + LogResult("Chat (OpenAI SDK)", content.Length > 0, $"{content.Length} chars"); +} +catch (Exception e) +{ + LogResult("Chat (OpenAI SDK)", false, e.Message); +} + +// ── Summary ────────────────────────────────────────────── +PrintSummary(); + +await chosen.UnloadAsync(); +Console.WriteLine("Model unloaded. Done!"); diff --git a/samples/cs/verify-winml/README.md b/samples/cs/verify-winml/README.md new file mode 100644 index 00000000..1db07d36 --- /dev/null +++ b/samples/cs/verify-winml/README.md @@ -0,0 +1,16 @@ +# Verify WinML 2.0 Execution Providers (C#) + +This sample verifies that WinML 2.0 execution providers are correctly discovered, +downloaded, and registered using the Foundry Local C# SDK. 
+ +## Prerequisites + +- Windows with a compatible GPU +- Windows App SDK 2.0 runtime installed (preview1 or experimental) +- .NET 9.0 SDK + +## Build & Run + +```bash +dotnet run +``` diff --git a/samples/cs/verify-winml/VerifyWinML.csproj b/samples/cs/verify-winml/VerifyWinML.csproj new file mode 100644 index 00000000..c8324f60 --- /dev/null +++ b/samples/cs/verify-winml/VerifyWinML.csproj @@ -0,0 +1,25 @@ + + + + Exe + net9.0-windows10.0.26100 + enable + enable + false + x64;ARM64 + None + false + + + + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + diff --git a/samples/js/verify-winml/README.md b/samples/js/verify-winml/README.md new file mode 100644 index 00000000..443a3b5b --- /dev/null +++ b/samples/js/verify-winml/README.md @@ -0,0 +1,22 @@ +# Verify WinML 2.0 Execution Providers (JavaScript) + +This sample verifies that WinML 2.0 execution providers are correctly discovered, +downloaded, and registered using the Foundry Local JavaScript SDK. + +## Prerequisites + +- Windows with a compatible GPU +- Windows App SDK 2.0 runtime installed (preview1 or experimental) +- Node.js 18+ + +## Setup + +```bash +npm install +``` + +## Run + +```bash +node app.js +``` diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js new file mode 100644 index 00000000..a7760d32 --- /dev/null +++ b/samples/js/verify-winml/app.js @@ -0,0 +1,201 @@ +/** + * Foundry Local SDK - WinML 2.0 EP Verification Script (JavaScript) + * + * Verifies: + * 1. WinML execution providers are discovered and registered + * 2. GPU models appear in catalog after EP registration + * 3. Streaming chat completions work on a WinML-accelerated model + * 4. OpenAI SDK chat completions work against a WinML-loaded model + */ + +import { FoundryLocalManager } from "foundry-local-sdk"; +import OpenAI from "openai"; + +const PASS = "\x1b[92m[PASS]\x1b[0m"; +const FAIL = "\x1b[91m[FAIL]\x1b[0m"; +const INFO = "\x1b[94m[INFO]\x1b[0m"; + +const results = []; + +function logResult(testName, passed, detail = "") { + const status = passed ? PASS : FAIL; + const msg = detail ? `${status} ${testName} - ${detail}` : `${status} ${testName}`; + console.log(msg); + results.push({ testName, passed }); +} + +function printSeparator(title) { + console.log(`\n${"=".repeat(60)}`); + console.log(` ${title}`); + console.log(`${"=".repeat(60)}\n`); +} + +function isWinmlEp(name) { + const lower = name.toLowerCase(); + return lower.includes("winml") || lower.includes("dml"); +} + +async function main() { + // ── 0. Initialize FoundryLocalManager ────────────────────── + printSeparator("Initialization"); + const manager = await FoundryLocalManager.create({ + appName: "verify_winml", + logLevel: "info", + }); + console.log(`${INFO} FoundryLocalManager initialized.`); + + // ── 1. Discover & Register EPs ──────────────────────────── + printSeparator("Step 1: Discover & Register Execution Providers"); + let winmlEpFound = false; + try { + const eps = await manager.discoverEps(); + console.log(`${INFO} Discovered ${eps.length} execution providers:`); + for (const ep of eps) { + const tag = isWinmlEp(ep.name) ? " ★ WinML" : ""; + console.log(` - ${ep.name.padEnd(40)} Registered: ${ep.isRegistered}${tag}`); + if (isWinmlEp(ep.name)) winmlEpFound = true; + } + logResult("EP Discovery", true, `${eps.length} EP(s) found, WinML=${winmlEpFound ? 
"YES" : "NO"}`); + } catch (e) { + logResult("EP Discovery", false, e.message); + } + + try { + const result = await manager.downloadAndRegisterEps((epName, percent) => { + process.stdout.write(`\r Downloading ${epName}: ${percent.toFixed(1)}%`); + }); + console.log(); + console.log(`${INFO} EP registration result: success=${result.success}, status=${result.status}`); + if (result.registeredEps?.length) console.log(` Registered: ${result.registeredEps.join(", ")}`); + if (result.failedEps?.length) console.log(` Failed: ${result.failedEps.join(", ")}`); + logResult("EP Download & Registration", result.success); + } catch (e) { + console.log(); + logResult("EP Download & Registration", false, e.message); + } + + // ── 2. List Models & Find GPU/WinML Variants ─────────────── + printSeparator("Step 2: Model Catalog - GPU/WinML Models"); + const models = await manager.catalog.listModels(); + console.log(`${INFO} Total models in catalog: ${models.length}`); + + const gpuVariants = []; + const winmlVariants = []; + + for (const model of models) { + for (const variant of model.variants) { + const rt = variant.info?.runtime; + if (rt?.deviceType === "GPU") { + gpuVariants.push(variant); + if (isWinmlEp(rt.executionProvider || "")) { + winmlVariants.push(variant); + } + } + } + } + + console.log(`${INFO} GPU model variants: ${gpuVariants.length}`); + for (const v of gpuVariants) { + const ep = v.info?.runtime?.executionProvider || "?"; + console.log(` - ${v.id.padEnd(50)} EP: ${ep}`); + } + + logResult("Catalog - GPU models found", gpuVariants.length > 0, `${gpuVariants.length} GPU variant(s)`); + + // Pick a GPU variant (prefer WinML, fall back to any GPU) + const chosen = winmlVariants[0] || gpuVariants[0]; + if (!chosen) { + console.log(`\n${FAIL} No GPU models available. Cannot proceed with inference tests.`); + printSummary(); + process.exit(1); + } + + const chosenEp = chosen.info?.runtime?.executionProvider || "unknown"; + console.log(`\n${INFO} Selected model: ${chosen.id} (EP: ${chosenEp})`); + + // ── 3. Download & Load Model ────────────────────────────── + printSeparator("Step 3: Download & Load Model"); + try { + await chosen.download((percent) => { + process.stdout.write(`\r Downloading model: ${percent.toFixed(1)}%`); + }); + console.log(); + logResult("Model Download", true); + } catch (e) { + console.log(); + logResult("Model Download", false, e.message); + printSummary(); + process.exit(1); + } + + try { + await chosen.load(); + logResult("Model Load", true, `Loaded ${chosen.id}`); + } catch (e) { + logResult("Model Load", false, e.message); + printSummary(); + process.exit(1); + } + + // ── 4. Streaming Chat Completions (Native SDK) ──────────── + printSeparator("Step 4: Streaming Chat Completions (Native)"); + const messages = [ + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "What is 2 + 2? Reply with just the number." }, + ]; + + try { + const client = manager.getChatClient(); + let responseText = ""; + const start = Date.now(); + for await (const chunk of client.completeStreamingChat(messages, { modelId: chosen.id })) { + if (chunk.text) { + responseText += chunk.text; + process.stdout.write(chunk.text); + } + } + const elapsed = ((Date.now() - start) / 1000).toFixed(2); + console.log(); + logResult("Streaming Chat (Native)", responseText.length > 0, `${responseText.length} chars in ${elapsed}s`); + } catch (e) { + logResult("Streaming Chat (Native)", false, e.message); + } + + // ── 5. 
OpenAI SDK Chat Completions ──────────────────────── + printSeparator("Step 5: Chat Completions (OpenAI SDK)"); + try { + const oaiClient = new OpenAI({ + baseURL: manager.endpoint, + apiKey: "not-needed", + }); + const response = await oaiClient.chat.completions.create({ + model: chosen.id, + messages: [ + { role: "system", content: "You are a helpful assistant." }, + { role: "user", content: "Name three colors. Reply briefly." }, + ], + }); + const content = response.choices[0]?.message?.content || ""; + console.log(` Response: ${content.slice(0, 200)}`); + logResult("Chat (OpenAI SDK)", content.length > 0, `${content.length} chars`); + } catch (e) { + logResult("Chat (OpenAI SDK)", false, e.message); + } + + printSummary(); +} + +function printSummary() { + printSeparator("Summary"); + const passed = results.filter((r) => r.passed).length; + for (const { testName, passed: p } of results) { + console.log(` ${p ? "✓" : "✗"} ${testName}`); + } + console.log(`\n ${passed}/${results.length} tests passed`); + if (passed < results.length) process.exit(1); +} + +main().catch((e) => { + console.error(e); + process.exit(1); +}); diff --git a/samples/js/verify-winml/package.json b/samples/js/verify-winml/package.json new file mode 100644 index 00000000..cc5b735f --- /dev/null +++ b/samples/js/verify-winml/package.json @@ -0,0 +1,10 @@ +{ + "name": "verify-winml", + "version": "1.0.0", + "type": "module", + "main": "app.js", + "dependencies": { + "foundry-local-sdk": "*", + "openai": "^4.0.0" + } +} diff --git a/samples/python/verify-winml/README.md b/samples/python/verify-winml/README.md new file mode 100644 index 00000000..a5566bbb --- /dev/null +++ b/samples/python/verify-winml/README.md @@ -0,0 +1,30 @@ +# Verify WinML 2.0 Execution Providers + +This sample verifies that WinML 2.0 execution providers are correctly discovered, +downloaded, and registered. It then runs inference on a GPU model using the WinML EP. + +## Prerequisites + +- Windows with a compatible GPU +- Windows App SDK 2.0 runtime installed (preview1 or experimental) +- Python 3.11+ + +## Setup + +```bash +pip install -r requirements.txt +``` + +## Run + +```bash +python src/app.py +``` + +## What it tests + +1. **EP Discovery** — Lists all available execution providers, highlights WinML/DML +2. **EP Download & Registration** — Downloads and registers EPs +3. **Model Catalog** — Lists GPU model variants available after EP registration +4. **Streaming Chat** — Runs streaming chat completion on a GPU model via native SDK +5. **OpenAI SDK Chat** — Runs chat completion via the OpenAI-compatible REST API diff --git a/samples/python/verify-winml/requirements.txt b/samples/python/verify-winml/requirements.txt new file mode 100644 index 00000000..83318744 --- /dev/null +++ b/samples/python/verify-winml/requirements.txt @@ -0,0 +1,2 @@ +foundry-local-sdk-winml +openai diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py new file mode 100644 index 00000000..1f7af326 --- /dev/null +++ b/samples/python/verify-winml/src/app.py @@ -0,0 +1,209 @@ +""" +Foundry Local SDK - WinML 2.0 EP Verification Script + +Verifies: + 1. WinML execution providers are discovered and registered + 2. GPU models appear in catalog after EP registration + 3. Streaming chat completions work on a WinML-accelerated model + 4. 
Web service works with OpenAI SDK against a WinML-loaded model +""" + +import sys +import time +import openai +from foundry_local_sdk import Configuration, FoundryLocalManager +from foundry_local_sdk.detail.model_data_types import DeviceType + + +PASS = "\033[92m[PASS]\033[0m" +FAIL = "\033[91m[FAIL]\033[0m" +INFO = "\033[94m[INFO]\033[0m" +WARN = "\033[93m[WARN]\033[0m" + +results = [] + + +def log_result(test_name: str, passed: bool, detail: str = ""): + status = PASS if passed else FAIL + msg = f"{status} {test_name}" + if detail: + msg += f" - {detail}" + print(msg) + results.append((test_name, passed)) + + +def print_separator(title: str): + print(f"\n{'=' * 60}") + print(f" {title}") + print(f"{'=' * 60}\n") + + +def is_winml_ep(ep_name: str) -> bool: + return "winml" in ep_name.lower() or "dml" in ep_name.lower() + + +def main(): + # ── 0. Initialize FoundryLocalManager ────────────────────── + print_separator("Initialization") + config = Configuration(app_name="verify_winml") + FoundryLocalManager.initialize(config) + manager = FoundryLocalManager.instance + print(f"{INFO} FoundryLocalManager initialized.") + + # ── 1. Discover & Register EPs ──────────────────────────── + print_separator("Step 1: Discover & Register Execution Providers") + winml_ep_found = False + try: + eps = manager.discover_eps() + print(f"{INFO} Discovered {len(eps)} execution providers:") + for ep in eps: + tag = " ★ WinML" if is_winml_ep(ep.name) else "" + print(f" - {ep.name:40s} Registered: {ep.is_registered}{tag}") + if is_winml_ep(ep.name): + winml_ep_found = True + log_result("EP Discovery", True, f"{len(eps)} EP(s) found, WinML={'YES' if winml_ep_found else 'NO'}") + except Exception as e: + log_result("EP Discovery", False, str(e)) + + try: + def ep_progress(ep_name: str, percent: float): + print(f"\r Downloading {ep_name}: {percent:.1f}%", end="", flush=True) + + result = manager.download_and_register_eps(progress_callback=ep_progress) + print() + print(f"{INFO} EP registration result: success={result.success}, status={result.status}") + if result.registered_eps: + print(f" Registered: {', '.join(result.registered_eps)}") + if result.failed_eps: + print(f" Failed: {', '.join(result.failed_eps)}") + winml_registered = any(is_winml_ep(name) for name in result.registered_eps) + log_result("EP Download & Registration", result.success, + f"WinML registered: {winml_registered}") + except Exception as e: + print() + log_result("EP Download & Registration", False, str(e)) + + # ── 2. List Models & Find GPU/WinML Variants ─────────────── + print_separator("Step 2: Model Catalog - GPU/WinML Models") + catalog = manager.catalog + models = catalog.list_models() + print(f"{INFO} Total models in catalog: {len(models)}") + + gpu_variants = [] + winml_variants = [] + + for model in models: + for variant in model.variants: + rt = variant.info.runtime + if rt and rt.device_type == DeviceType.GPU: + gpu_variants.append(variant) + if is_winml_ep(rt.execution_provider or ""): + winml_variants.append(variant) + + print(f"{INFO} GPU model variants: {len(gpu_variants)}") + for v in gpu_variants: + ep = v.info.runtime.execution_provider if v.info.runtime else "?" + print(f" - {v.id:50s} EP: {ep}") + + print(f"\n{INFO} WinML model variants: {len(winml_variants)}") + for v in winml_variants: + ep = v.info.runtime.execution_provider if v.info.runtime else "?" 
+ print(f" - {v.id:50s} EP: {ep}") + + log_result("Catalog - GPU models found", len(gpu_variants) > 0, + f"{len(gpu_variants)} GPU variant(s)") + + # Pick a GPU variant (prefer WinML, fall back to any GPU) + chosen = winml_variants[0] if winml_variants else (gpu_variants[0] if gpu_variants else None) + + if not chosen: + print(f"\n{FAIL} No GPU models available. Cannot proceed with inference tests.") + print(f"{WARN} Ensure the system has a compatible GPU and WinML drivers installed.") + _print_summary() + sys.exit(1) + + chosen_ep = chosen.info.runtime.execution_provider if chosen.info.runtime else "unknown" + print(f"\n{INFO} Selected model: {chosen.id} (EP: {chosen_ep})") + + # ── 3. Download & Load Model ────────────────────────────── + print_separator("Step 3: Download & Load Model") + try: + def dl_progress(percent): + print(f"\r Downloading model: {percent:.1f}%", end="", flush=True) + + chosen.download(progress_callback=dl_progress) + print() + log_result("Model Download", True) + except Exception as e: + print() + log_result("Model Download", False, str(e)) + _print_summary() + sys.exit(1) + + try: + chosen.load() + log_result("Model Load", True, f"Loaded {chosen.id}") + except Exception as e: + log_result("Model Load", False, str(e)) + _print_summary() + sys.exit(1) + + # ── 4. Streaming Chat Completions (Native SDK) ──────────── + print_separator("Step 4: Streaming Chat Completions (Native)") + messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "What is 2 + 2? Reply with just the number."}, + ] + + try: + client = manager.get_chat_client() + response_text = "" + start = time.time() + for chunk in client.complete_streaming_chat(messages, model_id=chosen.id): + if chunk.text: + response_text += chunk.text + print(chunk.text, end="", flush=True) + elapsed = time.time() - start + print() + log_result("Streaming Chat (Native)", len(response_text) > 0, + f"{len(response_text)} chars in {elapsed:.2f}s") + except Exception as e: + log_result("Streaming Chat (Native)", False, str(e)) + + # ── 5. OpenAI SDK Chat Completions ──────────────────────── + print_separator("Step 5: Chat Completions (OpenAI SDK)") + try: + oai_client = openai.OpenAI( + base_url=manager.endpoint, + api_key="not-needed", + ) + oai_messages = [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Name three colors. 
Reply briefly."}, + ] + response = oai_client.chat.completions.create( + model=chosen.id, + messages=oai_messages, + ) + content = response.choices[0].message.content or "" + print(f" Response: {content[:200]}") + log_result("Chat (OpenAI SDK)", len(content) > 0, f"{len(content)} chars") + except Exception as e: + log_result("Chat (OpenAI SDK)", False, str(e)) + + _print_summary() + + +def _print_summary(): + print_separator("Summary") + passed = sum(1 for _, p in results if p) + total = len(results) + for name, p in results: + print(f" {'✓' if p else '✗'} {name}") + print(f"\n {passed}/{total} tests passed") + if passed < total: + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/samples/rust/verify-winml/Cargo.toml b/samples/rust/verify-winml/Cargo.toml new file mode 100644 index 00000000..ca5cac04 --- /dev/null +++ b/samples/rust/verify-winml/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "verify-winml" +version = "0.1.0" +edition = "2021" + +[dependencies] +foundry-local-sdk = { path = "../../../sdk/rust" } +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" +anyhow = "1" +serde_json = "1" +reqwest = { version = "0.12", features = ["json"] } diff --git a/samples/rust/verify-winml/README.md b/samples/rust/verify-winml/README.md new file mode 100644 index 00000000..12d3f927 --- /dev/null +++ b/samples/rust/verify-winml/README.md @@ -0,0 +1,16 @@ +# Verify WinML 2.0 Execution Providers (Rust) + +This sample verifies that WinML 2.0 execution providers are correctly discovered, +downloaded, and registered using the Foundry Local Rust SDK. + +## Prerequisites + +- Windows with a compatible GPU +- Windows App SDK 2.0 runtime installed (preview1 or experimental) +- Rust toolchain + +## Build & Run + +```bash +cargo run +``` diff --git a/samples/rust/verify-winml/src/main.rs b/samples/rust/verify-winml/src/main.rs new file mode 100644 index 00000000..3ad7d555 --- /dev/null +++ b/samples/rust/verify-winml/src/main.rs @@ -0,0 +1,257 @@ +/// Foundry Local SDK - WinML 2.0 EP Verification (Rust) +/// +/// Verifies: +/// 1. WinML execution providers are discovered and registered +/// 2. GPU models appear in catalog after EP registration +/// 3. Streaming chat completions work on a WinML-accelerated model + +use foundry_local_sdk::{ + ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, FoundryLocalConfig, FoundryLocalManager, +}; +use std::io::{self, Write}; +use tokio_stream::StreamExt; + +const PASS: &str = "\x1b[92m[PASS]\x1b[0m"; +const FAIL: &str = "\x1b[91m[FAIL]\x1b[0m"; +const INFO: &str = "\x1b[94m[INFO]\x1b[0m"; + +fn is_winml_ep(name: &str) -> bool { + let lower = name.to_lowercase(); + lower.contains("winml") || lower.contains("dml") +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let mut results: Vec<(&str, bool)> = Vec::new(); + + // ── 0. Initialize FoundryLocalManager ────────────────────── + println!("\n{}", "=".repeat(60)); + println!(" Initialization"); + println!("{}\n", "=".repeat(60)); + + let manager = FoundryLocalManager::create(FoundryLocalConfig::new("verify_winml"))?; + println!("{INFO} FoundryLocalManager initialized."); + + // ── 1. 
Discover & Register EPs ──────────────────────────── + println!("\n{}", "=".repeat(60)); + println!(" Step 1: Discover & Register Execution Providers"); + println!("{}\n", "=".repeat(60)); + + match manager.discover_eps().await { + Ok(eps) => { + println!("{INFO} Discovered {} execution providers:", eps.len()); + let mut winml_found = false; + for ep in &eps { + let tag = if is_winml_ep(&ep.name) { + winml_found = true; + " ★ WinML" + } else { + "" + }; + println!(" - {:<40} Registered: {}{}", ep.name, ep.is_registered, tag); + } + let detail = format!( + "{} EP(s) found, WinML={}", + eps.len(), + if winml_found { "YES" } else { "NO" } + ); + println!("{PASS} EP Discovery - {detail}"); + results.push(("EP Discovery", true)); + } + Err(e) => { + println!("{FAIL} EP Discovery - {e}"); + results.push(("EP Discovery", false)); + } + } + + match manager + .download_and_register_eps(Some(|ep_name: &str, percent: f64| { + print!("\r Downloading {ep_name}: {percent:.1}%"); + io::stdout().flush().ok(); + })) + .await + { + Ok(result) => { + println!(); + println!( + "{INFO} EP registration result: success={}, status={}", + result.success, result.status + ); + if !result.registered_eps.is_empty() { + println!(" Registered: {}", result.registered_eps.join(", ")); + } + if !result.failed_eps.is_empty() { + println!(" Failed: {}", result.failed_eps.join(", ")); + } + let status = if result.success { PASS } else { FAIL }; + println!("{status} EP Download & Registration"); + results.push(("EP Download & Registration", result.success)); + } + Err(e) => { + println!(); + println!("{FAIL} EP Download & Registration - {e}"); + results.push(("EP Download & Registration", false)); + } + } + + // ── 2. List Models & Find GPU/WinML Variants ─────────────── + println!("\n{}", "=".repeat(60)); + println!(" Step 2: Model Catalog - GPU/WinML Models"); + println!("{}\n", "=".repeat(60)); + + let models = manager.catalog().list_models().await?; + println!("{INFO} Total models in catalog: {}", models.len()); + + let mut gpu_models = Vec::new(); + for model in &models { + for variant in &model.variants { + if let Some(rt) = &variant.info.runtime { + if rt.device_type.as_deref() == Some("GPU") { + let ep = rt.execution_provider.as_deref().unwrap_or("?"); + println!(" - {:<50} EP: {ep}", variant.id); + gpu_models.push(variant); + } + } + } + } + + println!("{INFO} GPU model variants: {}", gpu_models.len()); + let has_gpu = !gpu_models.is_empty(); + let status = if has_gpu { PASS } else { FAIL }; + println!("{status} Catalog - GPU models found - {} GPU variant(s)", gpu_models.len()); + results.push(("Catalog - GPU models found", has_gpu)); + + if gpu_models.is_empty() { + println!("\n{FAIL} No GPU models available. Cannot proceed with inference tests."); + print_summary(&results); + return Ok(()); + } + + // Prefer WinML variant, fall back to any GPU + let chosen = gpu_models + .iter() + .find(|v| { + v.info + .runtime + .as_ref() + .and_then(|rt| rt.execution_provider.as_deref()) + .map(is_winml_ep) + .unwrap_or(false) + }) + .or(gpu_models.first()) + .unwrap(); + + let chosen_ep = chosen + .info + .runtime + .as_ref() + .and_then(|rt| rt.execution_provider.as_deref()) + .unwrap_or("unknown"); + println!("\n{INFO} Selected model: {} (EP: {chosen_ep})", chosen.id); + + // ── 3. 
Download & Load Model ────────────────────────────── + println!("\n{}", "=".repeat(60)); + println!(" Step 3: Download & Load Model"); + println!("{}\n", "=".repeat(60)); + + // Get the model by its parent alias + let model_alias = chosen.id.split('/').next().unwrap_or(&chosen.id); + let model = manager.catalog().get_model(model_alias).await?; + + if !model.is_cached().await? { + match model + .download(Some(|progress: f64| { + print!("\r Downloading model: {progress:.1}%"); + io::stdout().flush().ok(); + })) + .await + { + Ok(_) => { + println!(); + println!("{PASS} Model Download"); + results.push(("Model Download", true)); + } + Err(e) => { + println!(); + println!("{FAIL} Model Download - {e}"); + results.push(("Model Download", false)); + print_summary(&results); + return Ok(()); + } + } + } else { + println!("{PASS} Model Download - already cached"); + results.push(("Model Download", true)); + } + + match model.load().await { + Ok(_) => { + println!("{PASS} Model Load - Loaded {}", model_alias); + results.push(("Model Load", true)); + } + Err(e) => { + println!("{FAIL} Model Load - {e}"); + results.push(("Model Load", false)); + print_summary(&results); + return Ok(()); + } + } + + // ── 4. Streaming Chat Completions ──────────────────────── + println!("\n{}", "=".repeat(60)); + println!(" Step 4: Streaming Chat Completions"); + println!("{}\n", "=".repeat(60)); + + let messages: Vec = vec![ + ChatCompletionRequestSystemMessage::from("You are a helpful assistant.").into(), + ChatCompletionRequestUserMessage::from("What is 2 + 2? Reply with just the number.").into(), + ]; + + let client = model.create_chat_client().temperature(0.7).max_tokens(64); + match client.complete_streaming_chat(&messages, None).await { + Ok(mut stream) => { + let mut full_response = String::new(); + let start = std::time::Instant::now(); + while let Some(chunk) = stream.next().await { + match chunk { + Ok(c) => { + if let Some(text) = c.choices.first().and_then(|ch| ch.delta.content.as_deref()) { + print!("{text}"); + io::stdout().flush().ok(); + full_response.push_str(text); + } + } + Err(e) => { + println!("\n{FAIL} Streaming chunk error: {e}"); + break; + } + } + } + let elapsed = start.elapsed().as_secs_f64(); + println!(); + let ok = !full_response.is_empty(); + let status = if ok { PASS } else { FAIL }; + println!("{status} Streaming Chat - {} chars in {elapsed:.2}s", full_response.len()); + results.push(("Streaming Chat", ok)); + } + Err(e) => { + println!("{FAIL} Streaming Chat - {e}"); + results.push(("Streaming Chat", false)); + } + } + + print_summary(&results); + Ok(()) +} + +fn print_summary(results: &[(&str, bool)]) { + println!("\n{}", "=".repeat(60)); + println!(" Summary"); + println!("{}\n", "=".repeat(60)); + let passed = results.iter().filter(|(_, p)| *p).count(); + for (name, p) in results { + println!(" {} {name}", if *p { "✓" } else { "✗" }); + } + println!("\n {passed}/{} tests passed", results.len()); +} From 08f6872d6439888b9bae8073a5a4e7d36571e615 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Sat, 11 Apr 2026 01:44:01 -0500 Subject: [PATCH 70/83] Pin foundry-local-core-winml to WinML 2.0 preview build Points to 1.0.0.dev20260411012949 on ORT-Nightly feed, built with WindowsAppSDK.ML 2.0.297-preview and ORT 1.24.4. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/python/requirements-winml.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index ac05c640..5cbddfd2 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -2,6 +2,6 @@ pydantic>=2.0.0 requests>=2.32.4 openai>=2.24.0 # WinML native binary packages from the ORT-Nightly PyPI feed. -foundry-local-core-winml==1.0.0 +foundry-local-core-winml==1.0.0.dev20260411012949 onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.2 \ No newline at end of file From c142f45b8a8da7123d30f2714ef346707dde5003 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Sat, 11 Apr 2026 12:04:53 -0500 Subject: [PATCH 71/83] Update SDK references to WinML 2.0 preview packages on ORT-Nightly - Python: foundry-local-core-winml==1.0.0.dev20260411003620 - NuGet: Microsoft.AI.Foundry.Local.Core.WinML 1.0.0-dev-20260411T003630-592f019 - JS/Rust: same NuGet version from ORT-Nightly feed Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/python/requirements-winml.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index 5cbddfd2..e63815ba 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -1,7 +1,7 @@ pydantic>=2.0.0 requests>=2.32.4 openai>=2.24.0 -# WinML native binary packages from the ORT-Nightly PyPI feed. -foundry-local-core-winml==1.0.0.dev20260411012949 +# WinML native binary packages. +foundry-local-core-winml==1.0.0 onnxruntime-core==1.23.2.3 -onnxruntime-genai-core==0.13.2 \ No newline at end of file +onnxruntime-genai-core==0.13.2 From 3b993d1a3dff83ebfbb2b3ae6467351454984571 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Sat, 11 Apr 2026 13:11:54 -0500 Subject: [PATCH 72/83] Fix verify-winml samples: correct SDK API calls and package resolution - Python: fix chat client API, web service endpoint, pin dev core version - C#: fix DiscoverEps return type, DownloadAndRegisterEpsAsync signature, ListModelsAsync, web service URL, add Betalgo.Ranul.OpenAI dep - nuget.config: add WindowsAppSDK/Windows.SDK patterns to ORT-Nightly source Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/nuget.config | 2 +- samples/cs/verify-winml/Program.cs | 61 +++++++------------- samples/cs/verify-winml/VerifyWinML.csproj | 1 + samples/python/verify-winml/requirements.txt | 2 + samples/python/verify-winml/src/app.py | 10 +++- 5 files changed, 32 insertions(+), 44 deletions(-) diff --git a/samples/cs/nuget.config b/samples/cs/nuget.config index 63954b2f..89435736 100644 --- a/samples/cs/nuget.config +++ b/samples/cs/nuget.config @@ -16,4 +16,4 @@ - \ No newline at end of file + diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index 52e50c4c..be9e67f7 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -33,12 +33,6 @@ void PrintSeparator(string title) Console.WriteLine($"{new string('=', 60)}\n"); } -bool IsWinmlEp(string name) -{ - var lower = name.ToLowerInvariant(); - return lower.Contains("winml") || lower.Contains("dml"); -} - void PrintSummary() { PrintSeparator("Summary"); @@ -70,16 +64,13 @@ void PrintSummary() PrintSeparator("Step 1: Discover & Register Execution Providers"); try { - var eps = await mgr.DiscoverEpsAsync(ct); - Console.WriteLine($"{INFO} 
Discovered {eps.Count} execution providers:"); - bool winmlFound = false; + var eps = mgr.DiscoverEps(); + Console.WriteLine($"{INFO} Discovered {eps.Length} execution providers:"); foreach (var ep in eps) { - var tag = IsWinmlEp(ep.Name) ? " ★ WinML" : ""; - Console.WriteLine($" - {ep.Name,-40} Registered: {ep.IsRegistered}{tag}"); - if (IsWinmlEp(ep.Name)) winmlFound = true; + Console.WriteLine($" - {ep.Name,-40} Registered: {ep.IsRegistered}"); } - LogResult("EP Discovery", true, $"{eps.Count} EP(s) found, WinML={(winmlFound ? "YES" : "NO")}"); + LogResult("EP Discovery", true, $"{eps.Length} EP(s) found"); } catch (Exception e) { @@ -89,11 +80,10 @@ void PrintSummary() try { var epResult = await mgr.DownloadAndRegisterEpsAsync( - progress: (epName, percent) => - Console.Write($"\r Downloading {epName}: {percent:F1}%"), - ct: ct); + new Action((epName, percent) => + Console.Write($"\r Downloading {epName}: {percent:F1}%")), ct); Console.WriteLine(); - Console.WriteLine($"{INFO} EP registration result: success={epResult.Success}, status={epResult.Status}"); + Console.WriteLine($"{INFO} EP registration: success={epResult.Success}, status={epResult.Status}"); if (epResult.RegisteredEps?.Any() == true) Console.WriteLine($" Registered: {string.Join(", ", epResult.RegisteredEps)}"); if (epResult.FailedEps?.Any() == true) @@ -106,15 +96,13 @@ void PrintSummary() LogResult("EP Download & Registration", false, e.Message); } -// ── 2. List Models & Find GPU/WinML Variants ─────────────── -PrintSeparator("Step 2: Model Catalog - GPU/WinML Models"); +// ── 2. List Models & Find GPU Variants ──────────────────── +PrintSeparator("Step 2: Model Catalog - GPU Models"); var catalog = await mgr.GetCatalogAsync(); -var models = catalog.ListModels(); +var models = await catalog.ListModelsAsync(); Console.WriteLine($"{INFO} Total models in catalog: {models.Count}"); -var gpuVariants = new List(); -var winmlVariants = new List(); - +IModel? chosen = null; foreach (var model in models) { foreach (var variant in model.Variants) @@ -122,24 +110,15 @@ void PrintSummary() var rt = variant.Info?.Runtime; if (rt?.DeviceType == DeviceType.GPU) { - gpuVariants.Add(variant); - if (IsWinmlEp(rt.ExecutionProvider ?? "")) - winmlVariants.Add(variant); + Console.WriteLine($" - {variant.Id,-50} EP: {rt.ExecutionProvider ?? "?"}"); + chosen ??= variant; } } } -Console.WriteLine($"{INFO} GPU model variants: {gpuVariants.Count}"); -foreach (var v in gpuVariants) -{ - var ep = v.Info?.Runtime?.ExecutionProvider ?? "?"; - Console.WriteLine($" - {v.Id,-50} EP: {ep}"); -} - -LogResult("Catalog - GPU models found", gpuVariants.Count > 0, $"{gpuVariants.Count} GPU variant(s)"); +LogResult("Catalog - GPU models found", chosen != null, + chosen != null ? $"Selected: {chosen.Id}" : "No GPU models"); -// Pick a GPU variant (prefer WinML, fall back to any GPU) -var chosen = winmlVariants.FirstOrDefault() ?? gpuVariants.FirstOrDefault(); if (chosen == null) { Console.WriteLine($"\n{FAIL} No GPU models available. Cannot proceed with inference tests."); @@ -147,9 +126,6 @@ void PrintSummary() return; } -var chosenEp = chosen.Info?.Runtime?.ExecutionProvider ?? "unknown"; -Console.WriteLine($"\n{INFO} Selected model: {chosen.Id} (EP: {chosenEp})"); - // ── 3. 
Download & Load Model ────────────────────────────── PrintSeparator("Step 3: Download & Load Model"); try @@ -216,10 +192,15 @@ await chosen.DownloadAsync(progress => PrintSeparator("Step 5: Chat Completions (OpenAI SDK)"); try { + await mgr.StartWebServiceAsync(); + var webUrl = mgr.Urls?.FirstOrDefault() + ?? throw new Exception("Web service did not return a URL"); + Console.WriteLine($"{INFO} Web service at: {webUrl}"); + var oaiClient = new ChatClient( model: chosen.Id, credential: new System.ClientModel.ApiKeyCredential("not-needed"), - options: new OpenAI.OpenAIClientOptions { Endpoint = new Uri(mgr.Endpoint) } + options: new OpenAI.OpenAIClientOptions { Endpoint = new Uri($"{webUrl}/v1") } ); var oaiMessages = new List diff --git a/samples/cs/verify-winml/VerifyWinML.csproj b/samples/cs/verify-winml/VerifyWinML.csproj index c8324f60..b7365b3e 100644 --- a/samples/cs/verify-winml/VerifyWinML.csproj +++ b/samples/cs/verify-winml/VerifyWinML.csproj @@ -17,6 +17,7 @@ + diff --git a/samples/python/verify-winml/requirements.txt b/samples/python/verify-winml/requirements.txt index 83318744..0e92a289 100644 --- a/samples/python/verify-winml/requirements.txt +++ b/samples/python/verify-winml/requirements.txt @@ -1,2 +1,4 @@ foundry-local-sdk-winml openai +# Override with WinML 2.0 preview core from ORT-Nightly +foundry-local-core-winml==1.0.0.dev20260411003620 diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index 1f7af326..a99d7ac4 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -156,10 +156,10 @@ def dl_progress(percent): ] try: - client = manager.get_chat_client() + client = chosen.get_chat_client() response_text = "" start = time.time() - for chunk in client.complete_streaming_chat(messages, model_id=chosen.id): + for chunk in client.complete_streaming_chat(messages): if chunk.text: response_text += chunk.text print(chunk.text, end="", flush=True) @@ -173,8 +173,12 @@ def dl_progress(percent): # ── 5. OpenAI SDK Chat Completions ──────────────────────── print_separator("Step 5: Chat Completions (OpenAI SDK)") try: + manager.start_web_service() + base_url = f"{manager.urls[0]}/v1" + print(f"{INFO} Web service started at: {base_url}") + oai_client = openai.OpenAI( - base_url=manager.endpoint, + base_url=base_url, api_key="not-needed", ) oai_messages = [ From 70156d26bd382515ef6df5dc6d5c9856e72541ce Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Mon, 13 Apr 2026 11:38:59 -0500 Subject: [PATCH 73/83] Remove misleading WinML sample status output The verify-winml samples are already exercising the WinML package path, so printing WinML=YES/NO based on EP name heuristics is misleading. Remove the extra labeling from Python, JS, and Rust samples. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/js/verify-winml/app.js | 7 ++----- samples/python/verify-winml/src/app.py | 12 +++--------- samples/rust/verify-winml/src/main.rs | 15 ++------------- 3 files changed, 7 insertions(+), 27 deletions(-) diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index a7760d32..68fdee14 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -46,16 +46,13 @@ async function main() { // ── 1. 
Discover & Register EPs ──────────────────────────── printSeparator("Step 1: Discover & Register Execution Providers"); - let winmlEpFound = false; try { const eps = await manager.discoverEps(); console.log(`${INFO} Discovered ${eps.length} execution providers:`); for (const ep of eps) { - const tag = isWinmlEp(ep.name) ? " ★ WinML" : ""; - console.log(` - ${ep.name.padEnd(40)} Registered: ${ep.isRegistered}${tag}`); - if (isWinmlEp(ep.name)) winmlEpFound = true; + console.log(` - ${ep.name.padEnd(40)} Registered: ${ep.isRegistered}`); } - logResult("EP Discovery", true, `${eps.length} EP(s) found, WinML=${winmlEpFound ? "YES" : "NO"}`); + logResult("EP Discovery", true, `${eps.length} EP(s) found`); } catch (e) { logResult("EP Discovery", false, e.message); } diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index a99d7ac4..59edc532 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -52,16 +52,12 @@ def main(): # ── 1. Discover & Register EPs ──────────────────────────── print_separator("Step 1: Discover & Register Execution Providers") - winml_ep_found = False try: eps = manager.discover_eps() print(f"{INFO} Discovered {len(eps)} execution providers:") for ep in eps: - tag = " ★ WinML" if is_winml_ep(ep.name) else "" - print(f" - {ep.name:40s} Registered: {ep.is_registered}{tag}") - if is_winml_ep(ep.name): - winml_ep_found = True - log_result("EP Discovery", True, f"{len(eps)} EP(s) found, WinML={'YES' if winml_ep_found else 'NO'}") + print(f" - {ep.name:40s} Registered: {ep.is_registered}") + log_result("EP Discovery", True, f"{len(eps)} EP(s) found") except Exception as e: log_result("EP Discovery", False, str(e)) @@ -76,9 +72,7 @@ def ep_progress(ep_name: str, percent: float): print(f" Registered: {', '.join(result.registered_eps)}") if result.failed_eps: print(f" Failed: {', '.join(result.failed_eps)}") - winml_registered = any(is_winml_ep(name) for name in result.registered_eps) - log_result("EP Download & Registration", result.success, - f"WinML registered: {winml_registered}") + log_result("EP Download & Registration", result.success) except Exception as e: print() log_result("EP Download & Registration", False, str(e)) diff --git a/samples/rust/verify-winml/src/main.rs b/samples/rust/verify-winml/src/main.rs index 3ad7d555..c24125c9 100644 --- a/samples/rust/verify-winml/src/main.rs +++ b/samples/rust/verify-winml/src/main.rs @@ -41,21 +41,10 @@ async fn main() -> anyhow::Result<()> { match manager.discover_eps().await { Ok(eps) => { println!("{INFO} Discovered {} execution providers:", eps.len()); - let mut winml_found = false; for ep in &eps { - let tag = if is_winml_ep(&ep.name) { - winml_found = true; - " ★ WinML" - } else { - "" - }; - println!(" - {:<40} Registered: {}{}", ep.name, ep.is_registered, tag); + println!(" - {:<40} Registered: {}", ep.name, ep.is_registered); } - let detail = format!( - "{} EP(s) found, WinML={}", - eps.len(), - if winml_found { "YES" } else { "NO" } - ); + let detail = format!("{} EP(s) found", eps.len()); println!("{PASS} EP Discovery - {detail}"); results.push(("EP Discovery", true)); } From e35fe79eab97face35eb1d7649a02fc7615071c7 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Mon, 13 Apr 2026 12:10:27 -0500 Subject: [PATCH 74/83] Fix preview sample package wiring Align the preview validation samples with the packages that are actually published today: keep the public SDK layers where needed, route the preview Core.WinML package to ORT-Nightly, 
and fix the verify-winml sample code paths so Python, JS, C#, and Rust all use the intended preview bits. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/verify-winml/Program.cs | 10 +++--- samples/cs/verify-winml/README.md | 4 +++ samples/cs/verify-winml/VerifyWinML.csproj | 4 +-- samples/js/verify-winml/README.md | 3 ++ samples/js/verify-winml/app.js | 7 +++- samples/js/verify-winml/package.json | 2 +- samples/python/verify-winml/README.md | 6 +++- samples/python/verify-winml/requirements.txt | 8 +++-- samples/rust/Cargo.toml | 1 + samples/rust/verify-winml/Cargo.toml | 2 +- samples/rust/verify-winml/README.md | 4 +++ samples/rust/verify-winml/src/main.rs | 37 ++++++++++---------- 12 files changed, 57 insertions(+), 31 deletions(-) diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index be9e67f7..49ca61cf 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -11,6 +11,8 @@ using Microsoft.AI.Foundry.Local; using Microsoft.Extensions.Logging; using OpenAI.Chat; +using FoundryChatMessage = Microsoft.AI.Foundry.Local.OpenAI.ChatMessage; +using FoundryChatMessageRole = Microsoft.AI.Foundry.Local.OpenAI.ChatMessageRole; const string PASS = "\x1b[92m[PASS]\x1b[0m"; const string FAIL = "\x1b[91m[FAIL]\x1b[0m"; @@ -160,17 +162,17 @@ await chosen.DownloadAsync(progress => try { var chatClient = await chosen.GetChatClientAsync(); - var messages = new List + var messages = new List { - new() { Role = "system", Content = "You are a helpful assistant." }, - new() { Role = "user", Content = "What is 2 + 2? Reply with just the number." }, + new() { Role = FoundryChatMessageRole.System, Content = "You are a helpful assistant." }, + new() { Role = FoundryChatMessageRole.User, Content = "What is 2 + 2? Reply with just the number." }, }; var fullResponse = ""; var start = DateTime.UtcNow; await foreach (var chunk in chatClient.CompleteChatStreamingAsync(messages, ct)) { - var content = chunk.Choices[0].Message.Content; + var content = chunk.Choices?.FirstOrDefault()?.Message?.Content; if (!string.IsNullOrEmpty(content)) { Console.Write(content); diff --git a/samples/cs/verify-winml/README.md b/samples/cs/verify-winml/README.md index 1db07d36..03587ff4 100644 --- a/samples/cs/verify-winml/README.md +++ b/samples/cs/verify-winml/README.md @@ -11,6 +11,10 @@ downloaded, and registered using the Foundry Local C# SDK. ## Build & Run +This sample uses the public `Microsoft.AI.Foundry.Local.WinML` SDK package and +overrides its native `Microsoft.AI.Foundry.Local.Core.WinML` dependency with the +preview package from ORT-Nightly via the shared `..\nuget.config`. + ```bash dotnet run ``` diff --git a/samples/cs/verify-winml/VerifyWinML.csproj b/samples/cs/verify-winml/VerifyWinML.csproj index b7365b3e..ac342bae 100644 --- a/samples/cs/verify-winml/VerifyWinML.csproj +++ b/samples/cs/verify-winml/VerifyWinML.csproj @@ -5,7 +5,7 @@ net9.0-windows10.0.26100 enable enable - false + true x64;ARM64 None false @@ -17,7 +17,7 @@ - + diff --git a/samples/js/verify-winml/README.md b/samples/js/verify-winml/README.md index 443a3b5b..05dd3555 100644 --- a/samples/js/verify-winml/README.md +++ b/samples/js/verify-winml/README.md @@ -11,6 +11,9 @@ downloaded, and registered using the Foundry Local JavaScript SDK. 
## Setup +`package.json` installs `foundry-local-sdk-winml`, which layers the WinML +preview core package onto the public JS SDK during install: + ```bash npm install ``` diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index 68fdee14..23a37196 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -161,8 +161,13 @@ async function main() { // ── 5. OpenAI SDK Chat Completions ──────────────────────── printSeparator("Step 5: Chat Completions (OpenAI SDK)"); try { + manager.startWebService(); + const webUrl = manager.urls?.[0]; + if (!webUrl) throw new Error("Web service did not return a URL"); + console.log(`${INFO} Web service started at: ${webUrl}`); + const oaiClient = new OpenAI({ - baseURL: manager.endpoint, + baseURL: `${webUrl}/v1`, apiKey: "not-needed", }); const response = await oaiClient.chat.completions.create({ diff --git a/samples/js/verify-winml/package.json b/samples/js/verify-winml/package.json index cc5b735f..5dfd9d4a 100644 --- a/samples/js/verify-winml/package.json +++ b/samples/js/verify-winml/package.json @@ -4,7 +4,7 @@ "type": "module", "main": "app.js", "dependencies": { - "foundry-local-sdk": "*", + "foundry-local-sdk-winml": "1.0.0", "openai": "^4.0.0" } } diff --git a/samples/python/verify-winml/README.md b/samples/python/verify-winml/README.md index a5566bbb..5b9941d9 100644 --- a/samples/python/verify-winml/README.md +++ b/samples/python/verify-winml/README.md @@ -11,6 +11,10 @@ downloaded, and registered. It then runs inference on a GPU model using the WinM ## Setup +`requirements.txt` already adds the ORT-Nightly Python feed and combines the +public `foundry-local-sdk` package with the WinML 2.0 preview native packages, +so a plain install is enough: + ```bash pip install -r requirements.txt ``` @@ -23,7 +27,7 @@ python src/app.py ## What it tests -1. **EP Discovery** — Lists all available execution providers, highlights WinML/DML +1. **EP Discovery** — Lists all available execution providers 2. **EP Download & Registration** — Downloads and registers EPs 3. **Model Catalog** — Lists GPU model variants available after EP registration 4. **Streaming Chat** — Runs streaming chat completion on a GPU model via native SDK diff --git a/samples/python/verify-winml/requirements.txt b/samples/python/verify-winml/requirements.txt index 0e92a289..54039b3e 100644 --- a/samples/python/verify-winml/requirements.txt +++ b/samples/python/verify-winml/requirements.txt @@ -1,4 +1,8 @@ -foundry-local-sdk-winml +--extra-index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ + +foundry-local-sdk==1.0.0 openai -# Override with WinML 2.0 preview core from ORT-Nightly +# Use the public Python SDK code with the WinML 2.0 preview native packages. 
foundry-local-core-winml==1.0.0.dev20260411003620 +onnxruntime-core==1.24.4 +onnxruntime-genai-core==0.13.1 diff --git a/samples/rust/Cargo.toml b/samples/rust/Cargo.toml index 7be551ea..0a4dfd1d 100644 --- a/samples/rust/Cargo.toml +++ b/samples/rust/Cargo.toml @@ -9,5 +9,6 @@ members = [ "tutorial-document-summarizer", "tutorial-tool-calling", "tutorial-voice-to-text", + "verify-winml", ] resolver = "2" diff --git a/samples/rust/verify-winml/Cargo.toml b/samples/rust/verify-winml/Cargo.toml index ca5cac04..6394e993 100644 --- a/samples/rust/verify-winml/Cargo.toml +++ b/samples/rust/verify-winml/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -foundry-local-sdk = { path = "../../../sdk/rust" } +foundry-local-sdk = { path = "../../../sdk/rust", features = ["winml"] } tokio = { version = "1", features = ["full"] } tokio-stream = "0.1" anyhow = "1" diff --git a/samples/rust/verify-winml/README.md b/samples/rust/verify-winml/README.md index 12d3f927..b7d85579 100644 --- a/samples/rust/verify-winml/README.md +++ b/samples/rust/verify-winml/README.md @@ -11,6 +11,10 @@ downloaded, and registered using the Foundry Local Rust SDK. ## Build & Run +This sample enables the Rust SDK's `winml` feature and the SDK build script +downloads the preview `Microsoft.AI.Foundry.Local.Core.WinML` package from +ORT-Nightly during the build. + ```bash cargo run ``` diff --git a/samples/rust/verify-winml/src/main.rs b/samples/rust/verify-winml/src/main.rs index c24125c9..d1e2c281 100644 --- a/samples/rust/verify-winml/src/main.rs +++ b/samples/rust/verify-winml/src/main.rs @@ -7,7 +7,7 @@ use foundry_local_sdk::{ ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage, - ChatCompletionRequestUserMessage, FoundryLocalConfig, FoundryLocalManager, + ChatCompletionRequestUserMessage, DeviceType, FoundryLocalConfig, FoundryLocalManager, }; use std::io::{self, Write}; use tokio_stream::StreamExt; @@ -38,7 +38,7 @@ async fn main() -> anyhow::Result<()> { println!(" Step 1: Discover & Register Execution Providers"); println!("{}\n", "=".repeat(60)); - match manager.discover_eps().await { + match manager.discover_eps() { Ok(eps) => { println!("{INFO} Discovered {} execution providers:", eps.len()); for ep in &eps { @@ -55,10 +55,10 @@ async fn main() -> anyhow::Result<()> { } match manager - .download_and_register_eps(Some(|ep_name: &str, percent: f64| { + .download_and_register_eps_with_progress(None, |ep_name: &str, percent: f64| { print!("\r Downloading {ep_name}: {percent:.1}%"); io::stdout().flush().ok(); - })) + }) .await { Ok(result) => { @@ -89,16 +89,16 @@ async fn main() -> anyhow::Result<()> { println!(" Step 2: Model Catalog - GPU/WinML Models"); println!("{}\n", "=".repeat(60)); - let models = manager.catalog().list_models().await?; + let models = manager.catalog().get_models().await?; println!("{INFO} Total models in catalog: {}", models.len()); let mut gpu_models = Vec::new(); for model in &models { - for variant in &model.variants { - if let Some(rt) = &variant.info.runtime { - if rt.device_type.as_deref() == Some("GPU") { - let ep = rt.execution_provider.as_deref().unwrap_or("?"); - println!(" - {:<50} EP: {ep}", variant.id); + for variant in model.variants() { + if let Some(rt) = &variant.info().runtime { + if rt.device_type == DeviceType::GPU { + let ep = &rt.execution_provider; + println!(" - {:<50} EP: {ep}", variant.id()); gpu_models.push(variant); } } @@ -121,23 +121,22 @@ async fn main() -> anyhow::Result<()> { let chosen = gpu_models .iter() .find(|v| { - 
v.info + v.info() .runtime .as_ref() - .and_then(|rt| rt.execution_provider.as_deref()) - .map(is_winml_ep) + .map(|rt| is_winml_ep(&rt.execution_provider)) .unwrap_or(false) }) .or(gpu_models.first()) .unwrap(); let chosen_ep = chosen - .info + .info() .runtime .as_ref() - .and_then(|rt| rt.execution_provider.as_deref()) + .map(|rt| rt.execution_provider.as_str()) .unwrap_or("unknown"); - println!("\n{INFO} Selected model: {} (EP: {chosen_ep})", chosen.id); + println!("\n{INFO} Selected model: {} (EP: {chosen_ep})", chosen.id()); // ── 3. Download & Load Model ────────────────────────────── println!("\n{}", "=".repeat(60)); @@ -145,8 +144,8 @@ async fn main() -> anyhow::Result<()> { println!("{}\n", "=".repeat(60)); // Get the model by its parent alias - let model_alias = chosen.id.split('/').next().unwrap_or(&chosen.id); - let model = manager.catalog().get_model(model_alias).await?; + let model = manager.catalog().get_model(chosen.alias()).await?; + model.select_variant_by_id(chosen.id())?; if !model.is_cached().await? { match model @@ -176,7 +175,7 @@ async fn main() -> anyhow::Result<()> { match model.load().await { Ok(_) => { - println!("{PASS} Model Load - Loaded {}", model_alias); + println!("{PASS} Model Load - Loaded {}", chosen.id()); results.push(("Model Load", true)); } Err(e) => { From 2bb79a9240949d9d7ec6163d493eea0e7db03475 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Mon, 13 Apr 2026 18:54:45 -0500 Subject: [PATCH 75/83] Simplify verify-winml sample flows Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/verify-winml/Program.cs | 129 +++++++++------ samples/cs/verify-winml/README.md | 3 +- samples/cs/verify-winml/VerifyWinML.csproj | 1 - samples/js/verify-winml/README.md | 3 +- samples/js/verify-winml/app.js | 137 ++++++++-------- samples/js/verify-winml/package.json | 3 +- samples/python/verify-winml/README.md | 14 +- samples/python/verify-winml/requirements.txt | 1 - samples/python/verify-winml/src/app.py | 106 ++++++------- samples/rust/verify-winml/Cargo.toml | 2 +- samples/rust/verify-winml/README.md | 3 +- samples/rust/verify-winml/src/main.rs | 156 +++++++++++++------ 12 files changed, 322 insertions(+), 236 deletions(-) diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index 49ca61cf..c7aa3f8e 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -2,21 +2,20 @@ /// Foundry Local SDK - WinML 2.0 EP Verification (C#) /// /// Verifies: -/// 1. WinML execution providers are discovered and registered -/// 2. GPU models appear in catalog after EP registration -/// 3. Streaming chat completions work on a WinML-accelerated model -/// 4. OpenAI SDK chat completions work against a WinML-loaded model +/// 1. Execution providers are discovered and registered +/// 2. Accelerated models appear in catalog after EP registration +/// 3. 
Streaming chat completions work on an accelerated model /// using Microsoft.AI.Foundry.Local; using Microsoft.Extensions.Logging; -using OpenAI.Chat; using FoundryChatMessage = Microsoft.AI.Foundry.Local.OpenAI.ChatMessage; using FoundryChatMessageRole = Microsoft.AI.Foundry.Local.OpenAI.ChatMessageRole; const string PASS = "\x1b[92m[PASS]\x1b[0m"; const string FAIL = "\x1b[91m[FAIL]\x1b[0m"; const string INFO = "\x1b[94m[INFO]\x1b[0m"; +const string WARN = "\x1b[93m[WARN]\x1b[0m"; var results = new List<(string Name, bool Passed)>(); @@ -40,10 +39,19 @@ void PrintSummary() PrintSeparator("Summary"); var passed = results.Count(r => r.Passed); foreach (var (name, p) in results) + { Console.WriteLine($" {(p ? "✓" : "✗")} {name}"); + } + Console.WriteLine($"\n {passed}/{results.Count} tests passed"); } +bool IsAcceleratedVariant(IModel model) +{ + var runtime = model.Info?.Runtime; + return runtime != null && (runtime.DeviceType == DeviceType.GPU || runtime.DeviceType == DeviceType.NPU); +} + CancellationToken ct = CancellationToken.None; // ── 0. Initialize FoundryLocalManager ────────────────────── @@ -64,14 +72,16 @@ void PrintSummary() // ── 1. Discover & Register EPs ──────────────────────────── PrintSeparator("Step 1: Discover & Register Execution Providers"); +EpInfo[] eps = []; try { - var eps = mgr.DiscoverEps(); + eps = mgr.DiscoverEps(); Console.WriteLine($"{INFO} Discovered {eps.Length} execution providers:"); foreach (var ep in eps) { Console.WriteLine($" - {ep.Name,-40} Registered: {ep.IsRegistered}"); } + LogResult("EP Discovery", true, $"{eps.Length} EP(s) found"); } catch (Exception e) @@ -79,55 +89,104 @@ void PrintSummary() LogResult("EP Discovery", false, e.Message); } +if (eps.Length == 0) +{ + var detail = "No execution providers discovered on this machine"; + LogResult("EP Download & Registration", false, detail); + Console.WriteLine($"\n{FAIL} {detail}."); + PrintSummary(); + return; +} + try { + string? currentProgressEp = null; + var currentProgressPercent = -1d; + var epResult = await mgr.DownloadAndRegisterEpsAsync( new Action((epName, percent) => - Console.Write($"\r Downloading {epName}: {percent:F1}%")), ct); - Console.WriteLine(); + { + if (currentProgressEp != null && + (!epName.Equals(currentProgressEp, StringComparison.OrdinalIgnoreCase) || percent < currentProgressPercent)) + { + Console.WriteLine(); + } + + currentProgressEp = epName; + currentProgressPercent = percent; + Console.Write($"\r Downloading {epName}: {percent:F1}%"); + }), + ct); + + if (currentProgressEp != null) + { + Console.WriteLine(); + } + Console.WriteLine($"{INFO} EP registration: success={epResult.Success}, status={epResult.Status}"); if (epResult.RegisteredEps?.Any() == true) + { Console.WriteLine($" Registered: {string.Join(", ", epResult.RegisteredEps)}"); + } + if (epResult.FailedEps?.Any() == true) + { Console.WriteLine($" Failed: {string.Join(", ", epResult.FailedEps)}"); - LogResult("EP Download & Registration", epResult.Success); + } + + var downloadOk = epResult.Success || epResult.RegisteredEps?.Any() == true; + var detail = downloadOk && epResult.RegisteredEps?.Any() == true + ? $"{epResult.RegisteredEps.Length} EP(s) registered" + : epResult.Status; + LogResult("EP Download & Registration", downloadOk, detail); + if (!downloadOk) + { + PrintSummary(); + return; + } } catch (Exception e) { Console.WriteLine(); LogResult("EP Download & Registration", false, e.Message); + PrintSummary(); + return; } -// ── 2. 
List Models & Find GPU Variants ──────────────────── -PrintSeparator("Step 2: Model Catalog - GPU Models"); +// ── 2. List Models & Find Accelerated Variants ──────────── +PrintSeparator("Step 2: Model Catalog - Accelerated Models"); var catalog = await mgr.GetCatalogAsync(); var models = await catalog.ListModelsAsync(); Console.WriteLine($"{INFO} Total models in catalog: {models.Count}"); -IModel? chosen = null; +var acceleratedVariants = new List(); foreach (var model in models) { foreach (var variant in model.Variants) { - var rt = variant.Info?.Runtime; - if (rt?.DeviceType == DeviceType.GPU) + if (IsAcceleratedVariant(variant)) { - Console.WriteLine($" - {variant.Id,-50} EP: {rt.ExecutionProvider ?? "?"}"); - chosen ??= variant; + acceleratedVariants.Add(variant); + var runtime = variant.Info?.Runtime; + Console.WriteLine($" - {variant.Id,-50} Device: {runtime?.DeviceType,-3} EP: {runtime?.ExecutionProvider ?? "?"}"); } } } -LogResult("Catalog - GPU models found", chosen != null, - chosen != null ? $"Selected: {chosen.Id}" : "No GPU models"); +var chosen = acceleratedVariants.FirstOrDefault(); +LogResult("Catalog - Accelerated models found", chosen != null, + chosen != null ? $"{acceleratedVariants.Count} accelerated variant(s)" : "No accelerated model variants"); if (chosen == null) { - Console.WriteLine($"\n{FAIL} No GPU models available. Cannot proceed with inference tests."); + Console.WriteLine($"\n{FAIL} No accelerated model variants are available."); + Console.WriteLine($"{WARN} Ensure the system has a compatible accelerator and matching model variants installed."); PrintSummary(); return; } +Console.WriteLine($"\n{INFO} Selected model: {chosen.Id} (EP: {chosen.Info?.Runtime?.ExecutionProvider ?? "unknown"})"); + // ── 3. Download & Load Model ────────────────────────────── PrintSeparator("Step 3: Download & Load Model"); try @@ -180,6 +239,7 @@ await chosen.DownloadAsync(progress => fullResponse += content; } } + var elapsed = (DateTime.UtcNow - start).TotalSeconds; Console.WriteLine(); LogResult("Streaming Chat (Native)", fullResponse.Length > 0, @@ -190,37 +250,6 @@ await chosen.DownloadAsync(progress => LogResult("Streaming Chat (Native)", false, e.Message); } -// ── 5. OpenAI SDK Chat Completions ──────────────────────── -PrintSeparator("Step 5: Chat Completions (OpenAI SDK)"); -try -{ - await mgr.StartWebServiceAsync(); - var webUrl = mgr.Urls?.FirstOrDefault() - ?? throw new Exception("Web service did not return a URL"); - Console.WriteLine($"{INFO} Web service at: {webUrl}"); - - var oaiClient = new ChatClient( - model: chosen.Id, - credential: new System.ClientModel.ApiKeyCredential("not-needed"), - options: new OpenAI.OpenAIClientOptions { Endpoint = new Uri($"{webUrl}/v1") } - ); - - var oaiMessages = new List - { - new SystemChatMessage("You are a helpful assistant."), - new UserChatMessage("Name three colors. Reply briefly."), - }; - - var response = await oaiClient.CompleteChatAsync(oaiMessages, cancellationToken: ct); - var content = response.Value.Content[0].Text ?? 
""; - Console.WriteLine($" Response: {content[..Math.Min(content.Length, 200)]}"); - LogResult("Chat (OpenAI SDK)", content.Length > 0, $"{content.Length} chars"); -} -catch (Exception e) -{ - LogResult("Chat (OpenAI SDK)", false, e.Message); -} - // ── Summary ────────────────────────────────────────────── PrintSummary(); diff --git a/samples/cs/verify-winml/README.md b/samples/cs/verify-winml/README.md index 03587ff4..da4616c4 100644 --- a/samples/cs/verify-winml/README.md +++ b/samples/cs/verify-winml/README.md @@ -1,7 +1,8 @@ # Verify WinML 2.0 Execution Providers (C#) This sample verifies that WinML 2.0 execution providers are correctly discovered, -downloaded, and registered using the Foundry Local C# SDK. +downloaded, and registered using the Foundry Local C# SDK. It uses registered WinML +EP-backed model variants and finishes with one native streaming chat check. ## Prerequisites diff --git a/samples/cs/verify-winml/VerifyWinML.csproj b/samples/cs/verify-winml/VerifyWinML.csproj index ac342bae..0eb65cf6 100644 --- a/samples/cs/verify-winml/VerifyWinML.csproj +++ b/samples/cs/verify-winml/VerifyWinML.csproj @@ -18,7 +18,6 @@ - diff --git a/samples/js/verify-winml/README.md b/samples/js/verify-winml/README.md index 05dd3555..e187bc23 100644 --- a/samples/js/verify-winml/README.md +++ b/samples/js/verify-winml/README.md @@ -1,7 +1,8 @@ # Verify WinML 2.0 Execution Providers (JavaScript) This sample verifies that WinML 2.0 execution providers are correctly discovered, -downloaded, and registered using the Foundry Local JavaScript SDK. +downloaded, and registered using the Foundry Local JavaScript SDK. It uses registered +WinML EP-backed model variants and finishes with one native streaming chat check. ## Prerequisites diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index 23a37196..38886a7e 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -2,18 +2,17 @@ * Foundry Local SDK - WinML 2.0 EP Verification Script (JavaScript) * * Verifies: - * 1. WinML execution providers are discovered and registered - * 2. GPU models appear in catalog after EP registration - * 3. Streaming chat completions work on a WinML-accelerated model - * 4. OpenAI SDK chat completions work against a WinML-loaded model + * 1. Execution providers are discovered and registered + * 2. Accelerated models appear in catalog after EP registration + * 3. Streaming chat completions work on an accelerated model */ import { FoundryLocalManager } from "foundry-local-sdk"; -import OpenAI from "openai"; const PASS = "\x1b[92m[PASS]\x1b[0m"; const FAIL = "\x1b[91m[FAIL]\x1b[0m"; const INFO = "\x1b[94m[INFO]\x1b[0m"; +const WARN = "\x1b[93m[WARN]\x1b[0m"; const results = []; @@ -30,15 +29,15 @@ function printSeparator(title) { console.log(`${"=".repeat(60)}\n`); } -function isWinmlEp(name) { - const lower = name.toLowerCase(); - return lower.includes("winml") || lower.includes("dml"); +function isAcceleratedVariant(variant) { + const runtime = variant.info?.runtime; + return Boolean(runtime && ["GPU", "NPU"].includes(runtime.deviceType)); } async function main() { // ── 0. Initialize FoundryLocalManager ────────────────────── printSeparator("Initialization"); - const manager = await FoundryLocalManager.create({ + const manager = FoundryLocalManager.create({ appName: "verify_winml", logLevel: "info", }); @@ -46,8 +45,9 @@ async function main() { // ── 1. 
Discover & Register EPs ──────────────────────────── printSeparator("Step 1: Discover & Register Execution Providers"); + let eps = []; try { - const eps = await manager.discoverEps(); + eps = manager.discoverEps(); console.log(`${INFO} Discovered ${eps.length} execution providers:`); for (const ep of eps) { console.log(` - ${ep.name.padEnd(40)} Registered: ${ep.isRegistered}`); @@ -57,52 +57,86 @@ async function main() { logResult("EP Discovery", false, e.message); } + if (!eps.length) { + const detail = "No execution providers discovered on this machine"; + logResult("EP Download & Registration", false, detail); + console.log(`\n${FAIL} ${detail}.`); + printSummary(); + return; + } + try { + let lastProgressEp = null; + let lastProgressPercent = -1; const result = await manager.downloadAndRegisterEps((epName, percent) => { + if (lastProgressEp && (lastProgressEp !== epName || percent < lastProgressPercent)) { + process.stdout.write("\n"); + } + lastProgressEp = epName; + lastProgressPercent = percent; process.stdout.write(`\r Downloading ${epName}: ${percent.toFixed(1)}%`); }); - console.log(); + if (lastProgressEp) { + console.log(); + } + console.log(`${INFO} EP registration result: success=${result.success}, status=${result.status}`); - if (result.registeredEps?.length) console.log(` Registered: ${result.registeredEps.join(", ")}`); - if (result.failedEps?.length) console.log(` Failed: ${result.failedEps.join(", ")}`); - logResult("EP Download & Registration", result.success); + if (result.registeredEps?.length) { + console.log(` Registered: ${result.registeredEps.join(", ")}`); + } + if (result.failedEps?.length) { + console.log(` Failed: ${result.failedEps.join(", ")}`); + } + + const downloadOk = result.success || (result.registeredEps?.length ?? 0) > 0; + const detail = downloadOk && result.registeredEps?.length + ? `${result.registeredEps.length} EP(s) registered` + : result.status; + logResult("EP Download & Registration", downloadOk, detail); + if (!downloadOk) { + printSummary(); + return; + } } catch (e) { console.log(); logResult("EP Download & Registration", false, e.message); + printSummary(); + return; } - // ── 2. List Models & Find GPU/WinML Variants ─────────────── - printSeparator("Step 2: Model Catalog - GPU/WinML Models"); - const models = await manager.catalog.listModels(); + // ── 2. 
List Models & Find Accelerated Variants ──────────── + printSeparator("Step 2: Model Catalog - Accelerated Models"); + const models = await manager.catalog.getModels(); console.log(`${INFO} Total models in catalog: ${models.length}`); - const gpuVariants = []; - const winmlVariants = []; + const acceleratedVariants = []; for (const model of models) { for (const variant of model.variants) { - const rt = variant.info?.runtime; - if (rt?.deviceType === "GPU") { - gpuVariants.push(variant); - if (isWinmlEp(rt.executionProvider || "")) { - winmlVariants.push(variant); - } + if (isAcceleratedVariant(variant)) { + acceleratedVariants.push(variant); } } } - console.log(`${INFO} GPU model variants: ${gpuVariants.length}`); - for (const v of gpuVariants) { - const ep = v.info?.runtime?.executionProvider || "?"; - console.log(` - ${v.id.padEnd(50)} EP: ${ep}`); + console.log(`${INFO} Accelerated model variants: ${acceleratedVariants.length}`); + for (const variant of acceleratedVariants) { + const runtime = variant.info?.runtime; + const ep = runtime?.executionProvider || "?"; + const device = runtime?.deviceType || "?"; + console.log(` - ${variant.id.padEnd(50)} Device: ${String(device).padEnd(3)} EP: ${ep}`); } - logResult("Catalog - GPU models found", gpuVariants.length > 0, `${gpuVariants.length} GPU variant(s)`); + logResult( + "Catalog - Accelerated models found", + acceleratedVariants.length > 0, + `${acceleratedVariants.length} accelerated variant(s)`, + ); - // Pick a GPU variant (prefer WinML, fall back to any GPU) - const chosen = winmlVariants[0] || gpuVariants[0]; + const chosen = acceleratedVariants[0]; if (!chosen) { - console.log(`\n${FAIL} No GPU models available. Cannot proceed with inference tests.`); + console.log(`\n${FAIL} No accelerated model variants are available.`); + console.log(`${WARN} Ensure the system has a compatible accelerator and matching model variants installed.`); printSummary(); process.exit(1); } @@ -142,13 +176,14 @@ async function main() { ]; try { - const client = manager.getChatClient(); + const client = chosen.createChatClient(); let responseText = ""; const start = Date.now(); - for await (const chunk of client.completeStreamingChat(messages, { modelId: chosen.id })) { - if (chunk.text) { - responseText += chunk.text; - process.stdout.write(chunk.text); + for await (const chunk of client.completeStreamingChat(messages)) { + const content = chunk?.choices?.[0]?.delta?.content; + if (content) { + responseText += content; + process.stdout.write(content); } } const elapsed = ((Date.now() - start) / 1000).toFixed(2); @@ -158,32 +193,6 @@ async function main() { logResult("Streaming Chat (Native)", false, e.message); } - // ── 5. OpenAI SDK Chat Completions ──────────────────────── - printSeparator("Step 5: Chat Completions (OpenAI SDK)"); - try { - manager.startWebService(); - const webUrl = manager.urls?.[0]; - if (!webUrl) throw new Error("Web service did not return a URL"); - console.log(`${INFO} Web service started at: ${webUrl}`); - - const oaiClient = new OpenAI({ - baseURL: `${webUrl}/v1`, - apiKey: "not-needed", - }); - const response = await oaiClient.chat.completions.create({ - model: chosen.id, - messages: [ - { role: "system", content: "You are a helpful assistant." }, - { role: "user", content: "Name three colors. Reply briefly." 
}, - ], - }); - const content = response.choices[0]?.message?.content || ""; - console.log(` Response: ${content.slice(0, 200)}`); - logResult("Chat (OpenAI SDK)", content.length > 0, `${content.length} chars`); - } catch (e) { - logResult("Chat (OpenAI SDK)", false, e.message); - } - printSummary(); } diff --git a/samples/js/verify-winml/package.json b/samples/js/verify-winml/package.json index 5dfd9d4a..29220599 100644 --- a/samples/js/verify-winml/package.json +++ b/samples/js/verify-winml/package.json @@ -4,7 +4,6 @@ "type": "module", "main": "app.js", "dependencies": { - "foundry-local-sdk-winml": "1.0.0", - "openai": "^4.0.0" + "foundry-local-sdk-winml": "1.0.0" } } diff --git a/samples/python/verify-winml/README.md b/samples/python/verify-winml/README.md index 5b9941d9..a0b3dc17 100644 --- a/samples/python/verify-winml/README.md +++ b/samples/python/verify-winml/README.md @@ -1,7 +1,8 @@ # Verify WinML 2.0 Execution Providers This sample verifies that WinML 2.0 execution providers are correctly discovered, -downloaded, and registered. It then runs inference on a GPU model using the WinML EP. +downloaded, and registered. It then runs inference on a model variant backed by a +registered WinML EP. It finishes with one native streaming chat check. ## Prerequisites @@ -11,11 +12,15 @@ downloaded, and registered. It then runs inference on a GPU model using the WinM ## Setup +Use a fresh virtual environment for this sample. + `requirements.txt` already adds the ORT-Nightly Python feed and combines the public `foundry-local-sdk` package with the WinML 2.0 preview native packages, so a plain install is enough: ```bash +python -m venv .venv +.venv\Scripts\Activate.ps1 pip install -r requirements.txt ``` @@ -28,7 +33,6 @@ python src/app.py ## What it tests 1. **EP Discovery** — Lists all available execution providers -2. **EP Download & Registration** — Downloads and registers EPs -3. **Model Catalog** — Lists GPU model variants available after EP registration -4. **Streaming Chat** — Runs streaming chat completion on a GPU model via native SDK -5. **OpenAI SDK Chat** — Runs chat completion via the OpenAI-compatible REST API +2. **EP Download & Registration** — Downloads only the WinML EPs relevant to the machine +3. **Model Catalog** — Lists model variants backed by the registered WinML EPs +4. **Streaming Chat** — Runs streaming chat completion on a WinML EP-backed model via native SDK diff --git a/samples/python/verify-winml/requirements.txt b/samples/python/verify-winml/requirements.txt index 54039b3e..9c319abf 100644 --- a/samples/python/verify-winml/requirements.txt +++ b/samples/python/verify-winml/requirements.txt @@ -1,7 +1,6 @@ --extra-index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ foundry-local-sdk==1.0.0 -openai # Use the public Python SDK code with the WinML 2.0 preview native packages. foundry-local-core-winml==1.0.0.dev20260411003620 onnxruntime-core==1.24.4 diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index 59edc532..40044b98 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -2,15 +2,13 @@ Foundry Local SDK - WinML 2.0 EP Verification Script Verifies: - 1. WinML execution providers are discovered and registered - 2. GPU models appear in catalog after EP registration - 3. Streaming chat completions work on a WinML-accelerated model - 4. Web service works with OpenAI SDK against a WinML-loaded model + 1. 
Execution providers are discovered and registered + 2. Accelerated models appear in catalog after EP registration + 3. Streaming chat completions work on an accelerated model """ import sys import time -import openai from foundry_local_sdk import Configuration, FoundryLocalManager from foundry_local_sdk.detail.model_data_types import DeviceType @@ -38,8 +36,9 @@ def print_separator(title: str): print(f"{'=' * 60}\n") -def is_winml_ep(ep_name: str) -> bool: - return "winml" in ep_name.lower() or "dml" in ep_name.lower() +def is_accelerated_variant(variant) -> bool: + rt = variant.info.runtime + return rt is not None and rt.device_type in (DeviceType.GPU, DeviceType.NPU) def main(): @@ -52,6 +51,7 @@ def main(): # ── 1. Discover & Register EPs ──────────────────────────── print_separator("Step 1: Discover & Register Execution Providers") + eps = [] try: eps = manager.discover_eps() print(f"{INFO} Discovered {len(eps)} execution providers:") @@ -61,12 +61,29 @@ def main(): except Exception as e: log_result("EP Discovery", False, str(e)) + if not eps: + detail = "No execution providers discovered on this machine" + log_result("EP Download & Registration", False, detail) + print(f"\n{FAIL} {detail}.") + _print_summary() + return + try: + progress_state = {"ep": None, "percent": -1.0} + def ep_progress(ep_name: str, percent: float): + if progress_state["ep"] is not None and ( + progress_state["ep"] != ep_name or percent < progress_state["percent"] + ): + print() + progress_state["ep"] = ep_name + progress_state["percent"] = percent print(f"\r Downloading {ep_name}: {percent:.1f}%", end="", flush=True) result = manager.download_and_register_eps(progress_callback=ep_progress) - print() + if progress_state["ep"] is not None: + print() + print(f"{INFO} EP registration result: success={result.success}, status={result.status}") if result.registered_eps: print(f" Registered: {', '.join(result.registered_eps)}") @@ -76,45 +93,39 @@ def ep_progress(ep_name: str, percent: float): except Exception as e: print() log_result("EP Download & Registration", False, str(e)) + _print_summary() + return - # ── 2. List Models & Find GPU/WinML Variants ─────────────── - print_separator("Step 2: Model Catalog - GPU/WinML Models") + # ── 2. List Models & Find Accelerated Variants ───────────── + print_separator("Step 2: Model Catalog - Accelerated Models") catalog = manager.catalog models = catalog.list_models() print(f"{INFO} Total models in catalog: {len(models)}") - gpu_variants = [] - winml_variants = [] + accelerated_variants = [] for model in models: for variant in model.variants: - rt = variant.info.runtime - if rt and rt.device_type == DeviceType.GPU: - gpu_variants.append(variant) - if is_winml_ep(rt.execution_provider or ""): - winml_variants.append(variant) - - print(f"{INFO} GPU model variants: {len(gpu_variants)}") - for v in gpu_variants: - ep = v.info.runtime.execution_provider if v.info.runtime else "?" - print(f" - {v.id:50s} EP: {ep}") + if is_accelerated_variant(variant): + accelerated_variants.append(variant) - print(f"\n{INFO} WinML model variants: {len(winml_variants)}") - for v in winml_variants: - ep = v.info.runtime.execution_provider if v.info.runtime else "?" - print(f" - {v.id:50s} EP: {ep}") + print(f"{INFO} Accelerated model variants: {len(accelerated_variants)}") + for v in accelerated_variants: + rt = v.info.runtime + ep = rt.execution_provider if rt else "?" + device = rt.device_type if rt else "?" 
+ print(f" - {v.id:50s} Device: {device:3s} EP: {ep}") - log_result("Catalog - GPU models found", len(gpu_variants) > 0, - f"{len(gpu_variants)} GPU variant(s)") + log_result("Catalog - Accelerated models found", len(accelerated_variants) > 0, + f"{len(accelerated_variants)} accelerated variant(s)") - # Pick a GPU variant (prefer WinML, fall back to any GPU) - chosen = winml_variants[0] if winml_variants else (gpu_variants[0] if gpu_variants else None) + chosen = accelerated_variants[0] if accelerated_variants else None if not chosen: - print(f"\n{FAIL} No GPU models available. Cannot proceed with inference tests.") - print(f"{WARN} Ensure the system has a compatible GPU and WinML drivers installed.") + print(f"\n{FAIL} No accelerated model variants are available.") + print(f"{WARN} Ensure the system has a compatible accelerator and matching model variants installed.") _print_summary() - sys.exit(1) + return chosen_ep = chosen.info.runtime.execution_provider if chosen.info.runtime else "unknown" print(f"\n{INFO} Selected model: {chosen.id} (EP: {chosen_ep})") @@ -132,7 +143,7 @@ def dl_progress(percent): print() log_result("Model Download", False, str(e)) _print_summary() - sys.exit(1) + return try: chosen.load() @@ -140,7 +151,7 @@ def dl_progress(percent): except Exception as e: log_result("Model Load", False, str(e)) _print_summary() - sys.exit(1) + return # ── 4. Streaming Chat Completions (Native SDK) ──────────── print_separator("Step 4: Streaming Chat Completions (Native)") @@ -164,31 +175,6 @@ def dl_progress(percent): except Exception as e: log_result("Streaming Chat (Native)", False, str(e)) - # ── 5. OpenAI SDK Chat Completions ──────────────────────── - print_separator("Step 5: Chat Completions (OpenAI SDK)") - try: - manager.start_web_service() - base_url = f"{manager.urls[0]}/v1" - print(f"{INFO} Web service started at: {base_url}") - - oai_client = openai.OpenAI( - base_url=base_url, - api_key="not-needed", - ) - oai_messages = [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Name three colors. Reply briefly."}, - ] - response = oai_client.chat.completions.create( - model=chosen.id, - messages=oai_messages, - ) - content = response.choices[0].message.content or "" - print(f" Response: {content[:200]}") - log_result("Chat (OpenAI SDK)", len(content) > 0, f"{len(content)} chars") - except Exception as e: - log_result("Chat (OpenAI SDK)", False, str(e)) - _print_summary() diff --git a/samples/rust/verify-winml/Cargo.toml b/samples/rust/verify-winml/Cargo.toml index 6394e993..00f28ae2 100644 --- a/samples/rust/verify-winml/Cargo.toml +++ b/samples/rust/verify-winml/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "verify-winml" -version = "0.1.0" +version = "1.0.0" edition = "2021" [dependencies] diff --git a/samples/rust/verify-winml/README.md b/samples/rust/verify-winml/README.md index b7d85579..add24a17 100644 --- a/samples/rust/verify-winml/README.md +++ b/samples/rust/verify-winml/README.md @@ -1,7 +1,8 @@ # Verify WinML 2.0 Execution Providers (Rust) This sample verifies that WinML 2.0 execution providers are correctly discovered, -downloaded, and registered using the Foundry Local Rust SDK. +downloaded, and registered using the Foundry Local Rust SDK. It uses registered WinML +EP-backed model variants and finishes with one native streaming chat check. 
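The Rust diff below carries the same stateful progress closure the other samples gained in this patch: overwrite a single console line while one EP keeps downloading, and start a fresh line when the EP name changes or the percentage resets (a new download starting). A standalone Python sketch of that rendering pattern; the EP names in the demo are placeholders, not real provider names:

```python
import sys

def make_ep_progress_printer():
    """Build an (ep_name, percent) callback that renders one line per EP."""
    state = {"ep": None, "percent": -1.0}

    def on_progress(ep_name: str, percent: float) -> None:
        # A changed EP name, or a percentage that went backwards, means a
        # new download started: finish the previous progress line first.
        if state["ep"] is not None and (state["ep"] != ep_name or percent < state["percent"]):
            sys.stdout.write("\n")
        state["ep"] = ep_name
        state["percent"] = percent
        sys.stdout.write(f"\r  Downloading {ep_name}: {percent:.1f}%")
        sys.stdout.flush()

    return on_progress

if __name__ == "__main__":
    cb = make_ep_progress_printer()
    for pct in (0.0, 48.5, 100.0):
        cb("ExampleEpA", pct)  # placeholder EP name
    for pct in (0.0, 100.0):
        cb("ExampleEpB", pct)  # placeholder EP name
    print()
```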
## Prerequisites diff --git a/samples/rust/verify-winml/src/main.rs b/samples/rust/verify-winml/src/main.rs index d1e2c281..aaaba62a 100644 --- a/samples/rust/verify-winml/src/main.rs +++ b/samples/rust/verify-winml/src/main.rs @@ -1,13 +1,14 @@ /// Foundry Local SDK - WinML 2.0 EP Verification (Rust) /// /// Verifies: -/// 1. WinML execution providers are discovered and registered -/// 2. GPU models appear in catalog after EP registration -/// 3. Streaming chat completions work on a WinML-accelerated model +/// 1. Execution providers are discovered and registered +/// 2. Accelerated models appear in catalog after EP registration +/// 3. Streaming chat completions work on an accelerated model use foundry_local_sdk::{ ChatCompletionRequestMessage, ChatCompletionRequestSystemMessage, - ChatCompletionRequestUserMessage, DeviceType, FoundryLocalConfig, FoundryLocalManager, + ChatCompletionRequestUserMessage, DeviceType, FoundryLocalConfig, + FoundryLocalManager, Model, }; use std::io::{self, Write}; use tokio_stream::StreamExt; @@ -15,10 +16,14 @@ use tokio_stream::StreamExt; const PASS: &str = "\x1b[92m[PASS]\x1b[0m"; const FAIL: &str = "\x1b[91m[FAIL]\x1b[0m"; const INFO: &str = "\x1b[94m[INFO]\x1b[0m"; +const WARN: &str = "\x1b[93m[WARN]\x1b[0m"; -fn is_winml_ep(name: &str) -> bool { - let lower = name.to_lowercase(); - lower.contains("winml") || lower.contains("dml") +fn is_accelerated_variant(model: &Model) -> bool { + model.info() + .runtime + .as_ref() + .map(|rt| matches!(rt.device_type, DeviceType::GPU | DeviceType::NPU)) + .unwrap_or(false) } #[tokio::main] @@ -38,29 +43,53 @@ async fn main() -> anyhow::Result<()> { println!(" Step 1: Discover & Register Execution Providers"); println!("{}\n", "=".repeat(60)); - match manager.discover_eps() { + let eps = match manager.discover_eps() { Ok(eps) => { println!("{INFO} Discovered {} execution providers:", eps.len()); for ep in &eps { println!(" - {:<40} Registered: {}", ep.name, ep.is_registered); } + let detail = format!("{} EP(s) found", eps.len()); println!("{PASS} EP Discovery - {detail}"); results.push(("EP Discovery", true)); + eps } Err(e) => { println!("{FAIL} EP Discovery - {e}"); results.push(("EP Discovery", false)); + Vec::new() } + }; + + if eps.is_empty() { + let detail = "No execution providers discovered on this machine"; + println!("{FAIL} EP Download & Registration - {detail}"); + println!("\n{FAIL} {detail}."); + results.push(("EP Download & Registration", false)); + print_summary(&results); + return Ok(()); } - match manager - .download_and_register_eps_with_progress(None, |ep_name: &str, percent: f64| { + match manager.download_and_register_eps_with_progress(None, { + let mut last_progress_ep: Option = None; + let mut last_progress_percent = -1.0f64; + + move |ep_name: &str, percent: f64| { + if last_progress_ep + .as_ref() + .map(|current| current != ep_name || percent < last_progress_percent) + .unwrap_or(false) + { + println!(); + } + + last_progress_ep = Some(ep_name.to_string()); + last_progress_percent = percent; print!("\r Downloading {ep_name}: {percent:.1}%"); io::stdout().flush().ok(); - }) - .await - { + } + }).await { Ok(result) => { println!(); println!( @@ -73,63 +102,86 @@ async fn main() -> anyhow::Result<()> { if !result.failed_eps.is_empty() { println!(" Failed: {}", result.failed_eps.join(", ")); } - let status = if result.success { PASS } else { FAIL }; - println!("{status} EP Download & Registration"); - results.push(("EP Download & Registration", result.success)); + + let download_ok = 
result.success || !result.registered_eps.is_empty(); + let status = if download_ok { PASS } else { FAIL }; + let detail = if download_ok && !result.registered_eps.is_empty() { + format!("{} EP(s) registered", result.registered_eps.len()) + } else { + result.status.clone() + }; + println!("{status} EP Download & Registration - {detail}"); + results.push(("EP Download & Registration", download_ok)); + + if !download_ok { + print_summary(&results); + return Ok(()); + } } Err(e) => { println!(); println!("{FAIL} EP Download & Registration - {e}"); results.push(("EP Download & Registration", false)); + print_summary(&results); + return Ok(()); } } - // ── 2. List Models & Find GPU/WinML Variants ─────────────── + // ── 2. List Models & Find Accelerated Variants ──────────── println!("\n{}", "=".repeat(60)); - println!(" Step 2: Model Catalog - GPU/WinML Models"); + println!(" Step 2: Model Catalog - Accelerated Models"); println!("{}\n", "=".repeat(60)); let models = manager.catalog().get_models().await?; println!("{INFO} Total models in catalog: {}", models.len()); - let mut gpu_models = Vec::new(); + let mut accelerated_variants = Vec::new(); for model in &models { for variant in model.variants() { - if let Some(rt) = &variant.info().runtime { - if rt.device_type == DeviceType::GPU { - let ep = &rt.execution_provider; - println!(" - {:<50} EP: {ep}", variant.id()); - gpu_models.push(variant); - } + if is_accelerated_variant(variant.as_ref()) { + let device = variant + .info() + .runtime + .as_ref() + .map(|rt| format!("{:?}", rt.device_type)) + .unwrap_or_else(|| "?".to_string()); + let ep = variant + .info() + .runtime + .as_ref() + .map(|rt| rt.execution_provider.as_str()) + .unwrap_or("?"); + println!( + " - {:<50} Device: {:<3} EP: {}", + variant.id(), + device, + ep + ); + accelerated_variants.push(variant); } } } - println!("{INFO} GPU model variants: {}", gpu_models.len()); - let has_gpu = !gpu_models.is_empty(); - let status = if has_gpu { PASS } else { FAIL }; - println!("{status} Catalog - GPU models found - {} GPU variant(s)", gpu_models.len()); - results.push(("Catalog - GPU models found", has_gpu)); + println!("{INFO} Accelerated model variants: {}", accelerated_variants.len()); + let has_accelerated_models = !accelerated_variants.is_empty(); + let status = if has_accelerated_models { PASS } else { FAIL }; + println!( + "{status} Catalog - Accelerated models found - {} accelerated variant(s)", + accelerated_variants.len() + ); + results.push(("Catalog - Accelerated models found", has_accelerated_models)); - if gpu_models.is_empty() { - println!("\n{FAIL} No GPU models available. 
Cannot proceed with inference tests."); + if accelerated_variants.is_empty() { + println!("\n{FAIL} No accelerated model variants are available."); + println!("{WARN} Ensure the system has a compatible accelerator and matching model variants installed."); print_summary(&results); return Ok(()); } - // Prefer WinML variant, fall back to any GPU - let chosen = gpu_models - .iter() - .find(|v| { - v.info() - .runtime - .as_ref() - .map(|rt| is_winml_ep(&rt.execution_provider)) - .unwrap_or(false) - }) - .or(gpu_models.first()) - .unwrap(); - + let chosen = accelerated_variants + .first() + .cloned() + .expect("accelerated_variants is not empty"); let chosen_ep = chosen .info() .runtime @@ -143,7 +195,6 @@ async fn main() -> anyhow::Result<()> { println!(" Step 3: Download & Load Model"); println!("{}\n", "=".repeat(60)); - // Get the model by its parent alias let model = manager.catalog().get_model(chosen.alias()).await?; model.select_variant_by_id(chosen.id())?; @@ -175,7 +226,7 @@ async fn main() -> anyhow::Result<()> { match model.load().await { Ok(_) => { - println!("{PASS} Model Load - Loaded {}", chosen.id()); + println!("{PASS} Model Load - Loaded {}", chosen.id()); results.push(("Model Load", true)); } Err(e) => { @@ -204,7 +255,11 @@ async fn main() -> anyhow::Result<()> { while let Some(chunk) = stream.next().await { match chunk { Ok(c) => { - if let Some(text) = c.choices.first().and_then(|ch| ch.delta.content.as_deref()) { + if let Some(text) = c + .choices + .first() + .and_then(|ch| ch.delta.content.as_deref()) + { print!("{text}"); io::stdout().flush().ok(); full_response.push_str(text); @@ -220,7 +275,10 @@ async fn main() -> anyhow::Result<()> { println!(); let ok = !full_response.is_empty(); let status = if ok { PASS } else { FAIL }; - println!("{status} Streaming Chat - {} chars in {elapsed:.2}s", full_response.len()); + println!( + "{status} Streaming Chat - {} chars in {elapsed:.2}s", + full_response.len() + ); results.push(("Streaming Chat", ok)); } Err(e) => { From ff3ab25ed58115d28b3dfc884a5d6a0a0ff68e06 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Tue, 14 Apr 2026 12:40:50 -0500 Subject: [PATCH 76/83] Clarify Python environment cleanup Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/python/verify-winml/README.md | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/samples/python/verify-winml/README.md b/samples/python/verify-winml/README.md index a0b3dc17..a256fd7a 100644 --- a/samples/python/verify-winml/README.md +++ b/samples/python/verify-winml/README.md @@ -12,11 +12,15 @@ registered WinML EP. It finishes with one native streaming chat check. ## Setup -Use a fresh virtual environment for this sample. +Use a fresh virtual environment for the cleanest setup. + +If you want to reuse your existing Python environment instead, delete that +environment's `Lib\site-packages\foundry_local_core` directory before +reinstalling so stale native files are not left behind. 
`requirements.txt` already adds the ORT-Nightly Python feed and combines the public `foundry-local-sdk` package with the WinML 2.0 preview native packages, -so a plain install is enough: +so either install path is enough: ```bash python -m venv .venv @@ -24,6 +28,13 @@ python -m venv .venv pip install -r requirements.txt ``` +Or, after removing `Lib\site-packages\foundry_local_core` from your existing +Python environment: + +```bash +pip install -r requirements.txt +``` + ## Run ```bash From a216117c90717e62b9c3d84d066520a61a9dfcd9 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Wed, 15 Apr 2026 05:46:04 -0500 Subject: [PATCH 77/83] Stabilize WinML SDK samples Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/verify-winml/Program.cs | 12 +++- samples/js/verify-winml/README.md | 5 +- samples/js/verify-winml/app.js | 17 ++++- samples/js/verify-winml/package.json | 5 +- samples/python/verify-winml/src/app.py | 16 ++++- sdk/cs/src/Catalog.cs | 5 +- .../test/FoundryLocal.Tests/CatalogTests.cs | 66 +++++++++++++++++++ 7 files changed, 119 insertions(+), 7 deletions(-) diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index c7aa3f8e..563e8804 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -52,6 +52,16 @@ bool IsAcceleratedVariant(IModel model) return runtime != null && (runtime.DeviceType == DeviceType.GPU || runtime.DeviceType == DeviceType.NPU); } +int GetDevicePriority(IModel model) +{ + return model.Info?.Runtime?.DeviceType switch + { + DeviceType.GPU => 0, + DeviceType.NPU => 1, + _ => 2 + }; +} + CancellationToken ct = CancellationToken.None; // ── 0. Initialize FoundryLocalManager ────────────────────── @@ -173,7 +183,7 @@ bool IsAcceleratedVariant(IModel model) } } -var chosen = acceleratedVariants.FirstOrDefault(); +var chosen = acceleratedVariants.OrderBy(GetDevicePriority).FirstOrDefault(); LogResult("Catalog - Accelerated models found", chosen != null, chosen != null ? $"{acceleratedVariants.Count} accelerated variant(s)" : "No accelerated model variants"); diff --git a/samples/js/verify-winml/README.md b/samples/js/verify-winml/README.md index e187bc23..b7173d0b 100644 --- a/samples/js/verify-winml/README.md +++ b/samples/js/verify-winml/README.md @@ -12,8 +12,9 @@ WinML EP-backed model variants and finishes with one native streaming chat check ## Setup -`package.json` installs `foundry-local-sdk-winml`, which layers the WinML -preview core package onto the public JS SDK during install: +`package.json` installs the repo-local `foundry-local-sdk` package and then +runs its WinML installer script, so the sample always uses the current +branch's WinML artifact pins: ```bash npm install diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index 38886a7e..3f589727 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -34,6 +34,19 @@ function isAcceleratedVariant(variant) { return Boolean(runtime && ["GPU", "NPU"].includes(runtime.deviceType)); } +function getDevicePriority(variant) { + const deviceType = variant.info?.runtime?.deviceType; + if (deviceType === "GPU") { + return 0; + } + + if (deviceType === "NPU") { + return 1; + } + + return 2; +} + async function main() { // ── 0. 
Initialize FoundryLocalManager ────────────────────── printSeparator("Initialization"); @@ -133,7 +146,9 @@ async function main() { `${acceleratedVariants.length} accelerated variant(s)`, ); - const chosen = acceleratedVariants[0]; + const chosen = [...acceleratedVariants].sort( + (left, right) => getDevicePriority(left) - getDevicePriority(right), + )[0]; if (!chosen) { console.log(`\n${FAIL} No accelerated model variants are available.`); console.log(`${WARN} Ensure the system has a compatible accelerator and matching model variants installed.`); diff --git a/samples/js/verify-winml/package.json b/samples/js/verify-winml/package.json index 29220599..f8ba84ad 100644 --- a/samples/js/verify-winml/package.json +++ b/samples/js/verify-winml/package.json @@ -3,7 +3,10 @@ "version": "1.0.0", "type": "module", "main": "app.js", + "scripts": { + "postinstall": "node node_modules/foundry-local-sdk/script/install-winml.cjs" + }, "dependencies": { - "foundry-local-sdk-winml": "1.0.0" + "foundry-local-sdk": "file:../../../sdk/js" } } diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index 40044b98..3071a1cb 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -41,6 +41,20 @@ def is_accelerated_variant(variant) -> bool: return rt is not None and rt.device_type in (DeviceType.GPU, DeviceType.NPU) +def get_device_priority(variant) -> int: + rt = variant.info.runtime + if rt is None: + return 2 + + if rt.device_type == DeviceType.GPU: + return 0 + + if rt.device_type == DeviceType.NPU: + return 1 + + return 2 + + def main(): # ── 0. Initialize FoundryLocalManager ────────────────────── print_separator("Initialization") @@ -119,7 +133,7 @@ def ep_progress(ep_name: str, percent: float): log_result("Catalog - Accelerated models found", len(accelerated_variants) > 0, f"{len(accelerated_variants)} accelerated variant(s)") - chosen = accelerated_variants[0] if accelerated_variants else None + chosen = min(accelerated_variants, key=get_device_priority) if accelerated_variants else None if not chosen: print(f"\n{FAIL} No accelerated model variants are available.") diff --git a/sdk/cs/src/Catalog.cs b/sdk/cs/src/Catalog.cs index f33dcaff..e0c7c5f1 100644 --- a/sdk/cs/src/Catalog.cs +++ b/sdk/cs/src/Catalog.cs @@ -15,6 +15,7 @@ namespace Microsoft.AI.Foundry.Local; internal sealed class Catalog : ICatalog, IDisposable { + private readonly List _models = []; private readonly Dictionary _modelAliasToModel = new(); private readonly Dictionary _modelIdToModelVariant = new(); private DateTime _lastFetch; @@ -97,7 +98,7 @@ private async Task> ListModelsImplAsync(CancellationToken? ct = nul await UpdateModels(ct).ConfigureAwait(false); using var disposable = await _lock.LockAsync().ConfigureAwait(false); - return _modelAliasToModel.Values.OrderBy(m => m.Alias).Cast().ToList(); + return _models.Cast().ToList(); } private async Task> GetCachedModelsImplAsync(CancellationToken? ct = null) @@ -216,6 +217,7 @@ private async Task UpdateModels(CancellationToken? ct) using var disposable = await _lock.LockAsync().ConfigureAwait(false); // TODO: Do we need to clear this out, or can we just add new models? + _models.Clear(); _modelAliasToModel.Clear(); _modelIdToModelVariant.Clear(); @@ -227,6 +229,7 @@ private async Task UpdateModels(CancellationToken? 
ct) if (!existingModel) { value = new Model(variant, _logger); + _models.Add(value); _modelAliasToModel[modelInfo.Alias] = value; } else diff --git a/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs b/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs index d270ac15..500a6407 100644 --- a/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs +++ b/sdk/cs/test/FoundryLocal.Tests/CatalogTests.cs @@ -118,4 +118,70 @@ public async Task GetLatestVersion_Works() var result4 = await catalog.GetLatestVersionAsync(model); await Assert.That(result4).IsEqualTo(model); } + + [Test] + public async Task ListModelsAsync_PreservesCoreOrder() + { + var testModelInfos = new List + { + new() + { + Id = "z-last:1", + Name = "z-last", + Version = 1, + Alias = "z-last", + DisplayName = "Z Last", + ProviderType = "test", + Uri = "test://model/z", + ModelType = "ONNX", + Runtime = new Runtime { DeviceType = DeviceType.CPU, ExecutionProvider = "CPUExecutionProvider" }, + Cached = false + }, + new() + { + Id = "a-first:1", + Name = "a-first", + Version = 1, + Alias = "a-first", + DisplayName = "A First", + ProviderType = "test", + Uri = "test://model/a", + ModelType = "ONNX", + Runtime = new Runtime { DeviceType = DeviceType.CPU, ExecutionProvider = "CPUExecutionProvider" }, + Cached = false + }, + new() + { + Id = "m-middle:1", + Name = "m-middle", + Version = 1, + Alias = "m-middle", + DisplayName = "M Middle", + ProviderType = "test", + Uri = "test://model/m", + ModelType = "ONNX", + Runtime = new Runtime { DeviceType = DeviceType.CPU, ExecutionProvider = "CPUExecutionProvider" }, + Cached = false + } + }; + + var modelListJson = JsonSerializer.Serialize(testModelInfos, JsonSerializationContext.Default.ListModelInfo); + + var mockCoreInterop = new Mock(); + mockCoreInterop.Setup(x => x.ExecuteCommand("get_catalog_name", It.IsAny())) + .Returns(new ICoreInterop.Response { Data = "TestCatalog", Error = null }); + mockCoreInterop.Setup(x => x.ExecuteCommandAsync("get_model_list", It.IsAny(), It.IsAny())) + .ReturnsAsync(new ICoreInterop.Response { Data = modelListJson, Error = null }); + + var mockLoadManager = new Mock(); + + var catalog = await Catalog.CreateAsync(mockLoadManager.Object, mockCoreInterop.Object, + NullLogger.Instance, null); + var models = await catalog.ListModelsAsync(); + + await Assert.That(models).HasCount().EqualTo(3); + await Assert.That(models[0].Alias).IsEqualTo("z-last"); + await Assert.That(models[1].Alias).IsEqualTo("a-first"); + await Assert.That(models[2].Alias).IsEqualTo("m-middle"); + } } From 41c965b476cd7e067191c3dd55d8ac6b39737358 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Wed, 15 Apr 2026 10:27:46 -0500 Subject: [PATCH 78/83] Revert sample sort workaround Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/verify-winml/Program.cs | 12 +----------- samples/js/verify-winml/app.js | 17 +---------------- samples/python/verify-winml/src/app.py | 16 +--------------- 3 files changed, 3 insertions(+), 42 deletions(-) diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index 563e8804..c7aa3f8e 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -52,16 +52,6 @@ bool IsAcceleratedVariant(IModel model) return runtime != null && (runtime.DeviceType == DeviceType.GPU || runtime.DeviceType == DeviceType.NPU); } -int GetDevicePriority(IModel model) -{ - return model.Info?.Runtime?.DeviceType switch - { - DeviceType.GPU => 0, - DeviceType.NPU => 1, - _ => 2 - }; -} - CancellationToken ct = 
CancellationToken.None; // ── 0. Initialize FoundryLocalManager ────────────────────── @@ -183,7 +173,7 @@ int GetDevicePriority(IModel model) } } -var chosen = acceleratedVariants.OrderBy(GetDevicePriority).FirstOrDefault(); +var chosen = acceleratedVariants.FirstOrDefault(); LogResult("Catalog - Accelerated models found", chosen != null, chosen != null ? $"{acceleratedVariants.Count} accelerated variant(s)" : "No accelerated model variants"); diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index 3f589727..38886a7e 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -34,19 +34,6 @@ function isAcceleratedVariant(variant) { return Boolean(runtime && ["GPU", "NPU"].includes(runtime.deviceType)); } -function getDevicePriority(variant) { - const deviceType = variant.info?.runtime?.deviceType; - if (deviceType === "GPU") { - return 0; - } - - if (deviceType === "NPU") { - return 1; - } - - return 2; -} - async function main() { // ── 0. Initialize FoundryLocalManager ────────────────────── printSeparator("Initialization"); @@ -146,9 +133,7 @@ async function main() { `${acceleratedVariants.length} accelerated variant(s)`, ); - const chosen = [...acceleratedVariants].sort( - (left, right) => getDevicePriority(left) - getDevicePriority(right), - )[0]; + const chosen = acceleratedVariants[0]; if (!chosen) { console.log(`\n${FAIL} No accelerated model variants are available.`); console.log(`${WARN} Ensure the system has a compatible accelerator and matching model variants installed.`); diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index 3071a1cb..40044b98 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -41,20 +41,6 @@ def is_accelerated_variant(variant) -> bool: return rt is not None and rt.device_type in (DeviceType.GPU, DeviceType.NPU) -def get_device_priority(variant) -> int: - rt = variant.info.runtime - if rt is None: - return 2 - - if rt.device_type == DeviceType.GPU: - return 0 - - if rt.device_type == DeviceType.NPU: - return 1 - - return 2 - - def main(): # ── 0. 
Initialize FoundryLocalManager ────────────────────── print_separator("Initialization") @@ -133,7 +119,7 @@ def ep_progress(ep_name: str, percent: float): log_result("Catalog - Accelerated models found", len(accelerated_variants) > 0, f"{len(accelerated_variants)} accelerated variant(s)") - chosen = min(accelerated_variants, key=get_device_priority) if accelerated_variants else None + chosen = accelerated_variants[0] if accelerated_variants else None if not chosen: print(f"\n{FAIL} No accelerated model variants are available.") From fb0a07e040ab23aa51cfe91048ede2662327a733 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Wed, 15 Apr 2026 11:43:09 -0500 Subject: [PATCH 79/83] Fail partial EP registration in samples Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/cs/verify-winml/Program.cs | 2 +- samples/js/verify-winml/app.js | 2 +- samples/python/verify-winml/src/app.py | 11 ++++++++++- samples/rust/verify-winml/src/main.rs | 2 +- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index c7aa3f8e..91c4db51 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -134,7 +134,7 @@ bool IsAcceleratedVariant(IModel model) Console.WriteLine($" Failed: {string.Join(", ", epResult.FailedEps)}"); } - var downloadOk = epResult.Success || epResult.RegisteredEps?.Any() == true; + var downloadOk = epResult.Success; var detail = downloadOk && epResult.RegisteredEps?.Any() == true ? $"{epResult.RegisteredEps.Length} EP(s) registered" : epResult.Status; diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index 38886a7e..0fae5f52 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -88,7 +88,7 @@ async function main() { console.log(` Failed: ${result.failedEps.join(", ")}`); } - const downloadOk = result.success || (result.registeredEps?.length ?? 0) > 0; + const downloadOk = result.success; const detail = downloadOk && result.registeredEps?.length ? 
`${result.registeredEps.length} EP(s) registered` : result.status; diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index 40044b98..f60a1231 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -89,7 +89,16 @@ def ep_progress(ep_name: str, percent: float): print(f" Registered: {', '.join(result.registered_eps)}") if result.failed_eps: print(f" Failed: {', '.join(result.failed_eps)}") - log_result("EP Download & Registration", result.success) + download_ok = result.success + detail = ( + f"{len(result.registered_eps)} EP(s) registered" + if download_ok and result.registered_eps + else result.status + ) + log_result("EP Download & Registration", download_ok, detail) + if not download_ok: + _print_summary() + return except Exception as e: print() log_result("EP Download & Registration", False, str(e)) diff --git a/samples/rust/verify-winml/src/main.rs b/samples/rust/verify-winml/src/main.rs index aaaba62a..53aa0dbb 100644 --- a/samples/rust/verify-winml/src/main.rs +++ b/samples/rust/verify-winml/src/main.rs @@ -103,7 +103,7 @@ async fn main() -> anyhow::Result<()> { println!(" Failed: {}", result.failed_eps.join(", ")); } - let download_ok = result.success || !result.registered_eps.is_empty(); + let download_ok = result.success; let status = if download_ok { PASS } else { FAIL }; let detail = if download_ok && !result.registered_eps.is_empty() { format!("{} EP(s) registered", result.registered_eps.len()) From b597d6fcf5470585986f6cb41841e646de9a7148 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Mon, 20 Apr 2026 13:12:23 -0500 Subject: [PATCH 80/83] Restore WinML bootstrap auto-detection Restore the bootstrap auto-detection behavior that 4a7dde2 removed across the C#, JS, Python, and Rust SDKs, while leaving the C# WinML minimum-version alignment intact. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/cs/src/Detail/CoreInterop.cs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/sdk/cs/src/Detail/CoreInterop.cs b/sdk/cs/src/Detail/CoreInterop.cs index 7239a48e..ef27e9e8 100644 --- a/sdk/cs/src/Detail/CoreInterop.cs +++ b/sdk/cs/src/Detail/CoreInterop.cs @@ -59,6 +59,14 @@ internal CoreInterop(Configuration config, ILogger logger) var request = new CoreInteropRequest { Params = config.AsDictionary() }; PrepareWinMLBootstrap(request); +#if IS_WINML + // WinML builds require bootstrapping the Windows App Runtime + if (!request.Params.ContainsKey("Bootstrap")) + { + request.Params["Bootstrap"] = "true"; + } +#endif + var response = ExecuteCommand("initialize", request); if (response.Error != null) From 3fffe15436246bfd8a9f2fd91becd328479863d5 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Sun, 3 May 2026 00:23:06 -0500 Subject: [PATCH 81/83] Lower C# WinML target framework Align the C# SDK WinML package and test project target frameworks with the WinML GA minimum Windows version. 
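For reference, the bootstrap defaulting restored in `CoreInterop.cs` above boils down to a guarded default: WinML builds request the Windows App Runtime bootstrap unless the caller already supplied an explicit `Bootstrap` setting. A Python paraphrase, assuming a plain dict of init params purely for illustration:

```python
IS_WINML = True  # stands in for the C# IS_WINML compile-time define

def prepare_init_params(params: dict) -> dict:
    """Default the Bootstrap setting for WinML builds without clobbering
    an explicit caller choice (mirrors the ContainsKey check in C#)."""
    if IS_WINML:
        params.setdefault("Bootstrap", "true")
    return params

if __name__ == "__main__":
    assert prepare_init_params({})["Bootstrap"] == "true"
    assert prepare_init_params({"Bootstrap": "false"})["Bootstrap"] == "false"
```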
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 4 ++-- .../Microsoft.AI.Foundry.Local.Tests.csproj | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj index 384b4415..2235821a 100644 --- a/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj +++ b/sdk/cs/src/Microsoft.AI.Foundry.Local.csproj @@ -43,9 +43,9 @@ --> - net8.0-windows10.0.26100.0 + net8.0-windows10.0.18362.0 win-x64;win-arm64 - 10.0.17763.0 + 10.0.18362.0 true $(DefineConstants);IS_WINML diff --git a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj index 5280da42..df1344d8 100644 --- a/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj +++ b/sdk/cs/test/FoundryLocal.Tests/Microsoft.AI.Foundry.Local.Tests.csproj @@ -24,8 +24,8 @@ - net8.0-windows10.0.26100.0; - 10.0.17763.0 + net8.0-windows10.0.18362.0; + 10.0.18362.0 None $(NETCoreSdkRuntimeIdentifier) From d0c91140736a2265ede93decc37fa59f7fd773bc Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Sun, 3 May 2026 10:39:57 -0500 Subject: [PATCH 82/83] Align WinML support after main rebase Lower remaining WinML build targets to the GA minimum, keep the C# bootstrap logic single-sourced, and auto-enable C++ bootstrap when the WinML bootstrap DLL is present. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- .pipelines/templates/build-core-steps.yml | 10 +++++----- samples/cs/verify-winml/VerifyWinML.csproj | 2 +- sdk/cpp/src/foundry_local_manager.cpp | 20 ++++++++++++++++++++ sdk/cs/src/Detail/CoreInterop.cs | 8 -------- sdk/python/requirements-winml.txt | 2 +- 5 files changed, 27 insertions(+), 15 deletions(-) diff --git a/.pipelines/templates/build-core-steps.yml b/.pipelines/templates/build-core-steps.yml index c21e0b92..6f0663a2 100644 --- a/.pipelines/templates/build-core-steps.yml +++ b/.pipelines/templates/build-core-steps.yml @@ -58,7 +58,7 @@ steps: inputs: command: restore projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' - restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.18362.0 /p:UseWinML=true' feedsToUse: config nugetConfigPath: '$(nsRoot)/nuget.config' @@ -67,14 +67,14 @@ steps: inputs: command: build projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' - arguments: '--no-restore -r ${{ parameters.flavor }} -f net9.0-windows10.0.26100.0 /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + arguments: '--no-restore -r ${{ parameters.flavor }} -f net9.0-windows10.0.18362.0 /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.18362.0 /p:UseWinML=true' - task: DotNetCoreCLI@2 displayName: 'Publish FLC AOT ${{ parameters.flavor }} (WinML)' inputs: command: publish projects: '$(nsRoot)/src/FoundryLocalCore/Core/Core.csproj' - arguments: '--no-restore --no-build -r ${{ parameters.flavor }} -f net9.0-windows10.0.26100.0 /p:Platform=${{ 
parameters.platform }} /p:Configuration=Release /p:PublishAot=true /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + arguments: '--no-restore --no-build -r ${{ parameters.flavor }} -f net9.0-windows10.0.18362.0 /p:Platform=${{ parameters.platform }} /p:Configuration=Release /p:PublishAot=true /p:NetTargetFramework=net9.0-windows10.0.18362.0 /p:UseWinML=true' publishWebProjects: false zipAfterPublish: false @@ -84,7 +84,7 @@ steps: inputs: command: restore projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' - restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + restoreArguments: '-r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.18362.0 /p:UseWinML=true' feedsToUse: config nugetConfigPath: '$(nsRoot)/nuget.config' @@ -93,7 +93,7 @@ steps: inputs: command: build projects: '$(nsRoot)/test/FoundryLocalCore/Core/FoundryLocalCore.Tests.csproj' - arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.26100.0 /p:UseWinML=true' + arguments: '--no-restore -r ${{ parameters.flavor }} /p:Platform=${{ parameters.platform }} /p:IncludeWebService=true /p:Configuration=Release /p:NetTargetFramework=net9.0-windows10.0.18362.0 /p:UseWinML=true' - task: DotNetCoreCLI@2 displayName: 'Test FLC ${{ parameters.flavor }} (WinML)' diff --git a/samples/cs/verify-winml/VerifyWinML.csproj b/samples/cs/verify-winml/VerifyWinML.csproj index 0eb65cf6..b31353cb 100644 --- a/samples/cs/verify-winml/VerifyWinML.csproj +++ b/samples/cs/verify-winml/VerifyWinML.csproj @@ -2,7 +2,7 @@ Exe - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 enable enable true diff --git a/sdk/cpp/src/foundry_local_manager.cpp b/sdk/cpp/src/foundry_local_manager.cpp index e24be049..1a25ff0b 100644 --- a/sdk/cpp/src/foundry_local_manager.cpp +++ b/sdk/cpp/src/foundry_local_manager.cpp @@ -5,8 +5,11 @@ #include #include #include +#include +#include #include +#include #include "foundry_local.h" #include "foundry_local_internal_core.h" @@ -17,6 +20,16 @@ namespace foundry_local { +namespace { + bool HasWinMLBootstrapDll() { + auto exePath = wil::GetModuleFileNameW(nullptr); + std::error_code ec; + return std::filesystem::exists( + std::filesystem::path(exePath.get()).parent_path() / L"Microsoft.WindowsAppRuntime.Bootstrap.dll", + ec); + } +} // namespace + std::unique_ptr Manager::instance_; void Manager::Create(Configuration configuration, ILogger* logger) { @@ -154,13 +167,20 @@ void Manager::Cleanup() noexcept { if (config_.web && config_.web->urls) { initReq.AddParam("WebServiceUrls", *config_.web->urls); } + bool hasBootstrapSetting = false; if (config_.additional_settings) { for (const auto& [key, value] : *config_.additional_settings) { if (!key.empty()) { + if (key == "Bootstrap") { + hasBootstrapSetting = true; + } initReq.AddParam(key, value); } } } + if (!hasBootstrapSetting && HasWinMLBootstrapDll()) { + initReq.AddParam("Bootstrap", "true"); + } std::string initJson = initReq.ToJson(); auto initResponse = core_->call(initReq.Command(), *logger_, &initJson); diff --git a/sdk/cs/src/Detail/CoreInterop.cs b/sdk/cs/src/Detail/CoreInterop.cs index ef27e9e8..7239a48e 100644 --- 
a/sdk/cs/src/Detail/CoreInterop.cs +++ b/sdk/cs/src/Detail/CoreInterop.cs @@ -59,14 +59,6 @@ internal CoreInterop(Configuration config, ILogger logger) var request = new CoreInteropRequest { Params = config.AsDictionary() }; PrepareWinMLBootstrap(request); -#if IS_WINML - // WinML builds require bootstrapping the Windows App Runtime - if (!request.Params.ContainsKey("Bootstrap")) - { - request.Params["Bootstrap"] = "true"; - } -#endif - var response = ExecuteCommand("initialize", request); if (response.Error != null) diff --git a/sdk/python/requirements-winml.txt b/sdk/python/requirements-winml.txt index e63815ba..5e4733cf 100644 --- a/sdk/python/requirements-winml.txt +++ b/sdk/python/requirements-winml.txt @@ -1,7 +1,7 @@ pydantic>=2.0.0 requests>=2.32.4 openai>=2.24.0 -# WinML native binary packages. +# WinML native binary packages from the ORT-Nightly PyPI feed. foundry-local-core-winml==1.0.0 onnxruntime-core==1.23.2.3 onnxruntime-genai-core==0.13.2 From b7c22c1a3df07f566f118fd3ff5e29d5e4c0ccd2 Mon Sep 17 00:00:00 2001 From: Bhagirath Mehta Date: Sun, 3 May 2026 17:01:40 -0500 Subject: [PATCH 83/83] Align WinML samples across SDKs Add a C++ WinML verifier sample and expose the C++ EP discovery/download APIs needed for parity with the other SDK samples. Update the existing WinML sample paths and C++ sample build instructions. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- samples/README.md | 1 + .../live-audio-transcription/CMakeLists.txt | 14 + .../cpp/live-audio-transcription/README.md | 34 +- samples/cpp/live-audio-transcription/main.cpp | 15 +- samples/cpp/verify-winml/CMakeLists.txt | 10 + samples/cpp/verify-winml/README.md | 41 ++ samples/cpp/verify-winml/main.cpp | 392 ++++++++++++++++++ samples/cs/Directory.Packages.props | 1 + .../AudioTranscriptionExample.csproj | 2 +- samples/cs/embeddings/Embeddings.csproj | 2 +- .../FoundryLocalWebServer.csproj | 2 +- .../LiveAudioTranscriptionExample.csproj | 2 +- .../ModelManagementExample.csproj | 2 +- .../NativeChatCompletions.csproj | 2 +- .../ToolCallingFoundryLocalSdk.csproj | 2 +- .../ToolCallingFoundryLocalWebServer.csproj | 2 +- .../TutorialChatAssistant.csproj | 2 +- .../TutorialDocumentSummarizer.csproj | 2 +- .../TutorialToolCalling.csproj | 2 +- .../TutorialVoiceToText.csproj | 2 +- samples/cs/verify-winml/Program.cs | 111 +++-- samples/js/verify-winml/app.js | 90 +++- samples/python/verify-winml/README.md | 8 +- samples/python/verify-winml/requirements.txt | 6 +- samples/python/verify-winml/src/app.py | 106 +++-- samples/rust/Cargo.toml | 1 + .../rust/live-audio-transcription/src/main.rs | 10 +- samples/rust/verify-winml/src/main.rs | 160 +++++-- sdk/cpp/CMakeLists.txt | 2 + sdk/cpp/include/catalog.h | 1 + sdk/cpp/include/foundry_local_manager.h | 25 ++ sdk/cpp/src/catalog.cpp | 7 +- sdk/cpp/src/core.h | 11 +- sdk/cpp/src/foundry_local_manager.cpp | 147 ++++++- sdk/js/script/install-winml.cjs | 20 +- 35 files changed, 1067 insertions(+), 170 deletions(-) create mode 100644 samples/cpp/live-audio-transcription/CMakeLists.txt create mode 100644 samples/cpp/verify-winml/CMakeLists.txt create mode 100644 samples/cpp/verify-winml/README.md create mode 100644 samples/cpp/verify-winml/main.cpp diff --git a/samples/README.md b/samples/README.md index bcac6bf3..ee1925e0 100644 --- a/samples/README.md +++ b/samples/README.md @@ -12,3 +12,4 @@ Explore complete working examples that demonstrate how to use Foundry Local — | [**JavaScript**](js/) | 13 | Node.js SDK samples including native chat, embeddings, audio 
transcription, Electron desktop app, Copilot SDK integration, LangChain, tool calling, web server, and tutorials. | | [**Python**](python/) | 10 | Python samples using the OpenAI-compatible API, including chat, embeddings, audio transcription, LangChain integration, tool calling, web server, and tutorials. | | [**Rust**](rust/) | 9 | Rust SDK samples including native chat, embeddings, audio transcription, tool calling, web server, and tutorials. | +| [**C++**](cpp/) | 2 | C++ SDK samples including WinML EP verification and live audio transcription. | diff --git a/samples/cpp/live-audio-transcription/CMakeLists.txt b/samples/cpp/live-audio-transcription/CMakeLists.txt new file mode 100644 index 00000000..ff6ed40d --- /dev/null +++ b/samples/cpp/live-audio-transcription/CMakeLists.txt @@ -0,0 +1,14 @@ +cmake_minimum_required(VERSION 3.20) + +project(LiveAudioTranscriptionSample LANGUAGES CXX) + +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +set(BUILD_TESTING OFF CACHE BOOL "Build C++ SDK tests" FORCE) +add_subdirectory("${CMAKE_CURRENT_LIST_DIR}/../../../sdk/cpp" "${CMAKE_CURRENT_BINARY_DIR}/sdk-cpp") + +add_executable(LiveAudioTranscriptionSample main.cpp) +target_compile_features(LiveAudioTranscriptionSample PRIVATE cxx_std_20) +target_link_libraries(LiveAudioTranscriptionSample PRIVATE CppSdk) diff --git a/samples/cpp/live-audio-transcription/README.md b/samples/cpp/live-audio-transcription/README.md index a9fca977..3059d6f6 100644 --- a/samples/cpp/live-audio-transcription/README.md +++ b/samples/cpp/live-audio-transcription/README.md @@ -9,20 +9,34 @@ available, falls back to synthetic PCM audio. ## Build -```bash -# With PortAudio (live microphone) -g++ -std=c++20 -DHAS_PORTAUDIO main.cpp -lfoundry_local -lportaudio -o live-audio-transcription-example +From this directory: -# Without PortAudio (synthetic audio only) -g++ -std=c++20 main.cpp -lfoundry_local -o live-audio-transcription-example +```powershell +cmake -S . -B out\build -G "Visual Studio 18 2026" -A x64 ` + -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_ROOT\scripts\buildsystems\vcpkg.cmake" ` + -DVCPKG_TARGET_TRIPLET=x64-windows-static-md + +cmake --build out\build --config Debug --target LiveAudioTranscriptionSample ``` +The C++ SDK loads `Microsoft.AI.Foundry.Local.Core.dll` from the executable +directory. Copy the Foundry Local native binaries next to +`LiveAudioTranscriptionSample.exe` before running the sample. + +The CMake project builds the synthetic-audio path by default. To use live +microphone capture, add PortAudio to your build, define `HAS_PORTAUDIO`, and +link PortAudio with `LiveAudioTranscriptionSample`. + +This sample requires a Foundry Local Core/catalog build that includes the live +audio streaming model `nemotron-speech-streaming-en-0.6b`. If that model is not +present in the catalog, the sample cannot run even though it builds successfully. 
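+To fail fast in your own code, the availability check this sample performs at
+startup can be lifted out as a preflight (a sketch reusing the sample's own SDK
+calls; the error message and `return 1` are illustrative):
+
+```cpp
+// Preflight sketch: assumes foundry_local::Manager::Create(...) already ran,
+// as in this sample's main.cpp.
+auto& catalog = foundry_local::Manager::Instance().GetCatalog();
+if (catalog.GetModel("nemotron-speech-streaming-en-0.6b") == nullptr) {
+    std::cerr << "This Core/catalog build lacks live audio streaming support.\n";
+    return 1;
+}
+```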
+ ## Run -```bash -# Live microphone (requires PortAudio) -./live-audio-transcription-example +```powershell +# Synthetic 440Hz sine wave +.\out\build\Debug\LiveAudioTranscriptionSample.exe --synth -# Synthetic 440Hz sine wave (no microphone needed) -./live-audio-transcription-example --synth +# Live microphone (requires a PortAudio-enabled build) +.\out\build\Debug\LiveAudioTranscriptionSample.exe ``` diff --git a/samples/cpp/live-audio-transcription/main.cpp b/samples/cpp/live-audio-transcription/main.cpp index 1a3341e4..532b97cc 100644 --- a/samples/cpp/live-audio-transcription/main.cpp +++ b/samples/cpp/live-audio-transcription/main.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,8 @@ namespace { +constexpr std::string_view kLiveAudioModelAlias = "nemotron-speech-streaming-en-0.6b"; + // Global flag for Ctrl+C graceful shutdown (mirrors JS process.on('SIGINT')) std::atomic g_running{true}; @@ -117,17 +120,19 @@ int main(int argc, char* argv[]) { std::cout << "===========================================================" << std::endl; std::cout << std::endl; - foundry_local::Configuration config; - config.appName = "foundry_local_samples"; + foundry_local::Configuration config{"foundry_local_samples"}; foundry_local::Manager::Create(config); auto& manager = foundry_local::Manager::Instance(); manager.EnsureEpsDownloaded(); auto& catalog = manager.GetCatalog(); - auto* model = catalog.GetModel("nemotron-speech-streaming-en-0.6b"); + auto* model = catalog.GetModel(kLiveAudioModelAlias); if (!model) { - throw std::runtime_error("Model \"nemotron-speech-streaming-en-0.6b\" not found in catalog"); + throw std::runtime_error( + "Live audio model \"" + std::string(kLiveAudioModelAlias) + + "\" was not found in the catalog. Use a Foundry Local Core/catalog build " + "that includes live audio streaming support."); } std::cout << "Downloading model (if needed)..." << std::endl; @@ -139,8 +144,6 @@ int main(int argc, char* argv[]) { model->Load(); std::cout << "Model loaded" << std::endl; - // NOTE: CreateLiveTranscriptionSession() is not yet available in the C++ SDK. - // The audio client and session code below is forward-looking. foundry_local::OpenAIAudioClient audioClient(*model); auto session = audioClient.CreateLiveTranscriptionSession(); diff --git a/samples/cpp/verify-winml/CMakeLists.txt b/samples/cpp/verify-winml/CMakeLists.txt new file mode 100644 index 00000000..fa858ea4 --- /dev/null +++ b/samples/cpp/verify-winml/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 3.20) + +project(VerifyWinMLCpp LANGUAGES CXX) + +set(BUILD_TESTING OFF CACHE BOOL "Build C++ SDK tests" FORCE) +add_subdirectory("${CMAKE_CURRENT_LIST_DIR}/../../../sdk/cpp" "${CMAKE_CURRENT_BINARY_DIR}/sdk-cpp") + +add_executable(VerifyWinML main.cpp) +target_compile_features(VerifyWinML PRIVATE cxx_std_17) +target_link_libraries(VerifyWinML PRIVATE CppSdk) diff --git a/samples/cpp/verify-winml/README.md b/samples/cpp/verify-winml/README.md new file mode 100644 index 00000000..675fbdb2 --- /dev/null +++ b/samples/cpp/verify-winml/README.md @@ -0,0 +1,41 @@ +# Verify WinML 2.0 Execution Providers (C++) + +This sample verifies that WinML 2.0 execution providers are correctly +discovered, downloaded, and registered using the Foundry Local C++ SDK. It then +uses registered WinML EP-backed model variants and finishes with one native +streaming chat check. 
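+At its simplest, the EP half of that flow is two calls on the manager (a
+minimal sketch using the `DiscoverEps`/`DownloadAndRegisterEps` APIs this patch
+adds; error handling and the model/chat steps are omitted):
+
+```cpp
+#include "foundry_local.h"
+#include <iostream>
+#include <string_view>
+
+int main() {
+    foundry_local::Configuration config{"verify_winml"};
+    foundry_local::Manager::Create(config);
+    auto& manager = foundry_local::Manager::Instance();
+
+    // 1. List execution providers and their current registration state.
+    for (const auto& ep : manager.DiscoverEps()) {
+        std::cout << ep.name << " registered=" << std::boolalpha << ep.is_registered << '\n';
+    }
+
+    // 2. Download and register every available EP, reporting progress.
+    auto result = manager.DownloadAndRegisterEps(
+        [](std::string_view name, double percent) {
+            std::cout << '\r' << name << ": " << percent << '%' << std::flush;
+        });
+    std::cout << "\nsuccess=" << std::boolalpha << result.success
+              << " status=" << result.status << '\n';
+
+    foundry_local::Manager::Destroy();
+    return result.success ? 0 : 1;
+}
+```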
+ +## Prerequisites + +- Windows with a compatible GPU or NPU +- Windows App SDK 2.0 runtime installed +- A Foundry Local WinML native runtime copied next to the sample executable + +The C++ SDK loads `Microsoft.AI.Foundry.Local.Core.dll` from the executable +directory. Build or install a WinML-enabled SDK/runtime first, then copy the +WinML native binaries next to `VerifyWinML.exe` before running the sample. + +## Build + +From this directory: + +```powershell +cmake -S . -B out\build -G "Visual Studio 18 2026" -A x64 ` + -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_ROOT\scripts\buildsystems\vcpkg.cmake" ` + -DVCPKG_TARGET_TRIPLET=x64-windows-static-md + +cmake --build out\build --config Debug --target VerifyWinML +``` + +## Run + +```powershell +.\out\build\Debug\VerifyWinML.exe +``` + +## What it tests + +1. **EP Discovery** - Lists all available execution providers. +2. **EP Download & Registration** - Downloads and registers the available WinML EPs. +3. **Model Catalog** - Lists text model variants backed by registered accelerated EPs. +4. **Streaming Chat** - Runs streaming chat completion on a WinML EP-backed model via the native C++ SDK. diff --git a/samples/cpp/verify-winml/main.cpp b/samples/cpp/verify-winml/main.cpp new file mode 100644 index 00000000..6b3570ac --- /dev/null +++ b/samples/cpp/verify-winml/main.cpp @@ -0,0 +1,392 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// +// Foundry Local SDK - WinML 2.0 EP Verification (C++) +// +// Verifies: +// 1. Execution providers are discovered and registered. +// 2. Accelerated models appear in the catalog after EP registration. +// 3. Streaming chat completions work on an accelerated model. + +#include "foundry_local.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace { + +constexpr std::string_view PASS = "[PASS]"; +constexpr std::string_view FAIL = "[FAIL]"; +constexpr std::string_view INFO = "[INFO]"; +constexpr std::string_view WARN = "[WARN]"; + +class StdLogger final : public foundry_local::ILogger { +public: + void Log(foundry_local::LogLevel level, std::string_view message) noexcept override { + if (level == foundry_local::LogLevel::Warning) { + std::cout << "[FoundryLocal][WARN] " << message << '\n'; + } else if (level == foundry_local::LogLevel::Error) { + std::cout << "[FoundryLocal][ERROR] " << message << '\n'; + } + } +}; + +struct TestResults { + std::vector> results; + + void Add(std::string name, bool passed, const std::string& detail = {}) { + std::cout << (passed ? PASS : FAIL) << ' ' << name; + if (!detail.empty()) { + std::cout << " - " << detail; + } + std::cout << '\n'; + results.emplace_back(std::move(name), passed); + } + + void PrintSummary() const { + PrintSeparator("Summary"); + auto passed = std::count_if(results.begin(), results.end(), [](const auto& result) { + return result.second; + }); + + for (const auto& [name, ok] : results) { + std::cout << " " << (ok ? 
"PASS " : "FAIL ") << name << '\n'; + } + + std::cout << "\n " << passed << '/' << results.size() << " tests passed\n"; + } + + bool AllPassed() const { + return !results.empty() && + std::all_of(results.begin(), results.end(), [](const auto& result) { + return result.second; + }); + } + + static void PrintSeparator(std::string_view title) { + std::cout << "\n" << std::string(60, '=') << '\n'; + std::cout << " " << title << '\n'; + std::cout << std::string(60, '=') << "\n\n"; + } +}; + +struct Candidate { + foundry_local::IModel* model = nullptr; + foundry_local::ModelInfo info; +}; + +std::string ToLower(std::string value) { + std::transform(value.begin(), value.end(), value.begin(), [](unsigned char ch) { + return static_cast(std::tolower(ch)); + }); + return value; +} + +std::string DeviceTypeName(foundry_local::DeviceType deviceType) { + switch (deviceType) { + case foundry_local::DeviceType::CPU: + return "CPU"; + case foundry_local::DeviceType::GPU: + return "GPU"; + case foundry_local::DeviceType::NPU: + return "NPU"; + default: + return "?"; + } +} + +bool IsAcceleratedVariant(const foundry_local::ModelInfo& info) { + if (!info.runtime) { + return false; + } + + return info.runtime->device_type == foundry_local::DeviceType::GPU || + info.runtime->device_type == foundry_local::DeviceType::NPU; +} + +int VariantScore(const foundry_local::ModelInfo& info) { + const auto id = ToLower(info.id); + auto score = info.runtime && info.runtime->device_type == foundry_local::DeviceType::NPU ? 10000 : 0; + + if (id.find("whisper") != std::string::npos) { + score += 5000; + } + if (id.find("reasoning") != std::string::npos || + id.find("deepseek-r1") != std::string::npos || + id.find("gpt-oss") != std::string::npos) { + score += 2000; + } + + if (id.find("0.5b") != std::string::npos) { + score += 0; + } else if (id.find("1.5b") != std::string::npos) { + score += 100; + } else if (id.find("3b") != std::string::npos) { + score += 300; + } else if (id.find("7b") != std::string::npos) { + score += 700; + } else if (id.find("14b") != std::string::npos) { + score += 1400; + } else if (id.find("20b") != std::string::npos) { + score += 2000; + } else { + score += 500; + } + + return score; +} + +std::vector FindAcceleratedVariants(foundry_local::Catalog& catalog) { + std::vector candidates; + + for (const auto* modelBase : catalog.ListModels()) { + const auto* model = dynamic_cast(modelBase); + if (!model) { + continue; + } + + for (const auto& variant : model->GetAllModelVariants()) { + const auto& info = variant.GetInfo(); + if (!IsAcceleratedVariant(info)) { + continue; + } + + auto* candidateModel = catalog.GetModelVariant(variant.GetId()); + if (!candidateModel) { + continue; + } + + candidates.push_back(Candidate{candidateModel, info}); + } + } + + std::sort(candidates.begin(), candidates.end(), [](const Candidate& lhs, const Candidate& rhs) { + return VariantScore(lhs.info) < VariantScore(rhs.info); + }); + + return candidates; +} + +} // namespace + +int main() { + TestResults results; + StdLogger logger; + foundry_local::IModel* chosen = nullptr; + + try { + TestResults::PrintSeparator("Initialization"); + foundry_local::Configuration config{"verify_winml"}; + config.log_level = foundry_local::LogLevel::Information; + + foundry_local::Manager::Create(config, &logger); + auto& manager = foundry_local::Manager::Instance(); + std::cout << INFO << " FoundryLocalManager initialized.\n"; + + TestResults::PrintSeparator("Step 1: Discover & Register Execution Providers"); + std::vector eps; + try { + 
eps = manager.DiscoverEps(); + std::cout << INFO << " Discovered " << eps.size() << " execution providers:\n"; + for (const auto& ep : eps) { + std::cout << " - " << std::left << std::setw(40) << ep.name + << " Registered: " << (ep.is_registered ? "true" : "false") << '\n'; + } + results.Add("EP Discovery", true, std::to_string(eps.size()) + " EP(s) found"); + } catch (const std::exception& e) { + results.Add("EP Discovery", false, e.what()); + } + + if (eps.empty()) { + const std::string detail = "No execution providers discovered on this machine"; + results.Add("EP Download & Registration", false, detail); + std::cout << '\n' << FAIL << ' ' << detail << ".\n"; + results.PrintSummary(); + foundry_local::Manager::Destroy(); + return 1; + } + + try { + std::string currentProgressEp; + auto currentProgressPercent = -1.0; + + auto epResult = manager.DownloadAndRegisterEps( + [&](std::string_view epName, double percent) { + if (!currentProgressEp.empty() && + (currentProgressEp != epName || percent < currentProgressPercent)) { + std::cout << '\n'; + } + + currentProgressEp = std::string(epName); + currentProgressPercent = percent; + std::cout << "\r Downloading " << currentProgressEp << ": " + << std::fixed << std::setprecision(1) << percent << '%' << std::flush; + }); + + if (!currentProgressEp.empty()) { + std::cout << '\n'; + } + + std::cout << INFO << " EP registration: success=" << (epResult.success ? "true" : "false") + << ", status=" << epResult.status << '\n'; + if (!epResult.registered_eps.empty()) { + std::cout << " Registered:"; + for (const auto& name : epResult.registered_eps) { + std::cout << ' ' << name; + } + std::cout << '\n'; + } + if (!epResult.failed_eps.empty()) { + std::cout << " Failed:"; + for (const auto& name : epResult.failed_eps) { + std::cout << ' ' << name; + } + std::cout << '\n'; + } + + auto detail = epResult.success && !epResult.registered_eps.empty() + ? std::to_string(epResult.registered_eps.size()) + " EP(s) registered" + : epResult.status; + results.Add("EP Download & Registration", epResult.success, detail); + if (!epResult.success) { + results.PrintSummary(); + foundry_local::Manager::Destroy(); + return 1; + } + } catch (const std::exception& e) { + std::cout << '\n'; + results.Add("EP Download & Registration", false, e.what()); + results.PrintSummary(); + foundry_local::Manager::Destroy(); + return 1; + } + + TestResults::PrintSeparator("Step 2: Model Catalog - Accelerated Models"); + auto& catalog = manager.GetCatalog(); + auto models = catalog.ListModels(); + auto acceleratedVariants = FindAcceleratedVariants(catalog); + + std::cout << INFO << " Total models in catalog: " << models.size() << '\n'; + for (const auto& candidate : acceleratedVariants) { + const auto& runtime = *candidate.info.runtime; + std::cout << " - " << std::left << std::setw(50) << candidate.info.id + << " Device: " << std::setw(3) << DeviceTypeName(runtime.device_type) + << " EP: " << runtime.execution_provider << '\n'; + } + + results.Add("Catalog - Accelerated models found", !acceleratedVariants.empty(), + acceleratedVariants.empty() + ? 
"No accelerated model variants" + : std::to_string(acceleratedVariants.size()) + " accelerated variant(s)"); + if (acceleratedVariants.empty()) { + std::cout << '\n' << FAIL << " No accelerated model variants are available.\n"; + std::cout << WARN << " Ensure the system has a compatible accelerator and matching model variants installed.\n"; + results.PrintSummary(); + foundry_local::Manager::Destroy(); + return 1; + } + + TestResults::PrintSeparator("Step 3: Download & Load Model"); + bool downloadedAny = false; + std::string lastLoadError; + + for (const auto& candidate : acceleratedVariants) { + const auto& ep = candidate.info.runtime ? candidate.info.runtime->execution_provider : "unknown"; + std::cout << '\n' << INFO << " Trying model: " << candidate.info.id << " (EP: " << ep << ")\n"; + + try { + candidate.model->Download([](float progress) { + std::cout << "\r Downloading model: " << std::fixed << std::setprecision(1) + << progress << '%' << std::flush; + }); + std::cout << '\n'; + downloadedAny = true; + } catch (const std::exception& e) { + std::cout << '\n' << WARN << " Skipping " << candidate.info.id + << ": download failed: " << e.what() << '\n'; + lastLoadError = e.what(); + continue; + } + + try { + candidate.model->Load(); + chosen = candidate.model; + break; + } catch (const std::exception& e) { + std::cout << WARN << " Skipping " << candidate.info.id + << ": load failed: " << e.what() << '\n'; + lastLoadError = e.what(); + } + } + + results.Add("Model Download", downloadedAny, + downloadedAny ? "At least one accelerated variant downloaded" + : (lastLoadError.empty() ? "No accelerated variant could be downloaded" : lastLoadError)); + + if (!chosen) { + results.Add("Model Load", false, + lastLoadError.empty() ? "No accelerated variant could be loaded on this machine" : lastLoadError); + results.PrintSummary(); + foundry_local::Manager::Destroy(); + return 1; + } + + results.Add("Model Load", true, "Loaded " + chosen->GetId()); + + TestResults::PrintSeparator("Step 4: Streaming Chat Completions"); + try { + foundry_local::OpenAIChatClient chat(*chosen); + std::vector messages = { + {"system", "You are a helpful assistant."}, + {"user", "What is 2 + 2? Reply with just the number."}, + }; + foundry_local::ChatSettings settings; + settings.temperature = 0.0f; + settings.max_tokens = 16; + + std::string fullResponse; + const auto start = std::chrono::steady_clock::now(); + chat.CompleteChatStreaming(messages, settings, [&](const foundry_local::ChatCompletionCreateResponse& chunk) { + if (chunk.choices.empty()) { + return; + } + + const auto& choice = chunk.choices[0]; + if (choice.delta && !choice.delta->content.empty()) { + std::cout << choice.delta->content << std::flush; + fullResponse += choice.delta->content; + } + }); + const auto elapsed = std::chrono::duration(std::chrono::steady_clock::now() - start).count(); + std::cout << '\n'; + + results.Add("Streaming Chat", !fullResponse.empty(), + std::to_string(fullResponse.size()) + " chars in " + std::to_string(elapsed) + "s"); + } catch (const std::exception& e) { + results.Add("Streaming Chat", false, e.what()); + } + + try { + chosen->Unload(); + std::cout << INFO << " Model unloaded.\n"; + } catch (const std::exception& e) { + std::cout << WARN << " Failed to unload model: " << e.what() << '\n'; + } + + results.PrintSummary(); + foundry_local::Manager::Destroy(); + return results.AllPassed() ? 
0 : 1; + } catch (const std::exception& e) { + std::cerr << FAIL << " " << e.what() << '\n'; + foundry_local::Manager::Destroy(); + return 1; + } +} diff --git a/samples/cs/Directory.Packages.props b/samples/cs/Directory.Packages.props index 77b68c4c..83ebec18 100644 --- a/samples/cs/Directory.Packages.props +++ b/samples/cs/Directory.Packages.props @@ -6,6 +6,7 @@ + diff --git a/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj b/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj index bd42e38b..5badd1c9 100644 --- a/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj +++ b/samples/cs/audio-transcription-example/AudioTranscriptionExample.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/embeddings/Embeddings.csproj b/samples/cs/embeddings/Embeddings.csproj index 4d948c56..0faa7f31 100644 --- a/samples/cs/embeddings/Embeddings.csproj +++ b/samples/cs/embeddings/Embeddings.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj b/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj index fe890be2..da4fd798 100644 --- a/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj +++ b/samples/cs/foundry-local-web-server/FoundryLocalWebServer.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj b/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj index 3d91b677..7e67214b 100644 --- a/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj +++ b/samples/cs/live-audio-transcription/LiveAudioTranscriptionExample.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/model-management-example/ModelManagementExample.csproj b/samples/cs/model-management-example/ModelManagementExample.csproj index 4d948c56..0faa7f31 100644 --- a/samples/cs/model-management-example/ModelManagementExample.csproj +++ b/samples/cs/model-management-example/ModelManagementExample.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/native-chat-completions/NativeChatCompletions.csproj b/samples/cs/native-chat-completions/NativeChatCompletions.csproj index 4d948c56..0faa7f31 100644 --- a/samples/cs/native-chat-completions/NativeChatCompletions.csproj +++ b/samples/cs/native-chat-completions/NativeChatCompletions.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj b/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj index 4d948c56..0faa7f31 100644 --- a/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj +++ b/samples/cs/tool-calling-foundry-local-sdk/ToolCallingFoundryLocalSdk.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj b/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj index fe890be2..da4fd798 100644 --- 
a/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj +++ b/samples/cs/tool-calling-foundry-local-web-server/ToolCallingFoundryLocalWebServer.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj b/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj index a3533047..996b3675 100644 --- a/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj +++ b/samples/cs/tutorial-chat-assistant/TutorialChatAssistant.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj b/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj index a3533047..996b3675 100644 --- a/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj +++ b/samples/cs/tutorial-document-summarizer/TutorialDocumentSummarizer.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj b/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj index a3533047..996b3675 100644 --- a/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj +++ b/samples/cs/tutorial-tool-calling/TutorialToolCalling.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj b/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj index a3533047..996b3675 100644 --- a/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj +++ b/samples/cs/tutorial-voice-to-text/TutorialVoiceToText.csproj @@ -8,7 +8,7 @@ - net9.0-windows10.0.26100 + net9.0-windows10.0.18362.0 false ARM64;x64 None diff --git a/samples/cs/verify-winml/Program.cs b/samples/cs/verify-winml/Program.cs index 91c4db51..69caf2d5 100644 --- a/samples/cs/verify-winml/Program.cs +++ b/samples/cs/verify-winml/Program.cs @@ -9,8 +9,7 @@ using Microsoft.AI.Foundry.Local; using Microsoft.Extensions.Logging; -using FoundryChatMessage = Microsoft.AI.Foundry.Local.OpenAI.ChatMessage; -using FoundryChatMessageRole = Microsoft.AI.Foundry.Local.OpenAI.ChatMessageRole; +using Betalgo.Ranul.OpenAI.ObjectModels.RequestModels; const string PASS = "\x1b[92m[PASS]\x1b[0m"; const string FAIL = "\x1b[91m[FAIL]\x1b[0m"; @@ -52,6 +51,33 @@ bool IsAcceleratedVariant(IModel model) return runtime != null && (runtime.DeviceType == DeviceType.GPU || runtime.DeviceType == DeviceType.NPU); } +int GetVariantScore(IModel model) +{ + var id = model.Id.ToLowerInvariant(); + var runtime = model.Info?.Runtime; + + var score = runtime?.DeviceType == DeviceType.NPU ? 10_000 : 0; + score += id.Contains("whisper", StringComparison.Ordinal) ? 5_000 : 0; + score += id.Contains("reasoning", StringComparison.Ordinal) + || id.Contains("deepseek-r1", StringComparison.Ordinal) + || id.Contains("gpt-oss", StringComparison.Ordinal) + ? 
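+ // Lower scores are tried first: candidates are sorted by ascending score, so NPU, whisper, reasoning-class, and larger variants are deferred behind small GPU chat models.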
2_000 + : 0; + + score += id switch + { + var value when value.Contains("0.5b", StringComparison.Ordinal) => 0, + var value when value.Contains("1.5b", StringComparison.Ordinal) => 100, + var value when value.Contains("3b", StringComparison.Ordinal) => 300, + var value when value.Contains("7b", StringComparison.Ordinal) => 700, + var value when value.Contains("14b", StringComparison.Ordinal) => 1_400, + var value when value.Contains("20b", StringComparison.Ordinal) => 2_000, + _ => 500, + }; + + return score; +} + CancellationToken ct = CancellationToken.None; // ── 0. Initialize FoundryLocalManager ────────────────────── @@ -173,11 +199,10 @@ bool IsAcceleratedVariant(IModel model) } } -var chosen = acceleratedVariants.FirstOrDefault(); -LogResult("Catalog - Accelerated models found", chosen != null, - chosen != null ? $"{acceleratedVariants.Count} accelerated variant(s)" : "No accelerated model variants"); +LogResult("Catalog - Accelerated models found", acceleratedVariants.Count > 0, + acceleratedVariants.Count > 0 ? $"{acceleratedVariants.Count} accelerated variant(s)" : "No accelerated model variants"); -if (chosen == null) +if (acceleratedVariants.Count == 0) { Console.WriteLine($"\n{FAIL} No accelerated model variants are available."); Console.WriteLine($"{WARN} Ensure the system has a compatible accelerator and matching model variants installed."); @@ -185,46 +210,72 @@ bool IsAcceleratedVariant(IModel model) return; } -Console.WriteLine($"\n{INFO} Selected model: {chosen.Id} (EP: {chosen.Info?.Runtime?.ExecutionProvider ?? "unknown"})"); - // ── 3. Download & Load Model ────────────────────────────── PrintSeparator("Step 3: Download & Load Model"); -try +IModel? chosen = null; +Exception? lastLoadError = null; +var downloadedAny = false; +var candidateVariants = acceleratedVariants + .OrderBy(GetVariantScore) + .ToList(); + +foreach (var candidate in candidateVariants) { - await chosen.DownloadAsync(progress => - Console.Write($"\r Downloading model: {progress:F1}%")); - Console.WriteLine(); - LogResult("Model Download", true); -} -catch (Exception e) -{ - Console.WriteLine(); - LogResult("Model Download", false, e.Message); - PrintSummary(); - return; -} + var ep = candidate.Info?.Runtime?.ExecutionProvider ?? "unknown"; + Console.WriteLine($"\n{INFO} Trying model: {candidate.Id} (EP: {ep})"); -try -{ - await chosen.LoadAsync(); - LogResult("Model Load", true, $"Loaded {chosen.Id}"); + try + { + await candidate.DownloadAsync(progress => + Console.Write($"\r Downloading model: {progress:F1}%")); + Console.WriteLine(); + downloadedAny = true; + } + catch (Exception e) + { + Console.WriteLine(); + Console.WriteLine($"{WARN} Skipping {candidate.Id}: download failed: {e.Message}"); + lastLoadError = e; + continue; + } + + try + { + await candidate.LoadAsync(); + chosen = candidate; + break; + } + catch (Exception e) + { + Console.WriteLine($"{WARN} Skipping {candidate.Id}: load failed: {e.Message}"); + lastLoadError = e; + } } -catch (Exception e) + +LogResult("Model Download", downloadedAny, + downloadedAny ? "At least one accelerated variant downloaded" : lastLoadError?.Message ?? "No accelerated variant could be downloaded"); + +if (chosen == null) { - LogResult("Model Load", false, e.Message); + LogResult("Model Load", false, + lastLoadError?.Message ?? "No accelerated variant could be loaded on this machine"); PrintSummary(); return; } +LogResult("Model Load", true, $"Loaded {chosen.Id}"); + // ── 4. 
Streaming Chat Completions (Native SDK) ──────────── PrintSeparator("Step 4: Streaming Chat Completions (Native)"); try { var chatClient = await chosen.GetChatClientAsync(); - var messages = new List + chatClient.Settings.Temperature = 0; + chatClient.Settings.MaxTokens = 16; + var messages = new List { - new() { Role = FoundryChatMessageRole.System, Content = "You are a helpful assistant." }, - new() { Role = FoundryChatMessageRole.User, Content = "What is 2 + 2? Reply with just the number." }, + new() { Role = "system", Content = "You are a helpful assistant." }, + new() { Role = "user", Content = "What is 2 + 2? Reply with just the number." }, }; var fullResponse = ""; diff --git a/samples/js/verify-winml/app.js b/samples/js/verify-winml/app.js index 0fae5f52..acb692d3 100644 --- a/samples/js/verify-winml/app.js +++ b/samples/js/verify-winml/app.js @@ -34,6 +34,25 @@ function isAcceleratedVariant(variant) { return Boolean(runtime && ["GPU", "NPU"].includes(runtime.deviceType)); } +function variantScore(variant) { + const id = variant.id.toLowerCase(); + const runtime = variant.info?.runtime; + let score = runtime?.deviceType === "NPU" ? 10000 : 0; + + if (id.includes("whisper")) score += 5000; + if (id.includes("reasoning") || id.includes("deepseek-r1") || id.includes("gpt-oss")) score += 2000; + + if (id.includes("0.5b")) score += 0; + else if (id.includes("1.5b")) score += 100; + else if (id.includes("3b")) score += 300; + else if (id.includes("7b")) score += 700; + else if (id.includes("14b")) score += 1400; + else if (id.includes("20b")) score += 2000; + else score += 500; + + return score; +} + async function main() { // ── 0. Initialize FoundryLocalManager ────────────────────── printSeparator("Initialization"); @@ -133,41 +152,61 @@ async function main() { `${acceleratedVariants.length} accelerated variant(s)`, ); - const chosen = acceleratedVariants[0]; - if (!chosen) { + if (!acceleratedVariants.length) { console.log(`\n${FAIL} No accelerated model variants are available.`); console.log(`${WARN} Ensure the system has a compatible accelerator and matching model variants installed.`); printSummary(); process.exit(1); } - const chosenEp = chosen.info?.runtime?.executionProvider || "unknown"; - console.log(`\n${INFO} Selected model: ${chosen.id} (EP: ${chosenEp})`); - // ── 3. 
Download & Load Model ────────────────────────────── printSeparator("Step 3: Download & Load Model"); - try { - await chosen.download((percent) => { - process.stdout.write(`\r Downloading model: ${percent.toFixed(1)}%`); - }); - console.log(); - logResult("Model Download", true); - } catch (e) { - console.log(); - logResult("Model Download", false, e.message); - printSummary(); - process.exit(1); + + let chosen = null; + let downloadedAny = false; + let lastLoadError = null; + const candidateVariants = [...acceleratedVariants].sort((a, b) => variantScore(a) - variantScore(b)); + for (const candidate of candidateVariants) { + const ep = candidate.info?.runtime?.executionProvider || "unknown"; + console.log(`\n${INFO} Trying model: ${candidate.id} (EP: ${ep})`); + + try { + await candidate.download((percent) => { + process.stdout.write(`\r Downloading model: ${percent.toFixed(1)}%`); + }); + console.log(); + downloadedAny = true; + } catch (e) { + console.log(); + console.log(`${WARN} Skipping ${candidate.id}: download failed: ${e.message}`); + lastLoadError = e; + continue; + } + + try { + await candidate.load(); + chosen = candidate; + break; + } catch (e) { + console.log(`${WARN} Skipping ${candidate.id}: load failed: ${e.message}`); + lastLoadError = e; + } } - try { - await chosen.load(); - logResult("Model Load", true, `Loaded ${chosen.id}`); - } catch (e) { - logResult("Model Load", false, e.message); + logResult( + "Model Download", + downloadedAny, + downloadedAny ? "At least one accelerated variant downloaded" : lastLoadError?.message || "No accelerated variant could be downloaded", + ); + + if (!chosen) { + logResult("Model Load", false, lastLoadError?.message || "No accelerated variant could be loaded on this machine"); printSummary(); process.exit(1); } + logResult("Model Load", true, `Loaded ${chosen.id}`); + // ── 4. Streaming Chat Completions (Native SDK) ──────────── printSeparator("Step 4: Streaming Chat Completions (Native)"); const messages = [ @@ -177,6 +216,8 @@ async function main() { try { const client = chosen.createChatClient(); + client.settings.temperature = 0; + client.settings.maxTokens = 16; let responseText = ""; const start = Date.now(); for await (const chunk of client.completeStreamingChat(messages)) { @@ -193,6 +234,13 @@ async function main() { logResult("Streaming Chat (Native)", false, e.message); } + try { + await chosen.unload(); + console.log(`${INFO} Model unloaded.`); + } catch (e) { + console.warn(`${WARN} Failed to unload model: ${e.message}`); + } + printSummary(); } diff --git a/samples/python/verify-winml/README.md b/samples/python/verify-winml/README.md index a256fd7a..ed90e583 100644 --- a/samples/python/verify-winml/README.md +++ b/samples/python/verify-winml/README.md @@ -7,7 +7,7 @@ registered WinML EP. It finishes with one native streaming chat check. ## Prerequisites - Windows with a compatible GPU -- Windows App SDK 2.0 runtime installed (preview1 or experimental) +- Windows App SDK 2.0 runtime installed - Python 3.11+ ## Setup @@ -18,9 +18,9 @@ If you want to reuse your existing Python environment instead, delete that environment's `Lib\site-packages\foundry_local_core` directory before reinstalling so stale native files are not left behind. 
-`requirements.txt` already adds the ORT-Nightly Python feed and combines the -public `foundry-local-sdk` package with the WinML 2.0 preview native packages, -so either install path is enough: +`requirements.txt` already adds the ORT-Nightly Python feed and installs the +WinML SDK variant, which pulls in the matching WinML native packages, so either +install path is enough: ```bash python -m venv .venv diff --git a/samples/python/verify-winml/requirements.txt b/samples/python/verify-winml/requirements.txt index 9c319abf..81f6eb21 100644 --- a/samples/python/verify-winml/requirements.txt +++ b/samples/python/verify-winml/requirements.txt @@ -1,7 +1,3 @@ --extra-index-url https://pkgs.dev.azure.com/aiinfra/PublicPackages/_packaging/ORT-Nightly/pypi/simple/ -foundry-local-sdk==1.0.0 -# Use the public Python SDK code with the WinML 2.0 preview native packages. -foundry-local-core-winml==1.0.0.dev20260411003620 -onnxruntime-core==1.24.4 -onnxruntime-genai-core==0.13.1 +foundry-local-sdk-winml==1.0.0 diff --git a/samples/python/verify-winml/src/app.py b/samples/python/verify-winml/src/app.py index f60a1231..05535680 100644 --- a/samples/python/verify-winml/src/app.py +++ b/samples/python/verify-winml/src/app.py @@ -41,6 +41,34 @@ def is_accelerated_variant(variant) -> bool: return rt is not None and rt.device_type in (DeviceType.GPU, DeviceType.NPU) +def variant_score(variant) -> int: + model_id = variant.id.lower() + rt = variant.info.runtime + + score = 10000 if rt and rt.device_type == DeviceType.NPU else 0 + if "whisper" in model_id: + score += 5000 + if "reasoning" in model_id or "deepseek-r1" in model_id or "gpt-oss" in model_id: + score += 2000 + + if "0.5b" in model_id: + score += 0 + elif "1.5b" in model_id: + score += 100 + elif "3b" in model_id: + score += 300 + elif "7b" in model_id: + score += 700 + elif "14b" in model_id: + score += 1400 + elif "20b" in model_id: + score += 2000 + else: + score += 500 + + return score + + def main(): # ── 0. Initialize FoundryLocalManager ────────────────────── print_separator("Initialization") @@ -128,40 +156,56 @@ def ep_progress(ep_name: str, percent: float): log_result("Catalog - Accelerated models found", len(accelerated_variants) > 0, f"{len(accelerated_variants)} accelerated variant(s)") - chosen = accelerated_variants[0] if accelerated_variants else None - - if not chosen: + if not accelerated_variants: print(f"\n{FAIL} No accelerated model variants are available.") print(f"{WARN} Ensure the system has a compatible accelerator and matching model variants installed.") _print_summary() return - chosen_ep = chosen.info.runtime.execution_provider if chosen.info.runtime else "unknown" - print(f"\n{INFO} Selected model: {chosen.id} (EP: {chosen_ep})") - # ── 3. 
Download & Load Model ────────────────────────────── print_separator("Step 3: Download & Load Model") - try: - def dl_progress(percent): - print(f"\r Downloading model: {percent:.1f}%", end="", flush=True) - chosen.download(progress_callback=dl_progress) - print() - log_result("Model Download", True) - except Exception as e: - print() - log_result("Model Download", False, str(e)) - _print_summary() - return + chosen = None + downloaded_any = False + last_load_error = None + candidate_variants = sorted(accelerated_variants, key=variant_score) + for candidate in candidate_variants: + chosen_ep = candidate.info.runtime.execution_provider if candidate.info.runtime else "unknown" + print(f"\n{INFO} Trying model: {candidate.id} (EP: {chosen_ep})") - try: - chosen.load() - log_result("Model Load", True, f"Loaded {chosen.id}") - except Exception as e: - log_result("Model Load", False, str(e)) + try: + def dl_progress(percent): + print(f"\r Downloading model: {percent:.1f}%", end="", flush=True) + + candidate.download(progress_callback=dl_progress) + print() + downloaded_any = True + except Exception as e: + print() + print(f"{WARN} Skipping {candidate.id}: download failed: {e}") + last_load_error = e + continue + + try: + candidate.load() + chosen = candidate + break + except Exception as e: + print(f"{WARN} Skipping {candidate.id}: load failed: {e}") + last_load_error = e + + log_result("Model Download", downloaded_any, + "At least one accelerated variant downloaded" if downloaded_any + else str(last_load_error) if last_load_error else "No accelerated variant could be downloaded") + + if chosen is None: + log_result("Model Load", False, + str(last_load_error) if last_load_error else "No accelerated variant could be loaded on this machine") _print_summary() return + log_result("Model Load", True, f"Loaded {chosen.id}") + # ── 4. 
Streaming Chat Completions (Native SDK) ──────────── print_separator("Step 4: Streaming Chat Completions (Native)") messages = [ @@ -171,12 +215,16 @@ def dl_progress(percent): try: client = chosen.get_chat_client() + client.settings.temperature = 0 + client.settings.max_tokens = 16 response_text = "" start = time.time() for chunk in client.complete_streaming_chat(messages): - if chunk.text: - response_text += chunk.text - print(chunk.text, end="", flush=True) + choices = getattr(chunk, "choices", None) + content = choices[0].delta.content if choices and len(choices) > 0 else None + if content: + response_text += content + print(content, end="", flush=True) elapsed = time.time() - start print() log_result("Streaming Chat (Native)", len(response_text) > 0, @@ -184,6 +232,12 @@ def dl_progress(percent): except Exception as e: log_result("Streaming Chat (Native)", False, str(e)) + try: + chosen.unload() + print(f"{INFO} Model unloaded.") + except Exception as e: + print(f"{WARN} Failed to unload model: {e}") + _print_summary() @@ -192,7 +246,7 @@ def _print_summary(): passed = sum(1 for _, p in results if p) total = len(results) for name, p in results: - print(f" {'✓' if p else '✗'} {name}") + print(f" {'PASS' if p else 'FAIL'} {name}") print(f"\n {passed}/{total} tests passed") if passed < total: sys.exit(1) diff --git a/samples/rust/Cargo.toml b/samples/rust/Cargo.toml index 0a4dfd1d..37a579a1 100644 --- a/samples/rust/Cargo.toml +++ b/samples/rust/Cargo.toml @@ -4,6 +4,7 @@ members = [ "tool-calling-foundry-local", "native-chat-completions", "audio-transcription-example", + "live-audio-transcription", "embeddings", "tutorial-chat-assistant", "tutorial-document-summarizer", diff --git a/samples/rust/live-audio-transcription/src/main.rs b/samples/rust/live-audio-transcription/src/main.rs index b97e9418..64d1477e 100644 --- a/samples/rust/live-audio-transcription/src/main.rs +++ b/samples/rust/live-audio-transcription/src/main.rs @@ -12,7 +12,9 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use cpal::traits::{DeviceTrait, HostTrait, StreamTrait}; -use foundry_local_sdk::{FoundryLocalConfig, FoundryLocalManager}; +use foundry_local_sdk::{ + FoundryLocalConfig, FoundryLocalManager, LiveAudioTranscriptionSession, +}; use tokio_stream::StreamExt; const ALIAS: &str = "nemotron-speech-streaming-en-0.6b"; @@ -44,8 +46,8 @@ async fn main() -> Result<(), Box> { if !model.is_cached().await? { println!("Downloading model..."); model - .download(Some(|progress: &str| { - print!("\r {progress}%"); + .download(Some(|progress: f64| { + print!("\r {progress:.1}%"); io::stdout().flush().ok(); })) .await?; @@ -135,7 +137,7 @@ async fn main() -> Result<(), Box> { /// Try to open the default microphone with CPAL and forward PCM to the session. /// Blocks until Ctrl+C is pressed. 
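///
/// `session` receives the captured PCM; `running` is the shared shutdown flag
/// that the Ctrl+C handler clears to stop capture.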
async fn try_start_mic( - session: &Arc, + session: &Arc, running: &Arc, ) -> Result<(), Box> { let host = cpal::default_host(); diff --git a/samples/rust/verify-winml/src/main.rs b/samples/rust/verify-winml/src/main.rs index 53aa0dbb..4baca8bf 100644 --- a/samples/rust/verify-winml/src/main.rs +++ b/samples/rust/verify-winml/src/main.rs @@ -26,6 +26,48 @@ fn is_accelerated_variant(model: &Model) -> bool { .unwrap_or(false) } +fn variant_score(model: &Model) -> u32 { + let id = model.id().to_ascii_lowercase(); + let mut score = model + .info() + .runtime + .as_ref() + .map(|rt| { + if matches!(rt.device_type, DeviceType::NPU) { + 10_000 + } else { + 0 + } + }) + .unwrap_or(0); + + if id.contains("whisper") { + score += 5_000; + } + + if id.contains("reasoning") || id.contains("deepseek-r1") || id.contains("gpt-oss") { + score += 2_000; + } + + score += if id.contains("0.5b") { + 0 + } else if id.contains("1.5b") { + 100 + } else if id.contains("3b") { + 300 + } else if id.contains("7b") { + 700 + } else if id.contains("14b") { + 1_400 + } else if id.contains("20b") { + 2_000 + } else { + 500 + }; + + score +} + #[tokio::main] async fn main() -> anyhow::Result<()> { let mut results: Vec<(&str, bool)> = Vec::new(); @@ -178,64 +220,84 @@ async fn main() -> anyhow::Result<()> { return Ok(()); } - let chosen = accelerated_variants - .first() - .cloned() - .expect("accelerated_variants is not empty"); - let chosen_ep = chosen - .info() - .runtime - .as_ref() - .map(|rt| rt.execution_provider.as_str()) - .unwrap_or("unknown"); - println!("\n{INFO} Selected model: {} (EP: {chosen_ep})", chosen.id()); + accelerated_variants.sort_by_key(|model| variant_score(model.as_ref())); // ── 3. Download & Load Model ────────────────────────────── println!("\n{}", "=".repeat(60)); println!(" Step 3: Download & Load Model"); println!("{}\n", "=".repeat(60)); - let model = manager.catalog().get_model(chosen.alias()).await?; - model.select_variant_by_id(chosen.id())?; - - if !model.is_cached().await? { - match model - .download(Some(|progress: f64| { - print!("\r Downloading model: {progress:.1}%"); - io::stdout().flush().ok(); - })) - .await - { + let mut model = None; + let mut downloaded_any = false; + let mut last_load_error: Option = None; + + for candidate in accelerated_variants { + let candidate_ep = candidate + .info() + .runtime + .as_ref() + .map(|rt| rt.execution_provider.as_str()) + .unwrap_or("unknown"); + println!("\n{INFO} Trying model: {} (EP: {candidate_ep})", candidate.id()); + + if !candidate.is_cached().await? 
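+ // Only hit the network when this variant is not already in the local cache.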
{ + match candidate + .download(Some(|progress: f64| { + print!("\r Downloading model: {progress:.1}%"); + io::stdout().flush().ok(); + })) + .await + { + Ok(_) => { + println!(); + downloaded_any = true; + } + Err(e) => { + println!(); + println!("{WARN} Skipping {}: download failed: {e}", candidate.id()); + last_load_error = Some(e.to_string()); + continue; + } + } + } else { + println!("{INFO} Model already cached"); + downloaded_any = true; + } + + match candidate.load().await { Ok(_) => { - println!(); - println!("{PASS} Model Download"); - results.push(("Model Download", true)); + model = Some(candidate); + break; } Err(e) => { - println!(); - println!("{FAIL} Model Download - {e}"); - results.push(("Model Download", false)); - print_summary(&results); - return Ok(()); + println!("{WARN} Skipping {}: load failed: {e}", candidate.id()); + last_load_error = Some(e.to_string()); } } - } else { - println!("{PASS} Model Download - already cached"); - results.push(("Model Download", true)); } - match model.load().await { - Ok(_) => { - println!("{PASS} Model Load - Loaded {}", chosen.id()); - results.push(("Model Load", true)); - } - Err(e) => { - println!("{FAIL} Model Load - {e}"); - results.push(("Model Load", false)); - print_summary(&results); - return Ok(()); - } - } + let download_status = if downloaded_any { PASS } else { FAIL }; + let download_detail = if downloaded_any { + "At least one accelerated variant downloaded".to_string() + } else { + last_load_error + .clone() + .unwrap_or_else(|| "No accelerated variant could be downloaded".to_string()) + }; + println!("{download_status} Model Download - {download_detail}"); + results.push(("Model Download", downloaded_any)); + + let Some(model) = model else { + let detail = last_load_error + .unwrap_or_else(|| "No accelerated variant could be loaded on this machine".to_string()); + println!("{FAIL} Model Load - {detail}"); + results.push(("Model Load", false)); + print_summary(&results); + return Ok(()); + }; + + println!("{PASS} Model Load - Loaded {}", model.id()); + results.push(("Model Load", true)); // ── 4. Streaming Chat Completions ──────────────────────── println!("\n{}", "=".repeat(60)); @@ -247,7 +309,7 @@ async fn main() -> anyhow::Result<()> { ChatCompletionRequestUserMessage::from("What is 2 + 2? 
Reply with just the number.").into(), ]; - let client = model.create_chat_client().temperature(0.7).max_tokens(64); + let client = model.create_chat_client().temperature(0.0).max_tokens(16); match client.complete_streaming_chat(&messages, None).await { Ok(mut stream) => { let mut full_response = String::new(); @@ -287,6 +349,12 @@ async fn main() -> anyhow::Result<()> { } } + if let Err(e) = model.unload().await { + println!("{WARN} Failed to unload model: {e}"); + } else { + println!("{INFO} Model unloaded."); + } + print_summary(&results); Ok(()) } diff --git a/sdk/cpp/CMakeLists.txt b/sdk/cpp/CMakeLists.txt index 41f12c27..61dc5b6b 100644 --- a/sdk/cpp/CMakeLists.txt +++ b/sdk/cpp/CMakeLists.txt @@ -73,6 +73,8 @@ target_link_libraries(CppSdk WIL::WIL ) +target_link_libraries(CppSdk PRIVATE ole32) + # ----------------------------- # Sample executable # ----------------------------- diff --git a/sdk/cpp/include/catalog.h b/sdk/cpp/include/catalog.h index e4e5d17f..aaa6eae4 100644 --- a/sdk/cpp/include/catalog.h +++ b/sdk/cpp/include/catalog.h @@ -44,6 +44,7 @@ class Catalog final { IModel* GetModel(std::string_view modelId) const; IModel* GetModelVariant(std::string_view modelVariantId) const; IModel& GetLatestVersion(const IModel& modelOrModelVariant) const; + void InvalidateCache() const; private: struct CatalogState { diff --git a/sdk/cpp/include/foundry_local_manager.h b/sdk/cpp/include/foundry_local_manager.h index ce8725c6..fcce6420 100644 --- a/sdk/cpp/include/foundry_local_manager.h +++ b/sdk/cpp/include/foundry_local_manager.h @@ -2,6 +2,7 @@ // Licensed under the MIT License. #pragma once +#include #include #include #include @@ -19,6 +20,20 @@ namespace foundry_local::Internal { namespace foundry_local { + struct EpInfo { + std::string name; + bool is_registered = false; + }; + + struct EpDownloadResult { + bool success = false; + std::string status; + std::vector registered_eps; + std::vector failed_eps; + }; + + using EpDownloadProgressCallback = std::function; + class Manager final { public: Manager(const Manager&) = delete; @@ -63,6 +78,16 @@ namespace foundry_local { /// Once downloaded, EPs are not re-downloaded unless a new version is available. void EnsureEpsDownloaded() const; + /// Discover available execution providers and their registration status. + std::vector DiscoverEps() const; + + /// Download and register all available execution providers. + EpDownloadResult DownloadAndRegisterEps(EpDownloadProgressCallback onProgress = nullptr) const; + + /// Download and register the named execution providers. + EpDownloadResult DownloadAndRegisterEps(gsl::span names, + EpDownloadProgressCallback onProgress = nullptr) const; + private: explicit Manager(Configuration configuration, ILogger* logger); ~Manager(); diff --git a/sdk/cpp/src/catalog.cpp b/sdk/cpp/src/catalog.cpp index 82aae3be..e5387d6b 100644 --- a/sdk/cpp/src/catalog.cpp +++ b/sdk/cpp/src/catalog.cpp @@ -68,6 +68,11 @@ namespace foundry_local { return out; } + void Catalog::InvalidateCache() const { + std::lock_guard lock(mutex_); + state_ = std::make_shared(); + } + void Catalog::UpdateModels() const { using clock = std::chrono::steady_clock; @@ -121,7 +126,7 @@ namespace foundry_local { newState->lastFetch = now; - // Atomic swap readers that already hold the old shared_ptr keep it alive. + // Atomic swap � readers that already hold the old shared_ptr keep it alive. 
{ std::lock_guard lock(mutex_); state_ = std::move(newState); diff --git a/sdk/cpp/src/core.h b/sdk/cpp/src/core.h index cc37ce9e..bb078d8a 100644 --- a/sdk/cpp/src/core.h +++ b/sdk/cpp/src/core.h @@ -95,9 +95,16 @@ namespace foundry_local { CoreResponse callWithBinary(std::string_view command, ILogger& logger, const std::string* dataArgument, const uint8_t* binaryData, size_t binaryDataLength) const override { - if (!module_ || !execBinaryCmd_ || !freeResCmd_) { + if (!module_ || !freeResCmd_) { throw Exception("Core is not loaded. Cannot call command: " + std::string(command), logger); } + if (!execBinaryCmd_) { + return CoreResponse{ + {}, + "Loaded Core DLL does not export execute_command_with_binary. " + "Audio streaming is not supported by this Core version." + }; + } StreamingRequestBuffer request{}; request.Command = command.empty() ? nullptr : command.data(); @@ -152,7 +159,7 @@ namespace foundry_local { execCbCmd_ = reinterpret_cast( RequireProc(m.get(), "execute_command_with_callback")); execBinaryCmd_ = reinterpret_cast( - RequireProc(m.get(), "execute_command_with_binary")); + ::GetProcAddress(m.get(), "execute_command_with_binary")); freeResCmd_ = reinterpret_cast(RequireProc(m.get(), "free_response")); module_ = std::move(m); diff --git a/sdk/cpp/src/foundry_local_manager.cpp b/sdk/cpp/src/foundry_local_manager.cpp index 1a25ff0b..ab974bc4 100644 --- a/sdk/cpp/src/foundry_local_manager.cpp +++ b/sdk/cpp/src/foundry_local_manager.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -28,6 +29,69 @@ namespace { std::filesystem::path(exePath.get()).parent_path() / L"Microsoft.WindowsAppRuntime.Bootstrap.dll", ec); } + + std::vector GetStringArray(const nlohmann::json& j, const char* key) { + auto it = j.find(key); + if (it == j.end() || !it->is_array()) { + return {}; + } + return it->get>(); + } + + std::vector ParseEpInfoList(const std::string& data, ILogger& logger) { + try { + auto parsed = nlohmann::json::parse(data); + std::vector eps; + eps.reserve(parsed.size()); + for (const auto& item : parsed) { + eps.push_back(EpInfo{ + item.value("Name", std::string{}), + item.value("IsRegistered", false) + }); + } + return eps; + } + catch (const nlohmann::json::exception& e) { + throw Exception("Failed to parse execution provider discovery response: " + std::string(e.what()), logger); + } + } + + EpDownloadResult ParseEpDownloadResult(const std::string& data, ILogger& logger) { + if (data.empty()) { + return EpDownloadResult{true, "Completed", {}, {}}; + } + + try { + auto parsed = nlohmann::json::parse(data); + return EpDownloadResult{ + parsed.value("Success", false), + parsed.value("Status", std::string{}), + GetStringArray(parsed, "RegisteredEps"), + GetStringArray(parsed, "FailedEps") + }; + } + catch (const nlohmann::json::exception& e) { + throw Exception("Failed to parse execution provider download response: " + std::string(e.what()), logger); + } + } + + std::string BuildEpDownloadPayload(gsl::span names) { + if (names.empty()) { + return {}; + } + + std::string joinedNames; + for (const auto& name : names) { + if (!joinedNames.empty()) { + joinedNames += ','; + } + joinedNames += name; + } + + CoreInteropRequest request("download_and_register_eps"); + request.AddParam("Names", joinedNames); + return request.ToJson(); + } } // namespace std::unique_ptr Manager::instance_; @@ -142,10 +206,89 @@ void Manager::Cleanup() noexcept { } void Manager::EnsureEpsDownloaded() const { - auto response = core_->call("ensure_eps_downloaded", *logger_); + auto 
result = DownloadAndRegisterEps(); + if (!result.success) { + throw Exception(std::string("Error ensuring execution providers downloaded: ") + result.status, *logger_); + } + } + + std::vector Manager::DiscoverEps() const { + auto response = core_->call("discover_eps", *logger_); if (response.HasError()) { - throw Exception(std::string("Error ensuring execution providers downloaded: ") + response.error, *logger_); + throw Exception(std::string("Error discovering execution providers: ") + response.error, *logger_); + } + + return ParseEpInfoList(response.data, *logger_); + } + + EpDownloadResult Manager::DownloadAndRegisterEps(EpDownloadProgressCallback onProgress) const { + return DownloadAndRegisterEps(gsl::span{}, std::move(onProgress)); + } + + EpDownloadResult Manager::DownloadAndRegisterEps(gsl::span names, + EpDownloadProgressCallback onProgress) const { + auto payload = BuildEpDownloadPayload(names); + const std::string* payloadPtr = payload.empty() ? nullptr : &payload; + + CoreResponse response; + if (onProgress) { + struct ProgressState { + EpDownloadProgressCallback* callback; + ILogger* logger; + std::exception_ptr exception; + } state{&onProgress, logger_, nullptr}; + + auto nativeCallback = [](void* data, int32_t len, void* user) { + if (!data || len <= 0) { + return; + } + + auto* state = static_cast(user); + if (state->exception) { + return; + } + + std::string chunk(static_cast(data), static_cast(len)); + auto sep = chunk.find('|'); + if (sep == std::string::npos) { + return; + } + + try { + auto percent = std::stod(chunk.substr(sep + 1)); + auto epName = std::string_view(chunk.data(), sep); + try { + (*state->callback)(epName, percent); + } + catch (...) { + state->exception = std::current_exception(); + } + } + catch (const std::exception& e) { + state->logger->Log(LogLevel::Warning, + "Failed to parse execution provider download progress '" + chunk + + "': " + e.what()); + } + }; + + response = core_->call("download_and_register_eps", *logger_, payloadPtr, +nativeCallback, &state); + if (state.exception) { + std::rethrow_exception(state.exception); + } + } + else { + response = core_->call("download_and_register_eps", *logger_, payloadPtr); + } + + if (response.HasError()) { + throw Exception(std::string("Error downloading execution providers: ") + response.error, *logger_); + } + + auto result = ParseEpDownloadResult(response.data, *logger_); + if ((result.success || !result.registered_eps.empty()) && catalog_) { + catalog_->InvalidateCache(); } + return result; } void Manager::Initialize() { diff --git a/sdk/js/script/install-winml.cjs b/sdk/js/script/install-winml.cjs index 0de13503..848353cc 100644 --- a/sdk/js/script/install-winml.cjs +++ b/sdk/js/script/install-winml.cjs @@ -21,8 +21,26 @@ const depsPath = fs.existsSync(path.resolve(__dirname, '..', 'deps_versions_winm ? 
path.resolve(__dirname, '..', 'deps_versions_winml.json') : path.resolve(__dirname, '..', '..', 'deps_versions_winml.json'); const deps = require(depsPath); + +function resolveFoundryLocalSdkRoot() { + try { + return path.dirname(require.resolve('foundry-local-sdk/package.json')); + } catch (err) { + const packageRoot = path.resolve(__dirname, '..'); + const packageJson = path.join(packageRoot, 'package.json'); + if (fs.existsSync(packageJson)) { + const pkg = JSON.parse(fs.readFileSync(packageJson, 'utf8')); + if (pkg.name === 'foundry-local-sdk') { + return packageRoot; + } + } + + throw err; + } +} + // Resolve foundry-local-sdk's binary directory -const sdkRoot = path.dirname(require.resolve('foundry-local-sdk/package.json')); +const sdkRoot = resolveFoundryLocalSdkRoot(); const platformKey = `${process.platform}-${process.arch}`; const binDir = path.join(sdkRoot, 'foundry-local-core', platformKey);
Commits
  • 3bf0909 3.4.2
  • 885ddcc fix CWE-1321
  • 0bdba70 added flatted-view to the benchmark
  • 2a02dce 3.4.1
  • fba4e8f Merge pull request #89 from WebReflection/python-fix
  • 5fe8648 added "when in Rome" also a test for PHP
  • 53517ad some minor improvement
  • b3e2a0c Fixing recursion issue in Python too
  • c4b46db Add SECURITY.md for security policy and reporting
  • f86d071 Create dependabot.yml for version updates
  • Additional commits viewable in compare view

zDcl8}*$2D2x*ZPZ9(~b%*}24_V3DVplu+~AR8en*IblD}GX)i!pHfkDwz&_Tg%4gH zX}kb$MnmQK^M`xFdeHjfBUnp5M^Yqss!#pJX>;tZ)eCkm_$G1R0>*-h1#8}g)n62< zbq*~Ky{yguLj52K+IRU0;otn61M?k2V5PC^NPJ^nPFV0lhrK}|u?8_F39!HzVLe7+ zU&s!qDCzl19r12Ik{K5H0U2(MwhZ6Q(w&GCU7?&5r zqo-Egd2mQSt|*UUF^!h#4}^vgxUssW){M$rk8RY{FoqX7vE|HIc@X>hIHS`?>hFkI z`fji5Et8}LI#Xp&Tg`Re-3#@&l=NNk7vy2JXS)pBgHF?@Z%H~YUHDHs+p1SXa`)!m zce|FfJAd`-12Jd!uRgm!j?d74|I5GntcveD%4ltB3g@u6)sE-OT5{7fR^nqaOIS<% zvgemScGx{jd1f-@ZhGO$rAs&L??2ozr@@(9#KV!pLU}va_;em~mbsDlFF5qb<0t#k z$6x(mP4;S=^NSY)+aEUZzKVD>|vzu1nsK{P!FkxiN0s z>5Iant8IK#b%sSdzAga%1Z~+Sm-`XS z&f2CQGIvS9y*VF@iECdy&y=??Ejf`Mz`vV~>Y;B7)3rn2q$RB_3Uj8c`G*bCe}`S3 z8aqS%Lg&Z+%cg8L?xJrC%E97t%uH(J=U(rP_%$faSGct+L{KOv_r!f0Bg(^M&k5Rw zzC+waaCTZ$Q^$R0%i${Vok*04G9~rc1zKxqx7RoDGZkUyV;7Ji*LfUz?dc$Hv&Jbu8pD*jCM#62VQ;?l%B zaoyiI$Q*NtH_cFOpPQz3p`xdi6K6XCfF z7n>#Ja2E)Bt~i<)beg`Y|0{5X^(_Xbc<^hRyxz;h2K0Mp94mwFR?;ndUk2HPnOivO zv|{ZyIOb+ouMrFkqK(bi`X@j-Rl~{YF*PkZdRLFFyJr3oiU&UrY2P<)2MwUUjCcNH z|D^|B<`>nxB$+Fo=ILGdtR~Rs4PUv~<)I!-WbGpr4C&ZFHG6QQDVcGjo*-MW>)B$C z$TRPALTR63^{I4KHUE?vRb%6jz4KVD)G%7q=gDB|JDT`ASU(IfyQ5N!s1v-|Rd&2> z6lXsEtD+z8nX$bkf%i7QV6k)b{5P8XCdBad4lut_@W@zIhoZ zJK5iU0OOE)`n;b`Y3`RH41ckExL3g7rJNwMEsJliIWsIVHq`s__g_jM-hTVm@2+-y zc3Q$=v)8TLUtVC(cUN|0NUJg!kWv_xoMN%+_{@RPe=z8f+Ign z8Kzu~1z*iYGIStzltAPwa^Xyv!>{^-*kZ&w4wa+Q&shY<~bJVr&6by}~cctl9?X-lv=8Xi)xtqL3B#liK z%GXW`ZLa<`OLuzze)_I$`#;Q`M=f_E0XvqJ*&X{%$I`xx(nq4>`{}9R+CBFb==gt= zZf~+Ws`Qw+%(>OuHw;Fk$aRpC^-#J`d^rBT)LlSM3H=m*zRX&|gd{Iw8_hC=Jnb(s zT`&e`89^pkxvJ~Nv4-~8D>n=^NG-u|`m0~!A&Lm6hIWxQiOYjsFrBW?kD^!JDbg?>nm>NT9D4u`Im=sCH%hRxdsxElK)j|vL!wB za$oX1eUBx|uXYJe?f94Q#@_)-m-$WRZIkBaEI;$|O)`9MFuNmHx|Mym1JnV)u61{B z(JMefTXIRW(3~O=oB_8afM*WkIfIfy8KpqS&(dmi?mSQS78=>H3VA9RM*PQ8^P)IFfnX|%~uIphRwk(|d zqz^7IPp9|N3}glXfi2tB1)e`j2pT_5Ioo=1pKCokHy}-2O z&4Z$k`{2y7>;Z3>0OSC6u@s8rO=s#683B~+l>kN|5DmB&phk!S)FnAAU@#!X{(O6= zB_x1kh0<=?;}zr;Xj-nG27!7`4AlAXA$~r~g&;hL?GKDVs%fHO6{r=!dtpsBXt_b6 z0=hm>|Q0rf3_e4+s>Ge*-A znFlnmSgQN?@tKK{)2w6o4i41ft`=Y6`~#{pLi=K2?jlU->0mC7v&^z>FdTDWMu{bg z?+z+SEX;dq5N%%g5i{L_ps2M#0w%IK!bm@9oTFo3XGg4!jYT0==4?Xfl?;>fg36_i zsY3O9QxtZ!FaeFC`#eT)1a!|Ffq^3+5l$fV0kx(C=r|sCY~jngQx|ykW-4A+D$2@nc~>(~~a9a(-y& z7}MNA`A}U6^`VyhGeS7U&?}~)SRg3$!}xqy&l>6%KCOR)y!1mol#66oru_Lys2#v^ zY?!FJ3gAWE$K3#h|b7 zUS<->A1Z94{>S;%zf8D7n>0ti4%@E_R6g$l7Ix4Sg*oe+TCiZt;D+kX$eZRB^Jb$$ z7WT39vVb=OlBsslb{dDXZF@jG%hBrO{0C(`?go)(yWl~8+F=*J{tzXz|0`;7XmIF; z`7c0~$OC;L$=BDNSOB#GOd3zW_yuFCK!20|h)*xAa8iump4 z(hZ7nk4hNq`)wJ3HLUT>}!ljS%t&^XZ83g!VU#~Z6z$bR!Y7Gqj~9ZL+(%^tHe zluY%ly8Rxd=`-9vH5Ou+gY6>+5*B99gGd?{(`+=+Y{c|jPBHcJT5fLYb;{Y?q#4{D(7svl51+k?eUIXXFW3Au{qBG^)WfabAbW zljq4|94G4aDa?s_G5fpvkQas5L!uj#ti?!t9>U|z-GLtUOJ zRe`I6_7tCUBK}}5kC$i%*}!zWxm+P^iA9K-8D3;M%1Be9kin-~H5(fX$Caw8&D*M~ zTw_;*mYeG`>gvj~m}t1bGROj*0Y9ayL(|pK)iDF%pjnnUzNeCtnN;At-g0Liv?1pm~&|Iu0bMS6S!=N06L^q9s{>gpgrT+ruM`E9Yr`4W;) ztg1=y{6mmx5_7gL@zmpO`UkfW)8ow^$Dww{E98838O^ZgbK`EEOL?7w5)9|EzaVe$ zREbSPo_d*JC5N8jl%&6M{K!8dmODK0$JMJzS89 zF&H8{24ib&9fMgIvrs4@=&`V7sbBCJYHH9GM)%{M1&qfh3L~y4-=-t|_mvw)*I`>m zdj7}ujqj)<3mo?QndrV$n7>o(@Dbf5K`EMGics4AM|O@&LYHbPS=J2Rs@-n+eWT)v&JX)%RPoh!IExu7a(eLiuMq0Tx*hXHt5V6#0tv2dwow7vw32%6uR_3|A!-llO6pr@?cRA;X$9VB3sQUB;+- zFEBTqB>R}C#4!ZXD1$JU>or}JGzT6+ElTiOun$}rx9ef!%U(HhKh7aE;$Cg{vX1eYjMipem4&0Ro!3nr_0%L;_wcOt;hZ7N$GvdWsVX3x%t3 ze$J`ki?HsBFqQog(-Lusn1&8$3259HVgF-lWt(O(!aZ zpTp26aU@7IEUS);BvQ*`^ojmcRQ|kL3Ii;W6p81(9XUQ z{nQhXJb-qp&;WK6+QAQ(Z8{6u2`c18f<@SKD5Yp5+lUDX6VOvc=?Qe;Tje3?6QW7G zUSC!jMlVKqPPZ40)t>diz^&VhLn)JASitOb@lpD-w8mUx$ZMl&0^g z5nubg_X9rI)nMaDzv3^UHX`&Zs^;!<&3ixaFN=U8vLUz`u{$d}8xc{sL5cE4pLO8Y 
z)l!-3BML*@NnB0DDV6yT5x@J_Y92Om|M#t1H@UsU86OIQ-=B2#ot%?10Y$6R zjWOfb5>z0&=!rfoy}vH#zNzuCh;M0 z9;E!ft2+>Pf(H`@bxRV`zK@(=k|?0B+xM~mMhd?4%`N;c|7T{h^1+w!xm^#LiRZ4f zoWE}7C7-MKZsuHLs~<34M>CPKpp~GZa5}`AH@nFaM02aUb!A2@fFOXJ0wi-t)fE33 zcpyeS#(zI^4vav~LzPr})tYP~==U23GLuZ96miYioLP}SQdLO4>`)qCd*hvheODo9 z&#_C$bq*Dm3rl^WLa6rkyF%-VazGGjU~f0mzRLk?2im&Iebsj7t5?ld+g*5>mOm8M z9}46+DiGw6pUKPrzI|Er!tN0ky{ku?=aC+yvTNyT(OMS0r_GvUp4XOO6B-#vv@s?~ zeJVNRyq*{*<2v;+yQ7ph(zoJH}6da(=V~Vq!My#PCg(tLr#U}j2_Sf{9 ze5kzZFR6h2U7D``>5YzR$7a6+W&&eSpTUEj6AR6qd=7Q>XCZGOK07{pBk%KB_~UAE zMu6A=*iaDCzAs!4l4z*&i68tIJW(jXdwh0i1Sd1m{4;_12wQyHImAZq2y#d3XN1C3g?Z0 zU#$`JU*Nw%p&~fV@{B<3YQ|&;({*Z~Y{wX6x%v#DXA(sx3-;~QQYhxBQKKnq`@rIt zU6C~_^fe1zO2UUfmTP9^Pr#1d)GGK6d5IM=E#C_^0>^9LwuhhY_>f=tJAU=A3ar1k zT?4IE7pX_}RWX`&Q7oIo5u~*oD1c%Cl(cF|XUZtPi;tr%b~npz7Ov~2!}RS=+?x%} zsx6ocIrE4|<5wPAxJy*;@(qhG^7{^VhIlRWC|MCv0oCT>1me-mmGMWnJdNVJoY_J< z+WbPvy_`&&qq8w8=1$p^XxQZ&D0z8HTFf~o62uIYO-;>Rwb%2Pyn+ys6y6-@<~bT%9P8n_(R?Y25M8^HZsd{nR;F2hm1ci>v2ZS~=n? zdYUxu0_LCuiZ*{VM5xL&1j-=JH4$>ON4|Fw+qbM9zXL()|&9ER7V9#NdFSnk_jpD_h&*eqI zB8AI?@l*}twsuvfMn>ztJxUmpd7hJ=o}g$jH$8QV={4!)#oWP!B@n!@?DDa*weS{_ zQc49$k(-Xyrc5JJr|JE=G@Y*2uGRuw2(@yN{yXFTPLG26?bV0=t3sg=7%j*XX|PcF zXIHDX&Z7X9DA4Vo1_@ljVE%>7Arh5tBcjHJ>l#i&h3Y4mVeymC1tHUmZXI7yrG*7^ z5j!jvtDQMTv%_Lk^Tn8H-hAx|xVV{OjNA^QyIGp9maLf;uOJFCr$Q6?w% zIZ)Q+R#HGK#i-f`Lt_TAAfCmD!ZD(u#0%m{s!-{{N(T$Ao%r>&xZzkT9mT zB7{}HI5;#Ete8HH&;QnYCzl3G3?+@K^!UNK{C_OjK+z0R0rothbLV=7;8hhwH;q6&s>!U={h|n8$ z%ay_WLR?%&5HarAXt4AdBdC$&&o}E{vgHWmY0JM=R9{V6W*9Iac~rgdrbJeEH;po5 zfnQAc>?~q()f*Y+vJ;GSz|c9~|Hn2IPH2q6oc=us+;6KlKY97h!|Qbk8M9MPC;v#T zU=1B_y1u9M&hk*&w%O^G3%mSpv`qZji?r{$$wdk;&{>&cjjzImj?th7BmndKX4$hu zon?pr!CX{naU+;>m8i4m)K#{Z(d6QWjciXMrr`<;qnH22kSa#=Lg zqMx^=$JK&;Ke^TGI-D!b*?mA@=UOlgXgcxnj;iEVL51=`Zfm@t$pD|<>bICi!{sif zY2?k3Bcc=;%(pyQrOb z9YG}fCHJ2LMimH<+Rtp+b1!`Yz)~l;%6s(_O`ivpMmD+lHw4x9M^?0%N_1}lLdcUT zf-gbH6Q}aa35y^a1*aC#b3A&3Nt?;Vg?YLN?R{Y?3~_xPO0iae{uPYfFf;EX8_f#) zvTnm!V!#;0!Wz6233C$(i&lH5+t~_*i4I^4l?&?WfCY;fN?OYn)RuJasR81&u@7}4N;;Z>Wb^W%_4(Th*(@l@LXB@ z@GN(+HB#A}1;BW%h2&KX;4PqQ7TGmlAA>>#s8)dVkbTpO#_D-c47Kg0%h&}djdINg zOYa0C&bpBaa20zs6N?KCVX*RR&V6d%e>?vEzusZ*a(}vIpjmYAo2sjhWj>u-CG#)G z_^og>f3rJb_59PH`^Gluo+hCttqh?^lL;xAHe-zHCs|SR3FNovLxhh6Wa|~@@;%AZNljt0-?C&J_^>IJ@fPD15@imHV4VSgp(Y4IU7< zs<%*hBP(iehx}T~p1=~YnwxSuv?=J}G( z$8GP7fr#$Ia(>`1IOJOn=Cq z=5i!pdU(TS;V;&z#yDO9H{Bz;Ft(2z*~i6 zT&V{TQV1}~VB-V-#An&_Npcv{z9-=L2JLJ1ssApP!s%DvNQ&$~_`Kwx{8!N*R%NXX^#B%J#7AFF-#fIw3RTkX z7i{dw>2R>E*m5=FyB+tzckqj6`RJ~}N&^0Ov)?TwgMThDTIVa^QkSLuxUO?*rD58JniYe; zn@_nt+HVc1iZK_z!9T4HB{kPL8nZ&bVdn^nbC&tLd3$U^%gl}5nJ;%({{MOZ{Xc2# zx;mUU+cSKTEz_1Au{~*#V`_N4Ei4}MZ5`9L+uAa5lgKhMgmDQRe``yYe#@g^JK2qm^Bl_}lFgbPu z<#w=QtnGp;9P@pV_T6Q-mBb25S$g7Q`}(^3ri4~B0Mr(u#%nbkEbs~82*5jY+C>II z7xkTzrvOkowhH>4=GS#+|B66dt(w+vKJjDJ_uq2seq3jlx;V1V#qa!&QAmt9@~lIA z5SaVEJM+6gmyo`Y?~poJ)_CTRnp&!ZgUmZ+dUMnp=b2{KbVAfbU(@2iC6P37vh9ex z41zJHX`eYw{bAsBI;kwy$}y=bYfY7@-q99?Jg6*5;@RhHfEJdGmR22eKC+?zMfMYC z%^F+Ny>9QeR83bU%_jb*{O|vC)SYD51$5$a^osnU7|-$mo>yF6h)?`!=sGc8_#kS3 zHx{Fsf5cRVBtKfb_Bc=~6YKKN2sWoxuY~9*#mQ;@pk$Kh=bWBe5IfhANkpNJ(N zIty@_5ZFiB_a|pRU0c-l-N7H*SCECY+~5BojGL^?q^e03bwD*LeFFhsh-%R&9y{B! 
zJqTT!Zkzqn&%U&YmS45EG(Tb6Z5fV+lNn(toEWvZcpaGjGAck~;863>frhKSodc0% z2YN#SE5vIxZI1)X#@)+Pe99}pl}aIxw-<1{)%6G2m8&clUOGX^uVRJr&RC2qPOtM# zj|jc~7UEeEfJH~hdKa{KJ+5zxW>56IKaoT6n@uT_=dUG$w;ufL4u|$L45_A2^3ii>qOIURA0xA?{1+&@?~e4&iu&*!_^bhVv3 z(7?h+@nRrg3B|HGTND5pOqM}Jm73~O1N_PXq9t`UMHV-nX z(?Au1<8&slI3elYn5>A5Qu+4A_wEqBk<9_~B;P6oj@JADjF8?UK*mF!3Kt*;ZH!9M z*>G&tizYbEXA{?mzwHj##oAI29~@LwaBDhg%~4?4zx{Bb{Sp4TXjWe%*{7d2_$2(> z^VPwDhpVG?_gLx9SpDYvseS*a&in7TP0pTL4xxc-F(_dS)*Ujuo8HW>HH21ZT?=f>VBImCo>8l#+mOEtvmh_DUVK~HAX z`n%^gO-DU(IlCt2wuTwCg_{Y*K5${)o^$ZbUMT5#+5B#YOF;mX^srb?khv>`ImOZ$y7qj+Cx zkfNkbCG{?61MX#&-8!>(ZFTrdwrxj(?9 zQP_ps(}qTMnT^$ZIp$irzV%{3)S0kQgst~v5mpKk$6{-u1c^~xjGSnb$;UEvuGAZ` z-Hq5D%aL<;Bc@Ki3LzK6B3CpbnX30fvig{Bt-o~&4`Ug~pw>NJbODx;tcez+o(yt^ zS^0fX8K17}DNKZr6E1PyXp}Z&1r=jeFM09Wh;+^w@xb@qjZjXORRYnZtQln_WELTD z2r?cS^)iyZH-PF`fxogUcC{~5!~*7!>$*7>3g%-2Hga9L&atcaaosedJLkb-2kE~r zT`%jJqcV1XI{pd~=?tKVAl{51u7G!iaIUo9D+H+!O%9Em)eXu5$@A4S5Dv>XX@HeI zs5;9gz(8v@p*J(~`UIG1eIE4Sn9FnjC&z8zrwBhrSoib$Bd>!4mNqnCu^fJ;G=tL0 zed)C_18N!bbOP+crQ1k;d_vEUHm4zuPzg_*2+;HvFwDBH>3PryK_X~y0I45ix{W6G zgLxLx`X3$$m!$wuha`6?{90y|$;!>2`tSdK9)wb5x1iU$%WsN;%FF*c8)U3Fclzed z0t0;C!ML^#Vo>bngww9v(~w>PEkP5J^p+c*y^e)@SC}almmHCSWC4nl_@_IF#1Cf* zM3(@@b^EQT`yskK4PUAMv6Z5IB7ce~J_aO(V4rbd8K4jKBM)v{rPVR|vWVQJa)dfb z;4X#r5CrauVbOUV2+Ql=A$n0*mwFhaK@>hYGl{$yB08=iEQFU9e;O7+;1($>hBzMt zc3uF$6#@k-k@mgo^v=k-A!Wt2kNp>+OgbckMcwVH+pt8^fG`!7v#`7+FNL!Zdl+vG zQjOEp9hcih`k{e9mAw9;@qsbf1t4f9f{6}jXX%Z?%B-XS)UP<8SN{!dgnT(!2`lxy zr6mVuWAb8pU{QAh5k*2gfYQJLFFJ_UFBm0A6Wcn1g84P01VUU}JHLt;#8*@e5ct`n zg%qA2+dCHbxnoSM?IP_NGYO`reqiZm#Z3p`uwjS8jMh7uBVqN2=cvH5gpq(mobMH9nk7yV@oorJx^5W;H*K z@^s0`f#qall&8Zi9P3D65qMv_oP?YStz3Btu+E6Y7vdmm*9cuksxU0x21v62)(#2j zG(vh@LOP5>EC8 zA~_{Dx8C4FQy&iSU)I!y^qZ+!bC_`1Ob zG2%XmRY3$>d%NO7o;UcQZ62%Rg%xH^^Q*kg0=QuLFJPy1<{pIjG7rnLp*v{%UmyyGIH(9Az!N}yNDy>sqLfSc8rv~20K8s8U)*4B za1d9@&l?>G{+g2@CGh-Mrez$?kxKe}j=-|zi~O7UE&or(=>Iy3FP>j;U4AJ)8SSg8 z=#Vy(?F6ektjja!_}%y9w{}DnxBg|HAF&n?Gu}m|AdF`lGIP-J+Vlkg#%k zQ)dnNEu%K6ASk{7FyMiZexm^38>Ws3-a9h=0p=UW5mmS9u(Y|7r7{fBztY{Uvve^h zGOZ=mKJAW-R3d}O5-@tjEh&(ugN*4S?T`w#OlN?02xIio!TL!4l-IgtoERyuc|F0Q zdOi+X9pp`*aYEYn8pm7D0pc46Keq4s_pYXK`rksI2RC*YLmY7+HHCa&L2!Jx=o?wl z2#v)CCNP6%GWaD74W|6&_A?rYOf7?{@(~!t?J*FH3|W}}o5(XzbYC}-rpkaZWfpOC z`uMYkIPAORT~@$k+|Z0L?O}g6PINyi?G7?qKT1j`vNn)X7vK7mwV}CYj&^O31*HkI z5W@r1Z(xAPGXqMygMV7=ZUyUrl<^fuAX29KRkKF8xFC4vrI-<|FZIc{q<MsmN=0=L|MmplEdW6^BbYqfQA2 z-_|(}q6r6ts5-wo$d{y1NbFOCiqO&==t-3wQdrBa{4RDVf#UJ?*eH!cX61`Q2Sq+y z2umnEPn903y_xg9S<#!yO8QjfkB8_pbic8PJ@@YQZ&0hnjxrp*X*MROzFJjNxhlCm->K);nlT6=ta_uN~&Z6Hf3}d)^ExnkcM=mpT%Xhs}nE z{24$ay(2(#{s$DAMw*4KV9#%DzS-OuRtJAjOJ+HKVJ zH0ZcRm4y})Ez3ZHa73J0SeR7;$7U_YfZcn#{wQ<{%UYE<2(ScL7+#dOF}5a;#Mk3jNCY{e^|)F{K;rql$rCH&BcyT1R5D zkk5`t$vLO4BcQ4)9q>??O2a3BcXX=K+=7F&?~MCzI6GC!50sDn_f%7OnEQ`5;o;0M z4s0WsDv+DN89e9$^Mho)1SGaJ{7eTTSns&olL+c{sJwD@<~s8b47!Cm6DqO*mKPpA z85ZusskD%CPBp@(hrAQZ-n{66Am}b40YL&c@F;jsfS^G56QZUtbGU=NztF(^a4DQY zb(Wv}VQFD4Kqd+cVP(Ki`MXI!UUZ{Qz?crva}j**8f1 z1ITYJ01$`pDN+TH$;*&Id15;x(X}tS>GOonu1(4NZ5Rl%7kNDd^{dK2+IP@JY_LGE z{Xg`RGM($7!oUEznB&6&mIw(cLBaumkc3Q#X|~HofoQyF`quo&*-ghs4kB`u(qBrB z&u%K`X7UkD%7@caKv0=IhZ9V(kdw>^p%EewdSDqNr^oPstdo0@+chO8XAukelH(&1 zz!SoqZa9yKf&~F>%c@~I9u04VupPW)+U3vX09}S@`GJqks~MYvY>rW2N*OjF;_9?v zk<{-K4P;~@9{fxDvK1jiBJY+*ozK(Srha|C-R5}DZ=bt(D~3+T+8meKQJFS0LrDJ6 z2qS_pRfzJ4w}Bo+(O(?gwEVb4%{!zpM!mG<)y7-{$in6w7s}JB=Puij))xEv(O)I3 zo$l?3kyk`l36nX?U4Q%f_ScWPj;{PAf7mi6M_d)&&>`JhX&D3f15hm5fVk1m5u6mk zLAAbGQ=)Yf1LrdE91AGldgCDnoFn~jm~)PTxY5iTK_;vPZ^hcMM%V;Q16YXh?6&12 
zTm=#i$tc6CPP9aWB667G-cIZS^GWV95J4gyUr77Db^F5r>dVZzANw!3K62MFoYH7#P0#eR&YtP1G7rxodzJ+f?s9{=qkZ8HmoX))A4FsCH1KL``ee` z73zT#cmr4u;5sQj11eE(u#pVu0#&`k`mQb-(F{XeU|=2RUsGJXJ1(3?C^ca`+q^)S zn#M?Pwf3QVp=xx&T4DDA9e){tK?SeXN&@yRW;dieq~Ixz;7^YR)#o25#Sm?UaTq(f zbG}qzY3_@dvPNruH`TTA{2%iRo(iWK0$Yb96m2J6BHm00P|?3CgEdfTq%f`mS9%nF zA+e~2q#zoRiOZ^5emqx)p&p|O{}*L%0@u{FwtZ(HA&{V;5W}EGKm-yr0YpSv28Al1 zGHBdTt4SCH+SY0qML@-=)gEgd8fralRcL#RMeSHD{Jkw?)$p`ms5*uiRtl)VuQSOnKcj5Tds6hmP09j66c-6 zwWVc)xo#F?`=>|y>#5ekW?yhS>~_!VTpgb--UPl*SAdO#J#WneMTH$Q3KouM0B1?O73 zavANEJ$3#1WgoG%SlJGRT&jrAl^X-Xa zOMZFxmMd-(6F#gL(QI=#7E68Wk-WI&wrus~6>s{ld1h>tV<&g=mBdRjOGWGw+O?CN z>niKcIDBbP#q?bF?-FL}eEbf-XiTCQ4&^1ZaD7N`TUX8o%3hxnuA3s~$it*6TAe&x zKYm1C6Rz`8L1|{ea8O0lP+kFfLt4BIWDwU1Z`iQ=k9LaPqB)8&c_?ezJ;&H`m>2ex&F2g0%d==eS3C0nwLj!jJJNjM=A!8pi&HpG^ zj!j46opFu!s=TY9?ofcoYRUuy=AE7Pl>_#cd9#4jOROR4#tWvQro&J^qT-C`Hx9Er zm0R||{a5>9!R~~=V&G@L(b|`$XWl%rNIL&f_QHiRk0P%~p^>~CB>|8Y7hC+`gg;51 zyK1Gy*mUfcsaaZ6i*UpU5ZcT&Wo>xP+L&Lt^Rk$lVkeM;6 zto+)myPHJb8Y&0A$4Q|M8pDqN>bVgFWl5@ODsK~lrF>0q_c@fO{pTN~oP9I#&Dh4% z6Io71$wCCjllgpv$A80eg0!!g@w}ktMCFWvt6KWj%N7$xI#P>lq9c~*yz99SPsktz z43&hAHGwZF9q<{_>DmWcdfVGkXYIwOx3Jreu)EyGpn}~+F|cLOQq^sV`Z|Lm1He(P zbr)nbT}qQ?|5W93`3k=j077p|Ka7%YLwUoUw=8apmd30Mh*^OOh=7eVQ`%;1cGKn+RoXn~m-cKC}&V#OK=xs&Ik{oJnVe z4riD5uCN_c!*lEvCscpu>i@R;g>%|*Ko9AEcNV4&>d8*GGgW*0@GHD<{Dizhu!)0u z^IK*B?AC)_cr&tdxwj=a2wr_wA{X3dPf+{p!vBO|1?TUfeTEl~+b*D~M|d0-M-?s% zl?eHEj#D{UicD;CrK)mt`o=zdz0_Ub~+bIgEe1_KFwB2>}<+ubS35L!)+>BeSLWS28BH7eLIDmS~a)! zpu9^7Q!?u=>~;D|ow8l8>r!8D$|xcFN7+3b4>eAbjghRMV>0zm8e{GS=)W%m zPyXq#{-fpd-}GNdymVr8aYac)!R8>%K^}F%uLd9RD0fCb1zG*guEXt3IANL3FrSO@ z6i33&2P5*8E>ROakk32o|W z$-tW4j+N%$!>kA+kHqwlerpRfE*|&lPYheB7E7wEb`G`d7PB`LYz%QaQ?Q|6{i=Ou z4pb#32;SRiEDC&2qt4oXyF-(kUAdP2IpwUS{bQtExU-MxnAj_?@s`)d%B=bHxh(Q` zrO0F3rTjo$jH8s{!ilLmw5saRcYNLuxM@e`^j~S{zv|b=&Z@dKyQ(`|^VuiV+7kAb zg9{V*yr+JFSBAP`(y|WWuWXf3@#Qe#Id?zQ1Ll08&k_Z+AU%U3Hm8rhL4HN-IJl~+ zD=kZY)h`h6wXSXDRoz*NFGOq4=~mG{i+sSp?=;nxJ^oa5bn2}bmqx&i5CyZ7j)iu5T%>z#fE=s9b}Hvm=g$I|L9oZ46$tG(>TuysjyhPtVKc&VawPNJU9H#*xf}!T z_;%p|p6ztfe@~ncb39@zSn9k^)O=<0t;gwel)T?o8(;nS=eubrX%JpbJ6*w2T2-s7 zisjej3DTRdF~BYTJ|LS0Y4#d&>^nCc>!V8xAIz3}NNF`v3*?wQvPFZ!!fB|6c#Ff1 zE%Z1oGAOMm^{Qt88gd!0xrU)3rD1r2%}6w+nyX2vkkZg~(F}UwRIBBIN_uFjMG$V4 z^l(xT8f(u|j@4A(qda+`Myuaec7aEjOcPl zNex>l3MFc4P~L5dC8S4-$0(-Y+P_V7)sv_&vgc5{rLm?aD~WEZabkdXsWD~MWJ9;h zqMgb)$uw7-BRj^*n>-u}5iZF|?X$L0Sm<(1gE=`e6>g|7rhg{yJE5W`Rie-p>Bv97 zC29|~hpIw<`8xAhl$}3sR=bC)0N=X%9bx0c#uKEr=pc(WdCFL3(r z^Q+h4$Ki>V30%fiF)M#{kz6dj73=##EFQCc<&FYAE5GP&;>Z=Xrbsy2Y< zAK;}auggJ!3XssUAT6VVYnwb{`&@2#hcR}eu5}>2^-k?g3`v;+2w(XJc}W=84&oD} zsQyb*Uc-=vfmcArT{zbqQufR~NW`mJZ?-rHWAghL;R@c*bsTuCyrz)9JLtoA97r_e zHc%MlL_x-qh(i?KoJ8>}b<~>Y8S=-g;xZC%HnIJV>o69cc`hd(aWKN1fhkyoP%uKe z)(G^b=|zl?1d%2)%29V{-51>W+2xxkx#6|8 zeuo=f-+K!Obl_w@PR-9(Uma>&^?KL-)>a~dPm(=ei=^fg8Te+Yd%GzlJM&ZX&B;t$ zVAh$mTwW_s*{u4cB+AG4`uD!=_)cX36|B{0k#uuVS5BGzx}rs`M)>0O&3K1B$i`Xq z+uQ0Oj3$z3bCEFC&lW?vm6F&53shW)EPjJF;Cg-6oA%9_no}jRAGX?6HOzkYr*!++ z(~@;l5(F3(=j6N;jXm6Ucb}`}HMw)Yb8lYTIUs@WDs1g0$=9+6+4^;e6=}x;k^fXY zdF9~8Huo}T|B)&EBh!yZ{mNVG4BCG2gT>(FOV1Kme@7Wc+~V$sO#oLql#No(rRxZm zzxZl%)7jhUvAU17QxX(cTb#Qey?uLQ)0cSnx!0h5g79A}avo`Xjr{LQ&XSWu4z>Lu zn`I=F;dOvwc9daz*~L55z2vR;mc?qyP?l|^?xti$Ms}uTMijWVIVRdAI+m9k%k7Ni zr2qaA=*^w9G~? 
zXnI8;$i9a%zF7gX?;`Ourw=!LhaENmOYB@S`JLbYklm$Tn#8k=inAxr>AoAK*5WET z3$NL9e@|Xp8}P?d@hr_}S%gU}zkN#Bc)Q7NFcXlQkBf%>X zX3)qqivmodfZ>TQBD$ECMU6F|3vM(yu)aYJ6-`Xz)0VzzU*I=5uFZ0(D*O1f zoL`69%PY4aKecs>Q+b0U?gXQs-|4N{Vyp}=&(>7FgD}rXXZ7X}lWgD5oD z90c;N@u^9a9em}{1yk>pM{{)QmM?psRr`HuMQU$9GLFi&fWF?{BQ?)xqxJ32Bwqoh ze%o>D1rsx@=7GHc_PuD|-)6lWi~5B#?SJ*(c2A*_=bFnb@6iv;mlX3HU9rXA3AA9- zvss(IWM|9Q{%dOl$iVb34?dgq(QDT4mu=Ozoxgm++Y)Lz_2QiuKo!%!{7%32>3!#~ zzGIGld4KJXAJ}bC5SB4}j{dvv2X_7jL~!oJfmxd_u;+e%p=0)s^i*pQ=ZUP@@X zyDfOeys)-HiISTaEAIru-VT)T880OMJR2r6R6ig{53sX&CSOaQt}k4}W{(J**=&{S zoNEHpi>>ca)X4u`Up8!?GHuvuWZFYrb^o;c)y2Jwh9?cgshc9Bncr6aQUAVR@q>`c z#XlSO&7+4HuYdHdX3uj)-%)TEWtA67J<`RDMoho1~}eCzz}c;g;p z$i!(s#^H`{pK|YJ^@#3|3xh}8Um4WdqG?aX<6#04Vw6`S7&tavwzy0EiEs9m!_C=Z z&r37D4TQag;Vz8`<*77^eb}q*n%6?u^&cwiwBCc z^AgDGZT6#ff9S}YFPYlCMg&I)CMsu|G8kMzglBAJy#qDbF^nvfRp!+=W>=Qeh5x zc6kDFE1y$LEaWD3<@_r76-RuEBXZEehsO-gFyB3RB^`lv-me%P z$uS;0z+hJ;- zen0sO%MsFB(MPvVetwU)B`>??;$|?vnOnZhL06si(HFc?i}%RO<{qy4^1|84zXSfQ zLYM#iJM&&wM(qgGylsN{2&2r2dW;dyUty3~uk5u;)PKE3ENEdSit#|0MsvkOd02^L z=I;Knv=&vP_7iq<;sb~35jS0$?yYstcBtpD)5m@|;&$TA`22cmrDuTE9Tw8ZvIM@% z`9q~084!fdyJVLV_EpjNK!!GL{mU^rPQuDB(Hj74(8PryZ zmi_9`nV7!UikFpzhl*k~$MG^b4y1f}C&LCqg<0~PlyR0-G!zO8wVbK>R&v!~jc{LH zP6~^Dp@CY4AYow#Tttdk-~GAdD|a4I1XbVj3l8|h|6Z`&5IIi^GMDQ&WR=SPr5ZOYE56UvIUOqw847jEJlTfva>ZDIpD`6Q; zl-Z9RMp1kCQ_Gw7T?&R!o})w%mgguJj$E@4mI^ydFIojg;g+}lN_9K0vs73*znn(y zgn6Zxa@Lp7*_4YPy~au6Oh3Ys#EThK@>~;8EWRAII1n(K$S@#nHcw3{tdh7Z9CnEoTPIQ$WOb<>`p&itxH`YhjNs=epQJ7pCR- zw(jg#3$df4MVL{p#y11x9JyL-yMhuH_xK5WW{8BBJJ$DJckR?K%gIWL%y7iPfq|Jv z3Hx(}*xt-@6*KuI#5YCWickW~NJ#ts9yrF5Tz}uRuUaqvyB(i?&}?f0D-(-|bFShC z<1Ac5IlCC{G+(-fYfavKS{F@{o68Add#+c)N@J^(#i=0|OO6ezgDoUi>~TC|wQ)b) zU*I%m9xu#dYiOsfOBvg;qnv3g$5xjz;uoN*83@Ib2sSUQ-4kzjRlks0a^TgGF7dZ7 zb#!Yh3}M>_&Qs;h7q;_w>QL1#d*NyanjR3=buIJq_Yt&+){ z+zXuCQ9DZ}{mGm5{ZD0vm{rgB_6VBL&Ck!zbGawc>4KbR<~igS1dSXUYKc%}?0uq2 z+(vP2VrTS=xzWr9KQJr!?x^hiFn>nx6PB_gXrlEuhWzKJuVC@xc=O4c=O_Zi z!>p1;2xcM5qsqVvXR`<-ek_bKaHG76;0?l4SOFbcTcd2tXgk3|G^-@)n^E%$h!hAS z&U1Abn@B*6YUxih2Z`QCbgA*ls+D%A6ch(d4EqENse{V~&m(F?t$C0EguYKuTK}CR z{_D(&T_Q1ryf%$=N3<{WlZbEphky4-d!7j9mzA;kIduvyJ-9tzR7%0J(n(&IHwcDK zg0;QQwjHaHd1E=&49+BZkIAUE_* zb^VmYVqz?}ud3K4tM#8*mFlYUs;crlMRDkz;#EK4Hdk4m=sak{BzAW7HdzI^pHMcA zF*669y!P7$D0J73%#Bbq>(jrlJ?)=7p^x5TQ-Ek&TTDk7Hg+gZqs(=DKZhko^+LEF z>4^k}DrON&%tRCsH!i7?^}%iWhi9u2cj?9=!l)Z-60=LlQHnc0kvWUm=X41}x@%Ns zV*rJyU7C}Z!x5gp6Kcs%3MiZX&jMtmy1X~S4~dKbC+WQ5eaW^71mdW#QxfqdW!qUv zpr72==y+&CXP^>1 zMzx^HGf>`$&tr;-E!>q7+<-6`ukzCi*gDY$2%1DTeJM;+v;oPjxQ>q&1K+(l==w+V%-Et z^@rW)QF^E*9aK^Jf(!N zvej9L@Rk^k$IRs5%Xp(*aVL^b`?huw?iA_~yz)4pv09Ukgr23~xh8Y$0c~Z8I8@Ar z8{kUW%|2$WaC7*{{{hc^Q%vOzkGJRxgEpbB*|+BhK8W#**Bw$YoaFfmu2l)$x;(k_An`*<`V@jWJABnT}FH^o5ZOPVoe*=*bvqUQB=EmOVcg;5GBfl77d zVS)NUyQWadH7Jep2qzATS{^L2&(tNsNwCbdCcOIbsp8S%YM34#^A0%ncJYZ>sQ6u9 zJZe0e`$PH8ov1?z-1hy1F8AtLZNuBD$8L+Cd;P89ZIj)1uFCvr=uQ9qOR@34-Y-;t zHaua%!ab5yABi0(wrS&qY2)-wm6cmP z8Z&3r~%#`7=2 z>qpIe{yYBrxX{-n-v6t88)bGliPTuv=n#6GKwtB5;i)EX1h66c>PZu85V?jZJrv}(F62^DQu8+u}o zF6G4PVq-6W_23n$K5gN>?)+gd_wt9il~2_>EK17{Hyl;Y=Q>df-KxG_;HUlI#oHOJ zFV1^SHuSApxBDGN>HN0jae>{A(&c%A>BV19S64gE^PPFz;IqCXSggOcBk9n_UG8W2 zsv2nuFU3}s?)9V3|Gj<3(AMP$XjAJM+M%{&E!A4fs`#;2f98WmW6QD2my!lPyq4iJ z=i6;u3c<-%{6S9-PECV=g$-N&y3i(sRep%*q6)g^u(D8w=RHuZU zP&h8r5})qTq|hmw@4%xQdH1e5A8DO$y&ibS9 z_D6h)Pl&NZKrey3aYdR{Xhy;WzL-mA<){tyK0(O1@133$Ph6o@+!yvbN4ZObR?Ma{ zqg8_XHeCg~JGGBaRsS)q;`6-~Z1^PJP-#j3H3XoTo3`e|>wmScF=LlVpAUibi0(Z` zSzgT)WwSoKtByIt*MQ+_d7sX=pBuwoo?0o4OO*RQBrhIgNSNYxvRzBDFVW 
zSoiVE8Kq6@eV^e7U8=OLFXj&EYmRQ>Z*$+egqq*EEDkDy@FvCFp5ro~MISG+TiJdX z1y|wR%BJ*2+gML8hvKWOh{K;(^UEJzx*p~p<9oz?{*u}K?uV!LiFlsJFHK?v`zqR| zb?tGKJgwT;=H9lTYwpoX_n^O(cvu|bAE(@Rd%tPl|D0d_ua8%6|IlaYrCgWomkwl~ z&-}@B^ec}&m*g+z+^rJtTi~88{Jq}r(Oz~)bK;yor)6Hpg=y6I=irT%mqf-P*(HzN z5R@Ds&^6KVZ!>FF)?a;z3WKO`n2DoAGb)eUcwH4S!l_`38C-$xi%iwzoSpi(SL&K7>ok>`N{32%W%2diu3es-M8-(#{qP>X#f-;% zG-Q)1nojL!bfxxxE}ydAxBc%^Q=zp;?pc0Y%M>$x#1o$RUJX4=+V_^fwM_tR)xX=f z4PUwt6h_ecpw@3hl-YOahDzt4^YL`4)yi4&r0_N+a9ftsQ!D9v>0G*i50OPcXx}T) zb+APFgh+ftXNO-G#%ZTR^~t*BE&Km><7SJuY3jwfQSY6c z_Wgt_zg7Ra&T_`%(dePA0XDfVg}(Dg|GsYb0xzZAoQm%kJFb3u%ZA08F!1q2%WDH| zhpwR9vEyc_y)@v)--dlcFiU)i-!&ToYJJ0h?O8=+>eO{f5ArXG}KMoTJ}ELIEF>F<{xF8k~OpV)u7&(21ElhK(wlXOh_f) zN_(c|Kl3)MK@s6}zzXy8QBMdHdar=!`RsEi*OQ_P)Czhv5Y`Jjv1HV*ME)cn&Bv5J4BnUnm+-M>}J&p)b}JQuLBi_Z_mH=b==bA zKaQ<#d!|hG`u%Oy%x~}95lz`0@cz9!=NFBBl$v?6^8DqhOSzhYu^T^b$v$QH_Rjuk zr+%}V{`IBDcJGbearXW7kKQ}ZA`(HEX+OL){)|mTux`Pwu)7O(E!OO&gpJkW@dkAW zWQk^!VX{anDD9-Aj-~U`rr1%XKkP`b6O{EP2l%=ryZL(ix&9z>6Db1%d|zh+t~Kp< zoLpsRS1zq`bVPE%&e6_JUf5XJSgLymM@WF8U=aK@R_!17t9^U=EQp<7WM)caWF+P0 z>+9y~7T}9dLEfPWAC1p(n(3)BGNV?VGhKc4p$tyf}WXA)Z&DWI2iwv?45 zN3CCwmsc6O*htmNm-R8~Jw27j2fg9FRWE9l{Yt&#zeTBUP~7eEh8k`40O!~}1Gfb<$ojdF~yONH&#Q$b@xVuF_o zd9`8(kvsm&lk(nBTt!`UsU^b8s_;YDO8W1wv%+osF~55JSNk?3E*5#qVtXCL4uRMD z8;H^prdhmW$s(6ivn#x*QcCorOKoEOS%bPh7RFZ;1eZ{DZY)?*yL=N)=rq3M8qH8M z1f&-staPXl!5hnQemBISJMdHk?q#F0ExaurKREHm22JTQfexpr5gAgWSy50{I$VT{ z7gU-$P&tY4G@&PEi0C7*u6mcusQ0IcsX@e7p5F)=BZ_W#;;lA_P{3$oq&0~66Gg@8%?mQ5(gOJ}B6>RV0 zur{;Or>0LWW`-f3?(f8SO?5}Ok0i*soDdS;!z%!|n}P5ERs|u}2f`s@eg5{$uv&38 zVT}!t1UmwUmIQ)mIRf&b3Q78@dDxYp-%0zLW~bSSX=}cG(|-Xx6rseKS%b(S<03Nk zT3Z`NjxN`b?vaDa)u|*iKlH`8Rt5pii-ib6BoWlDRT98xni{RAHIVUa?CC!h!As)# zNDhEFWjc-m4wt8gI)@4kvc|7O_6I-O2-|4HJcCbK;`hdfm}8mboVD62MTRzLe*@t> za-&J2SVqAm792>&3^V(YP<7$7X2v)YpkqFQvCUU~uXp(P4Euo8XkGyS8!#%SeSB7~ zzB3`?FKeJ6&d!kk`H!l8+ zb>|AoPHVA3rj**2>RiE|)g6|)?E2w^o7A&x-VZa!@4C4*USHa|c5a;Kxqc;Jwf&LF z!{F4ywUAT7hB+bO*xCsSyJc=BGTLL8!rsmZm1AOI<2+(p$hW}j4wc27skwHA{gH0? zw8$gab6%R8c^}H|5NY3MvsQC4KJRqse`1r!e!ERluJ zEL90NMuj*JCXp%Dcr(_D{9V?d8j)gdY13(ZhLe-DE(C)ZP|*8c8k3%%K5c{tIFQod zN%x$m0-_nu_cM$ogO^IWFI|d{``2M(q7JVqhmOPLTL#nb8CA{1_g%{}i6)%uGQ@U0 zO1YdX*JV03&)~y)yvqdt_M!IYimkeHYyN3iV_4G#H}Vytl{0^Tv=R_%@eHS-=SjxX zGyY2-TVMr#&X@mQA0-)ATuL(jeELxLC7j%hec0K&yHW8GL~GxSoXo!b@mAeehBbp< zHLv*x`}}#zd{E74httL+qhjmH?2&KwmT>VKnoK`7pIh_iBnqcPe$g2Yr?XC<=1(a8 zrudKGKLVnir|Hhi&QmM!DtQh2YbW6FecVymIWV(-f3#wSX{N#C+c$jP$Cp@9XoJAL z)(|%eEk8^91?k>+TBTTrzMiG!S3z@xp3Y^VFB325|F`+GW$)8IR-L?ngPO+^0SZMJ zBmd}cW-^K~W56rGKE2T^vLe&lzPNWIkRuH-G@$p=8bRgDm*+zdjpiP<@K}A4(lHtw zyeRIn)xGPFy?q3=VBS!c8|3HZq`;dLF3a!%bvcM<@w2rUgf5Jx*43p}BML2OrJ%Ke zdguM1O5J1GW)ZZEV|Zf7dQST*d!(no>At{sy0`sS#_!a!0eHFkyj^1bwU)09n!KXm zvxeW22ntbi5)t4eSu42S&VA^YP0$(nj2y479#y>F^$}q=HC|l+6SsGCqJ(%<8olUK7m*=4e4H^<-_&i<_)Z2 zYmjD!z?P5tAM>mK+hRz{=CgN>gV|M|MK_0_RqGb+zFrmRrQQS5E%6huzwgbr^_ETF zKNWa#!UEG)w&p0?Z=k!QdiOjv26aUu;B^uELqWkJQY1em!)_%h(EZz@aMkL7BP)@H zb4C{@Y+dNpn}N?M%StLIs_qfkkfVT$_>Y@6J2XJkTXUE17b^|OPaJLoHT?2^zqB1yz$RSYXYz|2(XBJ#QwVv% z49XxGgJ!T4(nhWXH$Dn}3by-Izbm4ry`-(s#D-O**xT=l*aZ@lL!@9SaZubi!cGA0T@0+P3Y~4NLh;5$WZfFi=z5`d*fPV z5u2AHHdjIZFJY(@_wK7jaftx>FIxToxqbilcBi=uJg2mFY4$+=l_;gWC@(cbqecV? 
z3;tNo)&iH!vMo>6%h1OIUe~^=K4DE~zh^@AJ<5ACg30ntS5WCih3glKtezbFbUXcz zsKElrS}MYA{Nw+U@!|tVR^(K%pq^sy1EZh1z3gUNou!|Opc@$(db17ZoC8N zQH)}|VBVm;SN)=l8xDDR8t!mRWKAtnFe>4r72}z}v27BZoGqWCn5~rNk^ZZfuC^rY z`^jJJyC_2~K<)@J?gf1e6L1hP8IyVS&p>3^wn+*M;xuA9m0%yP-!7?Bdh|G5KQrKi z$fcnkT$?+{K9G>yE)WUk-DZff6!o~2M4}fTW+Hf%#+}ClLyNnJeMhiAdn@0HDq<54`klXg}cET-MNDW&%p+Wtr!<2Rzja-p(`( zZMzw9aKcpg#NPQWt>vq%U0@83CKfL1oKiU_D*5wCNe}G|ceAVP?N4)|2{zM`bq}Z|?X@c<$CosUfZ1#)X?t6q6DA)&Xl3D$ z&-1oN!CKFQaQ(90_5#^#6n@1w*QpQ55?;}sNODrghfXPXJ)CZdmB=!z0sdO$G-9pe zmDpW;?ZD) zoqr0SrNNSj9E22_IaG+>@hvZ5@lf6s-&~D#YJO_Ac?Qy!K-%}$S>9GIv74yszuFgT ztqR|!{-f(qwTALC6og@Q)R^oY`%RIQ+uP|-Xp*|}=BxF)n?eT9E+`sDja$>1RWwey zjBlZ}&=isvU)%RuQPz5jo}90czIxPd( zJuZZDdWNntR8G(Fu;tTalO?&mdWEHt3U2fO>0dW?n8T%muE$bv1*d*0`KefNEBIK& zWAWzBgs$9r@9Kos&V=3%DjRlG7VW6)-2#-Jx&NLxA+;-Ps`C>RHg;2FLYktK=Fc}M z=8oCU`LIK|g<^sRAxgtShZB7tHlhdj?@^jdm=@di9%9$jC%fe-+5V5OR zDNYuhLe1Pm9^0*?)f7IN#Q^qRE=WL#<@WJI~B-HwNyo z;?p)!hyH5cQcPiqT|@+?%*68nqjE1fBsmLFB)&M^zWGtTE*tAot$T?jLzJcht5s5U zi0>3^9h&-U<2#jS4JcoX>eX6ntxZ~yVY4{))S+eJUziSx48_@6bckZD=d$pwkSiMT zlAl(J6Z)q};7U9eP0rAB=|lQx+R#O7y}~Mor4U(B+jNs*b#4Amv<=c6a{FnL_odmd z78m%7H(!id$&*u0KDzmfbiBi&uNJeVi@uUp{IsaJ|JeI)`mZ@}BzBD1qLpk7ZtFPW zo7?|EkeJ#3!B~-{;IT_GXe-;(=2$x9_A#6FffwiErtSWa9@*v~>=5W8zn?|Ha;x;M ziViq(b>*Oh*Y8LRoFs=^f>5d}xj#H-jO6}Mhu6V_f~;!?w`;aLY&XG9l-#3U7a)CZhc}fa*K^@UDVRlJA>mG$SkUHWPP}}i_ z9i@SfJFfIz>Fwx${P@Zp-p{BY9?@-@-5(bdmkl6GAwja# zsbakXJ|;Bakybr`hB4!EP$YT&!K{G{g71P>}2@e8vl=vHTKBDBi0GCOMZ~qCk zluN8}amc_C#Yn-I~>ewq27oB6QkbjZggm6M^? zg5Qq~lo?Ez2V+sqF^P)jtpsaur3fEPTqf)7y_RpXCT}IlV!WA;D>2B0X2k`r(jx9C z#gUEg!wF^OS<7s7_Kj0W`~EiT3G^GAg2w#Sf2nX?z3r4dT)G9Z1@<$HNys#}=RcdW z@p}DLgo?RQstg^8@RoxE+!q6^2ADqkS#_Y$-Nu|Eda>bX=uyPGf$b+uj;|PKl>{W^ z0ql{tq6~WyOPQ1n?EpzL60C=dPS4O! zmIz}w<8-W5JQ_nc*&yH6XeFns7&6N~S~T$34l0R|OXG3I=_iXA6hjU-nk*IW_%u_M zerk>f4M&4L&3Y>egB^u$1@W&yF$H-2(#8bCt|$F)yDORx9{Oka+WYbnbmS?>~-^c>_kGI&_*~W za!lmmbe2@q$B1Su+^TM3yDuoRRa!t8f~$1V%=U#__l!HI6Y8e*<8nP!#XcKGF6Av8yUcq>?5(U+m zqt?NO1}G&g1@JfgL|mP7!mbl=^s-x`t2tb;?q>SQ`KO<(FIFujQ_%xP#r)&4<}+SE zHzBcF5_dY}__P!LseRgAoWy`wA?vR=+R$2)gh=X<@s2?U-V#|*99 z5*bA#p8nmwl3jbAT1j^0T203zD~`mLNuP*hgDpeii+1&QnhtQ1X->;c8TKUJ=UUZ9`0t#LdJKkmERrm92w zEUMfQvoALpQmWh?EsTj%)loer-vz#$A-o)Ip|$e065AP4jPjks!s?;s7)03}Ewmhw zyDn_^VlDQ1JH>Wc*6zi|qUFkYT8pfr@dl-Jr;#G<`+MLxF8o7--n8#dscUfKg2Hi1 zi+vB!>8RXsXTLcmWkhv4J!hYRvcvivJy2;; znLl_gOp$eJyg|iw4?AtEoL8waD5JThm~`i^aDTFNX8X3z8DfsuN-8Ri!ff0chnT7a zqc0|u98&oD)3x~gsj6wQ3z8nK5bDl#P7gY4-Ki;@`7pTCYN&}VT&yXQib@zsD&Lop_pkl!Z5B7ZnXw^dBcmcAgqQ1nd~UXa-9MtH z_*J(cN6aYbh>VYPC@9Sd9cmtHRLth#R*6fF!gR^MXk1^j90c8KR3wQvL&5V?U(h+B`+4uTGiOFCVQ9WMXC-UE1=Dbkr9I#L9PpugtzIK0e zIAi#m@!gf9)Ve;K{R_2Ds5&wf6#qjUKxJO}`Oo0Shb_e~hhP)*@M0ubE0fGWAILT& zj$-0AXXI+2ZiD<`3vm=OteX6uHb{&;WK))RL$Y@sKtub^1A~=6+IE_q2oUYk+T@PM;J~VQg;x}FRe-B~&_pMCs!qLl2-YpKys{7k- z_=VJu4fsU#i8#@*NL6^*`6uJC$yLv-syW-SqAJSZGW2uD^CiqG)n1q8dzoB~jv)yU z^S&JKBj%msEN|2~hMM~%Jpd(6QFk#CSjJM*^5ZSZ+Aq~DKXIjS%ZFtNsS4C zwfd_XEg>018;+9pz3LYQe(a_X1O6JHafBM+EbFftGa$LbG#0K3Cdk1K?S}@0wMW4y zSq$_DiLraN80t}|2R-F!a@<}PU3=i~9(tWaq2mVm=Zd1eVn{r6RUZgbYvRcjsfB3J zld^xa9=c&dZoYo`=Q`}+n1vPzA6${xC*Ei{6hBBcX9^Rw^8Fs~6=zm$M9TgXfcucQ z$0cv2c*4S+rlIQ}X43bnqP5lEHrFpP{qpXs6WM<*gIx!FlA;hDs#|^ey%T35PmbO) zuYb+yqZ21+?raYW>w1fcUCLUO$ASuFZT?_br)Abb{et3#JH>}0?G zyJQRG>gK4Png1;NaQOJy&LZ*jnSI3=oLDLk_XYAWc^I{GNM9|s5|ODF27LTuV0weO zR?zFoRFs~0`VAorFcq^zHjS>Lk-cOzy-G|M(}&u=mk4=QH-gsdX?{6e%)VmqgxQBl zShg1Ptq)5*+v%~q6i$u9yoyxXdD6a^KXZj=wqRBMU-3!Iihg6{?(*DSRAcXwPpkPM z^g73K2;q#W%4`y59MQS!+&ej#2DZ|*GHtITGz>A7DY<^`+M|r8Gcuedr19z{^i-iD8G`{=oe~#=YGQY 
zq8;w#e!`zG%7-mj0TszC3+dSo=lEeKv;At-7iq|l9o<~Ux6#}ngV<#!>955X_ty>v zZqZuu@l*N0pfaes@DD%9B?vUecw#+bH#=|P;J(?^+{wF6-r3l-;pRQbe(l3 z^!ZG(^4q4HfKoX6w#F&)X{O510-Uoow7Knq&;r{j^)WL08};I8SpUT+60jni*OgI% z-)*s^7Ft}B5lcX< zMP4|j9r>uUh8->WrE&`FqoPMEA6MIhEUdS}+RVG(9B5lu8;J|+{`p9l|!m|g|M zde1Ahx4XP*pogcH%EkPa5B*(%Sg5KbB3_96|gEKZxHntTbyW_PM`P;&`|diBbQEJp80% zg3-iM2J76v!7PD``Nr-}g#7@;Lgvz_*+C*fqMf584heAu!h&_tEM7x=TQ~xn&}-D< zoU!@Tl)W@AyeXc%w|_~j&I|R!G9bSY zugX(4&2@+r+52za^p8mCl1=uJL3zm^Y>|e=>l>(Asy0a~U(#h)8XMcVq>B~R#bs0* zlpqI-t+R@Y9mogs@oVI(65hck!SQ1z)A6gv_t7uhRLuKoUT-J8GfEmtHAz8B&BXcZ3(4BNK=XiKPBdn38C@S3grYXrgdzDXzj4NC6VOQ zO$bAiC~!e+1MylVcrZv@-q77$b1|np*SO&HILcOL zUM+?gQc_d04JnbG-IN>AML9NI+i&OCB;|8T3rkNz?F2biW1+4)b6!MBL}sem^$R~Y zqHbzK57aWO+oRgIwKIa-&FiBVfoV1Z+LnL&m*FKYoozdgvKr=CNz_9+wSIBs`~`Uy zSyb3w%*y*LQd1&QK9M2&jtTurLrNMnRVmqnT|Fr=sl~2cj^*W=rYs`E!hC*2_YN7; z@yDR{Q@RCsxFsvaL`K%s1Zh?Ak$r*qCT8KQc6y%#rf>M#$rDVyUD1p~%ES4pxeg9P z`dWpcE3S`YQ0A-Ro5;6rNh5;zy&gxr{YH<3JaHd0CDfN#vcz%@uSWBZ`;BpYUupKs zQqO&*C;sQ;N|Nq0Yg@b|y-d1bt!PVW>6Xj)7V;7@cC^{|J7w(PZ|MUT>*51aq7q(} z8+t8wK`nJueyvbG5J1_N(C$=UJFp@7NHFGxSNyJG%6EuD46h^YAE|%a|2Mu9F-ewX z9lUk`MH~p~8FEV>l~|g9NDlJbA$r_Mwp{4F0?#T^@&Ruu(VYHAy)VU>Rbu~#=Zmir zL#wNSjgOE*bOxhJM37e?UGdph`TpGmQ`LR57=?D}sQiPQdIYkGgy()8BuC(|ZUdTy zX9H$#7ATgcQ8GUNGK=*J?K|g&r>9iEtc59j+Yw z9%5mNO4vR+h>cWGfGoV_$Q<}+A^>|sQ-YutbeAOzz4C`U7;M2M$3^9wZaNWCwt|Tb z2}KkFX2@3EHYMac|LzobIwHb3?x-vx_IMnXbO63eURN6~!paQ#4bZO=lN}1$3VJdf zr|!s-Pbjk~66>7V>23#GQ=9zId8 z<3`hxWULFwhBv;88!UD*zMN=CE?xgnKh)Hf-1{|b6tG6hS{Z&Oe+_T_kp9u6IepAN zX33b+t&b)JpTs*zRn|!bcBm(7CGt$C`+Bp8S|fvgYt2IC4eGOAl27*$i$#f8NUg!U zlG^d-`uVyY`T57L;awyZk3{^Z+mN1Lg*<-a%#!Gd3zh9>3UFe`@~`3JYK9Uflt$^U z$?IiKV$!~^X7h=^sFj2<}?10A?Q0Y{Vo2q~p<^^PW&rvhXZEBQ%g)V87 zpZnXs+P`xRsgGt%=p{UKEw5m#r671SMWE*Lhq?_^pl*!FG8;0jT$LebLoVuDEj79Q zE@LI@Lx5=-o|s$eGKMM0X*Y?uHL(H;fggY*aRr4j$BYq70MMq-B!Jh&padyY=b4C9 z=Gc)A0Y*eqJUUfD2sP)(<@RqM0ohYfCz+p)rKo)Q?z@I`l zge(a1lP|K&#G3DHZnlDAqQi}KNB_@(je%FL04v&o?>z2^Ei4>->D?}qM?u0t1)|w> zy-t0@=lq<9*E?#pL3}Rn%*{UT!kaoBac_X~tnLuC8M3tG+IL|KtoeeNfjXG$7yipX z^h=Mw=2zJ!Vh*u_!mptzb0{-u%8UTgntdAr0LrU{=PzzxtH-tO+jjD1V=DkF@8UX3 z7%SJ?h-Ew=EUhO8i1TsX_5>fXw9dCvK{N>Gz{ACH$gl$Xo{{>79Gm(cw^?{>B%@2c z-Zs(f3<+rye=lUyUG?D5A#uPpEv$I$>P|1=E*M1vL;qwlCG3p5kWe9eRueBmiugls zSh&NB2YKR`qL&8_*S#5^Uxy|cz3m-g+viRMUhBy^+s71lk$7T^n7eS_lChi_{qJw> ze?P5k-^7^rxeG1RF2~L2zc6uO+{Bpk6K&XGC3Ho8Zf&<0pn!^)lZ*Q+ZFPi2j&y%j z;1tSiut3s7NFqJ+yr8PwlhS&eZ^2)<6LtUQjF3DSk!7?H+6&Ii??F#wZZ@86Wn(4b zoD&1xMFe9`)I9R@;Ko4UWKe@R!{>6x)NPM%#(RDH9&O$%5{uq9s0M?xjXO54-3}|4 zFGZ8qZm)cf3A^fhp!Gwa#`+R-Bm{BEc8k-m@CXQNWyaNKAh4&9zu z_rk&svK9aXO^B}6UrJ1Y11z1(uVR!$*)I1MH&S8iB9_QFp2s*RiVIIEyAJEZBAkAA ziilMgZ+Nl^ver~ps%m~Cq^-w1Pd?dXIJU4F4m_>5MPk-;vt!O3rkKGGVC@pO7V=rs z_X|@^u+1Yv`8VzR-@_RH@r=Rf_N}UuH-R4Nm@4LnhKV^Lj{6e%yZZH=eXc3DEstJ( z`3soJJoS5_Lmk#GkUx7Fky{&KQJO< zcEr-zzdOgBakjROSeZ~AR~Ja^rZl(!qIDATWMXUPjL;gdb+N-HPy*1M{c4dtxs8Nk{8i*v4k6bP!R>aa$XK*w&CMT8H`iQC zFkM@D^O5<2aC04A;xb?}02>6yK4>%o*1vF1KfaE|TNU%@)jT?V_ktnGgWWR^p3i=| zXZCOZ*fx3EmA~3|n9k0%%7GqL?zxDf^R8O;>JzNGbLq6Xc0DatIdiS9F`d}CqSbTf z^mfc!Hn)M1OsB=`V-$fLMc@vlWQQWKR6gb2`)*=|q;y;a$4?->r+8*mcZbnYNow8M z?cpY{wU)<8<)}TKQk0Xgk{0RBg`(E_eB#J~$0B%g#Bf*=;ekf(;50_%fgQ@^u)~+x z#iVSsbQ;o^mt8k8?D}Is-y7c_!;(sDJN1NXY7N|}%35DA&0K1_62-1Mi`O@v2Xv7V zQLrAD+<7*MOC_%2Ukh9OMLg?wq*9lY(!3bbzW)l0=X#=jxBWFfXYAp-@!cxt4LV4v z1Wt87*5?qkVeyHQ-3f37Rjnj{lh&c?Ma&n8RjH{t`TE=E8k<>~z0Z6*NiZt}bv4Ar z1zognzU8je+w^FD1?RnI7Kh?2$`^QSe(`qZFT>m3dcG~OV~%NC_V;7AO&+^y@t(mK z=fyw10u~-Js#lTMmJ4gbmMr@{@V6x}EZh0J6PZjU>{6>(fbx1dgmYODud^>6S5ct{ 
zb&&eH$3|FM6~*-9MeJc5CX{&d&KMVFiOfR@t^!+0Jtu?=ZPyp0o8lm(h7z<|dm$weEJu z`nx(eB5+pv+2Ll`j&n}uCbFw6?UcC==?goRu1)_Bb6*10-ECq6W}vtw$3^4QN&LSX&2_hN4HS zLfa!2$^Yzl?CI&f|5|t5b??7^!I0t0moItq?Y-akeTK8z;q3+eUbdy%i}hj~8zs!R zs{3;1wYMc3qrAx1lEctL+8tak0wpyY$i@g4NK z2>$P;14ZxSOCBTg%n)dv52@%5*Mzw8^!x-_8#5XCA} z&#i~7^&n^rVMDIrzwI5`4tR_B6aXJs2ST$t64giOM}sU;>Y8?B97!hW#@Tur@nST4 zGD&@lKSj`s_9;qt=z7kqM7fTlN7F-v%@A!?rOb^=&RUaX)C zLRDMD1J)gx)uxgy+zxL4*m#k7fq8XhthrAjxA$pk!mCqvehg>ZjKsut--U7mFPF8q z#6u^SnJ_WPKuw&KOAFNP7opA|I@>)k6*`4W6aiR$X^*mEAYj zv-9E1ds+C>y-$_Jz_5Fn?xDXQ@-G@E^!w`@$`aTOsF-}OLUpqAFG#WW>L^1F9N!d-d%YY!j=LXH-j$9L%D~UR7|eY_6_fr z5yeYEiJMjT_GI-pJyzXOR{0YRvWYA)#EDCy$bz=|%uJ#@!@0Seh(eQPc>=U$kO2pI z92M4zwh!6bdUr3dwVCDJ8foKxtQ8vv*>a^7B))(5>|j8jFhl+R{H?Hcxg(d}*lNf& zhvvUwAi)}uWVnm^keu4&AAr1r%6Dn>t-d92jlh~FBFpcR7S(_4&yaOs4Bjj)er>hF zRAj(IS(Lj9&4=1=v{+bx8xVj_hfbcEL1>-HpfsksgDu-$vO;9`H$V5EWSu(<%#C&$ zm}l>>v#>WEuV?RLn)=c!4lyq5Eqqz{nx7hw%KtPVgEM#3@dhiuMV2k*zATnieOesL zvo>*Tado_5U3ekzbWN)98v{C|{E)CiaXa>cJdN^PaMq{0ZZ)p^DWTZ5K~io#J-dkcFDJJa!6Rc_0~oQYKyIAi0s>KSJ5 zyi87DQEze!HQ&gPGs)C3m0#u8GDXjbY06#13FvK8DrL$;223u;Pa*abhl~CARg@Nb zE{#54kM?O%p`s|()v+qIDz%Yac&y0iSn>Qv^B?6rYI&6QUsyjMcVn|o6}2Pxso?hW zz$u7H@`rvWjPB=5?~cjvo)tiZm~WX^^Ppmbk{H#M4DUIw+rJo8-RkX(R9(OSk>JbMT_1r*-GFpAq|2v^kx9r@B$-T>T<`j5 zv3UUmI5T*24xV^b_ON28>O6bgnku*rLWYyjtWJ{@H$SHbm5-@zJT19^Os80v`RaD^ zVHIvXU00Kz(*mnQLhgu`l$F`YZ%9ad?|F>k{M{h^y}vIwAy6REC22e=V^%~W9&WX8 zZ_%B)xIp4c{8Pw4k?46e?f6QZk+4N;OlG3$v>4)3?3ndar`KM#NS!5H11MDnasrrv zOT>-?R{AT5R>5>FupwlB0JY&4;5olLWqgo(U7FTR?_E>OV9QUS5jq`4reMQkW1?}8 zUxU=5v>`j;D3N_}NZu2teF#^n%!h_o2LI7{w{>pP`kbJd1?jJ6p1oRek=OUO{4bjS zY<&L1P>rh#D8`~a|5(~)OKcpL_eOjE1oHu z)bP1V*WbTC%f#3@ZOyn{@5kq0fIWo%;?Gr6()4qdT*uQ5&6FW39z~oZq>A*#C}C6i z{yd6^uEI|PO(9^KQq1mzD0!Cs!M?(<{gWzwC^H5#D@&`sfG%P!mv~(((6WVRm-CYsp7E;28A!a7M>kKHuyk2ZXi(a9*OOD^QZ9u{m6#p8zc3V!Rj1fROw{~lkz zAUpd_6$bY9aj@Myib4w+@+t^|b%RTjAucM&g=AOjwVkyuZ>Z*tDC~rG5?Jf4%})x< z1sZnn%LT-<2Dn#LT`L|OoanOEur$xjBn=2ixgUWtqIZ|A)esBT(w&2i=oH3Xy^b2@ zx}r{7G!dm@=?*&)N(i=RgM6erHfgduywi5tizet=ig8#gcj>7+(JOpLWT84a{|F*g z2hmW*GkCI|=U4eCzdiH!?}kdn?$gBB;8+eMGFwCJKNur%(1EE23XYGe)~xkBEe&(9XP z#u))T>!Av1A%5I0asGA2{ZEz}jseTkn8AvJNHL{DO$;zg^fmq5~{7QlDJ9N;O)CDuX zb(^M-yccekz)_&!>sakt4;!=DQ9A?ua?~D0Y7Y^c866rCftp1QgH`;C?awBDmr%Zf zi)q>6x1MlHCBVYqX1Hla-sDE=b75Aht?ztO?g%q$^>)xws*l6=yWB-hh$(N%>pQEP z%AFsAm-3KQA-7jo*QHz(QoL!k;97Xw)DWWftgK&_JP)5wjf6*heW_#ZuP5tm&iwoN zyUckPC4a{u(Irjs10l?ehcdhgu<1hQ4h;8b1#FxP3s*|)AZlzf87G^PW+bB?cNHFw zecV4SHqo(vL=ptU5Yuyx~$VRy`inA3xlEX~7(Y9Sv4!kT{JDd#Y z_7(%)U=J)?d*a)|N+65p3zIo5o|RkV*td}Tlq2Xx6y|N80o6ur9cfDz>) zK>%GD5YtrWZo!#hmis|hZtp)F&C~5prC%JATfB(qq8uJM4a4oask4iV0gx87tuUEp z=NCU&lh7VJS?siBrJX^28^uAH$o)2+uAk6C>zlT6#@K;*qo%oUFKc1HJtp@e>>yq~ z=0BcEp)9R+q=~s?YYLZQPI*P4r0OkaQVb~^8SUuO!=tG^3wH#FX7SS}8ZzB;dMqX~ z^!D3im+2kg^KUTeC5jz!&K)BuNi9xW%&8WCitn;z;nTfB-%@Jgt;7l-N=&O(&B)JG zaNCRtC1c#8ry255j#rlsYph43x?5Vmb*HJc-Bk-<=2Tz0{l^FH9cTjYkE}^6QfHN+ z!Hdp(&LPs;J%!#q8u5gdoK|Fqdck7-Omy~Xq#OYFvSA2d%!A4?d@fmb04Jfp7`2G` zO?CH(5<>q{oyvlamt;~}>fKI63`)TZ3h67|>#Z@ZLeZjRDTpwjDG@#sW{&Va7SGBT z8-hXtC5C#`4JP6cN{A;Gk#Go`k-u1{|NN7euRg!>`9IHJC56HuPhUQw-m5D;5FLdD zZk&_u5PCqRWB($}=4(EnCV`uRR`x1_L0GI$ZV;wYr+52eeH2lSrZTkO%BIG#^F~Nyg`tyus0vP2^Jwf>0XyuWJS+M znFAei;^JkZ-XD2bgO60#Hz&4QU{|s5p5K8apiWU6PBtZoh=Qdki~Jtne~!k@*8+E4 zyLL@_O{)Hp##@7*4cB6eVwe2LFW}~9hvHuMKr+&c>J_M*8#H$Z7ZY7Q;*4ut!j0Nd z-UpVdfEOpQQk3`5Jzj11MsW|-dJysmR0gX?o%L?XKQTEN0U<^wtF4$v{tyiCE*G6H z(ObgC07V0WE$Wu6QZO4ZE}JlWI{Ed$w*buo(%@p_Zs!F8s~$)P>89KsjLZ4P{Y;m< zcWglj-%STD1|S;U%M@6-#KeLr>yi%ujkFW8cS0+2{=V>v)aQk-{}+Fu2fNc9*Jlh1 
zHKqX25XnewIV=i$2b8I`auH7%#(o)iF$s#8Ci0eA%p9(<$|%9%?nw~MYoU8w*+CAp zflMx!G_pgI`B`|6lEu`;)XbK=ApxZKK+`283UY{qq9MmLGOwE(sa#-G4qZOuQOSHZ zCJ4g*$j3*BbquVE^6^t(3%!Aq;w}-BqeU*lH@u= z(p@9bc@kpv1Fu8G_wSw$^jwi29eBTf#^}L+`B(`F>C&yZ4y!--dlAkM6bHBU<7*xQ zlkOqr0=PQsh&$aBgcmxJD8_ZaenkMWR|V)`(o3iGqG zFgQWtvW%`JLYP<}&2L<6DJ^s;0u8AUOC%;C`j*k%EVU!;RteGWVL63RlB3IHa?!0v?xQka@@^D91oZMZ_L=#>c&M1Gq(Q3 ziTX2IQa<)Rn&|8){`P3>u3GVpqU8EQ@jc$h{Mw`^g^3P@@yUg;a!=huHYsTZ9&xI2wq~ROMw}z?`Gn9h%t%GHvwRjrmAIdo8EG@ixplkktjQBgY-hO> zR0eS;+wLO&_O4`+q896Y&!cFqqFx0Xv6PfVFY*FM(O5=N3)_*6>s^qv)^onwzZH<; zGV?AJrUprTF+Q82zaae`|Ew4?2P2M1l;fMlCe>UhiDH_?`BJ%DTecq-)E7?I#9kq! zp%_H#-9I>|UB7{*Hq1m<-?{~@=8jv%Ca9z3#IZMwO>Y!Lm`1DH;4kkW4jXKohY%x= z7Doc9T$2q`a41njSLo^te`dug7Trr`0H zgI^A|rmebh*6Z;1QpttJU$Z{$xOea4tNr(X&RX(7_#n-xy)Z}hmH1x6w+-JuHUmgr zlh~e!;-V)qB!7rDjV1Bb&Q1aT`7d9ue1HB5SM@WQLi|Xm^F}2O24gKXvfOFYd30$W zy^OY)iaY)p)BXn!MC(2K2V3EcA-QnkoB}T@eDLw}{-6NpX3f#*W;uQd<*ec8anXQ& z;a{22$mPOIx~n4%B@H{f=vyF!b#FiKnBBN{a?PW$JykKU6&Z616%C@;Ry?=8ZWRC*lJ)zO*O$SBXfahId64Aa zj#1WL_z3|;O@$%Na5M^7UE@u<1d7uyx=c@>?qcBEFA{#JNw_HDX=k+DY-%Mf{~4W; zUqdknE;L@tBb!>MX3S`L$kX)~&hkTW{%(-|UZ0d6zANsV+jOyHsALEUEYB{f93ZmC z(817RNkE{e+OEaKO`A{$7wN*LNg`BIxWL{g%up7Xq#~bho;BWx?w9tiVY(141IbI>GZitc$s>%Ln{bPrOMZ3ey1U1^}54H~>Icfck@S z^h6O8zl_HkEjkWY>mwl^qN(T?vn(Ru@61}N3H^X|V;Sq#Be>B+u0iMT9QaRs`+q%{ z|1(?W`Kg`TMj97gJvd}+*MN0OC-UDu;EV;6(i{8dIh?l_8DYH*rYMwwfdtMPCYwm& zSWkwGZM&$U{4jofQHwnMkjMlgzHq+{1B^vB>lkPS$j#OuW!0jn55RMDvg{nVU-&FP z)90?jACv{*xJ%f$orqPzCVAjiLCk$s@nz|P5pf^16Ec+8zjXGuHU|O z^K?NNy?x-4_BUb5_K&_e69hSlFP5hquQ+}@>zCQOhqWf9j~EBr9ghPBWgkX|Zc%)^ z(9QWFZ|Jh~>xG*2)m70FMs<^GX<41o$+~%uBGp~M2^cMPj)-1;oOzyPzn>m=dnK~W zUb?(JLbQxkeQpZpm}}AFORPX~sADCrR#uqgD2^K|Pq5k-P2rXo$trR-vGT>CsyQsG zH@#Pwv(c?6r}DVlpXyidC*l9se@Q<9;GLvM{iH;@jI~uAc9$h8oTh7=^pg^Do6z6z zL0iBF7(|H*Z3M*&h-;7gHf`%xR#WL#wjL5-yN!k;7B78!ljxaaE?R`gN~}~xgrS%~ z&}mEZOr~OGgT=}G#;lPa%iAo~e*@EYI5~^NSL$4CHiNP*I_G_S2~=D=at(8&A?t8| zR$wlQM>~hSe{B#~+A)^FdNl^w_eH2_XPsG}a%$hj^*fsnqepM}&E1C(UL8uB_e?YR zF3_lv1@MfKT|@}{P5oil+&;T*$*x)XwLISPB7Lg$l3k7u!9NLZCbq9Y)PP1@3la^M zN7^&ty&rt>rY1$DI^)+{Oo3u|{qrd8+bHO?*B>~g)t>nz?77f1{cq)*x^;^`*Z5!G zu1(fhb(7WqVSV-gI<)`S*R$S#7cIKdt!UPfYg;*)*{F`9 z>Iu5M&$V1M)zYC8divq05GW}Vk^YR-uow#@2^n5EgDLTg+dv+b)Cr+oQ=vdkt+KNkJo8D8=HTTAHImvp&%@romFEh+Ym zF|L~$aQ)2G#g4ZOA9-5(ds(iFDa)@)iGDR}YSz@Ok>-(R-s^+x!(~~oJf<>2u?**F zap#ia8JC-RvaG4}drPu-#nYk(FB@Wm#g5&lHT5ac#+P_qj(1*p^19|A|og}d?Q>XZq_fkwVLZ(M-5*t zceYAweRRaI!@}2aNvQeunc{OTj+RDw0-8lszIW74TtrkWaA4bq~D~LY>`+PMGE75aIQFcD1 zKfySM(kqisa^g8fm{?FkI$1xs*(ZDpYP=~`k4E<%bNrLggOiW`{CR{yhGuqn%3-`z zOt@4`bD+#xCb<3A$m5A8Z?ond|8$0VRNd6%eS-6$0k$vKPB1uO?V!%h0-v(wQPz^r z6=y4>b5$BK5)z$DhTe#(cgPSH8s>vdNC5YBb*!8Yo8tae)5{p`MO8qH=L~;o>$Gb6 zxXp@>KY#UMd{_I>^nL2Cc7h~Nc&y!@iOW~fpDI?mP0uRbhjln_JlukF>xm(Yq*%Wu z8@{~6ZAB#H4K`PaPLt;Jc!Jt#WitdZJa@^hhNr>|IQ+KPMX`0`^YENl<7qFS|1-Wy z3dN-T@ThRA77o4bu)}1)u@}2AX@`q%X|bt@H*5G-dk&0#;r|P|X2}IyjC5%RRaxq; zhy5$3v-TD1zd)J;bGA=$chjqVT@OUl_h(*Ac>L}jRc1p>Y8ot!^JC!z++O|AU97kU zb4qf(XHGF5ja9*WQvV{}L1Qw~2pmMQNqc_mP2JXT8W!7$AKkzOByHfJ4iLQ(F0MSi z3o2!wVQ#nm7-CTZpOS7n;&lv*b%L@NgNX-5wa>{eJuIzTs!O@4{9Z22Ezp4J)+71i zjK2~4Sbgc-r7-Kb?OKz$C%NKS?hnKD-yFMBeH`a?T}r>7-qNK{D~^A*Jqrs!ZM}Ls zaN;0iq=^O_fgL_?PjJJmoxCybeHS{%pd|=|vUJDBIHI0IDn*=6(<+E4&VUcV72X4P zKQr4``6k|M@4yeCB>=cX7w5tc;{*c((9#)$$^(Ds$lN*n{wVV?gNi}rChX*$bPg~Z z#Z*mFprjq^FchIMai>Zv#>=I5SyV?X=${rv0g zqp*`aD^S}>h#%Tx1zFDlqQigL2RWd=Ce>XLc2r@w|JJq&UD-{#RVNAbP8SHvgy!>6AiU$spTysUBJu} zxQU8xzQ?1Gp3+*>g5zv5`a#azAqvHlaJSiv*wg*2*|j9U8sInX@J6LtaZuf2lZvYE zFywGRG;2c-UbX;@rKXlypJzbdZ5B13e%#W+~AT&ip=?> 
zAnkWi62+#m>Oj{76P1eQgOiT0mRiL?YN#C*-bWqg6OY?cw(BQ~dHWh(?PE>Q| zX==z_Jrbh7laK(74`o7o9n|1=xnz8cM0ojdU8t}eP17et;0Jm`GB8{f%$_{AFpVU$ zVLB;(lMkZ-AC<_M0PqpB-a7y&_Sb#>a1rE#~>wILdH%+#ebgE_Yk<_7LAHt7-WE^6(cpikE? zVcDsui|HK#6Ohw2*<$x(F(73zV)vA_oO55vIr1~R`_oG=m0mg|DYV?xu;OM(XuQ(2 z1LP>0T;8}XCx=ZH$e9f&I$s@sR3TX1*1tor`Ug$CMmE32{q!E41MU%;oIAaYx`b2S zAXoVdm{uy|5-aAFE8XRIT8W(bU5uHH^ObE^y%z}3&nJxt1QgxmP(;JBY+@PhK7%>&7+}wJjE(}j z)sf0Dusq1yAPok|JT>SQ)88y=J{;@`ql?djZH$~&1Z-B%?Ep#u<3$mb-Uw8US z2`5k^0|nBeG%ql_q``4ev9mjtqi&hV`P%~Lj)|O`QBw0LPQk?JpqWM;GdVdkjdCU$ zsb?sIu(Kp?kc1f^f=Qf3UXp_-2s_7JA|rK+qcT7ku=XA6a;0dZ+eDr_Yj^2x5J01? z%Zx5{Z-ZWaL&@brBCtQqpf9|P65f`0P&Nrq6U97;-g`PuoCxK6NZ1pFdu!;$gSQJ3 zX1!R)t<7u?TuiHp1(F2$(J$VPdcZ%YUVMN4&R3o)(b60D8fSRRrERxF;0&I>@>MTb{F z{3?+bF!jx5PI0+3gJ55?|GYYrS29-)u*|O*_}Z zFYMqJw(v`?-kj7b{$71KN3j#JHR^C|EL6Z$`JpCGDtg%O)l%C!A#rm;&1Thyrl}3i zNjH9u+h8h0rFygcffs)5oHv&l44Jj2EAF+%WheqTYi&z++e;K{C5m8*@b!j z7Pt~%Fwue@;1@fQ$tel4u*eJWQ!xVq3>+=8at%}#msGi{1J)aDuZ)u@Y#f*dp0;{) zJS`x|)IyIgR!ztY@VBtX#^P9aCiZ1&p~~SG7Mcdc>O~l3=kO(8qpdGF$60T5P*w1H z+=YjFn=h(%D)2tcGS274E8{iS$9<52_e&4H4ncxLVR^y5@sCpj4p~g`&(j)M_O@g? zh8bAqOzPd{YdR&BUm0m?l1r;p9^<>c=DQi_|1ami{-R0kj~_z{$yKr}SQOq;7dKH~ z;Czf<&$+Bx@bNb#&Q|=YZ(FM7DR|6Iw=HBcNg}v z=I;O2c|&hiMd6R8ChUo2bIa3f3N<-f?cF;88-VmDNaaAZ7ee7n=W#gm{zMieqa+N7eK zMyCs5W%OaOf1zbRomv4Mj0UP`Ec8Dhe8x7HGo4b6Ap$}Z>S^DG)qg9Ra|ooqQy?p- z|I#y|XFTo_Yvw(bhSlS)rx|r2o82!o%!5eA9SDL7QdQZhk8(ca#pBdHWt!g?m zaeL$}_vT33Sq{}LyX&`eZXP(g^y<C$jAD19KH@((t=4mhtqShkH96uU|Ir z%&Mi&KfFD0cG8yDGxuG2^$*ox=H^$Kho@(b`@R>xZJL+CP8+X{>zR{eKb-a357)+Z zUgSNg@D2NXAqty^oi?IXac}||x;3>x$1{M3C!+V|r;l01#C?>0lTzlNs&p2ahAYIC zR#O@Ii~@@^D~m-%^B)C#Sa`1Zb&=5}fZ30{9-UBcG>>s`=Z)V+=k4r!`IS+mRsR16 z@%{Jj#NgRTV3gVWB|s>%PL-ar8C9KUmoZEg&E1N#P~1d!6Ecp+*k{<~oYUod?-Gh0 z+>Le){WY^O^TjIOs=0Tr#_|t{`&JRWK!D57XqzlBFH+wRIHK?vI-cbXI9`%*v;pDdK%~KbLTWKvL zZ#7XxOFlg~N=jT2v!*iUUvJV>C2M)ZNnP{FMk`~Wbyp^yS9W1Z-aY-uu5HH-re8l} zxy32Jesy$NbxL&Ftf`Bqw|)Q0i}zJxOxdNf%w|pHfwKAQP?|bue2y~qI&3^VEwL42 znp4d_%`ube)zRraqtWSL3@1WadhW(sckX}jTkXZa<$awZ{G#+)kI647U!QRZ=|8*V ze|dcVLyhsjb#0RM8uF(GZ6*XvT*Dp(Q_&zvU>?7!g5PA0+Ap; zzz(cRj2T3&F!J(}yq;}e#m5YGpRruuhEnCqXmFh;%c#Vc*vu%x|mWj4;(jiF;M zW?ELK+T8oi*I4zcB{X!*j?EJm-gucg^sK^i&)|{XklCMtcQEm8)5oJ9#NGNp@AlPGa0R(* zU-frSv+Spy+q0kc-M-rR)U&hlGqb9Jk?-txtlb%W_6n7|a&_{8@0Q=$)%JnufMn8Y z^btBrfBEI-SyJg{>6n$t0~_W~m{fntV~?+tcT@V{n;Gs_gV9Hau0EW2=ugS-zYmrF zOSh&vGzr-ILnBrwFa&bP*Eb#Xr8g#r`~=BeXu55K>L}|8znyQ4Y@BcjqrXYUK1q*aHyi{p1?3d1ohw z^G;s@AwDJy)4|KR;nN{~>H;710qfD2rQc%4oPv5?c{S0`*NK~`U9+k$q059qp7TxJ z{p#{JsOKH~4xX>&(o+Ksb*_Y#pOQ*MG|~o+*SW%a%Hv3g*OM<_&pf(4b6n34*CtrE zzf5xZ5>(+wuQDeXcD}9n%iPRy549K9J{rp0f6w-1hRqLOrN-?xRP9)({cX?7HTzy} z`ihxp_rq874E*t+CUd;O|JL}tWY6U8?rB?$?DPFo(uMB)uXru0^qc4&FkL)DT1JZs zYFPJ(a>yy71`DUv(CjUS$;nai=GP}fQKZhVaxUdd-U{Q?xqISYh$nZU$X+~xRUDFgRFq>zFB?dK3_CI}2v;w%$9ErD4)I_r+!N<8nMRsH2ZzU* z&ZjTNgc0PFU(*?)d#L>4GvY0Qb<_h;ick=QNKWSI%WNRM@a0ew;{|+0>eX(}-)(n1 zi#f%$`k!6OEc7U~Z-9T_{+Fd_5EE->o2Hsy1ohvYuSwC4)*q;@1=Jnh-oj)5we=N0 z^;u`8qe8j^oUs`;ejC?qEt?@9*~)RLMcobv!D>!5IODQq@HcKQ@8RSCBKEA|g^sI1 zgpm4thJ1$R5x`6LXm{U*Q>SKr2(5JRF;E5{>`X|4QhNDv zU}Sutz6qdNhqNP21J+-6-$=t?XJQ&L%09f1$f-k&vwu@UZr6C&kVxB;2*0EAPT=PK zKaK-RZF7b&O*}NWTGgK0WjO9fwmHvU^bCsYea{}NI;5J-(&!)WlK9^BsAh1WFFN*p zeA{y`U^6W?2a%+NP)UB2!kwFSSd$CUr)x8JXlH)yMmDfpeitY>bpLno>^C3&@os-w ze_DGId38cmQAne;{gc|!+&5_lt)#f$nb>gv7zstta$*3@)(6%YZYbIzW*9br&O5-9 z^UoA7GEkh>)ImBkn)Vjo10Y|~OZx!u*HKtU9@MY%JB?K(q*i_=;z?S3bx`pXuYr6W zR9B2pO@q9^A}gYDiJlwFz=?LC>3Cw!?A@}0g_d&pO^7Inn98GZ=&UMzA=^cM}LLySW5H9K^3$;tfL|>^sOqW#+xg8-+x9 z01UkMyT`<~#FQl`f^rgH?d+9fY{VO)SH6!gL8$sPM2$^X)@&fJ 
z=4SPyRl_QReJye*p1sX|+iY=U8{+0~E-eWjK>}6loDbV~H!EnlE+}764%lp?z~5Z5 zn8qeYYN>9Aw>PsN#@%g@eQk(rBSNpU^W2zi%r=CVXB5lOYGUEw4PFsmV+!=!KR{&B zG)0sdgDsec0&5VhoSTT{B_PA6(ylw7e)u$Ppy9}tI_Hx>Vim#T``B?bZUM>KQuGiH ztUR*i$f~Dl{F@#>56xGuZt##a9hR>>Dp5QvA1Z?(j7Z+bBp)F#xek)pn=I%@5ifL! zrOl@rS;4!CJ1Mq9Z_y*8zRax5Y`CX3@dk9q>sef=@nXp7K!{(#|JkcQ&VAE1;v|0HB^0P zCTfZOP!pc7vo2~3Q(qm|oYnuqjq^G=8~Klc>$*2opAC#7^s_QU)(-d9tKat7d3Wxd z8FWc~ReI1%lbetmm>ZZ_a^r~SY6X6-+ROY=+|wS^SeHhfo_{8Ht;us#_$0g zyTOHqJHz4|;u{BalA3NF@U7Av2rQDSn}gQv|7fCDSa<4>z0ds}yfJQI$O*j_bJd;l zN6^T`RUcS0qHk8zISd6&!d`3Nx&<8~aO89k&-V1I=@P)P_mVQdD+Jt92Li22ZX6X& z1FS57xWEy2+GM1M^WJ*0=G3J;Qr>nDGnOyCx~esME0@n*ATy$zS+>Z5c6*uEnRJC~ z`F#E3Td64+qyGq?M92M+Ub8vHbPk2zOKHduq6bY^P#Fb+)N^q(iqekHHMu+-O=~hT zFkt#QZetq_?{+kG6q34-AvEzTC>z(p7iDudwUtJ{FLP6Dq{t0}C{ONggkuV+^-x$c zq49a+9O!a?ETA$oh8uZon6)Z>-XZqQBKd=>@$Bq4BB&8G|yxk7F+99+b%`r8(=)3m=J`CT^ViNT^+XSS>{-)y zv`-bE%BTAN=JgTZ4m8e>-0+E)UJx>+uQO1DQX{f$Gly*~LcE^;NsT@!ke^@&J z`X@y`4JzlN$ubkA){DxciHWdT7X>=DxSp$7SDGWju0;AWQ;OA-u;CrvD?XX~bB6$C zH#I(S=GNmw_deJCK5@6S2&?s$s6Tm9QkRk;8t2}_QOr5C7U{#6^WT0M)O~kf(#c!K zo$zq1N`aX%KQ-*N`W!-(&uU>v3 zza6Yg*chVR-%~(;gpb9NK31tOW}O$&@9U{me1EGtpO{bv%()4Mjw-2J+D1DLP!XLb_tS7N|Slykc%FCmNv+j(n9{@nVZRe1@D zP;UBVREb|qIV-oAqUC=I$-yS2PpA0BOfH@ucQiXUyJH|tlM&Y`|3^Z0|Llhct2B9< zympCz8x@L99p+h3hXyfVy>P!)LWsF;8K4D!O`K6c;puVWZ!2+)Gq&r=1EmQ}BTUng zjibVOAm$Qy0HF|{_sPBbE$MHkeT@M8Du2*##{e&X)eh#d^t?`}SUaR1=sXX$@P=l4wB50T7d)&) z09%-SLxf4-K8D_sR5BZwd#x);C4y(qI>!t#qnJ^;xtL+erVkxRZv;)DSc5fzJV%~W z`VHZ1`LvZoZavJ+8ibYVGCFXRY`Mw#wNx2bpz z>l_o6(=*a}+McYcZn$DrRnX_c3G|CW#5~4JqnzM>a zm_|IBV64%ga#DLQ_7T$F|CzP?!V2NEnRf?H+>3n~ESjXEeBLOHUfRGUE@%FQunK7) zf0j({aK77L;!xSiT~5d71TrN4~gW zsmXsJoYF;`hu;yHsNkc(2l3B$#?m!QN(&9r4Vb+WN&r_v;m`~y$`&JzzaE7zP8DYv z$s9Q|75qF7vz@kjvZQDME8y4?`kex|Z7ZE}){H$@8gMJ5(bY2WkMaF;9crw;*x-{% zLP}iaWU&Elj*xz>97Ik=$^6HSP88$w-eSM~*D(ZVQ*d0<+-X@|6alqGY3U7}ab-D( zpN8E}ZYm5meD&d_nRTJI@Je>EzE7Y7kkppd!BjeZ}a z@%bm1)$+UO2LDv_cN3fB}Qf?o&0aW94;=+fhGfO-Fb|WzX~78 zs2^hD?zIw(~9b`V7;rALfJs<-nU%#UsqEQ+f(|4UEVbJ2Zgt_1+Nm8=ZE;6EUr| z_U{*QFgP5H>KTehIhdw%3=cRHEqE5CXl8#7cVIK62r9mo5!{TG)vE^RFH=k=9htnk z(jwqGD$9lurDG%VIT5ogBRpvE+=0gPzGLbVXEjxOD>%q1>}|lUxa&r;RS~ns!l>xC zq+cG~wO*6>(TR#CZeX)#qWbRlZV6i4xjg-+kFM_C=*qL?Op%Hla&kCamz*OoP{nAw zR}CffB`}&QSIA-Jrz2P5#C%Fx@5ggOO685NzANjO_KW##A}kf zs7q#~D{AE~wE|>Wi~6nJp<~3rq!23oJFX;z&FcgNK~qae|Abn3Afr7G8JF0h6SxNv z*q@y6TC&0?2VJAXKM+t3;bxi;JcR_P8?sBJb0ogn*=1N?t+)9-|E2S{b#oVl%3D@@ zJX+!bjrEp=-j6Z~1JsNk*%GT<5c&>!QLk=cTLSR}IRHm6LjfVG;{lSv!=&T!$OL>& z50&Yp4LjsO**NiFRgBEhCY3NfIS_oG4tNcus0oy3PzgK{Xrkn2dTBnyeiv9x4UG`F zP7O6_2hRc00852|tF+p{1RIRNrvkI0cDk{P9_cSlW|dAko)C-^JxYISOtQ^y~q5Ro+P4?Z~&Y)f6+0_2{@p7NbaSCa!sw3Wz z^GdU>drPEp$bO&`>4fkZq4Sbx3-E1-Jl#R?SR$f#F^1?}K$IWz9;QvZX9|c4^j+C? 
zf6QR(5%OG~xz~aVojbM2Ofa)%l>%+%-uev}2}pLoH?%-S-~qkyfkR1R)=D_b*};)D zZBM655(qNT-}ntHU0F4jaD9M3#^X0FOLgixkp4<)$^b)4;ab)8&IHc*Ejp@F&ro` zhi!YOu>;>tokJs00dVOLuLqT#2nIyfM$X>rnx-c9;|U?pU+VDC|m)fotT#I7ikhq>lf?@haYGSVf@1bKuFBZ!Sru91puqgKlHhm z0sPC>iyGdK&(3&|ya2Y3j&cp}>wx1xRg}xUM%ylZQ=(MR$Z4qWbi=-J1Ky^DLa+O= z8ghnq0FJ2?W|IlngLJR6kTTplAQ;c1w^&jHTt216vUf3sPj4__mQhEA#58iOf=v+_ z=jqGJ^t2jr#LkFWoL?|TkByjY>E|BntdDWn`M|a}C0twWtuU?j*5X@B0&Xn{vc`(} zM8&2`|4p8eBX6F4bhUIN*PpiL()UP>uI_F=dHdn>il#qizoebuXTqB}Au~d3bwwqx zV%|_KrrEU++uJs)ALWg+K`JhZFpcn0IBatlM>dAEyUE+EyZudCY*vqA>=D;oo>p9*^dMcTYxdw=k5wU*v_KkFUp#5mKHb z>@^IZd2T?S!omaCJy={w;53YZ*ojyX6GLH4FFL1eKr%7JigfA$NRXpi zJ!-I0C{l<(AS7<}>a|21R|1LWLMxQE2YU$SfH3c;PR}kYJB+?aJ=k$a(FB!7XP_yv zwlYH8m<--F#0(_xF{Eh$(FZ!gWU5zbap3x>!%_J*xHufRbg53-7K>;bjN`~|K(jeB z`}fiL|I#etjo(o?*NuWjbH;f0>y&FU{uIH0UPgw3-ta^;VW!ONx}~020b>{bluX4Y zwQ`)&fc`{ANuuoU$xjkcEMZiW?Ij*_&T>x1eF`snrp)yD=L&)O&dMd7jPb1V>&~r1 zzAHYD3>8(0UaYXhwlb;gN4Ys9rl@Y4t}0ELV=i7G%bGEP>yR$P_ePb9bZbuIfpBs#=)alBT?M2z9WSRtmEv(+5JXN z1C!U0_&%H!Yli;9zt&eAu5Bgq+b8EAk^NJg`f8AsuS2qnXFocRBv)k6J6V=_;wI`+ z)Z82AsA64;iv)fGUsuP_h~*U2ROkxh99=?;H6SZT3}r46y@A$x5igGSS%xJX>Msgo zh(C#uZ6g_(fM53lMu}Ygms@|w&p+yaXg76tzx5n`uWw1}`^%iD(;b3UF!`+uE|fp` zTKm1@v$a?E-TCaNUy%#XjQ{oW-S6l9mWY+rrY|>MzI*4h7k`c~u=FEIQS4%Yhb_;b z>zra!dy*AHXr3*Hccv;C3QWo!K*WP)w>t^#l_)>I3>k2Lsgo=+Ai}&MZ z7*6meXrN8kQ?^&pMAVqT%db_hrCI|(!gT8bd<690Z_1zs&R`#e7(>@$^iS;xHO4ia z%*w}8i!L8OvibyS&`&*%tQt6m611A|W<2UxB)-UtX2|^JCa-j-_|M3Lez#NBUZ*+w zwQTd+IXf;cpTF8@a?V5EoC9XYF=O^8OUmYG%NPQv2{w11xvm%x#)x=>fYh8nK%-W2 z513J7sFjsACdQT5ODow-lYu3)yn)rYP{>vrIW?{?JuxI|R{yQ^=UK%jy~D9orA(Bo zDbz32PhHFm=^e(+98tEWaB|_~)KL1^Hd@;RZ>G5^?cBW4<&;LlE~ZIu8MhSbZvNi< z#p8Y8(J*!JV(r=bh8U4(v#_+m_luCf*<6ERLkgT3;Lh-Hux*TXu#I+ZiUD;6?c4-S zq3?3~NvaaB>AReAg2Wg8DPzV`HhuPceG*;`YlLu&$Un4(UB;deSbJuD8LqI|rLcb> zju6^1w)6zC)oYX-@JQTVX2y0I*BqD|wE8Z-iR9;$o9=E>YA?9~A3oxQ*L{1U=UO=5 zHE%)j`VNMD9jO`8DOlj6jRGfHte^9ZHOWN)J|jCJzD^BTWdkY?G%IZ7N%63zEXTxd;Ch35To5+cAVj=Q;j%97?qY9dkPOP_l-kEDzQVhFj z3`Q}0Q5m#%)`@1H=5#hSIyp!SR|?OmGLXC?eN~Xbi>5%C_jB{dQ0-$1gLegs_e|R3 zIK<6q3LP}TT_SFy^mAwm5)ZT{A4M=J=c=^YLAZaLDb21g#yALdKYv(C=9s+TF^Pla z;0L+`oGZ~R!IW*F_{ZxHMhte+Ed1qVH)n<7Aht*bht z1gj+3Ixg?wE$2`%>`%aKk*YErAvd9;h&jNBr2c$AgX$x$eA)VlS>t{g=Qo48G&o?d zSW!f>-~%N{gg=M7=Qb`2hDX}BaJj#GaK`hs&kuh;hi<*;%ViH%)pp;#ll0|A_1zCP zR)&2)rE*F0f2dEst8u{f_uAhal^LzsJv_={R2@@0{Vc~*LNL8f&MlxDN4F>p0%RUJ z+gRCd-tT$`tZW{nuOU2bXmYJoHnR@1G>v|Z$~Ocw)kx19(=du%%5tyJ620bU@Lp3; zuHhLXJ@QGI8fG@=rRr^}J}aP*h@p8v6QY-JHiYyMyw}mz(J(>dbNDfV%v5$m87OPW z1lDL+%BPi{!=_`%uC87eSex8Oftt1sW^)Sas-Rv>>3zh$bn@2B`>Ah8O?oo+!1;h< zF8#28;;Zwwdg8gm*yfM@1w2(`z^H-6o&awd)!~c=y|kXx32qdHvFu6C6B!B_sJ6i2 zG!+F6#v!3G>M&#qwxp2434^?Yk8nV``S(0}DHLpmj>hD5m}IG~)K>KI+kgx-K! 
zIKJR04e9Fccm}5Y$ztcj$6BaN)9MiSr~D(Xlb_Dij>5JoqSVrF3LXH^5~fyfgDl5| zz72DlQSsR!!FoV%<;ORN?y&$iCr(>CBeq`SQ@2I_uk)9*@K?-G?N#-!a=*JvSsmNx zo*kf(Ifw4al{Is(Paq9c>SEvI^ZV+-* zr#Xtrc5(Mnao(EfBRy}wuEMr1P#SU{q$#w-1{%`P*t?Hnu{X-O?|OX0aHaFPkMy;B z!;uV#Nx~xoqLh%D1mWt&ht;sz8R?wk-U>z)Hu*~vZg;0XZmm|5x-arz(w2+cN<8pN z&y!9Z>d&Y+lY{CX0F6y*So*bYqw)REq$13NuvLcwBsqV7^`4`ff496}KaYVIlMlR@ zMuh~NfV8%6;Kj@x@7$a=moLA8T$lK<4?vQmd#LEz(6d3+k2l&kBUyg86@+el2t0sz zKf%Tck~bu6&Mg_bG@_6m0u&)l+n@H3n72PjI)v2uaq|Vd8oIoYEKDHY-@u8m_nyAe zgc>w}pV=pu?7fQ|SzERbg=cj50q>oUWp`yvZA!ar87O~G_kpCV-VLF@6m>-;>Ke#> zziT?GU3y~WpVwD^UPil1He|`&H@$RjlryN&0gMi=W6}>!BFD9{P9hQ4!Rt;>=w+Yc zBbQ4&hMO}fGc1RuPoF{iVK;xn21C)ToXQB74qpx-D zK6+Jnc>inT!`4)V-Iq=)6tixNQ*Owh23NEH;QVpIf- zfuWj&Ri#ywTIpCx!XmP$sKnrkpdd8Xu-Mf~SVce-)G9OosarvG+EJmM5r>-ld~s@L z?w#MgS1<`DCnqQQ&bPep`z-c*@T-_V)7UG9}8;k81SOx;c_Br;XYf`|rJg80hnFk-W`sG?29ZS%~n0$UBIc4!)ta z&ldtilmnZ4xKT$G!<=ZvGHmspt(*%96r%q-hy;ag5;Tdx)16L$U@EfZhUb*FPACM& zbmDZjdKdi&Sne@T2LNsl6WNrN`vJp7LWq};VNf3eh)nR=>;9Oqz+aCyebM@24Q+T4 z`$1qu&-;tm!{fU)%-i+O*scu@t;atu^!wtD^CiWdE0A%L{FX{9^#k!h>zTCgS;3L1rcMo>uqkSJudACl-0N7Gw0}`9`GhD6wOVjPN3}opxA*k-5&kHVqIB6d+bGYa#6Y^sCm`ctUnIvLMYAg&S3Fi*p`QY{gGI34ekjH+?lm=(pBnzefG4k@M-#O_ zZLDS~068_DH9R5$xC>DE4Sd^P8jeK}CO~ayV{k&)5t3LsvZ^DRj6hlCz~S6TXG{BnUTO>Y*g9{-b2I{;;XOi6q#0bqu-@3Z;q&3QK4=Z*e0K0mhW&}dQIGpf^RsFP+)IT&z~8p7e|Ug%P&9D(^->+Aei`G)Jd9k0(>s4 z976@xQQa`Knuz~n&!0W+W=xB|EU7R5e1+w~M$q%MZ`khI%i{H5!j~Vpq`fSGmLzUK zgcq;(9hZ^~<)5=J4f#&&`m=ZLV-V><=|7f?{AgK0duoiAW+PYZB)prhJtM2Uqc(|* zv2+v#N-WhT8-^7_()othVj^gT_BAY!vs~bx@^=0;OzN5p*EU@bC|+GZc)6fqU~i*EsPrOfEjFwB*wz@adNUNDgPnQ%6S3`kG^EXGZ2`GTKezZ8dhFn%MXmAF~!WWct3WB#hX zUSP(&P{@on^WYUSHoYjEWgh=pZXPPm;aHeh)2FsBkyx|CJlO_o(dl*97c4F@c&^;A zxTnL`I^2Z(0*axMc(Q0z*#^F+sB&>%sHe!Vxa738u0E8`y(7`E;=A;apJi|@ab+{rN=UcB}m}*o!u8PN@;~7`JHH&72 znkbK)C4Q%ic@u?h=7lusVRCDJ>LFDc=BqgE!&tvyGz_6U23H^YX&g#m=b=(bPW#*0 zhK2gmY0b8wp$30DSAHloP5w1LqkTWV|6Bhh3by4)ayT2tno`s6zzIHg!$`&*0Lz>N z&f@{g0KI}I`x5}OE?t9NJhmapvIUs3Ye$2Ta%q~s4Dd2|wg?5U@sIC7HqM z%$42_+u8X`T<43Ox(p6u+@bSPV1ywc8eMvJQgK(toZ9b~M`o}d&#KRtzld)b8Jfpo zL33i__ND@(WQKTsaCI8`K*cP5W2utycGTnO}eI*ayG0@8o$Ca@B#$YsN_TP$|+qr>C1S@@!9s33%m|*$LDHBznp# z?JLVGaUmdyvy0?zm@1W6mF_t!Z))>Ycq|S{H}_>^5IN1FTbHyM+Vrk2=qapEN=i{y z6RQ_B)6mmZ3=h&&fB&SY6e66tDJeC}CGFjJdDDrD-yT%1%ML&0@_(ft0thzYQ=iQkpYY;_P$QofxRXrj%yV5k@z!ybvodz)lkl z!`lHwr7y_m+oA%~>B9i^&`TPlCrc_K0&4)?*7&#FzEI*Y331sXys!^H#X<+`fXKhHc8H0jcL8s#l-0>E7$xpVS9L zes((tVrc&q|M&K-SJ6#S?X_CZ9i1-yK`FtyI@T9K@5AQ3(jIY&F!NB~^pt@6Kem2X zeX)Y=YU?i(wMU?diy8BTJ43WC(nmASgmo$&!P55gn5OC}$xuVFN+H@L1JEejnxn>= zxf8ZRxHyS?$wP8gW@D{pf<<=wKA7&dx3~1b+a?4OC_y1ZtDs8ak|o|DcEu$umvj#|A;Z0bAyK3f7s=$na#bBktwKIh(HSiaggj!qN9&25a zVy2KRE~c3mdOwj?3euQ7JCA7B;S_42=|3$p4Cp32>lJhw;FMu}UiP_V9-+Jn3XbbY0oasRevYjo`(oS| zc&@r!De$zM4CV8j@)XH5D&WeP`m(N-50WW!2#MlrC)_cQ0)WjAOuaS%;BrLjoAuRG z7RKkJZ~Jc%mQD7@#sIfYO~0qM#F{F7;8;y+>BLRooA18o<}a6!*POf@e0SKk-Hv8L+zw#y(CbK zG6fcIsG+>)o)t8fGJjRC3G5N32k8Kwz5-KHq-ubs?!>2_2B0F`OUWQDd1k~?D>2NN zVY+=n>>M^2Lear1*Hy+4>CFu!mij%3_3zmY#iCt{Y!VA~Dt;IzXt$&+yYkHXbbr_K z?owv%BA1dI>F#0AFa412+h+DzI2ux_*hDjjfsAVGE7#+D>FHiJ33+Ig9JE)m@rkf; z9#+f@m3cfSkVzdvp$Vc?#bKP3%*(0RdHKVb7i0$hNBQR#y?*Kk ztI)jUqJk?4ujfWex^J_Mj4_<&Sb5`Jw+Bb3L`uRq){&|(wRLLK>7NtGm+QI~M$tyo z=#AR=5#l+mO%UsJ1NB@(`5%b-)+T?fqy$dn zk{}DMG!beDO)069SY)+ec|;b=14rk-&N42oD{lI)6Ojs6F%#>o=?e`#SKz!N4$)@s z|8O{|=n90uv1E4%A*x1vpj*BN&T~J1+Q>;CNlf#V=?l8{4-=pfWajhpx~ykIU?mjg z2L+4M@+(8Y>ElQe!L;jAq1CNNcv}2MX@xD~1Zn&dRF2n04CDUih_1T zzOBpJWmWogoipE(n}MSVZ{X!su;TWYSaM7voxB{EJ0U!haZ`LNsyzxiBh@xFzeeJX zji)LJM3i%ra1tD=BT`|1C~z|BdmDm17|3tLyMabM_@t}Jm^{N-yCk)36r 
[GIT binary patch: base85-encoded literal data, not reproduced]
z9$(1T?m~eSkIMmR+9x$C^`q$SL-tW9n>K*Ty%FPoC*FY^RZ#PaDBSIn8XenzaQ;vH z9uy}*-_M)!Nkb7x{zTUX);qBXBL#Be7X$_os^ky`xPz8N2-bvG4K{v#8zett-6bB; zjjFDG!Wc)e9ZIYBQD)_l*UQ$v)Lp`M9gps2lpx`O7kgiJns~yC^~VVrNWBGwhZ-Sq zfi1)!JYJ|HIC_7Zf7#TMGkl?mbZ>vQrQPqpo*XgJ%l**Eowcju=f^9l2xjK$YJABk z)}p6@hE1!wBBl0(t`2|)aU7h88K1AFH?5>pCI^Bt_l}YL&g5QK2$pJ)4Q&`v?bef* zxPP0%niGd2l_Ph*z=F$7F_G$ZPh~wSF|^<`>E#|X>S6zN z6}N}z$g4~1;5!Z#fe8~u$x-6C-8hh3!#qgZ_m%7DbCS>ge*e4h;51?f2i_b`%y(W8 zzA%A6ElZ!KY)%xe2RvdlPc@2eVlq}YTNaF{b}%#WjdA0szFw{aGWi5Xy;|h5InDVY zG@qn+if07M50fIQ%qv43zOGKWHn3c(YounMST0cn@!>~K4w8jY21V87ZOQ16uqih^ z;T{&DYgz{h83rOIwFo8DKm~CxG$SQcbkxI=IC&Uoy)$440hgt=DmZ!+{H=YVP|>mQ zLWTwi1~=I^6+?IO_QUO4x!{8iS*xIqB*;aLTe9AIlW4njGER1_L+w-W^-m>cOS#C%u@ zL=);ZNRot$FewwEHyruk%cW8qlma6H?upbD_jHn{(k5#o;W$C2OTNAq&R=mYoJ2JM zl_fkn^hZljDrtxSH|tugz5`rJD8-MAThflRKynjOdMNTeW%_G2G&LO;D-72iKOQL8 zva}Z~?27}>`tU7j|JJ^LTU_a-diBey?6O2XS&?x^j)kX+i<^H8&x)Q=CDoH13zwB2 zkyY`EQhA1&QQ~5e1($CzqAe+xZ|v0Q2POv-w!x_e8CA{^OFxQ;SWnVwmSQ-@sJt`^ zsZYEr61{1G{1i5cvjQe7)x196)Bvw(_&P!m6_kSoQ=DKb&Ea`M62*Ljn zeL@fnGs}$Kc|6BB28+j@}5!Z{9 z)8f<7@jWHZLKiSxA^##!E)cRuO%<-oPaYW!ZFGvx`gSy|B@7RTxPDVf8+_5@>V#`< zeY=ea@TJV6x)GuLTh9D1SL*BQ`+f_<^e|}jS8Zr+zONbv3|N_tuZ_e8mdNVi-a6HA z(~_p}3l|s{%p;D5#(h}(VXahc7Qj68G8~S!LV-{)BnU+fq`dwVNfcAIBLg{JtxtLG z%F~}h+4#*L-Z{7C^FQzX!7#rf(9Md7C@c^{0H!!t)UtSzdtTS z;uCu!d*ZNMh3qUH^PYZTq7VsRaMw2pckylOjX|8z56C$|w28Esy4Q|+36{T6$1vQ% z@f8N5h5DfpH_i>MMWqm!kAibmx z@w`eB%iDenQN49YW5be%1oTGX166k~w#EGU^VEq)6&8~@AS`5Sw4tx6^Bcgmkdu7s zAob^XX=$JZx6&zR^86X3Tau4f(Vv@3Qr=(X!dS5@T_KP-w))Omt9VY`2&)G$2!9Cj08W-AgXm@c>&!I!C&1%vI zRzh&A#{_`T`2`LSo|Bg6OjSu`b?c~mZ4JT>+M0p#-iSdTley6xI1;)9!4yj_`G`0W zmiI#A+YF6B_KIq6Y6H}%sFy2>mAW1&nf>*)BhatoQ zu1SR<;MRfi9^&q^{xwjhfKx#w@B8ncHRogsOB;RIqpY2oo}r^L5L+PWD8Q;TCv`@P z-l*h9$rHQL-BC{(e%vEeMm@0#@0${{e1~ZBG{N%3-HUsKJ7NO0r%j9DXKd?GpVy$! z5Wju^{0w{$_oZq|OKO*=Cvoo2As#HsYe*=hREkLU5!PmiH8X;Mhz_Tf7taw{_q{|N zmlaAP0Hz4Gk%KywJys9lFYxgZKNmH<q*0#e9(65j017T_IW|&Vs_p!V-otnP6mrmn9V>p!gH%nkWnMIc! 
z=Y~(Ymp-8)ekdD>2X!-l_@O)GncOIz9~B?n+8*x>mY5J^G)4uhoEyd!9u@i51%;NUhIKiD67l&Mpm`WRZ2-7Dw{m~vvevL`37Ou1}f+XQYPv1(8_6QjxGp+V{2FG>w^-3IFo<9)+WVo=8JDeHc6~m?RWQhQTwWrNR?o zkEWGZ$6m*oMh4}+!~#L3LQyhRSJQ-X$@Q%M#Kk{!yVEAQp5>=(ZI0kf;l^dm-LO=u znVxNT6Yzt+j=}VYsisd>7rqp3YkTpvSg}>!RM=uxJFM95;qWk}>DK6O4m=693$#(K z4A|F13D>mr>Ztaz+Ed2xz7_M<`;y$dqd>bLpvVMHSv5)xBYOFIY}fD!8pwV?1Q^UH zZ$0tTHsEOMoAE-e~z$SAtt1mtxyuKi%GWn<|A2e_d&9jM;KcYqTZJH%XDoWuS>2XDf*VSG@R2S??Y~fRat4~I% zCJ_Hoq5c$yZA~}jN8G|SgcHly6TumP`6whM&qskoIB1gBCtu2Dq0VTU+O+f`3Y>|l z855>cL{$Fbk*&H}UD0ARrS~UG&1Gr4a;_{_Ddw7I9?K9LA1vf;m|=W%N~PF(D=V7{ z;;K4>oEO_%DXx?(H6>lE9>M83AcFb~WFWfSW8qTk3P(rB7W0DogXZR;#svlHymUrA zqd>}1b|8XS&F2(&3(GlNa}#56=1@)79$Z}C2ma}GY0gllvX9VTWjT4^yUJSy?Luod z7^h6#ZbFJ3lehh}$E?KD2Qg%h?bhCzJrTo~kVS@cU8mOkPJHN|N|>p@y#yc&_b%_AWX(f-nzOKb5^{fAaXxvH+q`pLTCsh`V+$;tePSDt z8+_>mo)+eauwe7@b`rS8_WrbhsSW{V^YHGDimHnWRU`KPa@f|0@Er<>kWJbf0}Xp1 z>fMX0lnR!S4do%RX<->*NZB1t(I{<*p%k}N#u#X{z@+6OF=>$*%@Pr77bfNQfAn9D z;q+(W!v5TLrgq+ZJ{B@@{Py~l8X$(oEW?uInDd&fh^0Q6Hx)giP%2jRha(l~Wa?Dd z5mPgy*Can5ez#e{9)nwnm5!nx;hlw@^cYr^plU;e&zAHJQR!A^9dQJ#!`cgTSMl2) z8#r5V78z6pxv*a-qpV9R44i}5U$Ej`1c44W;(OAa3^SBHhlPfW!mO64C@V#*a&dzq z{sb$$q4=Qlwr*16LB$F3{hRD6!NzL^B>6K|z7YgS)P)exh^;eR2C$6(xx2TK5Z_>) zP=Ei)=^+Mb-xsdYhRBC4Y51%Esz`DlY(Rtcy-v+>B;NRz>!I>}_Zh3}Rz)QwK~xYg zk3CsTqK;ho-4>i6IA6(Q;fIBEyGwOYQ!a6W7`c*r`a6$P|V^E<|6aYh+hjtXJPSJIlH<5)B1i7+c4*QEB+oUzKg5A z*5WDyyDII6VWP+t=cAvT7kHZ+c}sOuT@9P%-zi(f^6y(V_cc@b=Vy2OpYKdrZP{1X zS8s&;a2J(Rg!uH8?vx-_M~ZtRY5daSUk?_wssqQb*;v-~bw4t= zJM)oY*Imo*ulPUUT29~hk6iiSv6uTk(l@sGc+wW*SJn}=DqB8jmalgD5#LbnI^n1{{dSgV7%d4lXQd48vEKEe-u za(MY>MHSA=m541L!QfLdeD3~0hH4;VT(}8Mhbx2qU!VKrpv8q8^~zCS0CP3 zbdKY~ju(D{zJ6-7hzv@3u!Rz~2P2CSSC$V3<1Od;|G{&tT>0*8D^1DOMV&M@XHqkk;i58y8_I(9ckPr8JR_Z-hY8lVW6+c?rDDbOi z%St~Ph?>A%Pkg+yl*Pg;)9bI~ZOJ;a6>eEYg~R8^w!f=c?}`(a1=ir<*WV#h?mx3*l`tNoXZp1A(*lq8IejQj<6&> zB|VBffwb>yw~xsF=wJSm#LEjGqm*vGr1CG{t{gHk78R0LIE<)Cv^_q%uA_b<_CZQz zl9bNC+&aM>69DdnPuB=l3vi+VK^3Pvq+)K=NYgfAq%}{Hk0M^D8Mq~fQs1lVc`7~m z2sK(R zINE>ggN(h)85OHolO1L8in66X#>(z#5N#EtwTYHmOX!O9WV?cnb_lpA>EZ+`a#Uy> zn%XL4j$JyGzZN9x(H1Ut5YI_SrewNWct_N9>8Q!dNbJk%Qn^uYwHjpc2FpENS8%XX zLtii3I9r2nzg4kxwXAMNHDV>z=twCUxR#Ve4i+yeiEKBT2h=8w04a3gY9WMtBvuBP zk9`s{Aj9ds0EJpHKQN<>>jsIo5J@2+~Pq_?LXcWR52a?GJ z0yI6F(?)cSPPkW-$Kx<;-LTD$H=9Iuk@~0|OIIb&i0p&u1MslV;w{$@k**9he#rAA zK2dOhs4JMjX9yaj)XQzgZ(MK0s)3b*)^e03*&az(cp=alCoVKmxLEz|CtJQelS4JV zcwo!t{#G+N>!a>cW8K0`E^PELs@S)|xP+49wPYZgpuep66H^^E6VX z5p(qXcl~#Qi7lo3*}L{lrchppB%kMSN@-MUmP12=jk&`a&DAUox^y{}DKy8j$B#2x zYSK6+KQg4PW5m?E)T-qVd*9^klU`}s?#oFk(Xym%Yj8Sr?;vQD5;6uQ@5-R>PpI0Qb|ZudoF_1lDahm2~e zNw%Jnv@iNChx*AYp@v`WtM-6&RzQ&lx1A4q$AF~qhOPki$pF{aUGBdV5zP+)>0_bn zYA&{1)+1ICY8fGWE?aL{u{cdjRa26aRHNKeqs)PDV4H$MSMaqcb@1U6P=sI2*kOpJTD=ElYa^+g4vSeqLs78oC#Y+g`MR3Oz=c0iFP_l8Ez(UEA>>gz{C zAyhLrF|RKuHphCNEaVFgnoq|s(0=9gb&&S`-R%p_xs+#rwy%Oh>B`K;+ZmHDBQi6d zH;zZtUpCkqu&rFTZB-o2BHMAY5oa1_wLu`)Nlh0Y7N=y`+G0mq79>*D=;=p1{?&iq zZ-9d{jC9X#iZK{a#hoaqUeu8E+m0W<{>1OH-@%77lOl)xe%pMlxYs?#@I%#Y@aJD7 zCW!8*mJPWWfTy|ncc)XTm@jUJEC>nCD*E#5cHi3{@Csw=m_iziA!?gTYnw!jp_2?6 z^GI)m3T%92P{%UE{2k;5;nYd{MRjw9`ZA$(%l^^2t6u(J=V)tlStTN6v{(JUltJz+ znNUP?l;rqGl3BvA1btd^+k*h{X{w?g=>I`&sW?8vMp>wRV2)jAGV45SRp!ES#lgk9 z%}RO<4?k#%4HD2^9ORvy&zN#uVG1ANlH&s?7BxnX|<0q{G03Q~4q_Y@`+YYU?~ zB~lG(akTcjESiK>RIfQtvuIVaqr%4DJ%OVzOzxDr(3G8EzV9XKD#Ls{Z{3h-n8@9W zQz2?Su_w;ow(|h|uH2;}+T17^JnySG%;AQ%s7_i|CqwG^jMC>-+4c3-h6hU&QJKU| z6b>G2j`l}dbG7D1b)sIK)v0knTG)AjRH~CxGCmCuo<=lmj_^#d(@)LQN_@<&lcuTK2H|Vzx;Yb>qY;W7snb6L3`wN zte0N9_#rf*-9^V&^s{H|J9!(AYQ>-`FlsHm>iH 
z+A90<3vxzTMEr@l0CpCVOvmZzwzs)O~WeVV-LEUHks3=}3v5jGp_heVfo-LXFj3 zUM>8LjBbVO5(3Y5%vRAn=ry4QjfZ+PLcJ+(gdSBrt95jAsox@$u94onc5easZWll; z`ffZi{t!abf*vxGU=TrFpm?frIC<-R5S>CY8u^J*N>pLs01E2l6m63im)$-=px$Qq(yz`GJ z&~*LY9I~;GsB#l}fEWxKFkjFSs@>5)*%7SCg40Q;0qr#?kJ_Ccsgb795gegZFeJmE ze`->6R9z2DJT}X9#_F*5(27c#hhf@^4AU5pF)Dl5Sc@w-+#_uB9x@V}W2C~^R#eZ_ z&BBL?5(vuvIeoIM4k&$zet00o38TCXp6=*^k&Qfl`Y48C6i)DPBE`js+fII<6oZq4 zwC~R@N=*mK#!v75)xL?Md=(KDci`9^nK*V1d1j{I-bGe{kGA@jnEc^*M`FNqeOq_W{uH2oA zKVlo72LZ8|trkM7o7xdCgj1Ov;b=8aMdZ=*hMSD*jitJm>esVRjjr>3y8Tp9@3lg9 zwbXu1(1{5}?An{@+d2L}2exhapW63tP5pmd9m!jR3a8y`^VjCk z)Hh>qV@?o+uFj?yP^f5ah{;8}6v+;lwHe7AY#gnbpd(GEU6Ag?g!UN`{s3 zl)b#@8OEa4Xs)@4ClIT+MwzWktc`WEsZPdtu#b2VM_1X3%`XZ`nJ>%PJfY7xYDN{# zvAL7xjdg?2-gZqTJ7FNwx?bv4blyj_|h1sZ8O_v&vmMpPen;p;iq(!Ci^3BVy48)Gfu}6RG{b@auTl5GAi5)vJ9jm43QZ56> zRFN{-BInpn-K-v_v6NmMSQ+I>Dw;)kWHTmLn==dT*G$2t9ycEqVQ0r6yL{{g|FeGe zeiHs~t^}kO(^z7u2Pq_sRF|R{JSL~Fz7oMk9E~S3G+Gmc_KXjV* zZkw)c*E*&83)a@duNPJ~s_rDI&PDxD?Mo`-tM{`wZsCV5_8orhS?#+`c6?j4@23e98zh4t^?`M0-YJ=DGoO{gI>q z(~$+t(N(DsLxfBsFw601OLchF=$=VJb^pESa1H#}n9#nz+IIldbBloiv4Jn)A*z## zIeM5*>Su|`;YdCgDRCNjfrV|jd0+Emqf5wDBC$~to39g3qTo#k?AmaNHW^2;+em0k z@`PvJ(POWLP4r8`X-6WG5op*RAYyg#z8j?CQG zv`96$V*23XMFWvVdxbNvTFU*3&7`{QnRO<#n{Xi-R?HF=Nd`V)QikEfi{v`w*CFE% z-DQ@DK?3dmh9CVOUG;zUh7qgu?o%vkTnIu z=AEnQo2%(tvy{##>d}VfjVfqGtNC%wQ#NN9yQlL7rOh^OPjWprLHt>(Y-W-EyW2io z@+#uB(NEsDXMsiH$ zflVuEDvnWIWuzlYY)WP1jKaeQXYJHHLns}asKjMQD4+V%Mrkqp)`)T5-Z`)xMJ~@< zmqp+#t{;Mrdrh^y>r>Z*$-oZX8554_;njdW>f0mwS8Rg^{!W6Vw?!tcNX-QhFsFVk+Yt>1^;0W*DMF|JIZ5RN9SILAc6-o0-jLTjL(Eb&u zQ}|y9&-H%55?~&FM!1LguPPHxEgP6@Iq*0#-cTZ+u&MouDz~7%>FV#_O#*XHTPutyyxHWvJ%-H*eJ}}!zO-lsR zuR>zIH!OaBU6)(ja4zcpFU6~GJ(DK|J;j8c$PIpuKKuGhS(YOE=5p~~ z&GUPQ>`Pt+m|R;nXL7th4?Dtz~0A!5bL%d>Z1%o-fOt zFU#AC4xQhT&vc$zY0Ib7eD$-PwXVRfCw=4e8&|9sKalNPp*!pLl`%V-t^jE_@AFr8 zZ{3am=FL3AoBNMn^o(+RZ}HQid4~Dlyy-n95j5LAdE4d@`9L}NNMhG)kzsoc6Ei$E zF>J6soF8VUT~W266%60dQ_~Z-*k-(}sJHdeIFg%Xn_{y}aQoFVL5EfR^5IO6=}QOi zHty^N2x(g>X{Sdr}B$ zX5^V3m%YdH5E<7P)xz9bjL1d+s{*(ZKuVPipxUP^%-XAJ&%wPMVj_Sb@^~ zlVlnPv2DBpUQocO5}+S8iT+}k98xbA78U-0m%?3F9SBCIzmUR3e_yiZZ~wu;PLjJI zIQvrJ=iz#ONo=J|O`t}fL9m#@D)$-zr~y5J(L>?;+3>d)R}5!aZu)}YPr-$Qx+{*g zM=Rlch`uq*Fg8>?27GR*$;)zRw7;hu0!%9U%W#u)k=&bh2xJ(_*V)l|W!aO@72Q4G z&PA2)HFw5?H){-Z5ldcK_4L#QU*E*j#WUTC@7;n5__mEXMbVFLDc|^R9M1b7?dG=Q zPQTS=pG|t6r(@Z&e_-E4`^Xl*_QN0d{FwCo_y5KGXx7oqZ)&FiZ#ly!IUB4P`;el{ z^5EEBh`~+}*{<;@X@gf0RPgynTKL|OCYh854S%9+pygf(Z5-luP*=XuI2mM&GUl$$ zkkN7EIdP%*zjZ_?351Yayz$h#2Tdg8EtH)|gl0ax7#NLx1D+E(0lc!^T=F0@-!%lh zj`Dz>5bE9c!~(*|xFnEqLn;*Z@nWz#HiPe>v~dFw1g}I#p6L(XnRSZ#gZ@l^0T(Ph zgpUSFNCU_qWcdIK7n1maOAI9W1=Tp>09;=HuV)l*)|M%L*^{qg)F0rA@_c*BUfjgVP}Goe{!z;LC+8n+DeQ3CH_)*wUiKXWJhG`vTspJYAml+nsmwm)2bV-of8@LPrs<-4KavItY9hC$70keA5IXBsg0HGEsa-tvs4C6Wyk=pGV%5kzu zQ!<7ssW4VaBY#)e<1p5Cv_22azNq^ze;jz&MQ8qD@+&OqU`R)L!r&X+wYd%)CGirT z5L02`X49(%*wF$(jn$_%6xLT0;p7rA46hz|v*6ufk3)!fEFkCj2WJ#oo>BH%7Q!0E zW5GU10$Uv>0b$%ea;wS9!;9?c0Fz&2?5nF}zYl&)n6bb26)X%d%U_m}6~oDnP{`3p z*tc-s5jm7TC-xP6yWJ=+<@bf>&nKQQ&fmBr@m61P;`yS!OXwKRqf~K5$vK^OAul*O= z@c_}{Oo2Gr4-FVAh?xz-90#7hHfdxV*gPlk15#>Ei1m~ZfdS)K5NH}FQ-l42)w;*b z%_vb3XpyUri%ZGemmG``M-P$p5&F3j`1rEX#6=VM07+F1T#304(^*{)a2$kk3u7I2 z3E91dO$@XwxyJa`TUoBvP)L-wm2yRGFc5ORR zsTfc=)Qh{#<6%Dus#4`s@c1jAl0}jQ3~WfVt1ESOQ1Zv+7Ni+h-Dp61azEn7$K|{F z;X5}Ws7$8GG3Wegdc@Jpc;V&{EhR&{tw}JQk(ryM%?={7zGO_*etBQL(?FfiE)o_a zv2WPoVC9_+fp1WewjLtMySh2FG}N14C?)J;00$l011~%8` zy`J})3r%-;0uGCunqJ*_O^*i0C*Gy;{m#CQ{H>zi&0jJAZG?3<220%S(2XOLF*tn6 zUd=Cee$J?}>$0<-(~>~eR3yXB2(~XWhVkbD#@iEXk!hZ3oHQ}k^_=S3ohos5D**meWDQG``Ee}Q-UhZw2oFE^JIK;{Z01OVL!SHs4NoXDiG 
zHF6jfa=DFGU)Cpyjimx^AS?HCtu&hqGC;_mcnd*V`57`M-fVa#tkOt5{#FHfg$7 z@>O1`scs()yJh#UdE|@C76Z2{R_)b2_tm+Fnyu5entZm^L8PXL?1FfuJYM4Ni}OsL^Z_Q-MdhujnT7cG?1zAp^;VE z!NM#1$3&_|j=rkH=!^E*Zzcc1Iq%{tI*|OC81QxoZOK(6cW1Ns6K15QSI6 z3oE&5C8Mvx1ln?Z?)Y5kh_=>59$RQAn}A;;zE?{<##x~Tz4`_ARr$tW8oLamm%K;FNqNv7CjeiMzkuFX$Mxvgk>n z<7ssWf$ihX!eg~Qw+8D6AsS!n5de`HpGzK*wW#O2@At%Wr%yipnyUNj&$+)F-+#>C ze_!S5pZO##OwlIllVdsVYmi*xcY4U(1u->xA8@Cb8hHdp%+K_&73pVeF2qF`u9?!s zkZ5zMnaDmeMW)ZuA39|(Dg;jSC8B2Emdvti!68le&?qTs3rx?l6^30*hjRiGqd{i$ z^boww`b#?f$^kfq*=iPWq`;Skbq1yfX8P43VuHf#e)5Yu-3Ft3Lz`4g>s^f2Cs8oT z3ow#Q31PqMETnk^Li{{gx{|2b1)rSBGi-cG-<);;TJ>?Z2Y= zSPA-DoG*7`*+n>jX$+>yr7{EtAb+-S5zJ*r%N!|&I^FrFKVdDRI9i@FUvLbVby}K@ zRRxO)MPW$tm?e5S8)m=iT;_%XX21t6&rq_Zr1crmf(|O#W1A3bagI~Uj2!Wu-M;9y+vc(dB8Z+&E!l@;-vgFaB);m(VV0xvtKADY$}Ma1!Y(i8{rA zkAZi#2hbT(q%h@$ubNu}q)3C~KY?``@}=Z^lgmE|DdJdV76*qkY9!)Sc0J%77U1g^ z@I@ln4%7Jd+xO~@20oVk*Z9I-7+-69uY}TjgTbh)*OrIhFS!lwX>aj%gnpjg1Ll5k zyek}l>6ZtIAAbOiTMbo12UhkPTY-_C0Vp47F_3z{Gs#rCSD75(O^1~Kj72)xr0F0c zA75xOpq=4fB~4HwLB~k$(^{t|hfe59C8Kz;*)l*$yhCfEf%*d6flV3a0zws^nIK~; z9$kgcyRUDRkBQ!U)n3d5m%d}yZOf_FMzD!{u-rgYI zGeOeYAX2?ix5}Yf-lzDfA6FDV^(me@mpJmB9r=An`Tb7(en*~W$kJoeVw|gUp#)=s z4cNI9vIa1L>#x5CpURzO3N+$+VfH%_7Zh%1{M`H9r0Xyk%_=GUzAdd`>mliNp22L$ z_rBWY468YK$l(gSORJN}{SQQLBw0aaQW&s^CJt{MASXkFSB`kx0Ics_-$5){(Y?!| z3+AUmG`>nlHjS@$$zS7JPa>{J_WpDs{5TKRnF#9``C`WU6uBLu!rwyrqSAJW$ej;j zuF2bBlZa0M6V3iEWQjZjWb%-eg<0cuWPf|WQztbBXhDu%-;%g_ zdQa!|P24x-*~H2-T0bqRa@PDq>+O`^ZQ|xv&Cp0vU9GzKfKX3lc3vPcR`u2R;Az;= zM3uaFj-E>(F4m3LSoqzuk1FM1l4^so>-FOo8RGhfdp_J_ulLinXNPE!Ud}qp%mAw4!Iqxflm&1R7eYVVTqUXWOl7K}->>Zh{#QrVu z>)F2|5UYVmB*e@w6=51z+~@JY{ng)ynOwS!7J+TjQV@eV9F9rpJhJM(f_0&U<{!;) zq1nB{t3}ZCExZy6mEigSVg&1eG|V9$F-XSrRQ%h|F~Q9WIo_XV4IgkYjT!#JFAf^o zMy%bUcl`w{F|hlty(uh*LU4({5JxD*rTpq!=1_Q@S?!@OqQ+dd3EUXJr+etdViBm5O(iu#Hu~N(O|a2*8|llE%^Rn_WXt? z%ATz}XZepjhti&1Bya8YVdILPv)nRum9N1$M6r#-0}ytg_H6~^Xo8c5NRkznTI`O{ zhA&0lGdadU!zP}ANo)fwowuVv3(3WE^Acg}tg}6Tf$Qs(ng8$m@5pWp?eBpfj%%Lv z__!Pr$T04$&a2L!;N0a%zD3eB=Zi>!EXe7o(}S**M6BzH_3+YLmRPGCsCSJ={M+5| zng^Hqx@fnE+SFtwuS8B60h6I_P7n;93IemAQ;u3Ywh0Q^<6fBLtmzd02TNgF5-@S6^cA%K|7S$|}tJ2Pg+o6;)V^@wk1FzY^2>Ql; zik*}+xA9ac9jv4I`@nWR_zOGzbpD#Zb*MtP_s*$>E`m|XB2G{?nu10P;WCIS;s(H7N*<{ z_N2iKWV1bu9jX~=C*SlQ)3zMCxh-+oO^;_NOroip2?eovn_vFCZzA=~K1h1;ZhS}L zXF=Q1E>0ffdqPeojy6^&>KB!3s*?Rxzub{3SCOzV5$_7|YJ(xW0b|}C;df%d5E(DZ z7hQ zxq>oY;!pxTG6?6lq%uPH$#YL!bb+FNJf!f6DK(%#nG56l)9k$zU;WMBhtj^}S+B*! 
zo)}U(lOBp9{rpCiCp|FX?luIQnS=DMP7B^@uik?j4xH5%y$74Mg=IU{;h^ZPfWKO$ ziVmC+mqWeT+x;CR1QS4qMN z0*VpU)lnm-|F>Xwru@U+C)j@ZikEWgP~v2W4H&-Xe|{y*j2$h(mHmWQK+hC{XnY&Xkt?Mz!Nveq$-QFLq*5=dhNc$sp)m@7wo}dxHJ_uiD zvU`48Zz%WyyAlj`Q68R8Ze%jeC~*{uxk@Tickk-&f(AN_bzcNSfF&8*8VVdR5bj2C zmuzWolj;M?7f{(p1+;$rr{Iy1mpD%o-V3J>jcbiq7klV*=ELZWJrRdyN#UrF1l`8x za(sdOO-~Z-mL8(<{ncTn_P@rL9)oiN86{YV3~ciAB1!z2@KdN44RspuISm%yS=mz5*b66%O^b42RU&46id%#6=PmtkTA zK0+}23w9E*u>pxiZA+R2Xoe9F<;Yk#4lX*Y8O)pCC5_C6Gj05c) zCwwlPLn-iDn9ul$&xOQ)#?N(g+SXn|6xJ6FnfzE9U--jn8|UrbtM%9V4CQ-+8)!(t zeuJrtVum{Y>^K9d9Rhxf zG=bckA#Rl6a%NZrjkQLVeTc5rd9dP=utBeZ^K&$`9bom0t`jG#Lt;Od4Jr3K=s7_- z45drlxD9Ok#vwbdY>D=vBx+w4eXB;;}G*a5^t9_mGT{5@!I;h{MKN|4;ib zT$^6pFV{>>o)FA6H|?7cly8+9TN`h6NHfc-?X)=zKKuGxQj;{@EZ|HcX)2t+NF(Gs zGX5bl7=k3DvK61Y*txoe1R3HJaYtWCe+!n(KDtFBB`r<;&}c}3O-cO7l6lDC5dWMl zQbU+6a;9UW+nw13J)ze&ueoR(hzHOaxFM(Hb7F6_E2>;g-FNe`xxY$8VGBiK?67B0 zI&^;JFi;ZqLPA+#`le1-ZXwKRm$Qn+LE;LYH6h^*X?-d6@k3@U=v z!MpHr=$W3gSbCBC9JNB%(cf2RP-hSeFIM1Am%Mh_85AD-#-hIz`4IXU13*8a3_?!dl3PJjH((E0f1zmK&Z z*mvf=%7IXiAD@`f`iuxl(0X%D*K>Zwt$7!8%~xzhfO=WtU;Qm2S+w6Sd%y}CEi;6V zNM_q_7VLMwFXM0=&F&>66r>)!68;N?vWRa_;f|Y|glAk2UI{%_Td--l?ux)_#LFKI zlvA#_UQ`%TWCGud)XZ{3#&`p|`~|@(hjYd?XOj^5bXc2TS!G@&DjOHmk>r9-YvT*5 z58`MUXGjuWN+Gkq2-;EzwC|TtkPR6gIfCNLt@ld?s;c0%fvCI`$CPn^XlHJdTn zY6#@$&~^@?rsOTIy@;wNG`_z(gyL!Of|+N3%d!E^Dt6#O=y@vf~OAa zM#Qy=Bi$~tV#jJit{PO@2F*<3-Z2Ds2e95CA!uYJ@&>s#fP@7hJb06I&`RLsz>n2c z(VGE*G97q9K(P!pO1b~e_eHRap+2FAhgwGNs&Rp^LRrP_2x{~|!-#{l7ZkJ+aL{3w zVpoqMcSZ36Ee?eExob8K-`js-&vrNfaZ0~mbII z-yuvuqO7k~pXxdD4=6_0ugv8|mQ35vRS8%b%TDYu7PK86*9v3UablhFle~dJKeJ4f$%&g2l&G2-yl}Ii#B> z4U#T-M&iDb#~GgnSpo6;<@YPCoqV@Jd#M+6V#V1||O`mnT2(LL? zr}XfxM|BZ}Mp%jYTygS&tVYMUl|?v2_+cEyv-0ULroafDuR3CBzWAtCOaRbCGhiWh zOr=#TUrS?96$YKhl4n$@_%3@FUor7BpEaXC_;?rfdI>Zh0u_xLC2VvDBOQWcp2e`H zvsk8mEU}Twb$Eh?dU9AYmdqe3#zCmo!m>H%p;{)__~DLNCDu`dVH3NIQ((|ev&H5Z zJ(bQF7KVAOTjYz(j)N?fSd^KgV%S18g$j?ww{(`XT2yRuJy>JFoZYU7wTbc2Qk6%_ zS&Gwqn38s>Df&yRfa*1c{4Ga?t48%}1!~#~wbFK|;|{@wL4B(F4>^{%c@zy(h-~ zYdqJ5zkn*nJV(*?5EtlcS0kdc7 zf%~UZIl_Biz+SPPd1K3EhXY39t5b-d|FJ&z06RY|p>|VZO>{=O1r#_I zfR{d{p~eINd7&i(Wml7_;W0q$r!F{*&qbdIPcTiNhrh;{-l>pepB6h0xORxREC>DC z9l9CQ#3YDeg0xJP4R6SOC6MTeftRg_Ez|_d1~Sob7h!p587jR#_=VBOIi_;dAaCQ; zm~CDkcugA(Sr?4_2g=vUpt$CaftQD|WMxs0^SR>_!3^ja=LwWyG1Q&UtRZ1-9cM#|h9ujR zL#5xrr%Dn^4=nLKi53JS)tXlx>e1G=Pw{9}Z;`5wXKe z%76-h0}9#&d$uA!`z?eqkS06{>V%YBBa>!|)NPl5(h2MI%Lk~ZP2A>3%cY}Xt11dyYU#Ac5Esjym9h;^z??ddrHxcmW~@Z)e`O3>{!0l?t^_~i(X z)oq^M}N4s5E57W*Wsh3R zF#IpE=F20|d*GUTSfd76C9kMzt&npv{5hr~#|#lirn}UvbsBZm5BiIO z3l94G3q@NC&h+~CZM|5C;K-V5Jy0nTZ?3CrUbH330o6_3*w%j@qtPsU{vTm;J~4o;gq;N7$QZz zx-an-bq7gXnAGZQMc$CS{Irbp@EF;b=jaU4_>%rJ;;X24{e5cvv`}*yP5VAzbGUtS zMBr!?;W8j3syC{>6NR&S8Ome(srTINz~BQ>D%(9oZGJSEqGg6mvl#I;U;!g=5Hy~X zG{K9e1~-9E(CHE~Dx@%;yX!Q9D0!=MIefXN$d8@m2?jqbidSOP&?{m+*t;1leN*L2fNE;QG4jp;+%z`>ELn)0wavQORz zq{a_PswrPl^^yuzU6jc(MR^o0fUY}@!WHu4-hSkfQ9H(8uKUqyz4ukOxt)?Wq|5zC zk6V&IS!Cgiw3Q|tlQcUI7z$caTAyRG(($n}H0|<1X&xo4`VF{A@6A9okZF5#Qwj7& z$#eYi&_PUXdUcBo*rhr9b3#v(Q9H*FfA$FU^CAWb6Ob+HL;JAruk{(@>$m=|{kH^J zSa*`S35?w_PzHqIfWu(CvQd)&8A!2ms01oUy%T@|fQf!U6Y%Z`t_$hX8JGr&obY6% zO0WxL@LHKK2scE?j&4{C!m$wWKLU)sl}8Ve8~h8p(aL|bLaZ6vL z{Br9*2G@4Om%U#+S8Q%r+Fot+Fu}%3qZw-68gHvzZLAI^spMN}Bx!Y69c(Hz;~6Ix znsMDS!t8_johRZy3FfyQOms7|GWmvU-aDr)4F$<@#Y@OLaAZ9W*_fVnHss~$DZ%k> zTQfhLRURDK)HR=4icCW=FY$&#fHq-Y0* zG-sW2o7*Oi>H#2!anW2`b~VHqOE#hT*5dRk?Zl>F50TS+S7c;(!D+{kt=K4#YXLhM z3BV?Blx=I*Z}|^6C^WuP44w7oY(O|(*15D*VTZC<~Q0jGwihYX92tqrJKNoz+%{k)wuC0g0f7vq+y)e(pVb};CJ^Zy)UVK0`^|MLvv;OE 
z_Q6w71bSEN;zB7Bf52|1JoM{(ce|mK0l?jLEO`#EzANtsVCF6?V*$Qvad?AXLV440 z17Y!)#`~H5COP}F*Rod->DkCVBS2#+^(2MkdiVDGWPUt`pn3MGfyOpa%Z~wfp z|4M7VS$jw9CA9Y~19I0ufdwj~RDuqL%xGw%dzQAm2$U>!PZ-icC!q*nf7BzRG1vjV zrGDNs-7bB<`hwlR{RerY&h1M-XmzF_2FxlOgZMl}B&0(n4=qF}-gd?>4u`zG6-Omc zVH=oXFVr7rrY}>;*SKA+kubT^Ebyp3{)=1fMH~m!3jj9|u719+-g6 zvRWG;s5h5%W}PLm{kM1{(iJ1QuoM$IBx1NB9AYINxtQK=*Sj^d!#46|CW#ie6FeXV zcoNAaC3joHS2TYIW|vWY!!P--@ddu4LbR#j(LlA^S0P^oA5=E~=f+oIrR5lRn!6+L ztcgox23&%8VA-VwHU}#?upI!*;`=$2xdGs85g7xyL9+R`dZ*USxCtF;5Y`T&J+-eE zj}S;U@jaOucMR0KVJ|@?E)$q^qT{p~Kf^Eg84IsMP!>{Uh^eMmfy)H79e_80z5^IK z`u9~JWWkPb4i&mwp&M)F>gs?Q?~{f(FC7m`oll~m^_H{cM~JTi6DgRIhaqUHYX7~e z?Mu17X(5fD?hxXcm@v7%^bllOMUdM@BqNhW;eU$0>BCO{bkgF@fEKTnNl|uS zW|0-NgHuvC8cFI0`}}$lSP=%q+M35Pk&TmVlj%B_AadH*xZGKO7D~d9+r6CcZN{_0 zp`f0ZVs;ejc6Ip%wqd19OYmJ+mmCId;D$Q*F`$Tsb}-^8l(BlC0!;$ZRNLA{!{5*h zxCYk!#O5LR#D9l0U87@ zXLRu^ZqD1HDfklpz8~36;PJ0$^3b*aQtoKlhyR?R)PkRx=Dq|&$Y?D$ZMDUp+(Yg! zvYypmRVne4T$7Cp5BHOZ$SYv!03Qi?q46ZftAQPjQl|(aS|prZ26Qi(>t=^T1Ze}~V4i2h$rFPb|edHwtR1-`Yli^j(3jsBSL z@|k=!E-EMgDA;3JIk|hR)MK=^V7q0_$99eI_PXF(^FDWd;Mh|=b(RZ`j1Js?>e;l! zf=$iQ`R6Ua1!MgqwNpQ9Oxm46HIg^$s2l>zwuIiP3ogQ)%&ez&Q`1t^2Aa1YyR434 zfUa)Jj_0U(L>#7?iL?$Jcu0KKXgkK~ljPmB2=Rg=FnWXXolI@$*aC6f^+vT!HDa{O zmwi|p=YD6_vRT^PsXt6oy$Q(AB$4oesB+w|?Z6aXPoTAkg3b>^7MkO<$(I;q*5q$3&L$ z1;FCJGA-U{(R)qD_Tef%VM!Wp zT9zEQOyl8Y%FL;dum42 zkz8}Pxo$=}F}^i~oP=CeLzL$qD+^~Ui%`|v7$rFII1$Zh4#j_LDxwtaF$DeZJ=Zb;a2EF$#7Z+c(mMly>A)i(tR$f3L1=gE#ju575m6yGAlH90 zI!*5Pf!L89Z{;C5!t%V|p85=})e|A8Fz|VynQ@o0=gcKLud#@O= z)+zb+6>A^mukkJYH~tv~3_<%YT*8>0XRde4T+dq9Ba*J`>M*kOLVNn!ieMSeA8hHx4VVhsMZP-hID;q}2W+5;Vq8F_XDH=zc zY)H~be05Ee_TP8?1&z1B=3=>}#TLOH)L)|j)y1|=5uh^b)5l<(5mh1xkP}Th=XxFB zE&+%jfJ-Q@DS=mb3porV{-OYKkzT3fso6fmCag22p-Ku!rM^SJ09o5qq3#7+@(~4y zj8#~a6STlRv7V`p`W2Y0>La~?Zxseld_Kof#OgAe&pt8 z(8=NcvN<;ZX9l5v{_Fhxa~d;0Tta4Px-DHa-TaW7m8zRndZGTP0&!GwY+G_}L@YoM z3)&7z+G3w|iV$T-iuc^#$8h^gh+l^KL48|q)Dz->5hqM~x`)QM z+wQS0$Lk;S7gOBiG(&VqeuXl{87el>yhj~sH(itdO7gtGR-9%hk;Av{a^yW0vEtQA z{F#SAJRPB|!UhFh_*v?*)Ap!UMCyRnkB%?nsL?slfdC0v&t}Y3WXe!lzEcLFjS{tW z6}1JbA|ViVC8Q8=5}ke{p8htVJDjQ8k3)CjoeS%WcgX$A&iOv8wA%?3zm8&Kil{_= z+4nw=E(kY%|KWuzyUyR+?|miY{JZgGvR{5{*qhfu`Wg)lZsYYiswi2E^C(31LKM~z z)?St=wiUUJTY6SfmVU6>ZM@ZpKy#aie7wK`Chd#Z>4_k32>Fx9hbn3?eHW+p|Ft zg|;xpL4`1A(I+BrJG;q@VMbBL!%p$$O6^A0O>qpNlyrnEd$qKCfaSZ4ody|p%M9BD{k&%Xev=sRkijs}0!LhfvI7*(P ztw?_)9H-zLiq=~^O2S~30mq(boIe;}^pA*t^ml#o6QNzt7U_Ss*b3VudDQ{K!p^mB z9c~@#x7ecOvH7cQ&HrgG1l;{5#vTmAmOfzUvq61BaT571&YWS9!y8bS%5T8-C>mI+ zqcijaDZSxj!+wDyTBjcs?s6fv1mgrE@=^dL!7N&3jmVi0&dja-Y{h-E#_9EV03s?N zD+7!ZoPY@ly}wV88qb%VL@Eqwal%=g1xjUZ003MB)63F^s5N-TbMU4WM-Xnm8(*5S zDoO01Os6ItHNK^>T#Yvhj_!gv6XapsF{ar%Ax9^P#lm14%XM;fo_1mlEbq>xhF1Yx z5>AeMWV@A~Jb6dO3-dV%o)RC^RfU;kj|YX-IwLFGgIvM(nLD0eczTY1AVNjV0lrCW zxJyCILq(&{2Ay?P)45v0F_@lJJFsXG>Kn+G1GYambU=XBGdxbl9Abq73BLm0G6|%; zDDg_J(F+LFtm~zO^KhUJ)M~7b%t;b}f9YLQyAz}>X}N)mfX}i?>RgCJe!l5_{~aBB%%0A? zO+5i2gR4RoYS6U zJn+DS3oyNZnu#mtz%WOe%N*sngfJhdgB``b>r!B*BLghf#LCkb+_JDq8-!YZCOVB? 
z`7B!NdygCa{AsIb^ygLe%D;PR24R`k1Cg zFt@n@AHlk0%%HmD@TDmJGuN5of5=&y-S%WSWNX*^mnk7qXH|G%Z48br_ZjcdmOtJH ztp|qXf=v)^fbqeJ5NU7D;@U>SZ;8^92huF!Zc|=LJMNGe66)YLYwqrl6x;#q1Bk4@<16lziQ(BBcFZ|a)4POiYSbDinUj9F zH;f$>t5z%anWEj~IbFcduBi$dqvGMI#v5~LjsQM`lW>vApAxLR8}dhlu0Eu`?_IHobGF$z(!XrLJ19mgfdVp{LGH($JATkvA`L#ksQTxc4~=?E(=8XNo1cEn!mtL>H_~9 z^)7yn@bFrWc@CeOBZ9zao>pK|Pb}D-jNHS`K;jC~OCkGg6&Q%4yZ3U*-wX!~Bqne| zU`8Ci%7;V=vAXayOAJiJD~=F({~_=FSl0p!8%0Q%y57)>pgvzw8HS9E&D04UhZPr= zId{TiE&$+;H9Fw6x+Q8HY;48zp#zN?<_CXZH*QA6EG)kF=(=!Y^XlTmDjSQ_ecr5r zc*7%52XB4vcfk&OFf+cZR|`%Yol|>~J5}$fL9!FoB9mh~k4>=>Z{X>TL{wq}ejoGS z*%0Mv`rlo?!H^pa+-P~ep#j&C|EjF!NEv1>!A?)pk0`$<84;aiE(bF&j%u=l!^Op= zCkRpZV?%ZGi9Y#mm=y^lg~S?tRvQ{JVPfxu389i&Nl#3-p(FpNS5ELc0@^jI^K@Ah zJdDgyI6Gv{0R%#&*8(r;-5r92rRf!Tq^Vqo*YleY3xRhQI&sOFC1BG7NcSh>tad-P zM~gL`gJ}2wdN)w{qK(SO6nPoa_`=@``|s|*#}^=ZFcMI728eFp=G~hk*%u{TOeTgt z6*^T6{fugp)Fwo4NI8-|HQXidL2^nWa2;a3ofvf*_rqY2(cY0reA0wi=MwMBx@_8B z#x{bxFPwza55#l$-BJ9j4n3w-C_f{3GvYE>T4n18jVt?1FwZSwUC}=p)PegYaI~G) z^RbNYCHgnk{V2j&=AdN}hH^I*wWaUY=YIyyxDFDMU9st`pA5OGO&e4x-!(t;Az1V$ zDq>ulen`@k4kCZd*>64^I!L8_5CfR}GCE{;NHn^0!)TekW~k6WHImK&z@vuf%11hP zU8kPIKy?`bU_m^3ei;aNgv!)9yoQ`31x@^Z2H9*l+mkTz!G~P8zC~Ew(x{r)1u_!p zUQk-X0brWB$=I$EF1kLAXg?&4!9a8YQAHPnP^@5ry;1)13rh#QPBC_gDRgcoXIS(LIX?((S@uANY*120w;J!8QFb+ zC|sdSKua)F(n0jsT@J)66k8B(-+c`&_mdcFjNqAL?d2|R_ zk;UJ7OD%b7JE74-o!f|jPtf^AFNZ*`Euf@OZx8ME7ZaCcf{jnzffffE-+Q(?>Kv7w zel!0XUwIU|wkU<5TTA^~jsY4}3;>sUfZ&$bb1-F@CNSQOW-uC5(EIqaPdL&-!aQcW z{5Girm6VVn?Xi=&lwE1K4^zm=6l2lKOTjg-A_2bDn@ui~UrSlXh!jHCa&%!Z95aL(9dIcWS_l@k!&_aXYg^uQPi*e=j*AozzinOIUVCZ(eF#=pl`J65YM z{A*JqY)}`2a&{JAN#r^W7Xo|(weqP4oDgDls!WjV zbE(z}SWEVXUdH!&jM?_W468!Sf)3c-f>p`P4j^A7W5_ETb_fW|6L=JN8!_o(70_w?dUs#BjO9a5aqIwK7VCgyPCZIe_4FxJUgA0mZ4d5Hm zBBp78WJW8Xp^^4mj7Jdg7jv0{6ec9XV;>3XQ@F$0(|6M!o?dIee9`7pmDlFwhBp0| zn=Nw}1Sm;WalEUm8PL%_Y49C%-O#D}{)!l@XyORFTtWcnuOY(O;HzUjACdbXnEm_N ztuqCeGx{E_8tqXf{>kMh%fdGq`;OWk73|^ZEf@U$*=QY0w$2F`R>cigOl*vOSYYd( z85X&LmR9i#p2m(E3Szs&HkrEgX^8h+-Z{@w z-62xt$e8!|iYB9@MuVecRXD6%`P6Z{mftWKowjhAM*5^fI^rD}JykKy4m;IYtrG;f zy8a>Y=Vlliu~Z}i))?OzZSnldBdXf$Q7W1o-gAwN6D`lg3-!|U4eS@=-Ps4qzELEP zK5lUI=(zY*VXkAO>YOcKs~usht&2M{q09&vWXsr?8kJ;Al~y-3FTuh|y)1{mHDax) z)dy&Nm2*N2%~=2N*WgQ%wc>Jj7%gUNsmZ(}Pq5ndhAE!L8=3mH`npTw)sAcNB?r|E z)cE2CjZwM+l_QHk-K5}&56>|&+aX`A%}!TI<$5P^9`j2S;>1$*$@?BnEXdr@5w-9r z@AL3$M>pw}bxMK}^;)rREwlad&g0 z1IE7WTXbTfMb_G1w%mI+f&a?}{O_AANxG@(+8r7xr^}BdsAo@L+hjOQ=BUTivsSq1 zH7tzDjbY`h>sfamFv}PfHuN5!otO}?VsX6A`NY1BW%qP6liZG+)7dyR&^LOzx59tX zQX{)6&Kd7G%8td`$t>6 zdWI*69e!%jUh)1ZweeOqJ)bYIys#p}#BopF;Tanayx;DSTXwm0`)4sv8^4WDEIg$Z zC*~|&w#DzOt>vo~?HeB&Ovsq(7OH2;8y6B&lMv58AS&{6zCE4B_n~c^`E*6|Hw?Z` zC5FXn^uND`yJ)9Ney;2G+3z{CbUxBKR#~gLQ0?=ni)Spfss8z!UA_&DH6or)+J-!b zFx8d8QzvWhcGjRYcr6MGkozu(^P z*Zf+3!-H3GVYGM56qbeqf6S5-=j|LfT&W$fb$qia#;Cnyj)p}-_%qMWqV@G%tBTh8 zl}2U{(A?`&r$u<~i9+-f613sCtyetYh5* zKH9e&6FvO}`T86|yXv&*vvw?+Vj)OZ_t&><6Lp4tMPJ*W9PZAzzo8xIix%6436>@k zX8F$UpIzW^#$IZxFNj?stntq`t~P~I3BTNO6@NzFw>7zgsRDN!k6E_`3;8v(JZ$gV zKef%YliD7)NfE^J?(A58EKv~IV|`@9^2uVhL?>wUhSBRnKmTRZ#r~^i<$7vH+J~2A z965jIbL~5G+WhVnHAXy%_{!_OQq4i9Iizj>Y59BU&SBp{8 ztHKI@-TK>h_JN?I!P$X28tTUD{3d5|_EekRnW518tXQW;@<@Fq9zG$_(NMc#HTtr6 z&9X5upR!b;RMNm&EBt2N6um_gs*R2tjqbluYBee8#=5-Dv5_~d;u=FIOucD+R85Ut zbm7AtA9g-iW)W-2si9*8WV|w1HCZ`!5KkMpwrpQwgI-Pc$z5FA`?7H^(pf)52Y+_& z#I1|Vk558h>3YFs>tX}B-@?^@w+H{bnCgH0eKfw9w<>3Gfqx_Z3-6Y+{{g@L|D@mL Ie_=TOFVfIJ*Z=?k literal 0 HcmV?d00001 diff --git a/samples/rust/audio-transcription-example/src/main.rs b/samples/rust/audio-transcription-example/src/main.rs index 6f9b3e9e..c326006f 100644 --- 
a/samples/rust/audio-transcription-example/src/main.rs +++ b/samples/rust/audio-transcription-example/src/main.rs @@ -1,11 +1,14 @@ +// // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +// use std::env; use std::io::{self, Write}; use foundry_local_sdk::{FoundryLocalConfig, FoundryLocalManager}; use tokio_stream::StreamExt; +// const ALIAS: &str = "whisper-tiny"; @@ -14,16 +17,18 @@ async fn main() -> Result<(), Box> { println!("Audio Transcription Example"); println!("===========================\n"); - // Accept an audio file path as a CLI argument. - let audio_path = env::args().nth(1).unwrap_or_else(|| { - eprintln!("Usage: cargo run -- "); - std::process::exit(1); - }); + // Accept an optional audio file path as a CLI argument, defaulting to Recording.mp3. + let audio_path = env::args() + .nth(1) + .unwrap_or_else(|| "Recording.mp3".to_string()); // ── 1. Initialise the manager ──────────────────────────────────────── + // let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; + // - // ── 2. Pick the whisper model and ensure it is downloaded ──────────── + // ── 2. Pick the whispermodel and ensure it is downloaded ──────────── + // let model = manager.catalog().get_model(ALIAS).await?; println!("Model: {} (id: {})", model.alias(), model.id()); @@ -41,8 +46,10 @@ async fn main() -> Result<(), Box> { println!("Loading model..."); model.load().await?; println!("✓ Model loaded\n"); + // - // ── 3. Create an audio client ──────────────────────────────────────── + // + // ── 3. Create an audio client──────────────────────────────────────── let audio_client = model.create_audio_client(); // ── 4. Non-streaming transcription ─────────────────────────────────── @@ -60,11 +67,15 @@ async fn main() -> Result<(), Box> { io::stdout().flush().ok(); } println!("\n"); + // // ── 6. Unload the model────────────────────────────────────────────── + // println!("Unloading model..."); model.unload().await?; println!("Done."); + // Ok(()) } +// diff --git a/samples/rust/foundry-local-webserver/src/main.rs b/samples/rust/foundry-local-webserver/src/main.rs index d8cf0b44..492cbbc1 100644 --- a/samples/rust/foundry-local-webserver/src/main.rs +++ b/samples/rust/foundry-local-webserver/src/main.rs @@ -1,3 +1,4 @@ +// // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. @@ -8,20 +9,25 @@ //! when you want to use the OpenAI REST API directly or integrate with tools //! that expect an OpenAI-compatible endpoint. +// use std::io::{self, Write}; use serde_json::json; use foundry_local_sdk::{FoundryLocalConfig, FoundryLocalManager}; +// #[tokio::main] async fn main() -> Result<(), Box> { // ── 1. Initialise the SDK ──────────────────────────────────────────── + // println!("Initializing Foundry Local SDK..."); let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; println!("✓ SDK initialized"); + // // ── 2. Download and load a model ───────────────────────────────────── + // let model_alias = "qwen2.5-0.5b"; let model = manager.catalog().get_model(model_alias).await?; @@ -39,8 +45,10 @@ async fn main() -> Result<(), Box> { print!("Loading model {model_alias}..."); model.load().await?; println!("done."); + // - // ── 3. Start the web service ───────────────────────────────────────── + // + // ── 3. 
Start the web service───────────────────────────────────────── print!("Starting web service..."); manager.start_web_service().await?; println!("done."); @@ -90,6 +98,7 @@ async fn main() -> Result<(), Box> { } } println!(); + // // ── 5. Clean up ────────────────────────────────────────────────────── println!("\nStopping web service..."); @@ -101,3 +110,4 @@ async fn main() -> Result<(), Box> { println!("✓ Done."); Ok(()) } +// diff --git a/samples/rust/native-chat-completions/src/main.rs b/samples/rust/native-chat-completions/src/main.rs index 2e2d2d23..04d09372 100644 --- a/samples/rust/native-chat-completions/src/main.rs +++ b/samples/rust/native-chat-completions/src/main.rs @@ -1,6 +1,8 @@ +// // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. +// use std::io::{self, Write}; use foundry_local_sdk::{ @@ -8,6 +10,7 @@ use foundry_local_sdk::{ ChatCompletionRequestUserMessage, FoundryLocalConfig, FoundryLocalManager, }; use tokio_stream::StreamExt; +// const ALIAS: &str = "qwen2.5-0.5b"; @@ -17,9 +20,12 @@ async fn main() -> Result<(), Box> { println!("=======================\n"); // ── 1. Initialise the manager ──────────────────────────────────────── + // let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; + // - // ── 2. Pick a model and ensure it is downloaded ────────────────────── + // ── 2. Pick a modeland ensure it is downloaded ────────────────────── + // let model = manager.catalog().get_model(ALIAS).await?; println!("Model: {} (id: {})", model.alias(), model.id()); @@ -37,13 +43,17 @@ async fn main() -> Result<(), Box> { println!("Loading model..."); model.load().await?; println!("✓ Model loaded\n"); + // - // ── 3. Create a chat client ────────────────────────────────────────── + // ── 3. Create a chat client────────────────────────────────────────── + // let client = model.create_chat_client() .temperature(0.7) .max_tokens(256); + // - // ── 4. Non-streaming chat completion ───────────────────────────────── + // ── 4. Non-streamingchat completion ───────────────────────────────── + // let messages: Vec = vec![ ChatCompletionRequestSystemMessage::from("You are a helpful assistant.").into(), ChatCompletionRequestUserMessage::from("What is Rust's ownership model?").into(), @@ -56,8 +66,10 @@ async fn main() -> Result<(), Box> { println!("Assistant: {content}"); } } + // - // ── 5. Streaming chat completion ───────────────────────────────────── + // ── 5. Streamingchat completion ───────────────────────────────────── + // let stream_messages: Vec = vec![ ChatCompletionRequestSystemMessage::from("You are a helpful assistant.").into(), ChatCompletionRequestUserMessage::from("Explain the borrow checker in two sentences.") @@ -79,11 +91,15 @@ async fn main() -> Result<(), Box> { } } println!("\n"); + // - // ── 6. Unload the model────────────────────────────────────────────── + // ── 6. Unloadthe model────────────────────────────────────────────── + // println!("Unloading model..."); model.unload().await?; println!("Done."); + // Ok(()) } +// diff --git a/samples/rust/tool-calling-foundry-local/src/main.rs b/samples/rust/tool-calling-foundry-local/src/main.rs index 9a144500..1ccda1e8 100644 --- a/samples/rust/tool-calling-foundry-local/src/main.rs +++ b/samples/rust/tool-calling-foundry-local/src/main.rs @@ -1,6 +1,8 @@ +// // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
+// use std::io::{self, Write}; use serde_json::{json, Value}; @@ -11,11 +13,13 @@ use foundry_local_sdk::{ ChatCompletionRequestToolMessage, ChatCompletionRequestUserMessage, ChatCompletionTools, ChatToolChoice, FinishReason, FoundryLocalConfig, FoundryLocalManager, }; +// // By using an alias, the most suitable model variant will be downloaded // to your end-user's device. const ALIAS: &str = "qwen2.5-0.5b"; +// /// A simple tool that multiplies two numbers. fn multiply_numbers(first: f64, second: f64) -> f64 { first * second @@ -33,6 +37,7 @@ fn invoke_tool(name: &str, args: &Value) -> String { _ => format!("Unknown tool: {name}"), } } +// /// Accumulated state from a streaming response that contains tool calls. #[derive(Default)] @@ -49,9 +54,12 @@ async fn main() -> Result<(), Box> { println!("===============================\n"); // ── 1. Initialise the manager ──────────────────────────────────────── + // let manager = FoundryLocalManager::create(FoundryLocalConfig::new("foundry_local_samples"))?; + // - // ── 2. Load a model ────────────────────────────────────────────────── + // ── 2. Load a model────────────────────────────────────────────────── + // let model = manager.catalog().get_model(ALIAS).await?; println!("Model: {} (id: {})", model.alias(), model.id()); @@ -69,12 +77,14 @@ async fn main() -> Result<(), Box> { println!("Loading model..."); model.load().await?; println!("✓ Model loaded\n"); + // - // ── 3. Create a chat client with tool_choice = required ────────────── + // ── 3. Create a chat clientwith tool_choice = required ────────────── let client = model.create_chat_client() .max_tokens(512) .tool_choice(ChatToolChoice::Required); + // // Define the multiply_numbers tool. let tools: Vec = serde_json::from_value(json!([{ "type": "function", @@ -97,7 +107,9 @@ async fn main() -> Result<(), Box> { } } }]))?; + // + // // Prepare the initial conversation. let mut messages: Vec = vec![ ChatCompletionRequestSystemMessage::from( @@ -210,11 +222,15 @@ async fn main() -> Result<(), Box> { } } println!("\n"); + // // ── 7. 
Clean up────────────────────────────────────────────────────── + // println!("Unloading model..."); model.unload().await?; println!("Done."); + // Ok(()) } +// diff --git a/samples/rust/tutorial-chat-assistant/Cargo.toml b/samples/rust/tutorial-chat-assistant/Cargo.toml new file mode 100644 index 00000000..83c7d237 --- /dev/null +++ b/samples/rust/tutorial-chat-assistant/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "tutorial-chat-assistant" +version = "0.1.0" +edition = "2021" + +[dependencies] +foundry-local-sdk = { path = "../../../sdk/rust" } +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" +anyhow = "1" +serde_json = "1" diff --git a/samples/rust/tutorial-chat-assistant/src/main.rs b/samples/rust/tutorial-chat-assistant/src/main.rs new file mode 100644 index 00000000..6b0b587b --- /dev/null +++ b/samples/rust/tutorial-chat-assistant/src/main.rs @@ -0,0 +1,102 @@ +// +// +use foundry_local_sdk::{ + ChatCompletionRequestMessage, + ChatCompletionRequestSystemMessage, ChatCompletionRequestUserMessage, + FoundryLocalConfig, FoundryLocalManager, +}; +use std::io::{self, BufRead, Write}; +use tokio_stream::StreamExt; +// + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // + // Initialize the Foundry Local SDK + let manager = FoundryLocalManager::create(FoundryLocalConfig::new("chat-assistant"))?; + + // Select and load a model from the catalog + let model = manager.catalog().get_model("qwen2.5-0.5b").await?; + + if !model.is_cached().await? { + println!("Downloading model..."); + model + .download(Some(|progress: &str| { + print!("\r {progress}"); + io::stdout().flush().ok(); + })) + .await?; + println!(); + } + + model.load().await?; + println!("Model loaded and ready."); + + // Create a chat client + let client = model.create_chat_client().temperature(0.7).max_tokens(512); + // + + // + // Start the conversation with a system prompt + let mut messages: Vec = vec![ + ChatCompletionRequestSystemMessage::from( + "You are a helpful, friendly assistant. Keep your responses \ + concise and conversational. If you don't know something, say so.", + ) + .into(), + ]; + // + + println!("\nChat assistant ready! Type 'quit' to exit.\n"); + + let stdin = io::stdin(); + // + loop { + print!("You: "); + io::stdout().flush()?; + + let mut input = String::new(); + stdin.lock().read_line(&mut input)?; + let input = input.trim(); + + if input.eq_ignore_ascii_case("quit") || input.eq_ignore_ascii_case("exit") { + break; + } + + // Add the user's message to conversation history + messages.push(ChatCompletionRequestUserMessage::from(input).into()); + + // + // Stream the response token by token + print!("Assistant: "); + io::stdout().flush()?; + let mut full_response = String::new(); + let mut stream = client.complete_streaming_chat(&messages, None).await?; + while let Some(chunk) = stream.next().await { + let chunk = chunk?; + if let Some(choice) = chunk.choices.first() { + if let Some(ref content) = choice.delta.content { + print!("{content}"); + io::stdout().flush()?; + full_response.push_str(content); + } + } + } + println!("\n"); + // + + // Add the complete response to conversation history + let assistant_msg: ChatCompletionRequestMessage = serde_json::from_value( + serde_json::json!({"role": "assistant", "content": full_response}), + )?; + messages.push(assistant_msg); + } + // + + // Clean up - unload the model + model.unload().await?; + println!("Model unloaded. 
Goodbye!"); + + Ok(()) +} +// diff --git a/samples/rust/tutorial-document-summarizer/Cargo.toml b/samples/rust/tutorial-document-summarizer/Cargo.toml new file mode 100644 index 00000000..cdf77fb7 --- /dev/null +++ b/samples/rust/tutorial-document-summarizer/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "tutorial-document-summarizer" +version = "0.1.0" +edition = "2021" + +[dependencies] +foundry-local-sdk = { path = "../../../sdk/rust" } +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" +anyhow = "1" diff --git a/samples/rust/tutorial-document-summarizer/src/main.rs b/samples/rust/tutorial-document-summarizer/src/main.rs new file mode 100644 index 00000000..9ade2e77 --- /dev/null +++ b/samples/rust/tutorial-document-summarizer/src/main.rs @@ -0,0 +1,157 @@ +// +// +use foundry_local_sdk::{ + ChatCompletionRequestMessage, + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, FoundryLocalConfig, + FoundryLocalManager, +}; +use std::io::{self, Write}; +use std::path::Path; +use std::{env, fs}; +// + +async fn summarize_file( + client: &foundry_local_sdk::openai::ChatClient, + file_path: &Path, + system_prompt: &str, +) -> anyhow::Result<()> { + let content = fs::read_to_string(file_path)?; + let messages: Vec = vec![ + ChatCompletionRequestSystemMessage::from(system_prompt) + .into(), + ChatCompletionRequestUserMessage::from(content.as_str()) + .into(), + ]; + + let response = + client.complete_chat(&messages, None).await?; + let summary = response.choices[0] + .message + .content + .as_deref() + .unwrap_or(""); + println!("{}", summary); + Ok(()) +} + +async fn summarize_directory( + client: &foundry_local_sdk::openai::ChatClient, + directory: &Path, + system_prompt: &str, +) -> anyhow::Result<()> { + let mut txt_files: Vec<_> = fs::read_dir(directory)? + .filter_map(|entry| entry.ok()) + .filter(|entry| { + entry + .path() + .extension() + .map(|ext| ext == "txt") + .unwrap_or(false) + }) + .collect(); + + txt_files.sort_by_key(|e| e.path()); + + if txt_files.is_empty() { + println!( + "No .txt files found in {}", + directory.display() + ); + return Ok(()); + } + + for entry in &txt_files { + let file_name = entry.file_name(); + println!( + "--- {} ---", + file_name.to_string_lossy() + ); + summarize_file( + client, + &entry.path(), + system_prompt, + ) + .await?; + println!(); + } + + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // + // Initialize the Foundry Local SDK + let manager = FoundryLocalManager::create( + FoundryLocalConfig::new("doc-summarizer"), + )?; + + // Select and load a model from the catalog + let model = manager + .catalog() + .get_model("qwen2.5-0.5b") + .await?; + + if !model.is_cached().await? { + println!("Downloading model..."); + model + .download(Some(|progress: &str| { + print!("\r {progress}"); + io::stdout().flush().ok(); + })) + .await?; + println!(); + } + + model.load().await?; + println!("Model loaded and ready.\n"); + + // Create a chat client + let client = model + .create_chat_client() + .temperature(0.7) + .max_tokens(512); + // + + // + let system_prompt = "Summarize the following document \ + into concise bullet points. 
Focus on the key \ + points and main ideas."; + + // + let target = env::args() + .nth(1) + .unwrap_or_else(|| "document.txt".to_string()); + let target_path = Path::new(&target); + // + + if target_path.is_dir() { + summarize_directory( + &client, + target_path, + system_prompt, + ) + .await?; + } else { + let file_name = target_path + .file_name() + .map(|n| n.to_string_lossy().to_string()) + .unwrap_or_else(|| target.clone()); + println!("--- {} ---", file_name); + summarize_file( + &client, + target_path, + system_prompt, + ) + .await?; + } + // + + // Clean up + model.unload().await?; + println!("\nModel unloaded. Done!"); + + Ok(()) +} +// diff --git a/samples/rust/tutorial-tool-calling/Cargo.toml b/samples/rust/tutorial-tool-calling/Cargo.toml new file mode 100644 index 00000000..2de3d740 --- /dev/null +++ b/samples/rust/tutorial-tool-calling/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "tutorial-tool-calling" +version = "0.1.0" +edition = "2021" + +[dependencies] +foundry-local-sdk = { path = "../../../sdk/rust" } +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" +anyhow = "1" +serde_json = "1" diff --git a/samples/rust/tutorial-tool-calling/src/main.rs b/samples/rust/tutorial-tool-calling/src/main.rs new file mode 100644 index 00000000..f4476643 --- /dev/null +++ b/samples/rust/tutorial-tool-calling/src/main.rs @@ -0,0 +1,330 @@ +// +// +use foundry_local_sdk::{ + ChatCompletionRequestMessage, + ChatCompletionRequestSystemMessage, + ChatCompletionRequestToolMessage, + ChatCompletionRequestUserMessage, + ChatCompletionMessageToolCalls, + ChatCompletionTools, ChatToolChoice, + FoundryLocalConfig, FoundryLocalManager, +}; +use serde_json::{json, Value}; +use std::io::{self, BufRead, Write}; +// + +// +// --- Tool implementations --- +fn execute_tool( + name: &str, + arguments: &Value, +) -> Value { + match name { + "get_weather" => { + let location = arguments["location"] + .as_str() + .unwrap_or("unknown"); + let unit = arguments["unit"] + .as_str() + .unwrap_or("celsius"); + let temp = if unit == "celsius" { 22 } else { 72 }; + json!({ + "location": location, + "temperature": temp, + "unit": unit, + "condition": "Sunny" + }) + } + "calculate" => { + let expression = arguments["expression"] + .as_str() + .unwrap_or(""); + let is_valid = expression + .chars() + .all(|c| "0123456789+-*/(). 
".contains(c)); + if !is_valid { + return json!({"error": "Invalid expression"}); + } + match eval_expression(expression) { + Ok(result) => json!({ + "expression": expression, + "result": result + }), + Err(e) => json!({"error": e}), + } + } + _ => json!({"error": format!("Unknown function: {}", name)}), + } +} + +fn eval_expression(expr: &str) -> Result { + let expr = expr.replace(' ', ""); + let chars: Vec = expr.chars().collect(); + let mut pos = 0; + let result = parse_add(&chars, &mut pos)?; + if pos < chars.len() { + return Err("Unexpected character".to_string()); + } + Ok(result) +} + +fn parse_add( + chars: &[char], + pos: &mut usize, +) -> Result { + let mut result = parse_mul(chars, pos)?; + while *pos < chars.len() + && (chars[*pos] == '+' || chars[*pos] == '-') + { + let op = chars[*pos]; + *pos += 1; + let right = parse_mul(chars, pos)?; + result = if op == '+' { + result + right + } else { + result - right + }; + } + Ok(result) +} + +fn parse_mul( + chars: &[char], + pos: &mut usize, +) -> Result { + let mut result = parse_atom(chars, pos)?; + while *pos < chars.len() + && (chars[*pos] == '*' || chars[*pos] == '/') + { + let op = chars[*pos]; + *pos += 1; + let right = parse_atom(chars, pos)?; + result = if op == '*' { + result * right + } else { + result / right + }; + } + Ok(result) +} + +fn parse_atom( + chars: &[char], + pos: &mut usize, +) -> Result { + if *pos < chars.len() && chars[*pos] == '(' { + *pos += 1; + let result = parse_add(chars, pos)?; + if *pos < chars.len() && chars[*pos] == ')' { + *pos += 1; + } + return Ok(result); + } + let start = *pos; + while *pos < chars.len() + && (chars[*pos].is_ascii_digit() || chars[*pos] == '.') + { + *pos += 1; + } + if start == *pos { + return Err("Expected number".to_string()); + } + let num_str: String = chars[start..*pos].iter().collect(); + num_str.parse::().map_err(|e| e.to_string()) +} +// + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // + // --- Tool definitions --- + let tools: Vec = serde_json::from_value(json!([ + { + "type": "function", + "function": { + "name": "get_weather", + "description": + "Get the current weather for a location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": + "The city or location" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"], + "description": "Temperature unit" + } + }, + "required": ["location"] + } + } + }, + { + "type": "function", + "function": { + "name": "calculate", + "description": "Perform a math calculation", + "parameters": { + "type": "object", + "properties": { + "expression": { + "type": "string", + "description": + "The math expression to evaluate" + } + }, + "required": ["expression"] + } + } + } + ]))?; + // + + // + // Initialize the Foundry Local SDK + let manager = FoundryLocalManager::create( + FoundryLocalConfig::new("tool-calling-app"), + )?; + + // Select and load a model + let model = manager + .catalog() + .get_model("qwen2.5-0.5b") + .await?; + + if !model.is_cached().await? 
{ + println!("Downloading model..."); + model + .download(Some(|progress: &str| { + print!("\r {progress}"); + io::stdout().flush().ok(); + })) + .await?; + println!(); + } + + model.load().await?; + println!("Model loaded and ready."); + + // Create a chat client + let client = model + .create_chat_client() + .temperature(0.7) + .max_tokens(512) + .tool_choice(ChatToolChoice::Auto); + + // Conversation with a system prompt + let mut messages: Vec = vec![ + ChatCompletionRequestSystemMessage::from( + "You are a helpful assistant with access to tools. \ + Use them when needed to answer questions accurately.", + ) + .into(), + ]; + // + + // + println!( + "\nTool-calling assistant ready! Type 'quit' to exit.\n" + ); + + let stdin = io::stdin(); + loop { + print!("You: "); + io::stdout().flush()?; + + let mut input = String::new(); + stdin.lock().read_line(&mut input)?; + let input = input.trim(); + + if input.eq_ignore_ascii_case("quit") + || input.eq_ignore_ascii_case("exit") + { + break; + } + + messages.push( + ChatCompletionRequestUserMessage::from(input).into(), + ); + + let mut response = client + .complete_chat(&messages, Some(&tools)) + .await?; + + // Process tool calls in a loop + while response.choices[0].message.tool_calls.is_some() { + let tool_calls = response.choices[0] + .message + .tool_calls + .as_ref() + .unwrap(); + + // Append the assistant's tool_calls message via JSON + let assistant_msg: ChatCompletionRequestMessage = + serde_json::from_value(json!({ + "role": "assistant", + "content": null, + "tool_calls": tool_calls, + }))?; + messages.push(assistant_msg); + + for tc_enum in tool_calls { + let tool_call = match tc_enum { + ChatCompletionMessageToolCalls::Function( + tc, + ) => tc, + _ => continue, + }; + let function_name = + &tool_call.function.name; + let arguments: Value = + serde_json::from_str( + &tool_call.function.arguments, + )?; + println!( + " Tool call: {}({})", + function_name, arguments + ); + + let result = + execute_tool(function_name, &arguments); + messages.push( + ChatCompletionRequestToolMessage { + content: result.to_string().into(), + tool_call_id: tool_call.id.clone(), + } + .into(), + ); + } + + response = client + .complete_chat(&messages, Some(&tools)) + .await?; + } + + let answer = response.choices[0] + .message + .content + .as_deref() + .unwrap_or(""); + let assistant_msg: ChatCompletionRequestMessage = + serde_json::from_value(json!({ + "role": "assistant", + "content": answer, + }))?; + messages.push(assistant_msg); + println!("Assistant: {}\n", answer); + } + + // Clean up + model.unload().await?; + println!("Model unloaded. 
Goodbye!"); + // + + Ok(()) +} +// diff --git a/samples/rust/tutorial-voice-to-text/Cargo.toml b/samples/rust/tutorial-voice-to-text/Cargo.toml new file mode 100644 index 00000000..35ec4fc4 --- /dev/null +++ b/samples/rust/tutorial-voice-to-text/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "tutorial-voice-to-text" +version = "0.1.0" +edition = "2021" + +[dependencies] +foundry-local-sdk = { path = "../../../sdk/rust" } +tokio = { version = "1", features = ["full"] } +tokio-stream = "0.1" +anyhow = "1" diff --git a/samples/rust/tutorial-voice-to-text/src/main.rs b/samples/rust/tutorial-voice-to-text/src/main.rs new file mode 100644 index 00000000..2295c86a --- /dev/null +++ b/samples/rust/tutorial-voice-to-text/src/main.rs @@ -0,0 +1,110 @@ +// +// +use foundry_local_sdk::{ + ChatCompletionRequestMessage, + ChatCompletionRequestSystemMessage, + ChatCompletionRequestUserMessage, + FoundryLocalConfig, FoundryLocalManager, +}; +use std::io::{self, Write}; +// + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + // + // Initialize the Foundry Local SDK + let manager = FoundryLocalManager::create( + FoundryLocalConfig::new("note-taker"), + )?; + // + + // + // Load the speech-to-text model + let speech_model = manager + .catalog() + .get_model("whisper-tiny") + .await?; + + if !speech_model.is_cached().await? { + println!("Downloading speech model..."); + speech_model + .download(Some(|progress: &str| { + print!("\r {progress}"); + io::stdout().flush().ok(); + })) + .await?; + println!(); + } + + speech_model.load().await?; + println!("Speech model loaded."); + + // Transcribe the audio file + let audio_client = speech_model.create_audio_client(); + let transcription = audio_client + .transcribe("meeting-notes.wav") + .await?; + println!("\nTranscription:\n{}", transcription.text); + + // Unload the speech model to free memory + speech_model.unload().await?; + // + + // + // Load the chat model for summarization + let chat_model = manager + .catalog() + .get_model("qwen2.5-0.5b") + .await?; + + if !chat_model.is_cached().await? { + println!("Downloading chat model..."); + chat_model + .download(Some(|progress: &str| { + print!("\r {progress}"); + io::stdout().flush().ok(); + })) + .await?; + println!(); + } + + chat_model.load().await?; + println!("Chat model loaded."); + + // Summarize the transcription into organized notes + let client = chat_model + .create_chat_client() + .temperature(0.7) + .max_tokens(512); + + let messages: Vec = vec![ + ChatCompletionRequestSystemMessage::from( + "You are a note-taking assistant. Summarize \ + the following transcription into organized, \ + concise notes with bullet points.", + ) + .into(), + ChatCompletionRequestUserMessage::from( + transcription.text.as_str(), + ) + .into(), + ]; + + let response = client + .complete_chat(&messages, None) + .await?; + let summary = response.choices[0] + .message + .content + .as_deref() + .unwrap_or(""); + println!("\nSummary:\n{}", summary); + + // Clean up + chat_model.unload().await?; + println!("\nDone. 
Models unloaded."); + // + + Ok(()) +} +// From a74261095b2f71b58933188132f7e6a78f368aec Mon Sep 17 00:00:00 2001 From: Prathik Rao Date: Tue, 31 Mar 2026 15:45:32 -0700 Subject: [PATCH 14/83] foundry local packaging pipeline bug fix (#569) fixes issue with xml comments in c# sdk: ``` Foundry-Local\sdk\cs\src\ICatalog.cs(62,73): error CS1573: Parameter 'ct' has no matching param tag in the XML comment for 'ICatalog.GetLatestVersionAsync(IModel, CancellationToken?)' (but other parameters do) ``` --------- Co-authored-by: Prathik Rao --- sdk/cs/src/ICatalog.cs | 1 + 1 file changed, 1 insertion(+) diff --git a/sdk/cs/src/ICatalog.cs b/sdk/cs/src/ICatalog.cs index 85851a9c..b50f8c40 100644 --- a/sdk/cs/src/ICatalog.cs +++ b/sdk/cs/src/ICatalog.cs @@ -58,6 +58,7 @@ public interface ICatalog /// This is used to check if a newer version of a model is available in the catalog for download. /// /// The model to check for the latest version. + /// Optional CancellationToken. /// The latest version of the model. Will match the input if it is the latest version. Task GetLatestVersionAsync(IModel model, CancellationToken? ct = null); } From 80211df12e5d3c269645b8fe28b09c9b342c28a5 Mon Sep 17 00:00:00 2001 From: Rui Ren Date: Wed, 1 Apr 2026 08:40:29 -0700 Subject: [PATCH 15/83] Add live audio transcription streaming support to Foundry Local JS SDK (#486) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Here's the corrected PR description with all names aligned to the actual code: --- Adds real-time audio streaming support to the Foundry Local JS SDK, enabling live microphone-to-text transcription via ONNX Runtime GenAI ASR. The existing AudioClient only supports file-based transcription. This PR introduces `LiveAudioTranscriptionSession` that accepts continuous PCM audio chunks (e.g., from a microphone) and returns partial/final transcription results as an async iterable. 
## What's included ### New files - `src/openai/liveAudioTranscriptionClient.ts` — Streaming session with `start()`, `append()`, `getTranscriptionStream()`, `stop()`, `dispose()` - `src/openai/liveAudioTranscriptionTypes.ts` — `LiveAudioTranscriptionResponse` and `CoreErrorResponse` interfaces, `tryParseCoreError()` helper - `src/detail/coreInterop.ts` — Added `executeCommandWithBinary()` method and `StreamingRequestBuffer` struct for binary PCM data transport - app.js — E2E example with microphone capture (naudiodon2) and synthetic audio fallback - `test/openai/liveAudioTranscription.test.ts` — Unit tests for types/settings and E2E test with synthetic PCM audio ### Modified files - `src/imodel.ts` — Added `createLiveTranscriptionSession()` to interface - `src/model.ts` — Delegates to `selectedVariant.createLiveTranscriptionSession()` - `src/modelVariant.ts` — Implementation (creates new `LiveAudioTranscriptionSession(modelId, coreInterop)`) - `src/index.ts` — Exports `LiveAudioTranscriptionSession`, `LiveAudioTranscriptionOptions`, `LiveAudioTranscriptionResponse`, `TranscriptionContentPart` ## API surface ```js const session = model.createLiveTranscriptionSession(); session.settings.sampleRate = 16000; session.settings.channels = 1; session.settings.language = "en"; await session.start(); // Push audio from microphone callback await session.append(pcmBytes); // Read results as async iterable for await (const result of session.getTranscriptionStream()) { console.log(result.content[0].text); } await session.stop(); ``` ## Design highlights - **Internal async push queue** — Bounded `AsyncQueue` serializes audio pushes from any context (safe for mic callbacks) and provides backpressure via FIFO resolver queue. Mirrors C#'s `Channel` pattern. - **Binary data transport** — `executeCommandWithBinary()` sends PCM bytes alongside JSON params via `StreamingRequestBuffer`, with transcription results parsed from push responses. - **Settings freeze** — Audio format settings are snapshot-copied and `Object.freeze()`d at `start()`, immutable during the session - **Buffer copy** — `append()` copies the input `Uint8Array` before queueing, safe when caller reuses buffers - **Drain-on-stop** — `stop()` completes the push queue, waits for the push loop to drain, parses final transcription from stop response, then completes the output stream - **Error propagation** — `start()` failures are propagated to `outputQueue` so `getTranscriptionStream()` consumers see the error; `tryParseCoreError()` handles both raw JSON and CoreInterop-prefixed error messages - **Dispose safety** — `dispose()` wraps `stop()` in try/catch, never throws ## Native core dependency This PR adds the JS SDK surface. The 3 native commands (`audio_stream_start`, `audio_stream_push`, `audio_stream_stop`) are routed through `execute_command` and the new `execute_command_with_binary` exports. The code compiles with zero TypeScript errors without the native library. 
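To make the first design highlight concrete, here is a minimal, hypothetical sketch of a bounded async queue with FIFO-resolver backpressure. This is not the SDK's internal `AsyncQueue` (that code is not part of this diff); the class name and signatures are illustrative only, showing the mechanics the bullet describes.

```typescript
// Hypothetical sketch of the bounded push-queue pattern described above.
// The SDK's real AsyncQueue is internal and not shown in this patch.
class BoundedAsyncQueue<T> implements AsyncIterable<T> {
  private items: T[] = [];
  private takers: Array<(r: IteratorResult<T>) => void> = [];
  private putters: Array<() => void> = [];
  private done = false;

  constructor(private readonly capacity: number) {}

  // Producers (e.g. a mic callback) await here when the queue is full;
  // suspended pushers are woken in FIFO order, one per consumed item.
  async push(item: T): Promise<void> {
    while (this.items.length >= this.capacity && !this.done) {
      await new Promise<void>((resolve) => this.putters.push(resolve));
    }
    if (this.done) throw new Error("queue already completed");
    const taker = this.takers.shift();
    if (taker) {
      taker({ value: item, done: false }); // hand off to a waiting consumer
    } else {
      this.items.push(item);
    }
  }

  // Called on stop(): no more pushes are accepted, but queued items drain.
  complete(): void {
    this.done = true;
    this.putters.splice(0).forEach((wake) => wake());
    if (this.items.length === 0) {
      this.takers.splice(0).forEach((t) => t({ value: undefined, done: true }));
    }
  }

  private next(): Promise<IteratorResult<T>> {
    if (this.items.length > 0) {
      const value = this.items.shift()!;
      this.putters.shift()?.(); // a slot freed up: wake the oldest producer
      return Promise.resolve({ value, done: false });
    }
    if (this.done) return Promise.resolve({ value: undefined, done: true });
    return new Promise((resolve) => this.takers.push(resolve));
  }

  [Symbol.asyncIterator](): AsyncIterator<T> {
    return { next: () => this.next() };
  }
}
```

With this shape, a microphone callback can `await queue.push(chunk)` without overrunning the consumer, while the push loop drains it with `for await (const chunk of queue)` — the same bounded-producer/single-consumer shape as C#'s `Channel<T>` that the PR description cites.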
## Testing

- ✅ TypeScript compilation — 0 errors across all source files
- ✅ Unit tests for `parseTranscriptionResult()`, `tryParseCoreError()`, `LiveAudioTranscriptionOptions`
- ✅ E2E test with synthetic PCM audio (skips gracefully if the native core is unavailable)

## Parity with C# SDK

This implementation mirrors the C# `LiveAudioTranscriptionSession` with identical logic:

- Same session lifecycle: `start` → `append` → `getStream` → `stop`
- Same push loop with error handling and binary data transport
- Same settings freeze and buffer copy semantics
- Same drain-before-stop ordering with final result parsing
- Same E2E test pattern (synthetic 440Hz sine wave, 100ms chunks, ConversationItem-shaped response validation)
- Same renamed types: `LiveAudioTranscription*` (matching the C# rename)

---

Changes from the original:

| Old (incorrect) | New (matches code) |
|---|---|
| `LiveAudioTranscriptionClient` | `LiveAudioTranscriptionSession` |
| `LiveAudioTranscriptionSettings` | `LiveAudioTranscriptionOptions` |
| `LiveAudioTranscriptionResult` | `LiveAudioTranscriptionResponse` |
| `createLiveTranscriptionClient()` | `createLiveTranscriptionSession()` |

---------

Co-authored-by: ruiren_microsoft
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: Kunal Vaishnavi
---
 .../README.md | 58 +++
 .../live-audio-transcription-example/app.js | 157 +++++++
 sdk/js/src/detail/coreInterop.ts | 59 +++
 sdk/js/src/imodel.ts | 8 +
 sdk/js/src/index.ts | 2 +
 sdk/js/src/model.ts | 9 +
 sdk/js/src/modelVariant.ts | 9 +
 sdk/js/src/openai/audioClient.ts | 9 +
 .../openai/liveAudioTranscriptionClient.ts | 409 ++++++++++++++++++
 .../src/openai/liveAudioTranscriptionTypes.ts | 95 ++++
 .../openai/liveAudioTranscription.test.ts | 203 +++++++++
 11 files changed, 1018 insertions(+)
 create mode 100644 samples/js/live-audio-transcription-example/README.md
 create mode 100644 samples/js/live-audio-transcription-example/app.js
 create mode 100644 sdk/js/src/openai/liveAudioTranscriptionClient.ts
 create mode 100644 sdk/js/src/openai/liveAudioTranscriptionTypes.ts
 create mode 100644 sdk/js/test/openai/liveAudioTranscription.test.ts

diff --git a/samples/js/live-audio-transcription-example/README.md b/samples/js/live-audio-transcription-example/README.md new file mode 100644 index 00000000..7c817d27 --- /dev/null +++ b/samples/js/live-audio-transcription-example/README.md @@ -0,0 +1,58 @@ +# Live Audio Transcription Example + +Real-time microphone-to-text transcription using the Foundry Local JS SDK with Nemotron ASR. + +## Prerequisites + +- [Foundry Local](https://github.com/microsoft/Foundry-Local) installed +- Node.js 18+ +- A microphone (optional — falls back to synthetic audio) + +## Setup + +```bash +npm install foundry-local-sdk naudiodon2 +``` + +> **Note:** `naudiodon2` is optional — it provides cross-platform microphone capture. Without it, the example falls back to synthetic audio for testing. + +## Run + +```bash +node app.js +``` + +Speak into your microphone. Transcription appears in real time. Press `Ctrl+C` to stop. + +## How it works + +1. Initializes the Foundry Local SDK and loads the Nemotron ASR model +2. Creates a `LiveAudioTranscriptionSession` with 16kHz/16-bit/mono PCM settings +3. Captures microphone audio via `naudiodon2` (or generates synthetic audio as fallback) +4. Pushes PCM chunks to the SDK via `session.append()` +5. Reads transcription results via `for await (const result of session.getTranscriptionStream())` +6.
Access text via `result.content[0].text` (OpenAI Realtime ConversationItem pattern) + +## API + +```javascript +const audioClient = model.createAudioClient(); +const session = audioClient.createLiveTranscriptionSession(); +session.settings.sampleRate = 16000; +session.settings.channels = 1; +session.settings.language = 'en'; + +await session.start(); + +// Push audio +await session.append(pcmBytes); + +// Read results +for await (const result of session.getTranscriptionStream()) { + console.log(result.content[0].text); // transcribed text + console.log(result.content[0].transcript); // alias (OpenAI compat) + console.log(result.is_final); // true for final results +} + +await session.stop(); +``` diff --git a/samples/js/live-audio-transcription-example/app.js b/samples/js/live-audio-transcription-example/app.js new file mode 100644 index 00000000..794c3972 --- /dev/null +++ b/samples/js/live-audio-transcription-example/app.js @@ -0,0 +1,157 @@ +// Live Audio Transcription Example — Foundry Local JS SDK +// +// Demonstrates real-time microphone-to-text using the JS SDK. +// Requires: npm install foundry-local-sdk naudiodon2 +// +// Usage: node app.js + +import { FoundryLocalManager } from 'foundry-local-sdk'; + +console.log('╔══════════════════════════════════════════════════════════╗'); +console.log('║ Foundry Local — Live Audio Transcription (JS SDK) ║'); +console.log('╚══════════════════════════════════════════════════════════╝'); +console.log(); + +// Initialize the Foundry Local SDK +console.log('Initializing Foundry Local SDK...'); +const manager = FoundryLocalManager.create({ + appName: 'foundry_local_live_audio', + logLevel: 'info' +}); +console.log('✓ SDK initialized'); + +// Get and load the nemotron model +const modelAlias = 'nemotron'; +let model = await manager.catalog.getModel(modelAlias); +if (!model) { + console.error(`ERROR: Model "${modelAlias}" not found in catalog.`); + process.exit(1); +} + +console.log(`Found model: ${model.id}`); +console.log('Downloading model (if needed)...'); +await model.download((progress) => { + process.stdout.write(`\rDownloading... ${progress.toFixed(2)}%`); +}); +console.log('\n✓ Model downloaded'); + +console.log('Loading model...'); +await model.load(); +console.log('✓ Model loaded'); + +// Create live transcription session +const audioClient = model.createAudioClient(); +const session = audioClient.createLiveTranscriptionSession(); +session.settings.sampleRate = 16000; // Default is 16000; shown here for clarity +session.settings.channels = 1; +session.settings.bitsPerSample = 16; +session.settings.language = 'en'; + +console.log('Starting streaming session...'); +await session.start(); +console.log('✓ Session started'); + +// Read transcription results in background +const readPromise = (async () => { + try { + for await (const result of session.getTranscriptionStream()) { + const text = result.content?.[0]?.text; + if (result.is_final) { + console.log(); + console.log(` [FINAL] ${text}`); + } else if (text) { + process.stdout.write(text); + } + } + } catch (err) { + if (err.name !== 'AbortError') { + console.error('Stream error:', err.message); + } + } +})(); + +// --- Microphone capture --- +// This example uses naudiodon2 for cross-platform audio capture. +// Install with: npm install naudiodon2 +// +// If you prefer a different audio library, just push PCM bytes +// (16-bit signed LE, mono, 16kHz) via session.append(). 
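+// For example, with a hypothetical `someMic` frame emitter (not part of
+// this sample), frames could be forwarded directly:
+//   someMic.on('frame', (buf) => session.append(new Uint8Array(buf)));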
+ +let audioInput; +try { + const { default: portAudio } = await import('naudiodon2'); + + audioInput = portAudio.AudioIO({ + inOptions: { + channelCount: session.settings.channels, + sampleFormat: session.settings.bitsPerSample === 16 + ? portAudio.SampleFormat16Bit + : portAudio.SampleFormat32Bit, + sampleRate: session.settings.sampleRate, + framesPerBuffer: 1600, // 100ms chunks + maxQueue: 15 // buffer during event-loop blocks from sync FFI calls + } + }); + + let appendPending = false; + audioInput.on('data', (buffer) => { + if (appendPending) return; // drop frame while backpressured + const pcm = new Uint8Array(buffer); + appendPending = true; + session.append(pcm).then(() => { + appendPending = false; + }).catch((err) => { + appendPending = false; + console.error('append error:', err.message); + }); + }); + + console.log(); + console.log('════════════════════════════════════════════════════════════'); + console.log(' LIVE TRANSCRIPTION ACTIVE'); + console.log(' Speak into your microphone.'); + console.log(' Press Ctrl+C to stop.'); + console.log('════════════════════════════════════════════════════════════'); + console.log(); + + audioInput.start(); +} catch (err) { + console.warn('⚠ Could not initialize microphone (naudiodon2 may not be installed).'); + console.warn(' Install with: npm install naudiodon2'); + console.warn(' Falling back to synthetic audio test...'); + console.warn(); + + // Fallback: push 2 seconds of synthetic PCM (440Hz sine wave) + const sampleRate = session.settings.sampleRate; + const duration = 2; + const totalSamples = sampleRate * duration; + const pcmBytes = new Uint8Array(totalSamples * 2); + for (let i = 0; i < totalSamples; i++) { + const t = i / sampleRate; + const sample = Math.round(32767 * 0.5 * Math.sin(2 * Math.PI * 440 * t)); + pcmBytes[i * 2] = sample & 0xFF; + pcmBytes[i * 2 + 1] = (sample >> 8) & 0xFF; + } + + // Push in 100ms chunks + const chunkSize = (sampleRate / 10) * 2; + for (let offset = 0; offset < pcmBytes.length; offset += chunkSize) { + const len = Math.min(chunkSize, pcmBytes.length - offset); + await session.append(pcmBytes.slice(offset, offset + len)); + } + + console.log('✓ Synthetic audio pushed'); +} + +// Handle graceful shutdown +process.on('SIGINT', async () => { + console.log('\n\nStopping...'); + if (audioInput) { + audioInput.quit(); + } + await session.stop(); + await readPromise; + await model.unload(); + console.log('✓ Done'); + process.exit(0); +}); diff --git a/sdk/js/src/detail/coreInterop.ts b/sdk/js/src/detail/coreInterop.ts index 167784e7..3116faa9 100644 --- a/sdk/js/src/detail/coreInterop.ts +++ b/sdk/js/src/detail/coreInterop.ts @@ -19,6 +19,16 @@ koffi.struct('ResponseBuffer', { ErrorLength: 'int32_t', }); +// Extended request struct for binary data (audio streaming) +koffi.struct('StreamingRequestBuffer', { + Command: 'char*', + CommandLength: 'int32_t', + Data: 'char*', // JSON params + DataLength: 'int32_t', + BinaryData: 'void*', // raw PCM audio bytes + BinaryDataLength: 'int32_t', +}); + const CallbackType = koffi.proto('void CallbackType(void *data, int32_t length, void *userData)'); const __filename = fileURLToPath(import.meta.url); @@ -28,6 +38,7 @@ export class CoreInterop { private lib: any; private execute_command: any; private execute_command_with_callback: any; + private execute_command_with_binary: any = null; private static _getLibraryExtension(): string { const platform = process.platform; @@ -93,6 +104,7 @@ export class CoreInterop { this.execute_command = this.lib.func('void 
execute_command(RequestBuffer *request, _Inout_ ResponseBuffer *response)'); this.execute_command_with_callback = this.lib.func('void execute_command_with_callback(RequestBuffer *request, _Inout_ ResponseBuffer *response, CallbackType *callback, void *userData)'); + this.execute_command_with_binary = this.lib.func('void execute_command_with_binary(StreamingRequestBuffer *request, _Inout_ ResponseBuffer *response)'); } public executeCommand(command: string, params?: any): string { @@ -129,6 +141,53 @@ export class CoreInterop { } } + /** + * Execute a native command with binary data (e.g., audio PCM bytes). + * Uses the execute_command_with_binary native entry point which accepts + * both JSON params and raw binary data via StreamingRequestBuffer. + */ + public executeCommandWithBinary(command: string, params: any, binaryData: Uint8Array): string { + const cmdBuf = koffi.alloc('char', command.length + 1); + koffi.encode(cmdBuf, 'char', command, command.length + 1); + + const dataStr = params ? JSON.stringify(params) : ''; + const dataBytes = this._toBytes(dataStr); + const dataBuf = koffi.alloc('char', dataBytes.length + 1); + koffi.encode(dataBuf, 'char', dataStr, dataBytes.length + 1); + + // For binary data, use a Node.js Buffer which allocates stable external memory + // that won't be moved by V8's garbage collector during the FFI call. + const binLength = binaryData.length; + const binBuf = Buffer.from(binaryData); + + // Use koffi.as to pass Buffer directly as a typed pointer + const binTypedPtr = koffi.as(binBuf, 'void *'); + + const req = { + Command: koffi.address(cmdBuf), + CommandLength: command.length, + Data: koffi.address(dataBuf), + DataLength: dataBytes.length, + BinaryData: binTypedPtr, + BinaryDataLength: binLength + }; + const res = { Data: 0, DataLength: 0, Error: 0, ErrorLength: 0 }; + + this.execute_command_with_binary(req, res); + + try { + if (res.Error) { + const errorMsg = koffi.decode(res.Error, 'char', res.ErrorLength); + throw new Error(`Command '${command}' failed: ${errorMsg}`); + } + + return res.Data ? koffi.decode(res.Data, 'char', res.DataLength) : ""; + } finally { + if (res.Data) koffi.free(res.Data); + if (res.Error) koffi.free(res.Error); + } + } + public executeCommandStreaming(command: string, params: any, callback: (chunk: string) => void): Promise { const cmdBuf = koffi.alloc('char', command.length + 1); koffi.encode(cmdBuf, 'char', command, command.length + 1); diff --git a/sdk/js/src/imodel.ts b/sdk/js/src/imodel.ts index f5b72622..625afdec 100644 --- a/sdk/js/src/imodel.ts +++ b/sdk/js/src/imodel.ts @@ -1,5 +1,6 @@ import { ChatClient } from './openai/chatClient.js'; import { AudioClient } from './openai/audioClient.js'; +import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; import { ResponsesClient } from './openai/responsesClient.js'; export interface IModel { @@ -22,6 +23,13 @@ export interface IModel { createChatClient(): ChatClient; createAudioClient(): AudioClient; + + /** + * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. + * The model must be loaded before calling this method. + * @returns A LiveAudioTranscriptionSession instance. + */ + createLiveTranscriptionSession(): LiveAudioTranscriptionSession; /** * Creates a ResponsesClient for interacting with the model via the Responses API. 
* Unlike createChatClient/createAudioClient (which use FFI), the Responses API diff --git a/sdk/js/src/index.ts b/sdk/js/src/index.ts index 7d7ee17a..57d9fcf7 100644 --- a/sdk/js/src/index.ts +++ b/sdk/js/src/index.ts @@ -6,6 +6,8 @@ export { ModelVariant } from './modelVariant.js'; export type { IModel } from './imodel.js'; export { ChatClient, ChatClientSettings } from './openai/chatClient.js'; export { AudioClient, AudioClientSettings } from './openai/audioClient.js'; +export { LiveAudioTranscriptionSession, LiveAudioTranscriptionOptions } from './openai/liveAudioTranscriptionClient.js'; +export type { LiveAudioTranscriptionResponse, TranscriptionContentPart } from './openai/liveAudioTranscriptionTypes.js'; export { ResponsesClient, ResponsesClientSettings, getOutputText } from './openai/responsesClient.js'; export { ModelLoadManager } from './detail/modelLoadManager.js'; /** @internal */ diff --git a/sdk/js/src/model.ts b/sdk/js/src/model.ts index 155d5dd1..b4f60040 100644 --- a/sdk/js/src/model.ts +++ b/sdk/js/src/model.ts @@ -1,6 +1,7 @@ import { ModelVariant } from './modelVariant.js'; import { ChatClient } from './openai/chatClient.js'; import { AudioClient } from './openai/audioClient.js'; +import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; import { ResponsesClient } from './openai/responsesClient.js'; import { IModel } from './imodel.js'; @@ -179,6 +180,14 @@ export class Model implements IModel { return this.selectedVariant.createAudioClient(); } + /** + * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. + * @returns A LiveAudioTranscriptionSession instance. + */ + public createLiveTranscriptionSession(): LiveAudioTranscriptionSession { + return this.selectedVariant.createLiveTranscriptionSession(); + } + /** * Creates a ResponsesClient for interacting with the model via the Responses API. * @param baseUrl - The base URL of the Foundry Local web service. diff --git a/sdk/js/src/modelVariant.ts b/sdk/js/src/modelVariant.ts index db06033a..86c3d3f5 100644 --- a/sdk/js/src/modelVariant.ts +++ b/sdk/js/src/modelVariant.ts @@ -3,6 +3,7 @@ import { ModelLoadManager } from './detail/modelLoadManager.js'; import { ModelInfo } from './types.js'; import { ChatClient } from './openai/chatClient.js'; import { AudioClient } from './openai/audioClient.js'; +import { LiveAudioTranscriptionSession } from './openai/liveAudioTranscriptionClient.js'; import { ResponsesClient } from './openai/responsesClient.js'; import { IModel } from './imodel.js'; @@ -149,6 +150,14 @@ export class ModelVariant implements IModel { return new AudioClient(this._modelInfo.id, this.coreInterop); } + /** + * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. + * @returns A LiveAudioTranscriptionSession instance. + */ + public createLiveTranscriptionSession(): LiveAudioTranscriptionSession { + return new LiveAudioTranscriptionSession(this._modelInfo.id, this.coreInterop); + } + /** * Creates a ResponsesClient for interacting with the model via the Responses API. * @param baseUrl - The base URL of the Foundry Local web service. 
diff --git a/sdk/js/src/openai/audioClient.ts b/sdk/js/src/openai/audioClient.ts index 7b174924..0e6b1f37 100644 --- a/sdk/js/src/openai/audioClient.ts +++ b/sdk/js/src/openai/audioClient.ts @@ -1,4 +1,5 @@ import { CoreInterop } from '../detail/coreInterop.js'; +import { LiveAudioTranscriptionSession } from './liveAudioTranscriptionClient.js'; export class AudioClientSettings { language?: string; @@ -56,6 +57,14 @@ export class AudioClient { this.coreInterop = coreInterop; } + /** + * Creates a LiveAudioTranscriptionSession for real-time audio streaming ASR. + * @returns A LiveAudioTranscriptionSession instance. + */ + public createLiveTranscriptionSession(): LiveAudioTranscriptionSession { + return new LiveAudioTranscriptionSession(this.modelId, this.coreInterop); + } + /** * Validates that the audio file path is a non-empty string. * @internal diff --git a/sdk/js/src/openai/liveAudioTranscriptionClient.ts b/sdk/js/src/openai/liveAudioTranscriptionClient.ts new file mode 100644 index 00000000..b1115a25 --- /dev/null +++ b/sdk/js/src/openai/liveAudioTranscriptionClient.ts @@ -0,0 +1,409 @@ +import { CoreInterop } from '../detail/coreInterop.js'; +import { LiveAudioTranscriptionResponse, parseTranscriptionResult, tryParseCoreError } from './liveAudioTranscriptionTypes.js'; + +/** + * Audio format settings for a streaming session. + * Must be configured before calling start(). + * Settings are frozen once the session starts. + */ +export class LiveAudioTranscriptionOptions { + /** PCM sample rate in Hz. Default: 16000. */ + sampleRate: number = 16000; + /** Number of audio channels. Default: 1 (mono). */ + channels: number = 1; + /** Bits per sample. Default: 16. */ + bitsPerSample: number = 16; + /** Optional BCP-47 language hint (e.g., "en", "zh"). */ + language?: string; + /** Maximum number of audio chunks buffered in the internal push queue. Default: 100. */ + pushQueueCapacity: number = 100; + + /** @internal Create a frozen copy of these settings. */ + snapshot(): LiveAudioTranscriptionOptions { + const copy = new LiveAudioTranscriptionOptions(); + copy.sampleRate = this.sampleRate; + copy.channels = this.channels; + copy.bitsPerSample = this.bitsPerSample; + copy.language = this.language; + copy.pushQueueCapacity = this.pushQueueCapacity; + return Object.freeze(copy) as LiveAudioTranscriptionOptions; + } +} + +/** + * Internal async queue that acts like C#'s Channel. + * Supports a single consumer reading via async iteration and multiple producers writing. + * @internal + */ +class AsyncQueue { + private queue: T[] = []; + private waitingResolve: ((value: IteratorResult) => void) | null = null; + private completed = false; + private completionError: Error | null = null; + private maxCapacity: number; + private backpressureQueue: (() => void)[] = []; + + constructor(maxCapacity: number = Infinity) { + this.maxCapacity = maxCapacity; + } + + /** Push an item. If at capacity, waits until space is available. 
*/ + async write(item: T): Promise { + if (this.completed) { + throw new Error('Cannot write to a completed queue.'); + } + + if (this.waitingResolve) { + const resolve = this.waitingResolve; + this.waitingResolve = null; + resolve({ value: item, done: false }); + return; + } + + while (this.queue.length >= this.maxCapacity) { + await new Promise((resolve) => { + this.backpressureQueue.push(resolve); + }); + } + + if (this.completed) { + throw new Error('Cannot write to a completed queue.'); + } + + this.queue.push(item); + } + + /** Push an item synchronously (no backpressure wait). Returns false if completed or at capacity. */ + tryWrite(item: T): boolean { + if (this.completed) return false; + + if (this.waitingResolve) { + const resolve = this.waitingResolve; + this.waitingResolve = null; + resolve({ value: item, done: false }); + return true; + } + + if (this.queue.length >= this.maxCapacity) { + return false; + } + + this.queue.push(item); + return true; + } + + /** Signal that no more items will be written. */ + complete(error?: Error): void { + if (this.completed) return; + this.completed = true; + this.completionError = error ?? null; + + // Release all blocked writers + for (const resolve of this.backpressureQueue) { + resolve(); + } + this.backpressureQueue = []; + + if (this.waitingResolve) { + const resolve = this.waitingResolve; + this.waitingResolve = null; + resolve({ value: undefined as any, done: true }); + } + } + + get error(): Error | null { + return this.completionError; + } + + /** Async iterator for consuming items. */ + async *[Symbol.asyncIterator](): AsyncGenerator { + while (true) { + if (this.backpressureQueue.length > 0 && this.queue.length < this.maxCapacity) { + const resolve = this.backpressureQueue.shift()!; + resolve(); + } + + if (this.queue.length > 0) { + yield this.queue.shift()!; + continue; + } + + if (this.completed) { + if (this.completionError) { + throw this.completionError; + } + return; + } + + const result = await new Promise>((resolve) => { + this.waitingResolve = resolve; + }); + + if (result.done) { + if (this.completionError) { + throw this.completionError; + } + return; + } + + yield result.value; + } + } +} + +/** + * Client for real-time audio streaming ASR (Automatic Speech Recognition). + * Audio data from a microphone (or other source) is pushed in as PCM chunks, + * and transcription results are returned as an async iterable. + * + * Mirrors the C# LiveAudioTranscriptionSession. + */ +export class LiveAudioTranscriptionSession { + private modelId: string; + private coreInterop: CoreInterop; + + private sessionHandle: string | null = null; + private started = false; + private stopped = false; + + private outputQueue: AsyncQueue | null = null; + private pushQueue: AsyncQueue | null = null; + private pushLoopPromise: Promise | null = null; + private activeSettings: LiveAudioTranscriptionOptions | null = null; + private sessionAbortController: AbortController | null = null; + private streamConsumed = false; + + /** + * Configuration settings for the streaming session. + * Must be configured before calling start(). Settings are snapshotted at start(); + * changes made after start() are ignored for the current session. + */ + public settings = new LiveAudioTranscriptionOptions(); + + /** + * @internal + * Users should create sessions via AudioClient.createLiveTranscriptionSession(). 
+ */ + constructor(modelId: string, coreInterop: CoreInterop) { + this.modelId = modelId; + this.coreInterop = coreInterop; + } + + /** + * Start a real-time audio streaming session. + * Must be called before append() or getTranscriptionStream(). + * Settings are frozen after this call. + */ + public async start(): Promise { + if (this.started) { + throw new Error('Streaming session already started. Call stop() first.'); + } + + this.activeSettings = this.settings.snapshot(); + this.outputQueue = new AsyncQueue(); + this.pushQueue = new AsyncQueue(this.activeSettings.pushQueueCapacity); + this.streamConsumed = false; + + const params: Record = { + Model: this.modelId, + SampleRate: this.activeSettings.sampleRate.toString(), + Channels: this.activeSettings.channels.toString(), + BitsPerSample: this.activeSettings.bitsPerSample.toString(), + }; + + if (this.activeSettings.language) { + params['Language'] = this.activeSettings.language; + } + + try { + const response = this.coreInterop.executeCommand("audio_stream_start", { + Params: params + }); + + this.sessionHandle = response; + if (!this.sessionHandle) { + throw new Error('Native core did not return a session handle.'); + } + } catch (error) { + const err = new Error( + `Error starting audio stream session: ${error instanceof Error ? error.message : String(error)}`, + { cause: error } + ); + this.outputQueue.complete(err); + throw err; + } + + this.started = true; + this.stopped = false; + + this.sessionAbortController = new AbortController(); + this.pushLoopPromise = this.pushLoop(); + } + + /** + * Push a chunk of raw PCM audio data to the streaming session. + * Can be called from any context. Chunks are internally queued + * and serialized to native core one at a time. + * + * @param pcmData - Raw PCM audio bytes matching the configured format. + */ + public async append(pcmData: Uint8Array): Promise { + if (!this.started || this.stopped) { + throw new Error('No active streaming session. Call start() first.'); + } + + const copy = new Uint8Array(pcmData.length); + copy.set(pcmData); + + await this.pushQueue!.write(copy); + } + + /** + * Internal loop that drains the push queue and sends chunks to native core one at a time. + * Terminates the session on any native error. + * @internal + */ + private async pushLoop(): Promise { + try { + for await (const audioData of this.pushQueue!) { + if (this.sessionAbortController?.signal.aborted) { + break; + } + + try { + const responseData = this.coreInterop.executeCommandWithBinary("audio_stream_push", { + Params: { + SessionHandle: this.sessionHandle!, + } + }, audioData); + + // Parse transcription result from push response and surface it + if (responseData) { + try { + const result = parseTranscriptionResult(responseData); + const text = result.content?.[0]?.text; + if (text !== undefined && text !== null && text !== '') { + this.outputQueue?.tryWrite(result); + } + } catch { + // Non-fatal: log and continue if response isn't a transcription result + } + } + } catch (error) { + const errorMsg = error instanceof Error ? error.message : String(error); + const errorInfo = tryParseCoreError(errorMsg); + + const fatalError = new Error( + `Push failed (code=${errorInfo?.code ?? 'UNKNOWN'}): ${errorMsg}`, + { cause: error } + ); + this.stopped = true; + this.started = false; + this.pushQueue?.complete(fatalError); + this.outputQueue?.complete(fatalError); + return; + } + } + } catch (error) { + if (this.sessionAbortController?.signal.aborted) { + return; + } + const err = error instanceof Error ? 
error : new Error(String(error)); + this.outputQueue?.complete(new Error('Push loop terminated unexpectedly.', { cause: err })); + } + } + + /** + * Get the async iterable of transcription results. + * Results arrive as the native ASR engine processes audio data. + * + * Usage: + * ```ts + * for await (const result of client.getTranscriptionStream()) { + * console.log(result.content[0].text); + * } + * ``` + */ + public async *getTranscriptionStream(): AsyncGenerator { + if (!this.outputQueue) { + throw new Error('No active streaming session. Call start() first.'); + } + if (this.streamConsumed) { + throw new Error('getTranscriptionStream() can only be called once per session. The output stream has already been consumed.'); + } + this.streamConsumed = true; + + for await (const item of this.outputQueue) { + yield item; + } + } + + /** + * Signal end-of-audio and stop the streaming session. + * Any remaining buffered audio in the push queue will be drained to native core first. + * Final results are delivered through getTranscriptionStream() before it completes. + */ + public async stop(): Promise { + if (!this.started || this.stopped) { + return; + } + + this.stopped = true; + + this.pushQueue?.complete(); + + if (this.pushLoopPromise) { + await this.pushLoopPromise; + } + + this.sessionAbortController?.abort(); + + let stopError: Error | null = null; + try { + const responseData = this.coreInterop.executeCommand("audio_stream_stop", { + Params: { SessionHandle: this.sessionHandle! } + }); + + // Parse final transcription from stop response + if (responseData) { + try { + const finalResult = parseTranscriptionResult(responseData); + if (finalResult.content?.[0]?.text) { + this.outputQueue?.tryWrite(finalResult); + } + } catch { + // Non-fatal + } + } + } catch (error) { + stopError = error instanceof Error ? error : new Error(String(error)); + } + + this.sessionHandle = null; + this.started = false; + this.sessionAbortController = null; + + this.outputQueue?.complete(); + + if (stopError) { + throw new Error( + `Error stopping audio stream session: ${stopError.message}`, + { cause: stopError } + ); + } + } + + /** + * Dispose the client and stop any active session. + * Safe to call multiple times. + */ + public async dispose(): Promise { + try { + if (this.started && !this.stopped) { + await this.stop(); + } + } catch { + // Swallow errors during best-effort cleanup to keep dispose() silent. + } + } +} diff --git a/sdk/js/src/openai/liveAudioTranscriptionTypes.ts b/sdk/js/src/openai/liveAudioTranscriptionTypes.ts new file mode 100644 index 00000000..d7f07b5b --- /dev/null +++ b/sdk/js/src/openai/liveAudioTranscriptionTypes.ts @@ -0,0 +1,95 @@ +/** + * Types for real-time audio streaming transcription results and structured errors. + * Mirrors the C# LiveAudioTranscriptionResponse (extends ConversationItem) and CoreErrorResponse. + */ + +/** + * A content part within a transcription result. + * Follows the OpenAI Realtime API's ContentPart pattern. + */ +export interface TranscriptionContentPart { + /** The transcribed text. */ + text?: string | null; + /** Alias for text, matching the OpenAI Realtime API's ContentPart.transcript field. */ + transcript?: string | null; +} + +/** + * A transcription result from a real-time audio streaming session. + * Shaped like the OpenAI Realtime API's ConversationItem so that + * customers access text via result.content[0].text or result.content[0].transcript. 
+ */ +export interface LiveAudioTranscriptionResponse { + /** Unique identifier for this result (if available). */ + id?: string | null; + /** Whether this is a partial (interim) or final result for this segment. */ + is_final: boolean; + /** The transcription content parts. Access text via content[0].text or content[0].transcript. */ + content: TranscriptionContentPart[]; + /** Start time offset of this segment in the audio stream (seconds). */ + start_time?: number | null; + /** End time offset of this segment in the audio stream (seconds). */ + end_time?: number | null; +} + +/** + * Parse raw Core JSON response into a LiveAudioTranscriptionResponse. + * Maps the flat Core format (text, is_final, start_time, end_time) into + * the ConversationItem-shaped result with content[0].text and content[0].transcript. + * @internal + */ +export function parseTranscriptionResult(json: string): LiveAudioTranscriptionResponse { + const raw = JSON.parse(json); + return { + id: raw.id ?? null, + is_final: raw.is_final ?? false, + start_time: raw.start_time ?? null, + end_time: raw.end_time ?? null, + content: [ + { + text: raw.text ?? '', + transcript: raw.text ?? '' + } + ] + }; +} + +/** + * Structured error response from native core audio streaming commands. + */ +export interface CoreErrorResponse { + /** Machine-readable error code. */ + code: string; + /** Human-readable error message. */ + message: string; + /** Whether this error is transient and may succeed on retry. */ + isTransient: boolean; +} + +/** + * Attempt to parse a native error string as a structured CoreErrorResponse. + * Handles both raw JSON and CoreInterop-prefixed messages + * (e.g., "Command 'X' failed: {...}"). + * Returns null if no valid CoreErrorResponse JSON is found. + * @internal + */ +export function tryParseCoreError(errorString: string): CoreErrorResponse | null { + // Try raw JSON first, then extract JSON after "failed: " prefix + const candidates = [errorString]; + const prefixIdx = errorString.indexOf('failed: '); + if (prefixIdx !== -1) { + candidates.push(errorString.substring(prefixIdx + 8)); + } + + for (const candidate of candidates) { + try { + const parsed = JSON.parse(candidate); + if (typeof parsed.code === 'string' && typeof parsed.message === 'string' && typeof parsed.isTransient === 'boolean') { + return parsed as CoreErrorResponse; + } + } catch { + // not valid JSON, try next candidate + } + } + return null; +} diff --git a/sdk/js/test/openai/liveAudioTranscription.test.ts b/sdk/js/test/openai/liveAudioTranscription.test.ts new file mode 100644 index 00000000..34edbac7 --- /dev/null +++ b/sdk/js/test/openai/liveAudioTranscription.test.ts @@ -0,0 +1,203 @@ +import { describe, it } from 'mocha'; +import { expect } from 'chai'; +import { parseTranscriptionResult, tryParseCoreError } from '../../src/openai/liveAudioTranscriptionTypes.js'; +import { LiveAudioTranscriptionOptions } from '../../src/openai/liveAudioTranscriptionClient.js'; +import { getTestManager } from '../testUtils.js'; + +describe('Live Audio Transcription Types', () => { + + describe('parseTranscriptionResult', () => { + it('should parse text and is_final', () => { + const json = '{"is_final":true,"text":"hello world","start_time":null,"end_time":null}'; + const result = parseTranscriptionResult(json); + + expect(result.content).to.be.an('array').with.length(1); + expect(result.content[0].text).to.equal('hello world'); + expect(result.content[0].transcript).to.equal('hello world'); + expect(result.is_final).to.be.true; + }); + + 
it('should map timing fields', () => { + const json = '{"is_final":false,"text":"partial","start_time":1.5,"end_time":3.0}'; + const result = parseTranscriptionResult(json); + + expect(result.content[0].text).to.equal('partial'); + expect(result.is_final).to.be.false; + expect(result.start_time).to.equal(1.5); + expect(result.end_time).to.equal(3.0); + }); + + it('should parse empty text successfully', () => { + const json = '{"is_final":true,"text":"","start_time":null,"end_time":null}'; + const result = parseTranscriptionResult(json); + + expect(result.content[0].text).to.equal(''); + expect(result.is_final).to.be.true; + }); + + it('should set both text and transcript to the same value', () => { + const json = '{"is_final":true,"text":"test","start_time":null,"end_time":null}'; + const result = parseTranscriptionResult(json); + + expect(result.content[0].text).to.equal('test'); + expect(result.content[0].transcript).to.equal('test'); + }); + + it('should handle only start_time', () => { + const json = '{"is_final":true,"text":"word","start_time":2.0,"end_time":null}'; + const result = parseTranscriptionResult(json); + + expect(result.start_time).to.equal(2.0); + expect(result.end_time).to.be.null; + expect(result.content[0].text).to.equal('word'); + }); + + it('should throw on invalid JSON', () => { + expect(() => parseTranscriptionResult('not valid json')).to.throw(); + }); + }); + + describe('tryParseCoreError', () => { + it('should parse valid error JSON', () => { + const json = '{"code":"ASR_SESSION_NOT_FOUND","message":"Session not found","isTransient":false}'; + const error = tryParseCoreError(json); + + expect(error).to.not.be.null; + expect(error!.code).to.equal('ASR_SESSION_NOT_FOUND'); + expect(error!.message).to.equal('Session not found'); + expect(error!.isTransient).to.be.false; + }); + + it('should return null for invalid JSON', () => { + const result = tryParseCoreError('not json'); + expect(result).to.be.null; + }); + + it('should parse transient error', () => { + const json = '{"code":"BUSY","message":"Model busy","isTransient":true}'; + const error = tryParseCoreError(json); + + expect(error).to.not.be.null; + expect(error!.isTransient).to.be.true; + }); + + it('should extract error JSON from CoreInterop-prefixed message', () => { + const prefixed = 'Command \'audio_stream_push\' failed: {"code":"ASR_SESSION_NOT_FOUND","message":"Session not found","isTransient":false}'; + const error = tryParseCoreError(prefixed); + + expect(error).to.not.be.null; + expect(error!.code).to.equal('ASR_SESSION_NOT_FOUND'); + expect(error!.message).to.equal('Session not found'); + expect(error!.isTransient).to.be.false; + }); + }); + + describe('LiveAudioTranscriptionOptions', () => { + it('should have correct default values', () => { + const settings = new LiveAudioTranscriptionOptions(); + + expect(settings.sampleRate).to.equal(16000); + expect(settings.channels).to.equal(1); + expect(settings.bitsPerSample).to.equal(16); + expect(settings.language).to.be.undefined; + expect(settings.pushQueueCapacity).to.equal(100); + }); + + it('should create a frozen snapshot', () => { + const settings = new LiveAudioTranscriptionOptions(); + settings.sampleRate = 44100; + settings.language = 'en'; + + const snapshot = settings.snapshot(); + + expect(snapshot.sampleRate).to.equal(44100); + expect(snapshot.language).to.equal('en'); + expect(() => { (snapshot as any).sampleRate = 8000; }).to.throw(); + }); + }); + + // --- E2E streaming test with synthetic PCM audio --- + + describe('E2E with synthetic 
PCM audio', () => { + const NEMOTRON_MODEL_ALIAS = 'nemotron'; + + it('should complete a full streaming session with synthetic audio', async function() { + this.timeout(60000); + + let manager; + try { + manager = getTestManager(); + } catch { + console.log(' (skipped: Core DLL not available)'); + return; + } + + const catalog = manager.catalog; + + // Skip if nemotron model is not cached + const cachedModels = await catalog.getCachedModels(); + const cachedVariant = cachedModels.find(m => m.alias === NEMOTRON_MODEL_ALIAS); + if (!cachedVariant) { + console.log(' (skipped: nemotron model not cached)'); + return; + } + + const model = await catalog.getModel(NEMOTRON_MODEL_ALIAS); + expect(model).to.not.be.undefined; + model!.selectVariant(cachedVariant); + await model!.load(); + + try { + const audioClient = model!.createAudioClient(); + const session = audioClient.createLiveTranscriptionSession(); + session.settings.sampleRate = 16000; + session.settings.channels = 1; + session.settings.bitsPerSample = 16; + session.settings.language = 'en'; + + await session.start(); + + // Collect results in background (must start before pushing audio) + const results: any[] = []; + const readPromise = (async () => { + for await (const result of session.getTranscriptionStream()) { + results.push(result); + } + })(); + + // Generate ~2 seconds of synthetic PCM audio (440Hz sine wave) + const sampleRate = session.settings.sampleRate; + const duration = 2; + const totalSamples = sampleRate * duration; + const pcmBytes = new Uint8Array(totalSamples * 2); + for (let i = 0; i < totalSamples; i++) { + const t = i / sampleRate; + const sample = Math.round(32767 * 0.5 * Math.sin(2 * Math.PI * 440 * t)); + pcmBytes[i * 2] = sample & 0xFF; + pcmBytes[i * 2 + 1] = (sample >> 8) & 0xFF; + } + + // Push audio in 100ms chunks + const chunkSize = (sampleRate / 10) * 2; + for (let offset = 0; offset < pcmBytes.length; offset += chunkSize) { + const len = Math.min(chunkSize, pcmBytes.length - offset); + await session.append(pcmBytes.slice(offset, offset + len)); + } + + // Stop session to flush remaining audio and complete the stream + await session.stop(); + await readPromise; + + // Verify response structure — synthetic audio may not produce text, + // but response objects should be properly shaped + for (const result of results) { + expect(result.content).to.be.an('array').with.length.greaterThan(0); + expect(result.content[0].text).to.be.a('string'); + expect(result.content[0].transcript).to.equal(result.content[0].text); + } + } finally { + await model!.unload(); + } + }); + }); +}); From 203546249a8887445eb45060658446b1b9baa9ec Mon Sep 17 00:00:00 2001 From: bmehta001 Date: Wed, 1 Apr 2026 12:45:04 -0500 Subject: [PATCH 16/83] Explicit EP Download & Per-EP Progress Reporting (#568) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Makes execution provider (EP) management explicit across all SDKs and adds real-time per-EP download progress reporting. Previously, EP downloads happened implicitly during catalog access with no granular progress visibility. Now callers explicitly discover, download, and monitor EPs with typed APIs and streaming progress callbacks. 
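For orientation, a minimal sketch of the new JS flow (names mirror the samples/js/native-chat-completions/app.js changes later in this patch; `manager` is assumed to be an initialized FoundryLocalManager):

```js
// Hedged sketch of explicit EP management. `manager` is assumed to be an
// initialized FoundryLocalManager, as in the JS sample below.
const eps = manager.discoverEps();
for (const ep of eps) {
  console.log(`${ep.name} (registered: ${ep.isRegistered})`);
}

// Download and register all EPs, receiving per-EP percent updates (0-100).
await manager.downloadAndRegisterEps((epName, percent) => {
  console.log(`${epName}: ${percent.toFixed(1)}%`);
});
```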
What's included

Explicit EP discovery and download (all SDKs)

- DiscoverEps() / discoverEps() / discover_eps() — returns typed EpInfo with name and registration status
- DownloadAndRegisterEpsAsync() / downloadAndRegisterEps() / download_and_register_eps() — downloads and registers EPs, returns typed EpDownloadResult
- Catalog access no longer blocks on EP downloads

Per-EP progress callbacks (all SDKs)

- C#: DownloadAndRegisterEpsAsync(names, Action<string, double> progressCallback, ct) — uses ExecuteCommandWithCallbackAsync; parses the wire format with CultureInfo.InvariantCulture for locale safety
- JS: downloadAndRegisterEpsWithProgress(names?, progressCallback?) — uses executeCommandStreaming
- Python: download_and_register_eps(names, progress_callback) — uses execute_command_with_callback
- Rust: download_and_register_eps_with_progress(names, FnMut(&str, f64)) — parses the "name|percent" wire format inside the SDK

Live Audio Transcription (C#)

- New LiveAudioTranscriptionSession with real-time streaming over WebSocket
- Supports start/stop/send audio chunks with configurable output types
- Unit tests with mocked CoreInterop

Other improvements

- Typed EpInfo / EpDownloadResult in dedicated type files across all SDKs
- EP unit tests for JS and Python
- Removed implicit 6-hour catalog TTL caching (delegated to native core)
- New CoreInterop methods for callback-based command execution (C#)
- AOT-compatible JSON serialization context for EP types (C#)

Testing

- New unit tests for EP discovery/download in JS and Python

Breaking changes

- Catalog no longer implicitly triggers EP downloads — callers must explicitly call DownloadAndRegisterEpsAsync / downloadAndRegisterEps / download_and_register_eps before accessing hardware-accelerated models.

---------

Co-authored-by: Baiju Meswani
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 .../cs/audio-transcription-example/Program.cs | 3 +- .../cs/foundry-local-web-server/Program.cs | 2 +- .../Program.cs | 2 +- .../cs/model-management-example/Program.cs | 7 +- samples/cs/native-chat-completions/Program.cs | 36 +++- .../tool-calling-foundry-local-sdk/Program.cs | 2 +- .../Program.cs | 7 +- samples/js/native-chat-completions/app.js | 29 +++ sdk/cs/README.md | 49 ++++- sdk/cs/docs/api/index.md | 14 +- ...osoft.ai.foundry.local.epdownloadresult.md | 59 ++++++ .../api/microsoft.ai.foundry.local.epinfo.md | 35 ++++ ...ft.ai.foundry.local.foundrylocalmanager.md | 128 +++++++++++- .../microsoft.ai.foundry.local.icatalog.md | 54 +++-- .../api/microsoft.ai.foundry.local.imodel.md | 41 ++++ .../api/microsoft.ai.foundry.local.model.md | 49 ++--- .../microsoft.ai.foundry.local.modelinfo.md | 40 ++++ ...soft.ai.foundry.local.openaiaudioclient.md | 14 ++ ...osoft.ai.foundry.local.openaichatclient.md | 54 ++++- sdk/cs/src/Catalog.cs | 5 + sdk/cs/src/Detail/JsonSerializationContext.cs | 2 + sdk/cs/src/EpInfo.cs | 45 +++++ sdk/cs/src/FoundryLocalManager.cs | 188 ++++++++++++++++-- sdk/cs/src/ICatalog.cs | 2 +- sdk/cs/src/Microsoft.AI.Foundry.Local.csproj | 4 +- sdk/js/README.md | 41 ++++ sdk/js/docs/README.md | 64 ++++++ sdk/js/docs/classes/FoundryLocalManager.md | 92 ++++++++- sdk/js/examples/chat-completion.ts | 13 +- sdk/js/src/catalog.ts | 5 + sdk/js/src/detail/coreInterop.ts | 7 +- sdk/js/src/foundryLocalManager.ts | 134 +++++++++++-- sdk/js/src/types.ts | 24 +++ sdk/js/test/foundryLocalManager.test.ts | 62 ++++++ sdk/python/README.md | 44 +++- sdk/python/examples/chat_completion.py | 9 +
sdk/python/requirements-winml.txt | 2 +- sdk/python/src/catalog.py | 6 +- sdk/python/src/ep_types.py | 24 +++ sdk/python/src/foundry_local_manager.py | 94 ++++++++- sdk/python/test/test_foundry_local_manager.py | 61 ++++++ sdk/rust/README.md | 50 +++++ sdk/rust/src/catalog.rs | 5 + sdk/rust/src/foundry_local_manager.rs | 96 ++++++++- sdk/rust/src/lib.rs | 4 +- sdk/rust/src/types.rs | 24 +++ www/src/routes/models/service.ts | 1 - 47 files changed, 1572 insertions(+), 161 deletions(-) create mode 100644 sdk/cs/docs/api/microsoft.ai.foundry.local.epdownloadresult.md create mode 100644 sdk/cs/docs/api/microsoft.ai.foundry.local.epinfo.md create mode 100644 sdk/cs/src/EpInfo.cs create mode 100644 sdk/python/src/ep_types.py diff --git a/samples/cs/audio-transcription-example/Program.cs b/samples/cs/audio-transcription-example/Program.cs index b78e13d2..ac5689c1 100644 --- a/samples/cs/audio-transcription-example/Program.cs +++ b/samples/cs/audio-transcription-example/Program.cs @@ -20,7 +20,7 @@ // EP packages include dependencies and may be large. // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); // @@ -56,6 +56,7 @@ await model.DownloadAsync(progress => // // Get an audio client var audioClient = await model.GetAudioClientAsync(); +audioClient.Settings.Language = "en"; // Get a transcription with streaming outputs var audioFile = args.Length > 0 ? args[0] : Path.Combine(AppContext.BaseDirectory, "Recording.mp3"); diff --git a/samples/cs/foundry-local-web-server/Program.cs b/samples/cs/foundry-local-web-server/Program.cs index 3ca68854..9225ad7d 100644 --- a/samples/cs/foundry-local-web-server/Program.cs +++ b/samples/cs/foundry-local-web-server/Program.cs @@ -26,7 +26,7 @@ // EP packages include dependencies and may be large. // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); // diff --git a/samples/cs/live-audio-transcription-example/Program.cs b/samples/cs/live-audio-transcription-example/Program.cs index 68bba83f..9b4e5921 100644 --- a/samples/cs/live-audio-transcription-example/Program.cs +++ b/samples/cs/live-audio-transcription-example/Program.cs @@ -20,7 +20,7 @@ await FoundryLocalManager.CreateAsync(config, Utils.GetAppLogger()); var mgr = FoundryLocalManager.Instance; -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +await mgr.DownloadAndRegisterEpsAsync(); var catalog = await mgr.GetCatalogAsync(); diff --git a/samples/cs/model-management-example/Program.cs b/samples/cs/model-management-example/Program.cs index 38dec588..a34d2737 100644 --- a/samples/cs/model-management-example/Program.cs +++ b/samples/cs/model-management-example/Program.cs @@ -16,11 +16,8 @@ var mgr = FoundryLocalManager.Instance; -// Ensure that any Execution Provider (EP) downloads run and are completed. -// EP packages include dependencies and may be large. -// Download is only required again if a new version of the EP is released. 
-// For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +// Download and register all execution providers. +await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); // Model catalog operations diff --git a/samples/cs/native-chat-completions/Program.cs b/samples/cs/native-chat-completions/Program.cs index 082a19f5..d1527503 100644 --- a/samples/cs/native-chat-completions/Program.cs +++ b/samples/cs/native-chat-completions/Program.cs @@ -19,11 +19,43 @@ var mgr = FoundryLocalManager.Instance; -// Ensure that any Execution Provider (EP) downloads run and are completed. +// Discover available execution providers and their registration status. +var eps = mgr.DiscoverEps(); +Console.WriteLine("Available execution providers:"); +foreach (var ep in eps) +{ + Console.WriteLine($" {ep.Name} (registered: {ep.IsRegistered})"); +} + +// Download and register all execution providers with per-EP progress. // EP packages include dependencies and may be large. // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +if (eps.Length > 0) +{ + int maxNameLen = eps.Max(e => e.Name.Length); + string currentEp = ""; + await mgr.DownloadAndRegisterEpsAsync((epName, percent) => + { + if (epName != currentEp) + { + if (currentEp != "") + { + Console.WriteLine(); + } + currentEp = epName; + } + Console.Write($"\r {epName.PadRight(maxNameLen)} {percent,6:F1}%"); + if (percent >= 100) + { + Console.WriteLine(); + } + }); +} +else +{ + Console.WriteLine("No execution providers to download."); +} // diff --git a/samples/cs/tool-calling-foundry-local-sdk/Program.cs b/samples/cs/tool-calling-foundry-local-sdk/Program.cs index bbb050c0..8ac96369 100644 --- a/samples/cs/tool-calling-foundry-local-sdk/Program.cs +++ b/samples/cs/tool-calling-foundry-local-sdk/Program.cs @@ -26,7 +26,7 @@ // EP packages include dependencies and may be large. // Download is only required again if a new version of the EP is released. // For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); // diff --git a/samples/cs/tool-calling-foundry-local-web-server/Program.cs b/samples/cs/tool-calling-foundry-local-web-server/Program.cs index 4c283cd4..48ee6c6f 100644 --- a/samples/cs/tool-calling-foundry-local-web-server/Program.cs +++ b/samples/cs/tool-calling-foundry-local-web-server/Program.cs @@ -21,11 +21,8 @@ var mgr = FoundryLocalManager.Instance; -// Ensure that any Execution Provider (EP) downloads run and are completed. -// EP packages include dependencies and may be large. -// Download is only required again if a new version of the EP is released. -// For cross platform builds there is no dynamic EP download and this will return immediately. -await Utils.RunWithSpinner("Registering execution providers", mgr.EnsureEpsDownloadedAsync()); +// Download and register all execution providers. 
+await Utils.RunWithSpinner("Registering execution providers", mgr.DownloadAndRegisterEpsAsync()); // Get the model catalog diff --git a/samples/js/native-chat-completions/app.js b/samples/js/native-chat-completions/app.js index 399fd634..4246f64f 100644 --- a/samples/js/native-chat-completions/app.js +++ b/samples/js/native-chat-completions/app.js @@ -14,6 +14,35 @@ const manager = FoundryLocalManager.create({ // console.log('✓ SDK initialized successfully'); +// Discover available execution providers and their registration status. +const eps = manager.discoverEps(); +console.log('\nAvailable execution providers:'); +for (const ep of eps) { + console.log(` ${ep.name} (registered: ${ep.isRegistered})`); +} + +// Download and register all execution providers with per-EP progress. +// EP packages include dependencies and may be large. +// Download is only required again if a new version of the EP is released. +if (eps.length > 0) { + const maxNameLen = Math.max(...eps.map(e => e.name.length)); + let currentEp = ''; + await manager.downloadAndRegisterEps((epName, percent) => { + if (epName !== currentEp) { + if (currentEp !== '') { + process.stdout.write('\n'); + } + currentEp = epName; + } + process.stdout.write(`\r ${epName.padEnd(maxNameLen)} ${percent.toFixed(1).padStart(5)}%`); + if (percent >= 100) { + process.stdout.write('\n'); + } + }); +} else { + console.log('No execution providers to download.'); +} + // // Get the model object const modelAlias = 'qwen2.5-0.5b'; // Using an available model from the list above diff --git a/sdk/cs/README.md b/sdk/cs/README.md index 7037814b..ad6f477a 100644 --- a/sdk/cs/README.md +++ b/sdk/cs/README.md @@ -48,7 +48,10 @@ dotnet build src/Microsoft.AI.Foundry.Local.csproj /p:UseWinML=true ### Triggering EP download -EP download can be time-consuming. Call `DownloadAndRegisterEpsAsync` early (after initialization) to separate the download step from catalog access: +EP management is explicit via two methods: + +- **`DiscoverEps()`** — returns an array of `EpInfo` describing each available EP and whether it is already registered. +- **`DownloadAndRegisterEpsAsync(names?, progressCallback?, ct?)`** — downloads and registers the specified EPs (or all available EPs if no names are given). Returns an `EpDownloadResult`. Overloads are provided so you can pass just a callback without specifying names. 
```csharp // Initialize the manager first (see Quick Start) @@ -56,13 +59,49 @@ await FoundryLocalManager.CreateAsync( new Configuration { AppName = "my-app" }, NullLogger.Instance); -await FoundryLocalManager.Instance.DownloadAndRegisterEpsAsync(); +var mgr = FoundryLocalManager.Instance; -// Now catalog access won't trigger an EP download -var catalog = await FoundryLocalManager.Instance.GetCatalogAsync(); +// Discover what EPs are available +var eps = mgr.DiscoverEps(); +foreach (var ep in eps) +{ + Console.WriteLine($"{ep.Name} — registered: {ep.IsRegistered}"); +} + +// Download and register all EPs +var result = await mgr.DownloadAndRegisterEpsAsync(); +Console.WriteLine($"Success: {result.Success}, Status: {result.Status}"); + +// Or download only specific EPs +var result2 = await mgr.DownloadAndRegisterEpsAsync(new[] { eps[0].Name }); +``` + +#### Per-EP download progress + +Pass an optional `Action` callback to receive `(epName, percent)` updates +as each EP downloads (`percent` is 0–100): + +```csharp +string currentEp = ""; +await mgr.DownloadAndRegisterEpsAsync((epName, percent) => +{ + if (epName != currentEp) + { + if (currentEp != "") + { + Console.WriteLine(); + } + currentEp = epName; + } + Console.Write($"\r {epName} {percent,6:F1}%"); + if (percent >= 100) + { + Console.WriteLine(); + } +}); ``` -If you skip this step, EPs are downloaded automatically the first time you access the catalog. Once cached, subsequent calls are fast. +Catalog access no longer blocks on EP downloads. Call `DownloadAndRegisterEpsAsync` explicitly when you need hardware-accelerated execution providers. ## Quick Start diff --git a/sdk/cs/docs/api/index.md b/sdk/cs/docs/api/index.md index 1dcc4e4c..4d084f87 100644 --- a/sdk/cs/docs/api/index.md +++ b/sdk/cs/docs/api/index.md @@ -6,6 +6,10 @@ [DeviceType](./microsoft.ai.foundry.local.devicetype.md) +[EpDownloadResult](./microsoft.ai.foundry.local.epdownloadresult.md) + +[EpInfo](./microsoft.ai.foundry.local.epinfo.md) + [FoundryLocalException](./microsoft.ai.foundry.local.foundrylocalexception.md) [FoundryLocalManager](./microsoft.ai.foundry.local.foundrylocalmanager.md) @@ -22,8 +26,6 @@ [ModelSettings](./microsoft.ai.foundry.local.modelsettings.md) -[ModelVariant](./microsoft.ai.foundry.local.modelvariant.md) - [OpenAIAudioClient](./microsoft.ai.foundry.local.openaiaudioclient.md) [OpenAIChatClient](./microsoft.ai.foundry.local.openaichatclient.md) @@ -39,3 +41,11 @@ [AsyncLock](./microsoft.ai.foundry.local.detail.asynclock.md) [CoreInteropRequest](./microsoft.ai.foundry.local.detail.coreinteroprequest.md) + +## Microsoft.AI.Foundry.Local.OpenAI + +[LiveAudioTranscriptionResponse](./microsoft.ai.foundry.local.openai.liveaudiotranscriptionresponse.md) + +[LiveAudioTranscriptionSession](./microsoft.ai.foundry.local.openai.liveaudiotranscriptionsession.md) + +[ResponseFormatExtended](./microsoft.ai.foundry.local.openai.responseformatextended.md) diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.epdownloadresult.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.epdownloadresult.md new file mode 100644 index 00000000..c9ebeb82 --- /dev/null +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.epdownloadresult.md @@ -0,0 +1,59 @@ +# EpDownloadResult + +Namespace: Microsoft.AI.Foundry.Local + +Result of an explicit EP download and registration operation. + +```csharp +public record EpDownloadResult +``` + +## Properties + +### **Success** + +True if all requested EPs were successfully downloaded and registered. 
+ +```csharp +public bool Success { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
+ +### **Status** + +Human-readable status message. + +```csharp +public string Status { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **RegisteredEps** + +Names of EPs that were successfully registered. + +```csharp +public String[] RegisteredEps { get; set; } +``` + +#### Property Value + +[String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **FailedEps** + +Names of EPs that failed to register. + +```csharp +public String[] FailedEps { get; set; } +``` + +#### Property Value + +[String[]](https://docs.microsoft.com/en-us/dotnet/api/system.string)
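> Editor's note: for reference, a minimal sketch of consuming the `EpDownloadResult` documented above. It assumes an initialized `FoundryLocalManager` named `mgr` (see the README example earlier in this patch); the property names are exactly those listed here.

```csharp
// Sketch: trigger an explicit EP download and inspect the result.
// Assumes `mgr` is an initialized FoundryLocalManager instance.
var result = await mgr.DownloadAndRegisterEpsAsync();
if (!result.Success)
{
    Console.WriteLine($"EP registration incomplete: {result.Status}");
    foreach (var name in result.FailedEps)
    {
        Console.WriteLine($"  failed: {name}");
    }
}
Console.WriteLine($"Registered EPs: {string.Join(", ", result.RegisteredEps)}");
```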
diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.epinfo.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.epinfo.md new file mode 100644 index 00000000..d2df44d3 --- /dev/null +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.epinfo.md @@ -0,0 +1,35 @@ +# EpInfo + +Namespace: Microsoft.AI.Foundry.Local + +Describes a discoverable execution provider bootstrapper. + +```csharp +public record EpInfo +``` + +## Properties + +### **Name** + +The identifier of the bootstrapper/execution provider (e.g. "CUDAExecutionProvider"). + +```csharp +public string Name { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **IsRegistered** + +True if this EP has already been successfully downloaded and registered. + +```csharp +public bool IsRegistered { get; set; } +``` + +#### Property Value + +[Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
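> Editor's note: a short sketch pairing `EpInfo` with the name-filtered `DownloadAndRegisterEpsAsync` overload documented further below, so that only unregistered EPs are fetched. `mgr` is again an assumed, already-initialized manager, and `System.Linq` is required for the query operators.

```csharp
using System.Linq;

// Sketch: download only the EPs that DiscoverEps() reports as unregistered.
var pending = mgr.DiscoverEps()
                 .Where(ep => !ep.IsRegistered)
                 .Select(ep => ep.Name)
                 .ToArray();
if (pending.Length > 0)
{
    var result = await mgr.DownloadAndRegisterEpsAsync(pending);
    Console.WriteLine($"Success: {result.Success}, Status: {result.Status}");
}
```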
diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md index 9e5be8aa..5f1ba50e 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.foundrylocalmanager.md @@ -96,9 +96,9 @@ The model catalog. **Remarks:** -The catalog is populated on first use. - If you are using a WinML build this will trigger a one-off execution provider download if not already done. - It is recommended to call [FoundryLocalManager.DownloadAndRegisterEpsAsync(Nullable<CancellationToken>)](./microsoft.ai.foundry.local.foundrylocalmanager.md#downloadandregisterepsasyncnullablecancellationtoken) first to separate out the two steps. +The catalog is populated on first use and returns models based on currently available execution providers. + To ensure all hardware-accelerated models are listed, call [FoundryLocalManager.DownloadAndRegisterEpsAsync(Nullable<CancellationToken>)](./microsoft.ai.foundry.local.foundrylocalmanager.md#downloadandregisterepsasyncnullablecancellationtoken) first to + register execution providers, then access the catalog. ### **StartWebServiceAsync(Nullable<CancellationToken>)** @@ -141,17 +141,26 @@ Optional cancellation token. [Task](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task)
Task stopping the web service. +### **DiscoverEps()** + +Discovers all available execution provider bootstrappers. + Returns metadata about each EP including whether it is already registered. + +```csharp +public EpInfo[] DiscoverEps() +``` + +#### Returns + +[EpInfo[]](./microsoft.ai.foundry.local.epinfo.md)
+Array of EP bootstrapper info describing available EPs. + ### **DownloadAndRegisterEpsAsync(Nullable<CancellationToken>)** -Download and register execution providers. - Only relevant when using WinML. - - Execution provider download can be time consuming due to the size of the packages. - Once downloaded, EPs are not re-downloaded unless a new version is available, so this method will be fast - on subsequent calls. +Downloads and registers all available execution providers. ```csharp -public Task DownloadAndRegisterEpsAsync(Nullable<CancellationToken> ct) +public Task<EpDownloadResult> DownloadAndRegisterEpsAsync(Nullable<CancellationToken> ct) ``` #### Parameters @@ -161,7 +170,104 @@ Optional cancellation token. #### Returns -[Task](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task)
+[Task<EpDownloadResult>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+Result describing which EPs succeeded and which failed. + +**Remarks:** + +Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads. + After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs. + +### **DownloadAndRegisterEpsAsync(IEnumerable<String>, Nullable<CancellationToken>)** + +Downloads and registers the specified execution providers. + +```csharp +public Task<EpDownloadResult> DownloadAndRegisterEpsAsync(IEnumerable<string> names, Nullable<CancellationToken> ct) +``` + +#### Parameters + +`names` [IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+Subset of EP bootstrapper names to download (as returned by [FoundryLocalManager.DiscoverEps()](./microsoft.ai.foundry.local.foundrylocalmanager.md#discovereps)). + +`ct` [Nullable<CancellationToken>](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)
+Optional cancellation token. + +#### Returns + +[Task<EpDownloadResult>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+Result describing which EPs succeeded and which failed. + +**Remarks:** + +Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads. + After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs. + +### **DownloadAndRegisterEpsAsync(Action<String, Double>, Nullable<CancellationToken>)** + +Downloads and registers all available execution providers, reporting progress. + +```csharp +public Task<EpDownloadResult> DownloadAndRegisterEpsAsync(Action<string, double> progressCallback, Nullable<CancellationToken> ct) +``` + +#### Parameters + +`progressCallback` [Action<String, Double>](https://docs.microsoft.com/en-us/dotnet/api/system.action-2)
+Callback invoked as each EP downloads. Parameters are (epName, percentComplete) where percentComplete is 0-100. + +`ct` [Nullable<CancellationToken>](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)
+Optional cancellation token. + +#### Returns + +[Task<EpDownloadResult>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+Result describing which EPs succeeded and which failed. + +**Remarks:** + +Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads. + After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs. + +### **DownloadAndRegisterEpsAsync(IEnumerable<String>, Action<String, Double>, Nullable<CancellationToken>)** + +Downloads and registers the specified execution providers, reporting progress. + +```csharp +public Task<EpDownloadResult> DownloadAndRegisterEpsAsync(IEnumerable<string> names, Action<string, double> progressCallback, Nullable<CancellationToken> ct) +``` + +#### Parameters + +`names` [IEnumerable<String>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+Subset of EP bootstrapper names to download (as returned by [FoundryLocalManager.DiscoverEps()](./microsoft.ai.foundry.local.foundrylocalmanager.md#discovereps)). + +`progressCallback` [Action<String, Double>](https://docs.microsoft.com/en-us/dotnet/api/system.action-2)
+Callback invoked as each EP downloads. Parameters are (epName, percentComplete) where percentComplete is 0-100. + +`ct` [Nullable<CancellationToken>](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)
+Optional cancellation token. + +#### Returns + +[Task<EpDownloadResult>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+Result describing which EPs succeeded and which failed. + +**Remarks:** + +Catalog and model requests use whatever EPs are currently registered and do not block on EP downloads. + After downloading new EPs, re-fetch the model catalog to include models requiring the newly registered EPs. + +### **Dispose(Boolean)** + +```csharp +protected void Dispose(bool disposing) +``` + +#### Parameters + +`disposing` [Boolean](https://docs.microsoft.com/en-us/dotnet/api/system.boolean)
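> Editor's note: since every `DownloadAndRegisterEpsAsync` overload above takes a `Nullable<CancellationToken>`, a long download can be bounded with a timeout. A sketch, assuming `mgr` is initialized and that cancellation surfaces as an `OperationCanceledException` (the exact cancellation behavior is not specified in these docs):

```csharp
// Sketch: bound a potentially large EP download with a 10-minute timeout.
// Assumption: cancellation is observed as OperationCanceledException.
using var cts = new CancellationTokenSource(TimeSpan.FromMinutes(10));
try
{
    var result = await mgr.DownloadAndRegisterEpsAsync(cts.Token);
    Console.WriteLine($"Status: {result.Status}");
}
catch (OperationCanceledException)
{
    Console.WriteLine("EP download cancelled; already-registered EPs remain usable.");
}
```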
### **Dispose()** diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.icatalog.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.icatalog.md index dc68c173..6a3858b2 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.icatalog.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.icatalog.md @@ -29,7 +29,7 @@ public abstract string Name { get; } List the available models in the catalog. ```csharp -Task<List<Model>> ListModelsAsync(Nullable<CancellationToken> ct) +Task<List<IModel>> ListModelsAsync(Nullable<CancellationToken> ct) ``` #### Parameters @@ -39,15 +39,15 @@ Optional CancellationToken. #### Returns -[Task<List<Model>>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
-List of Model instances. +[Task<List<IModel>>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+List of IModel instances. ### **GetModelAsync(String, Nullable<CancellationToken>)** Lookup a model by its alias. ```csharp -Task<Model> GetModelAsync(string modelAlias, Nullable<CancellationToken> ct) +Task<IModel> GetModelAsync(string modelAlias, Nullable<CancellationToken> ct) ``` #### Parameters @@ -60,15 +60,17 @@ Optional CancellationToken. #### Returns -[Task<Model>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
-The matching Model, or null if no model with the given alias exists. +[Task<IModel>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+The matching IModel, or null if no model with the given alias exists. ### **GetModelVariantAsync(String, Nullable<CancellationToken>)** Lookup a model variant by its unique model id. + NOTE: This will return an IModel with a single variant. Use GetModelAsync to get an IModel with all available + variants. ```csharp -Task<ModelVariant> GetModelVariantAsync(string modelId, Nullable<CancellationToken> ct) +Task<IModel> GetModelVariantAsync(string modelId, Nullable<CancellationToken> ct) ``` #### Parameters @@ -81,15 +83,15 @@ Optional CancellationToken. #### Returns -[Task<ModelVariant>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
-The matching ModelVariant, or null if no variant with the given id exists. +[Task<IModel>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+The matching IModel, or null if no variant with the given id exists. ### **GetCachedModelsAsync(Nullable<CancellationToken>)** Get a list of currently downloaded models from the model cache. ```csharp -Task<List<ModelVariant>> GetCachedModelsAsync(Nullable<CancellationToken> ct) +Task<List<IModel>> GetCachedModelsAsync(Nullable<CancellationToken> ct) ``` #### Parameters @@ -99,15 +101,15 @@ Optional CancellationToken. #### Returns -[Task<List<ModelVariant>>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
-List of ModelVariant instances. +[Task<List<IModel>>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+List of IModel instances. ### **GetLoadedModelsAsync(Nullable<CancellationToken>)** Get a list of the currently loaded models. ```csharp -Task<List<ModelVariant>> GetLoadedModelsAsync(Nullable<CancellationToken> ct) +Task<List<IModel>> GetLoadedModelsAsync(Nullable<CancellationToken> ct) ``` #### Parameters @@ -117,5 +119,27 @@ Optional CancellationToken. #### Returns -[Task<List<ModelVariant>>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
-List of ModelVariant instances. +[Task<List<IModel>>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+List of IModel instances. + +### **GetLatestVersionAsync(IModel, Nullable<CancellationToken>)** + +Get the latest version of a model. + This is used to check if a newer version of a model is available in the catalog for download. + +```csharp +Task<IModel> GetLatestVersionAsync(IModel model, Nullable<CancellationToken> ct) +``` + +#### Parameters + +`model` [IModel](./microsoft.ai.foundry.local.imodel.md)
+The model to check for the latest version. + +`ct` [Nullable<CancellationToken>](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)
+Optional CancellationToken. + +#### Returns + +[Task<IModel>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+The latest version of the model. Will match the input if it is the latest version. diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md index d5d2b437..861386a8 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.imodel.md @@ -30,6 +30,28 @@ public abstract string Alias { get; } [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+### **Info** + +```csharp +public abstract ModelInfo Info { get; } +``` + +#### Property Value + +[ModelInfo](./microsoft.ai.foundry.local.modelinfo.md)
+ +### **Variants** + +The available variants of the model. Each variant is optimized for a different device. + +```csharp +public abstract IReadOnlyList<IModel> Variants { get; } +``` + +#### Property Value + +[IReadOnlyList<IModel>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ireadonlylist-1)
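> Editor's note: to make the variant model concrete, a hedged sketch that lists a model's variants and pins one, using `GetModelAsync` from `ICatalog` above and `SelectVariant` documented below. The alias is the one used in the JS sample earlier in this patch; `catalog` is an assumed `ICatalog` instance.

```csharp
// Sketch: enumerate variants of a model and select one explicitly.
// Assumes `catalog` is an ICatalog obtained from the manager.
var model = await catalog.GetModelAsync("qwen2.5-0.5b");
if (model is not null)
{
    foreach (var variant in model.Variants)
    {
        Console.WriteLine($"{variant.Alias}: context length {variant.Info.ContextLength?.ToString() ?? "n/a"}");
    }
    // Must be one of model.Variants, otherwise FoundryLocalException is thrown.
    model.SelectVariant(model.Variants[0]);
}
```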
+ ## Methods ### **IsCachedAsync(Nullable<CancellationToken>)** @@ -185,3 +207,22 @@ Optional cancellation token. [Task<OpenAIAudioClient>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
OpenAI.AudioClient + +### **SelectVariant(IModel)** + +Select a model variant from [IModel.Variants](./microsoft.ai.foundry.local.imodel.md#variants) to use for [IModel](./microsoft.ai.foundry.local.imodel.md) operations. + An IModel from `Variants` can also be used directly. + +```csharp +void SelectVariant(IModel variant) +``` + +#### Parameters + +`variant` [IModel](./microsoft.ai.foundry.local.imodel.md)
+Model variant to select. Must be one of the variants in [IModel.Variants](./microsoft.ai.foundry.local.imodel.md#variants). + +#### Exceptions + +[FoundryLocalException](./microsoft.ai.foundry.local.foundrylocalexception.md)
+If variant is not valid for this model. diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md index c63b78a4..23cd67a3 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.model.md @@ -15,42 +15,42 @@ Attributes [NullableContextAttribute](https://docs.microsoft.com/en-us/dotnet/ap ### **Variants** ```csharp -public List<ModelVariant> Variants { get; internal set; } +public IReadOnlyList<IModel> Variants { get; } ``` #### Property Value -[List<ModelVariant>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.list-1)
+[IReadOnlyList<IModel>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ireadonlylist-1)
-### **SelectedVariant** +### **Alias** ```csharp -public ModelVariant SelectedVariant { get; internal set; } +public string Alias { get; set; } ``` #### Property Value -[ModelVariant](./microsoft.ai.foundry.local.modelvariant.md)
+[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
-### **Alias** +### **Id** ```csharp -public string Alias { get; set; } +public string Id { get; } ``` #### Property Value [String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
-### **Id** +### **Info** ```csharp -public string Id { get; } +public ModelInfo Info { get; } ``` #### Property Value -[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+[ModelInfo](./microsoft.ai.foundry.local.modelinfo.md)
## Methods @@ -86,17 +86,17 @@ public Task<bool> IsLoadedAsync(Nullable<CancellationToken> ct) [Task<Boolean>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
-### **SelectVariant(ModelVariant)** +### **SelectVariant(IModel)** Select a specific model variant from [Model.Variants](./microsoft.ai.foundry.local.model.md#variants) to use for [IModel](./microsoft.ai.foundry.local.imodel.md) operations. ```csharp -public void SelectVariant(ModelVariant variant) +public void SelectVariant(IModel variant) ``` #### Parameters -`variant` [ModelVariant](./microsoft.ai.foundry.local.modelvariant.md)
+`variant` [IModel](./microsoft.ai.foundry.local.imodel.md)
Model variant to select. Must be one of the variants in [Model.Variants](./microsoft.ai.foundry.local.model.md#variants). #### Exceptions @@ -104,29 +104,6 @@ Model variant to select. Must be one of the variants in [Model.Variants](./micro [FoundryLocalException](./microsoft.ai.foundry.local.foundrylocalexception.md)
If variant is not valid for this model. -### **GetLatestVersion(ModelVariant)** - -Get the latest version of the specified model variant. - -```csharp -public ModelVariant GetLatestVersion(ModelVariant variant) -``` - -#### Parameters - -`variant` [ModelVariant](./microsoft.ai.foundry.local.modelvariant.md)
-Model variant. - -#### Returns - -[ModelVariant](./microsoft.ai.foundry.local.modelvariant.md)
-ModelVariant for latest version. Same as `variant` if that is the latest version. - -#### Exceptions - -[FoundryLocalException](./microsoft.ai.foundry.local.foundrylocalexception.md)
-If variant is not valid for this model. - ### **GetPathAsync(Nullable<CancellationToken>)** ```csharp diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.modelinfo.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.modelinfo.md index 750253c1..1716e3b2 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.modelinfo.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.modelinfo.md @@ -222,6 +222,46 @@ public long CreatedAtUnix { get; set; } [Int64](https://docs.microsoft.com/en-us/dotnet/api/system.int64)
+### **ContextLength** + +```csharp +public Nullable<long> ContextLength { get; set; } +``` + +#### Property Value + +[Nullable<Int64>](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)
+ +### **InputModalities** + +```csharp +public string InputModalities { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **OutputModalities** + +```csharp +public string OutputModalities { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
+ +### **Capabilities** + +```csharp +public string Capabilities { get; set; } +``` + +#### Property Value + +[String](https://docs.microsoft.com/en-us/dotnet/api/system.string)
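> Editor's note: a small sketch reading the new `ModelInfo` metadata fields added above, with `model` as an assumed `IModel`; the encoding of the modality and capability strings is not specified in this patch, so they are shown verbatim.

```csharp
// Sketch: surface the new ModelInfo metadata added in this patch.
ModelInfo info = model.Info;
Console.WriteLine($"Context length: {info.ContextLength?.ToString() ?? "unknown"}");
Console.WriteLine($"Input modalities: {info.InputModalities}");
Console.WriteLine($"Output modalities: {info.OutputModalities}");
Console.WriteLine($"Capabilities: {info.Capabilities}");
```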
+ ## Constructors ### **ModelInfo()** diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiaudioclient.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiaudioclient.md index bcaefc04..b1b60bd8 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiaudioclient.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.openaiaudioclient.md @@ -71,3 +71,17 @@ Cancellation token. [IAsyncEnumerable<AudioCreateTranscriptionResponse>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
An asynchronous enumerable of transcription responses. + +### **CreateLiveTranscriptionSession()** + +Create a real-time streaming transcription session. + Audio data is pushed in as PCM chunks and transcription results are returned as an async stream. + +```csharp +public LiveAudioTranscriptionSession CreateLiveTranscriptionSession() +``` + +#### Returns + +[LiveAudioTranscriptionSession](./microsoft.ai.foundry.local.openai.liveaudiotranscriptionsession.md)
+A streaming session that must be disposed when done. diff --git a/sdk/cs/docs/api/microsoft.ai.foundry.local.openaichatclient.md b/sdk/cs/docs/api/microsoft.ai.foundry.local.openaichatclient.md index 251e474c..43e00f6d 100644 --- a/sdk/cs/docs/api/microsoft.ai.foundry.local.openaichatclient.md +++ b/sdk/cs/docs/api/microsoft.ai.foundry.local.openaichatclient.md @@ -51,6 +51,32 @@ Optional cancellation token. [Task<ChatCompletionCreateResponse>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
Chat completion response. +### **CompleteChatAsync(IEnumerable<ChatMessage>, IEnumerable<ToolDefinition>, Nullable<CancellationToken>)** + +Execute a chat completion request. + + To continue a conversation, add the ChatMessage from the previous response and the new prompt to the messages. + +```csharp +public Task<ChatCompletionCreateResponse> CompleteChatAsync(IEnumerable<ChatMessage> messages, IEnumerable<ToolDefinition> tools, Nullable<CancellationToken> ct) +``` + +#### Parameters + +`messages` [IEnumerable<ChatMessage>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+Chat messages. The system message is automatically added. + +`tools` [IEnumerable<ToolDefinition>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+Optional tool definitions to include in the request. + +`ct` [Nullable<CancellationToken>](https://docs.microsoft.com/en-us/dotnet/api/system.nullable-1)
+Optional cancellation token. + +#### Returns + +[Task<ChatCompletionCreateResponse>](https://docs.microsoft.com/en-us/dotnet/api/system.threading.tasks.task-1)
+Chat completion response. + ### **CompleteChatStreamingAsync(IEnumerable<ChatMessage>, CancellationToken)** Execute a chat completion request with streamed output. @@ -67,7 +93,33 @@ public IAsyncEnumerable<ChatCompletionCreateResponse> CompleteChatStreamingAsync Chat messages. The system message is automatically added. `ct` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
-Optional cancellation token. +Cancellation token. + +#### Returns + +[IAsyncEnumerable<ChatCompletionCreateResponse>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.iasyncenumerable-1)
+Async enumerable of chat completion responses. + +### **CompleteChatStreamingAsync(IEnumerable<ChatMessage>, IEnumerable<ToolDefinition>, CancellationToken)** + +Execute a chat completion request with streamed output. + + To continue a conversation, add the ChatMessage from the previous response and the new prompt to the messages. + +```csharp +public IAsyncEnumerable<ChatCompletionCreateResponse> CompleteChatStreamingAsync(IEnumerable<ChatMessage> messages, IEnumerable<ToolDefinition> tools, CancellationToken ct) +``` + +#### Parameters + +`messages` [IEnumerable<ChatMessage>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+Chat messages. The system message is automatically added. + +`tools` [IEnumerable<ToolDefinition>](https://docs.microsoft.com/en-us/dotnet/api/system.collections.generic.ienumerable-1)
+Optional tool definitions to include in the request. + +`ct` [CancellationToken](https://docs.microsoft.com/en-us/dotnet/api/system.threading.cancellationtoken)
+Cancellation token. #### Returns diff --git a/sdk/cs/src/Catalog.cs b/sdk/cs/src/Catalog.cs index 5cdb050f..f33dcaff 100644 --- a/sdk/cs/src/Catalog.cs +++ b/sdk/cs/src/Catalog.cs @@ -240,6 +240,11 @@ private async Task UpdateModels(CancellationToken? ct) _lastFetch = DateTime.Now; } + internal void InvalidateCache() + { + _lastFetch = DateTime.MinValue; + } + public void Dispose() { _lock.Dispose(); diff --git a/sdk/cs/src/Detail/JsonSerializationContext.cs b/sdk/cs/src/Detail/JsonSerializationContext.cs index 3fefd305..37cc81ac 100644 --- a/sdk/cs/src/Detail/JsonSerializationContext.cs +++ b/sdk/cs/src/Detail/JsonSerializationContext.cs @@ -24,6 +24,8 @@ namespace Microsoft.AI.Foundry.Local.Detail; [JsonSerializable(typeof(AudioCreateTranscriptionRequest))] [JsonSerializable(typeof(AudioCreateTranscriptionResponse))] [JsonSerializable(typeof(string[]))] // list loaded or cached models +[JsonSerializable(typeof(EpInfo[]))] +[JsonSerializable(typeof(EpDownloadResult))] [JsonSerializable(typeof(JsonElement))] [JsonSerializable(typeof(ResponseFormatExtended))] [JsonSerializable(typeof(ToolChoice))] diff --git a/sdk/cs/src/EpInfo.cs b/sdk/cs/src/EpInfo.cs new file mode 100644 index 00000000..d170ac0e --- /dev/null +++ b/sdk/cs/src/EpInfo.cs @@ -0,0 +1,45 @@ +// -------------------------------------------------------------------------------------------------------------------- +// +// Copyright (c) Microsoft. All rights reserved. +// +// -------------------------------------------------------------------------------------------------------------------- + +namespace Microsoft.AI.Foundry.Local; + +using System.Text.Json.Serialization; + +///