Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions crates/goose/src/providers/base.rs
Original file line number Diff line number Diff line change
Expand Up @@ -534,8 +534,26 @@ pub trait Provider: Send + Sync {
Ok(vec![])
}

/// Whether to bypass canonical model filtering for this provider.
/// Local no-auth providers (e.g. LM Studio) should return true, since their
/// model names are arbitrary and won't match the canonical registry.
///
/// Defaults to `false` so every provider opts in explicitly; consulted by
/// `fetch_recommended_models` before applying the canonical-registry filter.
fn skip_model_filtering(&self) -> bool {
false
}

/// Fetch models filtered by canonical registry and usability
async fn fetch_recommended_models(&self) -> Result<Vec<String>, ProviderError> {
if self.skip_model_filtering() {
let models = self.fetch_supported_models().await?;
tracing::warn!(
provider = self.get_name(),
count = models.len(),
"Returning all available models without canonical filtering — \
some models may not support tool calling and could be incompatible with Goose"
);
return Ok(models);
Comment on lines +546 to +554

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 Badge Block automatic fallback to the first unfiltered LM Studio model

Returning the raw /models list here feeds arbitrary LM Studio IDs into SwitchModelModal, whose findPreferredModel() helper falls back to validModels[0] and is invoked automatically whenever a provider is chosen with no current model (SwitchModelModal.tsx:62-80,390-400). Because LM Studio model names often do not match the hard-coded preference patterns, opening the modal or switching to LM Studio can silently preselect the alphabetically first model—including embedding or non-tool-capable entries—and save a broken configuration unless the user notices and corrects it manually.

Useful? React with 👍 / 👎.

}

let all_models = self.fetch_supported_models().await?;

let registry = CanonicalModelRegistry::bundled().map_err(|e| {
Expand Down
12 changes: 11 additions & 1 deletion crates/goose/src/providers/declarative/lmstudio.json
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,17 @@
"display_name": "LM Studio",
"description": "Run local models with LM Studio",
"api_key_env": "",
"base_url": "http://localhost:1234/v1/chat/completions",
"base_url": "${LMSTUDIO_HOST}/chat/completions",
"env_vars": [
{
"name": "LMSTUDIO_HOST",
"required": false,
"secret": false,
"description": "LM Studio server URL (e.g. http://localhost:1234/v1)",
"default": "http://localhost:1234/v1"
}
Comment on lines +8 to +15

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 Badge Preserve LM Studio validation after adding default config keys

Adding LMSTUDIO_HOST as an optional key with a default moves LM Studio into the allOptionalWithDefaults branch in DefaultSubmitHandler.tsx:49-65, which returns before the existing getProviderModels() probe runs. In the desktop configure flow, saving a typoed host or a stopped LM Studio server will now look successful and only fail later when the user tries to list or use models, whereas this provider was previously validated on save.

Useful? React with 👍 / 👎.

],
"dynamic_models": true,
"models": [],
"supports_streaming": true,
"requires_auth": false
Expand Down
10 changes: 10 additions & 0 deletions crates/goose/src/providers/openai.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,8 @@ pub struct OpenAiProvider {
custom_headers: Option<HashMap<String, String>>,
supports_streaming: bool,
name: String,
/// Skip canonical model filtering for no-auth local providers (e.g. LM Studio)
skip_model_filter: bool,
}

impl OpenAiProvider {
Expand Down Expand Up @@ -126,6 +128,7 @@ impl OpenAiProvider {
custom_headers,
supports_streaming: true,
name: OPEN_AI_PROVIDER_NAME.to_string(),
skip_model_filter: false,
})
}

Expand All @@ -140,6 +143,7 @@ impl OpenAiProvider {
custom_headers: None,
supports_streaming: true,
name: OPEN_AI_PROVIDER_NAME.to_string(),
skip_model_filter: false,
}
}

Expand Down Expand Up @@ -208,6 +212,7 @@ impl OpenAiProvider {
custom_headers: config.headers,
supports_streaming: config.supports_streaming.unwrap_or(true),
name: config.name.clone(),
skip_model_filter: config.name == "lmstudio",
})
}

Expand Down Expand Up @@ -365,6 +370,10 @@ impl Provider for OpenAiProvider {
self.model.clone()
}

/// Returns the per-instance `skip_model_filter` flag, which is set to
/// `true` only for no-auth local providers (currently when the declarative
/// config name is "lmstudio") whose model IDs won't match the canonical
/// registry — see the `Provider` trait default for context.
fn skip_model_filtering(&self) -> bool {
self.skip_model_filter
}

async fn fetch_supported_models(&self) -> Result<Vec<String>, ProviderError> {
let models_path =
Self::map_base_path(&self.base_path, "models", OPEN_AI_DEFAULT_MODELS_PATH);
Expand Down Expand Up @@ -617,6 +626,7 @@ mod tests {
custom_headers: None,
supports_streaming: true,
name: name.to_string(),
skip_model_filter: false,
}
}

Expand Down
6 changes: 5 additions & 1 deletion ui/desktop/src/components/settings/models/modelInterface.ts
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,11 @@ export async function fetchModelsForProviders(
throwOnError: true,
});
const models = response.data || [];
return { provider: p, models, error: null, warning: null };
let warning = null;
if (p.name === 'lmstudio') {
warning = 'Some local models may not follow the expected format for tool calling in goose. Check the LM Studio documentation for model compatibility.';
}
return { provider: p, models, error: null, warning };
} catch (e: unknown) {
// For custom providers, fall back to the configured model list
if (p.provider_type === 'Custom') {
Expand Down
Loading