diff --git a/apiclient/types/model.go b/apiclient/types/model.go index 4f21f46b29..3fa8412f66 100644 --- a/apiclient/types/model.go +++ b/apiclient/types/model.go @@ -14,6 +14,7 @@ type ModelManifest struct { Alias string `json:"alias,omitempty"` Active bool `json:"active"` Usage ModelUsage `json:"usage"` + Dialect string `json:"dialect,omitempty"` } type ModelList List[Model] diff --git a/apiclient/types/modelprovider.go b/apiclient/types/modelprovider.go index 0c58695662..29f093cd89 100644 --- a/apiclient/types/modelprovider.go +++ b/apiclient/types/modelprovider.go @@ -5,6 +5,9 @@ type CommonProviderMetadata struct { IconDark string `json:"iconDark,omitempty"` Description string `json:"description,omitempty"` Link string `json:"link,omitempty"` + // Dialect specifies the LLM API format used by this provider + // (e.g. "AnthropicMessages", "OpenAIChatCompletions", "OpenAIResponses"). + Dialect string `json:"dialect,omitempty"` } type CommonProviderStatus struct { diff --git a/go.mod b/go.mod index eaedbcc4e9..9e32488f18 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,8 @@ replace ( github.com/obot-platform/obot/logger => ./logger ) +replace github.com/nanobot-ai/nanobot => github.com/calvinmclean/nanobot v0.0.0-20260408174919-d7f157a83d0c + require ( cloud.google.com/go/storage v1.43.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 diff --git a/go.sum b/go.sum index aa2193d4fe..389585bc08 100644 --- a/go.sum +++ b/go.sum @@ -164,6 +164,8 @@ github.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4= github.com/bodgit/windows v1.0.1/go.mod h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM= github.com/bombsimon/logrusr/v4 v4.1.0 h1:uZNPbwusB0eUXlO8hIUwStE6Lr5bLN6IgYgG+75kuh4= github.com/bombsimon/logrusr/v4 v4.1.0/go.mod h1:pjfHC5e59CvjTBIU3V3sGhFWFAnsnhOR03TRc6im0l8= +github.com/calvinmclean/nanobot v0.0.0-20260408174919-d7f157a83d0c h1:0XulKTIDELtp3KS+nIqub+rOJ5bve1LOhFewzE218Gg= +github.com/calvinmclean/nanobot 
v0.0.0-20260408174919-d7f157a83d0c/go.mod h1:6Yi07gQdKON69TMEIVIkPjuNL7R+Iyy6kJ3CUck5Qeg= github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -546,8 +548,6 @@ github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/nanobot-ai/nanobot v0.0.65 h1:oMngi8LQ3vkXQteY5Hzik5yRlIUq9FmA3637mHfgn7I= -github.com/nanobot-ai/nanobot v0.0.65/go.mod h1:QRUCktmw9QyBxt9YR2PKyfnh9R1/OcqOigfND5Dreuw= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/nightlyone/lockfile v1.0.0 h1:RHep2cFKK4PonZJDdEl4GmkabuhbsRMgk/k3uAmxBiA= diff --git a/pkg/api/handlers/nanobotagent.go b/pkg/api/handlers/nanobotagent.go index 53d42dfc59..a470dd0513 100644 --- a/pkg/api/handlers/nanobotagent.go +++ b/pkg/api/handlers/nanobotagent.go @@ -211,7 +211,8 @@ func (h *NanobotAgentHandler) Launch(req api.Context) error { break } var errHTTP *types.ErrHTTP - if !errors.As(err, &errHTTP) || errHTTP.Code != http.StatusBadRequest || !strings.Contains(errHTTP.Message, "NANOBOT_ENV_FILE") { + if !errors.As(err, &errHTTP) || errHTTP.Code != http.StatusBadRequest || + (!strings.Contains(errHTTP.Message, "NANOBOT_ENV_FILE") && !strings.Contains(errHTTP.Message, "NANOBOT_CONFIG_PATH")) { return err } select { diff --git a/pkg/controller/handlers/nanobotagent/nanobotagent.go 
b/pkg/controller/handlers/nanobotagent/nanobotagent.go index e2c9244461..6be01fcbfc 100644 --- a/pkg/controller/handlers/nanobotagent/nanobotagent.go +++ b/pkg/controller/handlers/nanobotagent/nanobotagent.go @@ -11,6 +11,7 @@ import ( "time" "github.com/gptscript-ai/go-gptscript" + nanobottypes "github.com/nanobot-ai/nanobot/pkg/types" "github.com/obot-platform/nah/pkg/backend" "github.com/obot-platform/nah/pkg/name" "github.com/obot-platform/nah/pkg/router" @@ -27,6 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kclient "sigs.k8s.io/controller-runtime/pkg/client" + sigsyaml "sigs.k8s.io/yaml" ) const ( @@ -110,6 +112,17 @@ func (h *Handler) EnsureMCPServer(req router.Request, resp router.Response) erro File: true, DynamicFile: true, }, + { + MCPHeader: types.MCPHeader{ + Name: "NANOBOT_CONFIG_PATH", + Description: "Provider config YAML for Nanobot", + Key: "NANOBOT_CONFIG_PATH", + Sensitive: true, + Required: true, + }, + File: true, + DynamicFile: true, + }, } currentArgs := existing.Spec.Manifest.ContainerizedConfig.Args @@ -124,7 +137,7 @@ func (h *Handler) EnsureMCPServer(req router.Request, resp router.Response) erro } } - if len(existing.Spec.Manifest.Env) != len(expectedEnv) || existing.Spec.Manifest.Env[0] != expectedEnv[0] { + if !slices.Equal(existing.Spec.Manifest.Env, expectedEnv) { needsUpdate = true } @@ -149,7 +162,7 @@ func (h *Handler) EnsureMCPServer(req router.Request, resp router.Response) erro } // Create new MCPServer - args := []string{"run"} + args := []string{"run", "--state", ".nanobot/state/nanobot.db"} if agent.Spec.DefaultAgent != "" { args = append(args, "--agent", agent.Spec.DefaultAgent) } @@ -186,6 +199,17 @@ func (h *Handler) EnsureMCPServer(req router.Request, resp router.Response) erro File: true, DynamicFile: true, }, + { + MCPHeader: types.MCPHeader{ + Name: "NANOBOT_CONFIG_PATH", + Description: "Provider config YAML for Nanobot", + Key: "NANOBOT_CONFIG_PATH", + 
Sensitive: true, + Required: true, + }, + File: true, + DynamicFile: true, + }, }, }, }, @@ -209,6 +233,23 @@ func (h *Handler) EnsureMCPServer(req router.Request, resp router.Response) erro func (h *Handler) ensureCredentials(ctx context.Context, req router.Request, resp router.Response, agent *v1.NanobotAgent, mcpServerName string) error { credCtx := fmt.Sprintf("%s-%s", agent.Spec.UserID, mcpServerName) + llmModel, err := resolveModel(ctx, req.Client, req.Namespace, types.DefaultModelAliasTypeLLM) + if err != nil { + return err + } + llmProvider, llmDefault := h.parseModelProvider(llmModel) + + miniModel, err := resolveModel(ctx, req.Client, req.Namespace, types.DefaultModelAliasTypeLLMMini) + if err != nil { + return err + } + miniProvider, miniDefault := h.parseModelProvider(miniModel) + + providerYAML, err := buildNanobotProviderConfigYAML(llmProvider, miniProvider) + if err != nil { + return fmt.Errorf("failed to build nanobot provider config: %w", err) + } + // Check if credential exists and if the token needs refreshing var needsRefresh bool cred, err := h.gptClient.RevealCredential(ctx, []string{credCtx}, mcpServerName) @@ -222,8 +263,11 @@ func (h *Handler) ensureCredentials(ctx context.Context, req router.Request, res needsRefresh = true log.Debugf("Nanobot credential missing, creating: agent=%s mcpServer=%s", agent.Name, mcpServerName) } else { - // Credential exists, check if token needs refreshing - if token := credEnvFileVars["OPENAI_API_KEY"]; token != "" { + // Credential exists, check if token needs refreshing. + // Use the configured provider's API key env var to find the token. 
+ llmEnvVarName := strings.TrimSuffix(strings.TrimPrefix(llmProvider.APIKey, "${"), "}") + token := credEnvFileVars[llmEnvVarName] + if token != "" { tokenCtx, err := h.tokenService.DecodeToken(ctx, token) if err != nil { // Token is invalid, needs refresh @@ -247,21 +291,15 @@ func (h *Handler) ensureCredentials(ctx context.Context, req router.Request, res } } - llmModel, err := resolveModel(ctx, req.Client, req.Namespace, types.DefaultModelAliasTypeLLM) - if err != nil { - return err - } - miniModel, err := resolveModel(ctx, req.Client, req.Namespace, types.DefaultModelAliasTypeLLMMini) - if err != nil { - return err - } - - if !needsRefresh && credEnvFileVars["NANOBOT_DEFAULT_MODEL"] == llmModel && credEnvFileVars["NANOBOT_DEFAULT_MINI_MODEL"] == miniModel { + if !needsRefresh && + credEnvFileVars["NANOBOT_DEFAULT_MODEL"] == llmDefault && + credEnvFileVars["NANOBOT_DEFAULT_MINI_MODEL"] == miniDefault && + cred.Env["NANOBOT_CONFIG_PATH"] == providerYAML { // Credentials are up to date return nil } - log.Debugf("Refreshing nanobot credentials: agent=%s mcpServer=%s model=%s miniModel=%s", agent.Name, mcpServerName, llmModel, miniModel) + log.Debugf("Refreshing nanobot credentials: agent=%s mcpServer=%s model=%s miniModel=%s", agent.Name, mcpServerName, llmDefault, miniDefault) // Generate a new token that expires in 12 hours now := time.Now() @@ -313,26 +351,34 @@ func (h *Handler) ensureCredentials(ctx context.Context, req router.Request, res return fmt.Errorf("failed to create API key: %w", err) } - // Create or update the credential with the new token and API key + envFileLines := []string{ + fmt.Sprintf("OBOT_URL=%s", h.serverURL), + fmt.Sprintf("MCP_API_KEY=%s", apiKeyResp.Key), + fmt.Sprintf("MCP_API_KEY_ID=%d", apiKeyResp.ID), + fmt.Sprintf("MCP_API_KEY_ID_PREV=%s", credEnvFileVars["MCP_API_KEY_ID"]), + fmt.Sprintf("MCP_SERVER_SEARCH_URL=%s", system.MCPConnectURL(h.serverURL, system.ObotMCPServerName)), + fmt.Sprintf("MCP_SERVER_SEARCH_API_KEY=%s", 
apiKeyResp.Key), + fmt.Sprintf("NANOBOT_DEFAULT_MODEL=%s", llmDefault), + fmt.Sprintf("NANOBOT_DEFAULT_MINI_MODEL=%s", miniDefault), + } + seenProviders := make(map[string]struct{}, 2) + for _, p := range []nanobotLLMProvider{llmProvider, miniProvider} { + if _, ok := seenProviders[p.Name]; ok { + continue + } + seenProviders[p.Name] = struct{}{} + envVarName := strings.TrimSuffix(strings.TrimPrefix(p.APIKey, "${"), "}") + envFileLines = append(envFileLines, fmt.Sprintf("%s=%s", envVarName, token)) + } + + // Create or update the credential with the new token, API key, and provider config. if err := h.gptClient.CreateCredential(ctx, gptscript.Credential{ Context: credCtx, ToolName: mcpServerName, Type: gptscript.CredentialTypeTool, Env: map[string]string{ - "NANOBOT_ENV_FILE": strings.Join([]string{ - fmt.Sprintf("OBOT_URL=%s", h.serverURL), - fmt.Sprintf("ANTHROPIC_BASE_URL=%s/api/llm-proxy/anthropic", h.serverURL), - fmt.Sprintf("OPENAI_BASE_URL=%s/api/llm-proxy/openai", h.serverURL), - fmt.Sprintf("ANTHROPIC_API_KEY=%s", token), - fmt.Sprintf("OPENAI_API_KEY=%s", token), - fmt.Sprintf("MCP_API_KEY=%s", apiKeyResp.Key), - fmt.Sprintf("MCP_API_KEY_ID=%d", apiKeyResp.ID), - fmt.Sprintf("MCP_API_KEY_ID_PREV=%s", credEnvFileVars["MCP_API_KEY_ID"]), - fmt.Sprintf("MCP_SERVER_SEARCH_URL=%s", system.MCPConnectURL(h.serverURL, system.ObotMCPServerName)), - fmt.Sprintf("MCP_SERVER_SEARCH_API_KEY=%s", apiKeyResp.Key), - fmt.Sprintf("NANOBOT_DEFAULT_MODEL=%s", llmModel), - fmt.Sprintf("NANOBOT_DEFAULT_MINI_MODEL=%s", miniModel), - }, "\n"), + "NANOBOT_ENV_FILE": strings.Join(envFileLines, "\n"), + "NANOBOT_CONFIG_PATH": providerYAML, }, }); err != nil { return fmt.Errorf("failed to create credential: %w", err) @@ -355,26 +401,110 @@ func (h *Handler) ensureCredentials(ctx context.Context, req router.Request, res return nil } -func getModelForAlias(ctx context.Context, client kclient.Client, namespace string, aliasName types.DefaultModelAliasType) (string, error) { +// 
resolvedLLMModel pairs the resolved target model name with its configured provider reference +// and the dialect declared by that provider (if any). +type resolvedLLMModel struct { + TargetModel string + ModelProvider string // e.g. "openai-model-provider", "anthropic-model-provider" + ProviderDialect nanobottypes.Dialect // from ProviderMeta.Dialect; empty if not declared +} + +// nanobotLLMProvider describes how a single LLM provider should be configured in nanobot's YAML. +type nanobotLLMProvider struct { + Name string // key in llmProviders map (e.g. "openai", "anthropic") + Dialect nanobottypes.Dialect + APIKey string // env var reference derived from Name, e.g. "${OPENAI_MODEL_PROVIDER_API_KEY}" + BaseURL string // actual Obot proxy URL +} + +// parseModelProvider returns the nanobot provider config and the fully-qualified +// model name (provider/model) for a resolved model. +// +// If the provider has declared a dialect via ProviderMeta.Dialect, that dialect +// is used and the base URL is derived from it. Otherwise the known built-in +// providers (openai, anthropic) supply both; everything else falls back to +// OpenResponses via the generic /api/llm-proxy dispatch. +func (h *Handler) parseModelProvider(model resolvedLLMModel) (nanobotLLMProvider, string) { + name := model.ModelProvider + envVarName := strings.ToUpper(strings.ReplaceAll(name, "-", "_")) + "_API_KEY" + + dialect := model.ProviderDialect + if dialect == "" { + // No declared dialect — fall back to per-provider defaults. 
+ switch model.ModelProvider { + case system.AnthropicModelProviderTool: + dialect = nanobottypes.DialectAnthropicMessages + case system.OpenAIModelProviderTool: + dialect = nanobottypes.DialectOpenAIResponses + default: + dialect = nanobottypes.DialectOpenResponses + } + } + + var baseURL string + switch dialect { + case nanobottypes.DialectAnthropicMessages: + baseURL = h.serverURL + "/api/llm-proxy/anthropic" + case nanobottypes.DialectOpenAIResponses: + baseURL = h.serverURL + "/api/llm-proxy/openai" + default: + baseURL = h.serverURL + "/api/llm-proxy" + } + + p := nanobotLLMProvider{ + Name: name, + Dialect: dialect, + APIKey: fmt.Sprintf("${%s}", envVarName), + BaseURL: baseURL, + } + return p, fmt.Sprintf("%s/%s", p.Name, model.TargetModel) +} + +// buildNanobotProviderConfigYAML generates a nanobot Config YAML containing only the +// providers required by the given LLM and mini-LLM models. +func buildNanobotProviderConfigYAML(providers ...nanobotLLMProvider) (string, error) { + llmProviders := make(map[string]nanobottypes.LLMProvider, len(providers)) + for _, p := range providers { + if _, exists := llmProviders[p.Name]; exists { + continue + } + llmProviders[p.Name] = nanobottypes.LLMProvider{ + Dialect: p.Dialect, + APIKey: p.APIKey, + BaseURL: p.BaseURL, + } + } + data, err := sigsyaml.Marshal(nanobottypes.Config{LLMProviders: llmProviders}) + if err != nil { + return "", err + } + return string(data), nil +} + +func getModelForAlias(ctx context.Context, client kclient.Client, namespace string, aliasName types.DefaultModelAliasType) (resolvedLLMModel, error) { llmModel, err := alias.GetFromScope(ctx, client, "Model", namespace, string(aliasName)) if err != nil { - return "", fmt.Errorf("failed to get default model alias %v: %w", aliasName, err) + return resolvedLLMModel{}, fmt.Errorf("failed to get default model alias %v: %w", aliasName, err) } modelAlias, ok := llmModel.(*v1.DefaultModelAlias) if !ok { - return "", fmt.Errorf("alias %v is not of type 
Alias", aliasName) + return resolvedLLMModel{}, fmt.Errorf("alias %v is not of type Alias", aliasName) } var model v1.Model if err := alias.Get(ctx, client, &model, namespace, modelAlias.Spec.Manifest.Model); err != nil { - return "", err + return resolvedLLMModel{}, err } - return model.Spec.Manifest.TargetModel, nil + return resolvedLLMModel{ + TargetModel: model.Spec.Manifest.TargetModel, + ModelProvider: model.Spec.Manifest.ModelProvider, + ProviderDialect: nanobottypes.Dialect(model.Spec.Manifest.Dialect), + }, nil } -// resolveModel returns a concrete model name for a default alias. +// resolveModel returns a resolved model and its provider for a default alias. // // It prefers an explicitly configured alias target when one exists. If the // alias is unset or cannot be resolved, it falls back to active LLM models in @@ -382,14 +512,14 @@ func getModelForAlias(ctx context.Context, client kclient.Client, namespace stri // for that alias. The llm-mini alias falls back to the resolved llm model when // no preferred mini model is available. All other aliases fall back to the // first active LLM model available. 
-func resolveModel(ctx context.Context, client kclient.Client, namespace string, aliasName types.DefaultModelAliasType) (string, error) { - if model, err := getModelForAlias(ctx, client, namespace, aliasName); err == nil && strings.TrimSpace(model) != "" { +func resolveModel(ctx context.Context, client kclient.Client, namespace string, aliasName types.DefaultModelAliasType) (resolvedLLMModel, error) { + if model, err := getModelForAlias(ctx, client, namespace, aliasName); err == nil && strings.TrimSpace(model.TargetModel) != "" { return model, nil } models, err := listActiveLLMModels(ctx, client, namespace) if err != nil { - return "", err + return resolvedLLMModel{}, err } return chooseModel(ctx, client, namespace, models, aliasName) @@ -422,12 +552,16 @@ func listActiveLLMModels(ctx context.Context, client kclient.Client, namespace s return result, nil } -func chooseModel(ctx context.Context, client kclient.Client, namespace string, models []v1.Model, aliasName types.DefaultModelAliasType) (string, error) { +func chooseModel(ctx context.Context, client kclient.Client, namespace string, models []v1.Model, aliasName types.DefaultModelAliasType) (resolvedLLMModel, error) { preferred := preferredModelsForAlias(aliasName) for _, preferredName := range preferred { for _, model := range models { if model.Spec.Manifest.TargetModel == preferredName || model.Spec.Manifest.Name == preferredName { - return model.Spec.Manifest.TargetModel, nil + return resolvedLLMModel{ + TargetModel: model.Spec.Manifest.TargetModel, + ModelProvider: model.Spec.Manifest.ModelProvider, + ProviderDialect: nanobottypes.Dialect(model.Spec.Manifest.Dialect), + }, nil } } } @@ -437,10 +571,14 @@ func chooseModel(ctx context.Context, client kclient.Client, namespace string, m } if len(models) > 0 { - return models[0].Spec.Manifest.TargetModel, nil + return resolvedLLMModel{ + TargetModel: models[0].Spec.Manifest.TargetModel, + ModelProvider: models[0].Spec.Manifest.ModelProvider, + ProviderDialect: 
nanobottypes.Dialect(models[0].Spec.Manifest.Dialect), + }, nil } - return "", fmt.Errorf("failed to resolve default model for alias %s: no active llm models available", aliasName) + return resolvedLLMModel{}, fmt.Errorf("failed to resolve default model for alias %s: no active llm models available", aliasName) } func preferredModelsForAlias(aliasName types.DefaultModelAliasType) []string { diff --git a/pkg/controller/handlers/nanobotagent/nanobotagent_test.go b/pkg/controller/handlers/nanobotagent/nanobotagent_test.go index 17a12f66eb..fc25a9d0b9 100644 --- a/pkg/controller/handlers/nanobotagent/nanobotagent_test.go +++ b/pkg/controller/handlers/nanobotagent/nanobotagent_test.go @@ -4,11 +4,14 @@ import ( "context" "testing" + nanobottypes "github.com/nanobot-ai/nanobot/pkg/types" "github.com/obot-platform/obot/apiclient/types" v1 "github.com/obot-platform/obot/pkg/storage/apis/obot.obot.ai/v1" storagescheme "github.com/obot-platform/obot/pkg/storage/scheme" + "github.com/obot-platform/obot/pkg/system" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client/fake" + sigsyaml "sigs.k8s.io/yaml" ) func TestChooseModelPrefersKnownNames(t *testing.T) { @@ -40,8 +43,8 @@ func TestChooseModelPrefersKnownNames(t *testing.T) { t.Fatalf("expected model, got error: %v", err) } - if model != "gpt-5.4" { - t.Fatalf("expected gpt-5.4, got %q", model) + if model.TargetModel != "gpt-5.4" { + t.Fatalf("expected gpt-5.4, got %q", model.TargetModel) } } @@ -64,8 +67,8 @@ func TestChooseModelFallsBackToFirstActiveModel(t *testing.T) { t.Fatalf("expected model, got error: %v", err) } - if model != "model-a" { - t.Fatalf("expected model-a, got %q", model) + if model.TargetModel != "model-a" { + t.Fatalf("expected model-a, got %q", model.TargetModel) } } @@ -98,8 +101,238 @@ func TestChooseModelPrefersSuggestedOrder(t *testing.T) { t.Fatalf("expected model, got error: %v", err) } - if model != "gpt-5.4" { - t.Fatalf("expected gpt-5.4, got %q", model) + if 
model.TargetModel != "gpt-5.4" { + t.Fatalf("expected gpt-5.4, got %q", model.TargetModel) + } +} + +func TestNanobotParseModelProviderDeclaredDialectDrivesURL(t *testing.T) { + h := &Handler{serverURL: "https://obot.example.com"} + + for _, tc := range []struct { + dialect nanobottypes.Dialect + wantBaseURL string + }{ + {nanobottypes.DialectAnthropicMessages, "https://obot.example.com/api/llm-proxy/anthropic"}, + {nanobottypes.DialectOpenAIResponses, "https://obot.example.com/api/llm-proxy/openai"}, + {nanobottypes.DialectOpenAIChatCompletions, "https://obot.example.com/api/llm-proxy"}, + {nanobottypes.DialectOpenResponses, "https://obot.example.com/api/llm-proxy"}, + } { + model := resolvedLLMModel{ + TargetModel: "some-model", + ModelProvider: "custom-model-provider", + ProviderDialect: tc.dialect, + } + p, _ := h.parseModelProvider(model) + if p.BaseURL != tc.wantBaseURL { + t.Errorf("dialect %s: baseURL = %q, want %q", tc.dialect, p.BaseURL, tc.wantBaseURL) + } + if p.Dialect != tc.dialect { + t.Errorf("dialect %s: provider dialect = %q, want same", tc.dialect, p.Dialect) + } + } +} + +func TestNanobotParseModelProviderBuiltinFallbacks(t *testing.T) { + h := &Handler{serverURL: "https://obot.example.com"} + + for _, tc := range []struct { + modelProvider string + wantDialect nanobottypes.Dialect + wantBaseURL string + }{ + {system.OpenAIModelProviderTool, nanobottypes.DialectOpenAIResponses, "https://obot.example.com/api/llm-proxy/openai"}, + {system.AnthropicModelProviderTool, nanobottypes.DialectAnthropicMessages, "https://obot.example.com/api/llm-proxy/anthropic"}, + {"unknown-model-provider", nanobottypes.DialectOpenResponses, "https://obot.example.com/api/llm-proxy"}, + } { + model := resolvedLLMModel{TargetModel: "my-model", ModelProvider: tc.modelProvider} + p, qualifiedName := h.parseModelProvider(model) + if p.Dialect != tc.wantDialect { + t.Errorf("%s: dialect = %q, want %q", tc.modelProvider, p.Dialect, tc.wantDialect) + } + if p.BaseURL != 
tc.wantBaseURL { + t.Errorf("%s: baseURL = %q, want %q", tc.modelProvider, p.BaseURL, tc.wantBaseURL) + } + wantName := tc.modelProvider + "/my-model" + if qualifiedName != wantName { + t.Errorf("%s: qualified name = %q, want %q", tc.modelProvider, qualifiedName, wantName) + } + } +} + +func TestBuildNanobotProviderConfigYAMLSingleProvider(t *testing.T) { + p := nanobotLLMProvider{ + Name: "openai-model-provider", + Dialect: nanobottypes.DialectOpenAIResponses, + APIKey: "${OPENAI_MODEL_PROVIDER_API_KEY}", + BaseURL: "https://obot.example.com/api/llm-proxy/openai", + } + + yaml, err := buildNanobotProviderConfigYAML(p) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + var cfg nanobottypes.Config + if err := sigsyaml.Unmarshal([]byte(yaml), &cfg); err != nil { + t.Fatalf("failed to parse output YAML: %v", err) + } + + if len(cfg.LLMProviders) != 1 { + t.Fatalf("expected 1 provider, got %d", len(cfg.LLMProviders)) + } + got := cfg.LLMProviders["openai-model-provider"] + if got.Dialect != nanobottypes.DialectOpenAIResponses { + t.Errorf("dialect = %q, want OpenAIResponses", got.Dialect) + } + if got.BaseURL != p.BaseURL { + t.Errorf("baseURL = %q, want %q", got.BaseURL, p.BaseURL) + } +} + +func TestBuildNanobotProviderConfigYAMLMultipleProviders(t *testing.T) { + openai := nanobotLLMProvider{ + Name: "openai-model-provider", + Dialect: nanobottypes.DialectOpenAIResponses, + APIKey: "${OPENAI_MODEL_PROVIDER_API_KEY}", + BaseURL: "https://obot.example.com/api/llm-proxy/openai", + } + anthropic := nanobotLLMProvider{ + Name: "anthropic-model-provider", + Dialect: nanobottypes.DialectAnthropicMessages, + APIKey: "${ANTHROPIC_MODEL_PROVIDER_API_KEY}", + BaseURL: "https://obot.example.com/api/llm-proxy/anthropic", + } + + yaml, err := buildNanobotProviderConfigYAML(openai, anthropic) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + var cfg nanobottypes.Config + if err := sigsyaml.Unmarshal([]byte(yaml), &cfg); err != nil { + 
t.Fatalf("failed to parse output YAML: %v", err) + } + + if len(cfg.LLMProviders) != 2 { + t.Fatalf("expected 2 providers, got %d: %v", len(cfg.LLMProviders), cfg.LLMProviders) + } + if cfg.LLMProviders["openai-model-provider"].Dialect != nanobottypes.DialectOpenAIResponses { + t.Errorf("openai dialect = %q, want OpenAIResponses", cfg.LLMProviders["openai-model-provider"].Dialect) + } + if cfg.LLMProviders["anthropic-model-provider"].Dialect != nanobottypes.DialectAnthropicMessages { + t.Errorf("anthropic dialect = %q, want AnthropicMessages", cfg.LLMProviders["anthropic-model-provider"].Dialect) + } +} + +func TestBuildNanobotProviderConfigYAMLDeduplicates(t *testing.T) { + p := nanobotLLMProvider{ + Name: "openai-model-provider", + Dialect: nanobottypes.DialectOpenAIResponses, + APIKey: "${OPENAI_MODEL_PROVIDER_API_KEY}", + BaseURL: "https://obot.example.com/api/llm-proxy/openai", + } + + yaml, err := buildNanobotProviderConfigYAML(p, p) // same provider twice + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + + var cfg nanobottypes.Config + if err := sigsyaml.Unmarshal([]byte(yaml), &cfg); err != nil { + t.Fatalf("failed to parse output YAML: %v", err) + } + + if len(cfg.LLMProviders) != 1 { + t.Errorf("expected deduplication to 1 provider, got %d", len(cfg.LLMProviders)) + } +} + +func TestResolveModelCarriesProviderAndDialect(t *testing.T) { + c := fake.NewClientBuilder(). + WithScheme(storagescheme.Scheme). 
+ WithObjects( + &v1.DefaultModelAlias{ + TypeMeta: metav1.TypeMeta{APIVersion: v1.SchemeGroupVersion.String(), Kind: "DefaultModelAlias"}, + ObjectMeta: metav1.ObjectMeta{Name: "llm"}, + Spec: v1.DefaultModelAliasSpec{ + Manifest: types.DefaultModelAliasManifest{Alias: "llm", Model: "groq-llama"}, + }, + }, + &v1.Model{ + TypeMeta: metav1.TypeMeta{APIVersion: v1.SchemeGroupVersion.String(), Kind: "Model"}, + ObjectMeta: metav1.ObjectMeta{Name: "groq-llama"}, + Spec: v1.ModelSpec{ + Manifest: types.ModelManifest{ + Name: "groq-llama", + TargetModel: "llama-3.1-70b-versatile", + ModelProvider: "groq-model-provider", + Active: true, + Usage: types.ModelUsageLLM, + Dialect: string(nanobottypes.DialectOpenAIChatCompletions), + }, + }, + }, + ).Build() + + model, err := resolveModel(context.Background(), c, "", types.DefaultModelAliasTypeLLM) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if model.TargetModel != "llama-3.1-70b-versatile" { + t.Errorf("TargetModel = %q, want llama-3.1-70b-versatile", model.TargetModel) + } + if model.ModelProvider != "groq-model-provider" { + t.Errorf("ModelProvider = %q, want groq-model-provider", model.ModelProvider) + } + if model.ProviderDialect != nanobottypes.DialectOpenAIChatCompletions { + t.Errorf("ProviderDialect = %q, want OpenAIChatCompletions", model.ProviderDialect) + } +} + +// TestMultipleProvidersWhenLLMAndMiniDiffer verifies that when the default LLM and +// mini-LLM models are on different providers, both providers appear in the generated +// nanobot config YAML. 
+func TestMultipleProvidersWhenLLMAndMiniDiffer(t *testing.T) { + h := &Handler{serverURL: "https://obot.example.com"} + + llmModel := resolvedLLMModel{ + TargetModel: "claude-sonnet-4-6", + ModelProvider: system.AnthropicModelProviderTool, + } + miniModel := resolvedLLMModel{ + TargetModel: "gpt-4.1-mini", + ModelProvider: system.OpenAIModelProviderTool, + } + + llmProvider, llmDefault := h.parseModelProvider(llmModel) + miniProvider, miniDefault := h.parseModelProvider(miniModel) + + if llmDefault != system.AnthropicModelProviderTool+"/claude-sonnet-4-6" { + t.Errorf("llmDefault = %q, want %s/claude-sonnet-4-6", llmDefault, system.AnthropicModelProviderTool) + } + if miniDefault != system.OpenAIModelProviderTool+"/gpt-4.1-mini" { + t.Errorf("miniDefault = %q, want %s/gpt-4.1-mini", miniDefault, system.OpenAIModelProviderTool) + } + + yaml, err := buildNanobotProviderConfigYAML(llmProvider, miniProvider) + if err != nil { + t.Fatalf("buildNanobotProviderConfigYAML: %v", err) + } + + var cfg nanobottypes.Config + if err := sigsyaml.Unmarshal([]byte(yaml), &cfg); err != nil { + t.Fatalf("failed to parse output YAML: %v", err) + } + + if len(cfg.LLMProviders) != 2 { + t.Fatalf("expected 2 providers (one per model), got %d:\n%s", len(cfg.LLMProviders), yaml) + } + if _, ok := cfg.LLMProviders[system.AnthropicModelProviderTool]; !ok { + t.Errorf("anthropic-model-provider missing from YAML") + } + if _, ok := cfg.LLMProviders[system.OpenAIModelProviderTool]; !ok { + t.Errorf("openai-model-provider missing from YAML") } } @@ -160,7 +393,7 @@ func TestChooseModelMiniFallsBackToResolvedLLM(t *testing.T) { t.Fatalf("expected model, got error: %v", err) } - if model != "gpt-5.4" { - t.Fatalf("expected gpt-5.4, got %q", model) + if model.TargetModel != "gpt-5.4" { + t.Fatalf("expected gpt-5.4, got %q", model.TargetModel) } } diff --git a/pkg/controller/handlers/toolreference/toolreference.go b/pkg/controller/handlers/toolreference/toolreference.go index ad1b832a99..84b105f925 
100644 --- a/pkg/controller/handlers/toolreference/toolreference.go +++ b/pkg/controller/handlers/toolreference/toolreference.go @@ -416,6 +416,14 @@ func (h *Handler) BackPopulateModels(req router.Request, _ router.Response) erro } } + var dialect string + if toolRef.Status.Tool != nil && toolRef.Status.Tool.Metadata["providerMeta"] != "" { + var meta types.CommonProviderMetadata + if err := json.Unmarshal([]byte(toolRef.Status.Tool.Metadata["providerMeta"]), &meta); err == nil { + dialect = meta.Dialect + } + } + availableModels, err := h.dispatcher.ModelsForProvider(req.Ctx, h.gptClient, req.Namespace, req.Name) if err != nil { // Don't error and retry because it will likely fail again. Log the error, and the user can re-sync manually. @@ -470,6 +478,7 @@ func (h *Handler) BackPopulateModels(req router.Request, _ router.Response) erro ModelProvider: toolRef.Name, Active: true, Usage: types.ModelUsage(model.Metadata["usage"]), + Dialect: dialect, }, }, }) diff --git a/pkg/mcp/backend.go b/pkg/mcp/backend.go index 47b48d16fb..5ab83bda30 100644 --- a/pkg/mcp/backend.go +++ b/pkg/mcp/backend.go @@ -176,7 +176,7 @@ func webhookToServerConfig(webhook Webhook, baseImage, mcpServerName, userID, sc }, nil } -func constructNanobotYAMLForCompositeServer(servers []ComponentServer) ([]byte, error) { +func constructMCPServerNanobotYAMLForComposite(servers []ComponentServer) ([]byte, error) { mcpServers := make(map[string]nanobotConfigMCPServer, len(servers)) names := make([]string, 0, len(servers)) replacer := strings.NewReplacer("/", "-", ":", "-", "?", "-") @@ -217,22 +217,22 @@ func constructNanobotYAMLForCompositeServer(servers []ComponentServer) ([]byte, return data, nil } -func constructNanobotYAMLForServer(name, url, command string, args []string, env, headers map[string][]byte, webhooks []Webhook) ([]byte, error) { +func constructMCPServerNanobotYAML(name, url, command string, args []string, env, headers map[string][]byte, webhooks []Webhook) ([]byte, error) { 
replacer := strings.NewReplacer("/", "-", ":", "-", "?", "-") webhookDefinitions := make(map[string][]string, len(webhooks)) mcpServers := make(map[string]nanobotConfigMCPServer, len(webhooks)+1) for _, webhook := range webhooks { - name := replacer.Replace(webhook.DisplayName) - if name == "" { - name = replacer.Replace(webhook.Name) + webhookName := replacer.Replace(webhook.DisplayName) + if webhookName == "" { + webhookName = replacer.Replace(webhook.Name) } - mcpServers[name] = nanobotConfigMCPServer{ + mcpServers[webhookName] = nanobotConfigMCPServer{ BaseURL: webhook.URL, } for _, def := range webhook.Definitions { - webhookDefinitions[def] = append(webhookDefinitions[def], fmt.Sprintf("%s/%s", name, webhookToolName)) + webhookDefinitions[def] = append(webhookDefinitions[def], fmt.Sprintf("%s/%s", webhookName, webhookToolName)) } } diff --git a/pkg/mcp/docker.go b/pkg/mcp/docker.go index f4699ada3e..ae46ac6eeb 100644 --- a/pkg/mcp/docker.go +++ b/pkg/mcp/docker.go @@ -935,10 +935,9 @@ func (d *dockerBackend) createAndStartContainer(ctx context.Context, server Serv containerPort = defaultContainerPort - // Prepare nanobot configuration - nanobotVolumeName, err := d.prepareNanobotConfig(ctx, server, fileEnvVars, webhooks) + nanobotVolumeName, err := d.prepareMCPServerNanobotConfig(ctx, server, fileEnvVars, webhooks) if err != nil { - return "", 0, fmt.Errorf("failed to prepare nanobot config: %w", err) + return "", 0, fmt.Errorf("failed to prepare MCP server nanobot config: %w", err) } volumeMounts = append(volumeMounts, mount.Mount{ @@ -1244,6 +1243,56 @@ func (d *dockerBackend) createVolumeWithFiles(ctx context.Context, files []File, return volumeName, envVars, nil } +// runInitContainer pulls alpine:latest (if not present), runs a one-shot sh -c container +// with the given script and mounts, waits for it to exit, and returns any error. 
+func (d *dockerBackend) runInitContainer(ctx context.Context, namePrefix, script string, mounts []mount.Mount) error { + initImage := "alpine:latest" + if err := d.pullImage(ctx, initImage, true); err != nil { + return fmt.Errorf("failed to ensure init image exists: %w", err) + } + + networkingConfig := &network.NetworkingConfig{} + if d.network != "" { + networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ + d.network: {}, + } + } + + resp, err := d.client.ContainerCreate(ctx, + &container.Config{ + Image: initImage, + Entrypoint: []string{"sh", "-c"}, + Cmd: []string{script}, + }, + &container.HostConfig{ + Mounts: mounts, + AutoRemove: true, + }, + networkingConfig, nil, + fmt.Sprintf("%s-%s", namePrefix, strings.ToLower(rand.Text()))) + if err != nil { + return fmt.Errorf("failed to create init container: %w", err) + } + + if err := d.client.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { + return fmt.Errorf("failed to start init container: %w", err) + } + + statusCh, errCh := d.client.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) + select { + case err := <-errCh: + if err != nil && !cerrdefs.IsNotFound(err) { + return fmt.Errorf("error waiting for init container: %w", err) + } + case status := <-statusCh: + if status.StatusCode != 0 { + return fmt.Errorf("init container %s failed with exit code %d", namePrefix, status.StatusCode) + } + } + + return nil +} + func containerFiles(files []File, containerName string) (map[string]string, map[string]string) { fileContents := make(map[string]string, len(files)) envVars := make(map[string]string, len(files)) @@ -1294,11 +1343,6 @@ func fileEnvKeysHash(files []File) string { } func (d *dockerBackend) populateFilesVolume(ctx context.Context, volumeName, containerName string, fileContents map[string]string) error { - initImage := "alpine:latest" - if err := d.pullImage(ctx, initImage, true); err != nil { - return fmt.Errorf("failed to ensure init image exists: 
%w", err) - } - var script strings.Builder script.WriteString("#!/bin/sh\nset -e\n") script.WriteString("rm -f /files/*\n") @@ -1314,44 +1358,11 @@ func (d *dockerBackend) populateFilesVolume(ctx context.Context, volumeName, con fmt.Fprintf(&script, "cat > '%s' << 'EOF'\n%s\nEOF\n", containerPath, fileContents[filename]) } - initConfig := &container.Config{ - Image: initImage, - Entrypoint: []string{"sh", "-c"}, - Cmd: []string{script.String()}, - WorkingDir: "/", - } - - initHostConfig := &container.HostConfig{ - Mounts: []mount.Mount{{ - Type: mount.TypeVolume, - Source: volumeName, - Target: "/files", - }}, - AutoRemove: true, - } - - resp, err := d.client.ContainerCreate(ctx, initConfig, initHostConfig, &network.NetworkingConfig{}, nil, fmt.Sprintf("%s-init-%s", containerName, strings.ToLower(rand.Text()))) - if err != nil { - return fmt.Errorf("failed to create init container: %w", err) - } - - if err := d.client.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { - return fmt.Errorf("failed to start init container: %w", err) - } - - statusCh, errCh := d.client.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) - select { - case err := <-errCh: - if err != nil && !cerrdefs.IsNotFound(err) { - return fmt.Errorf("error waiting for init container: %w", err) - } - case status := <-statusCh: - if status.StatusCode != 0 { - return fmt.Errorf("init container failed with exit code %d", status.StatusCode) - } - } - - return nil + return d.runInitContainer(ctx, containerName+"-init", script.String(), []mount.Mount{{ + Type: mount.TypeVolume, + Source: volumeName, + Target: "/files", + }}) } func (d *dockerBackend) pullImage(ctx context.Context, imageName string, ifNotExists bool) error { @@ -1386,8 +1397,9 @@ func (d *dockerBackend) pullImage(ctx context.Context, imageName string, ifNotEx return nil } -// prepareNanobotConfig creates a volume with nanobot YAML configuration for UVX/NPX runtimes -func (d *dockerBackend) 
prepareNanobotConfig(ctx context.Context, server ServerConfig, envVars map[string]string, webhooks []Webhook) (string, error) { +// prepareMCPServerNanobotConfig creates a volume containing the nanobot.yaml that configures +// how nanobot proxies to the underlying MCP server (used for UVX/NPX/remote/composite runtimes). +func (d *dockerBackend) prepareMCPServerNanobotConfig(ctx context.Context, server ServerConfig, envVars map[string]string, webhooks []Webhook) (string, error) { // Create all environment variables map allEnvVars := make(map[string][]byte, len(server.Env)+len(envVars)) headers := make(map[string][]byte, len(server.Headers)) @@ -1414,83 +1426,35 @@ func (d *dockerBackend) prepareNanobotConfig(ctx context.Context, server ServerC err error ) if server.Runtime == otypes.RuntimeComposite { - nanobotYAML, err = constructNanobotYAMLForCompositeServer(server.Components) + nanobotYAML, err = constructMCPServerNanobotYAMLForComposite(server.Components) } else { - nanobotYAML, err = constructNanobotYAMLForServer(server.MCPServerDisplayName, server.URL, server.Command, server.Args, allEnvVars, headers, webhooks) + nanobotYAML, err = constructMCPServerNanobotYAML(server.MCPServerDisplayName, server.URL, server.Command, server.Args, allEnvVars, headers, webhooks) } if err != nil { return "", fmt.Errorf("failed to construct nanobot YAML: %w", err) } - volumeName := server.MCPServerName + "-nanobot-config" - // Create volume for nanobot config + volumeName := server.MCPServerName + "-mcp-server-nanobot-config" _, err = d.client.VolumeCreate(ctx, volume.CreateOptions{ Labels: map[string]string{ "mcp.server.id": server.MCPServerName, - "mcp.purpose": "nanobot-config", + "mcp.purpose": "mcp-server-nanobot-config", }, Name: volumeName, }) if err != nil && !cerrdefs.IsAlreadyExists(err) { - return "", fmt.Errorf("failed to create nanobot config volume: %w", err) - } - - // Create init container to populate the volume with nanobot config - initImage := "alpine:latest" - 
if err = d.pullImage(ctx, initImage, true); err != nil { - return "", fmt.Errorf("failed to ensure init image exists: %w", err) + return "", fmt.Errorf("failed to create MCP server nanobot config volume: %w", err) } - // Create script to write nanobot config script := fmt.Sprintf("cat > /config/nanobot.yaml << 'EOF'\n%s\nEOF\n", nanobotYAML) - - // Create and run init container - initConfig := &container.Config{ - Image: initImage, - Entrypoint: []string{"sh", "-c"}, - Cmd: []string{script}, - } - - initHostConfig := &container.HostConfig{ - Mounts: []mount.Mount{ - { - Type: mount.TypeVolume, - Source: volumeName, - Target: "/config", - }, + if err = d.runInitContainer(ctx, server.MCPServerName+"-nanobot-init", script, []mount.Mount{ + { + Type: mount.TypeVolume, + Source: volumeName, + Target: "/config", }, - AutoRemove: true, - } - - // Configure network (same as main containers) - initNetworkingConfig := &network.NetworkingConfig{} - if d.network != "" { - initNetworkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{ - d.network: {}, - } - } - - resp, err := d.client.ContainerCreate(ctx, initConfig, initHostConfig, initNetworkingConfig, nil, fmt.Sprintf("%s-nanobot-init-%s", server.MCPServerName, strings.ToLower(rand.Text()))) - if err != nil { - return "", fmt.Errorf("failed to create nanobot init container: %w", err) - } - - // Start and wait for init container to complete - if err := d.client.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { - return "", fmt.Errorf("failed to start init container: %w", err) - } - - // Wait for init container to complete - statusCh, errCh := d.client.ContainerWait(ctx, resp.ID, container.WaitConditionNotRunning) - select { - case err := <-errCh: - if err != nil && !cerrdefs.IsNotFound(err) { - return "", fmt.Errorf("error waiting for nanobot init container: %w", err) - } - case status := <-statusCh: - if status.StatusCode != 0 { - return "", fmt.Errorf("nanobot init container failed with exit 
code %d", status.StatusCode) - } + }); err != nil { + return "", err + } return volumeName, nil diff --git a/pkg/mcp/kubernetes.go b/pkg/mcp/kubernetes.go index 59489a1f20..06fd941ddf 100644 --- a/pkg/mcp/kubernetes.go +++ b/pkg/mcp/kubernetes.go @@ -607,7 +607,7 @@ func (k *kubernetesBackend) k8sObjects(ctx context.Context, server ServerConfig, if server.NanobotAgentName == "" { // If this is anything other than a remote runtime, then we need to add a special shim container. // The remote runtime will just be the shim and is deployed as the "real" container. - nanobotFileString, err := constructNanobotYAMLForServer( + nanobotFileString, err := constructMCPServerNanobotYAML( server.MCPServerDisplayName+" Shim", fmt.Sprintf("http://localhost:%d/%s", port, strings.TrimPrefix(server.ContainerPath, "/")), "", @@ -867,13 +867,14 @@ func (k *kubernetesBackend) k8sObjects(ctx context.Context, server ServerConfig, objs = append(objs, dep) if server.Runtime != types.RuntimeContainerized { - // Setup the nanobot config file and add it to the last container in the deployment. + // Set up the MCP server nanobot config (nanobot.yaml that configures how nanobot proxies + // to the underlying MCP server) and mount it into the last container in the deployment.
var nanobotFileString []byte if server.Runtime == types.RuntimeComposite { - nanobotFileString, err = constructNanobotYAMLForCompositeServer(server.Components) + nanobotFileString, err = constructMCPServerNanobotYAMLForComposite(server.Components) annotations["nanobot-composite-file-rev"] = hash.Digest(nanobotFileString) } else { - nanobotFileString, err = constructNanobotYAMLForServer(server.MCPServerDisplayName, server.URL, server.Command, server.Args, secretEnvData, headerData, webhooks) + nanobotFileString, err = constructMCPServerNanobotYAML(server.MCPServerDisplayName, server.URL, server.Command, server.Args, secretEnvData, headerData, webhooks) } if err != nil { return nil, fmt.Errorf("failed to construct nanobot.yaml: %w", err) diff --git a/pkg/storage/openapi/generated/openapi_generated.go b/pkg/storage/openapi/generated/openapi_generated.go index f542417332..5239b5a057 100644 --- a/pkg/storage/openapi/generated/openapi_generated.go +++ b/pkg/storage/openapi/generated/openapi_generated.go @@ -2098,6 +2098,13 @@ func schema_obot_platform_obot_apiclient_types_CommonProviderMetadata(ref common Format: "", }, }, + "dialect": { + SchemaProps: spec.SchemaProps{ + Description: "Dialect specifies the LLM API format used by this provider (e.g. \"AnthropicMessages\", \"OpenAIChatCompletions\", \"OpenAIResponses\").", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -6948,6 +6955,12 @@ func schema_obot_platform_obot_apiclient_types_ModelManifest(ref common.Referenc Format: "", }, }, + "dialect": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"active", "usage"}, },