diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index c87c2627c9..54cf3444a0 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -5,8 +5,8 @@ ### CLI * Moved file-based OAuth token cache management from the SDK to the CLI. No user-visible change; part of a three-PR sequence that makes the CLI the sole owner of its token cache. -* Added experimental OS-native secure token storage behind the `--secure-storage` flag on `databricks auth login` and the `DATABRICKS_AUTH_STORAGE=secure` environment variable. Hidden from help during MS1. Legacy file-backed token storage remains the default. -* Added experimental OS-native secure token storage opt-in via `DATABRICKS_AUTH_STORAGE=secure` or `[__settings__].auth_storage = secure` in `.databrickscfg`. Legacy file-backed token storage remains the default. +* Added experimental OS-native secure token storage, opt-in via the `--secure-storage` flag on `databricks auth login` or the `DATABRICKS_AUTH_STORAGE=secure` environment variable. Legacy file-backed token storage remains the default. +* Removed the `--experimental-is-unified-host` flag and stopped reading `experimental_is_unified_host` from `.databrickscfg` profiles and the `DATABRICKS_EXPERIMENTAL_IS_UNIFIED_HOST` env var. Unified hosts are now detected exclusively from `/.well-known/databricks-config` discovery. The `experimental_is_unified_host` field is retained as a no-op in `databricks.yml` for schema compatibility. 
### Bundles diff --git a/acceptance/auth/credentials/unified-host/out.requests.txt b/acceptance/auth/credentials/unified-host/out.requests.txt deleted file mode 100644 index c154a54bff..0000000000 --- a/acceptance/auth/credentials/unified-host/out.requests.txt +++ /dev/null @@ -1,39 +0,0 @@ -{ - "headers": { - "User-Agent": [ - "cli/[DEV_VERSION] databricks-sdk-go/[SDK_VERSION] go/[GO_VERSION] os/[OS]" - ] - }, - "method": "GET", - "path": "/.well-known/databricks-config" -} -{ - "headers": { - "Authorization": [ - "Bearer dapi-unified-token" - ], - "User-Agent": [ - "cli/[DEV_VERSION] databricks-sdk-go/[SDK_VERSION] go/[GO_VERSION] os/[OS] cmd/current-user_me cmd-exec-id/[UUID] interactive/none auth/pat" - ], - "X-Databricks-Org-Id": [ - "[NUMID]" - ] - }, - "method": "GET", - "path": "/api/2.0/preview/scim/v2/Me" -} -{ - "headers": { - "Authorization": [ - "Bearer dapi-unified-token" - ], - "User-Agent": [ - "cli/[DEV_VERSION] databricks-sdk-go/[SDK_VERSION] go/[GO_VERSION] os/[OS] cmd/current-user_me cmd-exec-id/[UUID] interactive/none auth/pat" - ], - "X-Databricks-Org-Id": [ - "[NUMID]" - ] - }, - "method": "GET", - "path": "/api/2.0/preview/scim/v2/Me" -} diff --git a/acceptance/auth/credentials/unified-host/out.test.toml b/acceptance/auth/credentials/unified-host/out.test.toml deleted file mode 100644 index d560f1de04..0000000000 --- a/acceptance/auth/credentials/unified-host/out.test.toml +++ /dev/null @@ -1,5 +0,0 @@ -Local = true -Cloud = false - -[EnvMatrix] - DATABRICKS_BUNDLE_ENGINE = ["terraform", "direct"] diff --git a/acceptance/auth/credentials/unified-host/output.txt b/acceptance/auth/credentials/unified-host/output.txt deleted file mode 100644 index af071887d0..0000000000 --- a/acceptance/auth/credentials/unified-host/output.txt +++ /dev/null @@ -1,12 +0,0 @@ - -=== With workspace_id -{ - "id":"[USERID]", - "userName":"[USERNAME]" -} - -=== Without workspace_id (should error) -{ - "id":"[USERID]", - "userName":"[USERNAME]" -} diff --git 
a/acceptance/auth/credentials/unified-host/script b/acceptance/auth/credentials/unified-host/script deleted file mode 100644 index f785987219..0000000000 --- a/acceptance/auth/credentials/unified-host/script +++ /dev/null @@ -1,12 +0,0 @@ -# Test unified host authentication with PAT token -export DATABRICKS_TOKEN=dapi-unified-token -export DATABRICKS_ACCOUNT_ID=test-account-123 -export DATABRICKS_WORKSPACE_ID=1234567890 -export DATABRICKS_EXPERIMENTAL_IS_UNIFIED_HOST=true - -title "With workspace_id\n" -$CLI current-user me - -title "Without workspace_id (should error)\n" -unset DATABRICKS_WORKSPACE_ID -errcode $CLI current-user me diff --git a/acceptance/auth/credentials/unified-host/test.toml b/acceptance/auth/credentials/unified-host/test.toml deleted file mode 100644 index fd0cd96421..0000000000 --- a/acceptance/auth/credentials/unified-host/test.toml +++ /dev/null @@ -1,3 +0,0 @@ -# Test unified host authentication with PAT tokens -# Include X-Databricks-Org-Id header to verify workspace_id is sent -IncludeRequestHeaders = ["Authorization", "User-Agent", "X-Databricks-Org-Id"] diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index 325e7cbd55..9cd397f13a 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -46,6 +46,11 @@ type Workspace struct { AzureLoginAppID string `json:"azure_login_app_id,omitempty"` // Unified host specific attributes. + // + // ExperimentalIsUnifiedHost is a deprecated no-op. Unified hosts are now + // detected automatically from /.well-known/databricks-config. The field is + // retained so existing databricks.yml files using it still validate against + // the bundle schema. 
ExperimentalIsUnifiedHost bool `json:"experimental_is_unified_host,omitempty"` AccountID string `json:"account_id,omitempty"` WorkspaceID string `json:"workspace_id,omitempty"` @@ -135,9 +140,8 @@ func (w *Workspace) Config(ctx context.Context) *config.Config { AzureLoginAppID: w.AzureLoginAppID, // Unified host - Experimental_IsUnifiedHost: w.ExperimentalIsUnifiedHost, - AccountID: w.AccountID, - WorkspaceID: w.WorkspaceID, + AccountID: w.AccountID, + WorkspaceID: w.WorkspaceID, } for k := range config.ConfigAttributes { diff --git a/bundle/docsgen/output/reference.md b/bundle/docsgen/output/reference.md index cb48644fd4..ea8f922575 100644 --- a/bundle/docsgen/output/reference.md +++ b/bundle/docsgen/output/reference.md @@ -1,7 +1,7 @@ --- description: 'Configuration reference for databricks.yml' last_update: - date: 2026-04-17 + date: 2026-04-23 --- @@ -527,6 +527,10 @@ resources: - Map - See [\_](#resourcessynced_database_tables). +- - `vector_search_endpoints` + - Map + - See [\_](#resourcesvector_search_endpoints). + - - `volumes` - Map - The volume definitions for the bundle, where each key is the name of the volume. See [\_](/dev-tools/bundles/resources.md#volumes). @@ -991,6 +995,10 @@ catalogs: - Map - See [\_](#resourcescatalogsnamelifecycle). +- - `managed_encryption_settings` + - Map + - See [\_](#resourcescatalogsnamemanaged_encryption_settings). + - - `name` - String - @@ -1398,6 +1406,10 @@ external_locations: - Boolean - +- - `effective_file_event_queue` + - Map + - See [\_](#resourcesexternal_locationsnameeffective_file_event_queue). + - - `enable_file_events` - Boolean - @@ -1659,6 +1671,10 @@ postgres_projects: - Sequence - See [\_](#resourcespostgres_projectsnamecustom_tags). +- - `default_branch` + - String + - + - - `default_endpoint_settings` - Map - See [\_](#resourcespostgres_projectsnamedefault_endpoint_settings). 
@@ -1923,6 +1939,110 @@ synced_database_tables: ::: +### resources.vector_search_endpoints + +**`Type: Map`** + + + +```yaml +vector_search_endpoints: + : + : +``` + + +:::list-table + +- - Key + - Type + - Description + +- - `budget_policy_id` + - String + - + +- - `endpoint_type` + - String + - + +- - `lifecycle` + - Map + - See [\_](#resourcesvector_search_endpointsnamelifecycle). + +- - `min_qps` + - Integer + - + +- - `name` + - String + - + +- - `permissions` + - Sequence + - See [\_](#resourcesvector_search_endpointsnamepermissions). + +- - `usage_policy_id` + - String + - + +::: + + +### resources.vector_search_endpoints._name_.lifecycle + +**`Type: Map`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `prevent_destroy` + - Boolean + - Lifecycle setting to prevent the resource from being destroyed. + +::: + + +### resources.vector_search_endpoints._name_.permissions + +**`Type: Sequence`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `group_name` + - String + - The name of the group that has the permission set in level. + +- - `level` + - String + - The allowed permission for user, group, service principal defined for this permission. + +- - `service_principal_name` + - String + - The name of the service principal that has the permission set in level. + +- - `user_name` + - String + - The name of the user that has the permission set in level. + +::: + + ## run_as **`Type: Map`** @@ -2481,6 +2601,10 @@ The resource definitions for the target. - Map - See [\_](#targetsnameresourcessynced_database_tables). +- - `vector_search_endpoints` + - Map + - See [\_](#targetsnameresourcesvector_search_endpoints). + - - `volumes` - Map - The volume definitions for the bundle, where each key is the name of the volume. See [\_](/dev-tools/bundles/resources.md#volumes). @@ -2945,6 +3069,10 @@ catalogs: - Map - See [\_](#targetsnameresourcescatalogsnamelifecycle). 
+- - `managed_encryption_settings` + - Map + - See [\_](#targetsnameresourcescatalogsnamemanaged_encryption_settings). + - - `name` - String - @@ -3352,6 +3480,10 @@ external_locations: - Boolean - +- - `effective_file_event_queue` + - Map + - See [\_](#targetsnameresourcesexternal_locationsnameeffective_file_event_queue). + - - `enable_file_events` - Boolean - @@ -3613,6 +3745,10 @@ postgres_projects: - Sequence - See [\_](#targetsnameresourcespostgres_projectsnamecustom_tags). +- - `default_branch` + - String + - + - - `default_endpoint_settings` - Map - See [\_](#targetsnameresourcespostgres_projectsnamedefault_endpoint_settings). @@ -3877,6 +4013,110 @@ synced_database_tables: ::: +### targets._name_.resources.vector_search_endpoints + +**`Type: Map`** + + + +```yaml +vector_search_endpoints: + : + : +``` + + +:::list-table + +- - Key + - Type + - Description + +- - `budget_policy_id` + - String + - + +- - `endpoint_type` + - String + - + +- - `lifecycle` + - Map + - See [\_](#targetsnameresourcesvector_search_endpointsnamelifecycle). + +- - `min_qps` + - Integer + - + +- - `name` + - String + - + +- - `permissions` + - Sequence + - See [\_](#targetsnameresourcesvector_search_endpointsnamepermissions). + +- - `usage_policy_id` + - String + - + +::: + + +### targets._name_.resources.vector_search_endpoints._name_.lifecycle + +**`Type: Map`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `prevent_destroy` + - Boolean + - Lifecycle setting to prevent the resource from being destroyed. + +::: + + +### targets._name_.resources.vector_search_endpoints._name_.permissions + +**`Type: Sequence`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `group_name` + - String + - The name of the group that has the permission set in level. + +- - `level` + - String + - The allowed permission for user, group, service principal defined for this permission. 
+ +- - `service_principal_name` + - String + - The name of the service principal that has the permission set in level. + +- - `user_name` + - String + - The name of the user that has the permission set in level. + +::: + + ### targets._name_.run_as **`Type: Map`** @@ -4094,7 +4334,7 @@ The Databricks workspace for the target. - - `experimental_is_unified_host` - Boolean - - Experimental feature flag to indicate if the host is a unified host + - Deprecated: no-op. Unified hosts are now detected automatically from /.well-known/databricks-config. Retained for schema compatibility with existing databricks.yml files. - - `file_path` - String @@ -4290,7 +4530,7 @@ Defines the Databricks workspace for the bundle. See [\_](/dev-tools/bundles/set - - `experimental_is_unified_host` - Boolean - - Experimental feature flag to indicate if the host is a unified host + - Deprecated: no-op. Unified hosts are now detected automatically from /.well-known/databricks-config. Retained for schema compatibility with existing databricks.yml files. - - `file_path` - String diff --git a/bundle/docsgen/output/resources.md b/bundle/docsgen/output/resources.md index 8277b4c30d..e262f4620b 100644 --- a/bundle/docsgen/output/resources.md +++ b/bundle/docsgen/output/resources.md @@ -1,7 +1,7 @@ --- description: 'Learn about resources supported by Declarative Automation Bundles and how to configure them.' last_update: - date: 2026-04-17 + date: 2026-04-23 --- @@ -746,7 +746,7 @@ Resources for the app. - - `app` - Map - - + - See [\_](#appsnameresourcesapp). - - `database` - Map @@ -772,6 +772,10 @@ Resources for the app. - String - Name of the App Resource. +- - `postgres` + - Map + - See [\_](#appsnameresourcespostgres). + - - `secret` - Map - See [\_](#appsnameresourcessecret). @@ -798,6 +802,24 @@ Resources for the app. 
+ +:::list-table + +- - Key + - Type + - Description + +- - `name` + - String + - + +- - `permission` + - String + - + +::: + + ### apps._name_.resources.database **`Type: Map`** @@ -906,6 +928,35 @@ Resources for the app. ::: +### apps._name_.resources.postgres + +**`Type: Map`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `branch` + - String + - + +- - `database` + - String + - + +- - `permission` + - String + - + +::: + + ### apps._name_.resources.secret **`Type: Map`** @@ -3252,7 +3303,7 @@ In this minimal environment spec, only pip and java dependencies are supported. - - `base_environment` - String - - The `base_environment` key refers to an `env.yaml` file that specifies an environment version and a collection of dependencies required for the environment setup. This `env.yaml` file may itself include a `base_environment` reference pointing to another `env_1.yaml` file. However, when used as a base environment, `env_1.yaml` (or further nested references) will not be processed or included in the final environment, meaning that the resolution of `base_environment` references is not recursive. + - The base environment this environment is built on top of. A base environment defines the environment version and a list of dependencies for serverless compute. The value can be a file path to a custom `env.yaml` file (e.g., `/Workspace/path/to/env.yaml`). Support for a Databricks-provided base environment ID (e.g., `workspace-base-environments/databricks_ai_v4`) and workspace base environment ID (e.g., `workspace-base-environments/dbe_b849b66e-b31a-4cb5-b161-1f2b10877fb7`) is in Beta. Either `environment_version` or `base_environment` can be provided. For more information, see - - `client` - String @@ -4447,7 +4498,7 @@ Read endpoints return only 100 tasks. If more than 100 tasks are available, you - - `alert_task` - Map - - New alert v2 task. See [\_](#jobsnametasksalert_task). 
+ - The task evaluates a Databricks alert and sends notifications to subscribers when the `alert_task` field is present. See [\_](#jobsnametasksalert_task). - - `clean_rooms_notebook_task` - Map @@ -4588,7 +4639,8 @@ Read endpoints return only 100 tasks. If more than 100 tasks are available, you **`Type: Map`** -New alert v2 task +The task evaluates a Databricks alert and sends notifications to subscribers +when the `alert_task` field is present. @@ -10111,6 +10163,10 @@ postgres_projects: - Sequence - See [\_](#postgres_projectsnamecustom_tags). +- - `default_branch` + - String + - + - - `default_endpoint_settings` - Map - A collection of settings for a compute endpoint. See [\_](#postgres_projectsnamedefault_endpoint_settings). @@ -11393,6 +11449,106 @@ only requires read permissions. ::: +## vector_search_endpoints + +**`Type: Map`** + + + +```yaml +vector_search_endpoints: + : + : +``` + + +:::list-table + +- - Key + - Type + - Description + +- - `budget_policy_id` + - String + - + +- - `endpoint_type` + - String + - Type of endpoint. + +- - `lifecycle` + - Map + - See [\_](#vector_search_endpointsnamelifecycle). + +- - `min_qps` + - Integer + - + +- - `name` + - String + - + +- - `permissions` + - Sequence + - See [\_](#vector_search_endpointsnamepermissions). + +::: + + +### vector_search_endpoints._name_.lifecycle + +**`Type: Map`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `prevent_destroy` + - Boolean + - Lifecycle setting to prevent the resource from being destroyed. + +::: + + +### vector_search_endpoints._name_.permissions + +**`Type: Sequence`** + + + + + +:::list-table + +- - Key + - Type + - Description + +- - `group_name` + - String + - The name of the group that has the permission set in level. + +- - `level` + - String + - The allowed permission for user, group, service principal defined for this permission. 
+ +- - `service_principal_name` + - String + - The name of the service principal that has the permission set in level. + +- - `user_name` + - String + - The name of the user that has the permission set in level. + +::: + + ## volumes **`Type: Map`** diff --git a/bundle/internal/schema/annotations.yml b/bundle/internal/schema/annotations.yml index c869a19926..2f28ca2759 100644 --- a/bundle/internal/schema/annotations.yml +++ b/bundle/internal/schema/annotations.yml @@ -450,7 +450,7 @@ github.com/databricks/cli/bundle/config.Workspace: The client ID for the workspace "experimental_is_unified_host": "description": |- - Experimental feature flag to indicate if the host is a unified host + Deprecated: no-op. Unified hosts are now detected automatically from /.well-known/databricks-config. Retained for schema compatibility with existing databricks.yml files. "file_path": "description": |- The file path to use within the workspace for both deployments and job runs diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 03654f1f63..ee105a6f82 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -2743,7 +2743,7 @@ "$ref": "#/$defs/string" }, "experimental_is_unified_host": { - "description": "Experimental feature flag to indicate if the host is a unified host", + "description": "Deprecated: no-op. Unified hosts are now detected automatically from /.well-known/databricks-config. 
Retained for schema compatibility with existing databricks.yml files.", "$ref": "#/$defs/bool" }, "file_path": { diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index 6bca3f5962..348c213856 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -28,7 +28,6 @@ GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`, var authArguments auth.AuthArguments cmd.PersistentFlags().StringVar(&authArguments.Host, "host", "", "Databricks Host") cmd.PersistentFlags().StringVar(&authArguments.AccountID, "account-id", "", "Databricks Account ID") - cmd.PersistentFlags().BoolVar(&authArguments.IsUnifiedHost, "experimental-is-unified-host", false, "Flag to indicate if the host is a unified host") cmd.PersistentFlags().StringVar(&authArguments.WorkspaceID, "workspace-id", "", "Databricks Workspace ID") cmd.AddCommand(newEnvCommand()) diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 966ea2631e..3b5d78c4f1 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -231,13 +231,6 @@ a new profile is created. }) } - // Load unified host flag from the profile if not explicitly set via CLI flag. - // WorkspaceID is NOT loaded here; it is deferred to setHostAndAccountId() - // so that URL query params (?o=...) can override stale profile values. - if !cmd.Flag("experimental-is-unified-host").Changed && existingProfile != nil { - authArguments.IsUnifiedHost = existingProfile.IsUnifiedHost - } - err = setHostAndAccountId(ctx, existingProfile, authArguments, args) if err != nil { return err @@ -289,8 +282,7 @@ a new profile is created. // If discovery gave us an account_id but we still have no workspace_id, // prompt the user to select a workspace. This applies to any host where - // .well-known/databricks-config returned an account_id, regardless of - // whether IsUnifiedHost is set. + // .well-known/databricks-config returned an account_id. 
shouldPromptWorkspace := authArguments.AccountID != "" && authArguments.WorkspaceID == "" && !skipWorkspace @@ -314,15 +306,11 @@ a new profile is created. var clusterID, serverlessComputeID string // Keys to explicitly remove from the profile. OAuth login always - // clears incompatible credential fields (PAT, basic auth, M2M). + // clears incompatible credential fields (PAT, basic auth, M2M) and + // the deprecated experimental_is_unified_host key (routing now comes + // from .well-known discovery, so stale values would be misleading). clearKeys := oauthLoginClearKeys() - - // Boolean false is zero-valued and skipped by SaveToProfile's IsZero - // check. Explicitly clear experimental_is_unified_host when false so - // it doesn't remain sticky from a previous login. - if !authArguments.IsUnifiedHost { - clearKeys = append(clearKeys, "experimental_is_unified_host") - } + clearKeys = append(clearKeys, databrickscfg.ExperimentalIsUnifiedHostKey) switch { case configureCluster: @@ -330,11 +318,10 @@ a new profile is created. // We use a custom CredentialsStrategy that wraps the token we just minted, // avoiding the need to spawn a child CLI process (which AuthType "databricks-cli" does). w, err := databricks.NewWorkspaceClient(&databricks.Config{ - Host: authArguments.Host, - AccountID: authArguments.AccountID, - WorkspaceID: authArguments.WorkspaceID, - Experimental_IsUnifiedHost: authArguments.IsUnifiedHost, - Credentials: config.NewTokenSourceStrategy("login-token", authconv.AuthTokenSource(persistentAuth)), + Host: authArguments.Host, + AccountID: authArguments.AccountID, + WorkspaceID: authArguments.WorkspaceID, + Credentials: config.NewTokenSourceStrategy("login-token", authconv.AuthTokenSource(persistentAuth)), }) if err != nil { return err @@ -355,17 +342,19 @@ a new profile is created. } if profileName != "" { + // experimental_is_unified_host is no longer written to new profiles. 
+ // Routing now comes from .well-known discovery; stale keys on existing + // profiles are cleaned up via clearKeys above. err := databrickscfg.SaveToProfile(ctx, &config.Config{ - Profile: profileName, - Host: authArguments.Host, - AuthType: authTypeDatabricksCLI, - AccountID: authArguments.AccountID, - WorkspaceID: authArguments.WorkspaceID, - Experimental_IsUnifiedHost: authArguments.IsUnifiedHost, - ClusterID: clusterID, - ConfigFile: env.Get(ctx, "DATABRICKS_CONFIG_FILE"), - ServerlessComputeID: serverlessComputeID, - Scopes: scopesList, + Profile: profileName, + Host: authArguments.Host, + AuthType: authTypeDatabricksCLI, + AccountID: authArguments.AccountID, + WorkspaceID: authArguments.WorkspaceID, + ClusterID: clusterID, + ConfigFile: env.Get(ctx, "DATABRICKS_CONFIG_FILE"), + ServerlessComputeID: serverlessComputeID, + Scopes: scopesList, }, clearKeys...) if err != nil { return err @@ -442,52 +431,32 @@ func setHostAndAccountId(ctx context.Context, existingProfile *profile.Profile, // are logged as warnings and never block login. runHostDiscovery(ctx, authArguments) - // Determine the host type and handle account ID / workspace ID accordingly - cfg := &config.Config{ - Host: authArguments.Host, - AccountID: authArguments.AccountID, - WorkspaceID: authArguments.WorkspaceID, - Experimental_IsUnifiedHost: authArguments.IsUnifiedHost, - } - - switch cfg.HostType() { //nolint:staticcheck // HostType() deprecated in SDK v0.127.0; SDK moving to host-agnostic behavior. - case config.AccountHost: - // Account host: prompt for account ID if not provided - if authArguments.AccountID == "" { - if existingProfile != nil && existingProfile.AccountID != "" { - authArguments.AccountID = existingProfile.AccountID - } else { - accountId, err := promptForAccountID(ctx) - if err != nil { - return err - } - authArguments.AccountID = accountId - } - } - case config.UnifiedHost: - // Unified host requires an account ID for OAuth URL construction. 
- // Workspace selection happens post-OAuth via promptForWorkspaceSelection. - if authArguments.AccountID == "" { - if existingProfile != nil && existingProfile.AccountID != "" { - authArguments.AccountID = existingProfile.AccountID - } else { - accountId, err := promptForAccountID(ctx) - if err != nil { - return err - } - authArguments.AccountID = accountId + if needsAccountIDPrompt(authArguments.Host, authArguments.DiscoveryURL) && authArguments.AccountID == "" { + if existingProfile != nil && existingProfile.AccountID != "" { + authArguments.AccountID = existingProfile.AccountID + } else { + accountId, err := promptForAccountID(ctx) + if err != nil { + return err } + authArguments.AccountID = accountId } - case config.WorkspaceHost: - // Regular workspace host: no additional prompts needed. - // If discovery already populated account_id/workspace_id, those are kept. - default: - return fmt.Errorf("unknown host type: %v", cfg.HostType()) //nolint:staticcheck // HostType() deprecated in SDK v0.127.0; SDK moving to host-agnostic behavior. } return nil } +// needsAccountIDPrompt reports whether the target host requires an account ID +// for OAuth URL construction. True for classic account hosts (accounts.*) and +// for unified hosts detected via account-scoped DiscoveryURL. +func needsAccountIDPrompt(host, discoveryURL string) bool { + canonicalHost := (&config.Config{Host: host}).CanonicalHostName() + if auth.IsClassicAccountHost(canonicalHost) { + return true + } + return auth.HasUnifiedHostSignal(discoveryURL) +} + // runHostDiscovery calls EnsureResolved() with a temporary config to fetch // .well-known/databricks-config from the host. Populates account_id and // workspace_id from discovery if not already set. 
@@ -577,7 +546,6 @@ func shouldUseDiscovery(hostFlag string, args []string, existingProfile *profile var discoveryIncompatibleFlags = []string{ "account-id", "workspace-id", - "experimental-is-unified-host", "configure-cluster", "configure-serverless", } @@ -696,7 +664,7 @@ func discoveryLogin(ctx context.Context, in discoveryLoginInputs) error { clearKeys = append(clearKeys, "account_id", "workspace_id", - "experimental_is_unified_host", + databrickscfg.ExperimentalIsUnifiedHostKey, "cluster_id", "serverless_compute_id", ) diff --git a/cmd/auth/login_test.go b/cmd/auth/login_test.go index b5d8a39f43..e057c979c3 100644 --- a/cmd/auth/login_test.go +++ b/cmd/auth/login_test.go @@ -218,10 +218,9 @@ func TestSetWorkspaceIDForUnifiedHost(t *testing.T) { // Test setting workspace-id from flag for unified host authArguments = auth.AuthArguments{ - Host: "https://unified.databricks.com", - AccountID: "test-unified-account", - WorkspaceID: "val from --workspace-id", - IsUnifiedHost: true, + Host: "https://unified.databricks.com", + AccountID: "test-unified-account", + WorkspaceID: "val from --workspace-id", } err := setHostAndAccountId(ctx, unifiedWorkspaceProfile, &authArguments, []string{}) assert.NoError(t, err) @@ -231,9 +230,8 @@ func TestSetWorkspaceIDForUnifiedHost(t *testing.T) { // Test setting workspace_id from profile for unified host authArguments = auth.AuthArguments{ - Host: "https://unified.databricks.com", - AccountID: "test-unified-account", - IsUnifiedHost: true, + Host: "https://unified.databricks.com", + AccountID: "test-unified-account", } err = setHostAndAccountId(ctx, unifiedWorkspaceProfile, &authArguments, []string{}) assert.NoError(t, err) @@ -243,9 +241,8 @@ func TestSetWorkspaceIDForUnifiedHost(t *testing.T) { // Test workspace_id is optional - should default to empty in non-interactive mode authArguments = auth.AuthArguments{ - Host: "https://unified.databricks.com", - AccountID: "test-unified-account", - IsUnifiedHost: true, + Host: 
"https://unified.databricks.com", + AccountID: "test-unified-account", } err = setHostAndAccountId(ctx, unifiedAccountProfile, &authArguments, []string{}) assert.NoError(t, err) @@ -255,9 +252,8 @@ func TestSetWorkspaceIDForUnifiedHost(t *testing.T) { // Test workspace_id is optional - should default to empty when no profile exists authArguments = auth.AuthArguments{ - Host: "https://unified.databricks.com", - AccountID: "test-unified-account", - IsUnifiedHost: true, + Host: "https://unified.databricks.com", + AccountID: "test-unified-account", } err = setHostAndAccountId(ctx, nil, &authArguments, []string{}) assert.NoError(t, err) @@ -399,6 +395,29 @@ func TestShouldUseDiscovery(t *testing.T) { } } +func TestNeedsAccountIDPrompt(t *testing.T) { + cases := []struct { + name string + host string + discoveryURL string + want bool + }{ + {name: "classic accounts host", host: "https://accounts.cloud.databricks.com", want: true}, + {name: "accounts-dod host", host: "https://accounts-dod.databricks.com", want: true}, + {name: "accounts host with path", host: "https://accounts.cloud.databricks.com/some/path", want: true}, + {name: "plain workspace host", host: "https://workspace.cloud.databricks.com"}, + {name: "account-scoped DiscoveryURL", host: "https://spog.cloud.databricks.com", discoveryURL: "https://spog.cloud.databricks.com/oidc/accounts/acct-123/.well-known/oauth-authorization-server", want: true}, + {name: "workspace-scoped DiscoveryURL", host: "https://workspace.cloud.databricks.com", discoveryURL: "https://workspace.cloud.databricks.com/oidc/.well-known/oauth-authorization-server"}, + {name: "workspace host no signals", host: "https://workspace.cloud.databricks.com"}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + got := needsAccountIDPrompt(tc.host, tc.discoveryURL) + assert.Equal(t, tc.want, got) + }) + } +} + func TestSplitScopes(t *testing.T) { tests := []struct { name string @@ -536,9 +555,8 @@ func 
TestSetHostAndAccountId_URLParamsOverrideProfile(t *testing.T) { // The profile has workspace_id=123456789, but the URL has ?o=99999. // URL params should win over profile values. args := auth.AuthArguments{ - Host: "https://unified.databricks.com?o=99999", - AccountID: "test-unified-account", - IsUnifiedHost: true, + Host: "https://unified.databricks.com?o=99999", + AccountID: "test-unified-account", } err := setHostAndAccountId(ctx, unifiedWorkspaceProfile, &args, []string{}) assert.NoError(t, err) @@ -565,12 +583,6 @@ func TestValidateDiscoveryFlagCompatibility(t *testing.T) { flagVal: "12345", wantErr: "--workspace-id requires --host to be specified", }, - { - name: "experimental-is-unified-host is incompatible", - setFlag: "experimental-is-unified-host", - flagVal: "true", - wantErr: "--experimental-is-unified-host requires --host to be specified", - }, { name: "configure-cluster is incompatible", setFlag: "configure-cluster", @@ -592,7 +604,6 @@ func TestValidateDiscoveryFlagCompatibility(t *testing.T) { cmd := &cobra.Command{} cmd.Flags().String("account-id", "", "") cmd.Flags().String("workspace-id", "", "") - cmd.Flags().Bool("experimental-is-unified-host", false, "") cmd.Flags().Bool("configure-cluster", false, "") cmd.Flags().Bool("configure-serverless", false, "") @@ -985,11 +996,10 @@ auth_type = databricks-cli } existingProfile := &profile.Profile{ - Name: "DISCOVERY", - Host: "https://old-unified.databricks.com", - AccountID: "old-account", - WorkspaceID: "999999", - IsUnifiedHost: true, + Name: "DISCOVERY", + Host: "https://old-unified.databricks.com", + AccountID: "old-account", + WorkspaceID: "999999", } ctx, _ := cmdio.NewTestContextWithStdout(t.Context()) @@ -1010,7 +1020,11 @@ auth_type = databricks-cli // Stale routing fields must be cleared. 
assert.Empty(t, savedProfile.AccountID, "stale account_id should be cleared") assert.Empty(t, savedProfile.WorkspaceID, "stale workspace_id should be cleared on introspection failure") - assert.False(t, savedProfile.IsUnifiedHost, "stale experimental_is_unified_host should be cleared") + + // Verify the experimental_is_unified_host INI key was also cleared from disk. + raw, err := os.ReadFile(configPath) + require.NoError(t, err) + assert.NotContains(t, string(raw), "experimental_is_unified_host") } func TestDiscoveryLogin_IntrospectionWritesFreshWorkspaceID(t *testing.T) { diff --git a/cmd/auth/logout.go b/cmd/auth/logout.go index bdd0f75430..a8cd14be0a 100644 --- a/cmd/auth/logout.go +++ b/cmd/auth/logout.go @@ -303,13 +303,12 @@ func hostCacheKeyAndMatchFn(p profile.Profile) (string, profile.ProfileMatchFunc // Use ToOAuthArgument to derive the host-based cache key via the same // routing logic the SDK used when the token was written during login. // This includes a .well-known/databricks-config call that distinguishes - // classic workspace hosts from SPOG hosts — a distinction that cannot + // classic workspace hosts from SPOG hosts, a distinction that cannot // be made from the profile fields alone. arg, err := (auth.AuthArguments{ - Host: p.Host, - AccountID: p.AccountID, - WorkspaceID: p.WorkspaceID, - IsUnifiedHost: p.IsUnifiedHost, + Host: p.Host, + AccountID: p.AccountID, + WorkspaceID: p.WorkspaceID, // Profile is deliberately empty so GetCacheKey returns the host-based // key rather than the profile name. 
// DiscoveryURL is left empty to force a fresh .well-known resolution diff --git a/cmd/auth/logout_test.go b/cmd/auth/logout_test.go index e4e8f58058..c9007e4a5f 100644 --- a/cmd/auth/logout_test.go +++ b/cmd/auth/logout_test.go @@ -41,29 +41,21 @@ host = https://accounts.cloud.databricks.com account_id = abc123 auth_type = databricks-cli -[my-unified] -host = https://unified.cloud.databricks.com -account_id = def456 -experimental_is_unified_host = true -auth_type = databricks-cli - [my-m2m] host = https://my-m2m.cloud.databricks.com token = dev-token ` var logoutTestTokensCacheConfig = map[string]*oauth2.Token{ - "my-workspace": {AccessToken: "shared-workspace-token"}, - "shared-workspace": {AccessToken: "shared-workspace-token"}, - "my-unique-workspace": {AccessToken: "my-unique-workspace-token"}, - "my-workspace-stale-account": {AccessToken: "stale-account-token"}, - "my-account": {AccessToken: "my-account-token"}, - "my-unified": {AccessToken: "my-unified-token"}, + "my-workspace": {AccessToken: "shared-workspace-token"}, + "shared-workspace": {AccessToken: "shared-workspace-token"}, + "my-unique-workspace": {AccessToken: "my-unique-workspace-token"}, + "my-workspace-stale-account": {AccessToken: "stale-account-token"}, + "my-account": {AccessToken: "my-account-token"}, "https://my-workspace.cloud.databricks.com": {AccessToken: "shared-workspace-host-token"}, "https://my-unique-workspace.cloud.databricks.com": {AccessToken: "unique-workspace-host-token"}, "https://stale-account.cloud.databricks.com": {AccessToken: "stale-account-host-token"}, "https://accounts.cloud.databricks.com/oidc/accounts/abc123": {AccessToken: "account-host-token"}, - "https://unified.cloud.databricks.com/oidc/accounts/def456": {AccessToken: "unified-host-token"}, "my-m2m": {AccessToken: "m2m-service-token"}, "https://my-m2m.cloud.databricks.com": {AccessToken: "m2m-host-token"}, } @@ -120,13 +112,6 @@ func TestLogout(t *testing.T) { isSharedKey: false, autoApprove: true, }, - { - name: 
"existing unified profile", - profileName: "my-unified", - hostBasedKey: "https://unified.cloud.databricks.com/oidc/accounts/def456", - isSharedKey: false, - autoApprove: true, - }, { name: "existing workspace profile without auto-approve in non-interactive mode", profileName: "my-workspace", @@ -163,14 +148,6 @@ func TestLogout(t *testing.T) { autoApprove: true, deleteProfile: true, }, - { - name: "delete unified profile", - profileName: "my-unified", - hostBasedKey: "https://unified.cloud.databricks.com/oidc/accounts/def456", - isSharedKey: false, - autoApprove: true, - deleteProfile: true, - }, { name: "do not delete m2m profile tokens", profileName: "my-m2m", @@ -439,16 +416,6 @@ func TestHostCacheKeyAndMatchFn(t *testing.T) { }, wantKey: "https://accounts.cloud.databricks.com/oidc/accounts/abc123", }, - { - name: "unified host with flag", - profile: profile.Profile{ - Name: "unified", - Host: wsServer.URL, - AccountID: "def456", - IsUnifiedHost: true, - }, - wantKey: wsServer.URL + "/oidc/accounts/def456", - }, { name: "SPOG profile routes to account key via discovery", profile: profile.Profile{ diff --git a/cmd/auth/profiles_test.go b/cmd/auth/profiles_test.go index a0792344ae..59803e210c 100644 --- a/cmd/auth/profiles_test.go +++ b/cmd/auth/profiles_test.go @@ -201,45 +201,6 @@ func TestProfileLoadSPOGConfigType(t *testing.T) { } } -func TestProfileLoadUnifiedHostFallback(t *testing.T) { - // When Experimental_IsUnifiedHost is set but .well-known is unreachable, - // ConfigType() returns InvalidConfig. The fallback should reclassify as - // AccountConfig so the profile is still validated. 
- server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - switch r.URL.Path { - case "/.well-known/databricks-config": - w.WriteHeader(http.StatusNotFound) - case "/api/2.0/accounts/unified-acct/workspaces": - _ = json.NewEncoder(w).Encode([]map[string]any{}) - default: - w.WriteHeader(http.StatusNotFound) - } - })) - t.Cleanup(server.Close) - - dir := t.TempDir() - configFile := filepath.Join(dir, ".databrickscfg") - t.Setenv("HOME", dir) - if runtime.GOOS == "windows" { - t.Setenv("USERPROFILE", dir) - } - - content := "[unified-profile]\nhost = " + server.URL + "\ntoken = test-token\naccount_id = unified-acct\nexperimental_is_unified_host = true\n" - require.NoError(t, os.WriteFile(configFile, []byte(content), 0o600)) - - p := &profileMetadata{ - Name: "unified-profile", - Host: server.URL, - AccountID: "unified-acct", - } - p.Load(t.Context(), configFile, false) - - assert.True(t, p.Valid, "unified host profile should be valid via fallback") - assert.NotEmpty(t, p.Host) - assert.NotEmpty(t, p.AuthType) -} - func TestClassicAccountsHostConfigType(t *testing.T) { // Classic accounts.* hosts can't be tested through Load() because httptest // generates 127.0.0.1 URLs. Verify directly that ConfigType() classifies @@ -256,7 +217,7 @@ func TestClassicAccountsHostConfigType(t *testing.T) { } func TestProfileLoadNoDiscoveryStaysWorkspace(t *testing.T) { - // When .well-known returns 404 and Experimental_IsUnifiedHost is false, + // When .well-known returns 404 and the unified-host fallback is false, // the SPOG override should NOT trigger even if account_id is set. The // profile should stay WorkspaceConfig and validate via CurrentUser.Me. 
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/cmd/auth/token.go b/cmd/auth/token.go index da954b6318..1601501c44 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -41,19 +41,6 @@ const ( createNewSelected // User chose "Create a new profile" ) -// applyUnifiedHostFlags copies unified host fields from the profile to the -// auth arguments when they are not already set. WorkspaceID is NOT copied -// here; it is deferred to setHostAndAccountId() so that URL query params -// (?o=...) can override stale profile values. -func applyUnifiedHostFlags(p *profile.Profile, args *auth.AuthArguments) { - if p == nil { - return - } - if !args.IsUnifiedHost && p.IsUnifiedHost { - args.IsUnifiedHost = p.IsUnifiedHost - } -} - func newTokenCommand(authArguments *auth.AuthArguments) *cobra.Command { cmd := &cobra.Command{ Use: "token [PROFILE]", @@ -195,8 +182,6 @@ func loadToken(ctx context.Context, args loadTokenArgs) (*oauth2.Token, error) { return nil, err } - applyUnifiedHostFlags(existingProfile, args.authArguments) - // When no explicit profile, host, or positional args are provided, attempt to // resolve the target through environment variables or interactive profile selection. 
if args.profileName == "" && args.authArguments.Host == "" && len(args.args) == 0 { @@ -206,7 +191,6 @@ func loadToken(ctx context.Context, args loadTokenArgs) (*oauth2.Token, error) { return nil, err } args.profileName = resolvedProfile - applyUnifiedHostFlags(existingProfile, args.authArguments) } err = setHostAndAccountId(ctx, existingProfile, args.authArguments, args.args) @@ -342,9 +326,6 @@ func resolveNoArgsToken(ctx context.Context, profiler profile.Profiler, authArgs if v := env.Get(ctx, "DATABRICKS_WORKSPACE_ID"); v != "" { authArgs.WorkspaceID = v } - if ok, _ := env.GetBool(ctx, "DATABRICKS_EXPERIMENTAL_IS_UNIFIED_HOST"); ok { - authArgs.IsUnifiedHost = true - } return "", nil, nil } @@ -457,7 +438,6 @@ func runInlineLogin(ctx context.Context, profiler profile.Profiler, tokenCache c } loginArgs := &auth.AuthArguments{} - applyUnifiedHostFlags(existingProfile, loginArgs) err = setHostAndAccountId(ctx, existingProfile, loginArgs, nil) if err != nil { @@ -500,19 +480,16 @@ func runInlineLogin(ctx context.Context, profiler profile.Profiler, tokenCache c dualWriteLegacyHostKey(ctx, tokenCache, oauthArgument, mode) clearKeys := oauthLoginClearKeys() - if !loginArgs.IsUnifiedHost { - clearKeys = append(clearKeys, "experimental_is_unified_host") - } + clearKeys = append(clearKeys, databrickscfg.ExperimentalIsUnifiedHostKey) err = databrickscfg.SaveToProfile(ctx, &config.Config{ - Profile: profileName, - Host: loginArgs.Host, - AuthType: authTypeDatabricksCLI, - AccountID: loginArgs.AccountID, - WorkspaceID: loginArgs.WorkspaceID, - Experimental_IsUnifiedHost: loginArgs.IsUnifiedHost, - ConfigFile: env.Get(ctx, "DATABRICKS_CONFIG_FILE"), - Scopes: scopesList, + Profile: profileName, + Host: loginArgs.Host, + AuthType: authTypeDatabricksCLI, + AccountID: loginArgs.AccountID, + WorkspaceID: loginArgs.WorkspaceID, + ConfigFile: env.Get(ctx, "DATABRICKS_CONFIG_FILE"), + Scopes: scopesList, }, clearKeys...) 
if err != nil { return "", nil, err diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index b0ce8c9280..0d6ad09def 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -162,9 +162,9 @@ The host must be specified with the --host flag or the DATABRICKS_HOST environme clearKeys = append(clearKeys, "serverless_compute_id") } - // Clear stale unified-host metadata — PAT profiles don't use it, + // Clear stale unified-host metadata, PAT profiles don't use it, // and leaving it can change HostType() routing. - clearKeys = append(clearKeys, "experimental_is_unified_host") + clearKeys = append(clearKeys, databrickscfg.ExperimentalIsUnifiedHostKey) err = databrickscfg.SaveToProfile(ctx, &config.Config{ Profile: cfg.Profile, diff --git a/libs/auth/arguments.go b/libs/auth/arguments.go index 4f724cc801..d18d7058cd 100644 --- a/libs/auth/arguments.go +++ b/libs/auth/arguments.go @@ -1,8 +1,6 @@ package auth import ( - "strings" - "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/credentials/u2m" ) @@ -14,10 +12,9 @@ const WorkspaceIDNone = "none" // AuthArguments is a struct that contains the common arguments passed to // `databricks auth` commands. type AuthArguments struct { - Host string - AccountID string - WorkspaceID string - IsUnifiedHost bool + Host string + AccountID string + WorkspaceID string // Profile is the optional profile name. When set, the OAuth token cache // key is the profile name instead of the host-based key. @@ -30,7 +27,7 @@ type AuthArguments struct { // ToOAuthArgument converts the AuthArguments to an OAuthArgument from the Go SDK. // It calls EnsureResolved() to run host metadata discovery and routes based on -// the resolved DiscoveryURL rather than the Experimental_IsUnifiedHost flag. +// the resolved DiscoveryURL. func (a AuthArguments) ToOAuthArgument() (u2m.OAuthArgument, error) { // Strip the "none" sentinel so it is never passed to the SDK. 
workspaceID := a.WorkspaceID @@ -39,11 +36,10 @@ func (a AuthArguments) ToOAuthArgument() (u2m.OAuthArgument, error) { } cfg := &config.Config{ - Host: a.Host, - AccountID: a.AccountID, - WorkspaceID: workspaceID, - Experimental_IsUnifiedHost: a.IsUnifiedHost, - HTTPTimeoutSeconds: 5, + Host: a.Host, + AccountID: a.AccountID, + WorkspaceID: workspaceID, + HTTPTimeoutSeconds: 5, // Skip config file loading. We only want host metadata resolution // based on the explicit fields provided. Loaders: []config.Loader{config.ConfigAttributes}, @@ -59,7 +55,7 @@ func (a AuthArguments) ToOAuthArgument() (u2m.OAuthArgument, error) { host := cfg.CanonicalHostName() // Classic accounts.* hosts always use account OAuth. - if strings.HasPrefix(host, "https://accounts.") || strings.HasPrefix(host, "https://accounts-dod.") { + if IsClassicAccountHost(host) { return u2m.NewProfileAccountOAuthArgument(host, cfg.AccountID, a.Profile) } diff --git a/libs/auth/arguments_test.go b/libs/auth/arguments_test.go index 415e87c0dd..6aeb16e22b 100644 --- a/libs/auth/arguments_test.go +++ b/libs/auth/arguments_test.go @@ -89,33 +89,33 @@ func TestToOAuthArgument(t *testing.T) { wantCacheKey: "https://my-workspace.cloud.databricks.com", }, { - name: "unified host with account ID only", + name: "unified host via DiscoveryURL with account ID only", args: AuthArguments{ - Host: "https://unified.cloud.databricks.com", - AccountID: "123456789", - IsUnifiedHost: true, + Host: "https://unified.cloud.databricks.com", + AccountID: "123456789", + DiscoveryURL: "https://unified.cloud.databricks.com/oidc/accounts/123456789/.well-known/oauth-authorization-server", }, wantHost: "https://unified.cloud.databricks.com", wantCacheKey: "https://unified.cloud.databricks.com/oidc/accounts/123456789", }, { - name: "unified host with both account ID and workspace ID", + name: "unified host via DiscoveryURL with both account ID and workspace ID", args: AuthArguments{ - Host: "https://unified.cloud.databricks.com", - 
AccountID: "123456789", - WorkspaceID: "123456789", - IsUnifiedHost: true, + Host: "https://unified.cloud.databricks.com", + AccountID: "123456789", + WorkspaceID: "123456789", + DiscoveryURL: "https://unified.cloud.databricks.com/oidc/accounts/123456789/.well-known/oauth-authorization-server", }, wantHost: "https://unified.cloud.databricks.com", wantCacheKey: "https://unified.cloud.databricks.com/oidc/accounts/123456789", }, { - name: "unified host with profile uses profile-based cache key", + name: "unified host via DiscoveryURL with profile uses profile-based cache key", args: AuthArguments{ - Host: "https://unified.cloud.databricks.com", - AccountID: "123456789", - IsUnifiedHost: true, - Profile: "my-unified-profile", + Host: "https://unified.cloud.databricks.com", + AccountID: "123456789", + DiscoveryURL: "https://unified.cloud.databricks.com/oidc/accounts/123456789/.well-known/oauth-authorization-server", + Profile: "my-unified-profile", }, wantHost: "https://unified.cloud.databricks.com", wantCacheKey: "my-unified-profile", @@ -123,11 +123,11 @@ func TestToOAuthArgument(t *testing.T) { { name: "workspace_id none sentinel is stripped", args: AuthArguments{ - Host: "https://unified.cloud.databricks.com", - AccountID: "123456789", - WorkspaceID: "none", - IsUnifiedHost: true, - Profile: "my-profile", + Host: "https://unified.cloud.databricks.com", + AccountID: "123456789", + WorkspaceID: "none", + DiscoveryURL: "https://unified.cloud.databricks.com/oidc/accounts/123456789/.well-known/oauth-authorization-server", + Profile: "my-profile", }, wantHost: "https://unified.cloud.databricks.com", wantCacheKey: "my-profile", @@ -145,15 +145,17 @@ func TestToOAuthArgument(t *testing.T) { assert.Equal(t, tt.wantCacheKey, got.GetCacheKey()) // Check if we got the right type of argument and verify the hostname - if tt.args.IsUnifiedHost { + isUnified := tt.args.AccountID != "" && HasUnifiedHostSignal(tt.args.DiscoveryURL) + switch { + case isUnified: arg, ok := 
got.(u2m.UnifiedOAuthArgument) assert.True(t, ok, "expected UnifiedOAuthArgument for unified host") assert.Equal(t, tt.wantHost, arg.GetHost()) - } else if tt.args.AccountID != "" { + case IsClassicAccountHost(tt.wantHost): arg, ok := got.(u2m.AccountOAuthArgument) assert.True(t, ok, "expected AccountOAuthArgument for account host") assert.Equal(t, tt.wantHost, arg.GetAccountHost()) - } else { + default: arg, ok := got.(u2m.WorkspaceOAuthArgument) assert.True(t, ok, "expected WorkspaceOAuthArgument for workspace host") assert.Equal(t, tt.wantHost, arg.GetWorkspaceHost()) diff --git a/libs/auth/config_type.go b/libs/auth/config_type.go index 520b6864cd..0d93b1bf07 100644 --- a/libs/auth/config_type.go +++ b/libs/auth/config_type.go @@ -6,10 +6,26 @@ import ( "github.com/databricks/databricks-sdk-go/config" ) +// IsClassicAccountHost reports whether a host is a classic accounts.* host +// (account-level API access). Must be called with a canonicalized host; see +// config.Config.CanonicalHostName. +func IsClassicAccountHost(canonicalHost string) bool { + return strings.HasPrefix(canonicalHost, "https://accounts.") || + strings.HasPrefix(canonicalHost, "https://accounts-dod.") +} + +// HasUnifiedHostSignal reports whether a host has been identified as unified, +// based on a resolved DiscoveryURL pointing at an account-scoped OIDC endpoint. +// Extracted so callers that don't (yet) have an account ID can check the signal +// without tripping IsSPOG's guard. +func HasUnifiedHostSignal(discoveryURL string) bool { + return discoveryURL != "" && strings.Contains(discoveryURL, "/oidc/accounts/") +} + // IsSPOG returns true if the config represents a SPOG (Single Pane of Glass) -// host with account-scoped OIDC. Detection is based on: -// 1. The resolved DiscoveryURL containing /oidc/accounts/ (from .well-known). -// 2. The Experimental_IsUnifiedHost flag as a legacy fallback. +// host with account-scoped OIDC. 
Detection layers HasUnifiedHostSignal on top +// of an accountID guard: SPOG routing requires an account ID to construct the +// OAuth URL, so an empty accountID always returns false. // // The accountID parameter is separate from cfg.AccountID so that callers can // control the source: ResolveConfigType passes cfg.AccountID (from config file), @@ -19,10 +35,7 @@ func IsSPOG(cfg *config.Config, accountID string) bool { if accountID == "" { return false } - if cfg.DiscoveryURL != "" && strings.Contains(cfg.DiscoveryURL, "/oidc/accounts/") { - return true - } - return cfg.Experimental_IsUnifiedHost + return HasUnifiedHostSignal(cfg.DiscoveryURL) } // ResolveConfigType determines the effective ConfigType for a resolved config. @@ -41,9 +54,6 @@ func ResolveConfigType(cfg *config.Config) config.ConfigType { return configType } - // The WorkspaceConfig return is a no-op when configType is already // WorkspaceConfig, but is needed for InvalidConfig (legacy IsUnifiedHost // profiles where the SDK dropped the UnifiedHost case in v0.126.0). 
if cfg.WorkspaceID != "" && cfg.WorkspaceID != WorkspaceIDNone { return config.WorkspaceConfig } diff --git a/libs/auth/config_type_test.go b/libs/auth/config_type_test.go index 0ce3b6d410..8ebe8ff7d6 100644 --- a/libs/auth/config_type_test.go +++ b/libs/auth/config_type_test.go @@ -7,6 +7,23 @@ import ( "github.com/stretchr/testify/assert" ) +func TestHasUnifiedHostSignal(t *testing.T) { + cases := []struct { + name string + discoveryURL string + want bool + }{ + {name: "no signal", want: false}, + {name: "account-scoped OIDC", discoveryURL: "https://spog.databricks.com/oidc/accounts/acct-123/.well-known/oauth-authorization-server", want: true}, + {name: "workspace-scoped OIDC", discoveryURL: "https://workspace.databricks.com/oidc/.well-known/oauth-authorization-server", want: false}, + } + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + assert.Equal(t, tc.want, HasUnifiedHostSignal(tc.discoveryURL)) + }) + } +} + func TestResolveConfigType(t *testing.T) { cases := []struct { name string @@ -60,26 +77,7 @@ func TestResolveConfigType(t *testing.T) { want: config.WorkspaceConfig, }, { - name: "IsUnifiedHost fallback without discovery routes to AccountConfig", - cfg: &config.Config{ - Host: "https://spog.databricks.com", - AccountID: "acct-123", - Experimental_IsUnifiedHost: true, - }, - want: config.AccountConfig, - }, - { - name: "IsUnifiedHost fallback with workspace routes to WorkspaceConfig", - cfg: &config.Config{ - Host: "https://spog.databricks.com", - AccountID: "acct-123", - WorkspaceID: "ws-456", - Experimental_IsUnifiedHost: true, - }, - want: config.WorkspaceConfig, - }, - { - name: "no discovery and no IsUnifiedHost stays WorkspaceConfig", + name: "no discovery stays WorkspaceConfig", cfg: &config.Config{ Host: "https://workspace.databricks.com", AccountID: "acct-123", diff --git a/libs/auth/credentials.go b/libs/auth/credentials.go index 6b95177353..ddc95d9602 100644 --- a/libs/auth/credentials.go +++ b/libs/auth/credentials.go @@ 
-138,13 +138,14 @@ func (c CLICredentials) persistentAuth(ctx context.Context, opts ...u2m.Persiste } // authArgumentsFromConfig converts an SDK config to AuthArguments. +// DiscoveryURL is the primary (and only) unified-host signal; it is populated +// by EnsureResolved() from .well-known/databricks-config before this runs. func authArgumentsFromConfig(cfg *config.Config) AuthArguments { return AuthArguments{ - Host: cfg.Host, - AccountID: cfg.AccountID, - WorkspaceID: cfg.WorkspaceID, - IsUnifiedHost: cfg.Experimental_IsUnifiedHost, - Profile: cfg.Profile, - DiscoveryURL: cfg.DiscoveryURL, + Host: cfg.Host, + AccountID: cfg.AccountID, + WorkspaceID: cfg.WorkspaceID, + Profile: cfg.Profile, + DiscoveryURL: cfg.DiscoveryURL, } } diff --git a/libs/auth/credentials_test.go b/libs/auth/credentials_test.go index 9412d16418..3ce2e4b0c4 100644 --- a/libs/auth/credentials_test.go +++ b/libs/auth/credentials_test.go @@ -96,18 +96,18 @@ func TestAuthArgumentsFromConfig(t *testing.T) { { name: "all fields", cfg: &config.Config{ - Host: "https://myhost.com", - AccountID: "acc-123", - WorkspaceID: "ws-456", - Profile: "my-profile", - Experimental_IsUnifiedHost: true, + Host: "https://myhost.com", + AccountID: "acc-123", + WorkspaceID: "ws-456", + Profile: "my-profile", + DiscoveryURL: "https://myhost.com/oidc/accounts/acc-123/.well-known/oauth-authorization-server", }, want: AuthArguments{ - Host: "https://myhost.com", - AccountID: "acc-123", - WorkspaceID: "ws-456", - Profile: "my-profile", - IsUnifiedHost: true, + Host: "https://myhost.com", + AccountID: "acc-123", + WorkspaceID: "ws-456", + Profile: "my-profile", + DiscoveryURL: "https://myhost.com/oidc/accounts/acc-123/.well-known/oauth-authorization-server", }, }, } diff --git a/libs/auth/error.go b/libs/auth/error.go index 60c2e5ce7b..2ca8aa5f80 100644 --- a/libs/auth/error.go +++ b/libs/auth/error.go @@ -124,10 +124,10 @@ func writeReauthSteps(ctx context.Context, cfg *config.Config, b *strings.Builde return } oauthArg, 
argErr := AuthArguments{ - Host: cfg.Host, - AccountID: cfg.AccountID, - WorkspaceID: cfg.WorkspaceID, - IsUnifiedHost: cfg.Experimental_IsUnifiedHost, + Host: cfg.Host, + AccountID: cfg.AccountID, + WorkspaceID: cfg.WorkspaceID, + DiscoveryURL: cfg.DiscoveryURL, }.ToOAuthArgument() if argErr != nil { fmt.Fprint(b, "\n - Re-authenticate: databricks auth login") @@ -172,10 +172,9 @@ func BuildLoginCommand(ctx context.Context, profile string, arg u2m.OAuthArgumen } else { switch arg := arg.(type) { case u2m.UnifiedOAuthArgument: - // The --experimental-is-unified-host flag is redundant now that - // discovery handles routing, but kept for backward compatibility - // until the flag is fully removed. - cmd = append(cmd, "--host", arg.GetHost(), "--account-id", arg.GetAccountId(), "--experimental-is-unified-host") + // Discovery handles unified-host routing from --host + --account-id, + // so we no longer suggest --experimental-is-unified-host here. + cmd = append(cmd, "--host", arg.GetHost(), "--account-id", arg.GetAccountId()) case u2m.AccountOAuthArgument: cmd = append(cmd, "--host", arg.GetAccountHost(), "--account-id", arg.GetAccountId()) case u2m.WorkspaceOAuthArgument: diff --git a/libs/auth/error_test.go b/libs/auth/error_test.go index 52c739294b..1e724a4b25 100644 --- a/libs/auth/error_test.go +++ b/libs/auth/error_test.go @@ -228,20 +228,20 @@ func TestEnrichAuthError(t *testing.T) { "\n - Consider setting up a profile: databricks auth login --profile ", }, { - name: "401 with unified host and no profile", + name: "401 with unified host (resolved DiscoveryURL) and no profile", cfg: &config.Config{ - Host: "https://unified.cloud.databricks.com", - AccountID: "acc-123", - WorkspaceID: "ws-456", - AuthType: AuthTypeDatabricksCli, - Experimental_IsUnifiedHost: true, + Host: "https://unified.cloud.databricks.com", + AccountID: "acc-123", + WorkspaceID: "ws-456", + AuthType: AuthTypeDatabricksCli, + DiscoveryURL: 
"https://unified.cloud.databricks.com/oidc/accounts/acc-123/.well-known/oauth-authorization-server", }, statusCode: 401, wantMsg: "test error message\n" + "\nHost: https://unified.cloud.databricks.com" + "\nAuth type: OAuth (databricks-cli)" + "\n\nNext steps:" + - "\n - Re-authenticate: databricks auth login --host https://unified.cloud.databricks.com --account-id acc-123 --experimental-is-unified-host" + + "\n - Re-authenticate: databricks auth login --host https://unified.cloud.databricks.com --account-id acc-123" + "\n - Check your identity: databricks auth describe" + "\n - Consider setting up a profile: databricks auth login --profile ", }, diff --git a/libs/databrickscfg/ops.go b/libs/databrickscfg/ops.go index 4b705744d2..c4d0f1cc79 100644 --- a/libs/databrickscfg/ops.go +++ b/libs/databrickscfg/ops.go @@ -276,6 +276,12 @@ func matchOrCreateSection(ctx context.Context, configFile *config.File, cfg *con return section, nil } +// ExperimentalIsUnifiedHostKey is the INI key for the deprecated +// experimental_is_unified_host flag. Unified hosts are now detected from +// /.well-known/databricks-config; the key is only ever cleared from profiles +// (never read or written) so stale values don't influence routing. +const ExperimentalIsUnifiedHostKey = "experimental_is_unified_host" + // AuthCredentialKeys returns the config file key names for all auth credential // fields from the SDK's ConfigAttributes. These are fields annotated with an // auth type (e.g. pat, basic, oauth, azure, google). 
Use this to clear stale diff --git a/libs/databrickscfg/profile/file.go b/libs/databrickscfg/profile/file.go index 32f5bc5a8c..b7f6074c81 100644 --- a/libs/databrickscfg/profile/file.go +++ b/libs/databrickscfg/profile/file.go @@ -83,7 +83,6 @@ func (f FileProfilerImpl) LoadProfiles(ctx context.Context, fn ProfileMatchFunct Host: host, AccountID: all["account_id"], WorkspaceID: all["workspace_id"], - IsUnifiedHost: all["experimental_is_unified_host"] == "true", ClusterID: all["cluster_id"], ServerlessComputeID: all["serverless_compute_id"], HasClientCredentials: all["client_id"] != "" && all["client_secret"] != "", diff --git a/libs/databrickscfg/profile/profile.go b/libs/databrickscfg/profile/profile.go index 1651d33541..7d2b8f715a 100644 --- a/libs/databrickscfg/profile/profile.go +++ b/libs/databrickscfg/profile/profile.go @@ -14,7 +14,6 @@ type Profile struct { Host string AccountID string WorkspaceID string - IsUnifiedHost bool ClusterID string ServerlessComputeID string HasClientCredentials bool diff --git a/libs/databrickscfg/profile/profiler.go b/libs/databrickscfg/profile/profiler.go index af99794799..56cfdb5f52 100644 --- a/libs/databrickscfg/profile/profiler.go +++ b/libs/databrickscfg/profile/profiler.go @@ -11,9 +11,9 @@ type ProfileMatchFunction func(Profile) bool func MatchWorkspaceProfiles(p Profile) bool { // Workspace profile: has workspace_id (covers both classic and SPOG profiles), - // or is a regular workspace host (no account_id and not a legacy unified-host profile). + // or is a regular workspace host (no account_id). // workspace_id = "none" is a sentinel for "skip workspace", so it does NOT count. 
- return (p.WorkspaceID != "" && p.WorkspaceID != auth.WorkspaceIDNone) || (p.AccountID == "" && !p.IsUnifiedHost) + return (p.WorkspaceID != "" && p.WorkspaceID != auth.WorkspaceIDNone) || p.AccountID == "" } func MatchAccountProfiles(p Profile) bool { diff --git a/libs/databrickscfg/profile/profiler_test.go b/libs/databrickscfg/profile/profiler_test.go index 66db4dcbb5..e3566b9351 100644 --- a/libs/databrickscfg/profile/profiler_test.go +++ b/libs/databrickscfg/profile/profiler_test.go @@ -211,21 +211,11 @@ func TestMatchWorkspaceProfiles(t *testing.T) { profile: Profile{Host: "https://spog.example.com", AccountID: "acc-1", WorkspaceID: "ws-1"}, want: true, }, - { - name: "legacy unified workspace (has workspace_id and IsUnifiedHost)", - profile: Profile{Host: "https://unified.example.com", AccountID: "acc-1", WorkspaceID: "ws-1", IsUnifiedHost: true}, - want: true, - }, { name: "regular account profile (has account_id, no workspace_id)", profile: Profile{Host: "https://accounts.cloud.databricks.com", AccountID: "acc-1"}, want: false, }, - { - name: "legacy unified account (IsUnifiedHost, no workspace_id)", - profile: Profile{Host: "https://unified.example.com", AccountID: "acc-1", IsUnifiedHost: true}, - want: false, - }, { name: "workspace_id none sentinel is not a workspace profile", profile: Profile{Host: "https://spog.example.com", AccountID: "acc-1", WorkspaceID: "none"}, @@ -256,11 +246,6 @@ func TestMatchAccountProfiles(t *testing.T) { profile: Profile{Host: "https://spog.example.com", AccountID: "acc-1"}, want: true, }, - { - name: "legacy unified account profile", - profile: Profile{Host: "https://unified.example.com", AccountID: "acc-1", IsUnifiedHost: true}, - want: true, - }, { name: "workspace_id none sentinel matches as account profile", profile: Profile{Host: "https://spog.example.com", AccountID: "acc-1", WorkspaceID: "none"},