Skip to content
33 changes: 22 additions & 11 deletions cluster-autoscaler/builder/autoscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/capacitybuffer/fakepods"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config"
cacontext "k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/core"
coreoptions "k8s.io/autoscaler/cluster-autoscaler/core/options"
"k8s.io/autoscaler/cluster-autoscaler/core/podlistprocessor"
Expand Down Expand Up @@ -75,6 +76,7 @@ type AutoscalerBuilder struct {
podObserver *loop.UnschedulablePodObserver
cloudProvider cloudprovider.CloudProvider
informerFactory informers.SharedInformerFactory
kubeClients *cacontext.AutoscalingKubeClients
}

// New creates a builder with default options.
Expand Down Expand Up @@ -120,6 +122,14 @@ func (b *AutoscalerBuilder) WithInformerFactory(f informers.SharedInformerFactor
return b
}

// WithAutoscalingKubeClients allows injecting autoscaling kube clients.
// It is not needed for most use-cases.
// Once used, it has to be in sync with the object provided in WithKubeClient and WithInformerFactory.
func (b *AutoscalerBuilder) WithAutoscalingKubeClients(kubeClients *cacontext.AutoscalingKubeClients) *AutoscalerBuilder {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

AutoscalingKubeClients is normally built based on the KubeClient and InformerFactory, both of which are already injectable (and as far I can see they need to be injected). Why do we need to make AutoscalingKubeClients directly injectable too? It seems like we'd always want to have the AutoscalingKubeClients field in sync with the other two. Otherwise we'd get different behavior for components that use the client/informers directly than for components that use AutoscalingKubeClients.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah okay, after reading some more I see why - we want to change the log recorder. Could you add a warning to the method comment that this is not needed for most use-cases, and if you're using it it has to be in sync with the objects provided in WithKubeClient() and WithInformerFactory()?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Added a comment to the method.

b.kubeClients = kubeClients
return b
}

// Build constructs the Autoscaler based on the provided configuration.
func (b *AutoscalerBuilder) Build(ctx context.Context) (core.Autoscaler, *loop.LoopTrigger, error) {
// Get AutoscalingOptions from flags.
Expand Down Expand Up @@ -147,17 +157,18 @@ func (b *AutoscalerBuilder) Build(ctx context.Context) (core.Autoscaler, *loop.L

var snapshotStore clustersnapshot.ClusterSnapshotStore = store.NewDeltaSnapshotStore(autoscalingOptions.ClusterSnapshotParallelism)
opts := coreoptions.AutoscalerOptions{
AutoscalingOptions: autoscalingOptions,
FrameworkHandle: fwHandle,
ClusterSnapshot: predicate.NewPredicateSnapshot(snapshotStore, fwHandle, autoscalingOptions.DynamicResourceAllocationEnabled, autoscalingOptions.PredicateParallelism, autoscalingOptions.CSINodeAwareSchedulingEnabled),
KubeClient: b.kubeClient,
InformerFactory: b.informerFactory,
DebuggingSnapshotter: b.debuggingSnapshotter,
DeleteOptions: deleteOptions,
DrainabilityRules: drainabilityRules,
ScaleUpOrchestrator: orchestrator.New(),
KubeClientNew: b.manager.GetClient(),
KubeCache: b.manager.GetCache(),
AutoscalingOptions: autoscalingOptions,
FrameworkHandle: fwHandle,
ClusterSnapshot: predicate.NewPredicateSnapshot(snapshotStore, fwHandle, autoscalingOptions.DynamicResourceAllocationEnabled, autoscalingOptions.PredicateParallelism, autoscalingOptions.CSINodeAwareSchedulingEnabled),
KubeClient: b.kubeClient,
InformerFactory: b.informerFactory,
AutoscalingKubeClients: b.kubeClients,
DebuggingSnapshotter: b.debuggingSnapshotter,
DeleteOptions: deleteOptions,
DrainabilityRules: drainabilityRules,
ScaleUpOrchestrator: orchestrator.New(),
KubeClientNew: b.manager.GetClient(),
KubeCache: b.manager.GetCache(),
}

opts.Processors = ca_processors.DefaultProcessors(autoscalingOptions)
Expand Down
19 changes: 18 additions & 1 deletion cluster-autoscaler/cloudprovider/test/fake_cloud_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,15 @@ package test

import (
"fmt"
"sync"

apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/utils/errors"
fakek8s "k8s.io/autoscaler/cluster-autoscaler/utils/fake"
"sync"
)

const (
Expand Down Expand Up @@ -153,6 +154,22 @@ func WithNode(node *apiv1.Node) NodeGroupOption {
}
}

// WithNGSize sets the minimum and maximum size of the node group.
// Parameters are renamed from min/max so they don't shadow the Go 1.21
// built-in min and max functions; callers are unaffected (positional args).
func WithNGSize(minSize, maxSize int) NodeGroupOption {
	return func(n *NodeGroup) {
		n.minSize = minSize
		n.maxSize = maxSize
	}
}

// WithTemplate configures the node group to report the given node info
// as its template.
func WithTemplate(template *framework.NodeInfo) NodeGroupOption {
	return func(ng *NodeGroup) {
		ng.template = template
	}
}

// AddNodeGroup is a helper for tests to add a group with its template.
func (c *CloudProvider) AddNodeGroup(id string, opts ...NodeGroupOption) {
c.Lock()
Expand Down
Loading
Loading