Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion acceptance/features/stretch-cluster-basics.feature
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
@multicluster
@multicluster @dev-env
Feature: Multicluster Operator

@skip:gke @skip:aks @skip:eks
Expand All @@ -17,6 +17,7 @@ Feature: Multicluster Operator
enabled: false
rbac:
enabled: true
rpkDebugBundle: true
tls:
enabled: true
certs:
Expand Down
122 changes: 115 additions & 7 deletions acceptance/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,18 @@ package main

import (
"context"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"testing"
"time"

"github.com/redpanda-data/common-go/kube"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
Expand Down Expand Up @@ -137,12 +140,20 @@ var setupSuite = sync.OnceValues(func() (*framework.Suite, error) {
}).
RegisterTag("cluster", 2, ClusterTag).
RegisterGroup("multicluster", "multicluster").
RegisterGroup("dev-env", "dev-env").
ExitOnCleanupFailures()

if testutil.MultiClusterSetupOnly() {
builder = builder.SkipCleanup()
}

if testutil.AcceptanceSetupOnly() {
builder = builder.
AfterSetup(applyBasicClusterForDevEnv).
SkipFeatures().
SkipCleanup()
}

return builder.Build()
})

Expand Down Expand Up @@ -231,13 +242,7 @@ func installSharedOperator(ctx context.Context, restConfig *rest.Config) error {
VectorizedControllers: &operatorchart.PartialVectorizedControllers{
Enabled: ptr.To(true),
},
AdditionalCmdFlags: []string{
"--configurator-image-pull-policy=IfNotPresent",
"--additional-controllers=nodeWatcher,decommission",
"--unbind-pvcs-after=5s",
"--cluster-connection-timeout=500ms",
"--enable-shadowlinks",
},
AdditionalCmdFlags: operatorCmdFlags(),
},
})
// Tolerate "already installed" errors from rerun-fails retries where
Expand Down Expand Up @@ -303,3 +308,106 @@ func waitForCertManagerWebhook(ctx context.Context, restConfig *rest.Config) err
}
return testutil.WaitForCertManagerWebhook(ctx, c, 2*time.Minute)
}

// applyBasicClusterForDevEnv is registered as an AfterSetup hook when
// `-acceptance-setup-only` is used. It applies acceptance/clusters/basic/cluster.yaml
// against the suite's k3s cluster after the operator has been installed, giving
// `task dev:setup-dev-env` a usable single-node Redpanda deployment. When
// `-acceptance-setup-nodepools=N` is non-zero, N additional NodePool CRDs
// pointing at the basic cluster are also applied.
func applyBasicClusterForDevEnv(ctx context.Context, restConfig *rest.Config) error {
	const manifestPath = "clusters/basic/cluster.yaml"

	src, err := os.ReadFile(manifestPath)
	if err != nil {
		return fmt.Errorf("reading %s: %w", manifestPath, err)
	}

	// Substitute the image placeholders the manifest shares with the
	// acceptance steps, then optionally append extra NodePool documents.
	replacer := strings.NewReplacer(
		"${DEFAULT_REDPANDA_REPO}", steps.DefaultRedpandaRepo,
		"${DEFAULT_REDPANDA_TAG}", steps.DefaultRedpandaTag,
	)
	rendered := replacer.Replace(string(src))
	if pools := testutil.AcceptanceSetupNodePools(); pools > 0 {
		rendered += renderDevEnvNodePools(pools)
	}

	// Write the rendered manifest to a temp file so kubectl can consume it.
	tmpManifest, err := os.CreateTemp("", "dev-env-basic-cluster-*.yaml")
	if err != nil {
		return err
	}
	defer os.Remove(tmpManifest.Name())
	if _, err := tmpManifest.WriteString(rendered); err != nil {
		tmpManifest.Close()
		return err
	}
	if err := tmpManifest.Close(); err != nil {
		return err
	}

	// Materialize the suite's rest.Config as an on-disk kubeconfig; kubectl
	// only accepts credentials via file/env, not in-memory config.
	tmpKubeconfig, err := os.CreateTemp("", "dev-env-kubeconfig-*")
	if err != nil {
		return err
	}
	tmpKubeconfig.Close()
	defer os.Remove(tmpKubeconfig.Name())
	if err := kube.WriteToFile(kube.RestToConfig(restConfig), tmpKubeconfig.Name()); err != nil {
		return err
	}

	// Server-side apply keeps reruns of the setup task idempotent.
	apply := exec.CommandContext(ctx, "kubectl", "--kubeconfig", tmpKubeconfig.Name(), "apply", "--server-side", "-f", tmpManifest.Name())
	combined, err := apply.CombinedOutput()
	if err != nil {
		return fmt.Errorf("kubectl apply: %w: %s", err, combined)
	}
	fmt.Printf("dev-env: applied basic Redpanda cluster\n%s", combined)
	return nil
}

// operatorCmdFlags returns the --additional-cmd-flags entries passed to the
// shared operator's helm install. The v2 nodepool controller is opt-in
// (false by default in the operator). It's enabled here only when the dev
// env was started with -acceptance-setup-nodepools > 0 — i.e. when the user
// invoked `task dev:setup-dev-env-with-nodepools` — because the dev-env
// manifest applies NodePool CRDs in that mode and the controller has to be
// running for them to reconcile.
func operatorCmdFlags() []string {
	// Baseline flags shared by every acceptance/dev-env operator install.
	base := []string{
		"--configurator-image-pull-policy=IfNotPresent",
		"--additional-controllers=nodeWatcher,decommission",
		"--unbind-pvcs-after=5s",
		"--cluster-connection-timeout=500ms",
		"--enable-shadowlinks",
	}
	if testutil.AcceptanceSetupNodePools() <= 0 {
		return base
	}
	return append(base, "--enable-v2-nodepools=true")
}

// renderDevEnvNodePools returns N NodePool manifests separated by YAML document
// markers, each referencing the basic cluster and using the same Redpanda and
// operator images as cluster.yaml. Pools are named pool-1..pool-N, each with
// replicas: 1. The result is intended to be appended to an existing YAML
// manifest (each document begins with its own `---` marker).
func renderDevEnvNodePools(n int) string {
	var b strings.Builder
	for i := 1; i <= n; i++ {
		// NOTE: the raw string's interior indentation is significant — it is
		// emitted verbatim as YAML. sidecarImage uses imageRepo/imageTag,
		// presumably the operator image vars defined elsewhere in this file —
		// confirm against the package-level declarations.
		fmt.Fprintf(&b, `
---
apiVersion: cluster.redpanda.com/v1alpha2
kind: NodePool
metadata:
name: pool-%d
spec:
clusterRef:
name: basic
replicas: 1
image:
repository: %s
tag: %s
sidecarImage:
repository: %s
tag: %s
`, i, steps.DefaultRedpandaRepo, steps.DefaultRedpandaTag, imageRepo, imageTag)
	}
	return b.String()
}
80 changes: 47 additions & 33 deletions harpoon/suite.go
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,7 @@ type SuiteBuilder struct {
afterSetup []func(ctx context.Context, restConfig *rest.Config) error
exitOnCleanupFailures bool
skipCleanup bool
skipFeatures bool
// registeredGroups maps group name to the feature tag that identifies it.
// The special "default" group represents features with none of the registered tags.
registeredGroups map[string]string
Expand Down Expand Up @@ -354,6 +355,15 @@ func (b *SuiteBuilder) SkipCleanup() *SuiteBuilder {
return b
}

// SkipFeatures causes RunT to run only the setup phase (provider creation,
// helm charts, AfterSetup hooks) and skip executing feature scenarios. The
// teardown phase still runs unless SkipCleanup is also set. This is intended
// for "setup-only" entry points such as dev-environment bootstrappers.
// Returns the builder to allow call chaining.
func (b *SuiteBuilder) SkipFeatures() *SuiteBuilder {
	b.skipFeatures = true // copied into Suite by Build; consulted by RunT before the feature phases
	return b
}

func (b *SuiteBuilder) WithCRDDirectory(directory string) *SuiteBuilder {
b.crdDirectories = append(b.crdDirectories, directory)
return b
Expand Down Expand Up @@ -457,6 +467,7 @@ func (b *SuiteBuilder) Build() (*Suite, error) {
images: b.images,
exitOnCleanupFailures: b.exitOnCleanupFailures,
skipCleanup: b.skipCleanup,
skipFeatures: b.skipFeatures,
}, nil
}

Expand All @@ -479,6 +490,7 @@ type Suite struct {
images []string
exitOnCleanupFailures bool
skipCleanup bool
skipFeatures bool
}

// makeGodogSuite creates a godog.TestSuite for the given feature contents.
Expand Down Expand Up @@ -749,40 +761,42 @@ func (s *Suite) RunT(t *testing.T) {
// Track whether any feature suite reported a failure.
var suiteFailed bool

// Phase 2: Run parallel features concurrently.
// Wrapped in a non-parallel subtest so that Go waits for all parallel
// features to complete before Phase 3 (serial features) begins.
t.Run("parallel", func(t *testing.T) {
for _, f := range parallelFeatures {
t.Run(strings.ReplaceAll(f.name, "/", "_"), func(t *testing.T) {
t.Parallel()

tracker := tracking.NewFeatureHookTracker(s.registry, s.testingOpts, s.onFeatures, s.onScenarios)
gf := []godog.Feature{{Name: f.name, Contents: f.contents}}
suite := s.makeGodogSuite(f.name, tracker, gf, nil)
suite.Options.TestingT = t
suite.Run()
if tracker.SuiteFailed() {
termMu.Lock()
suiteFailed = true
termMu.Unlock()
}
})
}
})
if !s.skipFeatures {
// Phase 2: Run parallel features concurrently.
// Wrapped in a non-parallel subtest so that Go waits for all parallel
// features to complete before Phase 3 (serial features) begins.
t.Run("parallel", func(t *testing.T) {
for _, f := range parallelFeatures {
t.Run(strings.ReplaceAll(f.name, "/", "_"), func(t *testing.T) {
t.Parallel()

tracker := tracking.NewFeatureHookTracker(s.registry, s.testingOpts, s.onFeatures, s.onScenarios)
gf := []godog.Feature{{Name: f.name, Contents: f.contents}}
suite := s.makeGodogSuite(f.name, tracker, gf, nil)
suite.Options.TestingT = t
suite.Run()
if tracker.SuiteFailed() {
termMu.Lock()
suiteFailed = true
termMu.Unlock()
}
})
}
})

// Phase 3: Run serial features sequentially.
if len(serialFeatures) > 0 {
tracker := tracking.NewFeatureHookTracker(s.registry, s.testingOpts, s.onFeatures, s.onScenarios)
var gf []godog.Feature
for _, f := range serialFeatures {
gf = append(gf, godog.Feature{Name: f.name, Contents: f.contents})
}
suite := s.makeGodogSuite("serial", tracker, gf, nil)
suite.Options.TestingT = t
suite.Run()
if tracker.SuiteFailed() {
suiteFailed = true
// Phase 3: Run serial features sequentially.
if len(serialFeatures) > 0 {
tracker := tracking.NewFeatureHookTracker(s.registry, s.testingOpts, s.onFeatures, s.onScenarios)
var gf []godog.Feature
for _, f := range serialFeatures {
gf = append(gf, godog.Feature{Name: f.name, Contents: f.contents})
}
suite := s.makeGodogSuite("serial", tracker, gf, nil)
suite.Options.TestingT = t
suite.Run()
if tracker.SuiteFailed() {
suiteFailed = true
}
}
}

Expand Down
22 changes: 19 additions & 3 deletions pkg/testutil/testutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,11 @@ import (
)

var (
multiClusterSetupOnly = flag.Bool("multi-cluster-setup-only", false, "if true, only the multi-cluster setup will be performed.")
retain = flag.Bool("retain", false, "if true, no clean up will be performed.")
update = flag.Bool("update", false, "if true, golden assertions will update the expected file instead of performing an assertion")
multiClusterSetupOnly = flag.Bool("multi-cluster-setup-only", false, "if true, only the multi-cluster setup will be performed.")
acceptanceSetupOnly = flag.Bool("acceptance-setup-only", false, "if true, the acceptance suite runs only setup (no scenarios) and skips cleanup; used by `task dev:setup-dev-env`.")
acceptanceSetupNodePools = flag.Int("acceptance-setup-nodepools", 0, "number of NodePool CRDs to deploy alongside the basic cluster when -acceptance-setup-only is used. Each pool has replicas=1 and points to the basic cluster.")
retain = flag.Bool("retain", false, "if true, no clean up will be performed.")
update = flag.Bool("update", false, "if true, golden assertions will update the expected file instead of performing an assertion")
)

const (
Expand Down Expand Up @@ -112,6 +114,20 @@ func MultiClusterSetupOnly() bool {
return *multiClusterSetupOnly
}

// AcceptanceSetupOnly returns true when the acceptance suite should run only
// the setup phase (provider, helm charts, AfterSetup hooks) and skip executing
// feature scenarios. Used by `task dev:setup-dev-env` to bring up a k3s
// cluster with a basic Redpanda deployment for local development.
// The value is controlled by the -acceptance-setup-only test flag
// (false by default); flag.Parse must have run before this is called.
func AcceptanceSetupOnly() bool {
	return *acceptanceSetupOnly
}

// AcceptanceSetupNodePools returns the number of NodePool CRDs to deploy
// alongside the basic cluster in dev-env setup mode.
// The value is controlled by the -acceptance-setup-nodepools test flag and
// defaults to 0 (no extra pools); flag.Parse must have run before this is
// called.
func AcceptanceSetupNodePools() int {
	return *acceptanceSetupNodePools
}

// TempDir is wrapper around [testing.T.TempDir] that respects [Retain].
func TempDir(t *testing.T) string {
t.Helper()
Expand Down
17 changes: 16 additions & 1 deletion taskfiles/dev.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,21 @@ tasks:

setup-multicluster-dev-env:
- 'echo "--- Setting up multicluster dev env using k3s and vCluster(s)"'
# -groups dev-env narrows the multicluster suite to stretch-cluster-basics.feature
# (the only feature tagged @dev-env), so the rest of the multicluster features
# are skipped without having to delete the .feature files locally.
- task: :test:acceptance-multicluster
vars:
CLI_ARGS: '{{.CLI_ARGS}} -v -count=1 -multi-cluster-setup-only'
CLI_ARGS: '{{.CLI_ARGS}} -v -count=1 -multi-cluster-setup-only -groups dev-env'

setup-dev-env:
- 'echo "--- Setting up dev env using k3s with a basic Redpanda cluster"'
- task: :test:acceptance
vars:
CLI_ARGS: '{{.CLI_ARGS}} -v -count=1 -acceptance-setup-only'

setup-dev-env-with-nodepools:
- 'echo "--- Setting up dev env using k3s with a basic Redpanda cluster + 2 NodePools (3 brokers total)"'
- task: :test:acceptance
vars:
CLI_ARGS: '{{.CLI_ARGS}} -v -count=1 -acceptance-setup-only -acceptance-setup-nodepools=2'
Loading