diff --git a/Makefile b/Makefile index d6e7c2878..8cf6fad82 100644 --- a/Makefile +++ b/Makefile @@ -181,6 +181,14 @@ test-e2e: --ginkgo.trace \ --ginkgo.v +# Kind cluster name used by test-e2e-local (must match the cluster you create with kind create cluster --name <cluster-name>). +KIND_CLUSTER ?= koperator-e2e + +# Build operator image, load it into kind, and run e2e tests with that image. Use when testing local changes. +test-e2e-local: docker-build + kind load docker-image $(IMG) --name $(KIND_CLUSTER) + $(MAKE) test-e2e IMG_E2E=$(IMG) + manager: generate fmt vet ## Generate (kubebuilder) and build manager binary. go build -o bin/manager main.go diff --git a/tests/e2e/global.go b/tests/e2e/global.go index 8102543cc..3cdb379c1 100644 --- a/tests/e2e/global.go +++ b/tests/e2e/global.go @@ -39,6 +39,8 @@ var ( }, } // contour ingress controller + // Envoy service is set to NodePort so Helm --atomic does not wait for a LoadBalancer + // ingress IP (which never comes on kind without MetalLB). contourIngressControllerHelmDescriptor = helmDescriptor{ Repository: "https://projectcontour.github.io/helm-charts", ChartName: "contour", @@ -47,6 +49,7 @@ var ( Namespace: "projectcontour", SetValues: map[string]string{ "contour.manageCRDs": "true", + "envoy.service.type": "NodePort", }, HelmExtraArguments: map[string][]string{ "install": {"--timeout", "10m"}, diff --git a/tests/e2e/helm.go b/tests/e2e/helm.go index 67e3dd709..0c0914663 100644 --- a/tests/e2e/helm.go +++ b/tests/e2e/helm.go @@ -191,8 +191,9 @@ func (helmDescriptor *helmDescriptor) installHelmChart(kubectlOptions k8s.Kubect switch { case isInstalled: installedChartName, installedChartVersion := helmRelease.chartNameAndVersion() + expectedChartName := chartNameForComparison(helmDescriptor.ChartName) - if installedChartName != helmDescriptor.ChartName { + if installedChartName != expectedChartName { return errors.Errorf( "Installed Helm chart name '%s' mismatches Helm descriptor chart name to be installed '%s'", 
installedChartName, helmDescriptor.ChartName, @@ -386,6 +387,19 @@ func (helmRelease *HelmRelease) chartNameAndVersion() (string, string) { return groups[1], groups[2] } +// chartNameForComparison returns the chart name to compare with the installed release. +// For OCI refs (oci://host/path/chart-name), Helm reports only the last segment as the chart name. +func chartNameForComparison(descriptorChartName string) string { + if strings.HasPrefix(descriptorChartName, "oci://") { + path := strings.TrimPrefix(descriptorChartName, "oci://") + if i := strings.LastIndex(path, "/"); i >= 0 && i+1 < len(path) { + return path[i+1:] + } + return path + } + return descriptorChartName +} + // listHelmReleases returns a slice of Helm releases retrieved from the cluster // using the specified kubectl context and namespace. func listHelmReleases(kubectlOptions k8s.KubectlOptions) ([]*HelmRelease, error) { @@ -415,8 +429,16 @@ func listHelmReleases(kubectlOptions k8s.KubectlOptions) ([]*HelmRelease, error) return nil, errors.WrapIf(err, "listing Helm releases failed") } + // Helm may print WARNINGs (e.g. kubeconfig permissions) to stdout before the JSON. + // Parse only the JSON array: from the first '[' to the end. 
+ jsonStart := strings.Index(output, "[") + if jsonStart < 0 { + return nil, errors.WrapIfWithDetails(errors.New("no JSON array in helm list output"), "parsing Helm releases failed", "output", output) + } + jsonBytes := strings.TrimSpace(output[jsonStart:]) + var releases []*HelmRelease - err = json.Unmarshal([]byte(output), &releases) + err = json.Unmarshal([]byte(jsonBytes), &releases) if err != nil { return nil, errors.WrapIfWithDetails(err, "parsing Helm releases failed", "output", output) } diff --git a/tests/e2e/platforms/kind/kind_config.yaml b/tests/e2e/platforms/kind/kind_config.yaml index 977e3b7fb..65d601b47 100644 --- a/tests/e2e/platforms/kind/kind_config.yaml +++ b/tests/e2e/platforms/kind/kind_config.yaml @@ -1,4 +1,6 @@ --- +# Kind cluster config for e2e tests. Compatible with affinity tests that use +# topology.kubernetes.io/zone (e.g. config/samples/simplekafkacluster_affinity.yaml). kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: @@ -10,8 +12,26 @@ nodes: kubeletExtraArgs: node-labels: "ingress-ready=true" - role: worker + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "topology.kubernetes.io/zone=zone-a" - role: worker + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "topology.kubernetes.io/zone=zone-b" - role: worker + kubeadmConfigPatches: + - | + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "topology.kubernetes.io/zone=zone-c" containerdConfigPatches: - |- [plugins."io.containerd.grpc.v1.cri".containerd]