diff --git a/.dockerignore b/.dockerignore
index 7c504700d..da2dbb715 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1,6 +1,7 @@
# Temporary Build Files
build/_output
build/_test
+catalog/
# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
### Emacs ###
# -*- mode: gitignore; -*-
diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index 6c31783de..70bac6e19 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -113,7 +113,6 @@ jobs:
make manifests
make api-gen
make docs
- make olm
# check for uncommited changes to crds, docs or API
git diff --exit-code
make test
diff --git a/.github/workflows/operatorhub.yaml b/.github/workflows/operatorhub.yaml
index cba21058c..f120c99a9 100644
--- a/.github/workflows/operatorhub.yaml
+++ b/.github/workflows/operatorhub.yaml
@@ -38,7 +38,7 @@ jobs:
repository: ${{ matrix.repo.upstream }}
ref: main
token: ${{ secrets.VM_BOT_GH_TOKEN }}
- path: __k8s-operatorhub-repo
+ path: __operatorhub-repo
- name: Import GPG key
uses: crazy-max/ghaction-import-gpg@v6
@@ -48,58 +48,69 @@ jobs:
passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
git_user_signingkey: true
git_commit_gpgsign: true
- workdir: __k8s-operatorhub-repo
+ workdir: __operatorhub-repo
- uses: dawidd6/action-download-artifact@v11
with:
name: olm
- workflow: main.yaml
+ workflow: release.yaml
github_token: ${{ secrets.VM_BOT_GH_TOKEN }}
run_id: ${{ github.event.workflow_run.id }}
path: bundle
+ - name: Install opm
+ run: |
+ OPM_VERSION=v1.65.0
+ curl -fsSLO https://github.com/operator-framework/operator-registry/releases/download/${OPM_VERSION}/linux-amd64-opm
+ curl -fsSLO https://github.com/operator-framework/operator-registry/releases/download/${OPM_VERSION}/checksums.txt
+ grep ' linux-amd64-opm$' checksums.txt | sha256sum -c -
+ install -m 0755 linux-amd64-opm /usr/local/bin/opm
+
- name: Add operatorhub bundle
id: update
run: |
if [ ! -d bundle ]; then
echo "No bundle directory found"
- exit 1;
+ exit 1
fi
- OPERATOR_DIR=__k8s-operatorhub-repo/operators/victoriametrics-operator
- CATALOGS_DIR=__k8s-operatorhub-repo/catalogs
- mkdir -p ${CATALOGS_DIR}
+ export OPERATOR_NAME="victoriametrics-operator"
+ export OPERATOR_DIR=__operatorhub-repo/operators/${OPERATOR_NAME}
mkdir -p ${OPERATOR_DIR}
+ NEW_VERSION=$(ls bundle | head -1)
export OLD_VERSION=$(find ${OPERATOR_DIR}/* ! -path "*/catalog-templates" -maxdepth 0 -type d -exec basename {} \; | sort -V -r | head -1)
- export OLD_ENTRY="victoriametrics-operator.v${OLD_VERSION}"
-
- export NEW_VERSION=$(ls bundle | head -1)
+ export OLD_ENTRY="${OPERATOR_NAME}.v${OLD_VERSION}"
if [ ! -z $OLD_VERSION ]; then
export MANIFEST_PATH=bundle/${NEW_VERSION}/manifests/victoriametrics-operator.clusterserviceversion.yaml
yq -i '.spec.replaces = "victoriametrics-operator.v" + strenv(OLD_VERSION)' $MANIFEST_PATH
fi
- mv bundle/* ${OPERATOR_DIR}/
+ mv bundle/${NEW_VERSION} ${OPERATOR_DIR}/
if [ -f ${OPERATOR_DIR}/Makefile ]; then
- if [ ! -z $OLD_VERSION ]; then
- yq -i -I2 '.catalog_templates.[].replaces = strenv(OLD_ENTRY)' ${OPERATOR_DIR}/${NEW_VERSION}/release-config.yaml
- fi
- else
- rm -f ${OPERATOR_DIR}/${NEW_VERSION}/release-config.yaml
+ opm render ${OPERATOR_DIR}/${NEW_VERSION} --output=yaml \
+ | yq 'select(.schema == "olm.bundle")' > /tmp/new-bundle.yaml
+
+ for TEMPLATE in ${OPERATOR_DIR}/catalog-templates/*.yaml; do
+ PREV_HEAD=$(yq '.entries[] | select(.schema == "olm.channel") | .entries[-1].name' "${TEMPLATE}")
+ NEW_VERSION="${NEW_VERSION}" PREV_HEAD="${PREV_HEAD}" \
+            yq -i '(.entries[] | select(.schema == "olm.channel") | .entries) += [{"name": strenv(OPERATOR_NAME) + ".v" + strenv(NEW_VERSION), "replaces": strenv(PREV_HEAD)}]' "${TEMPLATE}"
+ yq -i '.entries += [load("/tmp/new-bundle.yaml")]' "${TEMPLATE}"
+ done
fi
- echo "VERSION=$NEW_VERSION" >> $GITHUB_OUTPUT
+
+ echo "VERSION=${NEW_VERSION}" >> $GITHUB_OUTPUT
- name: Create Pull Request
if: ${{ steps.update.outputs.VERSION != '' }}
uses: peter-evans/create-pull-request@v7
with:
- add-paths: operators/victoriametrics-operator,catalogs
+ add-paths: operators/victoriametrics-operator
commit-message: 'victoriametrics-operator: ${{ steps.update.outputs.VERSION }}'
signoff: true
committer: "Github Actions <${{ steps.import-gpg.outputs.email }}>"
- path: __k8s-operatorhub-repo
+ path: __operatorhub-repo
push-to-fork: ${{ matrix.repo.fork }}
branch: vm-operator-release-${{ steps.update.outputs.VERSION }}
token: ${{ secrets.VM_BOT_GH_TOKEN }}
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 5160ee9d4..7f27e0853 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -37,7 +37,7 @@ jobs:
make lint test build build-installer
echo ${{secrets.REPO_KEY}} | docker login --username ${{secrets.REPO_USER}} --password-stdin
echo ${{secrets.QUAY_ACCESSKEY}} | docker login quay.io --username '${{secrets.QUAY_USER}}' --password-stdin
- make publish
+ TAG=${TAG} make publish
TAG=${TAG} REGISTRY=quay.io make olm
gh release upload ${{github.event.release.tag_name}} ./dist/install-no-webhook.yaml#install-no-webhook.yaml --clobber || echo "fix me NOT enough security permissions"
gh release upload ${{github.event.release.tag_name}} ./dist/install-with-webhook.yaml#install-with-webhook.yaml --clobber || echo "fix me NOT enough security permissions"
diff --git a/.gitignore b/.gitignore
index 9b9e1e387..0496def64 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ report.xml
/bin/
/build*
/bundle*
+/catalog*
release
operator.zip
coverage.txt
diff --git a/Dockerfile b/Dockerfile
index 159250eb2..62f89e72c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -3,7 +3,7 @@ ARG BUILDINFO
ARG BASEIMAGE=scratch
# Build the manager binary
-FROM golang:1.25.8 AS builder
+FROM golang:1.25.10 AS builder
ARG TARGETOS
ARG TARGETARCH
diff --git a/Makefile b/Makefile
index 4a8139e4e..ef3832589 100644
--- a/Makefile
+++ b/Makefile
@@ -14,6 +14,7 @@ DATEINFO_TAG ?= $(shell date -u +'%Y%m%d-%H%M%S')
NAMESPACE ?= vm
OVERLAY ?= config/manager
E2E_TESTS_CONCURRENCY ?= $(shell getconf _NPROCESSORS_ONLN)
+E2E_TARGET ?= ./test/e2e/...
FIPS_VERSION=v1.0.0
BASEIMAGE ?=scratch
@@ -160,26 +161,18 @@ test: manifests generate fmt vet envtest ## Run tests.
# Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors.
.PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up.
-BASE_REF ?= origin/master
-SKIP_UPGRADE_TESTS ?= $(shell if git diff --quiet $(BASE_REF)...HEAD -- test/e2e/upgrade 2>/dev/null; then echo "--skip-package=upgrade"; fi)
-
test-e2e: load-kind ginkgo crust-gather mirrord
env CGO_ENABLED=1 OPERATOR_IMAGE=$(OPERATOR_IMAGE) REPORTS_DIR=$(shell pwd) CRUST_GATHER_BIN=$(CRUST_GATHER_BIN) $(MIRRORD_BIN) exec -f ./mirrord.json -- $(GINKGO_BIN) \
-ldflags="-linkmode=external" \
- $(SKIP_UPGRADE_TESTS) \
+ --output-interceptor-mode=none \
-procs=$(E2E_TESTS_CONCURRENCY) \
-randomize-all \
-timeout=60m \
- -junit-report=report.xml ./test/e2e/...
+ -junit-report=report.xml $(E2E_TARGET)
.PHONY: test-e2e-upgrade # Run only the e2e upgrade tests against a Kind k8s instance that is spun up.
-test-e2e-upgrade: load-kind ginkgo crust-gather mirrord
- env CGO_ENABLED=1 OPERATOR_IMAGE=$(OPERATOR_IMAGE) REPORTS_DIR=$(shell pwd) CRUST_GATHER_BIN=$(CRUST_GATHER_BIN) $(MIRRORD_BIN) exec -f ./mirrord.json -- $(GINKGO_BIN) \
- -ldflags="-linkmode=external" \
- -procs=$(E2E_TESTS_CONCURRENCY) \
- -randomize-all \
- -timeout=60m \
- -junit-report=report.xml ./test/e2e/upgrade/...
+test-e2e-upgrade: E2E_TARGET=./test/e2e/upgrade/...
+test-e2e-upgrade: test-e2e
.PHONY: lint
lint: golangci-lint ## Run golangci-lint linter
@@ -279,7 +272,7 @@ build-installer: manifests generate kustomize ## Generate a consolidated YAML wi
$(KUSTOMIZE) build config/base-with-webhook > dist/install-with-webhook.yaml
$(KUSTOMIZE) build config/crd/overlay > dist/crd.yaml
-olm: operator-sdk opm yq docs
+olm: operator-sdk yq docs
$(eval DIGEST = $(shell $(CONTAINER_TOOL) buildx imagetools inspect $(REGISTRY)/$(ORG)/$(REPO):$(TAG)-ubi --format "{{print .Manifest.Digest}}"))
rm -rf bundle*
$(OPERATOR_SDK) generate kustomize manifests -q
@@ -289,17 +282,12 @@ olm: operator-sdk opm yq docs
-q --overwrite --version $(VERSION) \
--channels=beta --default-channel=beta --output-dir=bundle/$(VERSION)
$(OPERATOR_SDK) bundle validate ./bundle/$(VERSION)
- cp config/manifests/release-config.yaml bundle/$(VERSION)/
- $(YQ) -i '.metadata.annotations.containerImage = "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)"' \
- bundle/$(VERSION)/manifests/victoriametrics-operator.clusterserviceversion.yaml
- $(YQ) -i '.spec.install.spec.deployments[0].spec.template.containers[0].image = "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)"' \
- bundle/$(VERSION)/manifests/victoriametrics-operator.clusterserviceversion.yaml
- $(YQ) -i '.spec.install.spec.deployments[0].spec.template.spec.containers[0].image = "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)"' \
+ $(YQ) -i '.metadata.annotations.containerImage = "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)" | .spec.install.spec.deployments[0].spec.template.containers[0].image = "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)" | .spec.install.spec.deployments[0].spec.template.spec.containers[0].image = "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)"' \
bundle/$(VERSION)/manifests/victoriametrics-operator.clusterserviceversion.yaml
+ $(YQ) -i '.annotations."com.redhat.openshift.versions" = "v4.12-v4.22"' \
+ bundle/$(VERSION)/metadata/annotations.yaml
$(YQ) -i '.spec.relatedImages = [{"name": "victoriametrics-operator", "image": "$(REGISTRY)/$(ORG)/$(REPO)@$(DIGEST)"}]' \
bundle/$(VERSION)/manifests/victoriametrics-operator.clusterserviceversion.yaml
- $(YQ) -i '.annotations."com.redhat.openshift.versions" = "v4.12-v4.21"' \
- bundle/$(VERSION)/metadata/annotations.yaml
##@ Deployment
diff --git a/api/operator/v1/cluster_types_test.go b/api/operator/v1/cluster_types_test.go
new file mode 100644
index 000000000..0b3e53871
--- /dev/null
+++ b/api/operator/v1/cluster_types_test.go
@@ -0,0 +1,78 @@
+package v1
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/utils/ptr"
+
+ vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
+)
+
+func TestVTCluster_AvailableStorageNodeIDs(t *testing.T) {
+ f := func(cr *VTCluster, requestsType string, want []int32) {
+ t.Helper()
+ assert.Equal(t, want, cr.AvailableStorageNodeIDs(requestsType))
+ }
+
+ cr := &VTCluster{
+ Spec: VTClusterSpec{
+ Storage: &VTStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To(int32(5)),
+ },
+ MaintenanceSelectNodeIDs: []int32{1, 3},
+ MaintenanceInsertNodeIDs: []int32{0, 4},
+ },
+ },
+ }
+
+ // select excludes maintenance nodes
+ f(cr, "select", []int32{0, 2, 4})
+
+ // insert excludes maintenance nodes
+ f(cr, "insert", []int32{1, 2, 3})
+
+ // no maintenance nodes
+ f(&VTCluster{
+ Spec: VTClusterSpec{
+ Storage: &VTStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To(int32(3))},
+ },
+ },
+ }, "select", []int32{0, 1, 2})
+}
+
+func TestVLCluster_AvailableStorageNodeIDs(t *testing.T) {
+ f := func(cr *VLCluster, requestsType string, want []int32) {
+ t.Helper()
+ assert.Equal(t, want, cr.AvailableStorageNodeIDs(requestsType))
+ }
+
+ cr := &VLCluster{
+ Spec: VLClusterSpec{
+ VLStorage: &VLStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To(int32(5)),
+ },
+ MaintenanceSelectNodeIDs: []int32{1, 3},
+ MaintenanceInsertNodeIDs: []int32{0, 4},
+ },
+ },
+ }
+
+ // select excludes maintenance nodes
+ f(cr, "select", []int32{0, 2, 4})
+
+ // insert excludes maintenance nodes
+ f(cr, "insert", []int32{1, 2, 3})
+
+ // no maintenance nodes
+ f(&VLCluster{
+ Spec: VLClusterSpec{
+ VLStorage: &VLStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To(int32(3))},
+ },
+ },
+ }, "select", []int32{0, 1, 2})
+}
diff --git a/api/operator/v1/vlagent_types.go b/api/operator/v1/vlagent_types.go
index 6f8a229e2..2b77a8112 100644
--- a/api/operator/v1/vlagent_types.go
+++ b/api/operator/v1/vlagent_types.go
@@ -18,8 +18,6 @@ import (
// VLAgentSpec defines the desired state of VLAgent
// +k8s:openapi-gen=true
type VLAgentSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the vlagent pods.
// +optional
PodMetadata *vmv1beta1.EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
@@ -188,28 +186,6 @@ func (cr *VLAgent) UseProxyProtocol() bool {
return vmv1beta1.UseProxyProtocol(cr.Spec.ExtraArgs)
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VLAgent) UnmarshalJSON(src []byte) error {
- type pcr VLAgent
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VLAgentSpec) UnmarshalJSON(src []byte) error {
- type pcr VLAgentSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vlagent spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VLAgentRemoteWriteSettings - defines global settings for all remoteWrite urls.
type VLAgentRemoteWriteSettings struct {
// The maximum size of unpacked request to send to remote storage
@@ -285,6 +261,8 @@ type VLAgentStatus struct {
// ReplicaCount Total number of pods targeted by this VLAgent
Replicas int32 `json:"replicas,omitempty"`
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contents error with context if operator was failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -366,6 +344,28 @@ func (cr *VLAgent) DefaultStatusFields(vs *VLAgentStatus) {
vs.Replicas = replicaCount
}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VLAgent) UnmarshalJSON(src []byte) error {
+ type pcr VLAgent
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VLAgentSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
+}
+
// FinalAnnotations implements build.builderOpts interface
func (cr *VLAgent) FinalAnnotations() map[string]string {
var v map[string]string
diff --git a/api/operator/v1/vlcluster_types.go b/api/operator/v1/vlcluster_types.go
index 903903c69..339918e38 100644
--- a/api/operator/v1/vlcluster_types.go
+++ b/api/operator/v1/vlcluster_types.go
@@ -34,8 +34,6 @@ import (
// VLClusterSpec defines the desired state of VLCluster
type VLClusterSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// VLSelect, VLInsert and VLStorage Pods.
@@ -195,19 +193,11 @@ func (cr *VLCluster) FinalLabels(kind vmv1beta1.ClusterComponent) map[string]str
return v
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VLClusterSpec) UnmarshalJSON(src []byte) error {
- type pcr VLClusterSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vlcluster spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VLClusterStatus defines the observed state of VLCluster
type VLClusterStatus struct {
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contents error with context if operator was failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -659,27 +649,37 @@ func (cr *VLCluster) SetLastSpec(prevSpec VLClusterSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
+// GetStatus implements reconcile.ObjectWithDeepCopyAndStatus interface
+func (cr *VLCluster) GetStatus() *VLClusterStatus {
+ return &cr.Status
+}
+
+// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
+func (cr *VLCluster) DefaultStatusFields(vs *VLClusterStatus) {
+}
+
// UnmarshalJSON implements json.Unmarshaler interface
func (cr *VLCluster) UnmarshalJSON(src []byte) error {
type pcr VLCluster
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
return err
}
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VLClusterSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
return err
}
return nil
}
-// GetStatus implements reconcile.ObjectWithDeepCopyAndStatus interface
-func (cr *VLCluster) GetStatus() *VLClusterStatus {
- return &cr.Status
-}
-
-// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
-func (cr *VLCluster) DefaultStatusFields(vs *VLClusterStatus) {
-}
-
// AsOwner returns owner references with current object as owner
func (cr *VLCluster) AsOwner() metav1.OwnerReference {
return metav1.OwnerReference{
@@ -781,21 +781,17 @@ func (cr *VLCluster) AvailableStorageNodeIDs(requestsType string) []int32 {
if cr.Spec.VLStorage == nil || cr.Spec.VLStorage.ReplicaCount == nil {
return result
}
- maintenanceNodes := make(map[int32]struct{})
+ maintenanceNodes := sets.New[int32]()
switch requestsType {
case "select":
- for _, i := range cr.Spec.VLStorage.MaintenanceSelectNodeIDs {
- maintenanceNodes[i] = struct{}{}
- }
+ maintenanceNodes.Insert(cr.Spec.VLStorage.MaintenanceSelectNodeIDs...)
case "insert":
- for _, i := range cr.Spec.VLStorage.MaintenanceInsertNodeIDs {
- maintenanceNodes[i] = struct{}{}
- }
+ maintenanceNodes.Insert(cr.Spec.VLStorage.MaintenanceInsertNodeIDs...)
default:
panic("BUG unsupported requestsType: " + requestsType)
}
for i := int32(0); i < *cr.Spec.VLStorage.ReplicaCount; i++ {
- if _, ok := maintenanceNodes[i]; ok {
+ if maintenanceNodes.Has(i) {
continue
}
result = append(result, i)
diff --git a/api/operator/v1/vlsingle_types.go b/api/operator/v1/vlsingle_types.go
index 6ed4943b1..8440b5894 100644
--- a/api/operator/v1/vlsingle_types.go
+++ b/api/operator/v1/vlsingle_types.go
@@ -33,8 +33,6 @@ import (
// VLSingleSpec defines the desired state of VLSingle
// +k8s:openapi-gen=true
type VLSingleSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the VLSingle pods.
// +optional
@@ -108,6 +106,8 @@ type VLSingleSpec struct {
// VLSingleStatus defines the observed state of VLSingle
type VLSingleStatus struct {
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contents error with context if operator was failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -151,7 +151,28 @@ func (cr *VLSingle) UseProxyProtocol() bool {
}
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
-func (cr *VLSingle) DefaultStatusFields(vs *VLSingleStatus) {
+func (cr *VLSingle) DefaultStatusFields(vs *VLSingleStatus) {}
+
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VLSingle) UnmarshalJSON(src []byte) error {
+ type pcr VLSingle
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VLSingleSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
}
// +kubebuilder:object:root=true
@@ -190,29 +211,6 @@ func (cr *VLSingle) SetLastSpec(prevSpec VLSingleSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VLSingle) UnmarshalJSON(src []byte) error {
- type pcr VLSingle
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
-
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VLSingleSpec) UnmarshalJSON(src []byte) error {
- type pcr VLSingleSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vlsingle spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
func (cr *VLSingle) ProbePath() string {
return vmv1beta1.BuildPathWithPrefixFlag(cr.Spec.ExtraArgs, healthPath)
}
diff --git a/api/operator/v1/vmanomaly_types.go b/api/operator/v1/vmanomaly_types.go
index 913ff0405..5dc3a09db 100644
--- a/api/operator/v1/vmanomaly_types.go
+++ b/api/operator/v1/vmanomaly_types.go
@@ -35,8 +35,6 @@ import (
// VMAnomalySpec defines the desired state of VMAnomaly.
// +k8s:openapi-gen=true
type VMAnomalySpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the vmanomaly pods.
// +optional
PodMetadata *vmv1beta1.EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
@@ -178,6 +176,8 @@ type VMAnomalyStatus struct {
// Shards represents total number of vmanomaly statefulsets with uniq scrape targets
Shards int32 `json:"shards,omitempty"`
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contents error with context if operator was failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -311,6 +311,28 @@ func (cr *VMAnomaly) DefaultStatusFields(vs *VMAnomalyStatus) {
vs.Shards = shardCnt
}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMAnomaly) UnmarshalJSON(src []byte) error {
+ type pcr VMAnomaly
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMAnomalySpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
+}
+
// SelectorLabels returns selector labels for vmanomaly
func (cr *VMAnomaly) SelectorLabels() map[string]string {
return map[string]string{
@@ -468,28 +490,6 @@ func (cr *VMAnomaly) UseProxyProtocol() bool {
return false
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAnomaly) UnmarshalJSON(src []byte) error {
- type pcr VMAnomaly
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAnomalySpec) UnmarshalJSON(src []byte) error {
- type pcr VMAnomalySpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmanomaly spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// +kubebuilder:object:root=true
// VMAnomalyList contains a list of VMAnomaly.
diff --git a/api/operator/v1/vtcluster_types.go b/api/operator/v1/vtcluster_types.go
index 8a8c9121f..023649f88 100644
--- a/api/operator/v1/vtcluster_types.go
+++ b/api/operator/v1/vtcluster_types.go
@@ -34,8 +34,6 @@ import (
// VTClusterSpec defines the desired state of VTCluster
type VTClusterSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// ServiceAccountName is the name of the ServiceAccount to use to run the
// VTSelect, VTInsert and VTStorage Pods.
@@ -190,19 +188,11 @@ func (cr *VTCluster) FinalLabels(kind vmv1beta1.ClusterComponent) map[string]str
return v
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VTClusterSpec) UnmarshalJSON(src []byte) error {
- type pcr VTClusterSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vtcluster spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VTClusterStatus defines the observed state of VTCluster
type VTClusterStatus struct {
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contents error with context if operator was failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -569,27 +559,37 @@ func (cr *VTCluster) SetLastSpec(prevSpec VTClusterSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
+// GetStatus implements reconcile.ObjectWithDeepCopyAndStatus interface
+func (cr *VTCluster) GetStatus() *VTClusterStatus {
+ return &cr.Status
+}
+
+// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
+func (cr *VTCluster) DefaultStatusFields(vs *VTClusterStatus) {
+}
+
// UnmarshalJSON implements json.Unmarshaler interface
func (cr *VTCluster) UnmarshalJSON(src []byte) error {
type pcr VTCluster
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
return err
}
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VTClusterSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
return err
}
return nil
}
-// GetStatus implements reconcile.ObjectWithDeepCopyAndStatus interface
-func (cr *VTCluster) GetStatus() *VTClusterStatus {
- return &cr.Status
-}
-
-// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
-func (cr *VTCluster) DefaultStatusFields(vs *VTClusterStatus) {
-}
-
// AsOwner returns owner references with current object as owner
func (cr *VTCluster) AsOwner() metav1.OwnerReference {
return metav1.OwnerReference{
@@ -692,21 +692,17 @@ func (cr *VTCluster) AvailableStorageNodeIDs(requestsType string) []int32 {
if cr.Spec.Storage == nil || cr.Spec.Storage.ReplicaCount == nil {
return result
}
- maintenanceNodes := make(map[int32]struct{})
+ maintenanceNodes := sets.New[int32]()
switch requestsType {
case "select":
- for _, i := range cr.Spec.Storage.MaintenanceSelectNodeIDs {
- maintenanceNodes[i] = struct{}{}
- }
+ maintenanceNodes.Insert(cr.Spec.Storage.MaintenanceSelectNodeIDs...)
case "insert":
- for _, i := range cr.Spec.Storage.MaintenanceInsertNodeIDs {
- maintenanceNodes[i] = struct{}{}
- }
+ maintenanceNodes.Insert(cr.Spec.Storage.MaintenanceInsertNodeIDs...)
default:
panic("BUG unsupported requestsType: " + requestsType)
}
for i := int32(0); i < *cr.Spec.Storage.ReplicaCount; i++ {
- if _, ok := maintenanceNodes[i]; ok {
+ if maintenanceNodes.Has(i) {
continue
}
result = append(result, i)
diff --git a/api/operator/v1/vtsingle_types.go b/api/operator/v1/vtsingle_types.go
index 69a4dd093..94f1b1c4a 100644
--- a/api/operator/v1/vtsingle_types.go
+++ b/api/operator/v1/vtsingle_types.go
@@ -33,8 +33,6 @@ import (
// VTSingleSpec defines the desired state of VTSingle
// +k8s:openapi-gen=true
type VTSingleSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the VTSingle pods.
// +optional
@@ -102,6 +100,8 @@ type VTSingleSpec struct {
// VTSingleStatus defines the observed state of VTSingle
type VTSingleStatus struct {
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contents error with context if operator was failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -143,6 +143,28 @@ func (cr *VTSingle) GetStatus() *VTSingleStatus {
func (cr *VTSingle) DefaultStatusFields(vs *VTSingleStatus) {
}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VTSingle) UnmarshalJSON(src []byte) error {
+ type pcr VTSingle
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VTSingleSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
+}
+
// +kubebuilder:object:root=true
// VTSingleList contains a list of VTSingle
@@ -179,29 +201,6 @@ func (cr *VTSingle) SetLastSpec(prevSpec VTSingleSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VTSingle) UnmarshalJSON(src []byte) error {
- type pcr VTSingle
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := vmv1beta1.ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
-
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VTSingleSpec) UnmarshalJSON(src []byte) error {
- type pcr VTSingleSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vtsingle spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// ProbePath implements build.probeCRD interface
func (cr *VTSingle) ProbePath() string {
return vmv1beta1.BuildPathWithPrefixFlag(cr.Spec.ExtraArgs, healthPath)
diff --git a/api/operator/v1alpha1/vmdistributed_types.go b/api/operator/v1alpha1/vmdistributed_types.go
index 213dd17e2..a9046bc02 100644
--- a/api/operator/v1alpha1/vmdistributed_types.go
+++ b/api/operator/v1alpha1/vmdistributed_types.go
@@ -24,6 +24,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -37,8 +38,6 @@ const (
// VMDistributedSpec defines configurable parameters for VMDistributed CR
// +k8s:openapi-gen=true
type VMDistributedSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// VMAuth is a VMAuth definition (name + optional spec) that acts as a proxy for the VMUsers created by the operator.
// Use an inline spec to define a VMAuth object in-place or provide a name to reference an existing VMAuth.
// +optional
@@ -154,8 +153,6 @@ type VMDistributedZoneAgent struct {
// VMDistributedZoneAgentSpec is a customized specification of a new VMAgent.
// It includes selected options from the original VMAgentSpec.
type VMDistributedZoneAgentSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the vmagent pods.
// +optional
PodMetadata *vmv1beta1.EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
@@ -308,6 +305,8 @@ type VMDistributedAuth struct {
// VMDistributedStatus defines the observed state of VMDistributedStatus
type VMDistributedStatus struct {
vmv1beta1.StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="VMDistributed App"
@@ -422,20 +421,32 @@ func (cr *VMDistributed) SelectorLabels() map[string]string {
}
// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMDistributedSpec) UnmarshalJSON(src []byte) error {
- type pcr VMDistributedSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse VMDistributed spec: %s, err: %s", string(src), err)
- return nil
+func (cr *VMDistributed) UnmarshalJSON(src []byte) error {
+ type pcr VMDistributed
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
}
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMDistributedSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
return nil
}
// Validate validates the VMDistributed resource
func (cr *VMDistributed) Validate() error {
- zones := make(map[string]struct{})
- clusters := make(map[string]struct{})
- agents := make(map[string]struct{})
+ zones := sets.New[string]()
+ clusters := sets.New[string]()
+ agents := sets.New[string]()
spec := cr.Spec
hasCommonVMInsert := cr.Spec.ZoneCommon.VMCluster.Spec.VMInsert != nil
hasCommonVMSelect := cr.Spec.ZoneCommon.VMCluster.Spec.VMSelect != nil
@@ -444,23 +455,23 @@ func (cr *VMDistributed) Validate() error {
if len(zone.Name) == 0 {
return fmt.Errorf("spec.zones[%d].name is required", i)
}
- if _, ok := zones[zone.Name]; ok {
+ if zones.Has(zone.Name) {
return fmt.Errorf("spec.zones[%d].name=%s is duplicated, zone names must be unique", i, zone.Name)
}
- zones[zone.Name] = struct{}{}
+ zones.Insert(zone.Name)
clusterName := zone.VMClusterName(cr)
agentName := zone.VMAgentName(cr)
if len(clusterName) > 0 {
- if _, ok := clusters[clusterName]; ok {
+ if clusters.Has(clusterName) {
return fmt.Errorf("spec.zones[%d].vmcluster.name=%s is already added in a different zone", i, clusterName)
}
- clusters[clusterName] = struct{}{}
+ clusters.Insert(clusterName)
}
if len(agentName) > 0 {
- if _, ok := agents[agentName]; ok {
+ if agents.Has(agentName) {
return fmt.Errorf("spec.zones[%d].vmagent.name=%s is already added in a different zone", i, agentName)
}
- agents[agentName] = struct{}{}
+ agents.Insert(agentName)
}
if zone.VMAgent.Spec.StatefulMode {
if zone.VMAgent.Spec.StatefulRollingUpdateStrategyBehavior != nil {
diff --git a/api/operator/v1beta1/vlogs_types.go b/api/operator/v1beta1/vlogs_types.go
index 9ac51935c..613ba68fa 100644
--- a/api/operator/v1beta1/vlogs_types.go
+++ b/api/operator/v1beta1/vlogs_types.go
@@ -34,9 +34,6 @@ import (
// +kubebuilder:validation:Schemaless
// +kubebuilder:pruning:PreserveUnknownFields
type VLogsSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
-
// PodMetadata configures Labels and Annotations which are propagated to the VLogs pods.
// +optional
PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
@@ -94,6 +91,8 @@ type VLogsSpec struct {
// VLogsStatus defines the observed state of VLogs
type VLogsStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -174,23 +173,22 @@ func (cr *VLogs) SetLastSpec(prevSpec VLogsSpec) {
// UnmarshalJSON implements json.Unmarshaler interface
func (cr *VLogs) UnmarshalJSON(src []byte) error {
type pcr VLogs
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
return err
}
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VLogsSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
if err := ParseLastAppliedStateTo(cr); err != nil {
return err
}
-
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VLogsSpec) UnmarshalJSON(src []byte) error {
- type pcr VLogsSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vlogs spec: %s, err: %s", string(src), err)
- return nil
- }
return nil
}
diff --git a/api/operator/v1beta1/vmagent_types.go b/api/operator/v1beta1/vmagent_types.go
index 519aec153..b2f82fb80 100644
--- a/api/operator/v1beta1/vmagent_types.go
+++ b/api/operator/v1beta1/vmagent_types.go
@@ -10,6 +10,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -17,8 +18,6 @@ import (
// VMAgentSpec defines the desired state of VMAgent
// +k8s:openapi-gen=true
type VMAgentSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the vmagent pods.
// +optional
PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
@@ -177,10 +176,10 @@ func (cr *VMAgent) Validate() error {
return fmt.Errorf("enableKubernetesAPISelectors cannot be used with daemonSetMode")
}
}
- scrapeClassNames := make(map[string]struct{})
+ scrapeClassNames := sets.New[string]()
defaultScrapeClass := false
for _, sc := range cr.Spec.ScrapeClasses {
- if _, ok := scrapeClassNames[sc.Name]; ok {
+ if scrapeClassNames.Has(sc.Name) {
return fmt.Errorf("duplicated scrapeClass=%q", sc.Name)
}
if ptr.Deref(sc.Default, false) {
@@ -203,6 +202,7 @@ func (cr *VMAgent) Validate() error {
if err := sc.validate(); err != nil {
return fmt.Errorf("incorrect relabeling for scrapeClass=%q: %w", sc.Name, err)
}
+ scrapeClassNames.Insert(sc.Name)
}
return nil
}
@@ -248,25 +248,25 @@ func (cr *VMAgent) AutomountServiceAccountToken() bool {
// UnmarshalJSON implements json.Unmarshaler interface
func (cr *VMAgent) UnmarshalJSON(src []byte) error {
type pcr VMAgent
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
return err
}
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMAgentSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
if err := ParseLastAppliedStateTo(cr); err != nil {
return err
}
return nil
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAgentSpec) UnmarshalJSON(src []byte) error {
- type pcr VMAgentSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmagent spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMAgentRemoteWriteSettings - defines global settings for all remoteWrite urls.
type VMAgentRemoteWriteSettings struct {
// The maximum size in bytes of unpacked request to send to remote storage
@@ -412,6 +412,8 @@ type VMAgentStatus struct {
// ReplicaCount Total number of pods targeted by this VMAgent
Replicas int32 `json:"replicas,omitempty"`
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -619,7 +621,7 @@ func (cr *VMAgent) ScrapeSelectors(scrape client.Object) (*metav1.LabelSelector,
// IsUnmanaged checks if object should managed any config objects
func (cr *VMAgent) IsUnmanaged(scrape client.Object) bool {
- if !cr.DeletionTimestamp.IsZero() || cr.Spec.ParsingError != "" {
+ if !cr.DeletionTimestamp.IsZero() || cr.Status.ParsingSpecError != "" {
return true
}
if scrape == nil {
diff --git a/api/operator/v1beta1/vmagent_types_test.go b/api/operator/v1beta1/vmagent_types_test.go
index 48455a5db..08d5a101d 100644
--- a/api/operator/v1beta1/vmagent_types_test.go
+++ b/api/operator/v1beta1/vmagent_types_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "k8s.io/utils/ptr"
)
func TestVMAgent_Validate(t *testing.T) {
@@ -54,6 +55,28 @@ func TestVMAgent_Validate(t *testing.T) {
},
}, false)
+ // duplicate scrape class names
+ f(VMAgentSpec{
+ RemoteWrite: []VMAgentRemoteWriteSpec{{URL: "http://some-rw"}},
+ CommonScrapeParams: CommonScrapeParams{
+ ScrapeClasses: []ScrapeClass{
+ {Name: "class-a"},
+ {Name: "class-a"},
+ },
+ },
+ }, true)
+
+ // multiple default scrape classes
+ f(VMAgentSpec{
+ RemoteWrite: []VMAgentRemoteWriteSpec{{URL: "http://some-rw"}},
+ CommonScrapeParams: CommonScrapeParams{
+ ScrapeClasses: []ScrapeClass{
+ {Name: "class-a", Default: ptr.To(true)},
+ {Name: "class-b", Default: ptr.To(true)},
+ },
+ },
+ }, true)
+
// relabeling with if array
f(VMAgentSpec{
RemoteWrite: []VMAgentRemoteWriteSpec{{URL: "http://some-rw"}},
diff --git a/api/operator/v1beta1/vmalert_types.go b/api/operator/v1beta1/vmalert_types.go
index bddeb117a..7114c4e4f 100644
--- a/api/operator/v1beta1/vmalert_types.go
+++ b/api/operator/v1beta1/vmalert_types.go
@@ -23,8 +23,6 @@ const (
// VMAlertSpec defines the desired state of VMAlert
// +k8s:openapi-gen=true
type VMAlertSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the VMAlert pods.
PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
// ManagedMetadata defines metadata that will be added to the all objects
@@ -179,23 +177,22 @@ func (cr *VMAlert) AutomountServiceAccountToken() bool {
// UnmarshalJSON implements json.Unmarshaler interface
func (cr *VMAlert) UnmarshalJSON(src []byte) error {
type pcr VMAlert
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
return err
}
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMAlertSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
if err := ParseLastAppliedStateTo(cr); err != nil {
return err
}
-
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAlertSpec) UnmarshalJSON(src []byte) error {
- type pcr VMAlertSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmalert spec: %s, err: %s", string(src), err)
- return nil
- }
return nil
}
@@ -276,6 +273,8 @@ type VMAlertRemoteWriteSpec struct {
// +k8s:openapi-gen=true
type VMAlertStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -498,6 +497,9 @@ func (cr *VMAlert) AsURL() string {
// IsUnmanaged checks if object should managed any config objects
func (cr *VMAlert) IsUnmanaged() bool {
+ if !cr.DeletionTimestamp.IsZero() || cr.Status.ParsingSpecError != "" {
+ return true
+ }
return !cr.Spec.SelectAllByDefault && cr.Spec.RuleSelector == nil && cr.Spec.RuleNamespaceSelector == nil
}
diff --git a/api/operator/v1beta1/vmalertmanager_types.go b/api/operator/v1beta1/vmalertmanager_types.go
index 2b8f7cd71..a3bd60ae8 100644
--- a/api/operator/v1beta1/vmalertmanager_types.go
+++ b/api/operator/v1beta1/vmalertmanager_types.go
@@ -49,8 +49,6 @@ type VMAlertmanager struct {
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +k8s:openapi-gen=true
type VMAlertmanagerSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods.
// +optional
@@ -243,29 +241,6 @@ func (cr *VMAlertmanager) AutomountServiceAccountToken() bool {
return !cr.Spec.DisableAutomountServiceAccountToken
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAlertmanager) UnmarshalJSON(src []byte) error {
- type pcr VMAlertmanager
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
-
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAlertmanagerSpec) UnmarshalJSON(src []byte) error {
- type pcr VMAlertmanagerSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmalertmanager spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMAlertmanagerList is a list of Alertmanagers.
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -282,6 +257,8 @@ type VMAlertmanagerList struct {
// Operator API itself. More info:
type VMAlertmanagerStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -295,7 +272,28 @@ func (cr *VMAlertmanager) GetStatus() *VMAlertmanagerStatus {
}
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
-func (cr *VMAlertmanager) DefaultStatusFields(vs *VMAlertmanagerStatus) {
+func (cr *VMAlertmanager) DefaultStatusFields(vs *VMAlertmanagerStatus) {}
+
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMAlertmanager) UnmarshalJSON(src []byte) error {
+ type pcr VMAlertmanager
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMAlertmanagerSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
}
// AsOwner returns owner references with current object as owner
@@ -486,6 +484,9 @@ func (*VMAlertmanager) ProbeNeedLiveness() bool {
// IsUnmanaged checks if alertmanager should managed any alertmanager config objects
func (cr *VMAlertmanager) IsUnmanaged() bool {
+ if !cr.DeletionTimestamp.IsZero() || cr.Status.ParsingSpecError != "" {
+ return true
+ }
return !cr.Spec.SelectAllByDefault && cr.Spec.ConfigSelector == nil && cr.Spec.ConfigNamespaceSelector == nil
}
diff --git a/api/operator/v1beta1/vmalertmanagerconfig_types.go b/api/operator/v1beta1/vmalertmanagerconfig_types.go
index 09fb05744..f55a8dfe0 100644
--- a/api/operator/v1beta1/vmalertmanagerconfig_types.go
+++ b/api/operator/v1beta1/vmalertmanagerconfig_types.go
@@ -36,6 +36,7 @@ import (
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
)
// VMAlertmanagerConfigSpec defines configuration for VMAlertmanagerConfig
@@ -55,8 +56,6 @@ type VMAlertmanagerConfigSpec struct {
// See https://prometheus.io/docs/alerting/latest/configuration/#time_interval
// +optional
TimeIntervals []TimeIntervals `json:"time_intervals,omitempty" yaml:"time_intervals,omitempty"`
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
}
// TimeIntervals for alerts
@@ -121,12 +120,12 @@ func (r *VMAlertmanagerConfig) Validate() error {
if MustSkipCRValidation(r) {
return nil
}
- receivers := make(map[string]struct{})
+ receivers := sets.New[string]()
for idx, recv := range r.Spec.Receivers {
- if _, ok := receivers[recv.Name]; ok {
+ if receivers.Has(recv.Name) {
return fmt.Errorf("notification config name %q is not unique", recv.Name)
}
- receivers[recv.Name] = struct{}{}
+ receivers.Insert(recv.Name)
if err := validateReceiver(recv); err != nil {
return fmt.Errorf("receiver at idx=%d is invalid: %w", idx, err)
}
@@ -172,6 +171,8 @@ type VMAlertmanagerConfigStatus struct {
// reconcile
StatusMetadata `json:",inline"`
LastErrorParentAlertmanagerName string `json:"lastErrorParentAlertmanagerName,omitempty"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// VMAlertmanagerConfig is the Schema for the vmalertmanagerconfigs API
@@ -279,18 +280,26 @@ func parseNestedRoutes(src *Route) error {
}
// UnmarshalJSON implements json.Unmarshaler interface
-func (r *VMAlertmanagerConfig) UnmarshalJSON(src []byte) error {
- type amcfg VMAlertmanagerConfig
- if err := json.Unmarshal(src, (*amcfg)(r)); err != nil {
- r.Spec.ParsingError = fmt.Sprintf("cannot parse alertmanager config: %s, err: %s", string(src), err)
- return nil
+func (cr *VMAlertmanagerConfig) UnmarshalJSON(src []byte) error {
+ type pcr VMAlertmanagerConfig
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
}
-
- if err := parseNestedRoutes(r.Spec.Route); err != nil {
- r.Spec.ParsingError = fmt.Sprintf("cannot parse routes for alertmanager config: %s at namespace: %s, err: %s", r.Name, r.Namespace, err)
- return nil
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMAlertmanagerConfigSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := parseNestedRoutes(cr.Spec.Route); err != nil {
+ if len(cr.Status.ParsingSpecError) == 0 {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse routes for VMAlertmanagerConfig: %s at namespace: %s, err: %s", cr.Name, cr.Namespace, err)
+ }
}
-
return nil
}
@@ -1506,17 +1515,17 @@ func parseTime(in string) (mins int, err error) {
return mins, nil
}
-func validateTimeIntervals(timeIntervals []TimeIntervals) (map[string]struct{}, error) {
- timeIntervalNames := make(map[string]struct{}, len(timeIntervals))
+func validateTimeIntervals(timeIntervals []TimeIntervals) (sets.Set[string], error) {
+ timeIntervalNames := sets.New[string]()
for idx, ti := range timeIntervals {
if err := validateTimeIntervalsEntry(&ti); err != nil {
return nil, fmt.Errorf("time interval at idx=%d is invalid: %w", idx, err)
}
- if _, ok := timeIntervalNames[ti.Name]; ok {
+ if timeIntervalNames.Has(ti.Name) {
return nil, fmt.Errorf("time interval at idx=%d is not unique with name=%q", idx, ti.Name)
}
- timeIntervalNames[ti.Name] = struct{}{}
+ timeIntervalNames.Insert(ti.Name)
}
return timeIntervalNames, nil
}
@@ -1527,21 +1536,21 @@ var opsgenieTypeMatcher = regexp.MustCompile(opsgenieValidTypesRe)
// checkRouteReceiver returns an error if a node in the routing tree
// references a receiver not in the given map.
-func checkRouteReceiver(r *SubRoute, receivers map[string]struct{}, tiNames map[string]struct{}) error {
+func checkRouteReceiver(r *SubRoute, receivers sets.Set[string], tiNames sets.Set[string]) error {
for _, ti := range r.ActiveTimeIntervals {
- if _, ok := tiNames[ti]; !ok {
+ if !tiNames.Has(ti) {
return fmt.Errorf("undefined time interval %q used in route", ti)
}
}
for _, ti := range r.MuteTimeIntervals {
- if _, ok := tiNames[ti]; !ok {
+ if !tiNames.Has(ti) {
return fmt.Errorf("undefined time interval %q used in route", ti)
}
}
if r.Receiver == "" {
return nil
}
- if _, ok := receivers[r.Receiver]; !ok {
+ if !receivers.Has(r.Receiver) {
return fmt.Errorf("undefined receiver %q used in route", r.Receiver)
}
for idx, sr := range r.Routes {
diff --git a/api/operator/v1beta1/vmalertmanagerconfig_types_test.go b/api/operator/v1beta1/vmalertmanagerconfig_types_test.go
index 75062f3eb..68c16a6b8 100644
--- a/api/operator/v1beta1/vmalertmanagerconfig_types_test.go
+++ b/api/operator/v1beta1/vmalertmanagerconfig_types_test.go
@@ -13,11 +13,11 @@ func TestValidateVMAlertmanagerConfigFail(t *testing.T) {
t.Helper()
var amc VMAlertmanagerConfig
assert.NoError(t, json.Unmarshal([]byte(src), &amc))
- if len(amc.Spec.ParsingError) > 0 {
- if strings.Contains(amc.Spec.ParsingError, expectedReason) {
+ if len(amc.Status.ParsingSpecError) > 0 {
+ if strings.Contains(amc.Status.ParsingSpecError, expectedReason) {
return
}
- t.Fatalf("unexpected parsing error: %s", amc.Spec.ParsingError)
+ t.Fatalf("unexpected parsing error: %s", amc.Status.ParsingSpecError)
}
err := amc.Validate()
if assert.Error(t, err, "expect error:\n%s\n got nil", expectedReason) {
@@ -403,7 +403,7 @@ func TestValidateVMAlertmanagerConfigOk(t *testing.T) {
t.Helper()
var amc VMAlertmanagerConfig
assert.NoError(t, json.Unmarshal([]byte(src), &amc))
- assert.Empty(t, amc.Spec.ParsingError)
+ assert.Empty(t, amc.Status.ParsingSpecError)
assert.NoError(t, amc.Validate())
}
f(`{
diff --git a/api/operator/v1beta1/vmauth_types.go b/api/operator/v1beta1/vmauth_types.go
index b32748550..cfae5d1de 100644
--- a/api/operator/v1beta1/vmauth_types.go
+++ b/api/operator/v1beta1/vmauth_types.go
@@ -21,8 +21,6 @@ var labelNameRegexp = regexp.MustCompile("^[a-zA-Z_:.][a-zA-Z0-9_:.]*$")
// VMAuthSpec defines the desired state of VMAuth
type VMAuthSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the VMAuth pods.
// +optional
PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty" yaml:"podMetadata,omitempty"`
@@ -398,19 +396,6 @@ func (cr *VMAuth) SetLastSpec(prevSpec VMAuthSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAuth) UnmarshalJSON(src []byte) error {
- type pcr VMAuth
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
-
- return nil
-}
-
func (cr *VMAuth) Validate() error {
if MustSkipCRValidation(cr) {
return nil
@@ -479,16 +464,6 @@ func (cr *VMAuth) Validate() error {
return nil
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMAuthSpec) UnmarshalJSON(src []byte) error {
- type pcr VMAuthSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmauth spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// EmbeddedHTTPRoute describes httproute configuration options.
//
// Requires gateway-controller CRD installed and VM_GATEWAY_API_ENABLED=true env var
@@ -541,6 +516,8 @@ type EmbeddedIngress struct {
// VMAuthStatus defines the observed state of VMAuth
type VMAuthStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -577,6 +554,28 @@ func (cr *VMAuth) GetStatus() *VMAuthStatus {
func (cr *VMAuth) DefaultStatusFields(vs *VMAuthStatus) {
}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMAuth) UnmarshalJSON(src []byte) error {
+ type pcr VMAuth
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMAuthSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
+}
+
func (cr *VMAuth) ProbePath() string {
return BuildPathWithPrefixFlag(cr.Spec.ExtraArgs, healthPath)
}
@@ -704,6 +703,9 @@ func (cr *VMAuth) IsOwnsServiceAccount() bool {
// IsUnmanaged checks if object should managed any config objects
func (cr *VMAuth) IsUnmanaged() bool {
+ if !cr.DeletionTimestamp.IsZero() || cr.Status.ParsingSpecError != "" {
+ return true
+ }
return (!cr.Spec.SelectAllByDefault && cr.Spec.UserSelector == nil && cr.Spec.UserNamespaceSelector == nil) ||
cr.Spec.SecretRef != nil ||
cr.Spec.LocalPath != ""
diff --git a/api/operator/v1beta1/vmcluster_types.go b/api/operator/v1beta1/vmcluster_types.go
index 3752a2bdf..d4b2b1dad 100644
--- a/api/operator/v1beta1/vmcluster_types.go
+++ b/api/operator/v1beta1/vmcluster_types.go
@@ -9,6 +9,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -16,8 +17,6 @@ import (
// VMClusterSpec defines the desired state of VMCluster
// +k8s:openapi-gen=true
type VMClusterSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// RetentionPeriod defines how long to retain stored metrics, specified as a duration (e.g., "1d", "1w", "1m").
// Data with timestamps outside the RetentionPeriod is automatically deleted. The minimum allowed value is 1d, or 24h.
// The default value is 1 (one month).
@@ -179,28 +178,6 @@ func (cr *VMCluster) SetLastSpec(prevSpec VMClusterSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMCluster) UnmarshalJSON(src []byte) error {
- type pcr VMCluster
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
- return nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMClusterSpec) UnmarshalJSON(src []byte) error {
- type pcr VMClusterSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmcluster spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMCluster is fast, cost-effective and scalable time-series database.
// Cluster version with
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="VMCluster App"
@@ -238,6 +215,28 @@ func (cr *VMCluster) GetStatus() *VMClusterStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMCluster) DefaultStatusFields(vs *VMClusterStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMCluster) UnmarshalJSON(src []byte) error {
+ type pcr VMCluster
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMClusterSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
+}
+
// AsOwner returns owner references with current object as owner
func (cr *VMCluster) AsOwner() metav1.OwnerReference {
return metav1.OwnerReference{
@@ -253,6 +252,8 @@ func (cr *VMCluster) AsOwner() metav1.OwnerReference {
// VMClusterStatus defines the observed state of VMCluster
type VMClusterStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains error with context if operator failed to parse json object from kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata returns metadata for object status
@@ -719,21 +720,17 @@ func (cr *VMCluster) AvailableStorageNodeIDs(requestsType string) []int32 {
if cr.Spec.VMStorage == nil || cr.Spec.VMStorage.ReplicaCount == nil {
return result
}
- maintenanceNodes := make(map[int32]struct{})
+ maintenanceNodes := sets.New[int32]()
switch requestsType {
case "select":
- for _, i := range cr.Spec.VMStorage.MaintenanceSelectNodeIDs {
- maintenanceNodes[i] = struct{}{}
- }
+ maintenanceNodes.Insert(cr.Spec.VMStorage.MaintenanceSelectNodeIDs...)
case "insert":
- for _, i := range cr.Spec.VMStorage.MaintenanceInsertNodeIDs {
- maintenanceNodes[i] = struct{}{}
- }
+ maintenanceNodes.Insert(cr.Spec.VMStorage.MaintenanceInsertNodeIDs...)
default:
panic("BUG unsupported requestsType: " + requestsType)
}
for i := int32(0); i < *cr.Spec.VMStorage.ReplicaCount; i++ {
- if _, ok := maintenanceNodes[i]; ok {
+ if maintenanceNodes.Has(i) {
continue
}
result = append(result, i)
diff --git a/api/operator/v1beta1/vmcluster_types_test.go b/api/operator/v1beta1/vmcluster_types_test.go
index af34f69d4..032f8f591 100644
--- a/api/operator/v1beta1/vmcluster_types_test.go
+++ b/api/operator/v1beta1/vmcluster_types_test.go
@@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "k8s.io/utils/ptr"
)
func TestVMBackup_SnapshotDeletePathWithFlags(t *testing.T) {
@@ -86,3 +87,37 @@ func TestVMBackup_SnapshotCreatePathWithFlags(t *testing.T) {
want: "http://localhost:8429/prefix/custom/snapshot/create?authKey=some-auth-key",
})
}
+
+func TestVMCluster_AvailableStorageNodeIDs(t *testing.T) {
+ f := func(cr *VMCluster, requestsType string, want []int32) {
+ t.Helper()
+ assert.Equal(t, want, cr.AvailableStorageNodeIDs(requestsType))
+ }
+
+ cr := &VMCluster{
+ Spec: VMClusterSpec{
+ VMStorage: &VMStorage{
+ CommonAppsParams: CommonAppsParams{
+ ReplicaCount: ptr.To(int32(5)),
+ },
+ MaintenanceSelectNodeIDs: []int32{1, 3},
+ MaintenanceInsertNodeIDs: []int32{0, 4},
+ },
+ },
+ }
+
+ // select excludes maintenance nodes
+ f(cr, "select", []int32{0, 2, 4})
+
+ // insert excludes maintenance nodes
+ f(cr, "insert", []int32{1, 2, 3})
+
+ // no maintenance nodes
+ f(&VMCluster{
+ Spec: VMClusterSpec{
+ VMStorage: &VMStorage{
+ CommonAppsParams: CommonAppsParams{ReplicaCount: ptr.To(int32(3))},
+ },
+ },
+ }, "select", []int32{0, 1, 2})
+}
diff --git a/api/operator/v1beta1/vmextra_types.go b/api/operator/v1beta1/vmextra_types.go
index 2dae14b9e..61642e1d2 100644
--- a/api/operator/v1beta1/vmextra_types.go
+++ b/api/operator/v1beta1/vmextra_types.go
@@ -188,23 +188,39 @@ type StorageSpec struct {
// IntoSTSVolume converts storageSpec into proper volume for statefulsetSpec
// by default, it adds emptyDir volume.
-func (ss *StorageSpec) IntoSTSVolume(name string, sts *appsv1.StatefulSetSpec) {
+func (ss *StorageSpec) IntoSTSVolume(name string, sts *appsv1.StatefulSetSpec) error {
+ podSpec := &sts.Template.Spec
+ foundVolume := false
+ for _, volume := range podSpec.Volumes {
+ if volume.Name == name {
+ foundVolume = true
+ }
+ }
switch {
case ss == nil:
- sts.Template.Spec.Volumes = append(sts.Template.Spec.Volumes, corev1.Volume{
+ if foundVolume {
+ return nil
+ }
+ podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
Name: name,
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
})
case ss.EmptyDir != nil:
- sts.Template.Spec.Volumes = append(sts.Template.Spec.Volumes, corev1.Volume{
+ if foundVolume {
+ return fmt.Errorf("either unset storage.emptyDir or remove volume=%q from spec", name)
+ }
+ podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{
Name: name,
VolumeSource: corev1.VolumeSource{
EmptyDir: ss.EmptyDir,
},
})
default:
+ if foundVolume {
+ return fmt.Errorf("either unset storage.volumeClaimTemplate or remove volume=%q from spec", name)
+ }
claimTemplate := ss.VolumeClaimTemplate
stsClaim := corev1.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
@@ -224,6 +240,7 @@ func (ss *StorageSpec) IntoSTSVolume(name string, sts *appsv1.StatefulSetSpec) {
}
sts.VolumeClaimTemplates = append(sts.VolumeClaimTemplates, stsClaim)
}
+ return nil
}
// EmbeddedPersistentVolumeClaim is an embedded version of k8s.io/api/core/v1.PersistentVolumeClaim.
@@ -1012,6 +1029,8 @@ type TLSClientConfig struct {
// ScrapeObjectStatus defines the observed state of ScrapeObjects
type ScrapeObjectStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains an error with context if the operator failed to parse the json object from the kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
type objectWithLastAppliedState[T, ST any] interface {
diff --git a/api/operator/v1beta1/vmnodescrape_types.go b/api/operator/v1beta1/vmnodescrape_types.go
index b76758ed1..9c440f039 100644
--- a/api/operator/v1beta1/vmnodescrape_types.go
+++ b/api/operator/v1beta1/vmnodescrape_types.go
@@ -7,12 +7,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var _ json.Unmarshaler = (*VMNodeScrapeSpec)(nil)
+var _ json.Unmarshaler = (*VMNodeScrape)(nil)
// VMNodeScrapeSpec defines specification for VMNodeScrape.
type VMNodeScrapeSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// The label to use to retrieve the job name from.
// +optional
JobLabel string `json:"jobLabel,omitempty"`
@@ -37,16 +35,6 @@ type VMNodeScrapeSpec struct {
ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMNodeScrapeSpec) UnmarshalJSON(src []byte) error {
- type pcr VMNodeScrapeSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMNodeScrape defines discovery for targets placed on kubernetes nodes,
// usually its node-exporters and other host services.
// InternalIP is used as __address__ for scraping.
@@ -95,6 +83,25 @@ func (cr *VMNodeScrape) GetStatus() *ScrapeObjectStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMNodeScrape) DefaultStatusFields(vs *ScrapeObjectStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMNodeScrape) UnmarshalJSON(src []byte) error {
+ type pcr VMNodeScrape
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMNodeScrapeSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
// AsKey returns unique key for object
func (cr *VMNodeScrape) AsKey(_ bool) string {
return cr.Namespace + "/" + cr.Name
diff --git a/api/operator/v1beta1/vmpodscrape_types.go b/api/operator/v1beta1/vmpodscrape_types.go
index 9903bc2ff..6e1d1fbdb 100644
--- a/api/operator/v1beta1/vmpodscrape_types.go
+++ b/api/operator/v1beta1/vmpodscrape_types.go
@@ -8,12 +8,10 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
-var _ json.Unmarshaler = (*VMPodScrapeSpec)(nil)
+var _ json.Unmarshaler = (*VMPodScrape)(nil)
// VMPodScrapeSpec defines the desired state of VMPodScrape
type VMPodScrapeSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// The label to use to retrieve the job name from.
// +optional
JobLabel string `json:"jobLabel,omitempty"`
@@ -46,16 +44,6 @@ type VMPodScrapeSpec struct {
ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMPodScrapeSpec) UnmarshalJSON(src []byte) error {
- type pcr VMPodScrapeSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMPodScrape is scrape configuration for pods,
// it generates vmagent's config for scraping pod targets
// based on selectors.
@@ -160,6 +148,25 @@ func (cr *VMPodScrape) GetStatus() *ScrapeObjectStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMPodScrape) DefaultStatusFields(vs *ScrapeObjectStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMPodScrape) UnmarshalJSON(src []byte) error {
+ type pcr VMPodScrape
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMPodScrapeSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
func init() {
SchemeBuilder.Register(&VMPodScrape{}, &VMPodScrapeList{})
}
diff --git a/api/operator/v1beta1/vmprobe_types.go b/api/operator/v1beta1/vmprobe_types.go
index 167ce57d7..f458102e5 100644
--- a/api/operator/v1beta1/vmprobe_types.go
+++ b/api/operator/v1beta1/vmprobe_types.go
@@ -23,13 +23,11 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var _ json.Unmarshaler = (*VMProbeSpec)(nil)
+var _ json.Unmarshaler = (*VMProbe)(nil)
// VMProbeSpec contains specification parameters for a Probe.
// +k8s:openapi-gen=true
type VMProbeSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// The job name assigned to scraped metrics by default.
JobName string `json:"jobName,omitempty"`
// Specification for the prober to use for probing targets.
@@ -52,16 +50,6 @@ type VMProbeSpec struct {
ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMProbeSpec) UnmarshalJSON(src []byte) error {
- type pcr VMProbeSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMProbeTargets defines a set of static and dynamically discovered targets for the prober.
// +k8s:openapi-gen=true
type VMProbeTargets struct {
@@ -175,6 +163,25 @@ func (cr *VMProbe) GetStatus() *ScrapeObjectStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMProbe) DefaultStatusFields(vs *ScrapeObjectStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMProbe) UnmarshalJSON(src []byte) error {
+ type pcr VMProbe
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMProbeSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
// AsKey returns unique key for object
func (cr *VMProbe) AsKey(_ bool) string {
return cr.Namespace + "/" + cr.Name
diff --git a/api/operator/v1beta1/vmrule_types.go b/api/operator/v1beta1/vmrule_types.go
index 4f70f4423..b292a4905 100644
--- a/api/operator/v1beta1/vmrule_types.go
+++ b/api/operator/v1beta1/vmrule_types.go
@@ -14,6 +14,7 @@ import (
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
)
// MaxConfigMapDataSize is a maximum `Data` field size of a ConfigMap.
@@ -26,8 +27,6 @@ var initVMAlertTemplatesOnce sync.Once
type VMRuleSpec struct {
// Groups list of group rules
Groups []RuleGroup `json:"groups"`
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
}
// RuleGroup is a list of sequentially evaluated recording and alerting rules.
@@ -134,6 +133,8 @@ type Rule struct {
// VMRuleStatus defines the observed state of VMRule
type VMRuleStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains an error with context if the operator failed to parse the json object from the kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// GetStatusMetadata implements reconcile.objectWithStatus interface
@@ -165,7 +166,7 @@ func (cr *VMRule) Validate() error {
panic(fmt.Sprintf("cannot init vmalert templates for validation: %s", err))
}
})
- uniqNames := make(map[string]struct{})
+ uniqNames := sets.New[string]()
var totalSize int
for i := range cr.Spec.Groups {
// make a copy
@@ -179,10 +180,10 @@ func (cr *VMRule) Validate() error {
group.Tenant = ""
}
errContext := fmt.Sprintf("VMRule: %s/%s group: %s", cr.Namespace, cr.Name, group.Name)
- if _, ok := uniqNames[group.Name]; ok {
+ if uniqNames.Has(group.Name) {
return fmt.Errorf("duplicate group name: %s", errContext)
}
- uniqNames[group.Name] = struct{}{}
+ uniqNames.Insert(group.Name)
groupBytes, err := yaml.Marshal(group)
if err != nil {
return fmt.Errorf("cannot marshal %s, err: %w", errContext, err)
@@ -242,11 +243,20 @@ type VMRule struct {
}
// UnmarshalJSON implements json.Unmarshaler interface
-func (r *VMRule) UnmarshalJSON(src []byte) error {
- type rcfg VMRule
- if err := json.Unmarshal(src, (*rcfg)(r)); err != nil {
- r.Spec.ParsingError = fmt.Sprintf("cannot parse vmrule config: %s, err: %s", string(src), err)
- return nil
+func (cr *VMRule) UnmarshalJSON(src []byte) error {
+ type pcr VMRule
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMRuleSpec: %s, err: %s", string(s.Spec), err)
+ }
}
return nil
}
diff --git a/api/operator/v1beta1/vmscrapeconfig_types.go b/api/operator/v1beta1/vmscrapeconfig_types.go
index 983305512..11baee8f8 100644
--- a/api/operator/v1beta1/vmscrapeconfig_types.go
+++ b/api/operator/v1beta1/vmscrapeconfig_types.go
@@ -41,12 +41,10 @@ type VMScrapeConfig struct {
Status ScrapeObjectStatus `json:"status,omitempty"`
}
-var _ json.Unmarshaler = (*VMScrapeConfigSpec)(nil)
+var _ json.Unmarshaler = (*VMScrapeConfig)(nil)
// VMScrapeConfigSpec defines the desired state of VMScrapeConfig
type VMScrapeConfigSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// StaticConfigs defines a list of static targets with a common label set.
// +optional
StaticConfigs []StaticConfig `json:"staticConfigs,omitempty"`
@@ -91,16 +89,6 @@ type VMScrapeConfigSpec struct {
ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMScrapeConfigSpec) UnmarshalJSON(src []byte) error {
- type pcr VMScrapeConfigSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// StaticConfig defines a static configuration.
// See [here](https://docs.victoriametrics.com/victoriametrics/sd_configs/#static_configs)
type StaticConfig struct {
@@ -599,6 +587,25 @@ func (cr *VMScrapeConfig) GetStatus() *ScrapeObjectStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMScrapeConfig) DefaultStatusFields(vs *ScrapeObjectStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMScrapeConfig) UnmarshalJSON(src []byte) error {
+ type pcr VMScrapeConfig
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMScrapeConfigSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
func init() {
SchemeBuilder.Register(&VMScrapeConfig{}, &VMScrapeConfigList{})
}
diff --git a/api/operator/v1beta1/vmservicescrape_types.go b/api/operator/v1beta1/vmservicescrape_types.go
index ecc4ce6e5..5a77222ba 100644
--- a/api/operator/v1beta1/vmservicescrape_types.go
+++ b/api/operator/v1beta1/vmservicescrape_types.go
@@ -8,12 +8,10 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
-var _ json.Unmarshaler = (*VMServiceScrapeSpec)(nil)
+var _ json.Unmarshaler = (*VMServiceScrape)(nil)
// VMServiceScrapeSpec defines the desired state of VMServiceScrape
type VMServiceScrapeSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// DiscoveryRole - defines kubernetes_sd role for objects discovery.
// by default, its endpoints.
// can be changed to service or endpointslices.
@@ -57,16 +55,6 @@ type VMServiceScrapeSpec struct {
ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMServiceScrapeSpec) UnmarshalJSON(src []byte) error {
- type pcr VMServiceScrapeSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMServiceScrape is scrape configuration for endpoints associated with
// kubernetes service,
// it generates scrape configuration for vmagent based on selectors.
@@ -182,6 +170,25 @@ func (cr *VMServiceScrape) GetStatus() *ScrapeObjectStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMServiceScrape) DefaultStatusFields(vs *ScrapeObjectStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMServiceScrape) UnmarshalJSON(src []byte) error {
+ type pcr VMServiceScrape
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMServiceScrapeSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
func init() {
SchemeBuilder.Register(&VMServiceScrape{}, &VMServiceScrapeList{})
}
diff --git a/api/operator/v1beta1/vmsingle_types.go b/api/operator/v1beta1/vmsingle_types.go
index 1e2a1f6fa..437bdfdbc 100644
--- a/api/operator/v1beta1/vmsingle_types.go
+++ b/api/operator/v1beta1/vmsingle_types.go
@@ -15,8 +15,6 @@ import (
// VMSingleSpec defines the desired state of VMSingle
// +k8s:openapi-gen=true
type VMSingleSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// PodMetadata configures Labels and Annotations which are propagated to the VMSingle pods.
// +optional
PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"`
@@ -95,19 +93,6 @@ func (cr *VMSingle) SetLastSpec(prevSpec VMSingleSpec) {
cr.ParsedLastAppliedSpec = &prevSpec
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMSingle) UnmarshalJSON(src []byte) error {
- type pcr VMSingle
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- return err
- }
- if err := ParseLastAppliedStateTo(cr); err != nil {
- return err
- }
-
- return nil
-}
-
// UseProxyProtocol implements build.probeCRD interface
func (cr *VMSingle) UseProxyProtocol() bool {
return UseProxyProtocol(cr.Spec.ExtraArgs)
@@ -118,20 +103,12 @@ func (cr *VMSingle) AutomountServiceAccountToken() bool {
return !cr.Spec.DisableAutomountServiceAccountToken
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMSingleSpec) UnmarshalJSON(src []byte) error {
- type pcr VMSingleSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmsingle spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// VMSingleStatus defines the observed state of VMSingle
// +k8s:openapi-gen=true
type VMSingleStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains an error with context if the operator failed to parse the json object from the kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// VMSingle is fast, cost-effective and scalable time-series database.
@@ -165,6 +142,28 @@ func (cr *VMSingle) GetStatus() *VMSingleStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMSingle) DefaultStatusFields(_ *VMSingleStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMSingle) UnmarshalJSON(src []byte) error {
+ type pcr VMSingle
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMSingleSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ if err := ParseLastAppliedStateTo(cr); err != nil {
+ return err
+ }
+ return nil
+}
+
func (cr *VMSingle) ProbePath() string {
return BuildPathWithPrefixFlag(cr.Spec.ExtraArgs, healthPath)
}
diff --git a/api/operator/v1beta1/vmstaticscrape_types.go b/api/operator/v1beta1/vmstaticscrape_types.go
index 7030f349d..88ef25bf8 100644
--- a/api/operator/v1beta1/vmstaticscrape_types.go
+++ b/api/operator/v1beta1/vmstaticscrape_types.go
@@ -7,12 +7,10 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
-var _ json.Unmarshaler = (*VMStaticScrapeSpec)(nil)
+var _ json.Unmarshaler = (*VMStaticScrape)(nil)
// VMStaticScrapeSpec defines the desired state of VMStaticScrape.
type VMStaticScrapeSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// JobName name of job.
JobName string `json:"jobName,omitempty"`
// A list of target endpoints to scrape metrics from.
@@ -29,16 +27,6 @@ type VMStaticScrapeSpec struct {
ScrapeClassName *string `json:"scrapeClass,omitempty"`
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMStaticScrapeSpec) UnmarshalJSON(src []byte) error {
- type pcr VMStaticScrapeSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse spec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// TargetEndpoint defines single static target endpoint.
type TargetEndpoint struct {
// Targets static targets addresses in form of ["192.122.55.55:9100","some-name:9100"].
@@ -101,6 +89,25 @@ func (cr *VMStaticScrape) GetStatus() *ScrapeObjectStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMStaticScrape) DefaultStatusFields(vs *ScrapeObjectStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMStaticScrape) UnmarshalJSON(src []byte) error {
+ type pcr VMStaticScrape
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMStaticScrapeSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
// AsKey returns unique key for object
func (cr *VMStaticScrape) AsKey(_ bool) string {
return cr.Namespace + "/" + cr.Name
diff --git a/api/operator/v1beta1/vmuser_types.go b/api/operator/v1beta1/vmuser_types.go
index ba0be19fb..13bdb529f 100644
--- a/api/operator/v1beta1/vmuser_types.go
+++ b/api/operator/v1beta1/vmuser_types.go
@@ -14,8 +14,6 @@ import (
// VMUserSpec defines the desired state of VMUser
type VMUserSpec struct {
- // ParsingError contents error with context if operator was failed to parse json object from kubernetes api server
- ParsingError string `json:"-" yaml:"-"`
// Name of the VMUser object.
// +optional
Name *string `json:"name,omitempty"`
@@ -191,6 +189,8 @@ type TargetRefBasicAuth struct {
// VMUserStatus defines the observed state of VMUser
type VMUserStatus struct {
StatusMetadata `json:",inline"`
+ // ParsingSpecError contains an error with context if the operator failed to parse the json object from the kubernetes api server
+ ParsingSpecError string `json:"-" yaml:"-"`
}
// VMUser is the Schema for the vmusers API
@@ -262,16 +262,6 @@ func (cr *VMUser) SelectorLabels() map[string]string {
}
}
-// UnmarshalJSON implements json.Unmarshaler interface
-func (cr *VMUserSpec) UnmarshalJSON(src []byte) error {
- type pcr VMUserSpec
- if err := json.Unmarshal(src, (*pcr)(cr)); err != nil {
- cr.ParsingError = fmt.Sprintf("cannot parse vmuserspec: %s, err: %s", string(src), err)
- return nil
- }
- return nil
-}
-
// FinalLabels returns combination of selector and managed labels
func (cr *VMUser) FinalLabels() map[string]string {
v := cr.SelectorLabels()
@@ -294,6 +284,25 @@ func (cr *VMUser) GetStatus() *VMUserStatus {
// DefaultStatusFields implements reconcile.ObjectWithDeepCopyAndStatus interface
func (cr *VMUser) DefaultStatusFields(vs *VMUserStatus) {}
+// UnmarshalJSON implements json.Unmarshaler interface
+func (cr *VMUser) UnmarshalJSON(src []byte) error {
+ type pcr VMUser
+ type shadow struct {
+ *pcr
+ Spec json.RawMessage `json:"spec"`
+ }
+ s := shadow{pcr: (*pcr)(cr)}
+ if err := json.Unmarshal(src, &s); err != nil {
+ return err
+ }
+ if len(s.Spec) > 0 {
+ if err := json.Unmarshal(s.Spec, &cr.Spec); err != nil {
+ cr.Status.ParsingSpecError = fmt.Sprintf("cannot parse VMUserSpec: %s, err: %s", string(s.Spec), err)
+ }
+ }
+ return nil
+}
+
func (cr *VMUser) AsKey(hide bool) string {
var id string
hideFn := func(s string) string {
diff --git a/cmd/config-reloader/file_watch.go b/cmd/config-reloader/file_watch.go
index 0e56b8b87..915ae1436 100644
--- a/cmd/config-reloader/file_watch.go
+++ b/cmd/config-reloader/file_watch.go
@@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/fsnotify/fsnotify"
+ "k8s.io/apimachinery/pkg/util/sets"
)
type fileWatcher struct {
@@ -114,13 +115,13 @@ func readFileContent(src string) ([]byte, error) {
}
type dirWatcher struct {
- dirs map[string]struct{}
+ dirs sets.Set[string]
wg sync.WaitGroup
w *fsnotify.Watcher
}
func newDirWatchers(dirs []string) (*dirWatcher, error) {
- dws := map[string]struct{}{}
+ dws := sets.New[string]()
w, err := fsnotify.NewWatcher()
if err != nil {
return nil, fmt.Errorf("cannot create new dir watcher: %w", err)
@@ -130,7 +131,7 @@ func newDirWatchers(dirs []string) (*dirWatcher, error) {
if err := w.Add(dir); err != nil {
return nil, fmt.Errorf("cannot dir: %s to watcher: %w", dir, err)
}
- dws[dir] = struct{}{}
+ dws.Insert(dir)
}
return &dirWatcher{
w: w,
diff --git a/config/manifests/release-config.yaml b/config/manifests/release-config.yaml
deleted file mode 100644
index 52df9476f..000000000
--- a/config/manifests/release-config.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-catalog_templates:
- - template_name: v4.12-v4.16.yaml
- channels: [beta]
- - template_name: latest.yaml
- channels: [beta]
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 7e7745084..1b8f1386f 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -13,6 +13,21 @@ aliases:
## tip
+**Update note 1**: `-eula` flag is not set by default anymore for VMBackup and VMRestore. To avoid VMCluster/VMSingle rollouts, set `spec.vmstorage.vmBackup.acceptEula: true` for VMCluster and `spec.vmBackup.acceptEula: true` for VMSingle, and replace it with `spec.license` during the VMSingle/VMCluster upgrade.
+
+* Dependency: [vmoperator](https://docs.victoriametrics.com/operator/): Updated default versions for VM apps to [v1.143.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.143.0) version
+
+* SECURITY: upgrade Go builder from Go1.25.8 to Go1.25.10. See [the list of issues addressed in Go1.25.10](https://github.com/golang/go/issues?q=milestone%3AGo1.25.10+label%3ACherryPickApproved).
+
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/operator/resources/vmagent/): use volume from spec.volumes as persistent queue volume if its name is `persistent-queue-data`, previously emptyDir was mounted. See [#1677](https://github.com/VictoriaMetrics/operator/issues/1677).
+* BUGFIX: [vmcluster](https://docs.victoriametrics.com/operator/resources/vmcluster/): use volume from spec.vmstorage.volumes and spec.vmselect.volumes as data and cache volumes if its name is `vmstorage-db` and `vmselect-cachedir` respectively. See [#784](https://github.com/VictoriaMetrics/operator/issues/784).
+* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): Improve reconcile error handling for Prometheus and VictoriaMetrics controllers.
+* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): Add acceptEula support for VMBackup/VMRestore.
+* BUGFIX: [vmdistributed](https://docs.victoriametrics.com/operator/resources/vmdistributed/): change default load balancing policy for write requests from `first_available` to `least_loaded`. This should allow to evenly distribute write load across all VMAgents.
+* BUGFIX: [VMCluster](https://docs.victoriametrics.com/operator/resources/vmcluster/), [VTCluster](https://docs.victoriametrics.com/operator/resources/vtcluster/) and [VLCluster](https://docs.victoriametrics.com/operator/resources/vlcluster/): fixed infinite recreation of non-default additional service when `requestsLoadBalancer.enabled: true`
+* BUGFIX: [vmoperator](https://docs.victoriametrics.com/operator/): retry reconcile errors that may lead to the expanding state, preventing resources from hanging in the expanding state.
+* BUGFIX: [vmdistributed](https://docs.victoriametrics.com/operator/resources/vmdistributed/): expose VMClusterSpec parsing error in status; previously it was silently swallowed and led to infinite reconciles. See [#2113](https://github.com/VictoriaMetrics/operator/issues/2113).
+
## [v0.68.4](https://github.com/VictoriaMetrics/operator/releases/tag/v0.68.4)
**Release date:** 09 April 2026
diff --git a/docs/env.md b/docs/env.md
index e322a3e62..10527c6e8 100644
--- a/docs/env.md
+++ b/docs/env.md
@@ -1,8 +1,8 @@
| Environment variables |
| --- |
-| VM_METRICS_VERSION: `v1.136.0` # |
-| VM_LOGS_VERSION: `v1.47.0` # |
-| VM_ANOMALY_VERSION: `v1.28.5` # |
+| VM_METRICS_VERSION: `v1.143.0` # |
+| VM_LOGS_VERSION: `v1.50.0` # |
+| VM_ANOMALY_VERSION: `v1.29.3` # |
| VM_TRACES_VERSION: `v0.7.0` # |
| VM_OPERATOR_VERSION: `v0.68.3` # |
| VM_GATEWAY_API_ENABLED: `false` # |
diff --git a/go.mod b/go.mod
index 210400d00..3d3667d26 100644
--- a/go.mod
+++ b/go.mod
@@ -1,14 +1,14 @@
module github.com/VictoriaMetrics/operator
-go 1.25.5
+go 1.26.2
require (
github.com/Masterminds/semver/v3 v3.4.0
- github.com/VictoriaMetrics/VictoriaMetrics v1.132.0
- github.com/VictoriaMetrics/metrics v1.40.2
- github.com/VictoriaMetrics/metricsql v0.84.8
- github.com/VictoriaMetrics/operator/api v0.51.3
- github.com/caarlos0/env/v11 v11.3.1
+ github.com/VictoriaMetrics/VictoriaMetrics v1.141.0
+ github.com/VictoriaMetrics/metrics v1.43.2
+ github.com/VictoriaMetrics/metricsql v0.87.0
+ github.com/VictoriaMetrics/operator/api v0.66.1
+ github.com/caarlos0/env/v11 v11.4.1
github.com/cespare/xxhash/v2 v2.3.0
github.com/fsnotify/fsnotify v1.9.0
github.com/go-logr/logr v1.4.3
@@ -20,44 +20,44 @@ require (
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.86.1
github.com/prometheus/client_golang v1.23.2
github.com/stretchr/testify v1.11.1
- go.uber.org/zap v1.27.0
- golang.org/x/sync v0.19.0
+ go.uber.org/zap v1.27.1
+ golang.org/x/sync v0.20.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.34.1
+ k8s.io/api v0.35.3
k8s.io/apiextensions-apiserver v0.34.1
- k8s.io/apimachinery v0.34.1
+ k8s.io/apimachinery v0.35.3
k8s.io/autoscaler/vertical-pod-autoscaler v1.5.1
- k8s.io/client-go v0.34.1
- k8s.io/klog/v2 v2.130.1
- k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
+ k8s.io/client-go v0.35.3
+ k8s.io/klog/v2 v2.140.0
+ k8s.io/utils v0.0.0-20260319190234-28399d86e0b5
sigs.k8s.io/controller-runtime v0.22.2
sigs.k8s.io/gateway-api v1.3.0
)
require (
- github.com/VictoriaMetrics/VictoriaLogs v1.36.2-0.20251008164716-21c0fb3de84d // indirect
- github.com/VictoriaMetrics/easyproto v1.1.3 // indirect
- github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
- github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
- github.com/aws/smithy-go v1.24.0 // indirect
+ github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0 // indirect
+ github.com/VictoriaMetrics/easyproto v1.2.0 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.41.5 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.32.14 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.19.14 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 // indirect
+ github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 // indirect
+ github.com/aws/smithy-go v1.24.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
- github.com/bmatcuk/doublestar/v4 v4.9.1 // indirect
+ github.com/bmatcuk/doublestar/v4 v4.10.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
- github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.1 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
github.com/go-openapi/jsonpointer v0.22.4 // indirect
github.com/go-openapi/jsonreference v0.21.4 // indirect
@@ -75,13 +75,13 @@ require (
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+ github.com/golang-jwt/jwt/v5 v5.3.1 // indirect
github.com/google/btree v1.1.3 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/pprof v0.0.0-20260115054156-294ebfa9ad83 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
- github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/compress v1.18.5 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -90,11 +90,11 @@ require (
github.com/prometheus/alertmanager v0.31.0 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.5 // indirect
- github.com/prometheus/procfs v0.17.0 // indirect
- github.com/prometheus/sigv4 v0.4.0 // indirect
- github.com/spf13/pflag v1.0.7 // indirect
+ github.com/prometheus/procfs v0.20.1 // indirect
+ github.com/prometheus/sigv4 v0.4.1 // indirect
+ github.com/spf13/pflag v1.0.9 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
- github.com/valyala/fastjson v1.6.5 // indirect
+ github.com/valyala/fastjson v1.6.10 // indirect
github.com/valyala/fastrand v1.1.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
github.com/valyala/gozstd v1.24.0 // indirect
@@ -102,22 +102,22 @@ require (
github.com/valyala/quicktemplate v1.8.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.yaml.in/yaml/v2 v2.4.3 // indirect
+ go.yaml.in/yaml/v2 v2.4.4 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
- golang.org/x/mod v0.32.0 // indirect
- golang.org/x/net v0.49.0 // indirect
- golang.org/x/oauth2 v0.34.0 // indirect
- golang.org/x/sys v0.40.0 // indirect
- golang.org/x/term v0.39.0 // indirect
- golang.org/x/text v0.33.0 // indirect
- golang.org/x/time v0.14.0 // indirect
- golang.org/x/tools v0.41.0 // indirect
+ golang.org/x/mod v0.34.0 // indirect
+ golang.org/x/net v0.53.0 // indirect
+ golang.org/x/oauth2 v0.36.0 // indirect
+ golang.org/x/sys v0.43.0 // indirect
+ golang.org/x/term v0.42.0 // indirect
+ golang.org/x/text v0.36.0 // indirect
+ golang.org/x/time v0.15.0 // indirect
+ golang.org/x/tools v0.43.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3 // indirect
+ k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.2 // indirect
@@ -125,5 +125,3 @@ require (
)
replace github.com/VictoriaMetrics/operator/api => ./api
-
-replace github.com/caarlos0/env/v11 => github.com/AndrewChubatiuk/env/v11 v11.0.0-20260302065400-14d0354881b6
diff --git a/go.sum b/go.sum
index e87561340..ff395a3df 100644
--- a/go.sum
+++ b/go.sum
@@ -1,49 +1,49 @@
-github.com/AndrewChubatiuk/env/v11 v11.0.0-20260302065400-14d0354881b6 h1:5CPOPjp7co7TgffUQ/jOVlw6IX8uHXDHt0W85Mwd7Zw=
-github.com/AndrewChubatiuk/env/v11 v11.0.0-20260302065400-14d0354881b6/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
-github.com/VictoriaMetrics/VictoriaLogs v1.36.2-0.20251008164716-21c0fb3de84d h1:fV15mhBCGpCCBbuOAbOflO8Air+tLklMt8bG35FimzQ=
-github.com/VictoriaMetrics/VictoriaLogs v1.36.2-0.20251008164716-21c0fb3de84d/go.mod h1:JKZK8LZ9O38pW3+CbBSqL64nswBg6nJ0GE788b0Ps/8=
-github.com/VictoriaMetrics/VictoriaMetrics v1.132.0 h1:7J6U3j4WvjyEez+Ibb2bAYBbM+znWWoSCRclrrkPrhg=
-github.com/VictoriaMetrics/VictoriaMetrics v1.132.0/go.mod h1:NpJOi7BbuSKVHPioRwS3yhdL9irJrKMWB+GBwI7+iT8=
-github.com/VictoriaMetrics/easyproto v1.1.3 h1:gRSA3ZQs7n4+5I+SniDWD59jde1jVq4JmgQ9HUUyvk4=
-github.com/VictoriaMetrics/easyproto v1.1.3/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
-github.com/VictoriaMetrics/metrics v1.40.2 h1:OVSjKcQEx6JAwGeu8/KQm9Su5qJ72TMEW4xYn5vw3Ac=
-github.com/VictoriaMetrics/metrics v1.40.2/go.mod h1:XE4uudAAIRaJE614Tl5HMrtoEU6+GDZO4QTnNSsZRuA=
-github.com/VictoriaMetrics/metricsql v0.84.8 h1:5JXrvPJiYkYNqJVT7+hMZmpAwRHd3txBdlVIw4rJ1VM=
-github.com/VictoriaMetrics/metricsql v0.84.8/go.mod h1:d4EisFO6ONP/HIGDYTAtwrejJBBeKGQYiRl095bS4QQ=
-github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU=
-github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
-github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY=
-github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8=
-github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y=
-github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk=
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ=
-github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ=
-github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
-github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
+github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0 h1:2x1Tszv41PnCdSMumEtejz/On1RQ45kHQ+hhKT53sOk=
+github.com/VictoriaMetrics/VictoriaLogs v1.50.1-0.20260415124154-6b7a6357aec0/go.mod h1:fQtmzaSUL+HJmHozeAKmnTJTOMBT+vBccv/VWQEwhUQ=
+github.com/VictoriaMetrics/VictoriaMetrics v1.141.0 h1:PK9dU7XZUtxcUfSuRRsZNzFOnWqrolALqqPrzBQTs6A=
+github.com/VictoriaMetrics/VictoriaMetrics v1.141.0/go.mod h1:OkD7Jbu5N6mSuQTAw/1WrvRdkkUYWNaB/jdQ8H439RE=
+github.com/VictoriaMetrics/easyproto v1.2.0 h1:FJT9uNXA2isppFuJErbLqD306KoFlehl7Wn2dg/6oIE=
+github.com/VictoriaMetrics/easyproto v1.2.0/go.mod h1:QlGlzaJnDfFd8Lk6Ci/fuLxfTo3/GThPs2KH23mv710=
+github.com/VictoriaMetrics/metrics v1.43.2 h1:+8pIQEGwchKS5CYFyvv3LKvNXGi7baZ9hmIV4RHqibY=
+github.com/VictoriaMetrics/metrics v1.43.2/go.mod h1:xDM82ULLYCYdFRgQ2JBxi8Uf1+8En1So9YUwlGTOqTc=
+github.com/VictoriaMetrics/metricsql v0.87.0 h1:Koxh3GkB/Z0f3O0bEChVFxiE4YZoxYyn5TzmGJfSfaw=
+github.com/VictoriaMetrics/metricsql v0.87.0/go.mod h1:d4EisFO6ONP/HIGDYTAtwrejJBBeKGQYiRl095bS4QQ=
+github.com/aws/aws-sdk-go-v2 v1.41.5 h1:dj5kopbwUsVUVFgO4Fi5BIT3t4WyqIDjGKCangnV/yY=
+github.com/aws/aws-sdk-go-v2 v1.41.5/go.mod h1:mwsPRE8ceUUpiTgF7QmQIJ7lgsKUPQOUl3o72QBrE1o=
+github.com/aws/aws-sdk-go-v2/config v1.32.14 h1:opVIRo/ZbbI8OIqSOKmpFaY7IwfFUOCCXBsUpJOwDdI=
+github.com/aws/aws-sdk-go-v2/config v1.32.14/go.mod h1:U4/V0uKxh0Tl5sxmCBZ3AecYny4UNlVmObYjKuuaiOo=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.14 h1:n+UcGWAIZHkXzYt87uMFBv/l8THYELoX6gVcUvgl6fI=
+github.com/aws/aws-sdk-go-v2/credentials v1.19.14/go.mod h1:cJKuyWB59Mqi0jM3nFYQRmnHVQIcgoxjEMAbLkpr62w=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21 h1:NUS3K4BTDArQqNu2ih7yeDLaS3bmHD0YndtA6UP884g=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.21/go.mod h1:YWNWJQNjKigKY1RHVJCuupeWDrrHjRqHm0N9rdrWzYI=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21 h1:Rgg6wvjjtX8bNHcvi9OnXWwcE0a2vGpbwmtICOsvcf4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.21/go.mod h1:A/kJFst/nm//cyqonihbdpQZwiUhhzpqTsdbhDdRF9c=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21 h1:PEgGVtPoB6NTpPrBgqSE5hE/o47Ij9qk/SEZFbUOe9A=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.21/go.mod h1:p+hz+PRAYlY3zcpJhPwXlLC4C+kqn70WIHwnzAfs6ps=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6 h1:qYQ4pzQ2Oz6WpQ8T3HvGHnZydA72MnLuFK9tJwmrbHw=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.6/go.mod h1:O3h0IK87yXci+kg6flUKzJnWeziQUKciKrLjcatSNcY=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7 h1:5EniKhLZe4xzL7a+fU3C2tfUN4nWIqlLesfrjkuPFTY=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.7/go.mod h1:x0nZssQ3qZSnIcePWLvcoFisRXJzcTVvYpAAdYX8+GI=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21 h1:c31//R3xgIJMSC8S6hEVq+38DcvUlgFY0FM6mSI5oto=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.21/go.mod h1:r6+pf23ouCB718FUxaqzZdbpYFyDtehyZcmP5KL9FkA=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.9 h1:QKZH0S178gCmFEgst8hN0mCX1KxLgHBKKY/CLqwP8lg=
+github.com/aws/aws-sdk-go-v2/service/signin v1.0.9/go.mod h1:7yuQJoT+OoH8aqIxw9vwF+8KpvLZ8AWmvmUWHsGQZvI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.15 h1:lFd1+ZSEYJZYvv9d6kXzhkZu07si3f+GQ1AaYwa2LUM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.15/go.mod h1:WSvS1NLr7JaPunCXqpJnWk1Bjo7IxzZXrZi1QQCkuqM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19 h1:dzztQ1YmfPrxdrOiuZRMF6fuOwWlWpD2StNLTceKpys=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.19/go.mod h1:YO8TrYtFdl5w/4vmjL8zaBSsiNp3w0L1FfKVKenZT7w=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.10 h1:p8ogvvLugcR/zLBXTXrTkj0RYBUdErbMnAFFp12Lm/U=
+github.com/aws/aws-sdk-go-v2/service/sts v1.41.10/go.mod h1:60dv0eZJfeVXfbT1tFJinbHrDfSJ2GZl4Q//OSSNAVw=
+github.com/aws/smithy-go v1.24.3 h1:XgOAaUgx+HhVBoP4v8n6HCQoTRDhoMghKqw4LNHsDNg=
+github.com/aws/smithy-go v1.24.3/go.mod h1:YE2RhdIuDbA5E5bTdciG9KrW3+TiEONeUWCqxX9i1Fc=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bmatcuk/doublestar/v4 v4.9.1 h1:X8jg9rRZmJd4yRy7ZeNDRnM+T3ZfHv15JiBJ/avrEXE=
-github.com/bmatcuk/doublestar/v4 v4.9.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=
+github.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
+github.com/caarlos0/env/v11 v11.4.1 h1:fYwH0sWEsBSMPG7t4e/PEfTFzrWrpjyygXyUnWiSwEw=
+github.com/caarlos0/env/v11 v11.4.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -58,8 +58,8 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
-github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
-github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/fxamacker/cbor/v2 v2.9.1 h1:2rWm8B193Ll4VdjsJY28jxs70IdDsHRWgQYAI80+rMQ=
+github.com/fxamacker/cbor/v2 v2.9.1/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs=
github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo=
github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M=
@@ -110,8 +110,8 @@ github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
-github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
+github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
+github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
@@ -135,8 +135,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=
+github.com/klauspost/compress v1.18.5/go.mod h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -178,14 +178,14 @@ github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNw
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
-github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
-github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
-github.com/prometheus/sigv4 v0.4.0 h1:s8oiq+S4ORkpjftnBvzObLrz5Hw49YwEhumNGBdfg4M=
-github.com/prometheus/sigv4 v0.4.0/go.mod h1:D6dQeKEsDyUWzoNGjby5HgXshiOAbsz7vuApHTCmOxA=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
-github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/prometheus/procfs v0.20.1 h1:XwbrGOIplXW/AU3YhIhLODXMJYyC1isLFfYCsTEycfc=
+github.com/prometheus/procfs v0.20.1/go.mod h1:o9EMBZGRyvDrSPH1RqdxhojkuXstoe4UlK79eF5TGGo=
+github.com/prometheus/sigv4 v0.4.1 h1:EIc3j+8NBea9u1iV6O5ZAN8uvPq2xOIUPcqCTivHuXs=
+github.com/prometheus/sigv4 v0.4.1/go.mod h1:eu+ZbRvsc5TPiHwqh77OWuCnWK73IdkETYY46P4dXOU=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
+github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
@@ -202,8 +202,8 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fastjson v1.6.5 h1:LLabX0wszE1JDH9+IxLK6b+tb4B7gNdTEFTRasd0Ejw=
-github.com/valyala/fastjson v1.6.5/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
+github.com/valyala/fastjson v1.6.10 h1:/yjJg8jaVQdYR3arGxPE2X5z89xrlhS0eGXdv+ADTh4=
+github.com/valyala/fastjson v1.6.10/go.mod h1:e6FubmQouUNP73jtMLmcbxS6ydWIpOfhz34TSfO3JaE=
github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8=
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
@@ -222,10 +222,10 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
-go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
-go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
-go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ=
+go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -233,40 +233,40 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
-golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
+golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI=
+golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
-golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
-golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
-golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=
+golang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=
+golang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=
+golang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
-golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=
+golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
-golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
-golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
+golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
+golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
+golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
+golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
-golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
-golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
-golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
+golang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=
+golang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=
+golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U=
+golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
-golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
+golang.org/x/tools v0.43.0 h1:12BdW9CeB3Z+J/I/wj34VMl8X+fEXBxVR90JeMX5E7s=
+golang.org/x/tools v0.43.0/go.mod h1:uHkMso649BX2cZK6+RpuIPXS3ho2hZo4FVwfoy1vIk0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -286,22 +286,22 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM=
-k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk=
+k8s.io/api v0.35.3 h1:pA2fiBc6+N9PDf7SAiluKGEBuScsTzd2uYBkA5RzNWQ=
+k8s.io/api v0.35.3/go.mod h1:9Y9tkBcFwKNq2sxwZTQh1Njh9qHl81D0As56tu42GA4=
k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI=
k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc=
-k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4=
-k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/apimachinery v0.35.3 h1:MeaUwQCV3tjKP4bcwWGgZ/cp/vpsRnQzqO6J6tJyoF8=
+k8s.io/apimachinery v0.35.3/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/autoscaler/vertical-pod-autoscaler v1.5.1 h1:LlVtM3IKqIVHz1ZXC3ahe/mAtDWb7Eob0tyTzqFULqg=
k8s.io/autoscaler/vertical-pod-autoscaler v1.5.1/go.mod h1:znhUnV0Yn+CkZu3TZ2HVqd8GFRMkPj/CXszX1gdBjTU=
-k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY=
-k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8=
-k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
-k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3 h1:liMHz39T5dJO1aOKHLvwaCjDbf07wVh6yaUlTpunnkE=
-k8s.io/kube-openapi v0.0.0-20250814151709-d7b6acb124c3/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
-k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
-k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg=
+k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c=
+k8s.io/klog/v2 v2.140.0 h1:Tf+J3AH7xnUzZyVVXhTgGhEKnFqye14aadWv7bzXdzc=
+k8s.io/klog/v2 v2.140.0/go.mod h1:o+/RWfJ6PwpnFn7OyAG3QnO47BFsymfEfrz6XyYSSp0=
+k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f h1:4Qiq0YAoQATdgmHALJWz9rJ4fj20pB3xebpB4CFNhYM=
+k8s.io/kube-openapi v0.0.0-20260414162039-ec9c827d403f/go.mod h1:uGBT7iTA6c6MvqUvSXIaYZo9ukscABYi2btjhvgKGZ0=
+k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 h1:kBawHLSnx/mYHmRnNUf9d4CpjREbeZuxoSGOX/J+aYM=
+k8s.io/utils v0.0.0-20260319190234-28399d86e0b5/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/controller-runtime v0.22.2 h1:cK2l8BGWsSWkXz09tcS4rJh95iOLney5eawcK5A33r4=
sigs.k8s.io/controller-runtime v0.22.2/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8=
sigs.k8s.io/gateway-api v1.3.0 h1:q6okN+/UKDATola4JY7zXzx40WO4VISk7i9DIfOvr9M=
diff --git a/internal/config/config.go b/internal/config/config.go
index b690cb86d..7478f1205 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -35,9 +35,9 @@ var (
initConf sync.Once
defaultEnvs = map[string]string{
- "VM_METRICS_VERSION": "v1.136.0",
- "VM_LOGS_VERSION": "v1.47.0",
- "VM_ANOMALY_VERSION": "v1.28.5",
+ "VM_METRICS_VERSION": "v1.143.0",
+ "VM_LOGS_VERSION": "v1.50.0",
+ "VM_ANOMALY_VERSION": "v1.29.3",
"VM_TRACES_VERSION": "v0.7.0",
"VM_OPERATOR_VERSION": getVersion("v0.68.3"),
}
diff --git a/internal/controller/operator/controllers.go b/internal/controller/operator/controllers.go
index 4cf1c925a..d5c8bb01d 100644
--- a/internal/controller/operator/controllers.go
+++ b/internal/controller/operator/controllers.go
@@ -98,6 +98,11 @@ func (pe *parsingError) Error() string {
pe.controller, pe.origin)
}
+func isParsingError(err error) bool {
+ var pe *parsingError
+ return errors.As(err, &pe)
+}
+
// getError could usually occur at following cases:
// - not enough k8s permissions
// - object was deleted and due to race condition queue by operator cache
@@ -116,18 +121,41 @@ func (ge *getError) Error() string {
return fmt.Sprintf("get_object error for controller=%q object_name=%q at namespace=%q, origin=%q", ge.controller, ge.requestObject.Name, ge.requestObject.Namespace, ge.origin)
}
-func handleReconcileErr[T client.Object, ST reconcile.StatusWithMetadata[STC], STC any](
+func handleReconcileErrWithStatus[T client.Object, ST reconcile.StatusWithMetadata[STC], STC any](
ctx context.Context,
rclient client.Client,
object reconcile.ObjectWithDeepCopyAndStatus[T, ST, STC],
originResult ctrl.Result,
err error,
) (ctrl.Result, error) {
+ result, err := handleReconcileErr(ctx, rclient, object, originResult, err)
+ if isParsingError(err) {
+ if err := reconcile.UpdateObjectStatus(ctx, rclient, object, vmv1beta1.UpdateStatusFailed, err); err != nil {
+ logger.WithContext(ctx).Error(err, "failed to update status with parsing error")
+ }
+ }
+ return result, err
+}
+
+func handleReconcileErr(ctx context.Context, rclient client.Client, object client.Object, originResult ctrl.Result, err error) (ctrl.Result, error) {
if err == nil {
return originResult, nil
}
- var ge *getError
- var pe *parsingError
+
+ switch e := err.(type) {
+ case *getError:
+ deregisterObjectByCollector(e.requestObject.Name, e.requestObject.Namespace, e.controller)
+ getObjectsErrorsTotal.WithLabelValues(e.controller, e.requestObject.String()).Inc()
+ if k8serrors.IsNotFound(err) {
+ return originResult, nil
+ }
+ case *parsingError:
+ if object != nil && !reflect.ValueOf(object).IsNil() {
+ namespacedName := fmt.Sprintf("%s/%s", object.GetNamespace(), object.GetName())
+ parseObjectErrorsTotal.WithLabelValues(e.controller, namespacedName).Inc()
+ }
+ }
+
switch {
case errors.Is(err, context.Canceled):
contextCancelErrorsTotal.Inc()
@@ -135,29 +163,12 @@ func handleReconcileErr[T client.Object, ST reconcile.StatusWithMetadata[STC], S
originResult.RequeueAfter = time.Second * 5
}
return originResult, nil
- case errors.As(err, &pe):
- namespacedName := "unknown"
- if object != nil && !reflect.ValueOf(object).IsNil() {
- namespacedName = fmt.Sprintf("%s/%s", object.GetNamespace(), object.GetName())
- if err := reconcile.UpdateObjectStatus(ctx, rclient, object, vmv1beta1.UpdateStatusFailed, err); err != nil {
- logger.WithContext(ctx).Error(err, "failed to update status with parsing error")
- }
- }
- parseObjectErrorsTotal.WithLabelValues(pe.controller, namespacedName).Inc()
- case errors.As(err, &ge):
- deregisterObjectByCollector(ge.requestObject.Name, ge.requestObject.Namespace, ge.controller)
- getObjectsErrorsTotal.WithLabelValues(ge.controller, ge.requestObject.String()).Inc()
- if k8serrors.IsNotFound(err) {
- return originResult, nil
- }
case k8serrors.IsConflict(err):
- controller := "unknown"
- namespacedName := "unknown"
if object != nil && !reflect.ValueOf(object).IsNil() && object.GetNamespace() != "" {
- controller = object.GetObjectKind().GroupVersionKind().GroupKind().Kind
- namespacedName = fmt.Sprintf("%s/%s", object.GetNamespace(), object.GetName())
+ controller := object.GetObjectKind().GroupVersionKind().GroupKind().Kind
+ namespacedName := fmt.Sprintf("%s/%s", object.GetNamespace(), object.GetName())
+ conflictErrorsTotal.WithLabelValues(controller, namespacedName).Inc()
}
- conflictErrorsTotal.WithLabelValues(controller, namespacedName).Inc()
return ctrl.Result{RequeueAfter: time.Second * 5}, nil
}
if object != nil && !reflect.ValueOf(object).IsNil() && object.GetNamespace() != "" {
@@ -344,12 +355,11 @@ func reconcileAndTrackStatus[T client.Object, ST reconcile.StatusWithMetadata[ST
if err != nil {
// do not change status on conflict to failed
// it should be retried on the next loop
+ resultStatus = vmv1beta1.UpdateStatusFailed
if reconcile.IsRetryable(err) {
resultStatus = vmv1beta1.UpdateStatusExpanding
- } else {
- resultStatus = vmv1beta1.UpdateStatusFailed
- resultErr = err
}
+ resultErr = err
return
}
if specChanged {
diff --git a/internal/controller/operator/controllers_test.go b/internal/controller/operator/controllers_test.go
index ecfa434ef..a0ded524e 100644
--- a/internal/controller/operator/controllers_test.go
+++ b/internal/controller/operator/controllers_test.go
@@ -319,6 +319,80 @@ func TestIsSelectorsMatchesTargetCRD(t *testing.T) {
})
}
+func TestHandleReconcileErrWithStatus(t *testing.T) {
+ type opts struct {
+ ctx context.Context
+ err error
+ origin ctrl.Result
+ object *vmv1beta1.VMCluster
+ wantResult ctrl.Result
+ wantErr error
+ wantStatus vmv1beta1.UpdateStatus
+ }
+
+ f := func(o opts) {
+ t.Helper()
+ if o.ctx == nil {
+ o.ctx = context.Background()
+ }
+ var predefined []runtime.Object
+ if o.object != nil {
+ predefined = append(predefined, o.object)
+ }
+ fclient := k8stools.GetTestClientWithObjects(predefined)
+ got, err := handleReconcileErrWithStatus(o.ctx, fclient, o.object, o.origin, o.err)
+ assert.Equal(t, o.wantErr, err)
+ assert.Equal(t, o.wantResult, got)
+ if o.wantStatus != "" && o.object != nil {
+ updated := &vmv1beta1.VMCluster{}
+ assert.NoError(t, fclient.Get(o.ctx, client.ObjectKeyFromObject(o.object), updated))
+ assert.Equal(t, o.wantStatus, updated.Status.UpdateStatus)
+ }
+ }
+
+ // nil error
+ f(opts{
+ err: nil,
+ object: &vmv1beta1.VMCluster{},
+ origin: ctrl.Result{RequeueAfter: 10},
+ wantResult: ctrl.Result{RequeueAfter: 10},
+ wantErr: nil,
+ })
+
+ // parsingError
+ f(opts{
+ err: &parsingError{origin: "bad field value", controller: "vmcluster"},
+ object: &vmv1beta1.VMCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cluster",
+ Namespace: "default",
+ },
+ },
+ origin: ctrl.Result{},
+ wantResult: ctrl.Result{},
+ wantErr: &parsingError{origin: "bad field value", controller: "vmcluster"},
+ wantStatus: vmv1beta1.UpdateStatusFailed,
+ })
+
+ // context.Canceled sets RequeueAfter, no status update
+ f(opts{
+ err: context.Canceled,
+ object: &vmv1beta1.VMCluster{},
+ origin: ctrl.Result{},
+ wantResult: ctrl.Result{RequeueAfter: time.Second * 5},
+ wantErr: nil,
+ })
+
+ // transient error
+ f(opts{
+ err: fmt.Errorf("some transient error"),
+ object: &vmv1beta1.VMCluster{},
+ origin: ctrl.Result{},
+ wantResult: ctrl.Result{},
+ wantErr: fmt.Errorf("some transient error"),
+ })
+}
+
func TestHandleReconcileErr(t *testing.T) {
type opts struct {
ctx context.Context
diff --git a/internal/controller/operator/factory/build/backup.go b/internal/controller/operator/factory/build/backup.go
index 872b51f88..29e9d57ad 100644
--- a/internal/controller/operator/factory/build/backup.go
+++ b/internal/controller/operator/factory/build/backup.go
@@ -54,17 +54,22 @@ func VMBackupManager(
fmt.Sprintf("-dst=%s", backupDst),
fmt.Sprintf("-snapshot.createURL=%s", snapshotCreateURL),
fmt.Sprintf("-snapshot.deleteURL=%s", snapshotDeleteURL),
- "-eula",
}
-
+ if cr.AcceptEULA {
+ args = append(args, "-eula")
+ }
if cr.LogLevel != nil {
args = append(args, fmt.Sprintf("-loggerLevel=%s", *cr.LogLevel))
}
if cr.LogFormat != nil {
args = append(args, fmt.Sprintf("-loggerFormat=%s", *cr.LogFormat))
}
- for arg, value := range cr.ExtraArgs {
- args = append(args, fmt.Sprintf("-%s=%s", arg, value))
+ for key, value := range cr.ExtraArgs {
+ arg := fmt.Sprintf("-%s", key)
+ if len(value) != 0 {
+ arg = fmt.Sprintf("%s=%s", arg, value)
+ }
+ args = append(args, arg)
}
if cr.Concurrency != nil {
args = append(args, fmt.Sprintf("-concurrency=%d", *cr.Concurrency))
@@ -171,15 +176,20 @@ func VMRestore(
fmt.Sprintf("-storageDataPath=%s", storagePath),
- "-eula",
}
-
+ if cr.AcceptEULA {
+ args = append(args, "-eula")
+ }
if cr.LogLevel != nil {
args = append(args, fmt.Sprintf("-loggerLevel=%s", *cr.LogLevel))
}
if cr.LogFormat != nil {
args = append(args, fmt.Sprintf("-loggerFormat=%s", *cr.LogFormat))
}
- for arg, value := range cr.ExtraArgs {
- args = append(args, fmt.Sprintf("-%s=%s", arg, value))
+ for key, value := range cr.ExtraArgs {
+ arg := fmt.Sprintf("-%s", key)
+ if len(value) != 0 {
+ arg = fmt.Sprintf("%s=%s", arg, value)
+ }
+ args = append(args, arg)
}
if cr.Concurrency != nil {
args = append(args, fmt.Sprintf("-concurrency=%d", *cr.Concurrency))
diff --git a/internal/controller/operator/factory/build/container.go b/internal/controller/operator/factory/build/container.go
index 2f4afb8bf..582e53145 100644
--- a/internal/controller/operator/factory/build/container.go
+++ b/internal/controller/operator/factory/build/container.go
@@ -607,7 +607,7 @@ func AddSyslogTLSConfigToVolumes(dstVolumes []corev1.Volume, dstMounts []corev1.
return dstVolumes, dstMounts
}
-func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount, pvcSrc *corev1.PersistentVolumeClaimVolumeSource, storagePath, dataVolumeName string) ([]corev1.Volume, []corev1.VolumeMount, error) {
+func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount, pvcSrc *corev1.PersistentVolumeClaimVolumeSource, storagePath, dataVolumeName string, isStatefulSet bool) ([]corev1.Volume, []corev1.VolumeMount, error) {
foundMount := false
for _, volumeMount := range mounts {
rel, err := filepath.Rel(volumeMount.MountPath, storagePath)
@@ -643,6 +643,9 @@ func StorageVolumeMountsTo(volumes []corev1.Volume, mounts []corev1.VolumeMount,
return volumes, mounts, nil
}
}
+ if isStatefulSet {
+ return volumes, mounts, nil
+ }
var source corev1.VolumeSource
if pvcSrc != nil {
source.PersistentVolumeClaim = pvcSrc
diff --git a/internal/controller/operator/factory/build/container_test.go b/internal/controller/operator/factory/build/container_test.go
index 630cdabe0..9a1a9cfa4 100644
--- a/internal/controller/operator/factory/build/container_test.go
+++ b/internal/controller/operator/factory/build/container_test.go
@@ -356,10 +356,11 @@ func TestStorageVolumeMountsTo(t *testing.T) {
mounts []corev1.VolumeMount
expectedMounts []corev1.VolumeMount
wantErr bool
+ isStatefulSet bool
}
f := func(o opts) {
t.Helper()
- gotVolumes, gotMounts, err := StorageVolumeMountsTo(o.volumes, o.mounts, o.pvcSrc, o.storagePath, DataVolumeName)
+ gotVolumes, gotMounts, err := StorageVolumeMountsTo(o.volumes, o.mounts, o.pvcSrc, o.storagePath, DataVolumeName, o.isStatefulSet)
assert.Equal(t, o.expectedMounts, gotMounts)
assert.Equal(t, o.expectedVolumes, gotVolumes)
if o.wantErr {
@@ -542,6 +543,61 @@ func TestStorageVolumeMountsTo(t *testing.T) {
},
wantErr: true,
})
+
+ // isStatefulSet, add data mount only — no volume is appended (the claim template provides it)
+ f(opts{
+ isStatefulSet: true,
+ storagePath: "/test",
+ expectedVolumes: nil,
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+
+ // isStatefulSet, mount PVC
+ f(opts{
+ isStatefulSet: true,
+ volumes: []corev1.Volume{{
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ },
+ }},
+ storagePath: "/test",
+ expectedVolumes: []corev1.Volume{{
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ },
+ }},
+ expectedMounts: []corev1.VolumeMount{{
+ Name: DataVolumeName,
+ MountPath: "/test",
+ }},
+ })
+
+ // isStatefulSet, existing volume + pvcSrc — error
+ f(opts{
+ isStatefulSet: true,
+ volumes: []corev1.Volume{{
+ Name: DataVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ AWSElasticBlockStore: &corev1.AWSElasticBlockStoreVolumeSource{
+ VolumeID: "aws-volume",
+ },
+ },
+ }},
+ storagePath: "/test",
+ pvcSrc: &corev1.PersistentVolumeClaimVolumeSource{
+ ClaimName: "test-claim",
+ },
+ wantErr: true,
+ })
}
func TestBuildConfigReloaderContainer(t *testing.T) {
diff --git a/internal/controller/operator/factory/reconcile/status.go b/internal/controller/operator/factory/reconcile/status.go
index df2670ea7..d269b96f8 100644
--- a/internal/controller/operator/factory/reconcile/status.go
+++ b/internal/controller/operator/factory/reconcile/status.go
@@ -13,6 +13,7 @@ import (
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
"sigs.k8s.io/controller-runtime/pkg/client"
vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
@@ -39,7 +40,7 @@ func StatusForChildObjects[T any, PT interface {
*T
objectWithStatus
}](ctx context.Context, rclient client.Client, parentObjectName string, childObjects []PT) error {
- var errors []string
+ var errs []error
n := strings.Split(parentObjectName, ".")
if len(n) != 3 {
@@ -63,14 +64,14 @@ func StatusForChildObjects[T any, PT interface {
} else {
currCound.Status = "False"
currCound.Message = st.CurrentSyncError
- errors = append(errors, fmt.Sprintf("parent=%s config=namespace/name=%s/%s error text: %s", parentObjectName, childObject.GetNamespace(), childObject.GetName(), st.CurrentSyncError))
+ errs = append(errs, fmt.Errorf("parent=%s config=namespace/name=%s/%s error text: %s", parentObjectName, childObject.GetNamespace(), childObject.GetName(), st.CurrentSyncError))
}
if err := updateChildStatusConditions[T](ctx, rclient, childObject, currCound); err != nil {
return err
}
}
- if len(errors) > 0 {
- logger.WithContext(ctx).Error(fmt.Errorf("%s have errors", parentObjectName), fmt.Sprintf("skip config generation for resources: %s", strings.Join(errors, ",")))
+ if aggErr := utilerrors.NewAggregate(errs); aggErr != nil {
+ logger.WithContext(ctx).Error(aggErr, fmt.Sprintf("%s skip config generation for resources", parentObjectName))
}
return nil
}
diff --git a/internal/controller/operator/factory/vlagent/vlagent.go b/internal/controller/operator/factory/vlagent/vlagent.go
index 95207f5c5..5f712f910 100644
--- a/internal/controller/operator/factory/vlagent/vlagent.go
+++ b/internal/controller/operator/factory/vlagent/vlagent.go
@@ -238,7 +238,9 @@ func newK8sApp(cr *vmv1.VLAgent) (client.Object, error) {
build.StatefulSetAddCommonParams(stsSpec, &cr.Spec.CommonAppsParams)
if cr.Spec.TmpDataPath == nil {
- cr.Spec.Storage.IntoSTSVolume(tmpDataVolumeName, &stsSpec.Spec)
+ if err := cr.Spec.Storage.IntoSTSVolume(tmpDataVolumeName, &stsSpec.Spec); err != nil {
+ return nil, err
+ }
}
stsSpec.Spec.VolumeClaimTemplates = append(stsSpec.Spec.VolumeClaimTemplates, cr.Spec.ClaimTemplates...)
return stsSpec, nil
@@ -268,7 +270,7 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
args = append(args, "-envflag.enable=true")
}
- var agentVolumeMounts []corev1.VolumeMount
+ var vmMounts []corev1.VolumeMount
var volumes []corev1.Volume
tmpDataPath := defaultTmpDataPath
if cr.Spec.K8sCollector.Enabled {
@@ -330,7 +332,7 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
},
},
})
- agentVolumeMounts = append(agentVolumeMounts, corev1.VolumeMount{
+ vmMounts = append(vmMounts, corev1.VolumeMount{
Name: logVolumeName,
MountPath: logVolumePath,
ReadOnly: true,
@@ -359,7 +361,7 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
}
if cr.Spec.TmpDataPath == nil {
- agentVolumeMounts = append(agentVolumeMounts,
+ vmMounts = append(vmMounts,
corev1.VolumeMount{
Name: tmpDataVolumeName,
MountPath: tmpDataPath,
@@ -377,14 +379,14 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
if cr.Spec.SyslogSpec != nil {
args = build.AddSyslogArgsTo(args, cr.Spec.SyslogSpec, tlsServerConfigMountPath)
- volumes, agentVolumeMounts = build.AddSyslogTLSConfigToVolumes(volumes, agentVolumeMounts, cr.Spec.SyslogSpec, tlsServerConfigMountPath)
+ volumes, vmMounts = build.AddSyslogTLSConfigToVolumes(volumes, vmMounts, cr.Spec.SyslogSpec, tlsServerConfigMountPath)
ports = build.AddSyslogPortsTo(ports, cr.Spec.SyslogSpec)
}
- volumes, agentVolumeMounts = build.LicenseVolumeTo(volumes, agentVolumeMounts, cr.Spec.License, vmv1beta1.SecretsDir)
+ volumes, vmMounts = build.LicenseVolumeTo(volumes, vmMounts, cr.Spec.License, vmv1beta1.SecretsDir)
args = build.LicenseArgsTo(args, cr.Spec.License, vmv1beta1.SecretsDir)
- agentVolumeMounts = append(agentVolumeMounts, cr.Spec.VolumeMounts...)
+ vmMounts = append(vmMounts, cr.Spec.VolumeMounts...)
volumes = append(volumes, cr.Spec.Volumes...)
for _, s := range cr.Spec.Secrets {
@@ -396,7 +398,7 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
},
},
})
- agentVolumeMounts = append(agentVolumeMounts, corev1.VolumeMount{
+ vmMounts = append(vmMounts, corev1.VolumeMount{
Name: k8stools.SanitizeVolumeName("secret-" + s),
ReadOnly: true,
MountPath: path.Join(vmv1beta1.SecretsDir, s),
@@ -419,9 +421,9 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
ReadOnly: true,
MountPath: path.Join(vmv1beta1.ConfigMapsDir, c),
}
- agentVolumeMounts = append(agentVolumeMounts, cvm)
+ vmMounts = append(vmMounts, cvm)
}
- volumes, agentVolumeMounts = addRemoteWriteAssetsToVolumes(volumes, agentVolumeMounts, cr)
+ volumes, vmMounts = addRemoteWriteAssetsToVolumes(volumes, vmMounts, cr)
args = build.AddExtraArgsOverrideDefaults(args, cr.Spec.ExtraArgs, "-")
sort.Strings(args)
@@ -433,7 +435,7 @@ func newPodSpec(cr *vmv1.VLAgent) (*corev1.PodSpec, error) {
Args: args,
Env: envs,
EnvFrom: cr.Spec.ExtraEnvsFrom,
- VolumeMounts: agentVolumeMounts,
+ VolumeMounts: vmMounts,
Resources: cr.Spec.Resources,
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
}
diff --git a/internal/controller/operator/factory/vlcluster/vlinsert.go b/internal/controller/operator/factory/vlcluster/vlinsert.go
index 7a9a04887..bae883096 100644
--- a/internal/controller/operator/factory/vlcluster/vlinsert.go
+++ b/internal/controller/operator/factory/vlcluster/vlinsert.go
@@ -318,11 +318,15 @@ func createOrUpdateVLInsertService(ctx context.Context, rclient client.Client, c
var prevSvc, prevAdditionalSvc *corev1.Service
if prevCR != nil && prevCR.Spec.VLInsert != nil {
prevSvc = buildVLInsertService(prevCR)
- prevAdditionalSvc = build.AdditionalServiceFromDefault(prevSvc, prevCR.Spec.VLInsert.ServiceSpec)
+ prevAdditionalSvcBase := *prevSvc
+ prevAdditionalSvcBase.Name = prevCR.PrefixedName(vmv1beta1.ClusterComponentInsert)
+ prevAdditionalSvc = build.AdditionalServiceFromDefault(&prevAdditionalSvcBase, prevCR.Spec.VLInsert.ServiceSpec)
}
owner := cr.AsOwner()
if err := cr.Spec.VLInsert.ServiceSpec.IsSomeAndThen(func(s *vmv1beta1.AdditionalServiceSpec) error {
- additionalSvc := build.AdditionalServiceFromDefault(svc, s)
+ additionalSvcBase := *svc
+ additionalSvcBase.Name = cr.PrefixedName(vmv1beta1.ClusterComponentInsert)
+ additionalSvc := build.AdditionalServiceFromDefault(&additionalSvcBase, s)
if additionalSvc.Name == svc.Name {
return fmt.Errorf("VLInsert additional service name: %q cannot be the same as crd.prefixedname: %q", additionalSvc.Name, svc.Name)
}
diff --git a/internal/controller/operator/factory/vlcluster/vlselect.go b/internal/controller/operator/factory/vlcluster/vlselect.go
index f7fd81211..1a5cab8fa 100644
--- a/internal/controller/operator/factory/vlcluster/vlselect.go
+++ b/internal/controller/operator/factory/vlcluster/vlselect.go
@@ -113,12 +113,16 @@ func createOrUpdateVLSelectService(ctx context.Context, rclient client.Client, c
var prevSvc, prevAdditionalSvc *corev1.Service
if prevCR != nil && prevCR.Spec.VLSelect != nil {
prevSvc = buildVLSelectService(prevCR)
- prevAdditionalSvc = build.AdditionalServiceFromDefault(prevSvc, prevCR.Spec.VLSelect.ServiceSpec)
+ prevAdditionalSvcBase := *prevSvc
+ prevAdditionalSvcBase.Name = prevCR.PrefixedName(vmv1beta1.ClusterComponentSelect)
+ prevAdditionalSvc = build.AdditionalServiceFromDefault(&prevAdditionalSvcBase, prevCR.Spec.VLSelect.ServiceSpec)
}
svc := buildVLSelectService(cr)
owner := cr.AsOwner()
if err := cr.Spec.VLSelect.ServiceSpec.IsSomeAndThen(func(s *vmv1beta1.AdditionalServiceSpec) error {
- additionalSvc := build.AdditionalServiceFromDefault(svc, s)
+ additionalSvcBase := *svc
+ additionalSvcBase.Name = cr.PrefixedName(vmv1beta1.ClusterComponentSelect)
+ additionalSvc := build.AdditionalServiceFromDefault(&additionalSvcBase, s)
if additionalSvc.Name == svc.Name {
return fmt.Errorf("VLSelect additional service name: %q cannot be the same as crd.prefixedname: %q", additionalSvc.Name, svc.Name)
}
diff --git a/internal/controller/operator/factory/vlcluster/vlstorage.go b/internal/controller/operator/factory/vlcluster/vlstorage.go
index 01742e7ca..2a2b491b4 100644
--- a/internal/controller/operator/factory/vlcluster/vlstorage.go
+++ b/internal/controller/operator/factory/vlcluster/vlstorage.go
@@ -210,7 +210,9 @@ func buildVLStorageSTSSpec(cr *vmv1.VLCluster) (*appsv1.StatefulSet, error) {
}
build.StatefulSetAddCommonParams(stsSpec, &cr.Spec.VLStorage.CommonAppsParams)
storageSpec := cr.Spec.VLStorage.Storage
- storageSpec.IntoSTSVolume(cr.Spec.VLStorage.GetStorageVolumeName(), &stsSpec.Spec)
+ if err := storageSpec.IntoSTSVolume(cr.Spec.VLStorage.GetStorageVolumeName(), &stsSpec.Spec); err != nil {
+ return nil, err
+ }
stsSpec.Spec.VolumeClaimTemplates = append(stsSpec.Spec.VolumeClaimTemplates, cr.Spec.VLStorage.ClaimTemplates...)
return stsSpec, nil
diff --git a/internal/controller/operator/factory/vlcluster/vmauth_lb.go b/internal/controller/operator/factory/vlcluster/vmauth_lb.go
index 117230a39..3ab77e553 100644
--- a/internal/controller/operator/factory/vlcluster/vmauth_lb.go
+++ b/internal/controller/operator/factory/vlcluster/vmauth_lb.go
@@ -292,7 +292,7 @@ func createOrUpdateLBProxyService(ctx context.Context, rclient client.Client, cr
b.SetFinalLabels(labels.Merge(b.FinalLabels(), map[string]string{
vmv1beta1.VMAuthLBServiceProxyTargetLabel: string(kind),
}))
- b.SetSelectorLabels(cr.SelectorLabels(vmv1beta1.ClusterComponentBalancer))
+ b.SetSelectorLabels(r.SelectorLabels(vmv1beta1.ClusterComponentBalancer))
return b
}
b := builder(cr)
diff --git a/internal/controller/operator/factory/vlsingle/vlsingle.go b/internal/controller/operator/factory/vlsingle/vlsingle.go
index 387ed92c1..d6dc4c74a 100644
--- a/internal/controller/operator/factory/vlsingle/vlsingle.go
+++ b/internal/controller/operator/factory/vlsingle/vlsingle.go
@@ -192,7 +192,7 @@ func makePodSpec(r *vmv1.VLSingle) (*corev1.PodTemplateSpec, error) {
ClaimName: r.PrefixedName(),
}
}
- volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName, false)
if err != nil {
return nil, err
}
diff --git a/internal/controller/operator/factory/vmagent/vmagent.go b/internal/controller/operator/factory/vmagent/vmagent.go
index 9d51bc4ee..4f01e2c8e 100644
--- a/internal/controller/operator/factory/vmagent/vmagent.go
+++ b/internal/controller/operator/factory/vmagent/vmagent.go
@@ -184,13 +184,14 @@ func createOrUpdateApp(ctx context.Context, rclient client.Client, cr, prevCR *v
shardCount := cr.GetShardCount()
prevShardCount := prevCR.GetShardCount()
- isUpscaling := false
+ isUpscaling := prevShardCount < shardCount
if prevCR.IsSharded() {
- if prevShardCount < shardCount {
- logger.WithContext(ctx).Info(fmt.Sprintf("VMAgent shard upscaling from=%d to=%d", prevShardCount, shardCount))
- isUpscaling = true
- } else {
- logger.WithContext(ctx).Info(fmt.Sprintf("VMAgent shard downscaling from=%d to=%d", prevShardCount, shardCount))
+ if prevShardCount != shardCount {
+ action := "downscaling"
+ if isUpscaling {
+ action = "upscaling"
+ }
+ logger.WithContext(ctx).Info(fmt.Sprintf("VMAgent shard %s from=%d to=%d", action, prevShardCount, shardCount))
}
}
@@ -426,7 +427,9 @@ func newK8sApp(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (client.Object, err
}
build.StatefulSetAddCommonParams(stsSpec, &cr.Spec.CommonAppsParams)
stsSpec.Spec.Template.Spec.Volumes = build.AddServiceAccountTokenVolume(stsSpec.Spec.Template.Spec.Volumes, &cr.Spec.CommonAppsParams)
- cr.Spec.StatefulStorage.IntoSTSVolume(persistentQueueMountName, &stsSpec.Spec)
+ if err := cr.Spec.StatefulStorage.IntoSTSVolume(persistentQueueMountName, &stsSpec.Spec); err != nil {
+ return nil, err
+ }
stsSpec.Spec.VolumeClaimTemplates = append(stsSpec.Spec.VolumeClaimTemplates, cr.Spec.ClaimTemplates...)
return stsSpec, nil
}
@@ -523,7 +526,6 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
ports = append(ports, corev1.ContainerPort{Name: "http", Protocol: "TCP", ContainerPort: intstr.Parse(cr.Spec.Port).IntVal})
ports = build.AppendInsertPorts(ports, cr.Spec.InsertPorts)
- var agentVolumeMounts []corev1.VolumeMount
var crMounts []corev1.VolumeMount
// mount data path any way, even if user changes its value
// we cannot rely on value of remoteWriteSettings.
@@ -531,27 +533,12 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
if cr.Spec.StatefulMode {
pqMountPath = persistentQueueSTSDir
}
- agentVolumeMounts = append(agentVolumeMounts,
- corev1.VolumeMount{
- Name: persistentQueueMountName,
- MountPath: pqMountPath,
- },
- )
- agentVolumeMounts = append(agentVolumeMounts, cr.Spec.VolumeMounts...)
- var volumes []corev1.Volume
- // in case for sts, we have to use persistentVolumeClaimTemplate instead
- if !cr.Spec.StatefulMode {
- volumes = append(volumes, corev1.Volume{
- Name: persistentQueueMountName,
- VolumeSource: corev1.VolumeSource{
- EmptyDir: &corev1.EmptyDirVolumeSource{},
- },
- })
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(cr.Spec.Volumes, cr.Spec.VolumeMounts, nil, pqMountPath, persistentQueueMountName, cr.Spec.StatefulMode)
+ if err != nil {
+ return nil, fmt.Errorf("cannot configure persistent queue volume: %w", err)
}
- volumes = append(volumes, cr.Spec.Volumes...)
-
if !ptr.Deref(cr.Spec.IngestOnlyMode, false) {
args = append(args,
fmt.Sprintf("-promscrape.config=%s", path.Join(confOutDir, configFilename)))
@@ -588,23 +575,23 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
}
crMounts = append(crMounts, m)
m.ReadOnly = true
- agentVolumeMounts = append(agentVolumeMounts, m)
- agentVolumeMounts = append(agentVolumeMounts, corev1.VolumeMount{
+ vmMounts = append(vmMounts, m)
+ vmMounts = append(vmMounts, corev1.VolumeMount{
Name: string(build.TLSAssetsResourceKind),
MountPath: tlsAssetsDir,
ReadOnly: true,
})
- agentVolumeMounts = append(agentVolumeMounts, corev1.VolumeMount{
+ vmMounts = append(vmMounts, corev1.VolumeMount{
Name: string(build.SecretConfigResourceKind),
MountPath: confDir,
ReadOnly: true,
})
}
- mountsLen := len(agentVolumeMounts)
- volumes, agentVolumeMounts = build.StreamAggrVolumeTo(volumes, agentVolumeMounts, cr)
- volumes, agentVolumeMounts = build.RelabelVolumeTo(volumes, agentVolumeMounts, cr)
- crMounts = append(crMounts, agentVolumeMounts[mountsLen:]...)
+ mountsLen := len(vmMounts)
+ volumes, vmMounts = build.StreamAggrVolumeTo(volumes, vmMounts, cr)
+ volumes, vmMounts = build.RelabelVolumeTo(volumes, vmMounts, cr)
+ crMounts = append(crMounts, vmMounts[mountsLen:]...)
for _, s := range cr.Spec.Secrets {
volumes = append(volumes, corev1.Volume{
Name: k8stools.SanitizeVolumeName("secret-" + s),
@@ -614,7 +601,7 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
},
},
})
- agentVolumeMounts = append(agentVolumeMounts, corev1.VolumeMount{
+ vmMounts = append(vmMounts, corev1.VolumeMount{
Name: k8stools.SanitizeVolumeName("secret-" + s),
ReadOnly: true,
MountPath: path.Join(vmv1beta1.SecretsDir, s),
@@ -637,11 +624,11 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
ReadOnly: true,
MountPath: path.Join(vmv1beta1.ConfigMapsDir, c),
}
- agentVolumeMounts = append(agentVolumeMounts, cvm)
+ vmMounts = append(vmMounts, cvm)
crMounts = append(crMounts, cvm)
}
- volumes, agentVolumeMounts = build.LicenseVolumeTo(volumes, agentVolumeMounts, cr.Spec.License, vmv1beta1.SecretsDir)
+ volumes, vmMounts = build.LicenseVolumeTo(volumes, vmMounts, cr.Spec.License, vmv1beta1.SecretsDir)
args = build.LicenseArgsTo(args, cr.Spec.License, vmv1beta1.SecretsDir)
relabelKeys := []string{globalRelabelingName}
@@ -664,7 +651,7 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
Args: args,
Env: envs,
EnvFrom: cr.Spec.ExtraEnvsFrom,
- VolumeMounts: agentVolumeMounts,
+ VolumeMounts: vmMounts,
Resources: cr.Spec.Resources,
TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError,
}
@@ -691,7 +678,6 @@ func newPodSpec(cr *vmv1beta1.VMAgent, ac *build.AssetsCache) (*corev1.PodSpec,
configReloader := build.ConfigReloaderContainer(false, cr, crMounts, ss)
operatorContainers = append(operatorContainers, configReloader)
}
- var err error
ic, err = k8stools.MergePatchContainers(ic, cr.Spec.InitContainers)
if err != nil {
return nil, fmt.Errorf("cannot apply patch for initContainers: %w", err)
diff --git a/internal/controller/operator/factory/vmagent/vmagent_test.go b/internal/controller/operator/factory/vmagent/vmagent_test.go
index 66f4aa534..c4e9b4dd3 100644
--- a/internal/controller/operator/factory/vmagent/vmagent_test.go
+++ b/internal/controller/operator/factory/vmagent/vmagent_test.go
@@ -457,6 +457,72 @@ func TestCreateOrUpdate(t *testing.T) {
},
})
+ // generate vmagent daemonset with predefined volume for persistent queue data
+ f(opts{
+ cr: &vmv1beta1.VMAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "example-agent-with-existing-volume",
+ Namespace: "default",
+ },
+ Spec: vmv1beta1.VMAgentSpec{
+ RemoteWrite: []vmv1beta1.VMAgentRemoteWriteSpec{
+ {URL: "http://remote-write"},
+ },
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ Volumes: []corev1.Volume{{
+ Name: "persistent-queue-data",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/host/path/cache",
+ },
+ },
+ }},
+ },
+ DaemonSetMode: true,
+ },
+ },
+ validate: func(ctx context.Context, fclient client.Client, cr *vmv1beta1.VMAgent) {
+ var ds appsv1.DaemonSet
+ assert.NoError(t, fclient.Get(ctx, types.NamespacedName{Namespace: cr.Namespace, Name: cr.PrefixedName()}, &ds))
+ expected := []corev1.Volume{
+ {
+ Name: "persistent-queue-data",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/host/path/cache",
+ },
+ },
+ },
+ {
+ Name: "tls-assets",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "tls-assets-vmagent-example-agent-with-existing-volume",
+ },
+ },
+ },
+ {
+ Name: "config-out",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ },
+ {
+ Name: "config",
+ VolumeSource: corev1.VolumeSource{
+ Secret: &corev1.SecretVolumeSource{
+ SecretName: "vmagent-example-agent-with-existing-volume",
+ },
+ },
+ },
+ }
+ assert.Equal(t, expected, ds.Spec.Template.Spec.Volumes)
+ },
+ predefinedObjects: []runtime.Object{
+ k8stools.NewReadyDeployment("vmagent-example-agent", "default"),
+ },
+ })
+
// generate vmagent sharded statefulset with prevSpec
f(opts{
cr: &vmv1beta1.VMAgent{
diff --git a/internal/controller/operator/factory/vmalert/rules.go b/internal/controller/operator/factory/vmalert/rules.go
index 67630364a..44b915e5e 100644
--- a/internal/controller/operator/factory/vmalert/rules.go
+++ b/internal/controller/operator/factory/vmalert/rules.go
@@ -10,6 +10,7 @@ import (
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
@@ -228,14 +229,14 @@ func deduplicateRules(ctx context.Context, origin []*vmv1beta1.VMRule) []*vmv1be
// deduplicate rules across groups.
for _, vmRule := range origin {
for i, grp := range vmRule.Spec.Groups {
- uniqRules := make(map[uint64]struct{})
+ uniqRules := sets.New[uint64]()
rules := make([]vmv1beta1.Rule, 0, len(grp.Rules))
for _, rule := range grp.Rules {
ruleID := calculateRuleID(rule)
- if _, ok := uniqRules[ruleID]; ok {
+ if uniqRules.Has(ruleID) {
logger.WithContext(ctx).Info(fmt.Sprintf("duplicate rule=%q found at group=%q for vmrule=%q", rule.Expr, grp.Name, vmRule.Name))
} else {
- uniqRules[ruleID] = struct{}{}
+ uniqRules.Insert(ruleID)
rules = append(rules, rule)
}
}
diff --git a/internal/controller/operator/factory/vmalertmanager/config.go b/internal/controller/operator/factory/vmalertmanager/config.go
index b424316df..2abea3ea9 100644
--- a/internal/controller/operator/factory/vmalertmanager/config.go
+++ b/internal/controller/operator/factory/vmalertmanager/config.go
@@ -8,6 +8,7 @@ import (
"strings"
"gopkg.in/yaml.v2"
+ "k8s.io/apimachinery/pkg/util/sets"
vmv1beta1 "github.com/VictoriaMetrics/operator/api/operator/v1beta1"
"github.com/VictoriaMetrics/operator/internal/controller/operator/factory/build"
@@ -157,13 +158,13 @@ func addConfigTemplates(baseCfg []byte, templates []string) ([]byte, error) {
func buildGlobalTimeIntervals(cr *vmv1beta1.VMAlertmanagerConfig) ([]yaml.MapSlice, error) {
var r []yaml.MapSlice
- timeIntervalNameList := map[string]struct{}{}
+ timeIntervalNameList := sets.New[string]()
tis := cr.Spec.TimeIntervals
for _, mti := range tis {
- if _, ok := timeIntervalNameList[mti.Name]; ok {
+ if timeIntervalNameList.Has(mti.Name) {
return r, fmt.Errorf("got duplicate timeInterval name %s", mti.Name)
}
- timeIntervalNameList[mti.Name] = struct{}{}
+ timeIntervalNameList.Insert(mti.Name)
if len(mti.TimeIntervals) == 0 {
continue
}
diff --git a/internal/controller/operator/factory/vmalertmanager/statefulset.go b/internal/controller/operator/factory/vmalertmanager/statefulset.go
index f2118aaad..ec16fa39c 100644
--- a/internal/controller/operator/factory/vmalertmanager/statefulset.go
+++ b/internal/controller/operator/factory/vmalertmanager/statefulset.go
@@ -16,6 +16,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -74,7 +75,9 @@ func newStsForAlertManager(cr *vmv1beta1.VMAlertmanager) (*appsv1.StatefulSet, e
statefulset.Spec.PersistentVolumeClaimRetentionPolicy = cr.Spec.PersistentVolumeClaimRetentionPolicy
}
build.StatefulSetAddCommonParams(statefulset, &cr.Spec.CommonAppsParams)
- cr.Spec.Storage.IntoSTSVolume(cr.GetVolumeName(), &statefulset.Spec)
+ if err := cr.Spec.Storage.IntoSTSVolume(cr.GetVolumeName(), &statefulset.Spec); err != nil {
+ return nil, err
+ }
statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, cr.Spec.Volumes...)
return statefulset, nil
@@ -331,13 +334,13 @@ func makeStatefulSetSpec(cr *vmv1beta1.VMAlertmanager) (*appsv1.StatefulSetSpec,
crMounts = append(crMounts, cmVolumeMount)
}
- volumeByName := make(map[string]struct{})
+ volumeByName := sets.New[string]()
for _, t := range cr.Spec.Templates {
// Deduplicate configmaps by name
- if _, ok := volumeByName[t.Name]; ok {
+ if volumeByName.Has(t.Name) {
continue
}
- volumeByName[t.Name] = struct{}{}
+ volumeByName.Insert(t.Name)
volumes = append(volumes, corev1.Volume{
Name: k8stools.SanitizeVolumeName("templates-" + t.Name),
VolumeSource: corev1.VolumeSource{
diff --git a/internal/controller/operator/factory/vmanomaly/statefulset.go b/internal/controller/operator/factory/vmanomaly/statefulset.go
index 63f6acac1..f279158b3 100644
--- a/internal/controller/operator/factory/vmanomaly/statefulset.go
+++ b/internal/controller/operator/factory/vmanomaly/statefulset.go
@@ -2,7 +2,6 @@ package vmanomaly
import (
"context"
- "errors"
"fmt"
"maps"
"sync"
@@ -12,6 +11,7 @@ import (
policyv1 "k8s.io/api/policy/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
+ utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -158,7 +158,9 @@ func newK8sApp(cr *vmv1.VMAnomaly, configHash string, ac *build.AssetsCache) (*a
}
build.StatefulSetAddCommonParams(app, &cr.Spec.CommonAppsParams)
app.Spec.Template.Spec.Volumes = append(app.Spec.Template.Spec.Volumes, cr.Spec.Volumes...)
- cr.Spec.Storage.IntoSTSVolume(cr.GetVolumeName(), &app.Spec)
+ if err := cr.Spec.Storage.IntoSTSVolume(cr.GetVolumeName(), &app.Spec); err != nil {
+ return nil, err
+ }
app.Spec.VolumeClaimTemplates = append(app.Spec.VolumeClaimTemplates, cr.Spec.ClaimTemplates...)
return app, nil
}
@@ -186,13 +188,14 @@ func createOrUpdateApp(ctx context.Context, rclient client.Client, cr, prevCR *v
shardCount := cr.GetShardCount()
prevShardCount := prevCR.GetShardCount()
- isUpscaling := false
+ isUpscaling := prevShardCount < shardCount
if prevCR.IsSharded() {
- if prevShardCount < shardCount {
- logger.WithContext(ctx).Info(fmt.Sprintf("%T shard upscaling from=%d to=%d", cr, prevShardCount, shardCount))
- isUpscaling = true
- } else {
- logger.WithContext(ctx).Info(fmt.Sprintf("%T shard downscaling from=%d to=%d", cr, prevShardCount, shardCount))
+ if prevShardCount != shardCount {
+ action := "downscaling"
+ if isUpscaling {
+ action = "upscaling"
+ }
+ logger.WithContext(ctx).Info(fmt.Sprintf("VMAnomaly shard %s from=%d to=%d", action, prevShardCount, shardCount))
}
}
@@ -263,8 +266,8 @@ func createOrUpdateApp(ctx context.Context, rclient client.Client, cr, prevCR *v
}
}
}
- if len(errs) > 0 {
- return errors.Join(errs...)
+ if err := utilerrors.NewAggregate(errs); err != nil {
+ return err
}
if err := finalize.RemoveOrphanedPDBs(ctx, rclient, cr, pdbToKeep, true); err != nil {
return err
diff --git a/internal/controller/operator/factory/vmcluster/vmcluster.go b/internal/controller/operator/factory/vmcluster/vmcluster.go
index 82bd12a53..c9cd88a14 100644
--- a/internal/controller/operator/factory/vmcluster/vmcluster.go
+++ b/internal/controller/operator/factory/vmcluster/vmcluster.go
@@ -218,11 +218,15 @@ func createOrUpdateVMSelectService(ctx context.Context, rclient client.Client, c
var prevSvc, prevAdditionalSvc *corev1.Service
if prevCR != nil && prevCR.Spec.VMSelect != nil {
prevSvc = buildVMSelectService(prevCR)
- prevAdditionalSvc = build.AdditionalServiceFromDefault(prevSvc, prevCR.Spec.VMSelect.ServiceSpec)
+ prevAdditionalSvcBase := *prevSvc
+ prevAdditionalSvcBase.Name = prevCR.PrefixedName(vmv1beta1.ClusterComponentSelect)
+ prevAdditionalSvc = build.AdditionalServiceFromDefault(&prevAdditionalSvcBase, prevCR.Spec.VMSelect.ServiceSpec)
}
owner := cr.AsOwner()
if err := cr.Spec.VMSelect.ServiceSpec.IsSomeAndThen(func(s *vmv1beta1.AdditionalServiceSpec) error {
- additionalSvc := build.AdditionalServiceFromDefault(svc, s)
+ additionalSvcBase := *svc
+ additionalSvcBase.Name = cr.PrefixedName(vmv1beta1.ClusterComponentSelect)
+ additionalSvc := build.AdditionalServiceFromDefault(&additionalSvcBase, s)
if additionalSvc.Name == svc.Name {
return fmt.Errorf("vmselect additional service name: %q cannot be the same as crd.prefixedname: %q", additionalSvc.Name, svc.Name)
}
@@ -275,7 +279,7 @@ func createOrUpdateLBProxyService(ctx context.Context, rclient client.Client, cr
b.SetFinalLabels(labels.Merge(b.FinalLabels(), map[string]string{
vmv1beta1.VMAuthLBServiceProxyTargetLabel: string(kind),
}))
- b.SetSelectorLabels(cr.SelectorLabels(vmv1beta1.ClusterComponentBalancer))
+ b.SetSelectorLabels(r.SelectorLabels(vmv1beta1.ClusterComponentBalancer))
return b
}
b := builder(cr)
@@ -361,11 +365,15 @@ func createOrUpdateVMInsertService(ctx context.Context, rclient client.Client, c
var prevSvc, prevAdditionalSvc *corev1.Service
if prevCR != nil && prevCR.Spec.VMInsert != nil {
prevSvc = buildVMInsertService(prevCR)
- prevAdditionalSvc = build.AdditionalServiceFromDefault(prevSvc, prevCR.Spec.VMInsert.ServiceSpec)
+ prevAdditionalSvcBase := *prevSvc
+ prevAdditionalSvcBase.Name = prevCR.PrefixedName(vmv1beta1.ClusterComponentInsert)
+ prevAdditionalSvc = build.AdditionalServiceFromDefault(&prevAdditionalSvcBase, prevCR.Spec.VMInsert.ServiceSpec)
}
owner := cr.AsOwner()
if err := cr.Spec.VMInsert.ServiceSpec.IsSomeAndThen(func(s *vmv1beta1.AdditionalServiceSpec) error {
- additionalSvc := build.AdditionalServiceFromDefault(svc, s)
+ additionalSvcBase := *svc
+ additionalSvcBase.Name = cr.PrefixedName(vmv1beta1.ClusterComponentInsert)
+ additionalSvc := build.AdditionalServiceFromDefault(&additionalSvcBase, s)
if additionalSvc.Name == svc.Name {
return fmt.Errorf("vminsert additional service name: %q cannot be the same as crd.prefixedname: %q", additionalSvc.Name, svc.Name)
}
@@ -533,7 +541,9 @@ func genVMSelectSpec(cr *vmv1beta1.VMCluster) (*appsv1.StatefulSet, error) {
}
build.StatefulSetAddCommonParams(stsSpec, &cr.Spec.VMSelect.CommonAppsParams)
if cr.Spec.VMSelect.CacheMountPath != "" {
- cr.Spec.VMSelect.StorageSpec.IntoSTSVolume(cr.Spec.VMSelect.GetCacheMountVolumeName(), &stsSpec.Spec)
+ if err := cr.Spec.VMSelect.StorageSpec.IntoSTSVolume(cr.Spec.VMSelect.GetCacheMountVolumeName(), &stsSpec.Spec); err != nil {
+ return nil, err
+ }
}
stsSpec.Spec.VolumeClaimTemplates = append(stsSpec.Spec.VolumeClaimTemplates, cr.Spec.VMSelect.ClaimTemplates...)
return stsSpec, nil
@@ -959,7 +969,9 @@ func buildVMStorageSpec(ctx context.Context, cr *vmv1beta1.VMCluster) (*appsv1.S
}
build.StatefulSetAddCommonParams(stsSpec, &cr.Spec.VMStorage.CommonAppsParams)
storageSpec := cr.Spec.VMStorage.Storage
- storageSpec.IntoSTSVolume(cr.Spec.VMStorage.GetStorageVolumeName(), &stsSpec.Spec)
+ if err := storageSpec.IntoSTSVolume(cr.Spec.VMStorage.GetStorageVolumeName(), &stsSpec.Spec); err != nil {
+ return nil, err
+ }
stsSpec.Spec.VolumeClaimTemplates = append(stsSpec.Spec.VolumeClaimTemplates, cr.Spec.VMStorage.ClaimTemplates...)
return stsSpec, nil
diff --git a/internal/controller/operator/factory/vmcluster/vmcluster_test.go b/internal/controller/operator/factory/vmcluster/vmcluster_test.go
index a7f84d6c5..2e8b3a942 100644
--- a/internal/controller/operator/factory/vmcluster/vmcluster_test.go
+++ b/internal/controller/operator/factory/vmcluster/vmcluster_test.go
@@ -2,15 +2,20 @@ package vmcluster
import (
"context"
+ "io"
+ "sort"
+ "strings"
"testing"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
+ appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -387,7 +392,16 @@ func TestCreateOrUpdate(t *testing.T) {
VMSelect: &vmv1beta1.VMSelect{
CommonAppsParams: vmv1beta1.CommonAppsParams{
ReplicaCount: ptr.To(int32(0)),
+ Volumes: []corev1.Volume{{
+ Name: "vmselect-cachedir",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/host/path/cache",
+ },
+ },
+ }},
},
+ CacheMountPath: "/cache",
VPA: &vmv1beta1.EmbeddedVPA{
UpdatePolicy: &vpav1.PodUpdatePolicy{
UpdateMode: ptr.To(vpav1.UpdateModeRecreate),
@@ -411,13 +425,17 @@ func TestCreateOrUpdate(t *testing.T) {
c.VPAAPIEnabled = true
},
validate: func(ctx context.Context, rclient client.Client, cr *vmv1beta1.VMCluster) {
- var got vpav1.VerticalPodAutoscaler
+ var vpaGot vpav1.VerticalPodAutoscaler
component := vmv1beta1.ClusterComponentSelect
- vpaName := cr.PrefixedName(component)
- assert.NoError(t, rclient.Get(ctx, types.NamespacedName{Namespace: cr.Namespace, Name: vpaName}, &got))
- expected := vpav1.VerticalPodAutoscaler{
+ selectName := cr.PrefixedName(component)
+ nsn := types.NamespacedName{
+ Namespace: cr.Namespace,
+ Name: selectName,
+ }
+ assert.NoError(t, rclient.Get(ctx, nsn, &vpaGot))
+ vpaExpected := vpav1.VerticalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
- Name: vpaName,
+ Name: selectName,
Namespace: cr.Namespace,
Labels: cr.FinalLabels(component),
ResourceVersion: "1",
@@ -425,7 +443,7 @@ func TestCreateOrUpdate(t *testing.T) {
},
Spec: vpav1.VerticalPodAutoscalerSpec{
TargetRef: &autoscalingv1.CrossVersionObjectReference{
- Name: vpaName,
+ Name: selectName,
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
@@ -443,7 +461,18 @@ func TestCreateOrUpdate(t *testing.T) {
},
},
}
- assert.Equal(t, got, expected)
+ assert.Equal(t, vpaGot, vpaExpected)
+ var stsSelectGot appsv1.StatefulSet
+ assert.NoError(t, rclient.Get(ctx, nsn, &stsSelectGot))
+ volumesSelectExpected := []corev1.Volume{{
+ Name: "vmselect-cachedir",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/host/path/cache",
+ },
+ },
+ }}
+ assert.Equal(t, stsSelectGot.Spec.Template.Spec.Volumes, volumesSelectExpected)
},
})
@@ -455,6 +484,14 @@ func TestCreateOrUpdate(t *testing.T) {
VMStorage: &vmv1beta1.VMStorage{
CommonAppsParams: vmv1beta1.CommonAppsParams{
ReplicaCount: ptr.To(int32(0)),
+ Volumes: []corev1.Volume{{
+ Name: "vmstorage-db",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/host/path/storage",
+ },
+ },
+ }},
},
VPA: &vmv1beta1.EmbeddedVPA{
UpdatePolicy: &vpav1.PodUpdatePolicy{
@@ -475,11 +512,15 @@ func TestCreateOrUpdate(t *testing.T) {
validate: func(ctx context.Context, rclient client.Client, cr *vmv1beta1.VMCluster) {
component := vmv1beta1.ClusterComponentStorage
var got vpav1.VerticalPodAutoscaler
- vpaName := cr.PrefixedName(component)
- assert.NoError(t, rclient.Get(ctx, types.NamespacedName{Namespace: cr.Namespace, Name: vpaName}, &got))
+ storageName := cr.PrefixedName(component)
+ nsn := types.NamespacedName{
+ Namespace: cr.Namespace,
+ Name: storageName,
+ }
+ assert.NoError(t, rclient.Get(ctx, nsn, &got))
expected := vpav1.VerticalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
- Name: vpaName,
+ Name: storageName,
Namespace: cr.Namespace,
Labels: cr.FinalLabels(component),
ResourceVersion: "1",
@@ -487,7 +528,7 @@ func TestCreateOrUpdate(t *testing.T) {
},
Spec: vpav1.VerticalPodAutoscalerSpec{
TargetRef: &autoscalingv1.CrossVersionObjectReference{
- Name: vpaName,
+ Name: storageName,
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
@@ -502,6 +543,18 @@ func TestCreateOrUpdate(t *testing.T) {
},
}
assert.Equal(t, got, expected)
+
+ var stsStorageGot appsv1.StatefulSet
+ assert.NoError(t, rclient.Get(ctx, nsn, &stsStorageGot))
+ volumesStorageExpected := []corev1.Volume{{
+ Name: "vmstorage-db",
+ VolumeSource: corev1.VolumeSource{
+ HostPath: &corev1.HostPathVolumeSource{
+ Path: "/host/path/storage",
+ },
+ },
+ }}
+ assert.Equal(t, stsStorageGot.Spec.Template.Spec.Volumes, volumesStorageExpected)
},
})
@@ -649,37 +702,35 @@ func TestCreateOrUpdate(t *testing.T) {
}
func TestCreatOrUpdateClusterServices(t *testing.T) {
- f := func(component vmv1beta1.ClusterComponent, cr *vmv1beta1.VMCluster, wantSvcYAML string, predefinedObjects ...runtime.Object) {
+ f := func(cr *vmv1beta1.VMCluster, wantSvcYAML string, predefinedObjects ...runtime.Object) {
t.Helper()
ctx := context.Background()
fclient := k8stools.GetTestClientWithObjects(predefinedObjects)
build.AddDefaults(fclient.Scheme())
fclient.Scheme().Default(cr)
-
- var builderF func(ctx context.Context, rclient client.Client, cr, prevCR *vmv1beta1.VMCluster) error
- var svc *corev1.Service
- switch component {
- case vmv1beta1.ClusterComponentInsert:
- builderF = createOrUpdateVMInsertService
- svc = buildVMInsertService(cr)
- case vmv1beta1.ClusterComponentStorage:
- builderF = createOrUpdateVMStorageService
- svc = buildVMStorageService(cr)
- case vmv1beta1.ClusterComponentSelect:
- builderF = createOrUpdateVMSelectService
- svc = buildVMSelectService(cr)
- default:
- t.Fatalf("BUG not expected component for test: %q", component)
+ assert.NoError(t, CreateOrUpdate(ctx, cr, fclient))
+ var ls corev1.ServiceList
+ selector := cr.SelectorLabels(vmv1beta1.ClusterComponentRoot)
+ delete(selector, "app.kubernetes.io/name")
+ assert.NoError(t, fclient.List(ctx, &ls, &client.ListOptions{LabelSelector: labels.SelectorFromSet(selector)}))
+ sort.Slice(ls.Items, func(i, j int) bool {
+ return strings.ToLower(ls.Items[i].Name) < strings.ToLower(ls.Items[j].Name)
+ })
+ decoder := yaml.NewDecoder(strings.NewReader(wantSvcYAML))
+ var wantServices []corev1.Service
+ for {
+ var wantService corev1.Service
+ err := decoder.Decode(&wantService)
+ if err == io.EOF {
+ break
+ }
+ assert.NoError(t, err)
+ wantServices = append(wantServices, wantService)
}
- assert.NoError(t, builderF(ctx, fclient, cr, nil))
- var actualService corev1.Service
- assert.NoError(t, fclient.Get(ctx, types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}, &actualService))
- var wantService corev1.Service
- assert.NoError(t, yaml.Unmarshal([]byte(wantSvcYAML), &wantService))
- assert.Equal(t, wantService, actualService)
+ assert.Equal(t, wantServices, ls.Items)
}
- f(vmv1beta1.ClusterComponentStorage, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
VMStorage: &vmv1beta1.VMStorage{},
@@ -696,8 +747,7 @@ objectmeta:
app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -727,7 +777,7 @@ spec:
publishnotreadyaddresses: true
`)
// with vmbackup and additional service ports
- f(vmv1beta1.ClusterComponentStorage, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
License: &vmv1beta1.License{
@@ -762,8 +812,7 @@ objectmeta:
app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -802,7 +851,7 @@ spec:
publishnotreadyaddresses: true
`)
- f(vmv1beta1.ClusterComponentSelect, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
VMStorage: &vmv1beta1.VMStorage{},
@@ -824,8 +873,7 @@ objectmeta:
app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -843,9 +891,49 @@ spec:
clusterip: None
type: ClusterIP
publishnotreadyaddresses: true
+---
+objectmeta:
+ name: vmstorage-test
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmstorage
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8482
+ targetport:
+ intval: 8482
+ - name: vminsert
+ protocol: TCP
+ port: 8400
+ targetport:
+ intval: 8400
+ - name: vmselect
+ protocol: TCP
+ port: 8401
+ targetport:
+ intval: 8401
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmstorage
+ managed-by: vm-operator
+ clusterip: None
+ type: ClusterIP
+ publishnotreadyaddresses: true
`)
// with native and extra service
- f(vmv1beta1.ClusterComponentSelect, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
VMStorage: &vmv1beta1.VMStorage{},
@@ -864,8 +952,7 @@ objectmeta:
app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -888,8 +975,82 @@ spec:
clusterip: None
type: ClusterIP
publishnotreadyaddresses: true
+---
+objectmeta:
+ name: vmselect-test-additional-service
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmselect
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ operator.victoriametrics.com/additional-service: managed
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8352
+ targetport:
+ intval: 8352
+ - name: clusternative
+ protocol: TCP
+ port: 8477
+ targetport:
+ intval: 8477
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmselect
+ managed-by: vm-operator
+ type: LoadBalancer
+---
+objectmeta:
+ name: vmstorage-test
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmstorage
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8482
+ targetport:
+ intval: 8482
+ - name: vminsert
+ protocol: TCP
+ port: 8400
+ targetport:
+ intval: 8400
+ - name: vmselect
+ protocol: TCP
+ port: 8401
+ targetport:
+ intval: 8401
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmstorage
+ managed-by: vm-operator
+ clusterip: None
+ type: ClusterIP
+ publishnotreadyaddresses: true
`)
- f(vmv1beta1.ClusterComponentInsert, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
VMInsert: &vmv1beta1.VMInsert{
@@ -910,8 +1071,7 @@ objectmeta:
app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -931,11 +1091,10 @@ spec:
app.kubernetes.io/instance: test
app.kubernetes.io/name: vminsert
managed-by: vm-operator
- clusterip: ""
type: ClusterIP
`)
// transit to headless
- f(vmv1beta1.ClusterComponentInsert, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
VMInsert: &vmv1beta1.VMInsert{
@@ -964,8 +1123,7 @@ objectmeta:
app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -1009,7 +1167,7 @@ spec:
},
})
// transit to loadbalancer
- f(vmv1beta1.ClusterComponentInsert, &vmv1beta1.VMCluster{
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
VMInsert: &vmv1beta1.VMInsert{
@@ -1024,7 +1182,6 @@ spec:
},
},
Spec: corev1.ServiceSpec{
- ClusterIP: "",
Type: "LoadBalancer",
LoadBalancerClass: ptr.To("service.k8s.aws/nlb"),
},
@@ -1049,8 +1206,7 @@ objectmeta:
annotations:
"service.beta.kubernetes.io/aws-load-balancer-type": "external"
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -1093,8 +1249,9 @@ spec:
},
},
})
- // insert with load-balanacer
- f(vmv1beta1.ClusterComponentInsert, &vmv1beta1.VMCluster{
+
+ // insert with load-balancer and additional service
+ f(&vmv1beta1.VMCluster{
ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
Spec: vmv1beta1.VMClusterSpec{
RequestsLoadBalancer: vmv1beta1.VMAuthLoadBalancer{
@@ -1102,7 +1259,6 @@ spec:
},
VMInsert: &vmv1beta1.VMInsert{
ServiceSpec: &vmv1beta1.AdditionalServiceSpec{
- UseAsDefault: true,
EmbeddedObjectMetadata: vmv1beta1.EmbeddedObjectMetadata{
Labels: map[string]string{
"app.kubernetes.io/instance": "incorrect-label",
@@ -1112,7 +1268,6 @@ spec:
},
},
Spec: corev1.ServiceSpec{
- ClusterIP: "",
Type: "LoadBalancer",
LoadBalancerClass: ptr.To("service.k8s.aws/nlb"),
},
@@ -1124,6 +1279,110 @@ spec:
},
},
}, `
+objectmeta:
+ name: vmclusterlb-test
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmclusterlb-vmauth-balancer
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ operator.victoriametrics.com/vmauthlb-proxy-name: vmauth
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8427
+ targetport:
+ intval: 8427
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmclusterlb-vmauth-balancer
+ managed-by: vm-operator
+ type: ClusterIP
+---
+objectmeta:
+ name: vminsert-test
+ namespace: default-1
+ resourceversion: "1000"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vminsert
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ operator.victoriametrics.com/vmauthlb-proxy-name: insert
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8480
+ targetport:
+ intval: 8427
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmclusterlb-vmauth-balancer
+ managed-by: vm-operator
+ clusterip: 10.0.0.5
+ type: ClusterIP
+ sessionaffinity: None
+ internaltrafficpolicy: Cluster
+---
+objectmeta:
+ name: vminsert-test-additional-service
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vminsert
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ operator.victoriametrics.com/additional-service: managed
+ operator.victoriametrics.com/vmauthlb-proxy-job-name: vminsert-test
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: external
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8480
+ targetport:
+ intval: 8480
+ - name: opentsdb-http
+ protocol: TCP
+ port: 8087
+ targetport:
+ intval: 8087
+ - name: clusternative
+ protocol: TCP
+ port: 8055
+ targetport:
+ intval: 8055
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vminsert
+ managed-by: vm-operator
+ type: LoadBalancer
+ loadbalancerclass: service.k8s.aws/nlb
+---
objectmeta:
name: vminsertinternal-test
namespace: default-1
@@ -1132,14 +1391,170 @@ objectmeta:
app.kubernetes.io/component: monitoring
app.kubernetes.io/instance: test
app.kubernetes.io/name: vminsert
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ operator.victoriametrics.com/vmauthlb-proxy-job-name: vminsert-test
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8480
+ targetport:
+ intval: 8480
+ - name: opentsdb-http
+ protocol: TCP
+ port: 8087
+ targetport:
+ intval: 8087
+ - name: clusternative
+ protocol: TCP
+ port: 8055
+ targetport:
+ intval: 8055
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vminsert
+ managed-by: vm-operator
+ clusterip: None
+ type: ClusterIP
+`, &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "vminsert-test",
+ Namespace: "default-1",
+ },
+ Spec: corev1.ServiceSpec{
+ Type: corev1.ServiceTypeClusterIP,
+ ClusterIP: "10.0.0.5",
+ Selector: map[string]string{
+ "app.kubernetes.io/component": "monitoring",
+ "app.kubernetes.io/instance": "test",
+ "app.kubernetes.io/name": "vminsert",
+ "managed-by": "vm-operator",
+ },
+ },
+ })
+
+ // insert with load-balancer and custom service spec
+ f(&vmv1beta1.VMCluster{
+ ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default-1"},
+ Spec: vmv1beta1.VMClusterSpec{
+ RequestsLoadBalancer: vmv1beta1.VMAuthLoadBalancer{
+ Enabled: true,
+ },
+ VMInsert: &vmv1beta1.VMInsert{
+ ServiceSpec: &vmv1beta1.AdditionalServiceSpec{
+ UseAsDefault: true,
+ EmbeddedObjectMetadata: vmv1beta1.EmbeddedObjectMetadata{
+ Labels: map[string]string{
+ "app.kubernetes.io/instance": "incorrect-label",
+ },
+ Annotations: map[string]string{
+ "service.beta.kubernetes.io/aws-load-balancer-type": "external",
+ },
+ },
+ Spec: corev1.ServiceSpec{
+ Type: "LoadBalancer",
+ LoadBalancerClass: ptr.To("service.k8s.aws/nlb"),
+ },
+ },
+ ClusterNativePort: "8055",
+ InsertPorts: &vmv1beta1.InsertPorts{
+ OpenTSDBHTTPPort: "8087",
+ },
+ },
+ },
+ }, `
+objectmeta:
+ name: vmclusterlb-test
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmclusterlb-vmauth-balancer
+ app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
+ operator.victoriametrics.com/vmauthlb-proxy-name: vmauth
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8427
+ targetport:
+ intval: 8427
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmclusterlb-vmauth-balancer
+ managed-by: vm-operator
+ type: ClusterIP
+---
+objectmeta:
+ name: vminsert-test
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vminsert
+ app.kubernetes.io/part-of: vmcluster
managed-by: vm-operator
+ operator.victoriametrics.com/vmauthlb-proxy-name: insert
+ annotations:
+ service.beta.kubernetes.io/aws-load-balancer-type: external
+ ownerreferences:
+ - name: test
+ controller: true
+ blockownerdeletion: true
+spec:
+ ports:
+ - name: http
+ protocol: TCP
+ port: 8480
+ targetport:
+ intval: 8480
+ - name: opentsdb-http
+ protocol: TCP
+ port: 8087
+ targetport:
+ intval: 8087
+ - name: clusternative
+ protocol: TCP
+ port: 8055
+ targetport:
+ intval: 8055
+ selector:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vmclusterlb-vmauth-balancer
+ managed-by: vm-operator
+ type: LoadBalancer
+ loadbalancerclass: service.k8s.aws/nlb
+---
+objectmeta:
+ name: vminsertinternal-test
+ namespace: default-1
+ resourceversion: "1"
+ labels:
+ app.kubernetes.io/component: monitoring
+ app.kubernetes.io/instance: test
+ app.kubernetes.io/name: vminsert
app.kubernetes.io/part-of: vmcluster
+ managed-by: vm-operator
operator.victoriametrics.com/vmauthlb-proxy-job-name: vminsert-test
annotations:
- "service.beta.kubernetes.io/aws-load-balancer-type": "external"
+ service.beta.kubernetes.io/aws-load-balancer-type: external
ownerreferences:
- - apiversion: ""
- name: test
+ - name: test
controller: true
blockownerdeletion: true
spec:
@@ -1164,8 +1579,8 @@ spec:
app.kubernetes.io/instance: test
app.kubernetes.io/name: vminsert
managed-by: vm-operator
+ clusterip: None
type: ClusterIP
- clusterip: "None"
loadbalancerclass: service.k8s.aws/nlb
`, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
diff --git a/internal/controller/operator/factory/vmdistributed/vmauth.go b/internal/controller/operator/factory/vmdistributed/vmauth.go
index 85c3efeee..2c36dd296 100644
--- a/internal/controller/operator/factory/vmdistributed/vmauth.go
+++ b/internal/controller/operator/factory/vmdistributed/vmauth.go
@@ -66,7 +66,7 @@ func vmAgentTargetRef(vmAgents []*vmv1beta1.VMAgent, owner *metav1.OwnerReferenc
}
return vmv1beta1.TargetRef{
URLMapCommon: vmv1beta1.URLMapCommon{
- LoadBalancingPolicy: ptr.To("first_available"),
+ LoadBalancingPolicy: ptr.To("least_loaded"),
RetryStatusCodes: []int{500, 502, 503},
},
Paths: []string{"/insert/.+", "/api/v1/write"},
diff --git a/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go b/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go
index 961249b00..8abe1e161 100644
--- a/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go
+++ b/internal/controller/operator/factory/vmdistributed/vmdistributed_test.go
@@ -404,17 +404,22 @@ func TestCreateOrUpdate(t *testing.T) {
Static: &vmv1beta1.StaticRef{
URLs: vmAgentURLs,
},
+ URLMapCommon: vmv1beta1.URLMapCommon{
+ LoadBalancingPolicy: ptr.To("least_loaded"),
+ },
},
{
Paths: []string{"/select/.+", "/admin/tenants"},
Static: &vmv1beta1.StaticRef{
URLs: vmClusterURLs,
},
+ URLMapCommon: vmv1beta1.URLMapCommon{
+ LoadBalancingPolicy: ptr.To("first_available"),
+ },
},
}
for i := range targetRefs {
targetRef := &targetRefs[i]
- targetRef.LoadBalancingPolicy = ptr.To("first_available")
targetRef.RetryStatusCodes = []int{500, 502, 503}
}
var got vmv1beta1.VMAuth
@@ -527,17 +532,22 @@ func TestCreateOrUpdate(t *testing.T) {
Static: &vmv1beta1.StaticRef{
URLs: vmAgentURLs,
},
+ URLMapCommon: vmv1beta1.URLMapCommon{
+ LoadBalancingPolicy: ptr.To("least_loaded"),
+ },
},
{
Paths: []string{"/select/.+", "/admin/tenants"},
Static: &vmv1beta1.StaticRef{
URLs: vmClusterURLs,
},
+ URLMapCommon: vmv1beta1.URLMapCommon{
+ LoadBalancingPolicy: ptr.To("first_available"),
+ },
},
}
for i := range targetRefs {
targetRef := &targetRefs[i]
- targetRef.LoadBalancingPolicy = ptr.To("first_available")
targetRef.RetryStatusCodes = []int{500, 502, 503}
}
var got vmv1beta1.VMAuth
diff --git a/internal/controller/operator/factory/vmdistributed/zone.go b/internal/controller/operator/factory/vmdistributed/zone.go
index bd72e55c9..46406cee5 100644
--- a/internal/controller/operator/factory/vmdistributed/zone.go
+++ b/internal/controller/operator/factory/vmdistributed/zone.go
@@ -18,6 +18,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -244,7 +245,7 @@ func (zs *zones) updateLB(ctx context.Context, rclient client.Client, cr *vmv1al
return reconcile.VMAuth(ctx, rclient, vmAuth, nil, &owner)
}
-func getMetricsAddrs(ctx context.Context, rclient client.Client, vmAgent *vmv1beta1.VMAgent) map[string]struct{} {
+func getMetricsAddrs(ctx context.Context, rclient client.Client, vmAgent *vmv1beta1.VMAgent) sets.Set[string] {
var esl discoveryv1.EndpointSliceList
o := client.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set{discoveryv1.LabelServiceName: vmAgent.PrefixedName()}),
@@ -257,7 +258,7 @@ func getMetricsAddrs(ctx context.Context, rclient client.Client, vmAgent *vmv1be
if len(esl.Items) == 0 {
return nil
}
- addrs := make(map[string]struct{})
+ addrs := sets.New[string]()
for i := range esl.Items {
es := &esl.Items[i]
var port int32
@@ -286,7 +287,7 @@ func getMetricsAddrs(ctx context.Context, rclient client.Client, vmAgent *vmv1be
Scheme: strings.ToLower(vmAgent.ProbeScheme()),
Path: vmAgent.GetMetricsPath(),
}
- addrs[u.String()] = struct{}{}
+ addrs.Insert(u.String())
}
}
}
diff --git a/internal/controller/operator/factory/vmsingle/vmsingle.go b/internal/controller/operator/factory/vmsingle/vmsingle.go
index 16e059118..ebf5c4e2a 100644
--- a/internal/controller/operator/factory/vmsingle/vmsingle.go
+++ b/internal/controller/operator/factory/vmsingle/vmsingle.go
@@ -186,7 +186,7 @@ func makeSpec(ctx context.Context, cr *vmv1beta1.VMSingle) (*corev1.PodTemplateS
}
}
- volumes, vmMounts, err := build.StorageVolumeMountsTo(cr.Spec.Volumes, cr.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(cr.Spec.Volumes, cr.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName, false)
if err != nil {
return nil, err
}
diff --git a/internal/controller/operator/factory/vtcluster/insert.go b/internal/controller/operator/factory/vtcluster/insert.go
index 5a8ea9422..0bbc70994 100644
--- a/internal/controller/operator/factory/vtcluster/insert.go
+++ b/internal/controller/operator/factory/vtcluster/insert.go
@@ -307,12 +307,16 @@ func createOrUpdateVTInsertService(ctx context.Context, rclient client.Client, c
var prevSvc, prevAdditionalSvc *corev1.Service
if prevCR != nil && prevCR.Spec.Insert != nil {
prevSvc = buildVTInsertService(prevCR)
- prevAdditionalSvc = build.AdditionalServiceFromDefault(prevSvc, prevCR.Spec.Insert.ServiceSpec)
+ prevAdditionalSvcBase := *prevSvc
+ prevAdditionalSvcBase.Name = prevCR.PrefixedName(vmv1beta1.ClusterComponentInsert)
+ prevAdditionalSvc = build.AdditionalServiceFromDefault(&prevAdditionalSvcBase, prevCR.Spec.Insert.ServiceSpec)
}
svc := buildVTInsertService(cr)
owner := cr.AsOwner()
if err := cr.Spec.Insert.ServiceSpec.IsSomeAndThen(func(s *vmv1beta1.AdditionalServiceSpec) error {
- additionalSvc := build.AdditionalServiceFromDefault(svc, s)
+ additionalSvcBase := *svc
+ additionalSvcBase.Name = cr.PrefixedName(vmv1beta1.ClusterComponentInsert)
+ additionalSvc := build.AdditionalServiceFromDefault(&additionalSvcBase, s)
if additionalSvc.Name == svc.Name {
return fmt.Errorf("VTInsert additional service name: %q cannot be the same as crd.prefixedname: %q", additionalSvc.Name, svc.Name)
}
diff --git a/internal/controller/operator/factory/vtcluster/select.go b/internal/controller/operator/factory/vtcluster/select.go
index 2b2d9eb9b..66374b132 100644
--- a/internal/controller/operator/factory/vtcluster/select.go
+++ b/internal/controller/operator/factory/vtcluster/select.go
@@ -113,12 +113,16 @@ func createOrUpdateVTSelectService(ctx context.Context, rclient client.Client, c
var prevSvc, prevAdditionalSvc *corev1.Service
if prevCR != nil && prevCR.Spec.Select != nil {
prevSvc = buildVTSelectService(prevCR)
- prevAdditionalSvc = build.AdditionalServiceFromDefault(prevSvc, prevCR.Spec.Select.ServiceSpec)
+ prevAdditionalSvcBase := *prevSvc
+ prevAdditionalSvcBase.Name = prevCR.PrefixedName(vmv1beta1.ClusterComponentSelect)
+ prevAdditionalSvc = build.AdditionalServiceFromDefault(&prevAdditionalSvcBase, prevCR.Spec.Select.ServiceSpec)
}
svc := buildVTSelectService(cr)
owner := cr.AsOwner()
if err := cr.Spec.Select.ServiceSpec.IsSomeAndThen(func(s *vmv1beta1.AdditionalServiceSpec) error {
- additionalSvc := build.AdditionalServiceFromDefault(svc, s)
+ additionalSvcBase := *svc
+ additionalSvcBase.Name = cr.PrefixedName(vmv1beta1.ClusterComponentSelect)
+ additionalSvc := build.AdditionalServiceFromDefault(&additionalSvcBase, s)
if additionalSvc.Name == svc.Name {
return fmt.Errorf("VTSelect additional service name: %q cannot be the same as crd.prefixedname: %q", additionalSvc.Name, svc.Name)
}
diff --git a/internal/controller/operator/factory/vtcluster/storage.go b/internal/controller/operator/factory/vtcluster/storage.go
index e6ec80418..9222d7a3e 100644
--- a/internal/controller/operator/factory/vtcluster/storage.go
+++ b/internal/controller/operator/factory/vtcluster/storage.go
@@ -207,7 +207,9 @@ func buildVTStorageSTSSpec(cr *vmv1.VTCluster) (*appsv1.StatefulSet, error) {
}
build.StatefulSetAddCommonParams(stsSpec, &cr.Spec.Storage.CommonAppsParams)
storageSpec := cr.Spec.Storage.Storage
- storageSpec.IntoSTSVolume(cr.Spec.Storage.GetStorageVolumeName(), &stsSpec.Spec)
+ if err := storageSpec.IntoSTSVolume(cr.Spec.Storage.GetStorageVolumeName(), &stsSpec.Spec); err != nil {
+ return nil, err
+ }
stsSpec.Spec.VolumeClaimTemplates = append(stsSpec.Spec.VolumeClaimTemplates, cr.Spec.Storage.ClaimTemplates...)
return stsSpec, nil
diff --git a/internal/controller/operator/factory/vtcluster/vmauth_lb.go b/internal/controller/operator/factory/vtcluster/vmauth_lb.go
index 0c2435ed7..f7b067662 100644
--- a/internal/controller/operator/factory/vtcluster/vmauth_lb.go
+++ b/internal/controller/operator/factory/vtcluster/vmauth_lb.go
@@ -286,7 +286,7 @@ func createOrUpdateLBProxyService(ctx context.Context, rclient client.Client, cr
b.SetFinalLabels(labels.Merge(b.FinalLabels(), map[string]string{
vmv1beta1.VMAuthLBServiceProxyTargetLabel: string(kind),
}))
- b.SetSelectorLabels(cr.SelectorLabels(vmv1beta1.ClusterComponentBalancer))
+ b.SetSelectorLabels(r.SelectorLabels(vmv1beta1.ClusterComponentBalancer))
return b
}
b := builder(cr)
diff --git a/internal/controller/operator/factory/vtsingle/vtsingle.go b/internal/controller/operator/factory/vtsingle/vtsingle.go
index 061ef7d76..5d3edbb1b 100644
--- a/internal/controller/operator/factory/vtsingle/vtsingle.go
+++ b/internal/controller/operator/factory/vtsingle/vtsingle.go
@@ -191,7 +191,7 @@ func makePodSpec(r *vmv1.VTSingle) (*corev1.PodTemplateSpec, error) {
ClaimName: r.PrefixedName(),
}
}
- volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName)
+ volumes, vmMounts, err := build.StorageVolumeMountsTo(r.Spec.Volumes, r.Spec.VolumeMounts, pvcSrc, storagePath, build.DataVolumeName, false)
if err != nil {
return nil, err
}
diff --git a/internal/controller/operator/objects_stat.go b/internal/controller/operator/objects_stat.go
index 7483f794a..adc0cddf2 100644
--- a/internal/controller/operator/objects_stat.go
+++ b/internal/controller/operator/objects_stat.go
@@ -5,6 +5,7 @@ import (
"sync"
"github.com/prometheus/client_golang/prometheus"
+ "k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/metrics"
)
@@ -16,19 +17,19 @@ var (
type objectCollector struct {
mu sync.Mutex
- objectsByController map[string]map[string]struct{}
+ objectsByController map[string]sets.Set[string]
}
func (oc *objectCollector) register(name, ns, controller string) {
oc.mu.Lock()
defer oc.mu.Unlock()
- oc.objectsByController[controller][ns+"/"+name] = struct{}{}
+ oc.objectsByController[controller].Insert(ns + "/" + name)
}
func (oc *objectCollector) deRegister(name, ns, controller string) {
oc.mu.Lock()
defer oc.mu.Unlock()
- delete(oc.objectsByController[controller], ns+"/"+name)
+ oc.objectsByController[controller].Delete(ns + "/" + name)
}
func (oc *objectCollector) countByController(controller string) float64 {
@@ -38,12 +39,12 @@ func (oc *objectCollector) countByController(controller string) float64 {
if !ok {
panic(fmt.Sprintf("BUG, controller: %s is not registered", controller))
}
- return float64(len(objects))
+ return float64(objects.Len())
}
func newCollector() *objectCollector {
oc := &objectCollector{
- objectsByController: map[string]map[string]struct{}{},
+ objectsByController: map[string]sets.Set[string]{},
}
registeredObjects := []string{
"vmagent", "vmalert", "vmsingle", "vmcluster", "vmalertmanager", "vmauth", "vlogs", "vlsingle",
@@ -52,7 +53,7 @@ func newCollector() *objectCollector {
"vtsingle", "vtcluster", "vmdistributed",
}
for _, controller := range registeredObjects {
- oc.objectsByController[controller] = map[string]struct{}{}
+ oc.objectsByController[controller] = sets.New[string]()
}
registry := metrics.Registry
instrumentMetric := func(controller string) prometheus.GaugeFunc {
diff --git a/internal/controller/operator/reconcile_and_track_status_test.go b/internal/controller/operator/reconcile_and_track_status_test.go
index 20e693cd1..1362121a9 100644
--- a/internal/controller/operator/reconcile_and_track_status_test.go
+++ b/internal/controller/operator/reconcile_and_track_status_test.go
@@ -72,7 +72,7 @@ func TestReconcileAndTrackStatus(t *testing.T) {
wantStatus: vmv1beta1.UpdateStatusOperational,
})
- // retryable conflict error, operational → expanding
+ // retryable conflict error, operational → expanding, error propagated for requeue
opSpec := vmv1beta1.VMAlertSpec{SelectAllByDefault: true}
f(opts{
object: &vmv1beta1.VMAlert{
@@ -87,9 +87,10 @@ func TestReconcileAndTrackStatus(t *testing.T) {
return ctrl.Result{}, k8serrors.NewConflict(schema.GroupResource{Group: "apps", Resource: "deployments"}, "test", fmt.Errorf("conflict"))
},
wantStatus: vmv1beta1.UpdateStatusExpanding,
+ wantErr: true,
})
- // retryable wait interrupted, operational → expanding
+ // retryable wait interrupted, operational → expanding, error propagated for requeue
f(opts{
object: &vmv1beta1.VMAlert{
ObjectMeta: metav1.ObjectMeta{Name: "test-vmalert", Namespace: "default"},
@@ -103,6 +104,7 @@ func TestReconcileAndTrackStatus(t *testing.T) {
return ctrl.Result{}, wait.ErrorInterrupted(fmt.Errorf("timeout"))
},
wantStatus: vmv1beta1.UpdateStatusExpanding,
+ wantErr: true,
})
// operational → expanding → operational
@@ -235,7 +237,7 @@ func TestVMClusterRemainsExpandingDuringPVCResize(t *testing.T) {
_, err := reconcileAndTrackStatus(context.Background(), fclient, cluster, func() (ctrl.Result, error) {
return ctrl.Result{}, wait.ErrorInterrupted(fmt.Errorf("pvc resize still in progress"))
})
- assert.NoError(t, err)
+ assert.Error(t, err)
got := &vmv1beta1.VMCluster{}
assert.NoError(t, fclient.Get(context.Background(), types.NamespacedName{Name: "test-vmcluster", Namespace: "default"}, got))
diff --git a/internal/controller/operator/vlagent_controller.go b/internal/controller/operator/vlagent_controller.go
index 12ec3ab1b..d495c1b3c 100644
--- a/internal/controller/operator/vlagent_controller.go
+++ b/internal/controller/operator/vlagent_controller.go
@@ -66,7 +66,7 @@ func (r *VLAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
var instance vmv1.VLAgent
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VLAgent instance
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -80,8 +80,8 @@ func (r *VLAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vlagent"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vlagent"}
return
}
diff --git a/internal/controller/operator/vlcluster_controller.go b/internal/controller/operator/vlcluster_controller.go
index 502838077..3af3f33c9 100644
--- a/internal/controller/operator/vlcluster_controller.go
+++ b/internal/controller/operator/vlcluster_controller.go
@@ -61,7 +61,7 @@ func (r *VLClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
var instance vmv1.VLCluster
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -74,8 +74,8 @@ func (r *VLClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
err = finalize.OnClusterDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vlcluster"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vlcluster"}
return
}
if err = finalize.AddFinalizer(ctx, r.Client, &instance); err != nil {
diff --git a/internal/controller/operator/vlogs_controller.go b/internal/controller/operator/vlogs_controller.go
index 6be8408a1..53cad8e24 100644
--- a/internal/controller/operator/vlogs_controller.go
+++ b/internal/controller/operator/vlogs_controller.go
@@ -67,7 +67,7 @@ func (r *VLogsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (resu
var instance vmv1beta1.VLogs
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -80,8 +80,8 @@ func (r *VLogsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (resu
err = finalize.OnVLogsDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vlogs"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vlogs"}
return
}
l.Info("VLogs CustomResource transited into read-only state. Please migrate to the VLSingle.")
diff --git a/internal/controller/operator/vlsingle_controller.go b/internal/controller/operator/vlsingle_controller.go
index b2dc2841c..ad7867464 100644
--- a/internal/controller/operator/vlsingle_controller.go
+++ b/internal/controller/operator/vlsingle_controller.go
@@ -62,7 +62,7 @@ func (r *VLSingleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
var instance vmv1.VLSingle
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -75,8 +75,8 @@ func (r *VLSingleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
err = finalize.OnVLSingleDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vlsingle"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vlsingle"}
return
}
if err = finalize.AddFinalizer(ctx, r.Client, &instance); err != nil {
diff --git a/internal/controller/operator/vmagent_controller.go b/internal/controller/operator/vmagent_controller.go
index 508c4e308..eb6da2f5e 100644
--- a/internal/controller/operator/vmagent_controller.go
+++ b/internal/controller/operator/vmagent_controller.go
@@ -85,7 +85,7 @@ func (r *VMAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
ctx = logger.AddToContext(ctx, l)
var instance vmv1beta1.VMAgent
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMAgent instance
@@ -105,8 +105,8 @@ func (r *VMAgentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmagent"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmagent"}
return
}
diff --git a/internal/controller/operator/vmalert_controller.go b/internal/controller/operator/vmalert_controller.go
index 1b05f3c7f..0a1521414 100644
--- a/internal/controller/operator/vmalert_controller.go
+++ b/internal/controller/operator/vmalert_controller.go
@@ -72,7 +72,7 @@ func (r *VMAlertReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
var instance vmv1beta1.VMAlert
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -91,8 +91,8 @@ func (r *VMAlertReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmalert"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmalert"}
return
}
diff --git a/internal/controller/operator/vmalertmanager_controller.go b/internal/controller/operator/vmalertmanager_controller.go
index 4b8f418cc..d1bd82f45 100644
--- a/internal/controller/operator/vmalertmanager_controller.go
+++ b/internal/controller/operator/vmalertmanager_controller.go
@@ -75,7 +75,7 @@ func (r *VMAlertmanagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
var instance vmv1beta1.VMAlertmanager
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
err = &getError{err, "vmalertmanager", req}
@@ -92,8 +92,8 @@ func (r *VMAlertmanagerReconciler) Reconcile(ctx context.Context, req ctrl.Reque
err = finalize.OnVMAlertManagerDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmalertmanager"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmalertmanager"}
return
}
diff --git a/internal/controller/operator/vmalertmanagerconfig_controller.go b/internal/controller/operator/vmalertmanagerconfig_controller.go
index 215d600af..bb0bf5b86 100644
--- a/internal/controller/operator/vmalertmanagerconfig_controller.go
+++ b/internal/controller/operator/vmalertmanagerconfig_controller.go
@@ -62,7 +62,7 @@ func (r *VMAlertmanagerConfigReconciler) Reconcile(ctx context.Context, req ctrl
l := r.Log.WithValues("vmalertmanagerconfig", req.Name, "namespace", req.Namespace)
var instance vmv1beta1.VMAlertmanagerConfig
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -71,8 +71,8 @@ func (r *VMAlertmanagerConfigReconciler) Reconcile(ctx context.Context, req ctrl
}
RegisterObjectStat(&instance, "vmalertmanagerconfig")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmalertmanagerconfig"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmalertmanagerconfig"}
return
}
if alertmanagerReconcileLimit.Throttle() {
@@ -91,7 +91,7 @@ func (r *VMAlertmanagerConfigReconciler) Reconcile(ctx context.Context, req ctrl
for i := range objects.Items {
item := &objects.Items[i]
- if !item.DeletionTimestamp.IsZero() || item.Spec.ParsingError != "" || item.IsUnmanaged() {
+ if !item.DeletionTimestamp.IsZero() || item.Status.ParsingSpecError != "" || item.IsUnmanaged() {
continue
}
diff --git a/internal/controller/operator/vmanomaly_controller.go b/internal/controller/operator/vmanomaly_controller.go
index 7732c59b6..1c252dc47 100644
--- a/internal/controller/operator/vmanomaly_controller.go
+++ b/internal/controller/operator/vmanomaly_controller.go
@@ -66,7 +66,7 @@ func (r *VMAnomalyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
var instance vmv1.VMAnomaly
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMAnomaly instance
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -80,8 +80,8 @@ func (r *VMAnomalyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmanomaly"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmanomaly"}
return
}
diff --git a/internal/controller/operator/vmauth_controller.go b/internal/controller/operator/vmauth_controller.go
index 0157a77ca..5a8792eb1 100644
--- a/internal/controller/operator/vmauth_controller.go
+++ b/internal/controller/operator/vmauth_controller.go
@@ -72,7 +72,7 @@ func (r *VMAuthReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
var instance vmv1beta1.VMAuth
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err := r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -91,8 +91,8 @@ func (r *VMAuthReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
}
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmauth"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmauth"}
return
}
diff --git a/internal/controller/operator/vmcluster_controller.go b/internal/controller/operator/vmcluster_controller.go
index 2369c7414..d471bb22c 100644
--- a/internal/controller/operator/vmcluster_controller.go
+++ b/internal/controller/operator/vmcluster_controller.go
@@ -50,7 +50,7 @@ func (r *VMClusterReconciler) Reconcile(ctx context.Context, request ctrl.Reques
var instance vmv1beta1.VMCluster
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Client.Get(ctx, request.NamespacedName, &instance); err != nil {
@@ -64,8 +64,8 @@ func (r *VMClusterReconciler) Reconcile(ctx context.Context, request ctrl.Reques
err = finalize.OnClusterDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmcluster"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmcluster"}
return
}
if err = finalize.AddFinalizer(ctx, r.Client, &instance); err != nil {
diff --git a/internal/controller/operator/vmdistributed_controller.go b/internal/controller/operator/vmdistributed_controller.go
index e3095fd10..a628acbcc 100644
--- a/internal/controller/operator/vmdistributed_controller.go
+++ b/internal/controller/operator/vmdistributed_controller.go
@@ -59,7 +59,7 @@ func (r *VMDistributedReconciler) Reconcile(ctx context.Context, req ctrl.Reques
// Handle reconcile errors
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch VMDistributed instance
@@ -79,8 +79,8 @@ func (r *VMDistributedReconciler) Reconcile(ctx context.Context, req ctrl.Reques
return
}
// Check parsing error
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "VMDistributed"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "VMDistributed"}
return
}
diff --git a/internal/controller/operator/vmnodescrape_controller.go b/internal/controller/operator/vmnodescrape_controller.go
index 3798f691b..2c4efa63b 100644
--- a/internal/controller/operator/vmnodescrape_controller.go
+++ b/internal/controller/operator/vmnodescrape_controller.go
@@ -61,7 +61,7 @@ func (r *VMNodeScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Request
var instance vmv1beta1.VMNodeScrape
ctx = logger.AddToContext(ctx, l)
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMNodeScrape instance
@@ -70,8 +70,8 @@ func (r *VMNodeScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Request
return
}
RegisterObjectStat(&instance, "vmnodescrape")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmnodescrape"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmnodescrape"}
return
}
if err = collectVMAgentScrapes(l, ctx, r.Client, r.BaseConf.WatchNamespaces, &instance); err != nil {
diff --git a/internal/controller/operator/vmpodscrape_controller.go b/internal/controller/operator/vmpodscrape_controller.go
index 7167e4373..fbb414f7a 100644
--- a/internal/controller/operator/vmpodscrape_controller.go
+++ b/internal/controller/operator/vmpodscrape_controller.go
@@ -61,7 +61,7 @@ func (r *VMPodScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Request)
ctx = logger.AddToContext(ctx, l)
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMPodScrape instance
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -69,8 +69,8 @@ func (r *VMPodScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Request)
return
}
RegisterObjectStat(&instance, "vmpodscrape")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmpodscrape"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmpodscrape"}
return
}
if err = collectVMAgentScrapes(l, ctx, r.Client, r.BaseConf.WatchNamespaces, &instance); err != nil {
diff --git a/internal/controller/operator/vmprobe_controller.go b/internal/controller/operator/vmprobe_controller.go
index c5c65b165..a6e9f2711 100644
--- a/internal/controller/operator/vmprobe_controller.go
+++ b/internal/controller/operator/vmprobe_controller.go
@@ -60,7 +60,7 @@ func (r *VMProbeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
var instance vmv1beta1.VMProbe
ctx = logger.AddToContext(ctx, l)
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMPodScrape instance
@@ -69,8 +69,8 @@ func (r *VMProbeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (re
return
}
RegisterObjectStat(&instance, "vmprobescrape")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmprobescrape"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmprobescrape"}
return
}
if err = collectVMAgentScrapes(l, ctx, r.Client, r.BaseConf.WatchNamespaces, &instance); err != nil {
diff --git a/internal/controller/operator/vmrule_controller.go b/internal/controller/operator/vmrule_controller.go
index 7ab942896..0d1b06175 100644
--- a/internal/controller/operator/vmrule_controller.go
+++ b/internal/controller/operator/vmrule_controller.go
@@ -64,7 +64,7 @@ func (r *VMRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
ctx = logger.AddToContext(ctx, l)
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMRule instance
@@ -74,8 +74,8 @@ func (r *VMRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
}
RegisterObjectStat(&instance, "vmrule")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmrule"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmrule"}
return
}
if alertReconcileLimit.Throttle() {
@@ -94,7 +94,7 @@ func (r *VMRuleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
for i := range objects.Items {
item := &objects.Items[i]
- if item.DeletionTimestamp != nil || item.Spec.ParsingError != "" {
+ if item.DeletionTimestamp != nil || item.Status.ParsingSpecError != "" {
continue
}
l := l.WithValues("vmalert", item.Name, "parent_namespace", item.Namespace)
diff --git a/internal/controller/operator/vmscrapeconfig_controller.go b/internal/controller/operator/vmscrapeconfig_controller.go
index 290b0e0ed..cb87e6a2a 100644
--- a/internal/controller/operator/vmscrapeconfig_controller.go
+++ b/internal/controller/operator/vmscrapeconfig_controller.go
@@ -60,7 +60,7 @@ func (r *VMScrapeConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
var instance vmv1beta1.VMScrapeConfig
ctx = logger.AddToContext(ctx, l)
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMScrapeConfig instance
@@ -69,8 +69,8 @@ func (r *VMScrapeConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return
}
RegisterObjectStat(&instance, "vmscrapeconfig")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmscrapeconfig"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmscrapeconfig"}
return
}
if err = collectVMAgentScrapes(l, ctx, r.Client, r.BaseConf.WatchNamespaces, &instance); err != nil {
diff --git a/internal/controller/operator/vmservicescrape_controller.go b/internal/controller/operator/vmservicescrape_controller.go
index a8021066d..acb3a5b22 100644
--- a/internal/controller/operator/vmservicescrape_controller.go
+++ b/internal/controller/operator/vmservicescrape_controller.go
@@ -60,7 +60,7 @@ func (r *VMServiceScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Requ
ctx = logger.AddToContext(ctx, l)
var instance vmv1beta1.VMServiceScrape
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
// Fetch the VMServiceScrape instance
@@ -69,8 +69,8 @@ func (r *VMServiceScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return
}
RegisterObjectStat(&instance, "vmservicescrape")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmservicescrape"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmservicescrape"}
return
}
if err = collectVMAgentScrapes(l, ctx, r.Client, r.BaseConf.WatchNamespaces, &instance); err != nil {
diff --git a/internal/controller/operator/vmsingle_controller.go b/internal/controller/operator/vmsingle_controller.go
index 84a1b7b75..359ffb70d 100644
--- a/internal/controller/operator/vmsingle_controller.go
+++ b/internal/controller/operator/vmsingle_controller.go
@@ -69,7 +69,7 @@ func (r *VMSingleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
var instance vmv1beta1.VMSingle
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -82,8 +82,8 @@ func (r *VMSingleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
err = finalize.OnVMSingleDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmsingle"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmsingle"}
return
}
if err = finalize.AddFinalizer(ctx, r.Client, &instance); err != nil {
diff --git a/internal/controller/operator/vmstaticscrape_controller.go b/internal/controller/operator/vmstaticscrape_controller.go
index c79ba7d03..e1543d03f 100644
--- a/internal/controller/operator/vmstaticscrape_controller.go
+++ b/internal/controller/operator/vmstaticscrape_controller.go
@@ -42,15 +42,15 @@ func (r *VMStaticScrapeReconciler) Reconcile(ctx context.Context, req ctrl.Reque
l := r.Log.WithValues("vmstaticscrape", req.Name, "namespace", req.Namespace)
var instance vmv1beta1.VMStaticScrape
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
err = &getError{err, "vmstaticscrape", req}
return
}
RegisterObjectStat(&instance, "vmstaticscrape")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmstaticscrape"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmstaticscrape"}
return
}
if err = collectVMAgentScrapes(l, ctx, r.Client, r.BaseConf.WatchNamespaces, &instance); err != nil {
diff --git a/internal/controller/operator/vmuser_controller.go b/internal/controller/operator/vmuser_controller.go
index e54b7b2f4..c652d7437 100644
--- a/internal/controller/operator/vmuser_controller.go
+++ b/internal/controller/operator/vmuser_controller.go
@@ -65,7 +65,7 @@ func (r *VMUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
l := r.Log.WithValues("vmuser", req.Name, "namespace", req.Namespace)
var instance vmv1beta1.VMUser
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -80,8 +80,8 @@ func (r *VMUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
return
}
RegisterObjectStat(&instance, "vmuser")
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vmuser"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vmuser"}
return
}
@@ -105,7 +105,7 @@ func (r *VMUserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
for i := range objects.Items {
item := &objects.Items[i]
- if !item.DeletionTimestamp.IsZero() || item.Spec.ParsingError != "" || item.IsUnmanaged() {
+ if !item.DeletionTimestamp.IsZero() || item.Status.ParsingSpecError != "" || item.IsUnmanaged() {
continue
}
// reconcile users for given vmauth.
diff --git a/internal/controller/operator/vtcluster_controller.go b/internal/controller/operator/vtcluster_controller.go
index 5163971a0..931d4da6c 100644
--- a/internal/controller/operator/vtcluster_controller.go
+++ b/internal/controller/operator/vtcluster_controller.go
@@ -61,7 +61,7 @@ func (r *VTClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
var instance vmv1.VTCluster
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -74,8 +74,8 @@ func (r *VTClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
err = finalize.OnClusterDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vtcluster"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vtcluster"}
return
}
if err = finalize.AddFinalizer(ctx, r.Client, &instance); err != nil {
diff --git a/internal/controller/operator/vtsingle_controller.go b/internal/controller/operator/vtsingle_controller.go
index 0f328e65f..369f98495 100644
--- a/internal/controller/operator/vtsingle_controller.go
+++ b/internal/controller/operator/vtsingle_controller.go
@@ -62,7 +62,7 @@ func (r *VTSingleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
var instance vmv1.VTSingle
defer func() {
- result, err = handleReconcileErr(ctx, r.Client, &instance, result, err)
+ result, err = handleReconcileErrWithStatus(ctx, r.Client, &instance, result, err)
}()
if err = r.Get(ctx, req.NamespacedName, &instance); err != nil {
@@ -75,8 +75,8 @@ func (r *VTSingleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (r
err = finalize.OnVTSingleDelete(ctx, r.Client, &instance)
return
}
- if instance.Spec.ParsingError != "" {
- err = &parsingError{instance.Spec.ParsingError, "vtsingle"}
+ if instance.Status.ParsingSpecError != "" {
+ err = &parsingError{instance.Status.ParsingSpecError, "vtsingle"}
return
}
if err = finalize.AddFinalizer(ctx, r.Client, &instance); err != nil {
diff --git a/internal/manager/manager.go b/internal/manager/manager.go
index 3cc576624..b0fc53828 100644
--- a/internal/manager/manager.go
+++ b/internal/manager/manager.go
@@ -601,14 +601,14 @@ func getMetricsServerMTLSOpts() ([]func(*tls.Config), error) {
}
func flagsAsMetrics(registry metrics.RegistererGatherer, flagSet *flag.FlagSet) {
- isSetMap := make(map[string]struct{})
+ isSetMap := sets.New[string]()
flagSet.Visit(func(f *flag.Flag) {
- isSetMap[f.Name] = struct{}{}
+ isSetMap.Insert(f.Name)
})
m := prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: "flag", Help: "defines provided flags for the operator"}, []string{"name", "value", "is_set"})
flagSet.VisitAll(func(f *flag.Flag) {
isSetStr := "false"
- if _, isSet := isSetMap[f.Name]; isSet {
+ if isSetMap.Has(f.Name) {
isSetStr = "true"
}
m.WithLabelValues(f.Name, f.Value.String(), isSetStr).Set(1)
diff --git a/internal/webhook/operator/v1/vlagent_webhook.go b/internal/webhook/operator/v1/vlagent_webhook.go
index ed7b0befa..8ce0e8ade 100644
--- a/internal/webhook/operator/v1/vlagent_webhook.go
+++ b/internal/webhook/operator/v1/vlagent_webhook.go
@@ -47,8 +47,8 @@ func (*VLAgentCustomValidator) ValidateCreate(_ context.Context, obj runtime.Obj
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VLAgentCustomValidator) ValidateUpdate(_ context.Context, _, newObj runti
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1/vlcluster_webhook.go b/internal/webhook/operator/v1/vlcluster_webhook.go
index c5527040c..887d6b8d6 100644
--- a/internal/webhook/operator/v1/vlcluster_webhook.go
+++ b/internal/webhook/operator/v1/vlcluster_webhook.go
@@ -47,8 +47,8 @@ func (*VLClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.O
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VLClusterCustomValidator) ValidateUpdate(_ context.Context, _, newObj run
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1/vlsingle_webhook.go b/internal/webhook/operator/v1/vlsingle_webhook.go
index 2cba5e76d..69c9a680d 100644
--- a/internal/webhook/operator/v1/vlsingle_webhook.go
+++ b/internal/webhook/operator/v1/vlsingle_webhook.go
@@ -47,8 +47,8 @@ func (*VLSingleCustomValidator) ValidateCreate(_ context.Context, obj runtime.Ob
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VLSingleCustomValidator) ValidateUpdate(_ context.Context, _, newObj runt
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1/vmanomaly_webhook.go b/internal/webhook/operator/v1/vmanomaly_webhook.go
index 87e2c86cd..846641831 100644
--- a/internal/webhook/operator/v1/vmanomaly_webhook.go
+++ b/internal/webhook/operator/v1/vmanomaly_webhook.go
@@ -47,8 +47,8 @@ func (v *VMAnomalyCustomValidator) ValidateCreate(ctx context.Context, obj runti
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (v *VMAnomalyCustomValidator) ValidateUpdate(ctx context.Context, oldObj, n
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1/vtcluster_webhook.go b/internal/webhook/operator/v1/vtcluster_webhook.go
index bacf0f88a..d68dbf0ec 100644
--- a/internal/webhook/operator/v1/vtcluster_webhook.go
+++ b/internal/webhook/operator/v1/vtcluster_webhook.go
@@ -47,8 +47,8 @@ func (*VTClusterCustomValidator) ValidateCreate(_ context.Context, obj runtime.O
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VTClusterCustomValidator) ValidateUpdate(_ context.Context, _, newObj run
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1/vtsingle_webhook.go b/internal/webhook/operator/v1/vtsingle_webhook.go
index 9e8cdd196..e7d2a9983 100644
--- a/internal/webhook/operator/v1/vtsingle_webhook.go
+++ b/internal/webhook/operator/v1/vtsingle_webhook.go
@@ -47,8 +47,8 @@ func (*VTSingleCustomValidator) ValidateCreate(_ context.Context, obj runtime.Ob
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VTSingleCustomValidator) ValidateUpdate(_ context.Context, _, newObj runt
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1alpha1/vmdistributed_webhook.go b/internal/webhook/operator/v1alpha1/vmdistributed_webhook.go
index c350d4f45..357040782 100644
--- a/internal/webhook/operator/v1alpha1/vmdistributed_webhook.go
+++ b/internal/webhook/operator/v1alpha1/vmdistributed_webhook.go
@@ -46,18 +46,14 @@ func (*VMDistributedCustomValidator) ValidateCreate(ctx context.Context, obj run
r, ok := obj.(*vmv1alpha1.VMDistributed)
if !ok {
err = fmt.Errorf("BUG: unexpected type: %T", obj)
- return
}
-
- if r.Spec.ParsingError != "" {
- err = errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ err = errors.New(r.Status.ParsingSpecError)
return
}
-
if err = r.Validate(); err != nil {
return
}
-
return
}
@@ -66,18 +62,14 @@ func (*VMDistributedCustomValidator) ValidateUpdate(ctx context.Context, _, newO
r, ok := newObj.(*vmv1alpha1.VMDistributed)
if !ok {
err = fmt.Errorf("BUG: unexpected type: %T", newObj)
- return
}
-
- if r.Spec.ParsingError != "" {
- err = errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ err = errors.New(r.Status.ParsingSpecError)
return
}
-
if err = r.Validate(); err != nil {
return
}
-
return
}
diff --git a/internal/webhook/operator/v1beta1/vlogs_webhook.go b/internal/webhook/operator/v1beta1/vlogs_webhook.go
index 5bc677efd..654b0248e 100644
--- a/internal/webhook/operator/v1beta1/vlogs_webhook.go
+++ b/internal/webhook/operator/v1beta1/vlogs_webhook.go
@@ -51,8 +51,8 @@ func (*VLogsCustomValidator) ValidateCreate(_ context.Context, obj runtime.Objec
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -66,8 +66,8 @@ func (*VLogsCustomValidator) ValidateUpdate(_ context.Context, _, newObj runtime
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmagent_webhook.go b/internal/webhook/operator/v1beta1/vmagent_webhook.go
index a11df550b..c7074f1af 100644
--- a/internal/webhook/operator/v1beta1/vmagent_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmagent_webhook.go
@@ -47,8 +47,8 @@ func (*VMAgentCustomValidator) ValidateCreate(_ context.Context, obj runtime.Obj
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VMAgentCustomValidator) ValidateUpdate(_ context.Context, _, newObj runti
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmalert_webhook.go b/internal/webhook/operator/v1beta1/vmalert_webhook.go
index f48b0a781..f51ceaab0 100644
--- a/internal/webhook/operator/v1beta1/vmalert_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmalert_webhook.go
@@ -47,8 +47,8 @@ func (*VMAlertCustomValidator) ValidateCreate(_ context.Context, obj runtime.Obj
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VMAlertCustomValidator) ValidateUpdate(_ context.Context, _, newObj runti
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmalertmanager_webhook.go b/internal/webhook/operator/v1beta1/vmalertmanager_webhook.go
index a6145460a..bd3bd5f5e 100644
--- a/internal/webhook/operator/v1beta1/vmalertmanager_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmalertmanager_webhook.go
@@ -47,8 +47,8 @@ func (*VMAlertmanagerCustomValidator) ValidateCreate(_ context.Context, obj runt
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VMAlertmanagerCustomValidator) ValidateUpdate(_ context.Context, _, newOb
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmalertmanagerconfig_webhook.go b/internal/webhook/operator/v1beta1/vmalertmanagerconfig_webhook.go
index a2e2b1f64..6e7bcf371 100644
--- a/internal/webhook/operator/v1beta1/vmalertmanagerconfig_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmalertmanagerconfig_webhook.go
@@ -47,8 +47,8 @@ func (*VMAlertmanagerConfigCustomValidator) ValidateCreate(_ context.Context, ob
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,10 +62,9 @@ func (*VMAlertmanagerConfigCustomValidator) ValidateUpdate(_ context.Context, _,
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
-
if err := r.Validate(); err != nil {
return nil, err
}
diff --git a/internal/webhook/operator/v1beta1/vmauth_webhook.go b/internal/webhook/operator/v1beta1/vmauth_webhook.go
index de93fcaa6..aa74544dc 100644
--- a/internal/webhook/operator/v1beta1/vmauth_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmauth_webhook.go
@@ -47,8 +47,8 @@ func (*VMAuthCustomValidator) ValidateCreate(_ context.Context, obj runtime.Obje
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VMAuthCustomValidator) ValidateUpdate(_ context.Context, _, newObj runtim
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmcluster_webhook.go b/internal/webhook/operator/v1beta1/vmcluster_webhook.go
index 3f062dd35..97ff0ecff 100644
--- a/internal/webhook/operator/v1beta1/vmcluster_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmcluster_webhook.go
@@ -47,10 +47,9 @@ func (*VMClusterCustomValidator) ValidateCreate(ctx context.Context, obj runtime
r, ok := obj.(*vmv1beta1.VMCluster)
if !ok {
err = fmt.Errorf("BUG: unexpected type: %T", obj)
- return
}
- if r.Spec.ParsingError != "" {
- err = errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ err = errors.New(r.Status.ParsingSpecError)
return
}
if err = r.Validate(); err != nil {
@@ -68,10 +67,9 @@ func (*VMClusterCustomValidator) ValidateUpdate(ctx context.Context, _, newObj r
r, ok := newObj.(*vmv1beta1.VMCluster)
if !ok {
err = fmt.Errorf("BUG: unexpected type: %T", newObj)
- return
}
- if r.Spec.ParsingError != "" {
- err = errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ err = errors.New(r.Status.ParsingSpecError)
return
}
if err = r.Validate(); err != nil {
diff --git a/internal/webhook/operator/v1beta1/vmnodescrape_webhook.go b/internal/webhook/operator/v1beta1/vmnodescrape_webhook.go
index 2b66d5e31..ae8f721dc 100644
--- a/internal/webhook/operator/v1beta1/vmnodescrape_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmnodescrape_webhook.go
@@ -47,8 +47,8 @@ func (*VMNodeScrapeCustomValidator) ValidateCreate(_ context.Context, obj runtim
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VMNodeScrapeCustomValidator) ValidateUpdate(_ context.Context, oldObj, ne
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmpodscrape_webhook.go b/internal/webhook/operator/v1beta1/vmpodscrape_webhook.go
index 694d680c3..5ddee6338 100644
--- a/internal/webhook/operator/v1beta1/vmpodscrape_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmpodscrape_webhook.go
@@ -47,10 +47,9 @@ func (*VMPodScrapeCustomValidator) ValidateCreate(_ context.Context, obj runtime
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
-
if err := r.Validate(); err != nil {
return nil, err
}
@@ -63,10 +62,9 @@ func (*VMPodScrapeCustomValidator) ValidateUpdate(_ context.Context, oldObj, new
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
-
if err := r.Validate(); err != nil {
return nil, err
}
diff --git a/internal/webhook/operator/v1beta1/vmprobe_webhook.go b/internal/webhook/operator/v1beta1/vmprobe_webhook.go
index 549648012..1a66d1788 100644
--- a/internal/webhook/operator/v1beta1/vmprobe_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmprobe_webhook.go
@@ -47,10 +47,9 @@ func (*VMProbeCustomValidator) ValidateCreate(_ context.Context, obj runtime.Obj
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
-
if err := r.Validate(); err != nil {
return nil, err
}
@@ -63,8 +62,8 @@ func (*VMProbeCustomValidator) ValidateUpdate(_ context.Context, oldObj, newObj
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/internal/webhook/operator/v1beta1/vmscrapeconfig_webhook.go b/internal/webhook/operator/v1beta1/vmscrapeconfig_webhook.go
index b39bc1c33..fa9f00170 100644
--- a/internal/webhook/operator/v1beta1/vmscrapeconfig_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmscrapeconfig_webhook.go
@@ -47,8 +47,8 @@ func (*VMScrapeConfigCustomValidator) ValidateCreate(_ context.Context, obj runt
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,6 +62,9 @@ func (*VMScrapeConfigCustomValidator) ValidateUpdate(_ context.Context, oldObj,
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
+ }
if err := r.Validate(); err != nil {
return nil, err
}
diff --git a/internal/webhook/operator/v1beta1/vmservicescrape_webhook.go b/internal/webhook/operator/v1beta1/vmservicescrape_webhook.go
index 4d102b66d..183cef77c 100644
--- a/internal/webhook/operator/v1beta1/vmservicescrape_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmservicescrape_webhook.go
@@ -48,8 +48,8 @@ func (*VMServiceScrapeCustomValidator) ValidateCreate(ctx context.Context, obj r
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -66,11 +66,8 @@ func (*VMServiceScrapeCustomValidator) ValidateUpdate(ctx context.Context, oldOb
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
- }
- if err := r.Validate(); err != nil {
- return nil, err
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if r.Spec.DiscoveryRole == "endpointslices" {
logger.WithContext(ctx).Info("deprecated discoverRole value `endpointslices`, use `endpointslice` instead.")
diff --git a/internal/webhook/operator/v1beta1/vmsingle_webhook.go b/internal/webhook/operator/v1beta1/vmsingle_webhook.go
index 428a8522c..09fce8567 100644
--- a/internal/webhook/operator/v1beta1/vmsingle_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmsingle_webhook.go
@@ -47,10 +47,9 @@ func (*VMSingleCustomValidator) ValidateCreate(ctx context.Context, obj runtime.
r, ok := obj.(*vmv1beta1.VMSingle)
if !ok {
err = fmt.Errorf("BUG: unexpected type: %T", obj)
- return
}
- if r.Spec.ParsingError != "" {
- err = errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ err = errors.New(r.Status.ParsingSpecError)
return
}
if err = r.Validate(); err != nil {
@@ -68,10 +67,9 @@ func (*VMSingleCustomValidator) ValidateUpdate(ctx context.Context, _, newObj ru
r, ok := newObj.(*vmv1beta1.VMSingle)
if !ok {
err = fmt.Errorf("BUG: unexpected type: %T", newObj)
- return
}
- if r.Spec.ParsingError != "" {
- err = errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ err = errors.New(r.Status.ParsingSpecError)
return
}
if err = r.Validate(); err != nil {
diff --git a/internal/webhook/operator/v1beta1/vmstaticscrape_webhook.go b/internal/webhook/operator/v1beta1/vmstaticscrape_webhook.go
index 4dd7831b7..34c8fd284 100644
--- a/internal/webhook/operator/v1beta1/vmstaticscrape_webhook.go
+++ b/internal/webhook/operator/v1beta1/vmstaticscrape_webhook.go
@@ -47,8 +47,8 @@ func (*VMStaticScrapeCustomValidator) ValidateCreate(_ context.Context, obj runt
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", obj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
@@ -62,8 +62,8 @@ func (*VMStaticScrapeCustomValidator) ValidateUpdate(_ context.Context, oldObj,
if !ok {
return nil, fmt.Errorf("BUG: unexpected type: %T", newObj)
}
- if r.Spec.ParsingError != "" {
- return nil, errors.New(r.Spec.ParsingError)
+ if r.Status.ParsingSpecError != "" {
+ return nil, errors.New(r.Status.ParsingSpecError)
}
if err := r.Validate(); err != nil {
return nil, err
diff --git a/test/e2e/suite/allure/result.go b/test/e2e/suite/allure/result.go
index 08ebf8751..77732150c 100644
--- a/test/e2e/suite/allure/result.go
+++ b/test/e2e/suite/allure/result.go
@@ -8,7 +8,6 @@ package allure
import (
"encoding/json"
"fmt"
- "maps"
"os"
"reflect"
"runtime"
@@ -18,6 +17,7 @@ import (
"github.com/google/uuid"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/types"
+ "k8s.io/apimachinery/pkg/util/sets"
)
const descriptionReportEntryName = "DESCRIPTION"
@@ -97,11 +97,11 @@ func (r *result) createFromSpecReport(specReport ginkgo.SpecReport) *result {
}
attachmentEntries := filterForAttachments(specReport.ReportEntries)
- var toSkip map[int]struct{}
+ var toSkip sets.Set[int]
r.Steps, toSkip = createSteps(specReport.SpecEvents, attachmentEntries)
for i, entry := range attachmentEntries {
- if _, ok := toSkip[i]; !ok {
+ if !toSkip.Has(i) {
var att attachment
err := json.Unmarshal([]byte(entry.Value.GetRawValue().(string)), &att)
@@ -135,9 +135,9 @@ func (r *result) createFromSpecReport(specReport ginkgo.SpecReport) *result {
return r
}
-func createSteps(events types.SpecEvents, entries types.ReportEntries) (steps []stepObject, indicesToSkip map[int]struct{}) {
+func createSteps(events types.SpecEvents, entries types.ReportEntries) (steps []stepObject, indicesToSkip sets.Set[int]) {
currentEndIndex := -1
- indicesToSkip = make(map[int]struct{})
+ indicesToSkip = sets.New[int]()
steps = []stepObject{}
for startEventIndex, startEvent := range events {
@@ -162,7 +162,7 @@ func createSteps(events types.SpecEvents, entries types.ReportEntries) (steps []
step.ChildrenSteps = childrenSteps
for i, entry := range entries {
- if _, ok := toSkip[i]; !ok {
+ if !toSkip.Has(i) {
if entry.TimelineLocation.Order > startEvent.TimelineLocation.Order &&
entry.TimelineLocation.Order < endEvent.TimelineLocation.Order {
var att attachment
@@ -174,12 +174,12 @@ func createSteps(events types.SpecEvents, entries types.ReportEntries) (steps []
}
step.addAttachment(&att)
- toSkip[i] = struct{}{}
+ toSkip.Insert(i)
}
}
}
- maps.Copy(indicesToSkip, toSkip)
+ indicesToSkip.Insert(toSkip.UnsortedList()...)
currentEndIndex = endIndex
}
diff --git a/test/e2e/upgrade/upgrade_test.go b/test/e2e/upgrade/upgrade_test.go
index 19df87fb2..46b560438 100644
--- a/test/e2e/upgrade/upgrade_test.go
+++ b/test/e2e/upgrade/upgrade_test.go
@@ -516,6 +516,7 @@ func ensureNoPodRollout(version string, genDeps func(string) []client.Object, ob
for i, o := range objs {
By(fmt.Sprintf("waiting for %s to become operational", displayNames[i]))
wg.Go(func() {
+ defer GinkgoRecover()
Eventually(func() error {
return suite.ExpectObjectStatus(ctx, k8sClient, o, nsns[i], vmv1beta1.UpdateStatusOperational)
}, waitTimeout, 5*time.Second).ShouldNot(HaveOccurred())
@@ -532,6 +533,7 @@ func ensureNoPodRollout(version string, genDeps func(string) []client.Object, ob
for i, o := range objs {
By(fmt.Sprintf("waiting for latest operator to reconcile %s", displayNames[i]))
wg.Go(func() {
+ defer GinkgoRecover()
Eventually(func() error {
return suite.ExpectObjectStatus(ctx, k8sClient, o, nsns[i], vmv1beta1.UpdateStatusOperational)
}, waitTimeout, 5*time.Second).ShouldNot(HaveOccurred())
@@ -642,6 +644,18 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
cr.Spec.K8sCollector.Enabled = true
cr.Spec.ServiceAccountName = "vlagent-collector"
})},
+ {version: "v0.68.4", cr: with(vmagent)},
+ {version: "v0.68.4", cr: with(vmagent, func(cr *vmv1beta1.VMAgent) {
+ cr.Spec.DaemonSetMode = true
+ })},
+ {version: "v0.68.4", cr: with(vmagent, func(cr *vmv1beta1.VMAgent) {
+ cr.Spec.StatefulMode = true
+ })},
+ {version: "v0.68.4", cr: with(vlagent)},
+ {version: "v0.68.4", cr: with(vlagent, func(cr *vmv1.VLAgent) {
+ cr.Spec.K8sCollector.Enabled = true
+ cr.Spec.ServiceAccountName = "vlagent-collector"
+ })},
},
},
// nolint:dupl
@@ -672,6 +686,10 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
{version: "v0.68.3", cr: with(vmauth)},
{version: "v0.68.3", cr: with(vmalertmanager)},
{version: "v0.68.3", cr: with(vmanomaly)},
+ {version: "v0.68.4", cr: with(vmalert)},
+ {version: "v0.68.4", cr: with(vmauth)},
+ {version: "v0.68.4", cr: with(vmalertmanager)},
+ {version: "v0.68.4", cr: with(vmanomaly)},
},
},
// nolint:dupl
@@ -696,6 +714,9 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
{version: "v0.68.3", cr: with(vmsingle)},
{version: "v0.68.3", cr: with(vtsingle)},
{version: "v0.68.3", cr: with(vlsingle)},
+ {version: "v0.68.4", cr: with(vmsingle)},
+ {version: "v0.68.4", cr: with(vtsingle)},
+ {version: "v0.68.4", cr: with(vlsingle)},
},
},
// nolint:dupl
@@ -726,6 +747,10 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
{version: "v0.68.3", cr: with(vlcluster, func(cr *vmv1.VLCluster) {
cr.Spec.RequestsLoadBalancer.Enabled = true
})},
+ {version: "v0.68.4", cr: with(vlcluster)},
+ {version: "v0.68.4", cr: with(vlcluster, func(cr *vmv1.VLCluster) {
+ cr.Spec.RequestsLoadBalancer.Enabled = true
+ })},
},
},
// nolint:dupl
@@ -756,6 +781,10 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
{version: "v0.68.3", cr: with(vtcluster, func(cr *vmv1.VTCluster) {
cr.Spec.RequestsLoadBalancer.Enabled = true
})},
+ {version: "v0.68.4", cr: with(vtcluster)},
+ {version: "v0.68.4", cr: with(vtcluster, func(cr *vmv1.VTCluster) {
+ cr.Spec.RequestsLoadBalancer.Enabled = true
+ })},
},
},
// nolint:dupl
@@ -768,6 +797,7 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
{version: "v0.68.1", cr: with(vmcluster)},
{version: "v0.68.2", cr: with(vmcluster)},
{version: "v0.68.3", cr: with(vmcluster)},
+ {version: "v0.68.4", cr: with(vmcluster)},
},
},
// nolint:dupl
@@ -786,6 +816,7 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
Image: vmv1beta1.Image{
Tag: "v1.136.0-enterprise",
},
+ AcceptEULA: true,
}
cr.Spec.License = &vmv1beta1.License{
KeyRef: &corev1.SecretKeySelector{
@@ -808,6 +839,53 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
Image: vmv1beta1.Image{
Tag: "v1.136.0-enterprise",
},
+ AcceptEULA: true,
+ }
+ cr.Spec.License = &vmv1beta1.License{
+ KeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "license",
+ },
+ Key: "key",
+ },
+ }
+ })},
+ {version: "v0.68.4", cr: with(vmcluster, func(cr *vmv1beta1.VMCluster) {
+ cr.Spec.RequestsLoadBalancer.Enabled = true
+ cr.Spec.VMStorage.Image.Tag = "v1.136.0-enterprise-cluster"
+ cr.Spec.VMSelect.Image.Tag = "v1.136.0-enterprise-cluster"
+ cr.Spec.VMInsert.Image.Tag = "v1.136.0-enterprise-cluster"
+ cr.Spec.RequestsLoadBalancer.Spec.Image.Tag = "v1.136.0-enterprise"
+ cr.Spec.VMStorage.VMBackup = &vmv1beta1.VMBackup{
+ Destination: "fs:///tmp",
+ DestinationDisableSuffixAdd: true,
+ Image: vmv1beta1.Image{
+ Tag: "v1.136.0-enterprise",
+ },
+ AcceptEULA: true,
+ }
+ cr.Spec.License = &vmv1beta1.License{
+ KeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: "license",
+ },
+ Key: "key",
+ },
+ }
+ })},
+ {version: "v0.68.4", cr: with(vmcluster, func(cr *vmv1beta1.VMCluster) {
+ cr.Spec.RequestsLoadBalancer.Enabled = true
+ cr.Spec.VMStorage.Image.Tag = "v1.136.0-enterprise-cluster"
+ cr.Spec.VMSelect.Image.Tag = "v1.136.0-enterprise-cluster"
+ cr.Spec.VMInsert.Image.Tag = "v1.136.0-enterprise-cluster"
+ cr.Spec.RequestsLoadBalancer.Spec.Image.Tag = "v1.136.0-enterprise"
+ cr.Spec.VMStorage.VMBackup = &vmv1beta1.VMBackup{
+ Destination: "fs:///tmp",
+ DestinationDisableSuffixAdd: true,
+ Image: vmv1beta1.Image{
+ Tag: "v1.136.0-enterprise",
+ },
+ AcceptEULA: true,
}
cr.Spec.License = &vmv1beta1.License{
KeyRef: &corev1.SecretKeySelector{
@@ -831,6 +909,7 @@ var _ = Describe("operator upgrade", Label("upgrade"), func() {
{version: "v0.68.1", cr: with(vmdistributed)},
{version: "v0.68.2", cr: with(vmdistributed)},
{version: "v0.68.3", cr: with(vmdistributed)},
+ {version: "v0.68.4", cr: with(vmdistributed)},
},
},
}))
diff --git a/test/e2e/utils_test.go b/test/e2e/utils_test.go
index af6e5f4b7..11c296345 100644
--- a/test/e2e/utils_test.go
+++ b/test/e2e/utils_test.go
@@ -14,6 +14,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -33,7 +34,7 @@ func expectPodCount(ctx context.Context, rclient client.Client, obj client.Objec
}
podsByHash := make(map[string][]corev1.Pod)
var labelName, kind string
- owners := make(map[string]struct{})
+ owners := sets.New[string]()
switch obj.(type) {
case *appsv1.StatefulSet:
labelName = "controller-revision-hash"
@@ -56,9 +57,7 @@ func expectPodCount(ctx context.Context, rclient client.Client, obj client.Objec
if ref.Kind != kind {
continue
}
- if _, ok := owners[ref.Name]; !ok {
- owners[ref.Name] = struct{}{}
- }
+ owners.Insert(ref.Name)
}
podsByHash[labelValue] = append(podsByHash[labelValue], pod)
}
@@ -129,6 +128,13 @@ func expectObjectStatusPaused(ctx context.Context,
return suite.ExpectObjectStatus(ctx, rclient, object, name, vmv1beta1.UpdateStatusPaused)
}
+func expectObjectStatusFailed(ctx context.Context,
+ rclient client.Client,
+ object client.Object,
+ name types.NamespacedName) error {
+ return suite.ExpectObjectStatus(ctx, rclient, object, name, vmv1beta1.UpdateStatusFailed)
+}
+
type httpRequestOpts struct {
dstURL string
method string
diff --git a/test/e2e/vlagent_test.go b/test/e2e/vlagent_test.go
index dc5a048dc..9e70ddfdb 100644
--- a/test/e2e/vlagent_test.go
+++ b/test/e2e/vlagent_test.go
@@ -526,5 +526,150 @@ var _ = Describe("test vlagent Controller", Label("vl", "agent", "vlagent"), fun
},
),
)
+
+ Context("status transitions", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vlagent-status-created"
+ cr := &vmv1.VLAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLAgentSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RemoteWrite: []vmv1.VLAgentRemoteWriteSpec{
+ {URL: "http://localhost:9428/internal/insert"},
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vlagent-status-update"
+ cr := &vmv1.VLAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLAgentSpec{
+ RemoteWrite: []vmv1.VLAgentRemoteWriteSpec{
+ {URL: "http://localhost:9428/internal/insert"},
+ },
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.LogLevel = "WARN"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vlagent-status-pause"
+ cr := &vmv1.VLAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLAgentSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RemoteWrite: []vmv1.VLAgentRemoteWriteSpec{
+ {URL: "http://localhost:9428/internal/insert"},
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VLAgent")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vlagent-status-unpause"
+ cr := &vmv1.VLAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLAgentSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ RemoteWrite: []vmv1.VLAgentRemoteWriteSpec{
+ {URL: "http://localhost:9428/internal/insert"},
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VLAgent")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vlcluster_test.go b/test/e2e/vlcluster_test.go
index 6f47379b8..adfb132a8 100644
--- a/test/e2e/vlcluster_test.go
+++ b/test/e2e/vlcluster_test.go
@@ -436,7 +436,166 @@ var _ = Describe("test vlcluster Controller", Label("vl", "cluster", "vlcluster"
},
),
)
- },
- )
+ Context("status transitions", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vlcluster-status-created"
+ cr := &vmv1.VLCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLClusterSpec{
+ VLInsert: &vmv1.VLInsert{},
+ VLSelect: &vmv1.VLSelect{},
+ VLStorage: &vmv1.VLStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vlcluster-status-update"
+ cr := &vmv1.VLCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLClusterSpec{
+ VLInsert: &vmv1.VLInsert{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ VLSelect: &vmv1.VLSelect{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ VLStorage: &vmv1.VLStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.VLStorage.RetentionPeriod = "2"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vlcluster-status-pause"
+ cr := &vmv1.VLCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLClusterSpec{
+ VLInsert: &vmv1.VLInsert{},
+ VLSelect: &vmv1.VLSelect{},
+ VLStorage: &vmv1.VLStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VLCluster")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vlcluster-status-unpause"
+ cr := &vmv1.VLCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLClusterSpec{
+ Paused: true,
+ VLInsert: &vmv1.VLInsert{},
+ VLSelect: &vmv1.VLSelect{},
+ VLStorage: &vmv1.VLStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VLCluster")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
+ })
})
diff --git a/test/e2e/vlsingle_test.go b/test/e2e/vlsingle_test.go
index 04924a227..7ca1ef60c 100644
--- a/test/e2e/vlsingle_test.go
+++ b/test/e2e/vlsingle_test.go
@@ -322,5 +322,142 @@ var _ = Describe("test vlsingle Controller", Label("vl", "single", "vlsingle"),
)
},
)
+
+ Context("status transitions", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vlsingle-status-created"
+ cr := &vmv1.VLSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vlsingle-status-update"
+ cr := &vmv1.VLSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.RetentionPeriod = "2"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vlsingle-status-pause"
+ cr := &vmv1.VLSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VLSingle")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vlsingle-status-unpause"
+ cr := &vmv1.VLSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VLSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VLSingle")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VLSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vmagent_test.go b/test/e2e/vmagent_test.go
index 90999c023..68b01b5b8 100644
--- a/test/e2e/vmagent_test.go
+++ b/test/e2e/vmagent_test.go
@@ -737,5 +737,150 @@ var _ = Describe("test vmagent Controller", Label("vm", "agent", "vmagent"), fun
return *dep.Spec.Replicas
}, eventualStatefulsetAppReadyTimeout).Should(Equal(updatedReplicas))
})
+
+ Context("status transitions", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vmagent-status-created"
+ cr := &vmv1beta1.VMAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAgentSpec{
+ RemoteWrite: []vmv1beta1.VMAgentRemoteWriteSpec{
+ {URL: "http://localhost:8428/api/v1/write"},
+ },
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vmagent-status-update"
+ cr := &vmv1beta1.VMAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAgentSpec{
+ RemoteWrite: []vmv1beta1.VMAgentRemoteWriteSpec{
+ {URL: "http://localhost:8428/api/v1/write"},
+ },
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.LogLevel = "WARN"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vmagent-status-pause"
+ cr := &vmv1beta1.VMAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAgentSpec{
+ RemoteWrite: []vmv1beta1.VMAgentRemoteWriteSpec{
+ {URL: "http://localhost:8428/api/v1/write"},
+ },
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VMAgent")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vmagent-status-unpause"
+ cr := &vmv1beta1.VMAgent{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAgentSpec{
+ RemoteWrite: []vmv1beta1.VMAgentRemoteWriteSpec{
+ {URL: "http://localhost:8428/api/v1/write"},
+ },
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VMAgent")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAgent{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vmalert_test.go b/test/e2e/vmalert_test.go
index 4cebcd58b..a66f57766 100644
--- a/test/e2e/vmalert_test.go
+++ b/test/e2e/vmalert_test.go
@@ -471,5 +471,162 @@ var _ = Describe("test vmalert Controller", Label("vm", "alert"), func() {
return *dep.Spec.Replicas
}, eventualStatefulsetAppReadyTimeout).Should(Equal(updatedReplicas))
})
+
+ Context("status transitions", func() {
+ JustBeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vmalert-status-created"
+ cr := &vmv1beta1.VMAlert{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ Datasource: vmv1beta1.VMAlertDatasourceSpec{
+ URL: "http://localhost:8428",
+ },
+ Notifier: &vmv1beta1.VMAlertNotifierSpec{
+ URL: "http://localhost:9093",
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vmalert-status-update"
+ cr := &vmv1beta1.VMAlert{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ Datasource: vmv1beta1.VMAlertDatasourceSpec{
+ URL: "http://localhost:8428",
+ },
+ Notifier: &vmv1beta1.VMAlertNotifierSpec{
+ URL: "http://localhost:9093",
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.LogLevel = "WARN"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vmalert-status-pause"
+ cr := &vmv1beta1.VMAlert{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ Datasource: vmv1beta1.VMAlertDatasourceSpec{
+ URL: "http://localhost:8428",
+ },
+ Notifier: &vmv1beta1.VMAlertNotifierSpec{
+ URL: "http://localhost:9093",
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VMAlert")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vmalert-status-unpause"
+ cr := &vmv1beta1.VMAlert{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ Datasource: vmv1beta1.VMAlertDatasourceSpec{
+ URL: "http://localhost:8428",
+ },
+ Notifier: &vmv1beta1.VMAlertNotifierSpec{
+ URL: "http://localhost:9093",
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VMAlert")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlert{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vmalertmanager_test.go b/test/e2e/vmalertmanager_test.go
index 9f5289fe3..090416db6 100644
--- a/test/e2e/vmalertmanager_test.go
+++ b/test/e2e/vmalertmanager_test.go
@@ -301,5 +301,138 @@ var _ = Describe("test vmalertmanager Controller", Label("vm", "alertmanager"),
return *sts.Spec.Replicas
}, eventualStatefulsetAppReadyTimeout).Should(Equal(updatedReplicas))
})
+
+ Context("status transitions", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vmalertmanager-status-created"
+ cr := &vmv1beta1.VMAlertmanager{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertmanagerSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vmalertmanager-status-update"
+ cr := &vmv1beta1.VMAlertmanager{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertmanagerSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.LogLevel = "debug"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vmalertmanager-status-pause"
+ cr := &vmv1beta1.VMAlertmanager{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertmanagerSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VMAlertmanager")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vmalertmanager-status-unpause"
+ cr := &vmv1beta1.VMAlertmanager{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAlertmanagerSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VMAlertmanager")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAlertmanager{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vmauth_test.go b/test/e2e/vmauth_test.go
index fab76df51..284796e01 100644
--- a/test/e2e/vmauth_test.go
+++ b/test/e2e/vmauth_test.go
@@ -776,5 +776,138 @@ var _ = Describe("test vmauth Controller", Label("vm", "auth"), func() {
return *dep.Spec.Replicas
}, eventualStatefulsetAppReadyTimeout).Should(Equal(updatedReplicas))
})
+
+ Context("status transitions", func() {
+ BeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vmauth-status-created"
+ cr := &vmv1beta1.VMAuth{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAuthSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vmauth-status-update"
+ cr := &vmv1beta1.VMAuth{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAuthSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.UseProxyProtocol = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vmauth-status-pause"
+ cr := &vmv1beta1.VMAuth{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAuthSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VMAuth")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vmauth-status-unpause"
+ cr := &vmv1beta1.VMAuth{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMAuthSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VMAuth")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMAuth{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vmcluster_test.go b/test/e2e/vmcluster_test.go
index 56bc17b6b..182c0c3ee 100644
--- a/test/e2e/vmcluster_test.go
+++ b/test/e2e/vmcluster_test.go
@@ -1839,6 +1839,177 @@ up{baz="bar"} 123
),
)
})
+
+ Context("status transitions", func() {
+ var nsn types.NamespacedName
+ var ctx context.Context
+ BeforeEach(func() {
+ ctx = context.Background()
+ nsn = types.NamespacedName{Namespace: namespace}
+ })
+
+ It("should reach operational after creation", func() {
+ nsn.Name = "vmcluster-status-created"
+ cr := &vmv1beta1.VMCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMClusterSpec{
+ RetentionPeriod: "1",
+ VMStorage: &vmv1beta1.VMStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ VMSelect: &vmv1beta1.VMSelect{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ VMInsert: &vmv1beta1.VMInsert{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vmcluster-status-update"
+ cr := &vmv1beta1.VMCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMClusterSpec{
+ RetentionPeriod: "1",
+ VMInsert: &vmv1beta1.VMInsert{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ VMSelect: &vmv1beta1.VMSelect{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ VMStorage: &vmv1beta1.VMStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.RetentionPeriod = "2"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vmcluster-status-pause"
+ cr := &vmv1beta1.VMCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMClusterSpec{
+ RetentionPeriod: "1",
+ VMStorage: &vmv1beta1.VMStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ VMSelect: &vmv1beta1.VMSelect{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ VMInsert: &vmv1beta1.VMInsert{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VMCluster")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vmcluster-status-unpause"
+ cr := &vmv1beta1.VMCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMClusterSpec{
+ RetentionPeriod: "1",
+ Paused: true,
+ VMStorage: &vmv1beta1.VMStorage{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ VMSelect: &vmv1beta1.VMSelect{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ VMInsert: &vmv1beta1.VMInsert{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{ReplicaCount: ptr.To[int32](1)},
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VMCluster")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
func assertStrictSecurity(podSpec corev1.PodSpec) {
diff --git a/test/e2e/vmsingle_test.go b/test/e2e/vmsingle_test.go
index ace347f2f..da1bdfdc5 100644
--- a/test/e2e/vmsingle_test.go
+++ b/test/e2e/vmsingle_test.go
@@ -569,5 +569,237 @@ var _ = Describe("test vmsingle Controller", Label("vm", "single"), func() {
return *dep.Spec.Replicas
}, eventualStatefulsetAppReadyTimeout).Should(Equal(updatedReplicas))
})
+
+ Context("status transitions", func() {
+ JustBeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vmsingle-status-created"
+ cr := &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vmsingle-status-update"
+ cr := &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.RetentionPeriod = "2"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vmsingle-status-pause"
+ cr := &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VMSingle")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vmsingle-status-unpause"
+ cr := &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ Paused: true,
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VMSingle")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→failed→operational on non-retryable error", func() {
+ nsn.Name = "vmsingle-status-failed"
+ cr := &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating with invalid spec to trigger failure (non-retryable error)")
+ Expect(k8sClient.Get(ctx, nsn, cr)).ToNot(HaveOccurred())
+ cr.Spec.ServiceAccountName = "invalid_name!"
+ Expect(k8sClient.Update(ctx, cr)).ToNot(HaveOccurred())
+
+ By("waiting for failed status")
+ Eventually(func() error {
+ return expectObjectStatusFailed(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("fixing the spec to recover")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.ServiceAccountName = ""
+ return k8sClient.Update(ctx, cr)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after recovery")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on retryable error", func() {
+ nsn.Name = "vmsingle-status-retryable"
+ cr := &vmv1beta1.VMSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1beta1.VMSingleSpec{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ RetentionPeriod: "1",
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating with unsatisfiable nodeSelector to trigger timeout (retryable wait.Interrupted error)")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ // Set an unsatisfiable nodeSelector to force a reconcile that cannot complete
+ cr.Spec.NodeSelector = map[string]string{"non-existent-node-label": "true"}
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status due to Pending pods")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("fixing the spec to recover")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.NodeSelector = nil
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after recovery")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1beta1.VMSingle{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
})
})
diff --git a/test/e2e/vtcluster_test.go b/test/e2e/vtcluster_test.go
index f3ce6443f..336abfb2d 100644
--- a/test/e2e/vtcluster_test.go
+++ b/test/e2e/vtcluster_test.go
@@ -340,7 +340,166 @@ var _ = Describe("test vtcluster Controller", Label("vt", "cluster", "vtcluster"
},
),
)
- },
- )
+ Context("status transitions", func() {
+ JustBeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vtcluster-status-created"
+ cr := &vmv1.VTCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VTClusterSpec{
+ Insert: &vmv1.VTInsert{},
+ Select: &vmv1.VTSelect{},
+ Storage: &vmv1.VTStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vtcluster-status-update"
+ cr := &vmv1.VTCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VTClusterSpec{
+ Insert: &vmv1.VTInsert{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ Select: &vmv1.VTSelect{
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ Storage: &vmv1.VTStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Storage.RetentionPeriod = "2"
+ return k8sClient.Update(ctx, cr)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vtcluster-status-pause"
+ cr := &vmv1.VTCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VTClusterSpec{
+ Insert: &vmv1.VTInsert{},
+ Select: &vmv1.VTSelect{},
+ Storage: &vmv1.VTStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("pausing the VTCluster")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = true
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vtcluster-status-unpause"
+ cr := &vmv1.VTCluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: namespace,
+ Name: nsn.Name,
+ },
+ Spec: vmv1.VTClusterSpec{
+ Paused: true,
+ Insert: &vmv1.VTInsert{},
+ Select: &vmv1.VTSelect{},
+ Storage: &vmv1.VTStorage{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To[int32](1),
+ },
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, cr)).ToNot(HaveOccurred())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("unpausing the VTCluster")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, cr); err != nil {
+ return err
+ }
+ cr.Spec.Paused = false
+ return k8sClient.Update(ctx, cr)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTCluster{}, nsn)
+ }, eventualStatefulsetAppReadyTimeout).WithContext(ctx).ShouldNot(HaveOccurred())
+ })
+ })
+ })
})
diff --git a/test/e2e/vtsingle_test.go b/test/e2e/vtsingle_test.go
index d0207228f..cda8ad91b 100644
--- a/test/e2e/vtsingle_test.go
+++ b/test/e2e/vtsingle_test.go
@@ -255,5 +255,142 @@ var _ = Describe("test vtsingle Controller", Label("vt", "single", "vtsingle"),
)
},
)
+
+ Context("status transitions", func() {
+ JustBeforeEach(func() {
+ ctx = context.Background()
+ })
+ It("should reach operational after creation", func() {
+ nsn.Name = "vtsingle-status-created"
+ vtSingle := &vmv1.VTSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: nsn.Name,
+ Namespace: namespace,
+ },
+ Spec: vmv1.VTSingleSpec{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To(int32(1)),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, vtSingle)).To(Succeed())
+ By("waiting for operational status after creation")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+ })
+
+ It("should transition operational→expanding→operational on spec update", func() {
+ nsn.Name = "vtsingle-status-update"
+ vtSingle := &vmv1.VTSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: nsn.Name,
+ Namespace: namespace,
+ },
+ Spec: vmv1.VTSingleSpec{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To(int32(1)),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, vtSingle)).To(Succeed())
+ By("waiting for operational status before update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+
+ By("updating the spec to trigger reconcile")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, vtSingle); err != nil {
+ return err
+ }
+ vtSingle.Spec.RetentionPeriod = "2"
+ return k8sClient.Update(ctx, vtSingle)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+
+ By("waiting for expanding status")
+ Eventually(func() error {
+ return expectObjectStatusExpanding(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).Should(Succeed())
+
+ By("waiting for operational status after update")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+ })
+
+ It("should transition operational→paused when paused", func() {
+ nsn.Name = "vtsingle-status-pause"
+ vtSingle := &vmv1.VTSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: nsn.Name,
+ Namespace: namespace,
+ },
+ Spec: vmv1.VTSingleSpec{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ ReplicaCount: ptr.To(int32(1)),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, vtSingle)).To(Succeed())
+ By("waiting for operational status before pause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+
+ By("pausing the VTSingle")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, vtSingle); err != nil {
+ return err
+ }
+ vtSingle.Spec.Paused = true
+ return k8sClient.Update(ctx, vtSingle)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+
+ By("waiting for paused status")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).Should(Succeed())
+ })
+
+ It("should transition paused→operational when unpaused", func() {
+ nsn.Name = "vtsingle-status-unpause"
+ vtSingle := &vmv1.VTSingle{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: nsn.Name,
+ Namespace: namespace,
+ },
+ Spec: vmv1.VTSingleSpec{
+ RetentionPeriod: "1",
+ CommonAppsParams: vmv1beta1.CommonAppsParams{
+ Paused: true,
+ ReplicaCount: ptr.To(int32(1)),
+ },
+ },
+ }
+ Expect(k8sClient.Create(ctx, vtSingle)).To(Succeed())
+ By("waiting for paused status after creation")
+ Eventually(func() error {
+ return expectObjectStatusPaused(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualExpandingTimeout).WithContext(ctx).Should(Succeed())
+
+ By("unpausing the VTSingle")
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, nsn, vtSingle); err != nil {
+ return err
+ }
+ vtSingle.Spec.Paused = false
+ return k8sClient.Update(ctx, vtSingle)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+
+ By("waiting for operational status after unpause")
+ Eventually(func() error {
+ return expectObjectStatusOperational(ctx, k8sClient, &vmv1.VTSingle{}, nsn)
+ }, eventualDeploymentAppReadyTimeout).WithContext(ctx).Should(Succeed())
+ })
+ })
})
})