[BUG] mongodb/apecloud-mysql/clickhouse/rabbitmq/tidb/zk cluster upgrade to v1 failed
Describe the bug
After upgrading existing v1alpha1 clusters with kbcli cluster upgrade-to-v1, the mongodb, apecloud-mysql, clickhouse, rabbitmq, and tidb clusters shown below never return to Running: pods crash-loop or role probes time out, and the clusters end up in Failed/Abnormal/Updating status.
To Reproduce
Steps to reproduce the behavior:
- mongodb: roleProbe stuck reporting recovering
echo yes|kbcli cluster upgrade-to-v1 mongo-cluster
┌──────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│apiVersion: apps.kubeblocks.io/v1alpha1 │ │apiVersion: apps.kubeblocks.io/v1 │
│kind: Cluster │ │kind: Cluster │
│metadata: │ │metadata: │
│ annotations: │ │ annotations: │
│ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1│ │ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1 │
│ kubeblocks.io/reconcile: "2025-04-28T07:40:59.332173722Z" │ │ kubeblocks.io/reconcile: "2025-04-28T07:40:59.332173722Z"│
│ creationTimestamp: "2025-04-28T07:23:40Z" │ │ creationTimestamp: "2025-04-28T07:23:40Z" │
│ finalizers: │ │ finalizers: │
│ - cluster.kubeblocks.io/finalizer │ │ - cluster.kubeblocks.io/finalizer │
│ generation: 1 │ │ generation: 1 │
│ name: mongo-cluster │ │ name: mongo-cluster │
│ namespace: default │ │ namespace: default │
│ resourceVersion: "36321" │ │ resourceVersion: "36321" │
│ uid: 812523fd-54c4-46fa-a9ff-56fcdb3f8422 │ │ uid: 812523fd-54c4-46fa-a9ff-56fcdb3f8422 │
│spec: │ │spec: │
│ componentSpecs: │ │ componentSpecs: │
│ - componentDef: mongodb │ │ - componentDef: mongodb-1.0.0-alpha.0 │
│ name: mongodb │ │ name: mongodb │
│ replicas: 3 │ │ replicas: 3 │
│ resources: │ │ resources: │
│ limits: │ │ limits: │
│ cpu: 100m │ │ cpu: 100m │
│ memory: 512Mi │ │ memory: 512Mi │
│ requests: │ │ requests: │
│ cpu: 100m │ │ cpu: 100m │
│ memory: 512Mi │ │ memory: 512Mi │
│ serviceVersion: 6.0.16 │ │ serviceVersion: 6.0.16 │
│ updateStrategy: BestEffortParallel │ │ volumeClaimTemplates: │
│ volumeClaimTemplates: │ │ - name: data │
│ - name: data │ │ spec: │
│ spec: │ │ accessModes: │
│ accessModes: │ │ - ReadWriteOnce │
│ - ReadWriteOnce │ │ resources: │
│ resources: │ │ requests: │
│ requests: │ │ storage: 20Gi │
│ storage: 20Gi │ │ terminationPolicy: WipeOut │
│ resources: │ │status: {} │
│ cpu: "0" │ │ │
│ memory: "0" │ └─────────────────────────────────────────────────────────────┘
│ storage: │
│ size: "0" │
│ terminationPolicy: WipeOut │
│status: {} │
│ │
└──────────────────────────────────────────────────────────────┘
Cluster mongo-cluster will be converted to v1 with output as yaml.
Please type 'Yes/yes' to confirm your operation: yes
mongo-cluster-mongodb
mongo-cluster-mongodb-mongodb
mongo-cluster-mongodb-mongodb-ro
Cluster mongo-cluster has converted successfully, you can view the spec:
kubectl get clusters.apps.kubeblocks.io mongo-cluster -n default -oyaml
kubectl get clusters.apps.kubeblocks.io mongo-cluster -n default -oyaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
annotations:
kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1
kubeblocks.io/reconcile: "2025-04-28T07:40:59.332173722Z"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"mongo-cluster","namespace":"default"},"spec":{"componentSpecs":[{"componentDef":"mongodb","name":"mongodb","replicas":3,"resources":{"limits":{"cpu":"100m","memory":"0.5Gi"},"requests":{"cpu":"100m","memory":"0.5Gi"}},"serviceVersion":"6.0.16","updateStrategy":"BestEffortParallel","volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]}],"terminationPolicy":"WipeOut"}}
creationTimestamp: "2025-04-28T07:23:40Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 2
name: mongo-cluster
namespace: default
resourceVersion: "63351"
uid: 812523fd-54c4-46fa-a9ff-56fcdb3f8422
spec:
componentSpecs:
- componentDef: mongodb-1.0.0-alpha.0
name: mongodb
replicas: 3
resources:
limits:
cpu: 100m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
serviceVersion: 6.0.16
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
terminationPolicy: WipeOut
status:
components:
mongodb:
message:
InstanceSet/mongo-cluster-mongodb: Role probe timeout, check whether the application
is available
phase: Failed
conditions:
- lastTransitionTime: "2025-04-28T08:07:36Z"
message: 'The operator has started the provisioning of Cluster: mongo-cluster'
observedGeneration: 2
reason: PreCheckSucceed
status: "True"
type: ProvisioningStarted
- lastTransitionTime: "2025-04-28T07:23:40Z"
message: Successfully applied for resources
observedGeneration: 2
reason: ApplyResourcesSucceed
status: "True"
type: ApplyResources
- lastTransitionTime: "2025-04-28T07:37:33Z"
message: all pods of components are ready, waiting for the probe detection successful
reason: AllReplicasReady
status: "True"
type: ReplicasReady
- lastTransitionTime: "2025-04-28T08:24:54Z"
message: 'cluster mongo-cluster is NOT ready, unavailable components: mongodb'
reason: ComponentsNotReady
status: "False"
type: Ready
observedGeneration: 2
phase: Failed
➜ ~ kubectl get cluster mongo-cluster
NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE
mongo-cluster WipeOut Failed 91m
➜ ~
➜ ~ kubectl get pod -l app.kubernetes.io/instance=mongo-cluster
NAME READY STATUS RESTARTS AGE
mongo-cluster-mongodb-0 2/2 Running 0 87m
mongo-cluster-mongodb-1 2/2 Running 0 79m
mongo-cluster-mongodb-2 2/2 Running 0 32m
➜ ~ kbcli cluster list-instances mongo-cluster
NAME NAMESPACE CLUSTER COMPONENT STATUS ROLE ACCESSMODE AZ CPU(REQUEST/LIMIT) MEMORY(REQUEST/LIMIT) STORAGE NODE CREATED-TIME
mongo-cluster-mongodb-0 default mongo-cluster mongodb Running primary cn-beijing-b 100m / 100m 512Mi / 512Mi data:20Gi 172.31.0.43/172.31.0.43 Apr 28,2025 15:28 UTC+0800
mongo-cluster-mongodb-1 default mongo-cluster mongodb Running secondary cn-beijing-b 100m / 100m 512Mi / 512Mi data:20Gi 172.31.0.49/172.31.0.49 Apr 28,2025 15:35 UTC+0800
mongo-cluster-mongodb-2 default mongo-cluster mongodb Running <none> cn-beijing-b 100m / 100m 512Mi / 512Mi data:20Gi 172.31.0.21/172.31.0.21 Apr 28,2025 16:22 UTC+0800
Pod logs:
kubectl logs mongo-cluster-mongodb-2
Defaulted container "mongodb" out of: mongodb, kbagent, init-syncer (init), kbagent-worker (init)
2025-04-28T08:23:09Z DEBUG Starting syncer {"version": "2bb107a3bb1d722a9ec678e15063c6f2d6ad1f8b"}
2025-04-28T08:23:09Z INFO Initialize DB manager
2025-04-28T08:23:09Z INFO HTTPServer Starting HTTP Server
2025-04-28T08:23:09Z INFO HTTPServer API route path {"method": "GET", "path": ["/v1.0/datasync", "/v1.0/getrole"]}
2025-04-28T08:23:09Z INFO HTTPServer API route path {"method": "POST", "path": ["/v1.0/rebuild", "/v1.0/resume", "/v1.0/leavemember", "/v1.0/pause", "/v1.0/start", "/v1.0/stop", "/v1.0/switchover"]}
2025-04-28T08:23:09Z INFO HTTPServer http server {"listen address": "0.0.0.0", "port": 3601}
2025-04-28T08:23:09Z INFO HA HA starting
2025-04-28T08:23:09Z INFO pinger Waiting for dns resolution to be ready
2025-04-28T08:23:09Z INFO pinger dns resolution is ready {"dns": ""}
2025-04-28T08:23:09Z INFO DCS-K8S pod selector: app.kubernetes.io/instance=mongo-cluster,app.kubernetes.io/managed-by=kubeblocks,apps.kubeblocks.io/component-name=mongodb
2025-04-28T08:23:09Z INFO DCS-K8S podlist: 3
2025-04-28T08:23:09Z DEBUG HA cluster info {"cluster": {"ClusterCompName":"mongo-cluster-mongodb","Namespace":"default","Replicas":3,"HaConfig":{"DeleteMembers":{}},"Leader":{"DBState":null,"Index":"60220","Name":"mongo-cluster-mongodb-0","Holder":"mongo-cluster-mongodb-0","Released":false,"AvailableTime":1745828589,"AcquireTime":1745825747,"RenewTime":1745828589,"TTL":15,"Resource":{"metadata":{"name":"mongo-cluster-mongodb-leader","namespace":"default","uid":"614db82f-c348-4647-b605-dab2f5ae7fdd","resourceVersion":"60220","creationTimestamp":"2025-04-28T07:35:47Z","labels":{"app.kubernetes.io/instance":"mongo-cluster","app.kubernetes.io/managed-by":"kubeblocks","apps.kubeblocks.io/component-name":"mongodb"},"annotations":{"acquire-time":"1745825747","available-time":"1745828589","extra":"","holder":"mongo-cluster-mongodb-0","leader":"mongo-cluster-mongodb-0","renew-time":"1745828589","ttl":"15"},"ownerReferences":[{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","name":"mongo-cluster","uid":"812523fd-54c4-46fa-a9ff-56fcdb3f8422"}],"managedFields":[{"manager":"syncer","operation":"Update","apiVersion":"v1","time":"2025-04-28T08:23:09Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:acquire-time":{},"f:available-time":{},"f:extra":{},"f:holder":{},"f:leader":{},"f:renew-time":{},"f:ttl":{}},"f:labels":{".":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:apps.kubeblocks.io/component-name":{}},"f:ownerReferences":{".":{},"k:{"uid":"812523fd-54c4-46fa-a9ff-56fcdb3f8422"}":{}}}}}]}}},"Members":[{"Index":"","Name":"mongo-cluster-mongodb-0","Role":"primary","PodIP":"172.31.0.56","DBPort":"27017","SyncerPort":"3601","UID":"d5ae98ce-55b8-4069-a9b0-344422244785","ComponentName":"mongodb","UseIP":false},{"Index":"","Name":"mongo-cluster-mongodb-1","Role":"secondary","PodIP":"172.31.0.66","DBPort":"27017","SyncerPort":"3601","UID":"31197f3c-a0df-4889-a479-bbd3c7a63313","ComponentName":"mongodb","UseIP":false},{"Index":"","Name":"mongo-cluster-mongodb-2","Role":"","PodIP":"172.31.0.85","DBPort":"27017","SyncerPort":"3601","UID":"c6ddb899-927f-4b27-b57b-9267567deca0","ComponentName":"mongodb","UseIP":false}],"Switchover":null,"Extra":null,"Resource":{"metadata":{"name":"mongo-cluster","namespace":"default","uid":"812523fd-54c4-46fa-a9ff-56fcdb3f8422","resourceVersion":"57987","generation":2,"creationTimestamp":"2025-04-28T07:23:40Z","annotations":{"kubeblocks.io/crd-api-version":"apps.kubeblocks.io/v1","kubeblocks.io/reconcile":"2025-04-28T07:40:59.332173722Z","kubectl.kubernetes.io/last-applied-configuration":"{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"mongo-cluster","namespace":"default"},"spec":{"componentSpecs":[{"componentDef":"mongodb","name":"mongodb","replicas":3,"resources":{"limits":{"cpu":"100m","memory":"0.5Gi"},"requests":{"cpu":"100m","memory":"0.5Gi"}},"serviceVersion":"6.0.16","updateStrategy":"BestEffortParallel","volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]}],"terminationPolicy":"WipeOut"}}\n"},"finalizers":["cluster.kubeblocks.io/finalizer"],"managedFields":[{"manager":"kubectl-client-side-apply","operation":"Update","apiVersion":"apps.kubeblocks.io/v1alpha1","time":"2025-04-28T07:23:40Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubectl.kubernetes.io/last-applied-configuration":{}}},"f:spec":{".":{},"f:terminationPolicy":{}}
}},{"manager":"kubeblocks","operation":"Update","apiVersion":"apps.kubeblocks.io/v1alpha1","time":"2025-04-28T07:40:59Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeblocks.io/reconcile":{}},"f:finalizers":{".":{},"v:"cluster.kubeblocks.io/finalizer"":{}}}}},{"manager":"kubeblocks","operation":"Update","apiVersion":"apps.kubeblocks.io/v1alpha1","time":"2025-04-28T08:07:36Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{".":{},"f:components":{".":{},"f:mongodb":{}}}},"subresource":"status"},{"manager":"kbcli","operation":"Update","apiVersion":"apps.kubeblocks.io/v1","time":"2025-04-28T08:22:08Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeblocks.io/crd-api-version":{}}},"f:spec":{"f:componentSpecs":{}}}},{"manager":"kubeblocks","operation":"Update","apiVersion":"apps.kubeblocks.io/v1","time":"2025-04-28T08:22:09Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:components":{"f:mongodb":{"f:phase":{}}},"f:conditions":{},"f:observedGeneration":{},"f:phase":{}}},"subresource":"status"}]},"spec":{"terminationPolicy":"WipeOut","componentSpecs":[{"name":"mongodb","componentDef":"mongodb-1.0.0-alpha.0","serviceVersion":"6.0.16","replicas":3,"resources":{"limits":{"cpu":"100m","memory":"512Mi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}}}}]}],"resources":{"cpu":"0","memory":"0"},"storage":{"size":"0"}},"status":{"observedGeneration":2,"phase":"Updating","components":{"mongodb":{"phase":"Updating"}},"conditions":[{"type":"ProvisioningStarted","status":"True","observedGeneration":2,"lastTransitionTime":"2025-04-28T08:07:36Z","reason":"PreCheckSucceed","message":"The operator has started the provisioning of Cluster: mongo-cluster"},{"type":"ApplyResources","status":"True","observedGeneration":2,"lastTransitionTime":"2025-04-28T07:23:40Z","reason":"ApplyResourcesSucceed","message":"Successfully applied for resources"},{"type":"ReplicasReady","status":"True","lastTransitionTime":"2025-04-28T07:37:33Z","reason":"AllReplicasReady","message":"all pods of components are ready, waiting for the probe detection successful"},{"type":"Ready","status":"True","lastTransitionTime":"2025-04-28T07:37:33Z","reason":"ClusterReady","message":"Cluster: mongo-cluster is ready, current phase is Running"}]}}}}
2025-04-28T08:23:10Z INFO Hypervisor Starting Hypervisor
2025-04-28T08:23:10Z INFO Hypervisor Start DB Service {"command": "/scripts/replicaset-setup.sh /scripts/replicaset-setup.sh"}
2025-04-28T08:23:10Z INFO HA check if DB Service is running
2025-04-28T08:23:10Z INFO HA DB Service is running
2025-04-28T08:23:10Z INFO Hypervisor Starting watcher on dbService
== DB == {"t":{"$date":"2025-04-28T08:23:10.429Z"},"s":"I", "c":"CONTROL", "id":5760901, "ctx":"-","msg":"Applied --setParameter options","attr":{"serverParameters":{"enableLocalhostAuthBypass":{"default":true,"value":true}}}}
== DB == {"t":{"$date":"2025-04-28T08:23:10.429Z"},"s":"I", "c":"CONTROL", "id":20697, "ctx":"-","msg":"Renamed existing log file","attr":{"oldLogPath":"/data/mongodb/logs/mongodb.log","newLogPath":"/data/mongodb/logs/mongodb.log.2025-04-28T08-23-10"}}
➜ ~
kbagent logs from the failing pod:
➜ ~ kubectl logs mongo-cluster-mongodb-2 kbagent
2025-04-28T08:23:09Z INFO create service Action {"actions": "memberLeave,roleProbe,switchover"}
2025-04-28T08:23:09Z INFO create service Probe {"probes": "roleProbe"}
2025-04-28T08:23:09Z INFO create service Streaming {"actions": ""}
2025-04-28T08:23:09Z INFO service Action started...
2025-04-28T08:23:09Z INFO service Probe started...
2025-04-28T08:23:09Z INFO service Streaming started...
2025-04-28T08:23:09Z INFO starting the HTTP server
2025-04-28T08:23:09Z INFO register service to server {"service": "Action", "method": "POST", "uri": "/v1.0/action"}
2025-04-28T08:23:09Z INFO register service to server {"service": "Probe", "method": "POST", "uri": "/v1.0/probe"}
2025-04-28T08:23:09Z INFO register service to server {"service": "Streaming", "method": "POST", "uri": "/v1.0/streaming"}
2025-04-28T08:23:09Z INFO probe started {"probe": "roleProbe", "config": {"instance":"mongo-cluster-mongodb","action":"roleProbe","periodSeconds":2}}
2025-04-28T08:23:09Z INFO starting the streaming server
2025-04-28T08:23:13Z INFO send probe event {"probe": "roleProbe", "probe": "roleProbe", "code": 0, "output": "secondary", "message": ""}
2025-04-28T08:23:13Z INFO send probe event {"probe": "roleProbe", "probe": "roleProbe", "code": 0, "output": "recovering", "message": ""}
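mongo-cluster-mongodb-2 never leaves the recovering state, so kbagent keeps reporting recovering instead of secondary and the role probe eventually times out. A possible way to double-check the member state directly (a sketch; it assumes curl and mongosh are present in the mongodb container and that the connection needs no extra credentials, which may not hold):
# role as seen by the syncer (the GET /v1.0/getrole route from the syncer log above); assumes curl exists in the container
kubectl exec mongo-cluster-mongodb-2 -c mongodb -- curl -s http://localhost:3601/v1.0/getrole
# replica-set member states as seen by MongoDB itself; assumes mongosh is available and auth is not required
kubectl exec mongo-cluster-mongodb-2 -c mongodb -- mongosh --quiet --eval 'rs.status().members.map(m => ({ name: m.name, state: m.stateStr }))'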
- apecloud-mysql: syncer panics with "dbService startup failed. Exiting..."
echo yes|kbcli cluster upgrade-to-v1 mysql-cluster
┌──────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│apiVersion: apps.kubeblocks.io/v1alpha1 │ │apiVersion: apps.kubeblocks.io/v1 │
│kind: Cluster │ │kind: Cluster │
│metadata: │ │metadata: │
│ annotations: │ │ annotations: │
│ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1│ │ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1 │
│ kubeblocks.io/reconcile: "2025-04-28T07:45:52.459685376Z" │ │ kubeblocks.io/reconcile: "2025-04-28T07:45:52.459685376Z"│
│ creationTimestamp: "2025-04-28T07:23:40Z" │ │ creationTimestamp: "2025-04-28T07:23:40Z" │
│ finalizers: │ │ finalizers: │
│ - cluster.kubeblocks.io/finalizer │ │ - cluster.kubeblocks.io/finalizer │
│ generation: 2 │ │ generation: 2 │
│ name: mysql-cluster │ │ name: mysql-cluster │
│ namespace: default │ │ namespace: default │
│ resourceVersion: "36285" │ │ resourceVersion: "36285" │
│ uid: 6d18627b-12c3-4958-8607-2bbc8b67df34 │ │ uid: 6d18627b-12c3-4958-8607-2bbc8b67df34 │
│spec: │ │spec: │
│ componentSpecs: │ │ componentSpecs: │
│ - componentDef: apecloud-mysql │ │ - componentDef: apecloud-mysql-1.0.0-alpha.0 │
│ name: mysql │ │ name: mysql │
│ replicas: 3 │ │ replicas: 3 │
│ resources: │ │ resources: │
│ limits: │ │ limits: │
│ cpu: 100m │ │ cpu: 100m │
│ memory: 512Mi │ │ memory: 512Mi │
│ requests: │ │ requests: │
│ cpu: 100m │ │ cpu: 100m │
│ memory: 512Mi │ │ memory: 512Mi │
│ serviceVersion: 8.0.30 │ │ serviceVersion: 8.0.30 │
│ volumeClaimTemplates: │ │ volumeClaimTemplates: │
│ - name: data │ │ - name: data │
│ spec: │ │ spec: │
│ accessModes: │ │ accessModes: │
│ - ReadWriteOnce │ │ - ReadWriteOnce │
│ resources: │ │ resources: │
│ requests: │ │ requests: │
│ storage: 20Gi │ │ storage: 20Gi │
│ resources: │ │ terminationPolicy: WipeOut │
│ cpu: "0" │ │status: {} │
│ memory: "0" │ │ │
│ storage: │ └─────────────────────────────────────────────────────────────┘
│ size: "0" │
│ terminationPolicy: WipeOut │
│status: {} │
│ │
└──────────────────────────────────────────────────────────────┘
Cluster mysql-cluster will be converted to v1 with output as yaml.
Please type 'Yes/yes' to confirm your operation: yes
mysql-cluster-mysql
Cluster mysql-cluster has converted successfully, you can view the spec:
kubectl get clusters.apps.kubeblocks.io mysql-cluster -n default -oyaml
➜ ~ kubectl get clusters.apps.kubeblocks.io mysql-cluster -n default -oyaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
annotations:
kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1
kubeblocks.io/reconcile: "2025-04-28T07:45:52.459685376Z"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"mysql-cluster","namespace":"default"},"spec":{"componentSpecs":[{"componentDef":"apecloud-mysql","name":"mysql","replicas":3,"resources":{"limits":{"cpu":"100m","memory":"0.5Gi"},"requests":{"cpu":"100m","memory":"0.5Gi"}},"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]}],"terminationPolicy":"WipeOut"}}
creationTimestamp: "2025-04-28T07:23:40Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 3
name: mysql-cluster
namespace: default
resourceVersion: "64936"
uid: 6d18627b-12c3-4958-8607-2bbc8b67df34
spec:
componentSpecs:
- componentDef: apecloud-mysql-1.0.0-alpha.0
name: mysql
replicas: 3
resources:
limits:
cpu: 100m
memory: 512Mi
requests:
cpu: 100m
memory: 512Mi
serviceVersion: 8.0.30
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
terminationPolicy: WipeOut
status:
components:
mysql:
message:
InstanceSet/mysql-cluster-mysql: '["mysql-cluster-mysql-2"]'
phase: Failed
conditions:
- lastTransitionTime: "2025-04-28T08:07:35Z"
message: 'The operator has started the provisioning of Cluster: mysql-cluster'
observedGeneration: 3
reason: PreCheckSucceed
status: "True"
type: ProvisioningStarted
- lastTransitionTime: "2025-04-28T07:23:42Z"
message: Successfully applied for resources
observedGeneration: 3
reason: ApplyResourcesSucceed
status: "True"
type: ApplyResources
- lastTransitionTime: "2025-04-28T07:47:58Z"
message: all pods of components are ready, waiting for the probe detection successful
reason: AllReplicasReady
status: "True"
type: ReplicasReady
- lastTransitionTime: "2025-04-28T08:25:37Z"
message: 'cluster mysql-cluster is NOT ready, unavailable components: mysql'
reason: ComponentsNotReady
status: "False"
type: Ready
observedGeneration: 3
phase: Failed
See the error:
kubectl get cluster mysql-cluster
NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE
mysql-cluster WipeOut Updating 97m
➜ ~
➜ ~ kubectl get pod -l app.kubernetes.io/instance=mysql-cluster
NAME READY STATUS RESTARTS AGE
mysql-cluster-mysql-0 4/4 Running 0 75m
mysql-cluster-mysql-1 4/4 Running 0 74m
mysql-cluster-mysql-2 4/5 CrashLoopBackOff 12 (13s ago) 38m
➜ ~
➜ ~ kubectl logs mysql-cluster-mysql-2 --previous
Defaulted container "mysql" out of: mysql, vtablet, mysql-exporter, kbagent, config-manager, init-data (init), init-syncer (init), init-kbagent (init), kbagent-worker (init)
2025-04-28T09:01:23Z DEBUG Starting syncer {"version": "51ee9e6b4f322f4fbc757259b95dd835472208fd"}
2025-04-28T09:01:23Z INFO Initialize DB manager
2025-04-28T09:01:23Z INFO HTTPServer Starting HTTP Server
2025-04-28T09:01:23Z INFO HTTPServer API route path {"method": "GET", "path": ["/v1.0/datasync", "/v1.0/getrole"]}
2025-04-28T09:01:23Z INFO HTTPServer API route path {"method": "POST", "path": ["/v1.0/pause", "/v1.0/rebuild", "/v1.0/switchover", "/v1.0/leavemember", "/v1.0/resume", "/v1.0/start", "/v1.0/stop"]}
2025-04-28T09:01:23Z INFO HTTPServer http server {"listen address": "0.0.0.0", "port": 3601}
2025-04-28T09:01:23Z INFO HA HA starting
2025-04-28T09:01:23Z INFO pinger Waiting for dns resolution to be ready
2025-04-28T09:01:23Z INFO pinger dns resolution is ready {"dns": ""}
2025-04-28T09:01:23Z INFO DCS-K8S pod selector: app.kubernetes.io/instance=mysql-cluster,app.kubernetes.io/managed-by=kubeblocks,apps.kubeblocks.io/component-name=mysql
2025-04-28T09:01:23Z INFO DCS-K8S podlist: 3
2025-04-28T09:01:23Z DEBUG HA cluster info {"cluster": {"ClusterCompName":"mysql-cluster-mysql","Namespace":"default","Replicas":3,"HaConfig":{"DeleteMembers":{}},"Leader":{"DBState":{"OpTimestamp":1745830865,"Extra":{"Binlog_File":"mysql-bin.000001","Binlog_Pos":"","gtid_executed":"e27d72f3-2404-11f0-9bdb-00163e51acbc:1-1336","gtid_purged":"","hostname":"mysql-cluster-mysql-0","read_only":"0","server_uuid":"e27d72f3-2404-11f0-9bdb-00163e51acbc","super_read_only":"0"}},"Index":"97617","Name":"mysql-cluster-mysql-0","Holder":"mysql-cluster-mysql-0","Released":false,"AvailableTime":1745830875,"AcquireTime":1745826455,"RenewTime":1745830875,"TTL":15,"Resource":{"metadata":{"name":"mysql-cluster-mysql-leader","namespace":"default","uid":"c1cc531e-46f2-4916-bed0-89614f0be859","resourceVersion":"97617","creationTimestamp":"2025-04-28T07:47:35Z","labels":{"app.kubernetes.io/instance":"mysql-cluster","app.kubernetes.io/managed-by":"kubeblocks","apps.kubeblocks.io/component-name":"mysql"},"annotations":{"acquire-time":"1745826455","dbstate":"{\"OpTimestamp\":1745830865,\"Extra\":{\"Binlog_File\":\"mysql-bin.000001\",\"Binlog_Pos\":\"\",\"gtid_executed\":\"e27d72f3-2404-11f0-9bdb-00163e51acbc:1-1336\",\"gtid_purged\":\"\",\"hostname\":\"mysql-cluster-mysql-0\",\"read_only\":\"0\",\"server_uuid\":\"e27d72f3-2404-11f0-9bdb-00163e51acbc\",\"super_read_only\":\"0\"}}","extra":"","leader":"mysql-cluster-mysql-0","renew-time":"1745830875","ttl":"15"},"ownerReferences":[{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","name":"mysql-cluster","uid":"6d18627b-12c3-4958-8607-2bbc8b67df34"}],"managedFields":[{"manager":"lorry","operation":"Update","apiVersion":"v1","time":"2025-04-28T09:01:15Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:acquire-time":{},"f:dbstate":{},"f:extra":{},"f:leader":{},"f:renew-time":{},"f:ttl":{}},"f:labels":{".":{},"f:app.kubernetes.io/instance":{},"f:app.kubernetes.io/managed-by":{},"f:apps.kubeblocks.io/component-name":{}},"f:ownerReferences":{".":{},"k:{\"uid\":\"6d18627b-12c3-4958-8607-2bbc8b67df34\"}":{}}}}}]}}},"Members":[{"Index":"","Name":"mysql-cluster-mysql-0","Role":"leader","PodIP":"172.31.0.70","DBPort":"3306","SyncerPort":"3601","UID":"c807dfb0-911d-4aab-9dc4-a0b7a9710284","ComponentName":"mysql","UseIP":false},{"Index":"","Name":"mysql-cluster-mysql-1","Role":"follower","PodIP":"172.31.0.72","DBPort":"3306","SyncerPort":"3601","UID":"cb8ec26e-795d-412a-a386-d978be610033","ComponentName":"mysql","UseIP":false},{"Index":"","Name":"mysql-cluster-mysql-2","Role":"","PodIP":"172.31.0.39","DBPort":"3306","SyncerPort":"3601","UID":"144d3401-50da-4703-92d5-5ac8829723ba","ComponentName":"mysql","UseIP":false}],"Switchover":null,"Extra":null,"Resource":{"metadata":{"name":"mysql-cluster","namespace":"default","uid":"6d18627b-12c3-4958-8607-2bbc8b67df34","resourceVersion":"93329","generation":3,"creationTimestamp":"2025-04-28T07:23:40Z","annotations":{"kubeblocks.io/crd-api-version":"apps.kubeblocks.io/v1","kubeblocks.io/reconcile":"2025-04-28T07:45:52.459685376Z","kubectl.kubernetes.io/last-applied-configuration":"{\"apiVersion\":\"apps.kubeblocks.io/v1alpha1\",\"kind\":\"Cluster\",\"metadata\":{\"annotations\":{},\"name\":\"mysql-cluster\",\"namespace\":\"default\"},\"spec\":{\"componentSpecs\":[{\"componentDef\":\"apecloud-mysql\",\"name\":\"mysql\",\"replicas\":3,\"resources\":{\"limits\":{\"cpu\":\"100m\",\"memory\":\"0.5Gi\"},\"requests\":{\"cpu\":\"100m\",\"memory\":\"0.5Gi\"}},\"volumeClaimTemplates\":[{\"name\":\"data\",\"spec
\":{\"accessModes\":[\"ReadWriteOnce\"],\"resources\":{\"requests\":{\"storage\":\"20Gi\"}},\"storageClassName\":null}}]}],\"terminationPolicy\":\"WipeOut\"}}\n"},"finalizers":["cluster.kubeblocks.io/finalizer"],"managedFields":[{"manager":"kubectl-client-side-apply","operation":"Update","apiVersion":"apps.kubeblocks.io/v1alpha1","time":"2025-04-28T07:23:40Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:kubectl.kubernetes.io/last-applied-configuration":{}}},"f:spec":{".":{},"f:terminationPolicy":{}}}},{"manager":"kubeblocks","operation":"Update","apiVersion":"apps.kubeblocks.io/v1alpha1","time":"2025-04-28T07:45:52Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeblocks.io/reconcile":{}},"f:finalizers":{".":{},"v:\"cluster.kubeblocks.io/finalizer\"":{}}}}},{"manager":"kubeblocks","operation":"Update","apiVersion":"apps.kubeblocks.io/v1alpha1","time":"2025-04-28T08:07:35Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{".":{},"f:components":{".":{},"f:mysql":{}}}},"subresource":"status"},{"manager":"kbcli","operation":"Update","apiVersion":"apps.kubeblocks.io/v1","time":"2025-04-28T08:22:20Z","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{"f:kubeblocks.io/crd-api-version":{}}},"f:spec":{"f:componentSpecs":{}}}},{"manager":"kubeblocks","operation":"Update","apiVersion":"apps.kubeblocks.io/v1","time":"2025-04-28T08:56:37Z","fieldsType":"FieldsV1","fieldsV1":{"f:status":{"f:components":{"f:mysql":{"f:message":{".":{},"f:InstanceSet/mysql-cluster-mysql":{}},"f:phase":{}}},"f:conditions":{},"f:observedGeneration":{},"f:phase":{}}},"subresource":"status"}]},"spec":{"terminationPolicy":"WipeOut","componentSpecs":[{"name":"mysql","componentDef":"apecloud-mysql-1.0.0-alpha.0","serviceVersion":"8.0.30","replicas":3,"resources":{"limits":{"cpu":"100m","memory":"512Mi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}}}}]}],"resources":{"cpu":"0","memory":"0"},"storage":{"size":"0"}},"status":{"observedGeneration":3,"phase":"Failed","components":{"mysql":{"phase":"Failed","message":{"InstanceSet/mysql-cluster-mysql":"[\"mysql-cluster-mysql-2\"]"}}},"conditions":[{"type":"ProvisioningStarted","status":"True","observedGeneration":3,"lastTransitionTime":"2025-04-28T08:07:35Z","reason":"PreCheckSucceed","message":"The operator has started the provisioning of Cluster: mysql-cluster"},{"type":"ApplyResources","status":"True","observedGeneration":3,"lastTransitionTime":"2025-04-28T07:23:42Z","reason":"ApplyResourcesSucceed","message":"Successfully applied for resources"},{"type":"ReplicasReady","status":"True","lastTransitionTime":"2025-04-28T07:47:58Z","reason":"AllReplicasReady","message":"all pods of components are ready, waiting for the probe detection successful"},{"type":"Ready","status":"False","lastTransitionTime":"2025-04-28T08:25:37Z","reason":"ComponentsNotReady","message":"cluster mysql-cluster is NOT ready, unavailable components: mysql"}]}}}}
2025-04-28T09:01:23Z INFO Hypervisor Starting Hypervisor
2025-04-28T09:01:23Z INFO Hypervisor Start DB Service {"command": "/usr/bin/bash -c cp /data/mysql/plugin/audit_log.so /usr/lib64/mysql/plugin/\n/scripts/setup.sh\n"}
2025-04-28T09:01:23Z INFO HA check if DB Service is running
2025-04-28T09:01:23Z INFO Hypervisor Starting watcher on dbService
2025-04-28T09:01:23Z INFO Hypervisor dbService exists {"state": "exit status 1"}
panic: dbService startup failed. Exiting...
goroutine 66 [running]:
github.com/apecloud/syncer/hypervisor.(*Watcher).Start(0xc0004bd840)
/src/hypervisor/watcher.go:60 +0x3e8
created by github.com/apecloud/syncer/hypervisor.NewHypervisor in goroutine 1
/src/hypervisor/hypevisor.go:58 +0x10c
➜ ~
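The hypervisor starts MySQL via "cp /data/mysql/plugin/audit_log.so /usr/lib64/mysql/plugin/" followed by /scripts/setup.sh, and panics as soon as that command exits with status 1, so the underlying MySQL error never reaches this log. Two ways to dig further (a sketch; the error-log path inside the container is an assumption and may differ in the apecloud-mysql image):
# last termination state of the mysql container (works even while it is crash-looping)
kubectl get pod mysql-cluster-mysql-2 -o jsonpath='{.status.containerStatuses[?(@.name=="mysql")].lastState.terminated}{"\n"}'
# if the container stays up long enough between restarts, check the MySQL error log; the path below is an assumption
kubectl exec mysql-cluster-mysql-2 -c mysql -- sh -c 'ls /data/mysql/log/ && tail -n 50 /data/mysql/log/*err*'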
- clickhouse: DB::Exception: Effective user of the process (root) does not match the owner of the data (1001)
echo yes|kbcli cluster upgrade-to-v1 chouse-cluster
┌──────────────────────────────────────────────────────────────┐ ┌────────────────────────────────────────────────────────┐
│apiVersion: apps.kubeblocks.io/v1alpha1 │ │apiVersion: apps.kubeblocks.io/v1 │
│kind: Cluster │ │kind: Cluster │
│metadata: │ │metadata: │
│ annotations: │ │ annotations: │
│ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1│ │ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1│
│ creationTimestamp: "2025-04-28T07:23:40Z" │ │ creationTimestamp: "2025-04-28T07:23:40Z" │
│ finalizers: │ │ finalizers: │
│ - cluster.kubeblocks.io/finalizer │ │ - cluster.kubeblocks.io/finalizer │
│ generation: 2 │ │ generation: 2 │
│ labels: │ │ labels: │
│ clusterdefinition.kubeblocks.io/name: clickhouse │ │ clusterdefinition.kubeblocks.io/name: clickhouse │
│ clusterversion.kubeblocks.io/name: "" │ │ clusterversion.kubeblocks.io/name: "" │
│ name: chouse-cluster │ │ name: chouse-cluster │
│ namespace: default │ │ namespace: default │
│ resourceVersion: "36251" │ │ resourceVersion: "36251" │
│ uid: d36340e7-39b5-4069-ae4e-c82dd4f06d28 │ │ uid: d36340e7-39b5-4069-ae4e-c82dd4f06d28 │
│spec: │ │spec: │
│ clusterDefinitionRef: clickhouse │ │ clusterDef: clickhouse │
│ componentSpecs: │ │ componentSpecs: │
│ - componentDef: clickhouse-24 │ │ - componentDef: clickhouse-24-1.0.0-alpha.0 │
│ name: clickhouse │ │ name: clickhouse │
│ replicas: 3 │ │ replicas: 3 │
│ resources: │ │ resources: │
│ limits: │ │ limits: │
│ cpu: 200m │ │ cpu: 200m │
│ memory: 1Gi │ │ memory: 1Gi │
│ requests: │ │ requests: │
│ cpu: 200m │ │ cpu: 200m │
│ memory: 1Gi │ │ memory: 1Gi │
│ serviceVersion: 24.8.3 │ │ serviceVersion: 24.8.3 │
│ volumeClaimTemplates: │ │ volumeClaimTemplates: │
│ - name: data │ │ - name: data │
│ spec: │ │ spec: │
│ accessModes: │ │ accessModes: │
│ - ReadWriteOnce │ │ - ReadWriteOnce │
│ resources: │ │ resources: │
│ requests: │ │ requests: │
│ storage: 20Gi │ │ storage: 20Gi │
│ resources: │ │ terminationPolicy: WipeOut │
│ cpu: "0" │ │ topology: standalone │
│ memory: "0" │ │status: {} │
│ storage: │ │ │
│ size: "0" │ └────────────────────────────────────────────────────────┘
│ terminationPolicy: WipeOut │
│ topology: standalone │
│status: {} │
│ │
└──────────────────────────────────────────────────────────────┘
Cluster chouse-cluster will be converted to v1 with output as yaml.
Please type 'Yes/yes' to confirm your operation: yes
chouse-cluster-clickhouse
Cluster chouse-cluster has converted successfully, you can view the spec:
kubectl get clusters.apps.kubeblocks.io chouse-cluster -n default -oyaml
➜ ~
➜ ~ kubectl get clusters.apps.kubeblocks.io chouse-cluster -n default -oyaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
annotations:
kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"chouse-cluster","namespace":"default"},"spec":{"clusterDefinitionRef":"clickhouse","componentSpecs":[{"name":"clickhouse","replicas":3,"resources":{"limits":{"cpu":"200m","memory":"1Gi"},"requests":{"cpu":"200m","memory":"1Gi"}},"serviceVersion":"24.8.3","volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]}],"terminationPolicy":"WipeOut","topology":"standalone"}}
creationTimestamp: "2025-04-28T07:23:40Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 3
labels:
clusterdefinition.kubeblocks.io/name: clickhouse
clusterversion.kubeblocks.io/name: ""
name: chouse-cluster
namespace: default
resourceVersion: "92625"
uid: d36340e7-39b5-4069-ae4e-c82dd4f06d28
spec:
clusterDef: clickhouse
componentSpecs:
- componentDef: clickhouse-24-1.0.0-alpha.0
name: clickhouse
replicas: 3
resources:
limits:
cpu: 200m
memory: 1Gi
requests:
cpu: 200m
memory: 1Gi
serviceVersion: 24.8.3
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
terminationPolicy: WipeOut
topology: standalone
status:
components:
clickhouse:
message:
InstanceSet/chouse-cluster-clickhouse: '["chouse-cluster-clickhouse-2"]'
phase: Failed
conditions:
- lastTransitionTime: "2025-04-28T08:07:34Z"
message: 'The operator has started the provisioning of Cluster: chouse-cluster'
observedGeneration: 3
reason: PreCheckSucceed
status: "True"
type: ProvisioningStarted
- lastTransitionTime: "2025-04-28T07:23:45Z"
message: Successfully applied for resources
observedGeneration: 3
reason: ApplyResourcesSucceed
status: "True"
type: ApplyResources
- lastTransitionTime: "2025-04-28T07:26:12Z"
message: all pods of components are ready, waiting for the probe detection successful
reason: AllReplicasReady
status: "True"
type: ReplicasReady
- lastTransitionTime: "2025-04-28T08:19:19Z"
message: 'cluster chouse-cluster is NOT ready, unavailable components: clickhouse'
reason: ComponentsNotReady
status: "False"
type: Ready
observedGeneration: 3
phase: Failed
See the error:
➜ ~ kubectl get cluster chouse-cluster
NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE
chouse-cluster clickhouse WipeOut Failed 99m
➜ ~
➜ ~ kubectl get pod -l app.kubernetes.io/instance=chouse-cluster
NAME READY STATUS RESTARTS AGE
chouse-cluster-clickhouse-0 1/1 Running 0 99m
chouse-cluster-clickhouse-1 1/1 Running 0 99m
chouse-cluster-clickhouse-2 1/2 CrashLoopBackOff 13 (2m7s ago) 45m
➜ ~
➜ ~ kubectl logs chouse-cluster-clickhouse-2 --previous
Defaulted container "clickhouse" out of: clickhouse, kbagent, init-kbagent (init), kbagent-worker (init)
clickhouse 09:00:53.89 INFO ==>
clickhouse 09:00:53.89 INFO ==> Welcome to the Bitnami clickhouse container
clickhouse 09:00:53.89 INFO ==> Subscribe to project updates by watching https://github.com/bitnami/containers
clickhouse 09:00:53.89 INFO ==> Submit issues and feature requests at https://github.com/bitnami/containers/issues
clickhouse 09:00:53.89 INFO ==> Upgrade to Tanzu Application Catalog for production environments to access custom-configured and pre-packaged software components. Gain enhanced features, including Software Bill of Materials (SBOM), CVE scan result reports, and VEX documents. To learn more, visit https://bitnami.com/enterprise
clickhouse 09:00:53.89 INFO ==>
clickhouse 09:00:53.89 INFO ==> ** Starting ClickHouse setup **
clickhouse 09:00:53.93 INFO ==> Copying mounted configuration from /bitnami/clickhouse/etc
cp: -r not specified; omitting directory '/bitnami/clickhouse/etc/users.d/default/..data'
clickhouse 09:00:53.94 INFO ==> ** ClickHouse setup finished! **
clickhouse 09:00:54.03 INFO ==> ** Starting ClickHouse **
Processing configuration file '/opt/bitnami/clickhouse/etc/config.xml'.
Merging configuration file '/opt/bitnami/clickhouse/etc/conf.d/00_default_overrides.xml'.
Logging information to /bitnami/clickhouse/log/clickhouse-server.log
Logging errors to /bitnami/clickhouse/log/clickhouse-server.err.log
2025.04.28 09:00:54.136112 [ 1 ] {} <Information> Application: Will watch for the process with pid 57
2025.04.28 09:00:54.136153 [ 57 ] {} <Information> Application: Forked a child process to watch
2025.04.28 09:00:54.136363 [ 57 ] {} <Information> SentryWriter: Sending crash reports is disabled
2025.04.28 09:00:54.332982 [ 57 ] {} <Information> Application: Starting ClickHouse 24.8.3.59 (revision: 54491, git hash: e729b9fa40eb9cf7b9b95c683f6c10791ce4c498, build id: 05C41E4C0EDD48EB5EB9409F112481F54AC251D7), PID 57
2025.04.28 09:00:54.333125 [ 57 ] {} <Information> Application: starting up
2025.04.28 09:00:54.333146 [ 57 ] {} <Information> Application: OS name: Linux, version: 5.4.250-9-velinux1-amd64, architecture: x86_64
2025.04.28 09:00:54.333269 [ 57 ] {} <Information> Jemalloc: Value for background_thread set to true (from false)
2025.04.28 09:00:54.336670 [ 57 ] {} <Information> Application: Available RAM: 1.00 GiB; logical cores: 8; used cores: 1.
2025.04.28 09:00:54.336700 [ 57 ] {} <Information> Application: Available CPU instruction sets: SSE, SSE2, SSE3, SSSE3, SSE41, SSE42, F16C, POPCNT, BMI1, BMI2, PCLMUL, AES, AVX, FMA, AVX2, AVX512F, AVX512DQ, AVX512IFMA, AVX512CD, AVX512BW, AVX512VL, AVX512VBMI, AVX512VBMI2, SHA, ADX, RDRAND, RDSEED, RDTSCP, CLFLUSHOPT, CLWB, XSAVE, OSXSAVE
2025.04.28 09:00:54.337354 [ 57 ] {} <Information> Application: Shutting down storages.
2025.04.28 09:00:54.337444 [ 57 ] {} <Information> Application: Waiting for background threads
2025.04.28 09:00:54.337509 [ 57 ] {} <Information> Application: Background threads finished in 0 ms
2025.04.28 09:00:54.337730 [ 57 ] {} <Error> Application: Code: 430. DB::Exception: Effective user of the process (root) does not match the owner of the data (1001). Run under 'sudo -u 1001'. (MISMATCHING_USERS_FOR_PROCESS_AND_DATA), Stack trace (when copying this message, always include the lines below):
0. DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x000000000dafea5b
1. DB::Exception::Exception(PreformattedMessage&&, int) @ 0x0000000007e58dec
2. DB::Exception::Exception<String&>(int, FormatStringHelperImpl<std::type_identity<String&>::type>, String&) @ 0x0000000007e82b2b
3. DB::assertProcessUserMatchesDataOwner(String const&, std::function<void (String const&)>) @ 0x000000000dbcf03c
4. DB::Server::main(std::vector<String, std::allocator<String>> const&) @ 0x000000000dcb33e8
5. Poco::Util::Application::run() @ 0x0000000015de3226
6. DB::Server::run() @ 0x000000000dcaeab0
7. Poco::Util::ServerApplication::run(int, char**) @ 0x0000000015dec067
8. mainEntryClickHouseServer(int, char**) @ 0x000000000dcabd06
9. main @ 0x0000000007e554ee
10. ? @ 0x00007fa64ee0c24a
11. ? @ 0x00007fa64ee0c305
12. _start @ 0x000000000628902e
(version 24.8.3.59 (official build))
2025.04.28 09:00:54.337755 [ 57 ] {} <Information> Application: shutting down
2025.04.28 09:00:54.337850 [ 58 ] {} <Information> BaseDaemon: Stop SignalListener thread
2025.04.28 09:00:54.343026 [ 1 ] {} <Information> Application: Child process exited normally with code 174.
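The data volume was written by the earlier v1alpha1 pod as uid 1001 (the Bitnami non-root user), while the recreated pod now runs ClickHouse as root, hence MISMATCHING_USERS_FOR_PROCESS_AND_DATA. A quick way to compare the pod security contexts and the data ownership (a sketch; the /bitnami/clickhouse/data path is inferred from the Bitnami layout in the log above):
# compare securityContext between a healthy replica and the failing one
kubectl get pod chouse-cluster-clickhouse-0 chouse-cluster-clickhouse-2 -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.securityContext}{"\n"}{end}'
# ownership of the data directory on a replica that is still running
kubectl exec chouse-cluster-clickhouse-0 -c clickhouse -- ls -lnd /bitnami/clickhouse/data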
- rabbitmq: connection attempts rejected with "Invalid challenge reply"
echo yes|kbcli cluster upgrade-to-v1 rabbitmq-cluster
┌──────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│apiVersion: apps.kubeblocks.io/v1alpha1 │ │apiVersion: apps.kubeblocks.io/v1 │
│kind: Cluster │ │kind: Cluster │
│metadata: │ │metadata: │
│ annotations: │ │ annotations: │
│ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1│ │ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1 │
│ kubeblocks.io/reconcile: "2025-04-28T07:40:59.302848658Z" │ │ kubeblocks.io/reconcile: "2025-04-28T07:40:59.302848658Z"│
│ creationTimestamp: "2025-04-28T07:23:41Z" │ │ creationTimestamp: "2025-04-28T07:23:41Z" │
│ finalizers: │ │ finalizers: │
│ - cluster.kubeblocks.io/finalizer │ │ - cluster.kubeblocks.io/finalizer │
│ generation: 2 │ │ generation: 2 │
│ name: rabbitmq-cluster │ │ name: rabbitmq-cluster │
│ namespace: default │ │ namespace: default │
│ resourceVersion: "36334" │ │ resourceVersion: "36334" │
│ uid: 16a24dc5-6c2e-4bc2-8a7e-27f9ff059e60 │ │ uid: 16a24dc5-6c2e-4bc2-8a7e-27f9ff059e60 │
│spec: │ │spec: │
│ componentSpecs: │ │ componentSpecs: │
│ - componentDef: rabbitmq │ │ - componentDef: rabbitmq-1.0.0-alpha.0 │
│ name: rabbitmq │ │ name: rabbitmq │
│ replicas: 3 │ │ replicas: 3 │
│ resources: │ │ resources: │
│ limits: │ │ limits: │
│ cpu: 500m │ │ cpu: 500m │
│ memory: 512Mi │ │ memory: 512Mi │
│ requests: │ │ requests: │
│ cpu: 500m │ │ cpu: 500m │
│ memory: 512Mi │ │ memory: 512Mi │
│ serviceAccountName: kb-rabbitmq-cluster │ │ serviceVersion: 3.13.7 │
│ serviceVersion: 3.13.7 │ │ volumeClaimTemplates: │
│ volumeClaimTemplates: │ │ - name: data │
│ - name: data │ │ spec: │
│ spec: │ │ accessModes: │
│ accessModes: │ │ - ReadWriteOnce │
│ - ReadWriteOnce │ │ resources: │
│ resources: │ │ requests: │
│ requests: │ │ storage: 20Gi │
│ storage: 20Gi │ │ terminationPolicy: WipeOut │
│ resources: │ │status: {} │
│ cpu: "0" │ │ │
│ memory: "0" │ └─────────────────────────────────────────────────────────────┘
│ storage: │
│ size: "0" │
│ terminationPolicy: WipeOut │
│status: {} │
│ │
└──────────────────────────────────────────────────────────────┘
Cluster rabbitmq-cluster will be converted to v1 with output as yaml.
Please type 'Yes/yes' to confirm your operation: yes
rabbitmq-cluster-rabbitmq
Cluster rabbitmq-cluster has converted successfully, you can view the spec:
kubectl get clusters.apps.kubeblocks.io rabbitmq-cluster -n default -oyaml
➜ ~
➜ ~ kubectl get clusters.apps.kubeblocks.io rabbitmq-cluster -n default -oyaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
annotations:
kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1
kubeblocks.io/reconcile: "2025-04-28T07:40:59.302848658Z"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"rabbitmq-cluster","namespace":"default"},"spec":{"componentSpecs":[{"componentDef":"rabbitmq","name":"rabbitmq","replicas":3,"resources":{"limits":{"cpu":"500m","memory":"0.5Gi"},"requests":{"cpu":"500m","memory":"0.5Gi"}},"serviceAccountName":"kb-rabbitmq-cluster","serviceVersion":null,"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]}],"terminationPolicy":"WipeOut"}}
creationTimestamp: "2025-04-28T07:23:41Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 3
name: rabbitmq-cluster
namespace: default
resourceVersion: "60050"
uid: 16a24dc5-6c2e-4bc2-8a7e-27f9ff059e60
spec:
componentSpecs:
- componentDef: rabbitmq-1.0.0-alpha.0
name: rabbitmq
replicas: 3
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 500m
memory: 512Mi
serviceVersion: 3.13.7
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
terminationPolicy: WipeOut
status:
components:
rabbitmq:
message:
InstanceSet/rabbitmq-cluster-rabbitmq: '["rabbitmq-cluster-rabbitmq-1"]'
phase: Updating
conditions:
- lastTransitionTime: "2025-04-28T08:07:36Z"
message: 'The operator has started the provisioning of Cluster: rabbitmq-cluster'
observedGeneration: 3
reason: PreCheckSucceed
status: "True"
type: ProvisioningStarted
- lastTransitionTime: "2025-04-28T07:23:48Z"
message: Successfully applied for resources
observedGeneration: 3
reason: ApplyResourcesSucceed
status: "True"
type: ApplyResources
- lastTransitionTime: "2025-04-28T07:36:14Z"
message: all pods of components are ready, waiting for the probe detection successful
reason: AllReplicasReady
status: "True"
type: ReplicasReady
- lastTransitionTime: "2025-04-28T07:36:14Z"
message: 'Cluster: rabbitmq-cluster is ready, current phase is Running'
reason: ClusterReady
status: "True"
type: Ready
observedGeneration: 3
phase: Updating
See the error:
➜ ~ kubectl get cluster rabbitmq-cluster
NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE
rabbitmq-cluster WipeOut Updating 102m
➜ ~
➜ ~ kubectl get pod -l app.kubernetes.io/instance=rabbitmq-cluster
NAME READY STATUS RESTARTS AGE
rabbitmq-cluster-rabbitmq-0 2/2 Running 0 102m
rabbitmq-cluster-rabbitmq-1 2/2 Running 0 98m
rabbitmq-cluster-rabbitmq-2 1/2 Running 8 (2m39s ago) 42m
➜ ~ kubectl logs rabbitmq-cluster-rabbitmq-2 --previous
Defaulted container "rabbitmq" out of: rabbitmq, kbagent, init-kbagent (init), kbagent-worker (init)
2025-04-28 08:58:17.087091+00:00 [error] <0.235.0> ** Connection attempt from node 'rabbit@rabbitmq-cluster-rabbitmq-1.rabbitmq-cluster-rabbitmq-headless.default' rejected. Invalid challenge reply. **
2025-04-28 08:58:17.087091+00:00 [error] <0.235.0>
2025-04-28 08:58:17.087050+00:00 [error] <0.233.0> ** Connection attempt from node 'rabbit@rabbitmq-cluster-rabbitmq-0.rabbitmq-cluster-rabbitmq-headless.default' rejected. Invalid challenge reply. **
2025-04-28 08:58:17.087050+00:00 [error] <0.233.0>
2025-04-28 08:58:18.098106+00:00 [error] <0.260.0> ** Connection attempt from node 'rabbit@rabbitmq-cluster-rabbitmq-1.rabbitmq-cluster-rabbitmq-headless.default' rejected. Invalid challenge reply. **
2025-04-28 08:58:18.098106+00:00 [error] <0.260.0>
2025-04-28 08:58:18.098433+00:00 [error] <0.262.0> ** Connection attempt from node 'rabbit@rabbitmq-cluster-rabbitmq-0.rabbitmq-cluster-rabbitmq-headless.default' rejected. Invalid challenge reply. **
2025-04-28 08:58:18.098433+00:00 [error] <0.262.0>
2025-04-28 08:58:18.681425+00:00 [notice] <0.44.0> Application syslog exited with reason: stopped
2025-04-28 08:58:18.681488+00:00 [notice] <0.258.0> Logging: switching to configured handler(s); following messages may not be visible in this log output
## ## RabbitMQ 3.13.7
## ##
########## Copyright (c) 2007-2024 Broadcom Inc and/or its subsidiaries
###### ##
########## Licensed under the MPL 2.0. Website: https://rabbitmq.com
Erlang: 26.2.5.5 [jit]
TLS Library: OpenSSL - OpenSSL 3.1.7 3 Sep 2024
Release series support status: see https://www.rabbitmq.com/release-information
Doc guides: https://www.rabbitmq.com/docs
Support: https://www.rabbitmq.com/docs/contact
Tutorials: https://www.rabbitmq.com/tutorials
Monitoring: https://www.rabbitmq.com/docs/monitoring
Upgrading: https://www.rabbitmq.com/docs/upgrade
Logs: /var/lib/rabbitmq/log/rabbit.log
<stdout>
Config file(s): /etc/rabbitmq/conf.d/10-defaults.conf
/etc/rabbitmq/conf.d/12-kubeblocks.conf
Starting broker...%
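"Invalid challenge reply" is how Erlang distribution reports a cookie mismatch, so the recreated rabbitmq-cluster-rabbitmq-2 most likely came up with a different Erlang cookie than the two older replicas. A minimal check (a sketch; it assumes the cookie sits at the default /var/lib/rabbitmq/.erlang.cookie in this image):
# compare the cookie hash across all three replicas; they must be identical for the nodes to cluster
for i in 0 1 2; do
  kubectl exec rabbitmq-cluster-rabbitmq-$i -c rabbitmq -- md5sum /var/lib/rabbitmq/.erlang.cookie
done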
Expected behavior
All clusters should reconcile back to Running after the upgrade to apps.kubeblocks.io/v1, with every replica healthy: no crash loops, no role-probe timeouts, and no image pull failures.
Additional context
- tidb: pulling image docker.io/msoap/shell2http:1.16.0 times out (Init:ImagePullBackOff)
echo yes|kbcli cluster upgrade-to-v1 tidb-cluster
┌──────────────────────────────────────────────────────────────┐ ┌───────────────────────────────────────────────────────────┐
│apiVersion: apps.kubeblocks.io/v1alpha1 │ │apiVersion: apps.kubeblocks.io/v1 │
│kind: Cluster │ │kind: Cluster │
│metadata: │ │metadata: │
│ annotations: │ │ annotations: │
│ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1│ │ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1 │
│ kubeblocks.io/reconcile: "2025-04-28T07:40:59.9203666Z" │ │ kubeblocks.io/reconcile: "2025-04-28T07:40:59.9203666Z"│
│ creationTimestamp: "2025-04-28T07:23:41Z" │ │ creationTimestamp: "2025-04-28T07:23:41Z" │
│ finalizers: │ │ finalizers: │
│ - cluster.kubeblocks.io/finalizer │ │ - cluster.kubeblocks.io/finalizer │
│ generation: 2 │ │ generation: 2 │
│ labels: │ │ labels: │
│ clusterdefinition.kubeblocks.io/name: tidb │ │ clusterdefinition.kubeblocks.io/name: tidb │
│ clusterversion.kubeblocks.io/name: "" │ │ clusterversion.kubeblocks.io/name: "" │
│ name: tidb-cluster │ │ name: tidb-cluster │
│ namespace: default │ │ namespace: default │
│ resourceVersion: "36318" │ │ resourceVersion: "36318" │
│ uid: 8a860673-4998-4f87-9f48-5f84100ee693 │ │ uid: 8a860673-4998-4f87-9f48-5f84100ee693 │
│spec: │ │spec: │
│ clusterDefinitionRef: tidb │ │ clusterDef: tidb │
│ componentSpecs: │ │ componentSpecs: │
│ - componentDef: tidb-pd-8 │ │ - componentDef: tidb-pd-7-1.0.0-alpha.0 │
│ disableExporter: false │ │ disableExporter: false │
│ name: tidb-pd │ │ name: tidb-pd │
│ replicas: 1 │ │ replicas: 1 │
│ resources: │ │ resources: │
│ limits: │ │ limits: │
│ cpu: 500m │ │ cpu: 500m │
│ memory: 2Gi │ │ memory: 2Gi │
│ requests: │ │ requests: │
│ cpu: 500m │ │ cpu: 500m │
│ memory: 2Gi │ │ memory: 2Gi │
│ serviceAccountName: kb-tidb-cluster │ │ serviceVersion: 8.4.0 │
│ serviceVersion: 8.4.0 │ │ volumeClaimTemplates: │
│ volumeClaimTemplates: │ │ - name: data │
│ - name: data │ │ spec: │
│ spec: │ │ accessModes: │
│ accessModes: │ │ - ReadWriteOnce │
│ - ReadWriteOnce │ │ resources: │
│ resources: │ │ requests: │
│ requests: │ │ storage: 20Gi │
│ storage: 20Gi │ │ - componentDef: tikv-7-1.0.0-alpha.0 │
│ - componentDef: tikv-8 │ │ disableExporter: false │
│ disableExporter: false │ │ name: tikv │
│ name: tikv │ │ replicas: 1 │
│ replicas: 1 │ │ resources: │
│ resources: │ │ limits: │
│ limits: │ │ cpu: 500m │
│ cpu: 500m │ │ memory: 2Gi │
│ memory: 2Gi │ │ requests: │
│ requests: │ │ cpu: 500m │
│ cpu: 500m │ │ memory: 2Gi │
│ memory: 2Gi │ │ serviceVersion: 8.4.0 │
│ serviceAccountName: kb-tidb-cluster │ │ volumeClaimTemplates: │
│ serviceVersion: 8.4.0 │ │ - name: data │
│ volumeClaimTemplates: │ │ spec: │
│ - name: data │ │ accessModes: │
│ spec: │ │ - ReadWriteOnce │
│ accessModes: │ │ resources: │
│ - ReadWriteOnce │ │ requests: │
│ resources: │ │ storage: 20Gi │
│ requests: │ │ - componentDef: tidb-pd-7-1.0.0-alpha.0 │
│ storage: 20Gi │ │ disableExporter: false │
│ - componentDef: tidb-8 │ │ name: tidb │
│ disableExporter: false │ │ replicas: 1 │
│ name: tidb │ │ resources: │
│ replicas: 1 │ │ limits: │
│ resources: │ │ cpu: 500m │
│ limits: │ │ memory: 2Gi │
│ cpu: 500m │ │ requests: │
│ memory: 2Gi │ │ cpu: 500m │
│ requests: │ │ memory: 2Gi │
│ cpu: 500m │ │ serviceVersion: 8.4.0 │
│ memory: 2Gi │ │ terminationPolicy: WipeOut │
│ serviceAccountName: kb-tidb-cluster │ │ topology: cluster │
│ serviceVersion: 8.4.0 │ │status: {} │
│ resources: │ │ │
│ cpu: "0" │ └───────────────────────────────────────────────────────────┘
│ memory: "0" │
│ storage: │
│ size: "0" │
│ terminationPolicy: WipeOut │
│ topology: cluster │
│status: {} │
│ │
└──────────────────────────────────────────────────────────────┘
Cluster tidb-cluster will be converted to v1 with output as yaml.
Please type 'Yes/yes' to confirm your operation: yes
tidb-cluster-tidb
tidb-cluster-tidb-pd
tidb-cluster-tikv
Cluster tidb-cluster has converted successfully, you can view the spec:
➜ ~
➜ ~ kubectl get clusters.apps.kubeblocks.io tidb-cluster -n default -oyaml
apiVersion: apps.kubeblocks.io/v1
kind: Cluster
metadata:
annotations:
kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1
kubeblocks.io/reconcile: "2025-04-28T07:40:59.9203666Z"
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"apps.kubeblocks.io/v1alpha1","kind":"Cluster","metadata":{"annotations":{},"name":"tidb-cluster","namespace":"default"},"spec":{"clusterDefinitionRef":"tidb","componentSpecs":[{"disableExporter":false,"name":"tidb","replicas":1,"resources":{"limits":{"cpu":"500m","memory":"2Gi"},"requests":{"cpu":"500m","memory":"2Gi"}},"serviceAccountName":"kb-tidb-cluster","serviceVersion":null},{"disableExporter":false,"name":"tikv","replicas":1,"resources":{"limits":{"cpu":"500m","memory":"2Gi"},"requests":{"cpu":"500m","memory":"2Gi"}},"serviceAccountName":"kb-tidb-cluster","serviceVersion":null,"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]},{"disableExporter":false,"name":"tidb-pd","replicas":1,"resources":{"limits":{"cpu":"500m","memory":"2Gi"},"requests":{"cpu":"500m","memory":"2Gi"}},"serviceAccountName":"kb-tidb-cluster","serviceVersion":null,"volumeClaimTemplates":[{"name":"data","spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"20Gi"}},"storageClassName":null}}]}],"terminationPolicy":"WipeOut","topology":"cluster"}}
creationTimestamp: "2025-04-28T07:23:41Z"
finalizers:
- cluster.kubeblocks.io/finalizer
generation: 3
labels:
clusterdefinition.kubeblocks.io/name: tidb
clusterversion.kubeblocks.io/name: ""
name: tidb-cluster
namespace: default
resourceVersion: "62486"
uid: 8a860673-4998-4f87-9f48-5f84100ee693
spec:
clusterDef: tidb
componentSpecs:
- componentDef: tidb-pd-7-1.0.0-alpha.0
disableExporter: false
name: tidb-pd
replicas: 1
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
serviceVersion: 8.4.0
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
- componentDef: tikv-7-1.0.0-alpha.0
disableExporter: false
name: tikv
replicas: 1
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
serviceVersion: 8.4.0
volumeClaimTemplates:
- name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
- componentDef: tidb-pd-7-1.0.0-alpha.0
disableExporter: false
name: tidb
replicas: 1
resources:
limits:
cpu: 500m
memory: 2Gi
requests:
cpu: 500m
memory: 2Gi
serviceVersion: 8.4.0
terminationPolicy: WipeOut
topology: cluster
status:
components:
tidb:
message:
InstanceSet/tidb-cluster-tidb: Role probe timeout, check whether the application
is available
phase: Failed
tidb-pd:
message:
InstanceSet/tidb-cluster-tidb-pd: '["tidb-cluster-tidb-pd-0"]'
phase: Failed
tikv:
phase: Running
conditions:
- lastTransitionTime: "2025-04-28T08:07:36Z"
message: 'The operator has started the provisioning of Cluster: tidb-cluster'
observedGeneration: 3
reason: PreCheckSucceed
status: "True"
type: ProvisioningStarted
- lastTransitionTime: "2025-04-28T07:23:59Z"
message: Successfully applied for resources
observedGeneration: 3
reason: ApplyResourcesSucceed
status: "True"
type: ApplyResources
- lastTransitionTime: "2025-04-28T07:35:26Z"
message: all pods of components are ready, waiting for the probe detection successful
reason: AllReplicasReady
status: "True"
type: ReplicasReady
- lastTransitionTime: "2025-04-28T08:23:37Z"
message: 'cluster tidb-cluster is NOT ready, unavailable components: tidb,tidb-pd'
reason: ComponentsNotReady
status: "False"
type: Ready
observedGeneration: 3
phase: Abnormal
kubectl get cluster tidb-cluster
NAME CLUSTER-DEFINITION TERMINATION-POLICY STATUS AGE
tidb-cluster tidb WipeOut Abnormal 106m
➜ ~
➜ ~ kubectl get pod -l app.kubernetes.io/instance=tidb-cluster
NAME READY STATUS RESTARTS AGE
tidb-cluster-tidb-0 2/2 Running 0 95m
tidb-cluster-tidb-pd-0 0/4 Init:ImagePullBackOff 0 46m
tidb-cluster-tikv-0 2/2 Running 0 102m
➜ ~
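tidb-cluster-tidb-pd-0 is stuck because the role-agent-installer init container pulls docker.io/msoap/shell2http:1.16.0 straight from Docker Hub, which times out in this environment, while every other image comes from the apecloud registry mirror. Confirming the pull failure and working around it could look like this (a sketch; the mirror registry name below is a placeholder, not a real endpoint):
# recent events for the stuck pod, including the image pull errors
kubectl get events --field-selector involvedObject.name=tidb-cluster-tidb-pd-0 --sort-by=.lastTimestamp | tail -n 20
# hypothetical workaround: mirror the image to a reachable registry and point the addon at it
docker pull docker.io/msoap/shell2http:1.16.0
docker tag docker.io/msoap/shell2http:1.16.0 <mirror-registry>/msoap/shell2http:1.16.0
docker push <mirror-registry>/msoap/shell2http:1.16.0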
➜ ~ kubectl describe pod tidb-cluster-tidb-pd-0
Name: tidb-cluster-tidb-pd-0
Namespace: default
Priority: 0
Service Account: kb-tidb-cluster
Node: 172.31.0.7/172.31.0.7
Start Time: Mon, 28 Apr 2025 16:23:31 +0800
Labels: app.kubernetes.io/component=tidb-pd-8
app.kubernetes.io/instance=tidb-cluster
app.kubernetes.io/managed-by=kubeblocks
app.kubernetes.io/name=tidb-pd-8
app.kubernetes.io/version=tidb-pd-8
apps.kubeblocks.io/cluster-uid=8a860673-4998-4f87-9f48-5f84100ee693
apps.kubeblocks.io/component-name=tidb-pd
apps.kubeblocks.io/pod-name=tidb-cluster-tidb-pd-0
clusterdefinition.kubeblocks.io/name=tidb
clusterversion.kubeblocks.io/name=
componentdefinition.kubeblocks.io/name=tidb-pd-8
controller-revision-hash=575dd788dd
workloads.kubeblocks.io/instance=tidb-cluster-tidb-pd
workloads.kubeblocks.io/managed-by=InstanceSet
Annotations: apps.kubeblocks.io/component-replicas: 1
vke.volcengine.com/cello-pod-evict-policy: allow
Status: Pending
IP: 172.31.0.40
IPs:
IP: 172.31.0.40
Controlled By: InstanceSet/tidb-cluster-tidb-pd
Init Containers:
init-lorry:
Container ID: containerd://523c9d1ede2b45ce2869b5d555a6027d5be586ec3002a652f1a49c5e6029cdb5
Image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:0.9.4-beta.20
Image ID: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools@sha256:055ae3e10531fda259e57cc87c3ad97c8ea1539e118341f16d9557435187a8a0
Port: <none>
Host Port: <none>
Command:
cp
-r
/bin/lorry
/config
/bin/curl
/kubeblocks/
State: Terminated
Reason: Completed
Exit Code: 0
Started: Mon, 28 Apr 2025 16:23:36 +0800
Finished: Mon, 28 Apr 2025 16:23:36 +0800
Ready: True
Restart Count: 0
Limits:
cpu: 0
memory: 0
Requests:
cpu: 0
memory: 0
Environment Variables from:
tidb-cluster-tidb-pd-env ConfigMap Optional: false
Environment:
KB_POD_NAME: tidb-cluster-tidb-pd-0 (v1:metadata.name)
KB_POD_UID: (v1:metadata.uid)
KB_NAMESPACE: default (v1:metadata.namespace)
KB_SA_NAME: (v1:spec.serviceAccountName)
KB_NODENAME: (v1:spec.nodeName)
KB_HOST_IP: (v1:status.hostIP)
KB_POD_IP: (v1:status.podIP)
KB_POD_IPS: (v1:status.podIPs)
KB_HOSTIP: (v1:status.hostIP)
KB_PODIP: (v1:status.podIP)
KB_PODIPS: (v1:status.podIPs)
KB_POD_FQDN: $(KB_POD_NAME).tidb-cluster-tidb-pd-headless.$(KB_NAMESPACE).svc
Mounts:
/kubeblocks from kubeblocks (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
init-pd-ctl:
Container ID: containerd://162602f2ac5b08236e7401f7cd5324c894de4ad27bd6b212eae554119fd3c51c
Image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/pd:v7.1.5
Image ID: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/pd@sha256:c70a110b7e10d4e055724cfda5006432bae93a03d5d71559ce9b4ea1e13e8e58
Port: <none>
Host Port: <none>
Command:
cp
/pd-ctl
/kb-tools/pd-ctl
State: Terminated
Reason: Completed
Exit Code: 0
Started: Mon, 28 Apr 2025 16:23:37 +0800
Finished: Mon, 28 Apr 2025 16:23:37 +0800
Ready: True
Restart Count: 0
Limits:
cpu: 0
memory: 0
Requests:
cpu: 0
memory: 0
Environment Variables from:
tidb-cluster-tidb-pd-env ConfigMap Optional: false
Environment:
KB_POD_NAME: tidb-cluster-tidb-pd-0 (v1:metadata.name)
KB_POD_UID: (v1:metadata.uid)
KB_NAMESPACE: default (v1:metadata.namespace)
KB_SA_NAME: (v1:spec.serviceAccountName)
KB_NODENAME: (v1:spec.nodeName)
KB_HOST_IP: (v1:status.hostIP)
KB_POD_IP: (v1:status.podIP)
KB_POD_IPS: (v1:status.podIPs)
KB_HOSTIP: (v1:status.hostIP)
KB_PODIP: (v1:status.podIP)
KB_PODIPS: (v1:status.podIPs)
KB_POD_FQDN: $(KB_POD_NAME).tidb-cluster-tidb-pd-headless.$(KB_NAMESPACE).svc
TOOLS_SCRIPTS_PATH: /opt/kb-tools/reload/pd-configuration
Mounts:
/etc/pd from config (rw)
/kb-tools from kb-tools (rw)
/opt/config-manager from config-manager-config (rw)
/opt/kb-tools/reload/pd-configuration from cm-script-pd-configuration (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
role-agent-installer:
Container ID:
Image: msoap/shell2http:1.16.0
Image ID:
Port: <none>
Host Port: <none>
Command:
cp
/app/shell2http
/role-probe/agent
State: Waiting
Reason: ImagePullBackOff
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/role-probe from role-agent (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
Containers:
pd:
Container ID:
Image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/pd:v8.4.0
Image ID:
Ports: 2379/TCP, 2380/TCP
Host Ports: 0/TCP, 0/TCP
Command:
/scripts/pd_start.sh
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Limits:
cpu: 500m
memory: 2Gi
vke.volcengine.com/eni-ip: 1
Requests:
cpu: 500m
memory: 2Gi
vke.volcengine.com/eni-ip: 1
Environment Variables from:
tidb-cluster-tidb-pd-env ConfigMap Optional: false
tidb-cluster-tidb-pd-rsm-env ConfigMap Optional: false
Environment:
KB_POD_NAME: tidb-cluster-tidb-pd-0 (v1:metadata.name)
KB_POD_UID: (v1:metadata.uid)
KB_NAMESPACE: default (v1:metadata.namespace)
KB_SA_NAME: (v1:spec.serviceAccountName)
KB_NODENAME: (v1:spec.nodeName)
KB_HOST_IP: (v1:status.hostIP)
KB_POD_IP: (v1:status.podIP)
KB_POD_IPS: (v1:status.podIPs)
KB_HOSTIP: (v1:status.hostIP)
KB_PODIP: (v1:status.podIP)
KB_PODIPS: (v1:status.podIPs)
KB_POD_FQDN: $(KB_POD_NAME).tidb-cluster-tidb-pd-headless.$(KB_NAMESPACE).svc
SERVICE_PORT: 2379
Mounts:
/etc/pd from config (rw)
/kb-tools from kb-tools (rw)
/scripts from scripts (rw)
/var/lib/pd from data (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
lorry:
Container ID:
Image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/pd:v7.1.5
Image ID:
Ports: 3501/TCP, 50001/TCP
Host Ports: 0/TCP, 0/TCP
Command:
/kubeblocks/lorry
--port
3501
--grpcport
50001
--config-path
/kubeblocks/config/lorry/components/
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Limits:
cpu: 0
memory: 0
Requests:
cpu: 0
memory: 0
Readiness: http-get http://:3501/v1.0/checkrole delay=0s timeout=1s period=10s #success=1 #failure=3
Startup: tcp-socket :3501 delay=0s timeout=1s period=10s #success=1 #failure=3
Environment Variables from:
tidb-cluster-tidb-pd-env ConfigMap Optional: false
tidb-cluster-tidb-pd-rsm-env ConfigMap Optional: false
Environment:
KB_POD_NAME: tidb-cluster-tidb-pd-0 (v1:metadata.name)
KB_POD_UID: (v1:metadata.uid)
KB_NAMESPACE: default (v1:metadata.namespace)
KB_SA_NAME: (v1:spec.serviceAccountName)
KB_NODENAME: (v1:spec.nodeName)
KB_HOST_IP: (v1:status.hostIP)
KB_POD_IP: (v1:status.podIP)
KB_POD_IPS: (v1:status.podIPs)
KB_HOSTIP: (v1:status.hostIP)
KB_PODIP: (v1:status.podIP)
KB_PODIPS: (v1:status.podIPs)
KB_POD_FQDN: $(KB_POD_NAME).tidb-cluster-tidb-pd-headless.$(KB_NAMESPACE).svc
KB_RSM_ROLE_PROBE_PERIOD: 0
KB_BUILTIN_HANDLER: custom
KB_SERVICE_PORT: 2379
KB_DATA_PATH: /var/lib/pd
KB_ACTION_COMMANDS: {"memberLeave":["bash","-c","ADDRESS=${KB_MEMBER_ADDRESSES%%,*}\necho $KB_LEAVE_MEMBER_POD_NAME\necho $ADDRESS\n/pd-ctl -u $ADDRESS member delete name $KB_LEAVE_MEMBER_POD_NAME\n"],"roleProbe":["bash","-c","# FIXME: this will fail, no idea why\n# LEADER_NAME=$(/pd-ctl member | jq -r '.leader.name')\nMEMBER=$(/pd-ctl member)\nLEADER_NAME=$(echo $MEMBER | jq -r .leader.name)\nif [ \"$LEADER_NAME\" == \"$HOSTNAME\" ]; then\n echo -n \"leader\"\nelse\n echo -n \"follower\"\nfi\n"]}
SERVICE_PORT: 2379
KB_RSM_ACTION_SVC_LIST: [36501]
KB_RSM_ROLE_UPDATE_MECHANISM: DirectAPIServerEventUpdate
KB_RSM_ROLE_PROBE_TIMEOUT: 1
KB_CLUSTER_NAME: (v1:metadata.labels['app.kubernetes.io/instance'])
KB_COMP_NAME: (v1:metadata.labels['apps.kubeblocks.io/component-name'])
KB_SERVICE_CHARACTER_TYPE: custom
Mounts:
/etc/pd from config (rw)
/kb-tools from kb-tools (rw)
/kubeblocks from kubeblocks (rw)
/scripts from scripts (rw)
/var/lib/pd from data (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
config-manager:
Container ID:
Image: apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:0.9.4-beta.20
Image ID:
Port: 9901/TCP
Host Port: 0/TCP
Command:
env
Args:
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$(TOOLS_PATH)
/bin/reloader
--log-level
info
--operator-update-enable
--tcp
9901
--config
/opt/config-manager/config-manager.yaml
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Limits:
cpu: 0
memory: 0
Requests:
cpu: 0
memory: 0
Environment Variables from:
tidb-cluster-tidb-pd-env ConfigMap Optional: false
tidb-cluster-tidb-pd-rsm-env ConfigMap Optional: false
Environment:
KB_POD_NAME: tidb-cluster-tidb-pd-0 (v1:metadata.name)
KB_POD_UID: (v1:metadata.uid)
KB_NAMESPACE: default (v1:metadata.namespace)
KB_SA_NAME: (v1:spec.serviceAccountName)
KB_NODENAME: (v1:spec.nodeName)
KB_HOST_IP: (v1:status.hostIP)
KB_POD_IP: (v1:status.podIP)
KB_POD_IPS: (v1:status.podIPs)
KB_HOSTIP: (v1:status.hostIP)
KB_PODIP: (v1:status.podIP)
KB_PODIPS: (v1:status.podIPs)
KB_POD_FQDN: $(KB_POD_NAME).tidb-cluster-tidb-pd-headless.$(KB_NAMESPACE).svc
CONFIG_MANAGER_POD_IP: (v1:status.podIP)
TOOLS_PATH: /opt/kb-tools/reload/pd-configuration:/opt/config-manager:/kb-tools
Mounts:
/etc/pd from config (rw)
/kb-tools from kb-tools (rw)
/opt/config-manager from config-manager-config (rw)
/opt/kb-tools/reload/pd-configuration from cm-script-pd-configuration (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
action-0:
Container ID:
Image: busybox:1.35
Image ID:
Port: <none>
Host Port: <none>
Command:
/role-probe/agent
-port
36501
-export-all-vars
-form
/role
bash -c # FIXME: this will fail, no idea why
# LEADER_NAME=$(/pd-ctl member | jq -r '.leader.name')
MEMBER=$(/pd-ctl member)
LEADER_NAME=$(echo $MEMBER | jq -r .leader.name)
if [ "$LEADER_NAME" == "$HOSTNAME" ]; then
echo -n "leader"
else
echo -n "follower"
fi
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Environment: <none>
Mounts:
/role-probe from role-agent (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-4xhwp (ro)
Conditions:
Type Status
PodReadyToStartContainers True
Initialized False
Ready False
ContainersReady False
PodScheduled True
Volumes:
config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: tidb-cluster-tidb-pd-pd-configuration
Optional: false
scripts:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: tidb-cluster-tidb-pd-tidb-scripts
Optional: false
cm-script-pd-configuration:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: sidecar-tidb-scripts-tidb-cluster
Optional: false
config-manager-config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: sidecar-tidb-cluster-tidb-pd-config-manager-config
Optional: false
kb-tools:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
SizeLimit: <unset>
data:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: data-tidb-cluster-tidb-pd-0
ReadOnly: false
kubeblocks:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
SizeLimit: <unset>
role-agent:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
SizeLimit: <unset>
kube-api-access-4xhwp:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional: <nil>
DownwardAPI: true
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: kb-data=true:NoSchedule
node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 46m default-scheduler Successfully assigned default/tidb-cluster-tidb-pd-0 to 172.31.0.7
Normal Pulled 46m kubelet Container image "apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/kubeblocks-tools:0.9.4-beta.20" already present on machine
Normal Created 46m kubelet Created container init-lorry
Normal Started 46m kubelet Started container init-lorry
Normal Pulled 46m kubelet Container image "apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/pd:v7.1.5" already present on machine
Normal Created 46m kubelet Created container init-pd-ctl
Normal Started 46m kubelet Started container init-pd-ctl
Warning Failed 44m (x3 over 46m) kubelet Failed to pull image "msoap/shell2http:1.16.0": failed to pull and unpack image "docker.io/msoap/shell2http:1.16.0": failed to resolve reference "docker.io/msoap/shell2http:1.16.0": failed to do request: Head "https://registry-1.docker.io/v2/msoap/shell2http/manifests/1.16.0": dial tcp 168.143.162.58:443: connect: connection timed out
Warning Failed 44m (x3 over 46m) kubelet Error: ErrImagePull
Warning Failed 44m (x4 over 45m) kubelet Error: ImagePullBackOff
Normal Pulling 44m (x4 over 46m) kubelet Pulling image "msoap/shell2http:1.16.0"
Warning Failed 43m kubelet Failed to pull image "msoap/shell2http:1.16.0": failed to pull and unpack image "docker.io/msoap/shell2http:1.16.0": failed to resolve reference "docker.io/msoap/shell2http:1.16.0": failed to do request: Head "https://registry-1.docker.io/v2/msoap/shell2http/manifests/1.16.0": dial tcp 202.160.130.117:443: connect: connection timed out
Normal BackOff 6m24s (x161 over 45m) kubelet Back-off pulling image "msoap/shell2http:1.16.0"
Warning Failed 78s (x4 over 17m) kubelet (combined from similar events): Failed to pull image "msoap/shell2http:1.16.0": failed to pull and unpack image "docker.io/msoap/shell2http:1.16.0": failed to resolve reference "docker.io/msoap/shell2http:1.16.0": failed to do request: Head "https://registry-1.docker.io/v2/msoap/shell2http/manifests/1.16.0": dial tcp 116.89.243.8:443: connect: connection timed out
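Per the events above, the role-agent-installer init container cannot pull msoap/shell2http:1.16.0 from docker.io (connection timed out), so tidb-cluster-tidb-pd-0 never finishes initializing and the role probe times out. A possible workaround sketch, assuming a machine that can reach docker.io and push access to the same private registry the other images use (the mirrored repository path below is an assumption, not the actual addon default):

docker pull msoap/shell2http:1.16.0
docker tag msoap/shell2http:1.16.0 apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/shell2http:1.16.0
docker push apecloud-registry.cn-zhangjiakou.cr.aliyuncs.com/apecloud/shell2http:1.16.0

The image reference in the tidb addon would then need to point at the mirror (or the image pre-loaded onto the node); the exact field is not shown in this report.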
- zookeeper upgrade fails with unknown clusterVersion or componentDefinition
echo yes|kbcli cluster upgrade-to-v1 zkeeper-cluster
┌──────────────────────────────────────────────────────────────┐ ┌─────────────────────────────────────────────────────────────┐
│apiVersion: apps.kubeblocks.io/v1alpha1 │ │apiVersion: apps.kubeblocks.io/v1 │
│kind: Cluster │ │kind: Cluster │
│metadata: │ │metadata: │
│ annotations: │ │ annotations: │
│ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1alpha1│ │ kubeblocks.io/crd-api-version: apps.kubeblocks.io/v1 │
│ kubeblocks.io/reconcile: "2025-04-28T08:04:42.960517137Z" │ │ kubeblocks.io/reconcile: "2025-04-28T08:04:42.960517137Z"│
│ creationTimestamp: "2025-04-28T07:23:41Z" │ │ creationTimestamp: "2025-04-28T07:23:41Z" │
│ finalizers: │ │ finalizers: │
│ - cluster.kubeblocks.io/finalizer │ │ - cluster.kubeblocks.io/finalizer │
│ generation: 1 │ │ generation: 1 │
│ name: zkeeper-cluster │ │ name: zkeeper-cluster │
│ namespace: default │ │ namespace: default │
│ resourceVersion: "36349" │ │ resourceVersion: "36349" │
│ uid: 1e49074f-eea2-4706-a758-9f83b624c926 │ │ uid: 1e49074f-eea2-4706-a758-9f83b624c926 │
│spec: │ │spec: │
│ componentSpecs: │ │ componentSpecs: │
│ - componentDef: zookeeper-3 │ │ - componentDef: <yourComponentDef> │
│ disableExporter: true │ │ disableExporter: true │
│ env: │ │ env: │
│ - name: ZOOKEEPER_IMAGE_VERSION │ │ - name: ZOOKEEPER_IMAGE_VERSION │
│ value: 3.6.4 │ │ value: 3.6.4 │
│ name: zookeeper │ │ name: zookeeper │
│ replicas: 3 │ │ replicas: 3 │
│ resources: │ │ resources: │
│ limits: │ │ limits: │
│ cpu: 500m │ │ cpu: 500m │
│ memory: 1Gi │ │ memory: 1Gi │
│ requests: │ │ requests: │
│ cpu: 500m │ │ cpu: 500m │
│ memory: 1Gi │ │ memory: 1Gi │
│ serviceVersion: 3.6.4 │ │ serviceVersion: 3.6.4 │
│ volumeClaimTemplates: │ │ volumeClaimTemplates: │
│ - name: data │ │ - name: data │
│ spec: │ │ spec: │
│ accessModes: │ │ accessModes: │
│ - ReadWriteOnce │ │ - ReadWriteOnce │
│ resources: │ │ resources: │
│ requests: │ │ requests: │
│ storage: 20Gi │ │ storage: 20Gi │
│ - name: snapshot-log │ │ - name: snapshot-log │
│ spec: │ │ spec: │
│ accessModes: │ │ accessModes: │
│ - ReadWriteOnce │ │ - ReadWriteOnce │
│ resources: │ │ resources: │
│ requests: │ │ requests: │
│ storage: 20Gi │ │ storage: 20Gi │
│ resources: │ │ terminationPolicy: WipeOut │
│ cpu: "0" │ │status: {} │
│ memory: "0" │ │ │
│ storage: │ └─────────────────────────────────────────────────────────────┘
│ size: "0" │
│ terminationPolicy: WipeOut │
│status: {} │
│ │
└──────────────────────────────────────────────────────────────┘
error: cluster "zkeeper-cluster" has unknown clusterVersion or componentDefinition, you can replace with accorrding ComponentDefinition with 1.0 api
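Following the hint in the error, a possible fix sketch (the ComponentDefinition name below is a placeholder; it assumes a 1.0-compatible zookeeper ComponentDefinition is installed in the cluster):

kubectl get componentdefinitions.apps.kubeblocks.io | grep zookeeper
kubectl patch clusters.apps.kubeblocks.io zkeeper-cluster -n default --type=json \
  -p='[{"op":"replace","path":"/spec/componentSpecs/0/componentDef","value":"<zookeeper-1.0-componentdef>"}]'
echo yes|kbcli cluster upgrade-to-v1 zkeeper-cluster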