milvus-helm
read source yaml failed: error converting YAML to JSON: yaml: line 22: block sequence entries are not allowed in this context
Hello, I'm trying to install Milvus on a k8s cluster using Helm:
helm install milvus milvus/milvus --values='/home/siradjedd/airstream/application/k8s/helm/milvus/values/milvus.yml' --namespace milvus
NAME: milvus
LAST DEPLOYED: Fri May 31 09:34:37 2024
NAMESPACE: milvus
STATUS: deployed
REVISION: 1
TEST SUITE: None
But some of the pods fail with:
read source yaml failed: error converting YAML to JSON: yaml: line 22: block sequence entries are not allowed in this context
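The helm install itself succeeds, so the values file apparently parses at install time; the error only shows up when the pods read the merged config. Two checks that should narrow down which file "line 22" points at (yamllint is optional tooling, and I am assuming the chart renders the merged config into a ConfigMap named after the release):

yamllint /home/siradjedd/airstream/application/k8s/helm/milvus/values/milvus.yml
kubectl -n milvus get configmap milvus -o jsonpath='{.data.user\.yaml}'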
Here is my values.yaml:
## Enable or disable Milvus Cluster mode
cluster:
  enabled: true

image:
  all:
    repository: milvusdb/milvus
    tag: v2.2.13 #v2.2.4
    pullPolicy: IfNotPresent
    ## Optionally specify an array of imagePullSecrets.
    ## Secrets must be manually created in the namespace.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
    ##
    # pullSecrets:
    #   - myRegistryKeySecretName
  tools:
    repository: milvusdb/milvus-config-tool
    tag: v0.1.1
    pullPolicy: IfNotPresent
# Global node selector
# If set, this will apply to all milvus components
# Individual components can be set to a different node selector
nodeSelector:
  tech: milvus

# Global tolerations
# If set, this will apply to all milvus components
# Individual components can be set to different tolerations
tolerations:
  - key: "milvus"
    operator: "Equal"
    value: "true"
    effect: "NoSchedule"

# Global affinity
# If set, this will apply to all milvus components
# Individual components can be set to a different affinity
affinity: {}

# Global labels and annotations
# If set, this will apply to all milvus components
labels: {}
annotations: {}
# Extra configs for milvus.yaml
# If set, this config will merge into milvus.yaml
# Please follow the config structure in the milvus.yaml
# at https://github.com/milvus-io/milvus/blob/master/configs/milvus.yaml
# Note: this config will be the top priority which will override the config
# in the image and helm chart.
extraConfigFiles:
  user.yaml: |+
    # For example enable rest http for milvus proxy
    # proxy:
    #   http:
    #     enabled: true
## Expose the Milvus service to be accessed from outside the cluster (LoadBalancer service).
## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it.
## ref: http://kubernetes.io/docs/user-guide/services/
##
service:
  type: LoadBalancer
  port: 19530
  nodePort: ""
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-internal: "true"
    service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
  labels: {}

  ## List of IP addresses at which the Milvus service is available
  ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
  ##
  externalIPs: []
  #   - externalIp1

  # LoadBalancerSourcesRange is a list of allowed CIDR values, which are combined with ServicePort to
  # set allowed inbound rules on the security group assigned to the master load balancer
  loadBalancerSourceRanges:
    # - 172.254.0.0/16
    - 0.0.0.0/0
  # Optionally assign a known public LB IP
  # loadBalancerIP: 1.2.3.4
ingress:
  enabled: false
  annotations:
    # Annotation example: set nginx ingress type
    # kubernetes.io/ingress.class: nginx
    nginx.ingress.kubernetes.io/backend-protocol: GRPC
    nginx.ingress.kubernetes.io/listen-ports-ssl: '[19530]'
    nginx.ingress.kubernetes.io/proxy-body-size: 4m
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
  labels: {}
  hosts:
    - milvus-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - milvus-example.local
serviceAccount:
  create: false
  name:
  annotations:
  labels:

metrics:
  enabled: true

  serviceMonitor:
    # Set this to `true` to create ServiceMonitor for Prometheus operator
    enabled: false
    interval: "30s"
    scrapeTimeout: "10s"
    # Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    additionalLabels: {}
livenessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 30
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5

readinessProbe:
  enabled: true
  initialDelaySeconds: 90
  periodSeconds: 10
  timeoutSeconds: 5
  successThreshold: 1
  failureThreshold: 5
log:
  level: "info"
  file:
    maxSize: 300 # MB
    maxAge: 5 # day
    maxBackups: 20
  format: "text" # text/json

  persistence:
    mountPath: "/milvus/logs"
    ## If true, create/use a Persistent Volume Claim
    ## If false, use emptyDir
    ##
    enabled: true
    annotations:
      helm.sh/resource-policy: keep
    persistentVolumeClaim:
      existingClaim: ""
      ## Milvus Logs Persistent Volume Storage Class
      ## If defined, storageClassName: <storageClass>
      ## If set to "-", storageClassName: "", which disables dynamic provisioning
      ## If undefined (the default) or set to null, no storageClassName spec is
      ##   set, choosing the default provisioner.
      ## ReadWriteMany access mode required for milvus cluster.
      ##
      storageClass: efs-csi-sc
      accessModes: ReadWriteMany
      size: 10Gi
      subPath: ""
## Heaptrack traces all memory allocations and annotates these events with stack traces.
## See more: https://github.com/KDE/heaptrack
## Enabling heaptrack in production is not recommended.
heaptrack:
  image:
    repository: milvusdb/heaptrack
    tag: v0.1.0
    pullPolicy: IfNotPresent
# standalone:
#   replicas: 1  # Run standalone mode with replication disabled
#   resources: {}
#   # Set local storage size in resources
#   # limits:
#   #   ephemeral-storage: 100Gi
#   nodeSelector: {}
#   affinity: {}
#   tolerations: []
#   extraEnv: []
#   heaptrack:
#     enabled: false
#   disk:
#     enabled: true
#     size:
#       enabled: false  # Enable local storage size limit
#   profiling:
#     enabled: false  # Enable live profiling
#   ## Default message queue for milvus standalone
#   ## Supported values: rocksmq, pulsar and kafka
#   messageQueue: rocksmq
#   persistence:
#     mountPath: "/var/lib/milvus"
#     ## If true, create/use a Persistent Volume Claim
#     ## If false, use emptyDir
#     ##
#     enabled: true
#     annotations:
#       helm.sh/resource-policy: keep
#     persistentVolumeClaim:
#       existingClaim: ""
#       ## Milvus Persistent Volume Storage Class
#       ## If defined, storageClassName: <storageClass>
#       ## If set to "-", storageClassName: "", which disables dynamic provisioning
#       ## If undefined (the default) or set to null, no storageClassName spec is
#       ##   set, choosing the default provisioner.
#       ##
#       storageClass: efs-csi-sc
#       accessModes: ReadWriteOnce
#       size: 50Gi
#       subPath: ""
proxy:
  enabled: true
  replicas: 1
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  http:
    enabled: true  # whether to enable http rest server
    debugMode:
      enabled: false
rootCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Root Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: true  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for root coordinator

  service:
    port: 53100
    annotations: {}
    labels: {}
    clusterIP: ""
queryCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Query Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: true  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for query coordinator

  service:
    port: 19531
    annotations: {}
    labels: {}
    clusterIP: ""
queryNode:
  enabled: true
  replicas: 2
  resources: {}
  # Set local storage size in resources
  # limits:
  #   ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  disk:
    enabled: true  # Enable querynode load disk index, and search on disk index
    size:
      enabled: false  # Enable local storage size limit
  profiling:
    enabled: false  # Enable live profiling
indexCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Index Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for index coordinator

  service:
    port: 31000
    annotations: {}
    labels: {}
    clusterIP: ""
indexNode:
  enabled: true
  replicas: 2
  resources: {}
  # Set local storage size in resources
  # limits:
  #   ephemeral-storage: 100Gi
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: true  # Enable live profiling
  disk:
    enabled: true  # Enable index node build disk vector index
    size:
      enabled: false  # Enable local storage size limit
dataCoordinator:
  enabled: true
  # You can set the number of replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Data Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: true  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for data coordinator

  service:
    port: 13333
    annotations: {}
    labels: {}
    clusterIP: ""
dataNode:
  enabled: true
  replicas: 2
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: true  # Enable live profiling
## mixCoordinator contains all coordinators
## If you want to use mixcoord, enable this and disable all the other coordinators
mixCoordinator:
  enabled: false
  # You can set the number of replicas greater than 1 only if active standby is enabled
  replicas: 1  # Run Mixture Coordinator mode with replication disabled
  resources: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  extraEnv: []
  heaptrack:
    enabled: false
  profiling:
    enabled: false  # Enable live profiling
  activeStandby:
    enabled: false  # Enable active-standby when you set multiple replicas for Mixture coordinator

  service:
    annotations: {}
    labels: {}
    clusterIP: ""
attu:
  enabled: true
  name: attu
  image:
    repository: zilliz/attu
    tag: v2.2.3
    pullPolicy: IfNotPresent
  service:
    type: ClusterIP
    port: 3000
  resources: {}
  ingress:
    enabled: true
    annotations:
      kubernetes.io/ingress.class: nginx
      cert-manager.io/cluster-issuer: letsencrypt-prod
      # Annotation example: set nginx ingress type
      # kubernetes.io/ingress.class: nginx
    labels: {}
    hosts:
      # - milvus.padasiradjme.actops.io
      - milvus.padasiradjmeplus.com
    tls:
      - secretName: milvus-tls
        hosts:
          # - milvus.padasiradjme.actops.io
          - milvus.padasiradjmeplus.com
## Configuration values for the minio dependency
## ref: https://github.com/minio/charts/blob/master/README.md
##
minio:
  enabled: false
## Configuration values for the etcd dependency
## ref: https://artifacthub.io/packages/helm/bitnami/etcd
##
etcd:
  enabled: true
  name: etcd
  replicaCount: 3
  pdb:
    create: false
  image:
    repository: "milvusdb/etcd"
    tag: "3.5.5-r2"
    pullPolicy: IfNotPresent

  service:
    type: ClusterIP
    port: 2379
    peerPort: 2380

  auth:
    rbac:
      enabled: false

  persistence:
    enabled: true
    storageClass: efs-csi-sc
    accessMode: ReadWriteOnce
    size: 10Gi

  ## Enable auto compaction
  ## compaction by every 1000 revision
  ##
  autoCompactionMode: revision
  autoCompactionRetention: "1000"

  nodeSelector:
    tech: milvus
  tolerations:
    - key: "milvus"
      operator: "Equal"
      value: "true"
      effect: "NoSchedule"

  ## Increase default quota to 4G
  ##
  extraEnvVars:
    - name: ETCD_QUOTA_BACKEND_BYTES
      value: "4294967296"
    - name: ETCD_HEARTBEAT_INTERVAL
      value: "500"
    - name: ETCD_ELECTION_TIMEOUT
      value: "2500"
## Configuration values for the pulsar dependency
## ref: https://github.com/apache/pulsar-helm-chart
##
pulsar:
  enabled: false
kafka:
  enabled: true
  name: kafka
  replicaCount: 3
  nodeSelector:
    tech: milvus
  tolerations:
    - key: "milvus"
      operator: "Equal"
      value: "true"
      effect: "NoSchedule"
  image:
    repository: bitnami/kafka
    tag: 3.1.0-debian-10-r52
  ## Increase graceful termination for kafka graceful shutdown
  terminationGracePeriodSeconds: "90"
  pdb:
    create: false

  ## Enable startup probe to prevent pod restarts during recovery
  startupProbe:
    enabled: true

  ## Kafka Java Heap size
  heapOpts: "-Xmx4096m -Xms4096m"
  maxMessageBytes: _10485760
  defaultReplicationFactor: 3
  offsetsTopicReplicationFactor: 3
  ## Only enable time-based log retention
  logRetentionHours: 168
  logRetentionBytes: _-1
  extraEnvVars:
    - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
      value: "5242880"
    - name: KAFKA_CFG_MAX_REQUEST_SIZE
      value: "5242880"
    - name: KAFKA_CFG_REPLICA_FETCH_MAX_BYTES
      value: "10485760"
    - name: KAFKA_CFG_FETCH_MESSAGE_MAX_BYTES
      value: "5242880"
    - name: KAFKA_CFG_LOG_ROLL_HOURS
      value: "24"

  persistence:
    enabled: true
    storageClass: efs-csi-sc
    accessMode: ReadWriteOnce
    size: 100Gi

  metrics:
    ## Prometheus Kafka exporter: exposes metrics complementary to the JMX exporter
    kafka:
      enabled: false
    ## Prometheus JMX exporter: exposes the majority of Kafka's metrics
    jmx:
      enabled: false
    ## To enable the ServiceMonitor, you must enable either the kafka exporter or the jmx exporter.
    ## You can also enable both.
    serviceMonitor:
      enabled: false

  service:
    type: ClusterIP
    ports:
      client: 9092

  zookeeper:
    enabled: true
    replicaCount: 3
    nodeSelector:
      tech: milvus
    tolerations:
      - key: "milvus"
        operator: "Equal"
        value: "true"
        effect: "NoSchedule"
## Configuration values for the mysql dependency
## ref: https://artifacthub.io/packages/helm/bitnami/mysql
##
## Using MySQL as the meta store is still being tested internally
mysql:
  enabled: false

###################################
# External S3
# - these configs are only used when `externalS3.enabled` is true
###################################
externalS3:
  enabled: true
  host: "s3.eu-west-3.amazonaws.com"
  port: "80"
  accessKey: "-"
  secretKey: "-"
  useSSL: false
  bucketName: "milvus-match-video-objects-bucket-prod"
  rootPath: ""
  useIAM: false
  iamEndpoint: ""
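The failing pods can also be inspected without guessing the exact container names by dumping logs across all containers of a pod (the grep pattern simply matches the error message above):

kubectl -n milvus get pods
kubectl -n milvus logs <failing-pod> --all-containers --prefix | grep 'read source yaml'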