charts icon indicating copy to clipboard operation
charts copied to clipboard

when installing kube-prometheus i keep getting "unknown field "path" in io.k8s.api.core.v1.Probe"

Open ngingihy opened this issue 2 years ago • 2 comments

Name and Version

bitnami/kube-prometheus 8.0.9

What steps will reproduce the bug?

Adding the repo and then when i install the deployment using my customized values.yaml i keep getting

Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest: error validating "": error validating data: [ValidationError(Deployment.spec.template.spec.containers[0].livenessProbe): unknown field "path" in io.k8s.api.core.v1.Probe, ValidationError(Deployment.spec.template.spec.containers[0].readinessProbe): unknown field "path" in io.k8s.api.core.v1.Probe]

Are you using any custom parameters or values?

I install the deployment using this command

helm install my-prom -f values.yaml bitnami/kube-prometheus --skip-crds `--set` ingressController.installCRDs=false --debug

this my values.yaml file

global:

  imageRegistry: " test .com"
  imagePullSecrets: [' test']
  storageClass: ""
  labels: {}


nameOverride: ""

fullnameOverride: ""

extraDeploy: []


operator:
  enabled: true

  image:

    registry:  test .com
    repository:  team1/prometheus-operator
    tag: 0.55.0-debian-10-r9

    pullPolicy: IfNotPresent

    pullSecrets: [' test']

  extraArgs: []

  hostAliases: []

  serviceAccount:

    create: true

    name: ""

  schedulerName: ""

  podSecurityContext:
    enabled: true
    runAsUser: 1001
    fsGroup: 1001

  containerSecurityContext:
    enabled: true
    capabilities:
      drop:
      - ALL
    runAsNonRoot: true
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: false

  service:

    type: LoadBalancer

    port: 8080

    clusterIP: ""

    nodePort: ""
 
    loadBalancerIP: ""

    loadBalancerSourceRanges: []

    externalTrafficPolicy: Cluster

    healthCheckNodePort: ""
    ## @param operator.service.annotations Additional annotations for Prometheus Operator service
    ##
    annotations: {}
  ## Create a servicemonitor for the operator
  ##
  serviceMonitor:

    enabled: true

    interval: ""

    metricRelabelings: []

    relabelings: []

  resources: {}
 
  podAffinityPreset: ""

  podAntiAffinityPreset: soft

  nodeAffinityPreset:

    type: ""
 
    key: ""

    values: []

  affinity: {}
  
  nodeSelector: {}

  tolerations: []

  priorityClassName: ""

  livenessProbe:
    enabled: true
    path: /metrics
    initialDelaySeconds: 120
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1

  readinessProbe:
    enabled: true
    path: /metrics
    initialDelaySeconds: 30
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 6
    successThreshold: 1

  logLevel: info

  logFormat: logfmt

  configReloaderResources: {}

  kubeletService:
    enabled: true
    namespace: kube-system

  prometheusConfigReloader:

    image: {}

    containerSecurityContext:
      enabled: true
      readOnlyRootFilesystem: false
      allowPrivilegeEscalation: false
      runAsNonRoot: true
      capabilities:
        drop:
          - ALL

    livenessProbe:
      enabled: true
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1

    readinessProbe:
      enabled: true
      initialDelaySeconds: 15
      periodSeconds: 20
      timeoutSeconds: 5
      failureThreshold: 6
      successThreshold: 1


prometheus:

  enabled: true

  image:

    registry:  test .com
    repository:  team1/prometheus
    tag: 2.33.5-debian-10-r10

    pullSecrets: [' test']

  serviceAccount:

    create: true

    name: ""

    annotations: {}

  podSecurityContext:
    enabled: true
    runAsUser: 1001
    fsGroup: 1001

  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: false
    allowPrivilegeEscalation: false
    runAsNonRoot: true
    capabilities:
      drop:
        - ALL

  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    maxUnavailable: ""

  service:

    type: ClusterIP

    port: 9090

    clusterIP: ""

    nodePort: ""

    loadBalancerIP: ""

    loadBalancerSourceRanges: []

    externalTrafficPolicy: Cluster

    healthCheckNodePort: ""

    stickySessions: ""

    annotations: {}
  serviceMonitor:

    enabled: true

    interval: ""

    metricRelabelings: []

    relabelings: []

  ingress:

    enabled: false

    pathType: ImplementationSpecific

    apiVersion: ""

    hostname: prometheus.local

    path: /

    annotations: {}

    ingressClassName: ""

    tls: false

    extraHosts: []

    extraPaths: []

    extraTls: []

    secrets: []

  externalUrl: ""

  resources: {}

  podAffinityPreset: ""

  podAntiAffinityPreset: soft

  nodeAffinityPreset:

    type: ""

    key: ""
    values: []

  affinity: {}

  nodeSelector: {}

  tolerations: []

  scrapeInterval: ""

  evaluationInterval: ""

  listenLocal: false

  livenessProbe:
    enabled: true
    path: /-/healthy
    initialDelaySeconds: 0
    failureThreshold: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 3

  readinessProbe:
    enabled: true
    path: /-/ready
    initialDelaySeconds: 0
    failureThreshold: 10
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 3

  enableAdminAPI: false

  enableFeatures: []

  alertingEndpoints: []

  externalLabels: {}

  replicaExternalLabelName: ""

  replicaExternalLabelNameClear: false

  routePrefix: /

  prometheusExternalLabelName: ""

  prometheusExternalLabelNameClear: false

  secrets: []
 
  configMaps: []

  querySpec: {}

  ruleNamespaceSelector: {}

  ruleSelector: {}

  serviceMonitorSelector: {}

  matchLabels: {}
 
  serviceMonitorNamespaceSelector: {}

  podMonitorSelector: {}

  podMonitorNamespaceSelector: {}

  probeSelector: {}

  probeNamespaceSelector: {}
 
  retention: 10d

  retentionSize: ""

  disableCompaction: false

  walCompression: false

  paused: false

  replicaCount: 1

  logLevel: info

  logFormat: logfmt

  podMetadata:

    labels: {}
    annotations: {}

  remoteRead: []

  remoteWrite: []

  storageSpec: {}

  persistence:

    enabled: false

    storageClass: ""

    accessModes:
      - ReadWriteOnce

    size: 8Gi

  priorityClassName: ""

  containers: []

  volumes: []

  volumeMounts: []
  
  ##
  additionalPrometheusRules: []

  additionalScrapeConfigs:
    enabled: false
    type: external
    external:

      name: ""

      key: ""
    internal:
      jobList: []

  additionalScrapeConfigsExternal:
    enabled: false
    name: ""
    key: ""

  additionalAlertRelabelConfigsExternal:
    enabled: false
    name: ""
    key: ""

  thanos:

    create: false

    image:
      registry:  test .com
      repository:  team1/thanos
      tag: 0.25.1-scratch-r2
 
      pullPolicy: IfNotPresent

      pullSecrets: [' test']

    containerSecurityContext:
      enabled: true
      readOnlyRootFilesystem: false
      allowPrivilegeEscalation: false
      runAsNonRoot: true
      capabilities:
        drop:
          - ALL

    prometheusUrl: ""

    extraArgs: []

    objectStorageConfig: {}

    extraVolumeMounts: []

    resources:

      limits: {}

      requests: {}

    livenessProbe:
      enabled: true
      path: /-/healthy
      initialDelaySeconds: 0
      periodSeconds: 5
      timeoutSeconds: 3
      failureThreshold: 120
      successThreshold: 1
    readinessProbe:
      enabled: true
      path: /-/ready
      initialDelaySeconds: 0
      periodSeconds: 5
      timeoutSeconds: 3
      failureThreshold: 120
      successThreshold: 1

    service:

      type: LoadBalancer

      port: 10901

      clusterIP: None

      nodePort: ""

      loadBalancerIP: ""

      loadBalancerSourceRanges: []

      annotations: {}

      extraPorts: []

    ingress:

      enabled: false

      annotations: {}

      ingressClassName: ""

      hosts:
        - name: thanos.prometheus.local
          path: /

      tls: {}

  portName: web


alertmanager:

  enabled: true

  image:
    registry:  test .com
    repository:  team1/alertmanager 
    tag: 0.23.0-debian-10-r199

    pullSecrets: [' test']

  serviceAccount:

    create: true

    name: ""

  podSecurityContext:
    enabled: true
    runAsUser: 1001
    fsGroup: 1001
 
  containerSecurityContext:
    enabled: true
    readOnlyRootFilesystem: false
    allowPrivilegeEscalation: false
    runAsNonRoot: true
    capabilities:
      drop:
        - ALL

  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    maxUnavailable: ""

  service:

    type: ClusterIP

    port: 9093

    clusterIP: ""

    nodePort: ""

    loadBalancerIP: ""

    loadBalancerSourceRanges: []

    externalTrafficPolicy: Cluster

    healthCheckNodePort: ""

    stickySessions: ""

    annotations: {}

  serviceMonitor:

    enabled: true

    interval: ""

    metricRelabelings: []

    relabelings: []

  ingress:

    enabled: false

    pathType: ImplementationSpecific

    apiVersion: ""

    hostname: alertmanager.local

    path: /

    annotations: {}

    ingressClassName: ""

    tls: false

    extraHosts: []

    extraPaths: []

    extraTls: []

    secrets: []

  externalUrl: ""

  resources: {}

  podAffinityPreset: ""

  podAntiAffinityPreset: soft

  nodeAffinityPreset:

    type: ""

    key: ""

    values: []

  affinity: {}

  nodeSelector: {}

  tolerations: []

  config:
    global:
      resolve_timeout: 5m
    route:
      group_by: ['job']
      group_wait: 30s
      group_interval: 5m
      repeat_interval: 12h
      receiver: 'null'
      routes:
        - match:
            alertname: Watchdog
          receiver: 'null'
    receivers:
      - name: 'null'

  externalConfig: false
  replicaCount: 1

  livenessProbe:
    enabled: true
    path: /-/healthy
    initialDelaySeconds: 0
    periodSeconds: 5
    timeoutSeconds: 3
    failureThreshold: 120
    successThreshold: 1

  readinessProbe:
    enabled: true
    path: /-/ready
    initialDelaySeconds: 0
    periodSeconds: 5
    timeoutSeconds: 3
    failureThreshold: 120
    successThreshold: 1

  logLevel: info

  logFormat: logfmt

  podMetadata:
    labels: {}
    annotations: {}

  secrets: []

  configMaps: []

  retention: 120h

  storageSpec: {}

  persistence:

    enabled: false
    storageClass: ""

    accessModes:
      - ReadWriteOnce

    size: 8Gi

  paused: false

  listenLocal: false

  containers: []

  volumes: []

  volumeMounts: []

  priorityClassName: ""

  additionalPeers: []

  routePrefix: /

  portName: web

  configNamespaceSelector: {}

  configSelector: {}

  configuration: {}


exporters:
  node-exporter:
    enabled: true
  kube-state-metrics:

    enabled: true

node-exporter:
  service:
    labels:
      jobLabel: node-exporter
  serviceMonitor:
    enabled: true
    jobLabel: jobLabel
  extraArgs:
    collector.filesystem.ignored-mount-points: "^/(dev|proc|sys|var/lib/docker/.+)($|/)"
    collector.filesystem.ignored-fs-types: "^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$"

kube-state-metrics:
  serviceMonitor:
    enabled: true
    honorLabels: true

kubelet:

  enabled: true

  namespace: kube-system
  serviceMonitor:

    https: true

    interval: ""

    metricRelabelings: []

    relabelings: []

    cAdvisorMetricRelabelings: []

    cAdvisorRelabelings: []

kubeApiServer:

  enabled: true
  serviceMonitor:

    interval: ""

    metricRelabelings: []
 
    relabelings: []

kubeControllerManager:

  enabled: true

  endpoints: []

  namespace: kube-system

  service:
    enabled: true
    port: 10252
    targetPort: 10252

    selector: {}
  serviceMonitor:
 
    interval: ""

    https: false

    insecureSkipVerify: ""
    serverName: ""

    metricRelabelings: []

    relabelings: []

kubeScheduler:

  enabled: true

  endpoints: []

  namespace: kube-system

  service:
    enabled: true
    port: 10251
    targetPort: 10251

    selector: {}
  serviceMonitor:

    interval: ""

    https: false

    insecureSkipVerify: ""

    serverName: ""

    metricRelabelings: []

    relabelings: []

coreDns:

  enabled: true

  namespace: kube-system

  service:
    enabled: true
    port: 9153
    targetPort: 9153

    selector: {}
  serviceMonitor:

    interval: ""

    metricRelabelings: []

    relabelings: []

kubeProxy:

  enabled: true

  endpoints: []

  namespace: kube-system

  service:
    enabled: true
    port: 10249
    targetPort: 10249

    selector: {}
  serviceMonitor:

    https: false
  
    interval: ""

    metricRelabelings: []
  
    relabelings: []
rbac:
  create: true
  apiVersion: v1beta1
  pspEnabled: true

What is the expected behavior?

It didn't get installed

What do you see instead?

Error: INSTALLATION FAILED: unable to build kubernetes objects from release manifest: error validating "": error validating data: [ValidationError(Deployment.spec.template.spec.containers[0].livenessProbe): unknown field "path" in io.k8s.api.core.v1.Probe, ValidationError(Deployment.spec.template.spec.containers[0].readinessProbe): unknown field "path" in io.k8s.api.core.v1.Probe]

ngingihy avatar Jul 19 '22 14:07 ngingihy

Hi, Could you provide only the changes respect to the provided vaules.yaml ? I think there are some typos like kubeScheduler.service.port that should be: kubeScheduler.service.ports.http. My guess that is some kind of typo in your values. Maybe you can use the provided one and start adding changes little by little.

rafariossaa avatar Jul 21 '22 09:07 rafariossaa

This Issue has been automatically marked as "stale" because it has not had recent activity (for 15 days). It will be closed if no further activity occurs. Thanks for the feedback.

github-actions[bot] avatar Aug 06 '22 01:08 github-actions[bot]

Due to the lack of activity in the last 5 days since it was marked as "stale", we proceed to close this Issue. Do not hesitate to reopen it later if necessary.

github-actions[bot] avatar Aug 11 '22 01:08 github-actions[bot]