mysqld_exporter
VictoriaMetrics ScrapeConfig
Host operating system: output of uname -a: Kubernetes
mysqld_exporter version: output of mysqld_exporter --version: v0.15.1
MySQL server version: 5.7
mysqld_exporter command line flags
auto_increment.columns: true
binlog_size: true
engine_innodb_status: true
engine_tokudb_status: true
global_status: true
global_variables: true
info_schema.clientstats: true
info_schema.innodb_metrics: true
info_schema.innodb_tablespaces: true
info_schema.innodb_cmp: true
info_schema.innodb_cmpmem: true
info_schema.processlist: true
info_schema.processlist.min_time: 0
info_schema.query_response_time: true
info_schema.tables: true
info_schema.tables.databases: '*'
info_schema.tablestats: true
info_schema.schemastats: true
info_schema.userstats: true
perf_schema.eventsstatements: true
perf_schema.eventsstatements.digest_text_limit: 120
# perf_schema.eventsstatements.limit: false
perf_schema.eventsstatements.timelimit: 86400
perf_schema.eventswaits: true
perf_schema.file_events: true
perf_schema.file_instances: true
perf_schema.indexiowaits: true
perf_schema.tableiowaits: true
perf_schema.tablelocks: true
perf_schema.replication_group_member_stats: true
slave_status: true
slave_hosts: true
heartbeat: true
heartbeat.database: heartbeat
heartbeat.table: heartbeat
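(For clarity: the entries above are the chart's collector toggles rather than literal command-line flags. The chart should render them as --collect.* arguments on the exporter container, roughly like the truncated sketch below; flag names follow the mysqld_exporter convention, and this is only an illustration, not the actual rendered output.)

```yaml
args:
  # boolean collector toggles render as --collect.<name>
  - --collect.auto_increment.columns
  - --collect.binlog_size
  - --collect.global_status
  # scalar entries render as --collect.<name>=<value>
  - --collect.info_schema.processlist.min_time=0
  - --collect.perf_schema.eventsstatements.digest_text_limit=120
  - --collect.heartbeat.database=heartbeat
  - --collect.heartbeat.table=heartbeat
```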
What did you do that produced an error?
I installed mysqld_exporter with the Helm chart; below is my values.yaml file.
## Default values for prometheus-mysql-exporter.
## This is a YAML-formatted file.
## Declare variables to be passed into your templates.
## namespaceOverride overrides the namespace which the resources will be deployed in
namespaceOverride: ""
## override release name
fullnameOverride: ""
replicaCount: 1
image:
registry: quay.io
repository: prometheus/mysqld-exporter
## if not set, the chart's appVersion is used
tag: ""
pullPolicy: "IfNotPresent"
# imagePullSecrets:
# - name: secret-name
imagePullSecrets: []
service:
labels: {}
annotations: {}
name: mysql-exporter
type: ClusterIP
externalPort: 9104
internalPort: 9104
serviceMonitor:
# enabled should be set to true to enable prometheus-operator discovery of this service
enabled: true
# interval is the interval at which metrics should be scraped
interval: 5s
# scrapeTimeout is the timeout after which the scrape is ended
scrapeTimeout: 10s
namespace: monitoring
# namespaceSelector: []
# additionalLabels is the set of additional labels to add to the ServiceMonitor
additionalLabels: {}
jobLabel: ""
targetLabels: []
podTargetLabels: []
metricRelabelings: []
# Set relabel_configs as per https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
relabelings: []
# Enable multi target scraping.
multipleTarget:
enabled: true
targets:
# target connection information with name (required), endpoint (required) and port (optional)
# if sharedSecret is not enabled, the name must match a client.{{ name }} entry in the secret
- endpoint: xx.xx.xx.xx
name: all-staging-cloudsql-test-5_6
# port: 3306
- endpoint: xx.xx.xx.xx
name: all-staging-cloudsql-test-5_7
# Enable shared credentials for all targets
sharedSecret:
enabled: true
name: "monitoring-mysql-creds"
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: false
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
annotations: {}
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases the chances the chart runs on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits:
cpu: 100m
memory: 128Mi
requests:
cpu: 100m
memory: 128Mi
nodeSelector: {}
tolerations:
- key: teamname
operator: Equal
value: infra
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: teamname
operator: In
values:
- infra
podLabels: {}
# Extra Volume Mounts for the mysql exporter container
extraVolumeMounts: []
# - name: example
# mountPath: /example
# Extra Volumes for the pod
extraVolumes: []
# - name: example
# configMap:
# name: example
podSecurityContext: {}
# fsGroup: 65534
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 65534
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "9104"
config: {}
# Allows setting specific options on the exporter
# logLevel: info
# logFormat: "logger:stderr"
collectors:
auto_increment.columns: true
binlog_size: true
engine_innodb_status: true
engine_tokudb_status: true
global_status: true
global_variables: true
info_schema.clientstats: true
info_schema.innodb_metrics: true
info_schema.innodb_tablespaces: true
info_schema.innodb_cmp: true
info_schema.innodb_cmpmem: true
info_schema.processlist: true
info_schema.processlist.min_time: 0
info_schema.query_response_time: true
info_schema.tables: true
info_schema.tables.databases: '*'
info_schema.tablestats: true
info_schema.schemastats: true
info_schema.userstats: true
perf_schema.eventsstatements: true
perf_schema.eventsstatements.digest_text_limit: 120
# perf_schema.eventsstatements.limit: false
perf_schema.eventsstatements.timelimit: 86400
perf_schema.eventswaits: true
perf_schema.file_events: true
perf_schema.file_instances: true
perf_schema.indexiowaits: true
perf_schema.tableiowaits: true
perf_schema.tablelocks: true
perf_schema.replication_group_member_stats: true
slave_status: true
slave_hosts: true
heartbeat: true
heartbeat.database: heartbeat
heartbeat.table: heartbeat
# mysql connection params which build the my.cnf config
mysql:
db: ""
host: "localhost"
# config my.cnf https://dev.mysql.com/doc/c-api/8.0/en/mysql-options.html
additionalConfig:
# - connect-timeout=5
# - debug
pass: ""
port: 3306
protocol: ""
user: ""
# secret with full config my.cnf
existingConfigSecret:
name: "monitoring-mysql-creds"
key: ".my.cnf"
# secret only containing the password
existingPasswordSecret:
name: ""
key: ""
# cloudsqlproxy https://cloud.google.com/sql/docs/mysql/sql-proxy
cloudsqlproxy:
enabled: false
image:
repo: "gcr.io/cloud-sql-connectors/cloud-sql-proxy"
tag: "2.4.0"
pullPolicy: "IfNotPresent"
instanceConnectionName: "project:us-central1:dbname"
privateIp: false
port: "3306"
credentialsSecret: ""
# service account json
credentials: ""
workloadIdentity:
enabled: false
serviceAccountEmail: ""
extraArgs: ""
## Custom PrometheusRules to be defined
## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
prometheusRule:
enabled: false
additionalLabels: {}
namespace: ""
rules:
[]
## These are just example rules; please adapt them to your needs.
## Make sure to constrain the rules to the current service.
# - alert: MysqlDown
# expr: mysql_up == 0
# for: 5m
# labels:
# severity: critical
# annotations:
# summary: MySQL down (instance {{ $labels.instance }})
# description: "MySQL instance is down on {{ $labels.instance }}\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
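(For context: the sharedSecret and existingConfigSecret settings above both point at a Secret named monitoring-mysql-creds. A minimal sketch of what such a Secret might look like, assuming its .my.cnf carries a [client.monitoring-mysql-creds] section matching the auth_module used by the probe targets; the user and password values here are purely illustrative.)

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: monitoring-mysql-creds
  namespace: monitoring
type: Opaque
stringData:
  .my.cnf: |
    # default section used when no auth_module is given
    [client]
    user = exporter
    password = example-password
    # section referenced by auth_module=client.monitoring-mysql-creds
    [client.monitoring-mysql-creds]
    user = exporter
    password = example-password
```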
The Helm chart created a ServiceMonitor, and I also created and applied a scrape config like the one below.
- job_name: mysql-exporter-prometheus-mysql-exporter
honor_labels: true
kubernetes_sd_configs:
- role: endpoints
namespaces:
names:
- monitoring
scrape_interval: 15s
Below is my ServiceMonitor:
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
annotations:
meta.helm.sh/release-name: mysql-exporter
meta.helm.sh/release-namespace: monitoring
creationTimestamp: "2024-02-06T07:44:21Z"
generation: 1
labels:
app.kubernetes.io/instance: mysql-exporter
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: prometheus-mysql-exporter
app.kubernetes.io/version: v0.15.1
helm.sh/chart: prometheus-mysql-exporter-2.4.0
name: mysql-exporter-prometheus-mysql-exporter
namespace: monitoring
resourceVersion: "631612003"
uid: a7b7c1cf-3679-4200-8d0f-40a558f75653
spec:
endpoints:
- interval: 5s
metricRelabelings:
- action: replace
replacement: xx.xx.xx.xx
sourceLabels:
- instance
targetLabel: instance
- action: replace
replacement: all-staging-cloudsql-test-5_6
sourceLabels:
- target
targetLabel: target
params:
auth_module:
- client.monitoring-mysql-creds
target:
- xx.xx.xx.xx:3306
path: /probe
port: mysql-exporter
scrapeTimeout: 10s
- interval: 5s
metricRelabelings:
- action: replace
replacement: xx.xx.xx.xx
sourceLabels:
- instance
targetLabel: instance
- action: replace
replacement: all-staging-cloudsql-test-5_7
sourceLabels:
- target
targetLabel: target
params:
auth_module:
- client.monitoring-mysql-creds
target:
- xx.xx.xx.xx:3306
path: /probe
port: mysql-exporter
scrapeTimeout: 10s
namespaceSelector:
matchNames:
- monitoring
selector:
matchLabels:
app.kubernetes.io/instance: mysql-exporter
app.kubernetes.io/name: prometheus-mysql-exporter
But after applying all of this, the only metrics I can see in VictoriaMetrics are the ones exposed on the /metrics endpoint, i.e. the exporter's own metrics, not the /probe endpoint metrics. Because of this we are not able to see any of the MySQL metrics we actually need. Any help here would be greatly appreciated :)
PS: A curl request with /probe?target= does return the metrics, so I suspect the issue is with the ServiceMonitor and ScrapeConfigs.
What did you expect to see?
All the metrics from /probe endpoint.
What did you see instead?
Metrics from /metrics endpoint.
@Sam-Sundar Maybe this PR fixes it: https://github.com/prometheus-community/helm-charts/pull/4356
@dongjiang1989 I tried it, no luck. Earlier at least the /metrics endpoint's metrics were available in Prometheus. Now those are gone too.
What does the new ServiceMonitor in https://github.com/prometheus-community/helm-charts/releases/tag/prometheus-mysql-exporter-2.5.1 look like?
In addition to probe collection, is there also metrics collection? @Sam-Sundar
Hey @dongjiang1989,
I applied the changes even before the PR got merged, and I have also tried now by pulling the changes through Helm. No luck. Now neither /metrics nor /probe is being scraped.
When I search for a MySQL metric in Prometheus, I'm not able to find any.
In the Prometheus target list, are /metrics and /probe being collected successfully?
Hey @dongjiang1989,
this is my mysql-exporter target:
{
"discoveredLabels": {
"__address__": "10.68.4.21:9104",
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
"__meta_kubernetes_endpoint_address_target_name": "mysql-exporter-prometheus-mysql-exporter-67d44746b6-58qkx",
"__meta_kubernetes_endpoint_node_name": "gke-justcall-staging-infra-node-pool-136d9490-qcbs",
"__meta_kubernetes_endpoint_port_name": "mysql-exporter",
"__meta_kubernetes_endpoint_port_protocol": "TCP",
"__meta_kubernetes_endpoint_ready": "true",
"__meta_kubernetes_endpoints_label_app_kubernetes_io_instance": "mysql-exporter",
"__meta_kubernetes_endpoints_label_app_kubernetes_io_managed_by": "Helm",
"__meta_kubernetes_endpoints_label_app_kubernetes_io_name": "prometheus-mysql-exporter",
"__meta_kubernetes_endpoints_label_app_kubernetes_io_version": "v0.15.1",
"__meta_kubernetes_endpoints_label_helm_sh_chart": "prometheus-mysql-exporter-2.4.0",
"__meta_kubernetes_endpoints_labelpresent_app_kubernetes_io_instance": "true",
"__meta_kubernetes_endpoints_labelpresent_app_kubernetes_io_managed_by": "true",
"__meta_kubernetes_endpoints_labelpresent_app_kubernetes_io_name": "true",
"__meta_kubernetes_endpoints_labelpresent_app_kubernetes_io_version": "true",
"__meta_kubernetes_endpoints_labelpresent_helm_sh_chart": "true",
"__meta_kubernetes_endpoints_name": "mysql-exporter-prometheus-mysql-exporter",
"__meta_kubernetes_namespace": "monitoring",
"__meta_kubernetes_pod_annotation_checksum_credentials": "01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b",
"__meta_kubernetes_pod_annotation_kubectl_kubernetes_io_restartedAt": "2024-03-21T00:55:37+05:30",
"__meta_kubernetes_pod_annotation_prometheus_io_path": "/metrics",
"__meta_kubernetes_pod_annotation_prometheus_io_port": "9104",
"__meta_kubernetes_pod_annotation_prometheus_io_scrape": "true",
"__meta_kubernetes_pod_annotationpresent_checksum_credentials": "true",
"__meta_kubernetes_pod_annotationpresent_kubectl_kubernetes_io_restartedAt": "true",
"__meta_kubernetes_pod_annotationpresent_prometheus_io_path": "true",
"__meta_kubernetes_pod_annotationpresent_prometheus_io_port": "true",
"__meta_kubernetes_pod_annotationpresent_prometheus_io_scrape": "true",
"__meta_kubernetes_pod_container_image": "quay.io/prometheus/mysqld-exporter:v0.15.1",
"__meta_kubernetes_pod_container_name": "prometheus-mysql-exporter",
"__meta_kubernetes_pod_container_port_number": "9104",
"__meta_kubernetes_pod_container_port_protocol": "TCP",
"__meta_kubernetes_pod_controller_kind": "ReplicaSet",
"__meta_kubernetes_pod_controller_name": "mysql-exporter-prometheus-mysql-exporter-67d44746b6",
"__meta_kubernetes_pod_host_ip": "10.128.0.23",
"__meta_kubernetes_pod_ip": "10.68.4.21",
"__meta_kubernetes_pod_label_app_kubernetes_io_instance": "mysql-exporter",
"__meta_kubernetes_pod_label_app_kubernetes_io_name": "prometheus-mysql-exporter",
"__meta_kubernetes_pod_label_pod_template_hash": "67d44746b6",
"__meta_kubernetes_pod_labelpresent_app_kubernetes_io_instance": "true",
"__meta_kubernetes_pod_labelpresent_app_kubernetes_io_name": "true",
"__meta_kubernetes_pod_labelpresent_pod_template_hash": "true",
"__meta_kubernetes_pod_name": "mysql-exporter-prometheus-mysql-exporter-67d44746b6-58qkx",
"__meta_kubernetes_pod_node_name": "gke-justcall-staging-infra-node-pool-136d9490-qcbs",
"__meta_kubernetes_pod_phase": "Running",
"__meta_kubernetes_pod_ready": "true",
"__meta_kubernetes_pod_uid": "a8e8e4f0-aa6d-40f0-8f16-0d324b774e19",
"__meta_kubernetes_service_annotation_cloud_google_com_neg": "{\"ingress\":true}",
"__meta_kubernetes_service_annotation_meta_helm_sh_release_name": "mysql-exporter",
"__meta_kubernetes_service_annotation_meta_helm_sh_release_namespace": "monitoring",
"__meta_kubernetes_service_annotationpresent_cloud_google_com_neg": "true",
"__meta_kubernetes_service_annotationpresent_meta_helm_sh_release_name": "true",
"__meta_kubernetes_service_annotationpresent_meta_helm_sh_release_namespace": "true",
"__meta_kubernetes_service_label_app_kubernetes_io_instance": "mysql-exporter",
"__meta_kubernetes_service_label_app_kubernetes_io_managed_by": "Helm",
"__meta_kubernetes_service_label_app_kubernetes_io_name": "prometheus-mysql-exporter",
"__meta_kubernetes_service_label_app_kubernetes_io_version": "v0.15.1",
"__meta_kubernetes_service_label_helm_sh_chart": "prometheus-mysql-exporter-2.4.0",
"__meta_kubernetes_service_labelpresent_app_kubernetes_io_instance": "true",
"__meta_kubernetes_service_labelpresent_app_kubernetes_io_managed_by": "true",
"__meta_kubernetes_service_labelpresent_app_kubernetes_io_name": "true",
"__meta_kubernetes_service_labelpresent_app_kubernetes_io_version": "true",
"__meta_kubernetes_service_labelpresent_helm_sh_chart": "true",
"__meta_kubernetes_service_name": "mysql-exporter-prometheus-mysql-exporter",
"__metrics_path__": "/metrics",
"__scheme__": "http",
"__scrape_interval__": "15s",
"__scrape_timeout__": "10s",
"__tmp_prometheus_job_name": "serviceMonitor/justcall-iq-ai/prometheus-oper-istio-controlplane/0"
}
}
Fixed it by creating a VMServiceScrape (template below) and adding a scrape config to VictoriaMetrics.
{{- if .Values.serviceMonitor.enabled }}
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMServiceScrape
metadata:
name: {{ template "prometheus-mysql-exporter.fullname" . }}
labels:
{{- include "prometheus-mysql-exporter.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{ toYaml .Values.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
{{- if .Values.serviceMonitor.namespace }}
namespace: {{ .Values.serviceMonitor.namespace }}
{{- end }}
spec:
namespaceSelector:
matchNames:
{{- if .Values.serviceMonitor.namespaceSelector }}
{{- with .Values.serviceMonitor.namespaceSelector }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- else }}
- {{ include "prometheus-mysql-exporter.namespace" . }}
{{- end }}
selector:
matchLabels:
{{- include "prometheus-mysql-exporter.selectorLabels" . | nindent 6 }}
{{- with .Values.serviceMonitor.jobLabel }}
jobLabel: {{ . | quote}}
{{- end }}
{{- with .Values.serviceMonitor.targetLabels }}
targetLabels:
{{ toYaml . | trim | indent 4 -}}
{{- end }}
{{- with .Values.serviceMonitor.podTargetLabels }}
podTargetLabels:
{{ toYaml . | trim | indent 4 -}}
{{- end }}
endpoints:
{{- if .Values.serviceMonitor.multipleTarget.enabled }}
{{- range .Values.serviceMonitor.multipleTarget.targets }}
- path: /probe
port: {{ $.Values.service.name }}
{{- if $.Values.serviceMonitor.interval }}
interval: {{ $.Values.serviceMonitor.interval }}
{{- end }}
{{- if $.Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ $.Values.serviceMonitor.scrapeTimeout }}
{{- end }}
metricRelabelConfigs:
- action: replace
replacement: {{ .endpoint }}
sourceLabels: [instance]
targetLabel: instance
- action: replace
replacement: {{ .name }}
sourceLabels: [target]
targetLabel: target
{{- if $.Values.serviceMonitor.metricRelabelings -}}
{{ toYaml $.Values.serviceMonitor.metricRelabelings | nindent 8 }}
{{- end }}
{{- if $.Values.serviceMonitor.relabelings }}
relabelings: {{ toYaml $.Values.serviceMonitor.relabelings | nindent 8 }}
{{- end }}
params:
target:
- {{ .endpoint }}:{{ .port | default 3306 }}
{{- if $.Values.serviceMonitor.multipleTarget.sharedSecret.enabled }}
auth_module:
- client.{{ $.Values.serviceMonitor.multipleTarget.sharedSecret.name }}
{{- else }}
auth_module:
- client.{{ .name }}
{{- end }}
{{- end }}
{{- end }}
- path: /metrics
port: {{ .Values.service.name }}
{{- if .Values.serviceMonitor.interval }}
interval: {{ .Values.serviceMonitor.interval }}
{{- end }}
{{- if .Values.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- end }}
{{- if .Values.serviceMonitor.metricRelabelings }}
metricRelabelings: {{ toYaml .Values.serviceMonitor.metricRelabelings | nindent 8 }}
{{- end }}
{{- if .Values.serviceMonitor.relabelings }}
relabelings: {{ toYaml .Values.serviceMonitor.relabelings | nindent 8 }}
{{- end }}
{{- end }}
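Rendered against the values above, the probe endpoint for the first target plus the plain /metrics endpoint should come out roughly like this (a sketch only, using the same placeholder IP as the values file):

```yaml
apiVersion: operator.victoriametrics.com/v1beta1
kind: VMServiceScrape
metadata:
  name: mysql-exporter-prometheus-mysql-exporter
  namespace: monitoring
spec:
  namespaceSelector:
    matchNames:
      - monitoring
  selector:
    matchLabels:
      app.kubernetes.io/instance: mysql-exporter
      app.kubernetes.io/name: prometheus-mysql-exporter
  endpoints:
    - path: /probe
      port: mysql-exporter
      interval: 5s
      scrapeTimeout: 10s
      metricRelabelConfigs:
        - action: replace
          replacement: xx.xx.xx.xx
          sourceLabels: [instance]
          targetLabel: instance
        - action: replace
          replacement: all-staging-cloudsql-test-5_6
          sourceLabels: [target]
          targetLabel: target
      params:
        target:
          - xx.xx.xx.xx:3306
        auth_module:
          - client.monitoring-mysql-creds
    - path: /metrics
      port: mysql-exporter
      interval: 5s
      scrapeTimeout: 10s
```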
ScrapeConfig
- job_name: mysqld-exporter
  metrics_path: /probe
  params:
    auth_module: [client]
  static_configs:
    - targets:
        - '10.0.0.1:3306'
      labels:
        instance: 'xxxx'
    - targets:
        - '10.0.0.0:3306'
      labels:
        instance: 'xxx'
  relabel_configs:
    # copy the MySQL address into the ?target= probe parameter
    - source_labels: [__address__]
      target_label: __param_target
    # set the instance label from the probed address
    - source_labels: [__param_target]
      target_label: instance
    # scrape the exporter service itself, not the MySQL host
    - target_label: __address__
      replacement: mysql-exporter-prometheus-mysql-exporter:9104