xk6-client-tracing
Include Kubernetes cron example?
This project might make an interesting "verification" step when standing up a new Tempo cluster. Currently we document a few ways of getting data into the cluster, but these require users to have already instrumented something and to be ready to start pushing traces. Could this extension replace the deprecated synthetic load generator?
Here is an easy way for Kubernetes users to get started with k6, taken from a lab environment:
```yaml
apiVersion: v1
data:
  param.js: |
    import { sleep } from 'k6'
    import tracing from 'k6/x/tracing'
    import { randomIntBetween } from 'https://jslib.k6.io/k6-utils/1.2.0/index.js'

    export let options = {
      vus: 1,
      duration: '1m',
    }

    const client = new tracing.Client({
      endpoint: 'tempo.trace.svc.cluster.znet:4317',
      exporter: tracing.EXPORTER_OTLP,
      insecure: true,
      headers: { 'X-Scope-OrgID': 'k6' },
    })

    export default function () {
      let pushSizeTraces = randomIntBetween(2, 3)
      let pushSizeSpans = 0
      let t = []
      for (let i = 0; i < pushSizeTraces; i++) {
        let c = randomIntBetween(5, 35)
        pushSizeSpans += c
        t.push({
          random_service_name: false,
          spans: {
            count: c,
            size: randomIntBetween(300, 30000),
            random_name: true,
            fixed_attrs: {
              test: 'test',
            },
          },
        })
      }

      let gen = new tracing.ParameterizedGenerator(t)
      let traces = gen.traces()
      client.push(traces)

      console.log(
        `Pushed ${pushSizeSpans} spans from ${pushSizeTraces} different traces. Here is a random traceID: ${
          t[Math.floor(Math.random() * t.length)].id
        }`
      )

      sleep(1)
    }

    export function teardown() {
      client.shutdown()
    }
kind: ConfigMap
metadata:
  name: k6-tracing-config
  namespace: trace
---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: k6-tracing
  namespace: trace
spec:
  jobTemplate:
    spec:
      backoffLimit: 1
      template:
        metadata:
          labels:
            name: k6-tracing
        spec:
          containers:
            - args:
                - run
                - /config/param.js
              image: zalegrala/xk6-client-tracing:latest
              imagePullPolicy: Always
              name: k6-tracing
              ports:
                - containerPort: 6565
                  name: none
              volumeMounts:
                - mountPath: /config
                  name: k6-tracing-config
          restartPolicy: Never
          volumes:
            - configMap:
                name: k6-tracing-config
              name: k6-tracing-config
      ttlSecondsAfterFinished: 604800
  schedule: '*/10 * * * *'
```
The CronJob above runs k6 every 10 minutes, and each run pushes traces for 1 minute (the `duration` set in the script options). Is this worth working into the project, perhaps along with a bit of documentation to help users verify a new cluster? There may also be value in keeping the job around even after the cluster is up and running.
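To make the verification idea more concrete, the load-generating script could be paired with a small read-path check. The sketch below is only an illustration, not part of the extension: it assumes the Tempo query frontend is reachable in-cluster at `tempo.trace.svc.cluster.znet:3200` (hostname and port chosen to match the lab endpoint above; adjust for your environment), that search is enabled on the Tempo cluster, and that queries use the same `X-Scope-OrgID` tenant the pushes used.

```javascript
import http from 'k6/http'
import { check, sleep } from 'k6'

export let options = {
  vus: 1,
  iterations: 1,
}

// Assumed query-frontend address and tenant; adjust for your environment.
const tempoQueryUrl = 'http://tempo.trace.svc.cluster.znet:3200'
const params = { headers: { 'X-Scope-OrgID': 'k6' } }

export default function () {
  // Give recently pushed spans a moment to become queryable.
  sleep(5)

  // Search for the fixed attribute set by the generator script (test=test).
  const res = http.get(`${tempoQueryUrl}/api/search?tags=test%3Dtest&limit=5`, params)

  check(res, {
    'search returned 200': (r) => r.status === 200,
    'search found at least one trace': (r) => (r.json('traces') || []).length > 0,
  })
}
```

When standing up a fresh cluster, a one-off run of the CronJob can also be triggered by hand, e.g. `kubectl create job --from=cronjob/k6-tracing k6-tracing-manual -n trace`, rather than waiting for the next scheduled run.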