
Production Static Config

busyboy77 opened this issue 6 months ago

Just sharing the config we used while moving from 4 pods down to a single pod with a static config, for anyone who comes across this in the future.

My deployment is:

Ingress-nginx -> main ingress (pointing to APISIX) -> APISIX -> routes/services/upstreams


global:
  # The OPA policy is defined here so it can be easily applied to any route.
  policies:
    opa:
      restrictivePolicy: |
        package apisix.authz

        import rego.v1

        default allow := false

        allow if {
            method_allowed
            jwt_claims_valid
        }

        method_allowed if { input.request.method == "GET" }
        method_allowed if { input.request.method == "POST" }

        jwt_claims_valid if {
            startswith(input.request.headers.authorization, "Bearer ")
            token := split(input.request.headers.authorization, " ")[1]
            [is_valid, header, payload] := io.jwt.decode_verify(token, { "cert": "" })
            is_valid

            allowed_roles := {"power", "superpower"}
            some role in payload.realm_access.roles
            role in allowed_roles
        }
etcd:
  enabled: false
externalEtcd:
  user: ""
  existingSecret: ""
  password: ""
dashboard:
  enabled: false
ingress-controller:
  enabled: false
serviceAccount:
  create: true      
rbac:
  create: true  # this requires the templates/clusterrole.yaml to be updated as well.
# https://github.com/apache/apisix/discussions/11520
# https://www.cnblogs.com/hukey/p/18158054
# https://github.com/apache/apisix/issues/7026

extraVolumes:
  - name: apisix-cache-volume
    emptyDir:
      sizeLimit: 2Gi
extraVolumeMounts:
  - name: apisix-cache-volume
    mountPath: /data/cache/one


ingress:
  enabled: true
  className: nginx 
  annotations: 
    nginx.ingress.kubernetes.io/backend-protocol: "HTTP"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    nginx.ingress.kubernetes.io/proxy-body-size: "6m"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "600s"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "600s"
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60s"
  hosts:
    - host: my.fqdn.com
      paths: 
        - /
  tls: 
    - hosts: 
      - my.fqdn.com
      secretName: tls-cert-name
        

apisix:
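  # Standalone data plane: with etcd and the Admin API disabled, APISIX serves its
  # routes/services/upstreams from the local apisix.yaml rendered from the
  # apisixStandalone section further below.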
  deployment:
    mode: standalone
    role: "data_plane"
  admin:
    enabled: false
  fullCustomConfig:
    enabled: true
    config:
      apisix:
        node_listen:
          - 9080
        enable_heartbeat: true
        enable_admin: false
        enable_admin_cors: true   # harmless when admin is off
        enable_control: false
        enable_debug: false
        enable_dev_mode: false
        enable_reuseport: true    # spread load across workers
        enable_ipv6: true
        enable_http2: true
        enable_server_tokens: false  # hide version for slight perf gain
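        # On-disk proxy cache; disk_path must match the emptyDir mount declared
        # under extraVolumeMounts above (/data/cache/one).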
        proxy_cache:
          cache_ttl: 60s
          zones:
            - name: disk_cache_one
              memory_size: 100m
              disk_size: 2G
              disk_path: "/data/cache/one"
              cache_levels: "1:2"
        proxy_mode: http
        dns_resolver_valid: 30
        resolver_timeout: 5
        router:
          http: radixtree_host_uri
      nginx_config:
        error_log: "/dev/stderr"
        error_log_level: "warn"
        worker_processes: "auto"        # one per CPU core
        enable_cpu_affinity: true
        worker_rlimit_nofile: 200000    # allow many open files
        event:
          worker_connections: 16384     # many concurrent connections
        envs: 
          - KUBERNETES_SERVICE_HOST
          - KUBERNETES_SERVICE_PORT
        http:
          enable_access_log: true
          access_log: "/dev/stdout"
          access_log_format: '$remote_addr - $remote_user [$time_local] $http_host \"$request\" $status $body_bytes_sent $request_time \"$http_referer\" \"$http_user_agent\" $upstream_addr $upstream_status $upstream_response_time \"$upstream_scheme://$upstream_host$upstream_uri\"'
          access_log_format_escape: default
          keepalive_timeout: "60s"
          client_header_timeout: 60s
          client_body_timeout: 60s
          send_timeout: 10s
          underscores_in_headers: "on"
          real_ip_header: "X-Real-IP"
          real_ip_from:
            - 127.0.0.1
            - "unix:"
      discovery:
        kubernetes: {}
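        # An empty table here uses the in-cluster defaults (the pod's service account
        # token plus the KUBERNETES_SERVICE_HOST/PORT envs exposed under nginx_config.envs above).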
      deployment:
        role: data_plane
        role_data_plane:
          config_provider: yaml
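        # The admin block below is effectively unused while enable_admin is false;
        # these are the well-known example keys, so rotate them before ever enabling the Admin API.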
        admin:
          allow_admin:
            - 127.0.0.1/24
          admin_listen:
            ip: 0.0.0.0
            port: 9180
          admin_key:
            - name: "admin"
              key: "edd1c9f034335f136f87ad84b625c8f1"
              role: admin
            - name: "viewer"
              key: "4054f7cf07e344346cd3f287985e76a2"
              role: viewer
  plugins:
    - prometheus
    - proxy-rewrite
    - cors
    - openid-connect
    - limit-count
    - ip-restriction
    - response-rewrite
    - client-control
    - redirect
    - real-ip
    - mocking
    - error-page-rewrite
    - opa
  pluginAttrs: {}
  stream_plugins: []

  
# =================================================================
# HOW TO APPLY THE OPA POLICY
# =================================================================
# The OPA policy defined in 'global.policies.opa.restrictivePolicy' is not applied to any
# route by default. To secure a route, manually add the 'opa' plugin block to it.
# For example:
#
#   - id: my-secure-route
#     ...
#     plugins:
#       opa:
#         policy: '{{ .Values.global.policies.opa.restrictivePolicy }}'
#       # ... other plugins for the route
#
# =================================================================

replicaCount: 1
useDaemonSet: false

service:
  type: NodePort

## ====================================
## Resource requests & limits
## ====================================
resources:
  requests:
    cpu: "256m"
    memory: "512Mi"
  limits:
    cpu: "512m"
    memory: "1Gi"


apisixStandalone:
   # =================================================================
   # SERVICES (UPSTREAMS)
   # =================================================================
   # Each service corresponds to a microservice in your Kubernetes cluster.
   # The 'service_name' uses the format: <namespace>/<k8s-service-name>:<port-name>
   #------------------------------------------------------------------
   services:
     - id: svc-example-service
       name: svc-example-service
       upstream:
         discovery_type: kubernetes
         service_name: "<NAMESPACE>/example-service-svc:http-3000"
         scheme: http
         type: roundrobin
         timeout: { connect: 5, send: 10, read: 10 }
     - id: svc-example-studio
       name: svc-example-studio
       upstream:
         discovery_type: kubernetes
         service_name: "<NAMESPACE>/example-service-studio-svc:http-1880"
         scheme: http
         type: roundrobin
         timeout: { connect: 5, send: 10, read: 10 }
     # Dummy service for the 404 handler route to satisfy schema requirements
     - id: dummy-service-for-404
       name: dummy-service-for-404
       upstream:
         nodes:
           "127.0.0.1:1984": 1 # A placeholder node that will not be used
         type: roundrobin
   
   
   # =================================================================
   # ROUTES
   # =================================================================
   # Each route defines a public-facing path and links it to a service.
   # Plugins for auth, rewrite, cors, etc., are applied here.
   #------------------------------------------------------------------
   routes:
     - id: example-service-login-unsecured
       uris: ["/example-service/path/login"]
       hosts: 
         - my.fqdn.com
       priority: 10
       service_id: svc-example-service
       status: 1
       plugins:
         proxy-rewrite:
           enable: true
           regex_uri: ["^/example-service/path/login$", "/login"]
     - id: example-service-socketio-unsecured
       uris: ["/example-service/socket.io/*"]
       hosts: 
         - my.fqdn.com
       priority: 10
       service_id: svc-example-service
       enable_websocket: true
       status: 1
       timeout:
         connect: 5
         send: 3600
         read: 3600
       plugins:
         proxy-rewrite:
           enable: true
           regex_uri: ["^/example-service/(socket\\.io/.*)", "/$1"]
     - id: svc-example-studio-un-secured
       uris: ["/svc-example-studio/path/options*"]
       hosts: 
         - my.fqdn.com
       priority: 10
       service_id: svc-example-studio
       status: 1
       plugins:
         proxy-rewrite:
           enable: true
           regex_uri: ["^/svc-example-studio(/path/options.*)$", "$1"]
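
For completeness, here is roughly what one of these routes looks like once the restrictive OPA policy is applied, following the HOW TO APPLY note above. The route id and URI are illustrative only, and the opa plugin parameters should be adjusted to your own OPA setup:

   routes:
     - id: example-service-api-secured
       uris: ["/example-service/path/api/*"]
       hosts:
         - my.fqdn.com
       priority: 5
       service_id: svc-example-service
       status: 1
       plugins:
         opa:
           policy: '{{ .Values.global.policies.opa.restrictivePolicy }}'
         proxy-rewrite:
           enable: true
           regex_uri: ["^/example-service(/path/api/.*)$", "$1"]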

Secondly, I had to modify templates/apisix-config-cm.yml as below, so that the routes/services/upstreams are generated automatically from the values file.

{{- if eq .Values.apisix.deployment.mode "standalone" }}
# This Helm template creates a ConfigMap named 'apisix.yaml'.
# This ConfigMap provides the static configuration for APISIX when running in standalone mode.
# The content is dynamically populated from the 'apisixStandalone' section of your values file.

kind: ConfigMap
apiVersion: v1
metadata:
  name: apisix.yaml
data:
  apisix.yaml: |
{{- if .Values.apisixStandalone }}
{{- include "apisix.tplvalues.render" ( dict "value" .Values.apisixStandalone "context" . ) | nindent 4 }}
    #END
{{- end }}
{{- end }}
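
For reference, with the apisixStandalone values above the rendered ConfigMap comes out roughly like this (the block is passed through tpl, indented by four spaces, and terminated with #END, which APISIX expects at the end of a standalone config file):

kind: ConfigMap
apiVersion: v1
metadata:
  name: apisix.yaml
data:
  apisix.yaml: |
    services:
      - id: svc-example-service
        name: svc-example-service
        upstream:
          discovery_type: kubernetes
          ...
    routes:
      - id: example-service-login-unsecured
        ...
    #END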

Suggestions, updates, and queries are welcome.

busyboy77 · Jun 21 '25 12:06