Nil Pointer Dereference Panic in Replacements Transformer

Open vsabella opened this issue 2 years ago • 2 comments

Describe the bug

When running kustomize v4.5.7, we get a nil pointer dereference (segmentation fault) on anything using the replacements transformer. This is a regression from kustomize v4.5.5.
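
For reference, the panic below came from the CLI, but the same code path can be driven through the public krusty API; a minimal sketch (./overlay is a placeholder for whatever kustomization triggers it):

package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/kustomize/api/krusty"
	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	// Build the kustomization the same way `kustomize build ./overlay` does;
	// the replacements transformer runs inside Kustomizer.Run.
	k := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
	resMap, err := k.Run(filesys.MakeFsOnDisk(), "./overlay")
	if err != nil {
		log.Fatal(err)
	}
	yml, err := resMap.AsYaml()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(yml))
}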

panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x2 addr=0x0 pc=0x103397938]

goroutine 1 [running]:
sigs.k8s.io/kustomize/api/filters/replacement.setFieldValue(0x14001c7d4a0, 0x0, 0x1?)
	sigs.k8s.io/kustomize/api/filters/replacement/replacement.go:237 +0x3b8
sigs.k8s.io/kustomize/api/filters/replacement.copyValueToTarget(0x0?, 0x14002448c00, 0x14002448280)
	sigs.k8s.io/kustomize/api/filters/replacement/replacement.go:208 +0x374
sigs.k8s.io/kustomize/api/filters/replacement.applyReplacement({0x1400244c420, 0x3, 0x3}, 0x14002357b00?, {0x14002357ae0, 0x1, 0x1400218ca28?})
	sigs.k8s.io/kustomize/api/filters/replacement/replacement.go:131 +0x318
sigs.k8s.io/kustomize/api/filters/replacement.Filter.Filter({{0x14002357b00?, 0x103387238?, 0x18?}}, {0x1400244c420?, 0x2957650c17c274bf?, 0x102d635e0?})
	sigs.k8s.io/kustomize/api/filters/replacement/replacement.go:33 +0xf8
sigs.k8s.io/kustomize/api/resmap.(*resWrangler).ApplyFilter(0x14001eda1f8, {0x1036b1c68, 0x1400244c408})
	sigs.k8s.io/kustomize/api/resmap/reswrangler.go:737 +0x194
sigs.k8s.io/kustomize/api/internal/builtins.(*ReplacementTransformerPlugin).Transform(0x1400218cc78?, {0x1036ba1f8, 0x14001eda1f8})
	sigs.k8s.io/kustomize/api/internal/builtins/ReplacementTransformer.go:71 +0x68
sigs.k8s.io/kustomize/api/internal/target.(*multiTransformer).Transform(0x140022f8960?, {0x1036ba1f8, 0x14001eda1f8})
	sigs.k8s.io/kustomize/api/internal/target/multitransformer.go:30 +0x8c
sigs.k8s.io/kustomize/api/internal/accumulator.(*ResAccumulator).Transform(...)
	sigs.k8s.io/kustomize/api/internal/accumulator/resaccumulator.go:141
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).runTransformers(0x140022f8960, 0x14001cf2860)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:330 +0x198
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateTarget(0x140022f8960, 0x0?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:220 +0x178
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateDirectory(0x140022f87d0, 0x14001cf2860, {0x1036b67b0?, 0x140022f8910}, 0x1)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:504 +0x3ec
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateComponents(0x140022f87d0, 0x1033b7f74?, {0x140022bfcc0?, 0x1, 0x14002303900?})
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:462 +0x20c
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateTarget(0x140022f87d0, 0x0?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:192 +0x58
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateDirectory(0x14001d94e10, 0x14001cf2860, {0x1036b67b0?, 0x140022f8780}, 0x1)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:504 +0x3ec
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateComponents(0x14001d94e10, 0x14001441b00?, {0x14001e68bc0?, 0x1, 0x1400218dcc8?})
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:462 +0x20c
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateTarget(0x14001d94e10, 0x1?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:192 +0x58
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).AccumulateTarget(0x140013f0740?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:181 +0x114
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateDirectory(0x14001d94d20, 0x14001cf2780, {0x1036b67b0?, 0x14001d94dc0}, 0x0)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:509 +0x510
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateResources(0x14001d94d20, 0x14001441410?, {0x14001e68a00?, 0x1, 0x1400218e4e8?})
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:429 +0x248
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateTarget(0x14001d94d20, 0x1?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:188 +0x34
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).AccumulateTarget(0x140013f0280?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:181 +0x114
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateDirectory(0x140003fc550, 0x140004747c0, {0x1036b67b0?, 0x14001d94cd0}, 0x0)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:509 +0x510
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateResources(0x140003fc550, 0x14000498600?, {0x1400049e180?, 0x4, 0x1400053ed08?})
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:429 +0x248
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateTarget(0x140003fc550, 0x1?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:188 +0x34
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).AccumulateTarget(0x1400048e760?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:181 +0x114
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateDirectory(0x140003fc460, 0x14000474680, {0x1036b67b0?, 0x140003fc500}, 0x0)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:509 +0x510
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateResources(0x140003fc460, 0x1400038def0?, {0x14000379cc0?, 0x1, 0x1400053f528?})
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:429 +0x248
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).accumulateTarget(0x140003fc460, 0x14000483078?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:188 +0x34
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).AccumulateTarget(0x0?)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:181 +0x114
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).makeCustomizedResMap(0x140003fc460)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:127 +0x70
sigs.k8s.io/kustomize/api/internal/target.(*KustTarget).MakeCustomizedResMap(...)
	sigs.k8s.io/kustomize/api/internal/target/kusttarget.go:118
sigs.k8s.io/kustomize/api/krusty.(*Kustomizer).Run(0x1400218fd60, {0x1036b9140, 0x103bf19b8}, {0x1033f4828, 0x1})
	sigs.k8s.io/kustomize/api/krusty/kustomizer.go:88 +0x248
sigs.k8s.io/kustomize/kustomize/v4/commands/build.NewCmdBuild.func1(0x1400040b400?, {0x103bf19b8?, 0x0?, 0x0?})
	sigs.k8s.io/kustomize/kustomize/v4/commands/build/build.go:80 +0x148
github.com/spf13/cobra.(*Command).execute(0x1400040b400, {0x103bf19b8, 0x0, 0x0})
	github.com/spf13/[email protected]/command.go:856 +0x4c4
github.com/spf13/cobra.(*Command).ExecuteC(0x1400040af00)
	github.com/spf13/[email protected]/command.go:974 +0x354
github.com/spf13/cobra.(*Command).Execute(...)
	github.com/spf13/[email protected]/command.go:902
main.main()
	sigs.k8s.io/kustomize/kustomize/v4/main.go:14 +0x24

Files that can reproduce the issue

Expected output

Actual output

Kustomize version v4.5.7 {Version:kustomize/v4.5.7 GitCommit:56d82a8378dfc8dc3b3b1085e5a6e67b82966bd7 BuildDate:2022-08-02T16:28:01Z GoOs:darwin GoArch:arm64}

Platform

darwin arm64

Additional context

vsabella • Aug 14 '22 04:08

@vsabella: This issue is currently awaiting triage.

SIG CLI takes a lead on issue triage for this repo, but any Kubernetes member can accept issues by applying the triage/accepted label.

The triage/accepted label can be added by org members by writing /triage accepted in a comment.

Instructions for interacting with me using PR comments are available here. If you have questions or suggestions related to my behavior, please file an issue against the kubernetes/test-infra repository.

k8s-ci-robot • Aug 14 '22 04:08

Can you please provide a reproduction sample? It doesn't happen universally (we have many tests for this transformer). If confirmed, we would consider this urgent to fix.

KnVerey • Aug 16 '22 22:08

I got this same stack trace today with kustomize version 4.2.0. I'm not 100% sure what caused it.

jamesloosli • Aug 17 '22 23:08

Aha, here's the easiest way to reproduce:

# mapping.yaml
apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: client
spec:
  hostname: "needs.a.hyphenated.prefix"
  service: placeholder:8080

# replacement.yaml
source: { kind: ConfigMap, name: someConfigMap, fieldPath: data.SOME_MISSING_THING }
targets:
  - select: { kind: Mapping, name: client }
    fieldPaths: [spec.hostname]
    options: { delimiter: "-", index: -1 }

# env
OTHER_GARBAGE_VARS=here
NOT_THE_RIGHT_ONE=though

# kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
  - mapping.yaml

replacements:
  - path: replacement.yaml

generatorOptions:
  disableNameSuffixHash: true
configMapGenerator:
  - name: cosmEnv
    behavior: create
    envs:
      - env

If the source data key is missing, there appears to be a panic. Cheers.
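
If it helps triage, the same files can be bundled into a single Go program against the public krusty API and an in-memory filesystem (a sketch mirroring the files above; /app is an arbitrary path, and depending on the kustomize version the Run call either panics or returns an error, per the discussion below):

package main

import (
	"fmt"

	"sigs.k8s.io/kustomize/api/krusty"
	"sigs.k8s.io/kustomize/kyaml/filesys"
)

func main() {
	// The repro files from this comment, written to an in-memory filesystem
	// so the case runs as a self-contained program (or can be adapted into a
	// unit test).
	files := map[string]string{
		"mapping.yaml": `apiVersion: getambassador.io/v3alpha1
kind: Mapping
metadata:
  name: client
spec:
  hostname: "needs.a.hyphenated.prefix"
  service: placeholder:8080
`,
		"replacement.yaml": `source: { kind: ConfigMap, name: someConfigMap, fieldPath: data.SOME_MISSING_THING }
targets:
  - select: { kind: Mapping, name: client }
    fieldPaths: [spec.hostname]
    options: { delimiter: "-", index: -1 }
`,
		"env": "OTHER_GARBAGE_VARS=here\nNOT_THE_RIGHT_ONE=though\n",
		"kustomization.yaml": `apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - mapping.yaml
replacements:
  - path: replacement.yaml
generatorOptions:
  disableNameSuffixHash: true
configMapGenerator:
  - name: cosmEnv
    behavior: create
    envs:
      - env
`,
	}
	fSys := filesys.MakeFsInMemory()
	for name, content := range files {
		if err := fSys.WriteFile("/app/"+name, []byte(content)); err != nil {
			panic(err)
		}
	}
	k := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
	if _, err := k.Run(fSys, "/app"); err != nil {
		// Depending on the kustomize api version, this branch is reached with
		// an error, or the Run call above panics as reported.
		fmt.Println("error:", err)
	}
}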

jamesloosli • Aug 17 '22 23:08

I'm not able to reproduce using that sample. Even on Kustomize v4.2.0, I get Error: nothing selected by ~G_~V_ConfigMap|~X|someConfigMap:data.SOME_MISSING_THING, not a panic.

If I fix the reference to point at the generated configmap but still leave the key missing, I get a panic on 4.2.0 but not on the latest (4.5.7), so that particular bug has already been fixed. (The new error message is: Error: fieldPath data.SOME_MISSING_THING is missing for replacement source ConfigMap.[noVer].[noGrp]/someConfigMap.[noNs].)

Please provide a sample that can reproduce the panic reported with version 4.5.7 of Kustomize.

KnVerey • Aug 22 '22 19:08

@KnVerey Sorry I was out of the office but I'll see if I can get you a sanitized repro today.

vsabella • Sep 05 '22 05:09

@KnVerey here is a simple repro where we wanted to use a replacements transformer to add a field.

The scenario: imagine you have multiple filters in a list, and you want to add some new YAML to each instance (in this case, the proxy: option).

This was the pattern we needed to use in earlier versions of kustomize, due to https://github.com/kubernetes-sigs/kustomize/issues/4561. It looks like a nil pointer dereference in the newly refactored "create" workflow.

Our only option is to try to replace all instances (0..N, and hope there are never more than N), but that requires the code not to cause a nil pointer dereference when an index doesn't exist.

This worked in v4.5.5 and regressed in v4.5.7.
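
To make that concrete: a speculative sketch of the failure mode against the kyaml API (illustrative Go only, not the actual kustomize source; setScalar is a hypothetical stand-in for the replacement filter's value-setting step). With a fieldPath like spec.configPatches.2.match.proxy.proxyVersion against a list that only has two entries, a create-capable lookup can apparently come back nil without an error, and dereferencing that nil node lines up with the panic at replacement.go:237 above:

package sketch

import (
	"fmt"
	"strings"

	"sigs.k8s.io/kustomize/kyaml/yaml"
)

// setScalar is a hypothetical stand-in for the replacements filter's
// value-setting step, NOT the actual kustomize code. The point is the nil
// guard: a create-capable path lookup can return a nil node without an error
// when a numeric path element points past the end of an existing list.
func setScalar(target *yaml.RNode, value string, fieldPath ...string) error {
	node, err := target.Pipe(yaml.LookupCreate(yaml.ScalarNode, fieldPath...))
	if err != nil {
		return err
	}
	if node == nil {
		// Without this check, node.YNode() below would be the kind of nil
		// dereference reported in the stack trace.
		return fmt.Errorf("unable to find or create field %q in target",
			strings.Join(fieldPath, "."))
	}
	node.YNode().Value = value
	return nil
}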

Base file

apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
  name: request-id
spec:
  configPatches:
    - applyTo: NETWORK_FILTER
    - applyTo: NETWORK_FILTER

Broken Replacement

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../base
  - istio-version.yaml
replacements:
  - source:
      kind: ConfigMap
      name: istio-version
      fieldPath: data.ISTIO_REGEX
    targets:
      - select:
          kind: EnvoyFilter
        fieldPaths:
          - spec.configPatches.0.match.proxy.proxyVersion
          - spec.configPatches.1.match.proxy.proxyVersion
          - spec.configPatches.2.match.proxy.proxyVersion
          - spec.configPatches.3.match.proxy.proxyVersion
        options:
          create: true

Working Replacement

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ../base
  - istio-version.yaml
replacements:
  - source:
      kind: ConfigMap
      name: istio-version
      fieldPath: data.ISTIO_REGEX
    targets:
      - select:
          kind: EnvoyFilter
        fieldPaths:
          - spec.configPatches.0.match.proxy.proxyVersion
          - spec.configPatches.1.match.proxy.proxyVersion
        options:
          create: true

ConfigMap (istio-version.yaml)

apiVersion: v1
kind: ConfigMap
metadata:
  name: istio-version
  annotations:
    config.kubernetes.io/local-config: "true"
data:
  ISTIO_REGEX: '^1\.14.*'

vsabella • Sep 05 '22 05:09

Thank you! I successfully reproduced.

/triage accepted
/kind regression
/assign

KnVerey • Sep 06 '22 20:09

The Kubernetes project currently lacks enough contributors to adequately respond to all issues and PRs.

This bot triages issues and PRs according to the following rules:

  • After 90d of inactivity, lifecycle/stale is applied
  • After 30d of inactivity since lifecycle/stale was applied, lifecycle/rotten is applied
  • After 30d of inactivity since lifecycle/rotten was applied, the issue is closed

You can:

  • Mark this issue or PR as fresh with /remove-lifecycle stale
  • Mark this issue or PR as rotten with /lifecycle rotten
  • Close this issue or PR with /close
  • Offer to help out with Issue Triage

Please send feedback to sig-contributor-experience at kubernetes/community.

/lifecycle stale

k8s-triage-robot • Dec 05 '22 20:12