
Image deletion reports success, but the image is not actually deleted

Open cywang4 opened this issue 1 year ago • 2 comments

Description


package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatalf("Failed to connect to containerd: %v", err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "k8s.io")

	imageName := "docker.io/library/busybox:1.36"

	imgManager := client.ImageService()

	err = imgManager.Delete(ctx, imageName)
	if err != nil {
		log.Fatalf("Failed to delete image: %v", err)
	}

	log.Printf("Image %s deleted successfully", imageName)
}

The code is shown above. I ran it in a pod that mounted the containerd socket and the containerd directories. After execution it reported that the deletion was successful, but on the node I found that the actual image files had not been deleted.
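
For reference, a variant I have not run (my assumption: Delete only removes the image record by default and leaves the underlying content to the asynchronous garbage collector) would be to pass images.SynchronousDelete() from github.com/containerd/containerd/images, so that Delete blocks until garbage collection has finished:

	// Untested sketch: images.SynchronousDelete() asks containerd to run
	// garbage collection before Delete returns, instead of leaving the
	// content to the asynchronous GC scheduler.
	// Requires the extra import "github.com/containerd/containerd/images".
	err = imgManager.Delete(ctx, imageName, images.SynchronousDelete())
	if err != nil {
		log.Fatalf("Failed to delete image: %v", err)
	}

If the files disappear with this option, the original behaviour would just be delayed garbage collection rather than a failed delete.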

Steps to reproduce the issue

Describe the results you received and expected

2024/04/02 10:37:46 Image docker.io/library/busybox:1.36 deleted successfully

Although the log reports the image as deleted, the actual image files have not been deleted on the node.
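
One way to narrow this down (again only a sketch, based on my assumption that another reference may still pin the content: in the k8s.io namespace the CRI plugin usually registers the same image under its tag, its repo digest, and its image ID) is to list what is still registered after the delete:

	// Sketch: print every image reference left in the k8s.io namespace with
	// its target digest; any remaining reference to the busybox manifest
	// keeps the layers and blobs from being garbage collected.
	imgs, err := client.ImageService().List(ctx)
	if err != nil {
		log.Fatalf("Failed to list images: %v", err)
	}
	for _, img := range imgs {
		log.Printf("still present: %s -> %s", img.Name, img.Target.Digest)
	}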

What version of containerd are you using?

containerd github.com/containerd/containerd v1.6.6_bb202209

Any other relevant information

runc --version
runc version 1.1.3
commit: v1.1.3-0-g6724737f
spec: 1.0.2-dev
go: go1.17.10
libseccomp: 2.5.4

Show configuration if it is related to CRI plugin.

disabled_plugins = []
imports = ["/var/lib/sealer/data/my-cluster/rootfs/etc/dump-nvidia-config.toml"]
oom_score = 0
plugin_dir = ""
required_plugins = []
root = "/var/lib/containerd"
state = "/run/containerd"
temp = ""
version = 2

[cgroup]
  path = ""

[debug]
  address = ""
  format = ""
  gid = 0
  level = ""
  uid = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216
  tcp_address = ""
  tcp_tls_ca = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0

[metrics]
  address = ""
  grpc_histogram = false

[plugins]

[plugins."io.containerd.gc.v1.scheduler"]
  deletion_threshold = 0
  mutation_threshold = 100
  pause_threshold = 0.02
  schedule_delay = "0s"
  startup_delay = "100ms"

[plugins."io.containerd.grpc.v1.cri"]
  device_ownership_from_security_context = false
  disable_apparmor = false
  disable_cgroup = false
  disable_hugetlb_controller = true
  disable_proc_mount = false
  disable_tcp_service = true
  enable_selinux = false
  enable_tls_streaming = false
  enable_unprivileged_icmp = false
  enable_unprivileged_ports = false
  ignore_image_defined_volumes = false
  kubelet_pod_path = "/var/lib/kubelet/pods"
  max_concurrent_downloads = 3
  max_container_log_line_size = 16384
  netns_mounts_under_state_dir = false
  restrict_oom_score_adj = false
  sandbox_image = "docker-reg.basebit.me:5000/pause:3.6"
  selinux_category_range = 1024
  stats_collect_period = 10
  stream_idle_timeout = "4h0m0s"
  stream_server_address = "127.0.0.1"
  stream_server_port = "0"
  systemd_cgroup = false
  tolerate_missing_hugetlb_controller = true
  unset_seccomp_profile = ""

[plugins."io.containerd.grpc.v1.cri".cni]
  bin_dir = "/opt/cni/bin"
  conf_dir = "/etc/cni/net.d"
  conf_template = ""
  ip_pref = ""
  max_conf_num = 1

[plugins."io.containerd.grpc.v1.cri".containerd]
  default_runtime_name = "nvidia"
  disable_snapshot_annotations = true
  discard_unpacked_layers = false
  ignore_rdt_not_enabled_errors = false
  no_pivot = false
  snapshotter = "overlayfs"

  [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
    base_runtime_spec = ""
    cni_conf_dir = ""
    cni_max_conf_num = 0
    container_annotations = []
    pod_annotations = []
    privileged_without_host_devices = false
    runtime_engine = ""
    runtime_path = ""
    runtime_root = ""
    runtime_type = ""

    [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime.options]

  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]

    [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia]
      base_runtime_spec = ""
      cni_conf_dir = ""
      cni_max_conf_num = 0
      container_annotations = []
      pod_annotations = []
      privileged_without_host_devices = false
      runtime_engine = ""
      runtime_path = ""
      runtime_root = ""
      runtime_type = "io.containerd.runc.v2"

      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.nvidia.options]
        BinaryName = "/usr/local/bin/nvidia-container-runtime"
        SystemdCgroup = false

  [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
    base_runtime_spec = ""
    cni_conf_dir = ""
    cni_max_conf_num = 0
    container_annotations = []
    pod_annotations = []
    privileged_without_host_devices = false
    runtime_engine = ""
    runtime_path = ""
    runtime_root = ""
    runtime_type = ""

    [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime.options]

[plugins."io.containerd.grpc.v1.cri".image_decryption]
  key_model = "node"

[plugins."io.containerd.grpc.v1.cri".registry]
  config_path = "/etc/docker/certs.d/"

  [plugins."io.containerd.grpc.v1.cri".registry.auths]

  [plugins."io.containerd.grpc.v1.cri".registry.configs]

  [plugins."io.containerd.grpc.v1.cri".registry.headers]

  [plugins."io.containerd.grpc.v1.cri".registry.mirrors]

[plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
  tls_cert_file = ""
  tls_key_file = ""

[plugins."io.containerd.internal.v1.opt"] path = "/opt/containerd"

[plugins."io.containerd.internal.v1.restart"] interval = "10s"

[plugins."io.containerd.internal.v1.tracing"] sampling_ratio = 1.0 service_name = "containerd"

[plugins."io.containerd.metadata.v1.bolt"] content_sharing_policy = "shared"

[plugins."io.containerd.monitor.v1.cgroups"] no_prometheus = false

[plugins."io.containerd.runtime.v1.linux"] no_shim = false runtime = "runc" runtime_root = "" shim = "containerd-shim" shim_debug = false

[plugins."io.containerd.runtime.v2.task"] platforms = ["linux/amd64"] sched_core = false

[plugins."io.containerd.service.v1.diff-service"] default = ["walking"]

[plugins."io.containerd.service.v1.tasks-service"] rdt_config_file = ""

[plugins."io.containerd.snapshotter.v1.aufs"] root_path = ""

[plugins."io.containerd.snapshotter.v1.devmapper"] async_remove = false base_image_size = "" discard_blocks = false fs_options = "" fs_type = "" pool_name = "" root_path = ""

[plugins."io.containerd.snapshotter.v1.native"] root_path = ""

[plugins."io.containerd.snapshotter.v1.overlayfs"] root_path = "" upperdir_label = false

[plugins."io.containerd.snapshotter.v1.zfs"] root_path = ""

[plugins."io.containerd.tracing.processor.v1.otlp"] endpoint = "" insecure = false protocol = ""

[proxy_plugins]

[stream_processors]

[stream_processors."io.containerd.ocicrypt.decoder.v1.tar"] accepts = ["application/vnd.oci.image.layer.v1.tar+encrypted"] args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] path = "ctd-decoder" returns = "application/vnd.oci.image.layer.v1.tar"

[stream_processors."io.containerd.ocicrypt.decoder.v1.tar.gzip"] accepts = ["application/vnd.oci.image.layer.v1.tar+gzip+encrypted"] args = ["--decryption-keys-path", "/etc/containerd/ocicrypt/keys"] env = ["OCICRYPT_KEYPROVIDER_CONFIG=/etc/containerd/ocicrypt/ocicrypt_keyprovider.conf"] path = "ctd-decoder" returns = "application/vnd.oci.image.layer.v1.tar+gzip"

[timeouts] "io.containerd.timeout.bolt.open" = "0s" "io.containerd.timeout.shim.cleanup" = "5s" "io.containerd.timeout.shim.load" = "5s" "io.containerd.timeout.shim.shutdown" = "3s" "io.containerd.timeout.task.state" = "2s"

[ttrpc] address = "" gid = 0 uid = 0

cywang4 · Apr 02 '24 11:04

Needs more information. What does "I found on the node that the actual image file was not deleted" refer to? We would also need the daemon logs to see whether there was any garbage collection issue.
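
An untested sketch of what would help here, assuming "the actual image file" refers to blobs under the containerd root: walk the content store with the same client and print what is still on disk and which labels reference it:

	// Untested sketch: list every blob still in the content store along with
	// its labels (the containerd.io/gc.ref.* labels show what still references it).
	// Requires the extra import "github.com/containerd/containerd/content".
	err = client.ContentStore().Walk(ctx, func(info content.Info) error {
		log.Printf("blob %s size=%d labels=%v", info.Digest, info.Size, info.Labels)
		return nil
	})
	if err != nil {
		log.Fatalf("Failed to walk content store: %v", err)
	}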

dmcgowan · Apr 02 '24 17:04

This issue is stale because it has been open 90 days with no activity. This issue will be closed in 7 days unless new comments are made or the stale label is removed.

github-actions[bot] · Jul 02 '24 00:07

This issue was closed because it has been stalled for 7 days with no activity.

github-actions[bot] · Jul 10 '24 00:07