
Second ACL not saved and restored to/from container image


Required information

  • Distribution: Ubuntu
  • Distribution version: 22.04 (Jammy)
  • Kernel version: 6.2.0-33-generic
  • LXC version: 5.18
  • LXD version: 5.18
  • Storage backend in use: ZFS

Issue description

Similar to #11901 (which has been fixed), there is an issue when two group ACLs are applied to the same file: the first one is now restored correctly, but the second one is restored as a numeric group ID.

Steps to reproduce

  1. Create a container, apply two group ACLs to the same file, stop the container
$ lxc launch images:ubuntu/jammy c1
$ lxc exec c1 -- apt install acl -y
$ lxc exec c1 -- groupadd foo
$ lxc exec c1 -- groupadd wibble
$ lxc exec c1 -- touch ./foo
$ lxc exec c1 -- setfacl -m g:foo:rwx ./foo
$ lxc exec c1 -- getfacl foo
# file: foo
# owner: root
# group: root
user::rw-
group::r--
group:foo:rwx
mask::rwx
other::r--
  2. Publish container to image, launch new container from image, check ACL
$ lxc stop c1
$ lxc publish c1
Instance published with fingerprint: ad7f2ecc32d26416a6b59b65f4fb3061a5490e1658b06a8ddfec3cee77c6e016
$ lxc launch ad7f2ecc32d26416a6b59b65f4fb3061a5490e1658b06a8ddfec3cee77c6e016 c2
Creating c2
Starting c2                                
$ lxc exec c2 -- getfacl foo
# file: foo
# owner: root
# group: root
user::rw-
group::r--
group:foo:rwx
group:4294967295:rwx
mask::rwx
other::r--

Instead of `wibble` we get `4294967295`.
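For what it's worth, `4294967295` is `0xffffffff`, i.e. `(gid_t)-1`, which is typically what getfacl reports when the kernel cannot map an ACL qualifier into the container's user namespace. The ACL itself is stored in the `system.posix_acl_access` extended attribute, so the broken entry can be inspected directly. A minimal sketch, assuming the `attr` package provides getfattr in the container:

$ lxc exec c2 -- apt install attr -y
$ lxc exec c2 -- getfattr -n system.posix_acl_access -e hex foo

In the hex dump, after the 4-byte version header each 8-byte entry is a u16 tag, u16 permissions and u32 qualifier; a named-group entry (tag 0x0008) whose qualifier reads ffffffff would be the damaged one.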

Information to attach

lxc info
config:                                 
  core.https_address: '[::]:8443'
  core.trust_password: true    
api_extensions:                     
- storage_zfs_remove_snapshots 
- container_host_shutdown_timeout
- container_stop_priority            
- container_syscall_filtering
- auth_pki                                                     
- container_last_used_at           
- etag                                                                                                                                                                                                                                                         
- patch                                  
- usb_devices                                            
- https_allowed_credentials                
- image_compression_algorithm 
- directory_manipulation         
- container_cpu_time               
- storage_zfs_use_refquota
- storage_lvm_mount_options          
- network                                   
- profile_usedby                        
- container_push                                               
- container_exec_recording         
- certificate_update           
- container_exec_signal_handling
- gpu_devices                
- container_image_properties         
- migration_progress         
- id_map                                                                                                                       
- network_firewall_filtering        
- network_routes                     
- storage                    
- file_delete            
- file_append                
- network_dhcp_expiry                  
- storage_lvm_vg_rename           
- storage_lvm_thinpool_rename                                                                                                  
- network_vlan                                                                                                                 
- image_create_aliases                                                                                                         
- container_stateless_copy                                                                                                     
- container_only_migration                                                                                                     
- storage_zfs_clone_copy                                                                                                       
- unix_device_rename                                                                                                           
- storage_lvm_use_thinpool
- storage_rsync_bwlimit
- network_vxlan_interface
- storage_btrfs_mount_options
- entity_description
- image_force_refresh
- storage_lvm_lv_resizing
- id_map_base
- file_symlinks
- container_push_target
- network_vlan_physical
- storage_images_delete
- container_edit_metadata
- container_snapshot_stateful_migration
- storage_driver_ceph
- storage_ceph_user_name
- resource_limits
- storage_volatile_initial_source
- storage_ceph_force_osd_reuse
- storage_block_filesystem_btrfs
- resources
- kernel_limits
- storage_api_volume_rename
- macaroon_authentication
- network_sriov
- console                                                      
- restrict_devlxd             
- migration_pre_copy
- infiniband                     
- maas_network             
- devlxd_events
- proxy                       
- network_dhcp_gateway           
- file_get_symlink       
- network_leases
- unix_device_hotplug
- storage_api_local_volume_handling
- operation_description
- clustering
- event_lifecycle
- storage_api_remote_volume_handling
- nvidia_runtime
- container_mount_propagation
- container_backup
- devlxd_images
- container_local_cross_pool_handling
- proxy_unix
- proxy_udp
- clustering_join
- proxy_tcp_udp_multi_port_handling
- network_state
- proxy_unix_dac_properties
- container_protection_delete
- unix_priv_drop
- pprof_http
- proxy_haproxy_protocol
- network_hwaddr
- proxy_nat
- network_nat_order
- container_full
- candid_authentication
- backup_compression
- candid_config
- nvidia_runtime_config
- storage_api_volume_snapshots
- storage_unmapped
- projects
- candid_config_key
- network_vxlan_ttl
- container_incremental_copy
- usb_optional_vendorid
- snapshot_scheduling
- snapshot_schedule_aliases
- container_copy_project
- clustering_server_address
- clustering_image_replication
- container_protection_shift
- snapshot_expiry
- container_backup_override_pool
- snapshot_expiry_creation
- network_leases_location
- resources_cpu_socket
- resources_gpu
- resources_numa
- kernel_features
- id_map_current
- event_location
- storage_api_remote_volume_snapshots
- network_nat_address
- container_nic_routes
- rbac
- cluster_internal_copy
- seccomp_notify
- lxc_features
- container_nic_ipvlan
- network_vlan_sriov
- storage_cephfs
- container_nic_ipfilter
- resources_v2
- container_exec_user_group_cwd
- container_syscall_intercept
- container_disk_shift
- storage_shifted
- resources_infiniband
- daemon_storage
- instances
- image_types
- resources_disk_sata
- clustering_roles
- images_expiry
- resources_network_firmware
- backup_compression_algorithm
- ceph_data_pool_name
- container_syscall_intercept_mount
- compression_squashfs
- container_raw_mount
- container_nic_routed
- container_syscall_intercept_mount_fuse
- container_disk_ceph
- virtual-machines
- image_profiles
- clustering_architecture
- resources_disk_id
- storage_lvm_stripes
- vm_boot_priority
- unix_hotplug_devices
- api_filtering
- instance_nic_network
- clustering_sizing
- firewall_driver
- projects_limits
- container_syscall_intercept_hugetlbfs
- limits_hugepages
- container_nic_routed_gateway
- projects_restrictions
- custom_volume_snapshot_expiry
- volume_snapshot_scheduling
- trust_ca_certificates
- snapshot_disk_usage
- clustering_edit_roles
- container_nic_routed_host_address
- container_nic_ipvlan_gateway
- resources_usb_pci
- resources_cpu_threads_numa
- resources_cpu_core_die
- api_os
- container_nic_routed_host_table
- container_nic_ipvlan_host_table
- container_nic_ipvlan_mode
- resources_system
- images_push_relay
- network_dns_search
- container_nic_routed_limits
- instance_nic_bridged_vlan
- network_state_bond_bridge
- usedby_consistency
- custom_block_volumes
- clustering_failure_domains
- resources_gpu_mdev
- console_vga_type
- projects_limits_disk
- network_type_macvlan
- network_type_sriov
- container_syscall_intercept_bpf_devices
- network_type_ovn
- projects_networks
- projects_networks_restricted_uplinks
- custom_volume_backup
- backup_override_name
- storage_rsync_compression
- network_type_physical
- network_ovn_external_subnets
- network_ovn_nat
- network_ovn_external_routes_remove
- tpm_device_type
- storage_zfs_clone_copy_rebase
- gpu_mdev
- resources_pci_iommu
- resources_network_usb
- resources_disk_address
- network_physical_ovn_ingress_mode
- network_ovn_dhcp
- network_physical_routes_anycast
- projects_limits_instances
- network_state_vlan
- instance_nic_bridged_port_isolation
- instance_bulk_state_change
- network_gvrp
- instance_pool_move
- gpu_sriov
- pci_device_type
- storage_volume_state
- network_acl
- migration_stateful
- disk_state_quota
- storage_ceph_features
- projects_compression
- projects_images_remote_cache_expiry
- certificate_project
- network_ovn_acl
- projects_images_auto_update
- projects_restricted_cluster_target
- images_default_architecture
- network_ovn_acl_defaults
- gpu_mig
- project_usage
- network_bridge_acl
- warnings
- projects_restricted_backups_and_snapshots
- clustering_join_token
- clustering_description
- server_trusted_proxy
- clustering_update_cert
- storage_api_project
- server_instance_driver_operational
- server_supported_storage_drivers
- event_lifecycle_requestor_address
- resources_gpu_usb
- clustering_evacuation
- network_ovn_nat_address
- network_bgp
- network_forward
- custom_volume_refresh
- network_counters_errors_dropped
- metrics
- image_source_project
- clustering_config
- network_peer
- linux_sysctl
- network_dns
- ovn_nic_acceleration
- certificate_self_renewal
- instance_project_move
- storage_volume_project_move
- cloud_init
- network_dns_nat
- database_leader
- instance_all_projects
- clustering_groups
- ceph_rbd_du
- instance_get_full
- qemu_metrics
- gpu_mig_uuid
- event_project
- clustering_evacuation_live
- instance_allow_inconsistent_copy
- network_state_ovn
- storage_volume_api_filtering
- image_restrictions
- storage_zfs_export
- network_dns_records
- storage_zfs_reserve_space
- network_acl_log
- storage_zfs_blocksize
- metrics_cpu_seconds
- instance_snapshot_never
- certificate_token
- instance_nic_routed_neighbor_probe
- event_hub
- agent_nic_config
- projects_restricted_intercept
- metrics_authentication
- images_target_project
- cluster_migration_inconsistent_copy
- cluster_ovn_chassis
- container_syscall_intercept_sched_setscheduler
- storage_lvm_thinpool_metadata_size
- storage_volume_state_total
- instance_file_head
- instances_nic_host_name
- image_copy_profile
- container_syscall_intercept_sysinfo
- clustering_evacuation_mode
- resources_pci_vpd
- qemu_raw_conf
- storage_cephfs_fscache
- network_load_balancer
- vsock_api
- instance_ready_state
- network_bgp_holdtime
- storage_volumes_all_projects
- metrics_memory_oom_total
- storage_buckets
- storage_buckets_create_credentials
- metrics_cpu_effective_total
- projects_networks_restricted_access
- storage_buckets_local
- loki
- acme
- internal_metrics
- cluster_join_token_expiry
- remote_token_expiry
- init_preseed
- storage_volumes_created_at
- cpu_hotplug
- projects_networks_zones
- network_txqueuelen
- cluster_member_state
- instances_placement_scriptlet
- storage_pool_source_wipe
- zfs_block_mode
- instance_generation_id
- disk_io_cache
- amd_sev
- storage_pool_loop_resize
- migration_vm_live
- ovn_nic_nesting
- oidc
- network_ovn_l3only
- ovn_nic_acceleration_vdpa
- cluster_healing
- instances_state_total
- auth_user
- security_csm
- instances_rebuild
- numa_cpu_placement
- custom_volume_iso
- network_allocations
- storage_api_remote_volume_snapshot_copy
- zfs_delegate
- operations_get_query_all_projects
- metadata_configuration
- syslog_socket
api_status: stable
api_version: "1.0"
auth: trusted
public: false
auth_methods:
- tls
auth_user_name: kfaint
auth_user_method: unix
environment:
  addresses:
  - 172.27.82.34:8443
  - 192.168.2.254:8443
  - 172.17.0.1:8443
  - 10.10.10.1:8443
  - '[fd42:632b:873a:319e::1]:8443'
  architectures:
  - x86_64
  - i686
  certificate: |
    -----BEGIN CERTIFICATE-----
    -----END CERTIFICATE-----
  certificate_fingerprint: f40a5dcfddbe947ec36e542fbdfc507b66203bab9bc6b7c2597086c56030cd53
  driver: lxc | qemu
  driver_version: 5.0.3 | 8.0.4
  firewall: nftables
  kernel: Linux
  kernel_architecture: x86_64
  kernel_features:
    idmapped_mounts: "true"
    netnsid_getifaddrs: "true"
    seccomp_listener: "true"
    seccomp_listener_continue: "true"
    shiftfs: "false"
    uevent_injection: "true"
    unpriv_fscaps: "true"
  kernel_version: 6.2.0-33-generic
  lxc_features:
    cgroup2: "true"
    core_scheduling: "true"
    devpts_fd: "true"
    idmapped_mounts_v2: "true"
    mount_injection_file: "true"
    network_gateway_device_route: "true"
    network_ipvlan: "true"
    network_l2proxy: "true"
    network_phys_macvlan_mtu: "true"
    network_veth_router: "true"
    pidfd: "true"
    seccomp_allow_deny_syntax: "true"
    seccomp_notify: "true"
    seccomp_proxy_send_notify_fd: "true"
  os_name: Ubuntu
  os_version: "22.04"
  project: default
  server: lxd
  server_clustered: false
  server_event_mode: full-mesh
  server_name: TAP0001367
  server_pid: 2932407
  server_version: "5.18"
  storage: zfs | btrfs
  storage_version: 2.1.9-2ubuntu1.1 | 5.16.2
  storage_supported_drivers:
  - name: cephobject
    version: 17.2.6
    remote: true
  - name: dir
    version: "1"
    remote: false
  - name: lvm
    version: 2.03.11(2) (2021-01-08) / 1.02.175 (2021-01-08) / 4.47.0
    remote: false
  - name: zfs
    version: 2.1.9-2ubuntu1.1
    remote: false
  - name: btrfs
    version: 5.16.2
    remote: false
  - name: ceph
    version: 17.2.6
    remote: true
  - name: cephfs
    version: 17.2.6
    remote: true
lxc info c1 --show-log
Name: c1
Status: STOPPED
Type: container
Architecture: x86_64
Created: 2023/10/23 15:16 AEST
Last Used: 2023/10/23 15:16 AEST

Log:

lxc c1 20231023051651.210 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051651.210 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051651.213 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051651.213 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051651.214 WARN     cgfsng - ../src/src/lxc/cgroups/cgfsng.c:fchowmodat:1619 - No such file or directory - Failed to fchownat(42, memory.oom.group, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc c1 20231023051651.215 WARN     cgfsng - ../src/src/lxc/cgroups/cgfsng.c:fchowmodat:1619 - No such file or directory - Failed to fchownat(42, memory.reclaim, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc c1 20231023051718.738 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051718.738 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051734.123 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051734.123 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051755.119 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051755.119 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051903.198 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051903.198 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051922.415 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051922.415 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051937.202 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051937.202 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051949.134 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051949.134 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c1 20231023051952.119 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c1 20231023051952.119 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc info c2 --show-log
Name: c2
Status: RUNNING
Type: container
Architecture: x86_64
PID: 1108042
Created: 2023/10/23 15:20 AEST
Last Used: 2023/10/23 15:21 AEST

Resources:
  Processes: 13
  Disk usage:
    root: 3.69MiB
  CPU usage:
    CPU usage (in seconds): 1
  Memory usage:
    Memory (current): 56.62MiB
    Memory (peak): 61.65MiB
  Network usage:
    eth0:
      Type: broadcast
      State: UP
      Host interface: veth716f6a11
      MAC address: 00:16:3e:35:f6:14
      MTU: 1500
      Bytes received: 14.10kB
      Bytes sent: 3.50kB
      Packets received: 83
      Packets sent: 35
      IP addresses:
        inet:  10.10.10.40/24 (global)
        inet6: fd42:632b:873a:319e:216:3eff:fe35:f614/64 (global)
        inet6: fe80::216:3eff:fe35:f614/64 (link)
    lo:
      Type: loopback
      State: UP
      MTU: 65536
      Bytes received: 0B
      Bytes sent: 0B
      Packets received: 0
      Packets sent: 0
      IP addresses:
        inet:  127.0.0.1/8 (local)
        inet6: ::1/128 (local)

Log:

lxc c2 20231023052105.390 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c2 20231023052105.390 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c2 20231023052105.392 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c2 20231023052105.392 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc c2 20231023052105.394 WARN     cgfsng - ../src/src/lxc/cgroups/cgfsng.c:fchowmodat:1619 - No such file or directory - Failed to fchownat(42, memory.oom.group, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc c2 20231023052105.394 WARN     cgfsng - ../src/src/lxc/cgroups/cgfsng.c:fchowmodat:1619 - No such file or directory - Failed to fchownat(42, memory.reclaim, 1000000000, 0, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW )
lxc c2 20231023052112.422 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3621 - newuidmap binary is missing
lxc c2 20231023052112.422 WARN     conf - ../src/src/lxc/conf.c:lxc_map_ids:3627 - newgidmap binary is missing
lxc config show c1 --expanded
architecture: x86_64
config:
  image.architecture: amd64
  image.description: Ubuntu jammy amd64 (20231022_07:42)
  image.os: Ubuntu
  image.release: jammy
  image.serial: "20231022_07:42"
  image.type: squashfs
  image.variant: default
  volatile.base_image: 2fb5bae1e9f95a14855d5c3afc1d0f55c24718dc1e2db03b18a48b1e33137756
  volatile.cloud-init.instance-id: 330f476b-fe1f-4835-9a51-1cb0b8af0238
  volatile.eth0.hwaddr: 00:16:3e:92:b3:2d
  volatile.idmap.base: "0"
  volatile.idmap.current: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.idmap.next: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.last_state.idmap: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.last_state.power: STOPPED
  volatile.last_state.ready: "false"
  volatile.uuid: 20050700-5554-4d67-8ec0-996a13c467c6
  volatile.uuid.generation: 20050700-5554-4d67-8ec0-996a13c467c6
devices:
  eth0:
    name: eth0
    network: lxdbr0
    type: nic
  root:
    path: /
    pool: default
    type: disk
ephemeral: false
profiles:
- default
stateful: false
description: ""
lxc config show c2 --expanded
architecture: x86_64
config:
  image.architecture: amd64
  image.description: Ubuntu jammy amd64 (20231022_07:42)
  image.name: ubuntu-jammy-amd64-default-20231022_07:42
  image.os: ubuntu
  image.release: jammy
  image.serial: "20231022_07:42"
  image.variant: default
  volatile.base_image: ad7f2ecc32d26416a6b59b65f4fb3061a5490e1658b06a8ddfec3cee77c6e016
  volatile.cloud-init.instance-id: 652b930e-3a74-435a-abad-4b0b6a167cbe
  volatile.eth0.host_name: veth716f6a11
  volatile.eth0.hwaddr: 00:16:3e:35:f6:14
  volatile.idmap.base: "0"
  volatile.idmap.current: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.idmap.next: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.last_state.idmap: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.last_state.power: RUNNING
  volatile.uuid: 907b6213-d440-49d5-9867-4450aad1940c
  volatile.uuid.generation: 907b6213-d440-49d5-9867-4450aad1940c
devices:
  eth0:
    name: eth0
    network: lxdbr0
    type: nic
  root:
    path: /
    pool: default
    type: disk
ephemeral: false
profiles:
- default
stateful: false
description: ""

kimfaint avatar Oct 23 '23 05:10 kimfaint

4294967295 is 2**32-1, which is suspicious. I tried changing the zpool's aclmode and aclinherit to passthrough, to no avail; only the first ACL has its group ID preserved properly.

However, if the image published from the ZFS-backed container is then launched onto btrfs, all the group IDs are preserved fine:

# c1 is on ZFS
$ lxc exec c1 -- getfacl -n foo
# file: foo
# owner: 0
# group: 0
user::rw-
group::r--
group:4:rwx
group:1001:rwx
group:1002:rwx
mask::rwx
other::r--

$ lxc publish c1
Instance published with fingerprint: b8ad7655b6c55d67b9d8789c489fcd93f5fc0399d2e0392df02c7e9403e92238

# c2 is on BTRFS
$ lxc launch b8ad7655b6c55d67b9d8789c489fcd93f5fc0399d2e0392df02c7e9403e92238 c2 -s btrfs
Creating c2
Starting c2

$ lxc exec c2 -- getfacl -n foo
# file: foo
# owner: 0
# group: 0
user::rw-
group::r--
group:4:rwx
group:1001:rwx
group:1002:rwx
mask::rwx
other::r--

Interestingly, if we use `lxc copy c1 c2` instead of `lxc publish` + `lxc launch`, ZFS has no issue:

# c1 and c2 are both on ZFS
$ lxc copy c1 c2
$ lxc start c2
$ lxc exec c2 -- getfacl -n foo
# file: foo
# owner: 0
# group: 0
user::rw-
group::r--
group:4:rwx
group:1001:rwx
group:1002:rwx
mask::rwx
other::r--
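Worth noting about the two paths: as far as I know, a same-pool `lxc copy` can use a block-level ZFS clone/send-receive, while `lxc publish` + `lxc launch` round-trips the root filesystem through an image tarball. If the second group entry is lost at the archiving layer, a plain tar round-trip on the host should reproduce it without LXD involved. A hypothetical experiment using GNU tar's `--acls`/`--xattrs` options (the /tank paths are made up):

$ touch /tank/test/foo
$ setfacl -m g:1001:rwx -m g:1002:rwx /tank/test/foo
$ tar --acls --xattrs -cpf /tmp/acl-test.tar -C /tank/test foo
$ mkdir -p /tank/restore
$ tar --acls --xattrs -xpf /tmp/acl-test.tar -C /tank/restore
$ getfacl -n /tank/restore/foo

If both numeric group entries survive this, the archive format is fine and the suspect moves to the ID shifting done when the image is unpacked.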

simondeziel avatar Oct 27 '23 01:10 simondeziel

Hello @kimfaint.

I am not able to reproduce the issue. Here is the output of my lxc info:

api_extensions:
 ...
api_status: stable
api_version: "1.0"
auth: trusted
public: false
auth_methods:
- tls
auth_user_name: infinity
auth_user_method: unix
environment:
  addresses: []
  architectures:
  - x86_64
  - i686
  certificate: |
    -----BEGIN CERTIFICATE-----
    MIICHjCCAaOgAwIBAgIRAIiycBgru25H6U1LsqI9BAQwCgYIKoZIzj0EAwMwNzEM
    MAoGA1UEChMDTFhEMScwJQYDVQQDDB5yb290QGluZmluaXR5LUxlZ2lvbi01LTE1
    QVJIN0gwHhcNMjQxMTA3MDkwNzM4WhcNMzQxMTA1MDkwNzM4WjA3MQwwCgYDVQQK
    EwNMWEQxJzAlBgNVBAMMHnJvb3RAaW5maW5pdHktTGVnaW9uLTUtMTVBUkg3SDB2
    MBAGByqGSM49AgEGBSuBBAAiA2IABC1BPE+kJgGhWVyg02xZAA143dYYw9jNi5kt
    dchWYHRVFpaVwgKnwyTLk+HVl084ZvBh5x/Vp29aWbMwkHRHYdU4bQdKqXl73RYb
    6rMf4ob9EX4a2frHRPLgZebqC6fGHaNzMHEwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud
    JQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwPAYDVR0RBDUwM4IZaW5maW5p
    dHktTGVnaW9uLTUtMTVBUkg3SIcEfwAAAYcQAAAAAAAAAAAAAAAAAAAAATAKBggq
    hkjOPQQDAwNpADBmAjEAs5T9rvexFgbC4uBRRoeWubIUIXV2Ligib1YWIXhEP+AF
    yyHoMc0GAYdu76tfAbWiAjEA4zZH6pKTDUtXkgJy+qST9MO+CGE5+6LZUCv/16Uc
    UN4gShGx7BZySWxFgVXMIPiq
    -----END CERTIFICATE-----
  certificate_fingerprint: 9a72429d054d4213f1002d44defcfa14ccfd41126cac0a17c06867af4e18704e
  driver: lxc | qemu
  driver_version: 6.0.0 | 8.2.2
  instance_types:
  - container
  - virtual-machine
  firewall: nftables
  kernel: Linux
  kernel_architecture: x86_64
  kernel_features:
    idmapped_mounts: "true"
    netnsid_getifaddrs: "true"
    seccomp_listener: "true"
    seccomp_listener_continue: "true"
    uevent_injection: "true"
    unpriv_binfmt: "true"
    unpriv_fscaps: "true"
  kernel_version: 6.8.0-48-generic
  lxc_features:
    cgroup2: "true"
    core_scheduling: "true"
    devpts_fd: "true"
    idmapped_mounts_v2: "true"
    mount_injection_file: "true"
    network_gateway_device_route: "true"
    network_ipvlan: "true"
    network_l2proxy: "true"
    network_phys_macvlan_mtu: "true"
    network_veth_router: "true"
    pidfd: "true"
    seccomp_allow_deny_syntax: "true"
    seccomp_notify: "true"
    seccomp_proxy_send_notify_fd: "true"
  os_name: Ubuntu
  os_version: "22.04"
  project: default
  server: lxd
  server_clustered: false
  server_event_mode: full-mesh
  server_name: infinity-Legion-5-15ARH7H
  server_pid: 374084
  server_version: "6.1"
  server_lts: false
  storage: zfs
  storage_version: 2.2.2-0ubuntu9
  storage_supported_drivers:
  - name: zfs
    version: 2.2.2-0ubuntu9
    remote: false
  - name: btrfs
    version: 6.6.3
    remote: false
  - name: ceph
    version: 19.2.0
    remote: true
  - name: cephfs
    version: 19.2.0
    remote: true
  - name: cephobject
    version: 19.2.0
    remote: true
  - name: dir
    version: "1"
    remote: false
  - name: lvm
    version: 2.03.16(2) (2022-05-18) / 1.02.185 (2022-05-18) / 4.48.0
    remote: false
  - name: powerflex
    version: 2.8 (nvme-cli)
    remote: true

A couple of things worth mentioning:

  • I'm running an Ubuntu 24.04 machine with ZFS 2.2.2-0ubuntu9. Your version is different, so the issue may be related to that. Is it possible for you to try with this configuration? Also, this issue is rather old, so the state of things might have changed since (the commands below show how to confirm the versions in play).
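A quick way to confirm, with standard commands:

$ uname -r
$ zfs version
$ lxc version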

Thanks.

gabrielmougard avatar Nov 07 '24 09:11 gabrielmougard

@simondeziel did you reproduce this previously, can you help @gabrielmougard reproduce it if so? Thanks

tomponline avatar Nov 07 '24 09:11 tomponline

@simondeziel did you reproduce this previously, can you help @gabrielmougard reproduce it if so? Thanks

I managed to run the reproducer, but everything looked fine. @simondeziel, I suspect this is related to the ZFS version in use a year ago, which has changed quite a bit since. Can you retry on your side (just for confirmation)?

gabrielmougard avatar Nov 07 '24 10:11 gabrielmougard

Required information

* Distribution: Ubuntu

* Distribution version:  22.04 (Jammy)

* Kernel version: 6.2.0-33-generic

* LXC version: 5.18

* LXD version: 5.18

* Storage backend in use: ZFS

@gabrielmougard I suspect this issue was indeed fixed at the kernel/ZFS level. Kernel 6.2 is long EOL now, and everything works with ZFS 2.2 as shipped in LXD latest/edge; see the retest commands just below.
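For anyone wanting to retest on that stack, switching the snap channel is enough (standard snap commands; note this upgrades the daemon, so best avoided on production hosts):

$ snap refresh lxd --channel=latest/edge
$ lxc version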

Issue description

Similar to #11901 (which has been fixed), there is an issue when two group ACLs are applied to the same file: the first one is now restored correctly, but the second one is restored as a numeric group ID.

Steps to reproduce

1. Create a container, apply two group ACLs to the same file, stop the container

It seems the group ACL for wibble was omitted.

$ lxc launch images:ubuntu/jammy c1
$ lxc exec c1 -- apt install acl -y
$ lxc exec c1 -- groupadd foo
$ lxc exec c1 -- groupadd wibble
$ lxc exec c1 -- touch ./foo
$ lxc exec c1 -- setfacl -m g:foo:rwx ./foo

Here it's missing `lxc exec c1 -- setfacl -m g:wibble:rwx ./foo`.

$ lxc exec c1 -- getfacl foo
# file: foo
# owner: root
# group: root
user::rw-
group::r--
group:foo:rwx

With the missing `setfacl` in place, we also see `group:wibble:rwx` here.

mask::rwx
other::r--


    2. Publish container to image, launch new container from image, check ACL


$ lxc stop c1
$ lxc publish c1
Instance published with fingerprint: ad7f2ecc32d26416a6b59b65f4fb3061a5490e1658b06a8ddfec3cee77c6e016
$ lxc launch ad7f2ecc32d26416a6b59b65f4fb3061a5490e1658b06a8ddfec3cee77c6e016 c2
Creating c2
Starting c2
$ lxc exec c2 -- getfacl foo

# file: foo
# owner: root
# group: root
user::rw-
group::r--
group:foo:rwx
group:4294967295:rwx
mask::rwx
other::r--


Instead of `wibble` we get `4294967295`.

This weird GID is no longer visible after launching from a published container using ZFS or btrfs. Both show `wibble` as they should:

root@v1:~# lxc launch ae2c95d47dd64ecd5f175369a765e34020e349ca712f0fadd9c216fef6d2f319 c2-btrfs
Launching c2-btrfs
root@v1:~# lxc exec c2-btrfs -- getfacl foo
# file: foo
# owner: root
# group: root
user::rw-
group::r--
group:foo:rwx
group:wibble:rwx
mask::rwx
other::r--

root@v1:~# lxc publish c1-zfs
Instance published with fingerprint: 80e736436f7b5bdaef55e9c504dce02ee0ab7ea7a08136c0ccfcddf9174870fa
root@v1:~# lxc launch 80e736436f7b5bdaef55e9c504dce02ee0ab7ea7a08136c0ccfcddf9174870fa c2-zfs
Launching c2-zfs
root@v1:~# lxc exec c2-zfs -- getfacl foo
# file: foo
# owner: root
# group: root
user::rw-
group::r--
group:foo:rwx
group:wibble:rwx
mask::rwx
other::r--

As such, let's close this one.

simondeziel avatar Nov 26 '24 16:11 simondeziel