terraform-oci-oke
Issues involving use of our own Vault KMS Key in this module
Community Note
- Please vote on this issue by adding a 👍 reaction to the original issue to help the community and maintainers prioritize this request
- Please do not leave "+1" or "me too" comments, they generate extra noise for issue followers and do not help prioritize the request
- If you are interested in working on this issue or have submitted a pull request, please leave a comment
Terraform Version and Provider Version
Terraform 1.5.7 and provider oracle/oci 5.27.0
Affected Resource(s)
module.iam.oci_identity_policy.cluster[0]
Terraform Configuration Files
- Terraform to apply the network only:
locals {
  # sensitive info
  ...
}

provider "oci" {
  config_file_profile = "sensedia"
  region              = "sa-saopaulo-1"
}

provider "oci" {
  alias               = "home"
  config_file_profile = "sensedia"
  region              = "sa-saopaulo-1"
}

module "oke" {
  # source/version and the home-region provider alias were omitted from the
  # original report; shown here so the block is complete. Pin the version you use.
  source    = "oracle-terraform-modules/oke/oci"
  version   = ">= 5.0"
  providers = { oci.home = oci.home }

  # general oci parameters
  tenancy_id     = local.tenancy_ocid
  compartment_id = local.compartment_ocid

  # Identity
  create_iam_resources  = true
  create_iam_kms_policy = "always"
  cluster_kms_key_id    = local.cluster_kms_key_id

  # Network
  create_vcn                  = true
  vcn_cidrs                   = local.cidrs
  vcn_create_internet_gateway = "always"
  vcn_create_nat_gateway      = "always"
  vcn_create_service_gateway  = "always"
  vcn_name                    = local.shard
  vcn_dns_label               = replace(local.shard, "/[-_]/", "")
  assign_dns                  = true

  subnets = {
    cp      = { create = "always", newbits = 9 }
    int_lb  = { create = "always", newbits = 3 }
    pub_lb  = { create = "always", newbits = 3 }
    workers = { create = "always", newbits = 2 }
    pods    = { create = "always", newbits = 2 }
  }

  # Network Security
  nsgs = {
    cp      = { create = "always" }
    int_lb  = { create = "always" }
    pub_lb  = { create = "always" }
    workers = { create = "always" }
    pods    = { create = "always" }
  }

  allow_node_port_access       = false
  allow_pod_internet_access    = true
  allow_rules_internal_lb      = {}
  allow_rules_public_lb        = {}
  allow_worker_internet_access = true
  allow_worker_ssh_access      = true
  enable_waf                   = false
  bastion_allowed_cidrs        = []
  control_plane_allowed_cidrs  = ["0.0.0.0/0"]
  control_plane_is_public      = true
  load_balancers               = "both" # can be: public, internal, both
  worker_is_public             = false

  # Bastion
  create_bastion = false

  # Cluster
  create_cluster          = false
  preferred_load_balancer = "internal" # depends on: load_balancers value
  create_operator         = false
}
- Terraform to apply the cluster and workers:
locals {
  # sensitive info
  ...
}

provider "oci" {
  config_file_profile = "sensedia"
  region              = "sa-saopaulo-1"
}

provider "oci" {
  alias               = "home"
  config_file_profile = "sensedia"
  region              = "sa-saopaulo-1"
}

module "oke" {
  # source/version and the home-region provider alias were omitted from the
  # original report; shown here so the block is complete. Pin the version you use.
  source    = "oracle-terraform-modules/oke/oci"
  version   = ">= 5.0"
  providers = { oci.home = oci.home }

  # general oci parameters
  tenancy_id     = local.tenancy_ocid
  compartment_id = local.compartment_ocid
  state_id       = local.state_id # already created before

  # Cluster General
  timezone      = "America/Sao_Paulo"
  output_detail = true

  # Identity
  create_iam_resources         = true
  create_iam_autoscaler_policy = "always"
  create_iam_worker_policy     = "always"
  create_iam_kms_policy        = "never" # already created before

  # Network
  create_vcn                  = false
  vcn_id                      = local.vcn_id # already created before
  vcn_create_internet_gateway = "never"
  vcn_create_nat_gateway      = "never"
  vcn_create_service_gateway  = "never"
  assign_dns                  = true
  ig_route_table_id           = local.ig_route_table_id
  worker_nsg_ids              = compact([local.worker_nsg_id])
  pod_nsg_ids                 = compact([local.pod_nsg_id])

  subnets = {
    cp      = { create = "never", id = local.control_plane_subnet_id }
    int_lb  = { create = "never", id = local.int_lb_subnet_id }
    pub_lb  = { create = "never", id = local.pub_lb_subnet_id }
    workers = { create = "never", id = local.worker_subnet_id }
    pods    = { create = "never", id = local.pod_subnet_id }
  }

  # Network Security
  nsgs = {
    cp      = { create = "never", id = local.control_plane_nsg_id }
    workers = { create = "never", id = local.worker_nsg_id }
    pods    = { create = "never", id = local.pod_nsg_id }
  }

  allow_worker_internet_access = true
  allow_worker_ssh_access      = true
  control_plane_allowed_cidrs  = []
  control_plane_is_public      = true
  load_balancers               = "both" # can be: public, internal, both
  worker_is_public             = false

  # Bastion
  create_bastion = false

  # Cluster
  create_cluster          = true
  preferred_load_balancer = "internal" # depends on: load_balancers value
  create_operator         = false
  cluster_kms_key_id      = local.cluster_kms_key_id # already created before
  cluster_name            = local.cluster_1
  cluster_type            = "basic"
  cni_type                = "npn"
  kubernetes_version      = "v1.28.2"

  # Workers
  worker_pool_size               = 1
  worker_pool_mode               = "node-pool" # OKE-managed Node Pool
  worker_volume_kms_key_id       = local.cluster_kms_key_id # already created before
  worker_drain_ignore_daemonsets = true
  worker_drain_delete_local_data = true
  worker_drain_timeout_seconds   = 1800

  worker_pools = {
    oke-vm-standard-a1 = {
      description      = "OKE-managed Node Pool with OKE Oracle Linux 8 image"
      shape            = "VM.Standard.A1.Flex"
      create           = true
      ocpus            = 1
      memory           = 8
      boot_volume_size = 50
      # os             = "Oracle Linux"
      # os_version     = "8"
      size             = 2
      min_size         = 1
      max_size         = 3
      autoscale        = true
    }
  }
}
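The local.* IDs above (VCN, subnets, NSGs, route table) come from the network stage. A hedged sketch of wiring them in via a terraform_remote_state data source; the backend, path, and output names are placeholders for however your network stage publishes them:

data "terraform_remote_state" "network" {
  backend = "local" # placeholder; use the backend that holds the network stage's state
  config = {
    path = "../network/terraform.tfstate"
  }
}

locals {
  # hypothetical output names; expose whatever the network stage actually outputs
  vcn_id           = data.terraform_remote_state.network.outputs.vcn_id
  worker_subnet_id = data.terraform_remote_state.network.outputs.worker_subnet_id
}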
Expected Behavior
When you have your own Vault and KMS key to use with the terraform-oci-oke module, you set create_iam_resources = true, create_iam_kms_policy = "always", cluster_kms_key_id = var.cluster_kms_key_id, and worker_volume_kms_key_id = var.worker_volume_kms_key_id; the module should then create the required IAM policy and the OKE cluster should use your own KMS key.
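Taken together, the relevant inputs look like this (a minimal sketch; the var.* values are placeholders for your own key OCIDs):

module "oke" {
  source = "oracle-terraform-modules/oke/oci"
  # ... other required inputs ...

  create_iam_resources     = true
  create_iam_kms_policy    = "always"
  cluster_kms_key_id       = var.cluster_kms_key_id       # key used to encrypt Kubernetes secrets (etcd)
  worker_volume_kms_key_id = var.worker_volume_kms_key_id # key used to encrypt worker boot volumes
}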
Actual Behavior
When the Vault and KMS key already exist, and the VCN and the KMS IAM policy were created by an earlier run of this module (network-only mode), a later run of the module (cluster/worker-only mode) fails because the name of the IAM policy it creates conflicts with the existing one.
If you instead create network, cluster, and workers together in a single configuration, cluster creation fails because no policy yet allows the OKE cluster to use the KMS key: the chain of resource and variable dependencies orders the creation of module.iam.oci_identity_policy.cluster[0] after module.cluster[0].oci_containerengine_cluster.k8s_cluster, while creating the cluster with a customer-managed key depends on that policy already existing.
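One possible workaround sketch: pre-create the KMS policy outside the module (and set create_iam_kms_policy = "never") so the permission exists before the cluster is created. The statement follows the wording in the OKE documentation for customer-managed keys; the policy name and compartment reference are placeholders:

resource "oci_identity_policy" "oke_kms" {
  provider       = oci.home # IAM policies are managed in the home region
  compartment_id = local.compartment_ocid
  name           = "oke-kms-precreated" # hypothetical name; must not collide with the module's policy
  description    = "Allow OKE clusters to use the customer-managed KMS key"
  statements = [
    "Allow any-user to use keys in compartment id ${local.compartment_ocid} where ALL { request.principal.type = 'cluster', target.key.id = '${local.cluster_kms_key_id}' }",
  ]
}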
Steps to Reproduce
- Enable use of your own KMS Key:
  create_iam_resources     = true
  create_iam_kms_policy    = "always"
  cluster_kms_key_id       = var.cluster_kms_key_id
  worker_volume_kms_key_id = var.worker_volume_kms_key_id
- Run terraform:
  terraform apply
Important Factoids
I have already tried applying the terraform-oci-oke module both ways: with network/cluster/worker together, and split into two stages (network first, then cluster/worker). The root cause of the error involving module.iam.oci_identity_policy.cluster[0] persists in both cases.
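A possible interim workaround is a two-phase apply that creates the IAM resources before everything else, assuming the module block is named oke and its IAM submodule is iam (as in the resource address above):

terraform apply -target='module.oke.module.iam'
terraform apply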
References
- https://oracle-terraform-modules.github.io/terraform-oci-oke/guide/identity_policies.html
- https://oracle-terraform-modules.github.io/terraform-oci-oke/guide/deploy.html