pulumi-eks
[go] Nodegroup creation failure - "Error: The IAM role mappings provided could not be properly serialized to YAML"
Problem description
Example here:
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-aws/sdk/v3/go/aws/iam"
	"github.com/pulumi/pulumi-eks/sdk/go/eks"
	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)

// Managed policies that every EKS worker-node role needs.
var managedPolicyArns = []string{
	"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
	"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
	"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
}

// createRole creates an IAM role assumable by EC2 instances and attaches
// the standard worker-node policies to it.
func createRole(ctx *pulumi.Context, name string) (iam.RoleInput, error) {
	version := "2012-10-17"
	statementId := "AllowAssumeRole"
	effect := "Allow"
	instanceAssumeRolePolicy, err := iam.GetPolicyDocument(ctx, &iam.GetPolicyDocumentArgs{
		Version: &version,
		Statements: []iam.GetPolicyDocumentStatement{
			{
				Sid:    &statementId,
				Effect: &effect,
				Actions: []string{
					"sts:AssumeRole",
				},
				Principals: []iam.GetPolicyDocumentStatementPrincipal{
					{
						Type: "Service",
						Identifiers: []string{
							"ec2.amazonaws.com",
						},
					},
				},
			},
		},
	}, nil)
	if err != nil {
		return nil, err
	}
	role, err := iam.NewRole(ctx, name, &iam.RoleArgs{
		AssumeRolePolicy: pulumi.String(instanceAssumeRolePolicy.Json),
	})
	if err != nil {
		return nil, err
	}
	for i, policy := range managedPolicyArns {
		_, err := iam.NewRolePolicyAttachment(ctx, fmt.Sprintf("%s-policy-%d", name, i), &iam.RolePolicyAttachmentArgs{
			PolicyArn: pulumi.String(policy),
			Role:      role.ID(),
		})
		if err != nil {
			return nil, err
		}
	}
	return role, nil
}

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		role0, err := createRole(ctx, "example-role0")
		if err != nil {
			return err
		}
		role1, err := createRole(ctx, "example-role1")
		if err != nil {
			return err
		}
		role2, err := createRole(ctx, "example-role2")
		if err != nil {
			return err
		}
		cluster, err := eks.NewCluster(ctx, "example-managed-nodegroups-go", &eks.ClusterArgs{
			SkipDefaultNodeGroup: pulumi.BoolPtr(true),
			InstanceRoles: iam.RoleArray{
				role0,
				role1,
				role2,
			},
		})
		if err != nil {
			return err
		}
		// Export the kubeconfig for the cluster.
		ctx.Export("kubeconfig", cluster.Kubeconfig)
		// Create a simple AWS managed node group using a cluster as input and the
		// refactored API.
		_, err = eks.NewManagedNodeGroup(ctx, "aws-managed-ng0",
			&eks.ManagedNodeGroupArgs{
				Cluster:  cluster.Core,
				NodeRole: role0,
			})
		if err != nil {
			return err
		}
		return nil
	})
}
Results in:
Type Name Status Info
pulumi:pulumi:Stack managed-nodegroups-go-dev running. error: Error: The IAM role mappings provided could not be properly serialized to YAML for
+ ├─ aws:iam:Role example-role0 created
+ ├─ aws:iam:Role example-role1 created
+ ├─ aws:iam:Role example-role2 created
+ ├─ aws:iam:RolePolicyAttachment example-role0-policy-2 created
+ ├─ aws:iam:RolePolicyAttachment example-role0-policy-1 created
+ ├─ aws:iam:RolePolicyAttachment example-role0-policy-0 created
+ ├─ aws:iam:RolePolicyAttachment example-role1-policy-2 created
+ ├─ aws:iam:RolePolicyAttachment example-role1-policy-0 created
+ ├─ aws:iam:RolePolicyAttachment example-role1-policy-1 created
+ ├─ aws:iam:RolePolicyAttachment example-role2-policy-1 created
+ ├─ aws:iam:RolePolicyAttachment example-role2-policy-2 created
+ ├─ aws:iam:RolePolicyAttachment example-role2-policy-0 created
└─ eks:index:Cluster example-managed-nodegroups-go
├─ eks:index:ServiceRole example-managed-nodegroups-go-eksRole
+ │ ├─ aws:iam:Role example-managed-nodegroups-go-eksRole-role created
+ │ ├─ aws:iam:RolePolicyAttachment example-managed-nodegroups-go-eksRole-90eb1c99 created
+ │ └─ aws:iam:RolePolicyAttachment example-managed-nodegroups-go-eksRole-4b490823 created
+ ├─ aws:eks:Cluster example-managed-nodegroups-go-eksCluster created Cluster is ready
~ ├─ aws:ec2:SecurityGroup example-managed-nodegroups-go-nodeSecurityGroup updated [diff: ~tags]
+- ├─ pulumi:providers:kubernetes example-managed-nodegroups-go-provider replaced [diff: ~kubeconfig]
+- ├─ pulumi:providers:kubernetes example-managed-nodegroups-go-eks-k8s replaced [diff: ~kubeconfig]
~ └─ eks:index:VpcCni example-managed-nodegroups-go-vpc-cni updated [diff: ~kubeconfig]
Two problems:
1. The error above, "The IAM role mappings provided could not be properly serialized to YAML".
2. The deployment hangs after the cluster is created.
Backtrace from cancelling the update:
error: Error: The IAM role mappings provided could not be properly serialized to YAML for the aws-auth ConfigMap
at /Users/vivekl/.pulumi/plugins/resource-eks-v0.21.0/node_modules/@pulumi/cluster.ts:652:23
at /Users/vivekl/.pulumi/plugins/resource-eks-v0.21.0/node_modules/@pulumi/pulumi/output.js:249:35
at Generator.next (<anonymous>)
at /Users/vivekl/.pulumi/plugins/resource-eks-v0.21.0/node_modules/@pulumi/pulumi/output.js:21:71
at new Promise (<anonymous>)
at __awaiter (/Users/vivekl/.pulumi/plugins/resource-eks-v0.21.0/node_modules/@pulumi/pulumi/output.js:17:12)
at applyHelperAsync (/Users/vivekl/.pulumi/plugins/resource-eks-v0.21.0/node_modules/@pulumi/pulumi/output.js:228:12)
at /Users/vivekl/.pulumi/plugins/resource-eks-v0.21.0/node_modules/@pulumi/pulumi/output.js:182:65
at processTicksAndRejections (internal/process/task_queues.js:97:5)
error: update canceled
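For context on the first problem: the component renders each role passed via InstanceRoles into a mapRoles entry (rolearn, username, groups) in the cluster's aws-auth ConfigMap, and the error above fires when that list of mappings cannot be rendered to YAML. One possible workaround, sketched below but untested here, is to declare the mappings explicitly through the cluster's RoleMappings input. The eks.RoleMappingArray and eks.RoleMappingArgs names are assumptions based on the provider schema and the usual Go SDK code generation, and role0 is assumed to be a concrete *iam.Role so its Arn output is available:

// Untested sketch: declare the aws-auth role mapping explicitly rather
// than having the provider derive it from InstanceRoles.
cluster, err := eks.NewCluster(ctx, "example-managed-nodegroups-go", &eks.ClusterArgs{
	SkipDefaultNodeGroup: pulumi.BoolPtr(true),
	RoleMappings: eks.RoleMappingArray{
		eks.RoleMappingArgs{
			RoleArn:  role0.Arn, // assumes role0 is a *iam.Role
			Username: pulumi.String("system:node:{{EC2PrivateDNSName}}"),
			Groups: pulumi.StringArray{
				pulumi.String("system:bootstrappers"),
				pulumi.String("system:nodes"),
			},
		},
	},
})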
Regarding the second problem, the deployment hanging after the cluster is created: I thought I'd addressed this, but it looks like it's still happening. I've re-opened https://github.com/pulumi/pulumi-eks/issues/480 to track it. Evan recently did some investigation around hangs related to the Automation API, and I'm wondering if the TS provider code for multi-lang components has similar issues: https://github.com/pulumi/pulumi/issues/6153#issuecomment-767943455
Note that the above example will require https://github.com/pulumi/pulumi/pull/6224.
I haven't tried it out yet, but I suspect this may now be fixed in 1.0, by passing cluster directly rather than cluster.Core:
_, err = eks.NewManagedNodeGroup(ctx, "aws-managed-ng0",
	&eks.ManagedNodeGroupArgs{
-		Cluster: cluster.Core,
+		Cluster: cluster,
		NodeRole: role0,
	})
if err != nil {
	return err
}
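Applied to the example above, the node group creation would then read as follows (an untested sketch, assuming the 1.0 Go SDK's ManagedNodeGroupArgs.Cluster accepts the eks.Cluster component directly):

// Untested 1.0-style sketch: pass the Cluster component itself rather
// than its Core output.
_, err = eks.NewManagedNodeGroup(ctx, "aws-managed-ng0",
	&eks.ManagedNodeGroupArgs{
		Cluster:  cluster,
		NodeRole: role0,
	})
if err != nil {
	return err
}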