
Pulumi

duebbert opened this issue on Jan 30, 2020 · 0 comments

For those who might be interested, here is a Pulumi (https://www.pulumi.com/) script that sets up the NFS volume on GKE from this repo. I'm not sure whether you want to integrate it into your repo. It assumes gcloud is already configured for the target project and zone, and that the current kubeconfig context points at the GKE cluster (the Pulumi Kubernetes provider picks up the ambient kubeconfig by default).

# Massively based on https://github.com/mappedinn/kubernetes-nfs-volume-on-gke
import subprocess

import pulumi
from pulumi import ResourceOptions

from pulumi_gcp.compute import Disk
from pulumi_kubernetes.apps.v1 import Deployment
from pulumi_kubernetes.core.v1 import (
    Service,
    PersistentVolume,
    PersistentVolumeClaim,
)

nfs_size = "1Gi"

# Setup default gcloud first
# gcloud auth login
# gcloud config set project <YOUR_GCP_PROJECT_HERE>
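# gcloud config set compute/zone <YOUR_COMPUTE_ZONE_HERE>  # the script reads this value below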
# gcloud auth application-default login

########################################################################################
# Create a GCP persistent disk to be used by the NFS server

project_id = subprocess.run(
    ["gcloud", "config", "get-value", "project"], capture_output=True, text=True
).stdout.strip()

zone_id = subprocess.run(
    ["gcloud", "config", "get-value", "compute/zone"], capture_output=True, text=True
).stdout.strip()
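# Alternative sketch: the same values could come from Pulumi config instead of
# shelling out to gcloud, assuming `pulumi config set gcp:project ...` and
# `pulumi config set gcp:zone ...` have been run:
#   gcp_config = pulumi.Config("gcp")
#   project_id = gcp_config.require("project")
#   zone_id = gcp_config.require("zone")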

nfs_disk_name = "gce-nfs-disk"
nfs_disk = Disk(
    nfs_disk_name,
    name=nfs_disk_name,
    size=int(nfs_size.replace("Gi", "")),  # Disk size is an integer number of GB
    project=project_id,
    zone=zone_id,
)

########################################################################################
# Create NFS server deployment with persistent disk
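# Note: a GCE persistent disk can only be attached read-write to a single node,
# so this deployment should keep replicas at 1. The volume-nfs image also needs a
# privileged securityContext to run its NFS daemons.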

nfs_app_name = "nfs-server"
nfs_app_labels = {"role": nfs_app_name}
nfs_app_image = "gcr.io/google_containers/volume-nfs:0.8"

nfs_volumes = [
    # FOR GCE
    {
        "name": nfs_disk_name,
        "gcePersistentDisk": {"pdName": nfs_disk_name, "fsType": "ext4"},
    },
    # FOR LOCAL
    # {"name": "nfs-disk", "hostPath": {"path": "/tmp", "type": ""}},
]
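# The commented-out hostPath entry is a drop-in replacement for local testing
# (e.g. on minikube), where a GCE persistent disk is not available.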

nfs_deployment = Deployment(
    nfs_app_name,
    metadata={"name": nfs_app_name},
    spec={
        "replicas": 1,
        "selector": {"match_labels": nfs_app_labels},
        "revisionHistoryLimit": 5,
        "template": {
            "metadata": {"labels": nfs_app_labels},
            "spec": {
                "containers": [
                    {
                        "name": nfs_app_name,
                        "image": nfs_app_image,
                        "ports": [
                            {"name": "nfs", "containerPort": 2049},
                            {"name": "mountd", "containerPort": 20048},
                            {"name": "rpcbind", "containerPort": 111},
                        ],
                        "securityContext": {"privileged": True},
                        "volumeMounts": [
                            {"mountPath": "/exports", "name": nfs_disk_name}
                        ],
                    }
                ],
                "volumes": nfs_volumes,
            },
        },
    },
    opts=ResourceOptions(depends_on=[nfs_disk]),
)
pulumi.export("nfs_server_pod", nfs_deployment.metadata["name"])

########################################################################################
# Create an NFS Service so the PV can use a stable cluster IP instead of the ephemeral NFS server pod IP

nfs_service = Service(
    nfs_app_name,
    metadata={"name": nfs_app_name},
    spec={
        "selector": nfs_deployment.spec["template"]["metadata"]["labels"],
        "ports": [
            {"name": "nfs", "port": 2049},
            {"name": "mountd", "port": 20048},
            {"name": "rpcbind", "port": 111},
        ],
    },
)
nfs_service_ip = nfs_service.spec.apply(
    lambda v: v["cluster_ip"] if "cluster_ip" in v else None
)
pulumi.export("nfs_service_ip", nfs_service_ip)

########################################################################################
# Create NFS PersistentVolume and PersistentVolumeClaim to be used by pods
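# The PV's "server" field uses the Service's cluster IP rather than its DNS name,
# because the NFS mount is performed on the node, which generally cannot resolve
# in-cluster DNS names without extra configuration. With storageClassName set to
# "" (no dynamic provisioning), the claim below binds to the matching static PV
# by capacity and access modes.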

nfs_persistent_volume = PersistentVolume(
    nfs_app_name,
    metadata={"name": nfs_app_name},
    spec={
        "capacity": {"storage": nfs_size},
        "accessModes": ["ReadWriteMany"],
        "nfs": {"server": nfs_service_ip, "path": "/"},
    },
    opts=ResourceOptions(depends_on=[nfs_deployment]),
)

nfs_persistent_volume_claim = PersistentVolumeClaim(
    nfs_app_name,
    metadata={"name": nfs_app_name},
    spec={
        "accessModes": ["ReadWriteMany"],
        "storageClassName": "",
        "resources": {"requests": {"storage": nfs_size}},
    },
    opts=ResourceOptions(depends_on=[nfs_persistent_volume]),
)

########################################################################################
# Create a test pod to see if it's all working

app_name = "nfs-busybox"
app_labels = {"name": app_name}
app_image = "busybox"
test_deployment = Deployment(
    app_name,
    metadata={"name": app_name},
    spec={
        "replicas": 1,
        "selector": {"match_labels": app_labels},
        "revisionHistoryLimit": 0,
        "template": {
            "metadata": {"labels": app_labels},
            "spec": {
                "containers": [
                    {
                        "name": app_name,
                        "image": app_image,
                        "imagePullPolicy": "IfNotPresent",
                        "command": [
                            "/bin/sh",
                            "-c",
                            "while true; do date >> /mnt/dates.txt; sleep 5; done",
                        ],
                        "volumeMounts": [
                            {"mountPath": "/mnt", "name": "nfs-server-pvc"}
                        ],
                    }
                ],
                "volumes": [
                    {
                        "name": "nfs-server-pvc",
                        "persistentVolumeClaim": {"claimName": nfs_app_name},
                    },
                ],
            },
        },
    },
    opts=ResourceOptions(
        depends_on=[
            nfs_deployment,
            nfs_service,
            nfs_persistent_volume,
            nfs_persistent_volume_claim,
        ]
    ),
)
pulumi.export("test_server_pod", test_deployment.metadata["name"])
