terraform-provider-libvirt
Cannot see ~/.ssh/known_hosts on 0.8.0
System Information
Linux distribution
$ nixos-version
24.11.20241004.bc947f5 (Vicuna)
Terraform version
$ terraform version
Terraform v1.9.7
on linux_amd64
+ provider registry.terraform.io/dmacvicar/libvirt v0.8.0
+ provider registry.terraform.io/hashicorp/null v3.2.3
Provider and libvirt versions
+ provider registry.terraform.io/dmacvicar/libvirt v0.8.0
Checklist
- [x] Is your issue/contribution related with enabling some setting/option exposed by libvirt that the plugin does not yet support, or requires changing/extending the provider terraform schema?
No
- [x] Make sure you explain why this option is important to you, why it should be important to everyone. Describe your use-case with detail and provide examples where possible.
My assumption is that if others were using an SSH connection and ran into this issue, they would want to know the default/expected behaviours.
- [X] If it is a very special case, consider using the XSLT support in the provider to tweak the definition instead of opening an issue
I feel like this warrants an issue, as to me it is not a special case.
- [X] Maintainers do not have expertise in every libvirt setting, so please, describe the feature and how it is used. Link to the appropriate documentation
See the libvirt docs detailing the supported extra parameters on the connection URI.
- [x] Is it a bug or something that does not work as expected? Please make sure you fill in the version information below:
Description of Issue/Question
Setup
variables.tf
variable "cluster_name" {
default = "local-talos-cluster"
}
variable "talos_version" {
type = string
default = "v1.7.6"
}
variable "controlplane_name" {
default = "talos-controlplane"
}
variable "controlplane_instances" {
default = "3"
}
variable "controlplane-memory" {
default = "2048"
}
variable "controlplane-vcpu" {
default = "2"
}
variable "controlplane-diskBytes" {
default = 1024*1024*1024*10 #10GB
}
variable "extra-storage-controlplane-diskBytes" {
default = 1024*1024*1024*10 #10GB
}
variable "worker_name" {
default = "talos-worker"
}
variable "worker_instances" {
default = "3"
}
variable "worker-memory" {
default = "1024"
}
variable "worker-vcpu" {
default = "1"
}
variable "worker-diskBytes" {
default = 1024*1024*1024*10 #10GB
}
variable "extra-storage-worker-diskBytes" {
default = 1024*1024*1024*10 #10GB
}
variable "network-name0"{
default = "k8snet-0"
}
variable "domain-name0"{
default = "k8s-0.local"
}
variable "k8s_network0"{
default = "10.17.3." #first three octets are customizable
}
variable "network-name1"{
default = "k8snet-private"
}
variable "domain-name1"{
default = "k8s-1.local"
}
variable "k8s_network1"{
default = "10.17.4." #first three octets are customizable
}
provider.tf
provider "libvirt" {
uri = "qemu+ssh://[email protected]/system"
}
extensions.tf
terraform {
  required_version = ">= 0.8.0"
  required_providers {
    libvirt = {
      source = "dmacvicar/libvirt"
    }
  }
}
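As an aside, required_version constrains the Terraform CLI rather than the provider; the sketch below shows how the provider itself could be pinned to the affected release (the ">= 1.9" CLI constraint is my assumption, based on the Terraform version above):

terraform {
  # required_version constrains the Terraform CLI, not the provider
  required_version = ">= 1.9"
  required_providers {
    libvirt = {
      source  = "dmacvicar/libvirt"
      version = "0.8.0" # pin the provider to the affected release
    }
  }
}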
main.tf
resource "null_resource" "get_talos_tgz" {
provisioner "local-exec" {
command = "mkdir -p logs; touch logs/get-images.log; export talos_version=${var.talos_version} ; /bin/bash helpers/get-images.sh | tee logs/get-images.log"
}
provisioner "local-exec" {
when = destroy
command = "mkdir -p logs; touch logs/del-images.log; /bin/bash helpers/del-images.sh | tee logs/del-images.log"
}
}
resource "null_resource" "get_talos_config" {
depends_on = [libvirt_domain.domain-talos-controlplane-raw, libvirt_domain.domain-talos-worker-raw]
provisioner "local-exec" {
command = <<EOT
mkdir -p logs
touch logs/gen-config.log
export cluster_name=${var.cluster_name}
export k8s_network=${var.k8s_network0}
export controlplane_name=${var.controlplane_name}
export worker_name=${var.worker_name}
export controlplane_instances=${var.controlplane_instances}
export worker_instances=${var.worker_instances}
export talos_version=${var.talos_version}
/bin/bash helpers/gen-config.sh | tee logs/gen-config.log
EOT
}
provisioner "local-exec" {
when = destroy
command = "mkdir -p logs; touch logs/del-config.log; /bin/bash helpers/del-config.sh | tee logs/del-config.log"
}
}
network.tf
resource "libvirt_network" "kube_network0" {
name = "${var.network-name0}"
domain = "${var.domain-name0}"
mode = "route"
addresses = ["${var.k8s_network0}0/24"]
autostart = "true"
dhcp {
enabled = "true"
}
dns {
enabled = "true"
local_only = "false"
}
}
volumes.tf
resource "libvirt_volume" "talos-raw" {
depends_on = [null_resource.get_talos_tgz]
name = "talos-raw"
format = "raw"
source = "images/${var.talos_version}/disk.raw"
}
resource "libvirt_volume" "vol-talos-controlplane-raw" {
count = var.controlplane_instances
name = "vol-talos-controlplane-raw-${count.index}"
base_volume_id = libvirt_volume.talos-raw.id
size = var.controlplane-diskBytes
}
resource "libvirt_volume" "vol-talos-worker-raw" {
count = var.worker_instances
name = "vol-talos-worker-raw-${count.index}"
base_volume_id = libvirt_volume.talos-raw.id
size = var.worker-diskBytes
}
resource "libvirt_volume" "extra-vol-talos-controlplane-qcow" {
count = var.controlplane_instances
name = "extra-vol-talos-controlplane-qcow-${count.index}"
size = var.extra-storage-controlplane-diskBytes
}
resource "libvirt_volume" "extra-vol-talos-worker-qcow" {
count = var.worker_instances
name = "extra-vol-talos-worker-qcow-${count.index}"
size = var.extra-storage-worker-diskBytes
}
controlplane.tf
resource "libvirt_domain" "domain-talos-controlplane-raw" {
count = var.controlplane_instances
depends_on = [libvirt_network.kube_network0]
name = "${var.controlplane_name}-${count.index}"
memory = var.controlplane-memory
vcpu = var.controlplane-vcpu
autostart = "true"
machine = "q35"
boot_device {
dev = [ "hd" ]
}
disk {
volume_id = libvirt_volume.vol-talos-controlplane-raw[count.index].id
}
disk {
volume_id = libvirt_volume.extra-vol-talos-controlplane-qcow[count.index].id
}
cpu {
mode = "host-passthrough"
}
network_interface {
network_name = "${var.network-name0}"
hostname = "${var.controlplane_name}-${count.index}"
addresses = ["${var.k8s_network0}20${count.index}"]
mac = "AA:BB:CC:11:20:0${count.index}"
wait_for_lease = "true"
}
# network_interface {
# network_name = "${var.network-name1}"
# hostname = "${var.controlplane_name}-${count.index}"
# addresses = ["${var.k8s_network1}20${count.index}"]
# mac = "AA:BB:CC:11:30:0${count.index}"
# wait_for_lease = "true"
# }
graphics {
type = "vnc"
listen_type = "none"
}
}
worker.tf
resource "libvirt_domain" "domain-talos-worker-raw" {
count = var.worker_instances
depends_on = [libvirt_network.kube_network0]
name = "${var.worker_name}-${count.index}"
memory = var.worker-memory
vcpu = var.worker-vcpu
autostart = "true"
machine = "q35"
boot_device {
dev = [ "hd" ]
}
disk {
volume_id = libvirt_volume.vol-talos-worker-raw[count.index].id
}
disk {
volume_id = libvirt_volume.extra-vol-talos-worker-qcow[count.index].id
}
cpu {
mode = "host-passthrough"
}
network_interface {
network_name = "${var.network-name0}"
hostname = "${var.worker_name}-${count.index}"
addresses = ["${var.k8s_network0}22${count.index}"]
mac = "AA:BB:CC:11:20:2${count.index}"
wait_for_lease = "true"
}
# network_interface {
# network_name = "${var.network-name1}"
# hostname = "${var.worker_name}-${count.index}"
# addresses = ["${var.k8s_network1}22${count.index}"]
# mac = "AA:BB:CC:11:30:2${count.index}"
# wait_for_lease = "true"
# }
graphics {
type = "vnc"
listen_type = "none"
}
}
Steps to Reproduce Issue
terraform init
terraform plan
Error
Plan: 1 to add, 0 to change, 0 to destroy.
╷
│ Error: failed to connect: failed to read ssh known hosts: open ~/.ssh/known_hosts: no such file or directory
│
│ with provider["registry.terraform.io/dmacvicar/libvirt"],
│ on providers.tf line 2, in provider "libvirt":
│ 2: provider "libvirt" {
If I change the connection string to uri = "qemu+ssh://[email protected]/system?knownhosts=/home/dustin/.ssh/known_hosts", it works. I also tested uri = "qemu+ssh://[email protected]/system?knownhosts=~/.ssh/known_hosts", which failed as well. It seems the provider no longer expands ~.
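For reference, a minimal provider block showing the working configuration with an absolute knownhosts path (user@host is a placeholder for the real connection target):

provider "libvirt" {
  # knownhosts must be an absolute path; ~ is not expanded by 0.8.0
  uri = "qemu+ssh://user@host/system?knownhosts=/home/dustin/.ssh/known_hosts"
}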
Additional information:
Do you have SELinux or Apparmor/Firewall enabled? Some special configuration?
- No
Have you tried to reproduce the issue without them enabled?
- No (NA)
I noted that the SSH config handling was reworked for 0.8.0. Is this behaviour intended?
Thank you in advance.