terraform-provider-ovh
[BUG] Terraform/OVH API crashes randomly
Describe the bug
Hi, I'm not sure if this is really a provider bug (it feels a lot more like an API bug), but here it goes: Terraform plan/apply randomly crashes with timeouts, 500 response codes, and other errors. A business support ticket already exists for this very issue on OVH (9391258), but in 2 weeks we got absolutely nowhere. Opening the bug here as per the suggestion on Discord.
Terraform Version
terraform -v
Terraform v1.7.5
on darwin_arm64
OVH Terraform Provider Version
0.39.0 and 0.40.0. Earlier versions had the same or similar issues.
Affected Resource(s)
- ovh_cloud_project_kube
- ovh_cloud_project_kube_nodepool
- ovh_cloud_project_database_ip_restriction
- ovh_cloud_project_database_mongodb_user
- ovh_cloud_project_database
I would also list the VM instance resources here, but those belong to a different provider. Essentially any resource we use can cause issues.
This is not an issue with Terraform core: other providers elsewhere work just fine, and restarting the plan/apply (sometimes multiple times) eventually gets the job done.
Terraform Configuration Files
provider "ovh" {
endpoint = var.ovh_endpoint
application_key = var.ovh_application_key
application_secret = var.ovh_application_secret
consumer_key = var.ovh_consumer_key
alias = "ovh"
}
provider "openstack" {
user_name = var.openstack_user
password = var.openstack_password
auth_url = "https://auth.cloud.ovh.net/v3/" # Authentication URL
domain_name = "default"
alias = "ovh"
tenant_id = var.ovh_service_name
}
terraform {
required_providers {
openstack = {
source = "terraform-provider-openstack/openstack"
version = "1.54.1"
configuration_aliases = [openstack.ovh]
}
ovh = {
source = "ovh/ovh"
version = "0.39.0"
configuration_aliases = [ovh.ovh]
}
}
backend "http" {
address = "[REDACTED]"
lock_address = "[REDACTED]"
unlock_address = "[REDACTED]"
lock_method = "POST"
unlock_method = "DELETE"
retry_wait_min = 5
}
}
## Snippets only, the entire config is over 400 different resources
variable "authorized_networks_beta_ovh" {
description = "List of authorized networks to be used for managed databases"
type = map(any)
default = {
"243ce7" = {
description = "243ce7"
ip = "0.1.2.3/32"
}
"3db359" = {
description = "3db359"
ip = "4.5.6.7/32"
}
"4690d8" = {
description = "4690d8"
ip = "8.9.10.11/32"
}
}
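As an aside, the same map could be typed explicitly so that a missing or misspelled nested key fails at plan time rather than at apply time. This is only a sketch with a hypothetical variable name; map(any) works just as well:

variable "authorized_networks_typed" {
  description = "Authorized networks keyed by an arbitrary label (hypothetical typed variant of the map above)"
  type = map(object({
    description = string
    ip          = string
  }))
  default = {}
}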
resource "ovh_cloud_project_database" "mysql_beta" {
provider = ovh.ovh
description = "beta-mysql"
disk_size = 80
advanced_configuration = {
"mysql.sql_mode" = "ANSI,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,STRICT_ALL_TABLES"
"mysql.sql_require_primary_key" = "true"
}
engine = "mysql"
flavor = "db1-4"
opensearch_acls_enabled = false
plan = "business"
service_name = "3985b358e7d049e6b6dd4044a40fdcfe"
version = "8"
nodes {
region = "DE"
}
nodes {
region = "DE"
}
}
resource "ovh_cloud_project_database" "psql_beta" {
provider = ovh.ovh
advanced_configuration = {
"pglookout.max_failover_replication_time_lag" = "60"
}
description = "beta-psql"
disk_size = 80
engine = "postgresql"
flavor = "db1-4"
opensearch_acls_enabled = false
plan = "business"
service_name = var.ovh_service_name
version = "15"
nodes {
region = "DE"
}
nodes {
region = "DE"
}
}
resource "ovh_cloud_project_database" "mongodb_beta" {
provider = ovh.ovh
description = "beta-mongo"
disk_size = 40
engine = "mongodb"
flavor = "db2-4"
opensearch_acls_enabled = false
plan = "production"
service_name = var.ovh_service_name
version = "6.0"
nodes {
region = "DE"
}
nodes {
region = "DE"
}
nodes {
region = "DE"
}
}
resource "ovh_cloud_project_database_ip_restriction" "restricted-ips-mongo-beta" {
provider = ovh.ovh
for_each = var.authorized_networks_beta_ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mongodb_beta.id
engine = ovh_cloud_project_database.mongodb_beta.engine
description = each.value.description
ip = each.value.ip
}
resource "ovh_cloud_project_database_ip_restriction" "restricted-ips-mysql-beta" {
provider = ovh.ovh
for_each = var.authorized_networks_beta_ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mysql_beta.id
engine = ovh_cloud_project_database.mysql_beta.engine
description = each.value.description
ip = each.value.ip
}
resource "ovh_cloud_project_database_ip_restriction" "restricted-ips-psql-beta" {
provider = ovh.ovh
for_each = var.authorized_networks_beta_ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.psql_beta.id
engine = ovh_cloud_project_database.psql_beta.engine
description = each.value.description
ip = each.value.ip
}
resource "ovh_cloud_project_kube" "development_cluster" {
service_name = var.ovh_service_name
name = var.k8s_name
region = var.k8s_region
version = var.k8s_version
kube_proxy_mode = "iptables"
update_policy = "ALWAYS_UPDATE"
}
resource "ovh_cloud_project_kube_nodepool" "development_cluster_pool" {
service_name = var.ovh_service_name
kube_id = ovh_cloud_project_kube.development_cluster.id
name = "${var.k8s_name}-pool"
flavor_name = var.k8s_machine_type
desired_nodes = var.k8s_node_count
max_nodes = var.k8s_max_nodes
min_nodes = var.k8s_min_nodes
monthly_billed = false
autoscale = true
}
resource "ovh_cloud_project_kube_nodepool" "development_cluster_me_pool" {
service_name = var.ovh_service_name
kube_id = ovh_cloud_project_kube.development_cluster.id
name = "${var.k8s_name}-me-pool"
flavor_name = var.k8s_machine_type
desired_nodes = var.me_k8s_node_count
max_nodes = var.me_k8s_max_nodes
min_nodes = var.me_k8s_min_nodes
monthly_billed = false
autoscale = true
template {
metadata {
annotations = {}
finalizers = []
labels = {
medical-engine = "true"
component = "medical-engine"
}
}
spec {
unschedulable = false
taints = [
{
effect = "NoSchedule"
key = "special"
value = "true"
}
]
}
}
}
resource "ovh_cloud_project_kube_oidc" "development-oidc" {
service_name = var.ovh_service_name
kube_id = ovh_cloud_project_kube.development_cluster.id
client_id = "[REDACTED]"
issuer_url = "https://accounts.google.com"
oidc_username_claim = "email"
}
Debug Output
The most common outputs we see: https://gist.github.com/apinter/cdda84c7eb975c2f52beff5d701bd488
Panic Output
Expected Behavior
Terraform can be used seamlessly with OVH to deploy resources.
Actual Behavior
Terraform/OVH API randomly crashes, making the deployment workflow unreliable.
Steps to Reproduce
- terraform plan
- terraform apply
The more resources, the more likely it is to crash, but it can also happen with a simple 4-resource deployment, or with an MKS cluster upgrade. (The latter happened just yesterday when we upgraded from 1.26 to 1.27 with TF; it timed out after 10 minutes.)
References
Additional context
This is not a networking error on our end. It doesn't matter where we run the deployment from, it can end up with a crash. Support suggested that we are hitting a rate limit, but based on the official documentation that should return a 429 response, not a 500. And even if we do hit a rate limit and the endpoint responds with a 500, that is still a bug.
Hi,
I'm having the same problems and I'd like to add that this also happens when I delete a public cloud network.
I see that @amstuta linked #591 to this issue, but that is unlikely to fix all the problems. I don't believe the problem is strongly provider-related. Sure, some timeout errors might be fixed by it, but the provider can't account for an MKS upgrade. For example, we have MKS clusters with different numbers of nodes, different machine types, etc., where clusters would reach the ready state at different times, unless the timeout is pushed out to some ridiculously high value. I still believe the issue is on the API side, but of course I could be wrong.
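For reference, this is roughly what I mean by pushing the timeout out, using one of the database resources as an example. Whether a given resource actually honors a custom timeouts block is an assumption on my part, so check the resource documentation for the provider version in use before relying on it:

resource "ovh_cloud_project_database" "mongodb_beta" {
  # ...existing arguments as above...

  # Assumed workaround: raise the operation timeouts well past the provider default.
  # Only meaningful if this resource exposes a configurable timeouts block at all.
  timeouts {
    create = "90m"
    update = "90m"
    delete = "90m"
  }
}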
@apinter you're right. We're currently planning various patches on the API and on the provider to improve the overall experience with the products mentioned in your issue; that's why I linked the PR.
I've just tried to deploy a mongo cluster with the latest provider (v0.41.0) and ran into a timeout again. Pretty sure something is not right lately with OVH, considering that yesterday a VM backup took over 2.5 hrs instead of the usual "few seconds" 🙃 Anyhow, this is the module:
resource "ovh_cloud_project_database" "mongodb_medres" {
provider = ovh.ovh
description = "medres-mongo"
disk_size = 40
engine = "mongodb"
flavor = "db2-4"
opensearch_acls_enabled = false
plan = "production"
service_name = var.ovh_service_name
version = "6.0"
nodes {
region = "DE"
}
nodes {
region = "DE"
}
nodes {
region = "DE"
}
}
resource "ovh_cloud_project_database_mongodb_user" "xund_medres_admin" {
provider = ovh.ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mongodb_medres.id
name = "admin"
roles = [
"clusterMonitor@admin",
"readWriteAnyDatabase@admin",
"userAdminAnyDatabase@admin",
]
}
resource "ovh_cloud_project_database_mongodb_user" "xund_medres" {
provider = ovh.ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mongodb_medres.id
name = "xund_medres"
roles = [
"dbAdminAnyDatabase@admin",
"readWriteAnyDatabase@admin",
]
}
## Authorized ips
resource "ovh_cloud_project_database_ip_restriction" "medres_authip_mongo" {
provider = ovh.ovh
for_each = var.authorized_networks_medres
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mongodb_medres.id
engine = ovh_cloud_project_database.mongodb_medres.engine
description = each.value.description
ip = each.value.ip
}
I've made the debug-mode output available here.
EDIT: btw, it went over the new default 40m timeout by about 10m.
TF refresh did nothing, while TF apply just started to create a new mongo cluster with the same name. Not sure if this is new behavior or whether it is expected to work like this.
Apparently the 2nd deployment went better, but I was able to reproduce the very same timeout as before. With that said, it still skipped the admin user "creation". Re-running the apply for the following resource:
resource "ovh_cloud_project_database_mongodb_user" "xund_medres_admin" {
provider = ovh.ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mongodb_medres.id
name = "admin"
roles = [
"clusterMonitor@admin",
"readWriteAnyDatabase@admin",
"userAdminAnyDatabase@admin",
]
}
The apply also timed out. The debug output is available here. At this point I have 2 production-grade mongo clusters deployed...
Downgraded to v0.40.0 and hit the same issue... in the output you can see the 409 responses, yet it keeps retrying for 20 minutes for no reason. This configuration previously worked just fine on 0.40.0, so again, it is unlikely to be a provider issue...
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: ---[ RESPONSE ]--------------------------------------
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: HTTP/1.1 409 Conflict
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Connection: close
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Content-Length: 170
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Access-Control-Allow-Headers: X-Ovh-Timestamp, X-Ovh-Consumer, X-Ovh-Application, X-Ovh-Signature, X-Ovh-Session, Authorization, Content-Type, X-Challenge-Response, X-Challenge-Payload
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Access-Control-Allow-Methods: GET, POST
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Access-Control-Allow-Origin: *
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Access-Control-Expose-Headers: X-Pagination-Cursor-Next, X-Ovh-Queryid
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Cache-Control: no-cache, no-store
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Content-Type: application/json; charset=utf-8
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Date: Fri, 05 Apr 2024 14:26:51 GMT
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: Server: nginx
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: X-Content-Type-Options: nosniff
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: X-Frame-Options: DENY
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: X-Iplb-Instance: 47058
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: X-Iplb-Request-Id: 67796CB4:C404_8D5FBADF:01BB_66100A2A_28554CD0:1C0B9
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: X-Ovh-Queryid: EU.ext-4.66100a2b.2905257.a2a489d777baab2d6490711965b0d927
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: X-Xss-Protection: 1; mode=block
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: {
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: "class": "Client::Conflict::MongodbUserAlreadyExist",
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: "message": "user \"admin\" already exist",
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: "details": {
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: "service": "0394a25e-f7b9-4d0c-99c6-c72c92c6acfe",
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: "user": "admin"
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: }
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: }
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: -----------------------------------------------------
2024-04-05T21:26:51.004+0700 [DEBUG] provider.terraform-provider-ovh_v0.40.0: 2024/04/05 21:26:51 [TRACE] Waiting 10s before next try
ovh_cloud_project_database_mongodb_user.xund_medres_admin: Still creating... [4m30s elapsed]
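Rather than letting it retry against the 409 for 20 minutes, one way to reconcile this would be to adopt the already-created user with a Terraform import block (available since 1.5). This is only a sketch: the ID format and placeholders below are my assumption, so verify them against the ovh_cloud_project_database_mongodb_user import documentation and substitute real values before planning:

import {
  # Sketch: bring the existing "admin" user under management instead of re-creating it.
  # The <service_name>/<cluster_id>/<user_id> format is assumed, not verified.
  to = ovh_cloud_project_database_mongodb_user.xund_medres_admin
  id = "<service_name>/<cluster_id>/<user_id>"
}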
Little update: had some API issues again, 3-4 times in a row.
I've been trying to remove my playground env, but have constantly run into this error for the past 15 minutes:
│ Error: calling /cloud/project/REDACTED/database/mongodb/REDACTED/ipRestriction/85.217.184.102%2F32:
│ Get "https://eu.api.ovh.com/1.0/cloud/project/REDACTED/database/mongodb/REDACTED/ipRestriction/85.217.184.102%2F32": net/http: TLS handshake timeout
After verifying internally, I can confirm that neither your IP nor your region is being blocked. There is also no widespread problem with TLS handshakes on our side, so it seems you are encountering a localized issue.
Since you mentioned that using a VPN in Singapore solved the issue, one possibility is that there is poor connectivity between your AS and ours.
If you are OK with using the VPN, then let's stick to it. If you are not, we need more information to escalate the case to our network team, such as an mtr -n eu.api.ovh.com to start.
Thanks for looking into this. I'm OK with using my VPN, but my employer might not be in the long run 🙃
This is what I see in mtr:
My traceroute [v0.95]
Mac-Q6CVDWDJVJ.local (172.168.255.6) -> eu.api.ovh.com (141.95.186.223)    2024-05-22T17:08:32+0700
Keys: Help Display mode Restart statistics Order of fields quit
Packets Pings
Host Loss% Snt Last Avg Best Wrst StDev
1. 172.168.0.1 0.0% 179 2.0 2.3 1.6 6.6 0.7
2. (waiting for reply)
3. 45.126.80.112 0.0% 179 4.3 13.0 3.6 114.7 15.2
4. 103.61.250.81 0.0% 179 4.3 3.8 3.0 6.4 0.6
5. 125.208.158.73 0.0% 179 3.4 3.8 3.0 7.4 0.7
6. 210.210.161.117 0.0% 179 3.3 3.8 3.0 7.2 0.6
7. 121.100.4.53 0.0% 179 4.2 5.8 2.8 45.2 5.9
8. 121.100.7.169 0.0% 179 15.0 17.0 14.6 57.3 5.5
9. 198.32.176.106 0.0% 179 185.5 186.7 184.2 194.8 1.3
10. (waiting for reply)
11. (waiting for reply)
12. (waiting for reply)
13. 198.27.73.149 16.9% 179 231.3 231.8 230.9 236.1 0.7
14. 198.27.73.206 15.7% 179 404.4 321.8 313.8 418.7 15.7
15. 198.27.73.205 80.8% 178 318.1 319.2 314.9 321.6 1.6
16. (waiting for reply)
17. 94.23.122.144 14.6% 178 402.5 424.0 399.3 650.6 53.4
18. (waiting for reply)
19. (waiting for reply)
20. (waiting for reply)
21. 141.95.186.223 16.3% 178 394.3 395.2 392.1 400.7 1.1
This is the result if I use wireguard:
My traceroute [v0.95]
Mac-Q6CVDWDJVJ.local (10.222.0.35) -> eu.api.ovh.com (141.95.186.223) 2024-05-22T17:12:59+0700
Keys: Help Display mode Restart statistics Order of fields quit
Packets Pings
Host Loss% Snt Last Avg Best Wrst StDev
1. 10.222.0.1 0.0% 59 16.8 18.1 16.5 24.1 1.3
2. 10.209.1.118 0.0% 59 17.7 18.6 16.7 37.1 3.0
3. 10.209.35.4 0.0% 59 20.2 18.3 17.0 26.6 1.5
4. 10.209.32.2 1.7% 59 19.1 18.1 16.8 21.0 0.8
5. 139.162.0.102 0.0% 59 18.6 19.4 16.7 58.3 5.5
6. 210.57.38.132 0.0% 59 18.1 20.2 17.1 45.7 4.7
7. 202.84.224.193 87.9% 59 18.0 24.3 18.0 31.5 5.0
8. 202.84.140.234 0.0% 59 205.6 206.9 204.5 215.3 2.0
9. 202.84.140.234 0.0% 59 206.4 206.7 204.5 216.9 1.7
10. 202.84.143.198 0.0% 59 205.4 206.6 204.1 211.9 1.4
11. 202.84.143.198 0.0% 59 205.4 206.6 204.2 216.6 1.7
12. 202.84.247.41 0.0% 59 210.1 208.5 206.8 218.5 1.8
13. 178.32.135.150 3.4% 58 204.5 204.8 202.4 209.2 1.6
14. (waiting for reply)
15. (waiting for reply)
16. (waiting for reply)
17. 198.27.73.149 0.0% 58 242.2 242.7 241.3 248.0 1.1
18. 198.27.73.206 0.0% 58 257.6 261.6 257.1 316.6 11.7
19. 198.27.73.205 77.2% 58 260.4 260.9 258.5 265.1 2.1
20. (waiting for reply)
21. 94.23.122.144 0.0% 58 345.6 369.4 344.9 643.8 61.1
22. (waiting for reply)
23. (waiting for reply)
24. (waiting for reply)
25. 141.95.186.223 0.0% 58 346.7 347.0 345.5 350.8 0.9
Very different results.
After an investigation by our network team, it seems that the issue happens before reaching the OVH network.
What we saw is that your working traffic goes through the Telstra network, while the failing traffic goes through an operator named PT. Cyberindo Aditama. So if you want to dig, you could check on their side or with your internet service provider, but we can't help you further with this particular issue.
Thank you for looking into this. I'm not sure why this only happens with OVH, but I will dig further on my end. Appreciate your effort, thanks again!
Deployed a new mongo cluster today, got this:
│ Error: timeout while waiting database 301b1097-6f9c-4a1e-9179-3785b11c3001 to be READY: context deadline exceeded
│
│ with ovh_cloud_project_database.mongodb,
│ on dev-uat.tf line 3, in resource "ovh_cloud_project_database" "mongodb":
│ 3: resource "ovh_cloud_project_database" "mongodb" {
│
╵
Releasing state lock. This may take a few moments...
I don't have the debug log for this one sadly, but the error seems pretty obvious. I did nothing special though...
resource "ovh_cloud_project_database" "mongodb" {
provider = ovh.ovh
description = "adathors-test-mongo"
disk_size = 40
engine = "mongodb"
flavor = var.ovh_dbaas_mongodb_flavor
opensearch_acls_enabled = false
plan = var.ovh_dbaas_plan
service_name = var.ovh_service_name
version = var.ovh_dbaas_mongodb_version
nodes {
region = var.ovh_region
}
nodes {
region = var.ovh_region
}
nodes {
region = var.ovh_region
}
dynamic "ip_restrictions" {
for_each = var.authorized_networks_dev_ovh
content {
description = ip_restrictions.value.description
ip = ip_restrictions.value.ip
}
}
}
resource "ovh_cloud_project_database_mongodb_user" "mongo_admin" {
provider = ovh.ovh
service_name = var.ovh_service_name
cluster_id = ovh_cloud_project_database.mongodb.id
name = var.ovh_dbaas_default_user
roles = [
"readWriteAnyDatabase@admin",
"userAdminAnyDatabase@admin",
]
}
I don't see a ton of change regarding the original subject. We still see random errors from the API no matter the source (EU, Asia, VPN or not...), and I still strongly doubt that this has anything to do with this project's scope; it seems more related to the team maintaining the API services at OVH. At this point we had to turn off the pipelines that were starting, stopping, and scaling VM instances up and down, because it was so unpredictable whether they would fail with some random error response or not.
With that said, IMO the TF provider is in pretty great shape, and I'm grateful for the work that has gone into it along with the effort put into supporting our case, but I don't see the point of keeping this issue open any further. Keep up the great work 👍️
Thanks for your feedback @apinter, and don't hesitate to open new issues related to the provider if needed :)