Tags: azure, terraform, azure-marketplace, alienvault

Deploy AlienVault USM from Azure Marketplace using Terraform


Has anyone had any success/experience deploying the AlienVault USM Linux VM from the Azure Marketplace using Terraform?

I have the following main.tf module:

# VM depends on NIC
# Create network interface first
# One NIC per entry in var.virtual_machines, keyed by the map key so the VM
# resource below can look up its NIC with azurerm_network_interface.nic[each.key].
resource "azurerm_network_interface" "nic" {
  for_each = var.virtual_machines

  name                = each.value.nic_name
  location            = var.location
  resource_group_name = var.resource_group_name

  # Single IP configuration taken verbatim from the VM object.
  # private_ip_address is only honoured when allocation is "Static".
  ip_configuration {
    name                          = each.value.ip_configuration.name
    subnet_id                     = each.value.ip_configuration.subnet_id
    private_ip_address_allocation = each.value.ip_configuration.private_ip_address_allocation
    private_ip_address            = each.value.ip_configuration.private_ip_address
  }
}

# One Linux VM per entry in var.virtual_machines. Each VM references the NIC
# created above under the same map key.
resource "azurerm_linux_virtual_machine" "vm" {
  for_each = var.virtual_machines

  name                            = each.value.name
  computer_name                   = each.value.computer_name
  location                        = var.location
  resource_group_name             = var.resource_group_name
  size                            = var.vm_size
  network_interface_ids           = [azurerm_network_interface.nic[each.key].id]
  availability_set_id             = var.availability_set_name != "" ? azurerm_availability_set.avset[0].id : null
  admin_username                  = var.admin_username
  admin_password                  = var.admin_password
  disable_password_authentication = false

  # Marketplace images published with a purchase plan (such as AlienVault USM)
  # must be deployed with a matching "plan" block, otherwise Azure rejects the
  # deployment. The block is optional per VM: it is only emitted when the
  # caller supplies a one-element `plan` list on the VM object, so plain
  # platform images (e.g. Ubuntu) keep working unchanged. The plan values come
  # from the image listing; typically name = sku, product = offer.
  # NOTE(review): the subscription must also have accepted the Marketplace
  # terms for the plan (see azurerm_marketplace_agreement) — confirm.
  dynamic "plan" {
    for_each = try(each.value.plan, [])
    content {
      name      = plan.value.name
      product   = plan.value.product
      publisher = plan.value.publisher
    }
  }

  # os_disk must appear exactly once; the dynamic block simply maps the
  # single-element list supplied by the caller.
  dynamic "os_disk" {
    for_each = {
      for index, os_disk in each.value.os_disk : os_disk.name => os_disk
    }
    content {
      name                 = os_disk.value.name
      caching              = os_disk.value.caching
      storage_account_type = os_disk.value.storage_account_type
      disk_size_gb         = os_disk.value.disk_size_gb
    }
  }

  dynamic "source_image_reference" {
    for_each = {
      for index, source_image_reference in each.value.source_image_reference : source_image_reference.publisher => source_image_reference
    }
    content {
      publisher = source_image_reference.value.publisher
      offer     = source_image_reference.value.offer
      sku       = source_image_reference.value.sku
      version   = source_image_reference.value.version
    }
  }

  # NOTE(review): azurerm_linux_virtual_machine cannot create data disks with
  # create_option = "FromImage". Images whose storage profile requires an
  # image-defined data disk (the AlienVault USM image expects one, per the
  # "StorageProfile.dataDisks.lun does not have required value(s)" error) need
  # the legacy azurerm_virtual_machine resource instead — confirm against the
  # image's storage profile.
}

# Optional availability set
# Optional availability set — only created when a non-empty name is supplied.
resource "azurerm_availability_set" "avset" {
  count = length(var.availability_set_name) > 0 ? 1 : 0

  name                = var.availability_set_name
  location            = var.location
  resource_group_name = var.resource_group_name
  managed             = true

  # Availability sets backed by managed disks support at most 2 fault domains.
  platform_fault_domain_count = 2
}

# Data disks
# Data disks
# One empty managed disk per entry in var.data_disks, keyed by disk name
# (names must therefore be unique across the list).
resource "azurerm_managed_disk" "disk" {
  for_each = { for data_disk in var.data_disks : data_disk.name => data_disk }

  name                = each.value.name
  location            = var.location
  resource_group_name = var.resource_group_name

  # NOTE(review): hard-coded to "Empty", so any create_option supplied on the
  # data_disks entries (e.g. "FromImage") is silently ignored — confirm this
  # is intentional.
  create_option        = "Empty"
  storage_account_type = each.value.storage_account_type
  disk_size_gb         = each.value.disk_size_gb
}

# Attaches each managed disk created above to a VM.
resource "azurerm_virtual_machine_data_disk_attachment" "data_disk_attach" {
  for_each = {
    for index, data_disk in var.data_disks : data_disk.name => data_disk
  }

  managed_disk_id    = azurerm_managed_disk.disk[each.key].id
  # NOTE(review): values(...)[0] attaches every data disk to an arbitrary
  # "first" VM of the map (keys sorted lexically). This only behaves sensibly
  # when var.virtual_machines has exactly one entry — if the module is ever
  # used with multiple VMs, data_disks needs a per-VM key to target the right
  # machine. Confirm single-VM usage is the intended contract.
  virtual_machine_id = values(azurerm_linux_virtual_machine.vm)[0].id
  lun                = each.value.lun
  caching            = each.value.caching
}

And I'm passing the following in from the calling (root) module:

locals {
  primary_location = "UK South"
  environment      = "dev"
  rg_name          = "rg-temp"
}

module "linux_vm" {
  source = "../"

  location            = local.primary_location
  resource_group_name = local.rg_name
  vm_size             = "Standard_B2ms"
  # Default admin user created on the VM. The original value was "xadmin "
  # with a trailing space inside the string — an invalid/unintended user name.
  admin_username = "xadmin"
  # NOTE(review): hard-coded credential committed to source — move to a
  # sensitive variable or a secret store.
  admin_password        = "TerraPass11."
  availability_set_name = ""
  tags                  = {}

  virtual_machines = {
    "usm-001" = {
      name          = "usm-001"
      computer_name = "usm-001"
      os_disk = [
        {
          name                 = "usm-001-osdisk-001"
          caching              = "None"
          storage_account_type = "StandardSSD_LRS"
          disk_size_gb         = 128
          create_option        = "FromImage"
        }
      ]

      source_image_reference = [
        {
          publisher = "alienvault"
          offer     = "unified-security-management-anywhere"
          sku       = "unified-security-management-anywhere"
          version   = "latest"
        }
      ]

      nic_name = "nic-dev-usm-001"
      ip_configuration = {
        name                          = "usm-001"
        subnet_id                     = "/subscriptions/e286703f-8ba4-4a0d-xxxx-xxxxxxxxxxxx/resourceGroups/shared-networks/providers/Microsoft.Network/virtualNetworks/shared-vnet-10/subnets/1-24"
        private_ip_address_allocation = "Static"
        private_ip_address            = "10.10.1.20"
      }
    }
  }

  data_disks = [
    {
      name                 = "data-disk-001-usm-001"
      caching              = "None"
      create_option        = "Empty"
      storage_account_type = "StandardSSD_LRS"
      disk_size_gb         = 50
      lun                  = 1
    }
  ]
}

But it's complaining about something to do with the storage? (I think it's specifically unhappy about dataDisks?).

│ Error: creating Linux Virtual Machine (Subscription: "e286703f-8ba4-4a0d-xxxx-xxxxxxxxxxxx"
│ Resource Group Name: "rg-temp"
│ Virtual Machine Name: "usm-001"): performing CreateOrUpdate: unexpected status 400 (400 Bad Request) with error: InvalidParameter: StorageProfile.dataDisks.lun does not have required value(s) for image specified in storage profile.
│
│   with module.linux_vm.azurerm_linux_virtual_machine.vm["usm-001"],
│   on ../main.tf line 18, in resource "azurerm_linux_virtual_machine" "vm":
│   18: resource "azurerm_linux_virtual_machine" "vm" {

Running this for/against a standard Ubuntu Marketplace server image selection works correctly, I end up with a Linux VM with x1 OS disk and x1 attached empty data disk.

The marketplace AlienVault image on Azure isn't really giving much info. I tried running the Azure deployment wizard in the UI to see what kind of ARM template it creates, and I'm seeing this as part of the overall template:

"dataDisks1": {
            "value": [
                {
                    "lun": 0,
                    "createOption": "fromImage",
                    "deleteOption": "Detach",
                    "caching": "None",
                    "writeAcceleratorEnabled": false,
                    "id": null,
                    "name": null,
                    "storageAccountType": null,
                    "diskSizeGB": null,
                    "tier": null,
                    "diskEncryptionSet": null
                }
            ]
        },

Why would it need to create an empty data disk using FromImage?

My understanding is that if you set a Terraform Azure data disk create_option to FromImage then you have to supply the image ID — but I don't see any mention of this anywhere in the whole ARM template, or of how to obtain this ID (is it possible to reference it from the source_image_reference sub-block in the main vm block?).

The AlienVault website is sadly void of any Terraform documentation of any kind that I can see.

Has anyone had an experience with this kind of thing before?

Appreciate any guidance anyone might have.

Thanks.


Solution

  • Deploy AlienVault USM from Azure Marketplace using Terraform

    Hello Scott, it seems you already found a solution to your problem; I am just posting it here for the convenience of other folks facing a similar issue on Stack Overflow. Please feel free to add any points or your own input to this if required.

    What you mentioned in the comment is on track: when a vendor publishes a Marketplace image, they may attach a purchase plan that must be specified when the image is deployed. This is commonly handled with a plan block in azurerm_virtual_machine, and it is not included in your azurerm_linux_virtual_machine configuration. Sometimes OS specifications can also cause issues in these cases.

    I tried a demo terraform code as per suggestion such that it can be helpful for the community people who might try to provision the same image VM.

    Configuration:

    # Legacy azurerm_virtual_machine is used here because it supports
    # Marketplace images whose storage profile defines image-created data
    # disks, which azurerm_linux_virtual_machine does not.
    resource "azurerm_virtual_machine" "vm" {
      name                  = "usm-001"
      location              = azurerm_resource_group.rg.location
      resource_group_name   = azurerm_resource_group.rg.name
      network_interface_ids = [azurerm_network_interface.nic.id]
      vm_size               = "Standard_B2ms"

      storage_os_disk {
        name              = "usm-001-osdisk-001"
        caching           = "None"
        create_option     = "FromImage"
        managed_disk_type = "StandardSSD_LRS"
      }

      os_profile {
        computer_name  = "usm-001"
        admin_username = "xadmin"
        admin_password = "TerraPass11."
      } # closing brace was missing in the original snippet, which left
        # os_profile unterminated and nested the blocks below inside it

      os_profile_linux_config {
        disable_password_authentication = false
      }

      storage_image_reference {
        publisher = "alienvault"
        offer     = "unified-security-management-anywhere"
        sku       = "unified-security-management-anywhere"
        version   = "latest"
      }

      # Marketplace purchase-plan details; required for images published with
      # a plan, such as this AlienVault USM listing.
      plan {
        name      = "unified-security-management-anywhere"
        product   = "unified-security-management-anywhere"
        publisher = "alienvault"
      }
    }
    
    
    # Empty managed disk to be attached to the VM after creation.
    resource "azurerm_managed_disk" "data_disk" {
      name                 = "data-disk-001"
      location             = azurerm_resource_group.rg.location
      resource_group_name  = azurerm_resource_group.rg.name
      create_option        = "Empty"
      storage_account_type = "StandardSSD_LRS"
      disk_size_gb         = 50

      # Explicit ordering: create the disk only after the VM exists. Nothing
      # in this resource references the VM, so this depends_on is the only
      # thing enforcing that order.
      depends_on = [ azurerm_virtual_machine.vm ]
    }
    
    # Attach the empty managed disk to the VM on LUN 1. LUN 0 is left free
    # because the AlienVault Marketplace image defines its own data disk at
    # lun 0 in its storage profile (see the ARM template excerpt above).
    resource "azurerm_virtual_machine_data_disk_attachment" "data_disk_attach" {
      managed_disk_id    = azurerm_managed_disk.data_disk.id
      virtual_machine_id = azurerm_virtual_machine.vm.id
      lun                = 1
      caching            = "None"
      # No explicit depends_on needed: the managed_disk_id and
      # virtual_machine_id references already create implicit dependencies
      # on both resources.
    }
    

    Deployment:

    (Screenshots of the successful deployment were attached here in the original post but are not reproduced.)

    Refer:

    azurerm_virtual_machine | Resources | hashicorp/azurerm | Terraform | Terraform Registry