Tags: terraform, terraform-provider-aws, amazon-eks, terraform-provider-helm

Helm Repositories Are Not Getting Created in Terraform


I have a predicament: I am trying to spin up our Helm repositories with Terraform, but they are not getting deployed with the EKS infrastructure. I have tried the depends_on approach, since I am consuming a module, but that does not work either. What am I doing wrong here?

Here is my main.tf

terraform {
  required_version = ">= 1.1.5"

  required_providers {
    # Use the explicit source/version object form for every provider (the
    # bare version-string shorthand for aws is legacy syntax) so all
    # providers are resolved from an explicit registry namespace.
    aws = {
      source  = "hashicorp/aws"
      version = ">= 4.12.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "2.9.0"
    }
  }
}

# Short-lived bearer token for authenticating to the EKS cluster created by
# module.primary.
# NOTE(review): data sources keyed on a module output are only resolvable
# once the cluster exists, which can break provider configuration on a fresh
# apply — consider wiring providers straight from module outputs instead.
data "aws_eks_cluster_auth" "primary" {
  name = module.primary.cluster_id
}

# Connection details (endpoint, cluster CA certificate) for the same cluster;
# the same first-apply ordering caveat as above applies.
data "aws_eks_cluster" "primary" {
  name = module.primary.cluster_id
}

# Remote state backend for this root module. Backend configuration cannot
# reference variables or expressions, so bucket/key/region are hard-coded.
terraform {
  backend "s3" {
    bucket  = "impinj-canary-terraform"
    key     = "terraform-aws-eks-primary.tfstate"
    region  = "us-west-2"
    encrypt = true
  }
}

# Alias of the AWS account Terraform is running against.
# NOTE(review): nothing in this excerpt references this data source — confirm
# it is used elsewhere in the configuration before removing it.
data "aws_iam_account_alias" "current" {}

# Outputs of the Route53 stack (hosted zone IDs consumed by module.primary).
data "terraform_remote_state" "route53" {
  backend = "s3"
  config = {
    bucket = "impinj-canary-terraform"
    key    = "route53.tfstate"
    region = "us-west-2"
  }
}

# Outputs of the S3 stack (bucket ARNs consumed by module.primary).
data "terraform_remote_state" "s3" {
  backend = "s3"
  config = {
    bucket = "impinj-canary-terraform"
    key    = "s3.tfstate"
    region = "us-west-2"
  }
}

# Outputs of the shared VPC stack (private subnet IDs for the node groups).
data "terraform_remote_state" "subnets" {
  backend = "s3"
  config = {
    bucket = "impinj-canary-terraform"
    key    = "vpc-shared.tfstate"
    region = "us-west-2"
  }
}

# Look up the shared VPC by its Name tag. This uses the default (un-aliased)
# aws provider; NOTE(review): no default aws provider block is visible in
# this excerpt — confirm one exists or that region/credentials come from the
# environment.
data "aws_vpc" "shared" {
  filter {
    name   = "tag:Name"
    values = ["shared"]
  }
}

# Aliased AWS provider ("sec") pinned to us-west-2.
# NOTE(review): nothing in this excerpt passes provider = aws.sec to any
# resource or module — verify this alias is actually needed.
provider "aws" {
  alias  = "sec"
  region = "us-west-2"
}

# Kubernetes provider for the EKS cluster. The static token and the exec
# block are both supplied; the exec plugin refreshes credentials at apply
# time via the AWS CLI.
provider "kubernetes" {
  host                   = data.aws_eks_cluster.primary.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.primary.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.primary.token
  exec {
    # client.authentication.k8s.io/v1alpha1 was removed; current AWS CLI
    # `eks get-token` emits v1beta1 ExecCredential objects. This also makes
    # the block consistent with the helm provider configuration below.
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    # This requires the awscli to be installed locally where Terraform is executed
    args = ["eks", "get-token", "--cluster-name", module.primary.cluster_id]
  }
}

# Helm provider, mirroring the kubernetes provider connection settings and
# already using the supported v1beta1 exec API version.
# NOTE(review): host/CA come from data sources that require the cluster to
# already exist, so a fresh apply may fail to configure this provider —
# consider wiring these from module.primary outputs instead.
provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.primary.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.primary.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.primary.token
    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      # This requires the awscli to be installed locally where Terraform is executed
      args    = ["eks", "get-token", "--cluster-name", module.primary.cluster_id]
      command = "aws"
    }
  }
}

##################################
#       KUBERNETES CLUSTER       #
##################################
# EKS cluster built from the local wrapper module two directories up.
module "primary" {
  source = "../../"

  # Kubernetes control-plane version.
  cluster_version = "1.26"

  # these must be set to 'true' on initial deployment and then set
  # to false so that destroy works properly
  create_cni_ipv6_iam_policy = var.create_cni_ipv6_iam_policy
  iam_role_attach_cni_policy = var.iam_role_attach_cni_policy

  # Network placement: shared VPC and its private subnets from remote state.
  vpc_id     = data.aws_vpc.shared.id
  subnet_ids = data.terraform_remote_state.subnets.outputs.private_subnets.*.id

  # Worker node sizing.
  instance_types = ["t2.xlarge"]
  disk_size      = 20

  # Cross-stack inputs: IAM role mappings, Route53 zone, and S3 bucket ARNs.
  aws_auth_roles               = local.aws_auth_roles
  cert_manager_hosted_zone     = data.terraform_remote_state.route53.outputs.account_zone_id
  db_import_bucket_arn         = data.terraform_remote_state.s3.outputs.impinjcanary_test_arn
  external_dns_hosted_zone     = data.terraform_remote_state.route53.outputs.account_zone_id
  rf_probe_reporter_bucket_arn = data.terraform_remote_state.s3.outputs.impinjcanary_test_arn
}

###########################
#       HELM CHARTS       #
###########################
# Prometheus community chart, installed only after the cluster exists.
resource "helm_release" "prometheus" {
  chart      = "prometheus"
  name       = "prometheus"
  repository = "https://prometheus-community.github.io/helm-charts"
  namespace  = "prometheus"
  # Helm does not create the target namespace by default; without this the
  # release fails on a fresh cluster where "prometheus" does not yet exist.
  create_namespace = true
  version          = "22.5.0"
  replace          = true
  cleanup_on_fail  = true
  depends_on       = [module.primary]

  set {
    name  = "podSecurityPolicy.enabled"
    value = true
  }

  set {
    name  = "server.persistentVolume.enabled"
    value = false
  }

  # Nested maps belong in `values`, not `set`: the original
  # set { name = "server\\.resources", value = yamlencode(...) } creates a
  # single literal key named "server.resources" whose value is a YAML
  # *string*, which the chart ignores. yamlencode in `values` produces the
  # real nested structure the chart expects.
  values = [
    yamlencode({
      server = {
        resources = {
          limits = {
            cpu    = "200m"
            memory = "50Mi"
          }
          requests = {
            cpu    = "100m"
            memory = "30Mi"
          }
        }
      }
    })
  ]
}

# Falco runtime-security chart.
resource "helm_release" "falco_security" {
  name       = "falcosecurity"
  repository = "https://falcosecurity.github.io/charts"
  # The chart published in this repository is named "falco"; there is no
  # chart called "falcosecurity", so the original spec failed with
  # "chart not found".
  chart     = "falco"
  namespace = "falco"
  # Create the "falco" namespace on first install instead of failing when it
  # does not already exist.
  create_namespace = true
  version          = "3.1.4"
  replace          = true
  cleanup_on_fail  = true
  depends_on       = [module.primary]
}

I think it has something to do with the provider configuration, but I am not sure. Any help would be much appreciated.


Solution

  • It turns out it was the provider configuration: if you want to use the Helm provider alongside EKS, match it to your kubernetes provider block:

    # Authenticate using the EKS module's own outputs, so the provider
    # configuration is resolvable as soon as the module is planned, with an
    # exec block that fetches a fresh token via the AWS CLI at apply time.
    provider "kubernetes" {
      host                   = module.primary.cluster_endpoint
      cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data)
      token                  = data.aws_eks_cluster_auth.primary.token
      exec {
        api_version = "client.authentication.k8s.io/v1beta1"
        command     = "aws"
        args        = ["eks", "get-token", "--cluster-name", module.primary.cluster_name]
      }
    }
    
    # Same wiring for the helm provider. NOTE(review): this block relies
    # solely on the exec plugin (no static token), so the AWS CLI must be
    # available wherever Terraform runs.
    provider "helm" {
      kubernetes {
        host                   = module.primary.cluster_endpoint
        cluster_ca_certificate = base64decode(module.primary.cluster_certificate_authority_data)
        exec {
          api_version = "client.authentication.k8s.io/v1beta1"
          args        = ["eks", "get-token", "--cluster-name", module.primary.cluster_name]
          command     = "aws"
        }
      }
    }
    

    You need both blocks in order to use both providers; then use depends_on so the Helm releases are created only after the EKS cluster is deployed.