mongodbamazon-web-servicessslterraformtls1.2

How can I create a TLS/SSL connection to a mongodb instance on AWS with a certificate made by certificate manager? Health check failed


I am trying to deploy a publicly accessible MongoDB instance on AWS, I have a terraform config to deploy this:

terraform {
  backend "s3" {
    bucket  = "terraform-state"
    key     = "mongodb/terraform.tfstate"
    region  = "eu-west-2"
    encrypt = "true"
  }
}

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.78.0"
    }
  }
  required_version = ">= 0.14.5"
}

provider "aws" {
  region = "eu-west-2" # Specify your desired AWS region

  default_tags {
    tags = {
      CreatedBy = "Tom McLean"
      Terraform = "true"
    }
  }
}

resource "aws_security_group" "alb_sg" {
  name_prefix = "mongodb-alb-sg-"
  vpc_id      = var.vpc_id
  ingress {
    from_port   = 27017
    to_port     = 27017
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_security_group" "lb_sg" {
  name_prefix = "mongodb-lb-sg"
  vpc_id = var.vpc_id
  ingress {
    from_port = 27017
    to_port = 27017
    protocol = "tcp"
    security_groups = [aws_security_group.alb_sg.id]
  }

  egress {
    from_port = 0
    to_port = 0
    protocol = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_security_group" "mongodb_sg" {
  name        = "mongodb-security-group"
  description = "Security group for MongoDB"
  vpc_id = var.vpc_id

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 27017
    to_port     = 27017
    protocol    = "tcp"
    security_groups = [aws_security_group.lb_sg.id]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = -1
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_instance" "mongodb" {
  ami             = "ami-091f18e98bc129c4e" # Ubuntu
  instance_type   = var.instance_type
  vpc_security_group_ids = [aws_security_group.mongodb_sg.id]
  subnet_id = element(var.subnet_ids, 0)
  user_data = templatefile("${path.module}/user-data.sh", {
    device_name    = var.device_name,
    MONGO_USER     = var.mongo_user,
    MONGO_PASSWORD = var.mongo_password
    MONGO_VERSION = var.mongo_version
  })
  user_data_replace_on_change = true

  root_block_device {
    volume_size = 32
    volume_type = "gp2"
    delete_on_termination = false
  }

  tags = {
    Name = "MongoDB"
  }
}

resource "aws_ebs_volume" "mongodb_volume" {
  availability_zone = aws_instance.mongodb.availability_zone
  size              = var.ebs_size
  type              = "gp2"
  tags = {
    Name = "MongoDB-EBS"
  }
}

resource "aws_volume_attachment" "mongodb_attachment" {
  device_name = var.device_name
  volume_id   = aws_ebs_volume.mongodb_volume.id
  instance_id = aws_instance.mongodb.id
}

resource "aws_lb" "mongodb_lb" {
  name = "mongodb-lb"
  internal = false
  load_balancer_type = "network"
  security_groups = [aws_security_group.alb_sg.id]
  subnets = var.subnet_ids
}

resource "aws_lb_target_group" "mongodb_tg" {
  name = "mongodb-tg"
  port = 27017
  protocol = "TCP"
  vpc_id = var.vpc_id
  target_type = "instance"

  health_check {
    healthy_threshold   = 3
    interval            = 30
    port                = 27017
    protocol            = "TCP"
    timeout             = 5
    unhealthy_threshold = 3
  }

  tags = {
    Name = "MongoDB-tg"
  }
}

resource "aws_lb_target_group_attachment" "mongodb_attachment" {
  target_group_arn = aws_lb_target_group.mongodb_tg.arn
  target_id = aws_instance.mongodb.id
  port = 27017
}

resource "aws_lb_listener" "mongodb_listener" {
  load_balancer_arn = aws_lb.mongodb_lb.arn
  protocol = "TLS"
  port = 27017
  ssl_policy = "ELBSecurityPolicy-2016-08"
  certificate_arn = var.certificate_arn

  default_action {
    type = "forward"
    target_group_arn = aws_lb_target_group.mongodb_tg.arn
  }
}

resource "aws_route53_record" "mongodb_dns" {
  zone_id = data.aws_route53_zone.main.zone_id
  name = "${var.domain_prefix}.${var.domain_name}"
  type = "A"

  alias {
    name = aws_lb.mongodb_lb.dns_name
    zone_id = aws_lb.mongodb_lb.zone_id
    evaluate_target_health = true
  }
}

data "aws_route53_zone" "main" {
    name = "${var.domain_name}"
    private_zone = false
}

With user-data.sh being:

#!/bin/bash

# Install docker
apt-get update -y
apt-get install -y docker.io
systemctl start docker
usermod -aG docker ubuntu

DEVICE="${device_name}"
MOUNT_POINT="/var/lib/mongodb"

# Wait for the device to become available
while [ ! -e "$DEVICE" ]; do
    echo "$DEVICE not yet available, waiting..."
    sleep 5
done

mkdir -p $MOUNT_POINT

# Format the volume if it's not formatted yet
if ! blkid $DEVICE | grep ext4 > /dev/null; then
    mkfs.ext4 $DEVICE
fi

mount $DEVICE $MOUNT_POINT

# Ensure the volume mounts automatically after reboot
echo "$DEVICE $MOUNT_POINT ext4 defaults,nofail,x-systemd.device-timeout=10s 0 2" >> /etc/fstab

resize2fs $DEVICE

chown -R 999:999 $MOUNT_POINT

# Create a systemd service for MongoDB
cat > /etc/systemd/system/mongodb.service <<EOL
[Unit]
Description=MongoDB container
After=docker.service
Requires=docker.service

[Service]
Restart=always
ExecStartPre=-/usr/bin/docker rm -f mongodb
ExecStart=/usr/bin/docker run --name=mongodb \
  -p 27017:27017 \
  -v $MOUNT_POINT:/data/db \
  -e MONGO_INITDB_ROOT_USERNAME=${MONGO_USER} \
  -e MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD} \
  --user 999:999 \
  mongo:${MONGO_VERSION}
ExecStop=/usr/bin/docker stop mongodb

[Install]
WantedBy=multi-user.target
EOL

# Enable and start MongoDB systemd service
systemctl daemon-reload
sysemctl enable mongodb
systemctl start mongodb

I can see that the instance is running the mongodb docker container, and listening on port 27017:

ubuntu@ip:~$ docker container ls
CONTAINER ID   IMAGE          COMMAND                  CREATED          STATUS          PORTS                                           NAMES
49e5ed319c76   mongo:latest   "docker-entrypoint.s…"   10 minutes ago   Up 10 minutes   0.0.0.0:27017->27017/tcp, :::27017->27017/tcp   mongodb

However the health check is failing: enter image description here

The alternative solutions i've considered are to use an application load balancer, but they dont work on TCP connections and I've considered doing the certificate stuff on the mongo instance it's self, however I cant download the .pem file for my domain certificate as it is issued by amazon certificate manager. How can I do a TLS/SSL connection to my mongodb instance in AWS?


Solution

  • You have two security groups alb_sg and lb_sg that have the same security group rules. Then you have a mongodb_sg security group that only allows MongoDB traffic from the lb_sg security group.

    You are assigning alb_sg to the network load balancer, and you are assigning mongodb_sg to the EC2 instance. So the way you have it currently configured, the load balancer, using alb_sg can't connect to the EC2 instance, which is using mongodb_sg, because the EC2 instance only allows traffic from lb_sg, which isn't assigned to anything.

    You need to delete either alb_sg or lb_sg to clear up the confusion, and make sure your EC2 security group allows incoming requests from the security group you actually have assigned to the load balancer.