terraform

Terraform AWS EC2 Instance

Terraform configuration for an AWS EC2 instance with VPC, security groups, SSH access, and user data bootstrapping.

Overview

A Terraform configuration for launching an AWS EC2 instance with a proper VPC setup, security groups, SSH key pair, and bootstrap scripting. Suitable for web servers, application hosts, or bastion instances.

Configuration

# main.tf

# Pin the Terraform CLI and AWS provider versions so runs are reproducible
# across machines and CI.
terraform {
  required_version = ">= 1.5"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"               # any 5.x release of the AWS provider
    }
  }
}

provider "aws" {
  region = var.aws_region

  # default_tags are merged into every taggable resource created by this
  # provider, so individual resources below only need to set their Name tag.
  default_tags {
    tags = {
      Environment = var.environment
      ManagedBy   = "terraform"
      Project     = var.project_name
    }
  }
}

# ── Data Sources ──
# Fetch the latest Amazon Linux 2023 AMI.
# The name pattern is anchored on "al2023-ami-2023*" so the wildcard cannot
# also match the "al2023-ami-minimal-*" images, which the broader pattern
# "al2023-ami-*-x86_64" would pick up nondeterministically.
data "aws_ami" "amazon_linux" {
  most_recent = true
  owners      = ["amazon"]

  filter {
    name   = "name"
    values = ["al2023-ami-2023*-x86_64"]   # standard (non-minimal) Amazon Linux 2023
  }

  filter {
    name   = "virtualization-type"
    values = ["hvm"]
  }
}

# Get default VPC (or use a custom one).
# Relies on the account/region having a default VPC; the lookup fails if it
# was deleted — see "Common Modifications" for a custom-VPC setup.
data "aws_vpc" "default" {
  default = true
}

# All subnets in the default VPC; the instance below uses the first ID
# returned (ordering is not guaranteed by AWS).
data "aws_subnets" "default" {
  filter {
    name   = "vpc-id"
    values = [data.aws_vpc.default.id]
  }
}

# ── Security Group ──
# Uses name_prefix + create_before_destroy instead of a fixed name: when a
# change forces the SG to be replaced while still attached to the instance,
# a fixed name collides with the not-yet-destroyed group and the apply fails.
resource "aws_security_group" "instance" {
  name_prefix = "${var.project_name}-${var.environment}-sg-"
  description = "Security group for EC2 instance"
  vpc_id      = data.aws_vpc.default.id

  # SSH access (restrict to your IP in production)
  ingress {
    description = "SSH"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = [var.ssh_cidr]       # e.g., "203.0.113.0/32"
  }

  # HTTP access
  ingress {
    description = "HTTP"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # HTTPS access
  ingress {
    description = "HTTPS"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"                 # All protocols
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Create the replacement group before destroying the old one so the
  # instance is never left without a security group.
  lifecycle {
    create_before_destroy = true
  }

  tags = {
    Name = "${var.project_name}-${var.environment}-sg"
  }
}

# ── SSH Key Pair ──
# Registers the operator-supplied PUBLIC key with EC2; the matching private
# key never leaves the operator's machine and is not managed by Terraform.
resource "aws_key_pair" "deployer" {
  key_name   = "${var.project_name}-${var.environment}-key"
  public_key = var.ssh_public_key      # Your SSH public key
}

# ── EC2 Instance ──
resource "aws_instance" "main" {
  ami                    = data.aws_ami.amazon_linux.id
  instance_type          = var.instance_type
  key_name               = aws_key_pair.deployer.key_name
  vpc_security_group_ids = [aws_security_group.instance.id]
  subnet_id              = data.aws_subnets.default.ids[0]

  # Require IMDSv2 (session-token metadata requests) — mitigates SSRF-style
  # credential theft via the instance metadata endpoint.
  metadata_options {
    http_endpoint = "enabled"
    http_tokens   = "required"
  }

  # Root volume configuration
  root_block_device {
    volume_size           = 20         # GB
    volume_type           = "gp3"      # General Purpose SSD v3
    encrypted             = true       # Encrypt at rest
    delete_on_termination = true
  }

  # Bootstrap script — runs once on first boot via cloud-init.
  # With `set -euo pipefail`, any failing command aborts the whole script,
  # so /var/www/html must be created before the health file is written:
  # a fresh AL2023 image ships no web server, so the directory does not exist.
  user_data = <<-EOF
    #!/bin/bash
    set -euo pipefail

    # Update system packages
    dnf update -y

    # Install common tools
    dnf install -y docker git

    # Start and enable Docker
    systemctl start docker
    systemctl enable docker
    usermod -aG docker ec2-user

    # Write a simple health check (create the directory first — it is not
    # pre-created on AL2023)
    mkdir -p /var/www/html
    echo "OK" > /var/www/html/health
  EOF

  # Enable detailed (1-minute) CloudWatch monitoring
  monitoring = true

  # Prevent accidental termination via console/API in production only
  disable_api_termination = var.environment == "prod"

  tags = {
    Name = "${var.project_name}-${var.environment}"
  }
}

# ── Elastic IP (static public IP) ──
# Attached directly via the `instance` argument so the public IP survives
# stop/start cycles.
# NOTE(review): a separate aws_eip_association resource would decouple the
# EIP's lifecycle from the instance — confirm before refactoring.
resource "aws_eip" "main" {
  instance = aws_instance.main.id
  domain   = "vpc"                     # VPC-scoped EIP

  tags = {
    Name = "${var.project_name}-${var.environment}-eip"
  }
}
# variables.tf

# Region every resource in this configuration is created in.
variable "aws_region" {
  description = "AWS region"
  type        = string
  default     = "us-east-1"
}

# Deployment environment. Validated because other resources key behavior
# off the exact string "prod" (e.g. disable_api_termination), so a typo
# like "production" would silently disable termination protection.
variable "environment" {
  description = "Environment (dev, staging, prod)"
  type        = string
  default     = "dev"

  validation {
    condition     = contains(["dev", "staging", "prod"], var.environment)
    error_message = "environment must be one of: dev, staging, prod."
  }
}

# Used as the prefix in every resource name and tag; no default on purpose
# so each deployment must choose an explicit name.
variable "project_name" {
  description = "Project name for resource naming"
  type        = string
}

variable "instance_type" {
  description = "EC2 instance type"
  type        = string
  default     = "t3.micro"            # Free tier eligible
}

# OpenSSH-format public key material (e.g. "ssh-ed25519 AAAA..."); registered
# with EC2 by aws_key_pair.deployer. The private half stays with the operator.
variable "ssh_public_key" {
  description = "SSH public key for instance access"
  type        = string
}

# Source range for the SSH ingress rule. Validated at plan time so a
# malformed CIDR fails fast instead of erroring during apply inside the
# security-group rule.
variable "ssh_cidr" {
  description = "CIDR block allowed for SSH access"
  type        = string
  default     = "0.0.0.0/0"           # Restrict in production!

  validation {
    condition     = can(cidrhost(var.ssh_cidr, 0))
    error_message = "ssh_cidr must be a valid CIDR block, e.g. 203.0.113.0/32."
  }
}
# outputs.tf

output "instance_id" {
  description = "EC2 instance ID"
  value       = aws_instance.main.id
}

# Static address from the EIP (not the instance's ephemeral public_ip).
output "public_ip" {
  description = "Elastic IP address"
  value       = aws_eip.main.public_ip
}

# Convenience string only — "~/.ssh/key.pem" is a placeholder for the path
# to the private key matching var.ssh_public_key.
output "ssh_command" {
  description = "SSH command to connect"
  value       = "ssh -i ~/.ssh/key.pem ec2-user@${aws_eip.main.public_ip}"
}

Key Options Explained

  • data "aws_ami" — Dynamically fetches the latest Amazon Linux 2023 AMI, ensuring you always use a patched image without hardcoding AMI IDs.
  • gp3 volume — Third-generation General Purpose SSD provides a 3,000 IOPS baseline for free, independent of volume size (vs. gp2’s 3 IOPS/GB scaling with a 100 IOPS minimum). Better performance at lower cost.
  • user_data — Shell script that runs once on first boot via cloud-init. Useful for installing packages, starting services, and basic configuration.
  • disable_api_termination — Enabled only for production. Prevents accidental instance termination via the AWS console or API.
  • Elastic IP — Provides a static public IP that persists through stop/start cycles. Without it, the public IP changes every time the instance restarts.
  • Security group SSH CIDR — Always restrict SSH to known IP ranges in production. 0.0.0.0/0 allows access from anywhere.

Common Modifications

  • Use a custom VPC: Replace data.aws_vpc.default with aws_vpc and aws_subnet resources for proper network isolation.
  • Add an IAM role: Create aws_iam_instance_profile for the instance to access AWS services (S3, SSM) without static credentials.
  • SSM Session Manager: Install the SSM agent and use aws ssm start-session instead of SSH for keyless, audited access.
  • Auto Scaling Group: Replace the single instance with aws_launch_template + aws_autoscaling_group for high availability.
  • Spot instances: Add instance_market_options { market_type = "spot" } for up to 90% cost savings on interruptible workloads.