1
0
Fork 0

initial import of terraform files

trunk
Thomas 2 years ago
parent 8b7ae08718
commit b8173f110b

@ -0,0 +1,3 @@
# README
This folder holds the Terraform infrastructure content for the course. See the course materials for details on how to use it.

@ -0,0 +1,102 @@
locals {
  // Hostname of the bastion VM, derived from the configurable base name.
  hostname = "${var.bastion_name}-bastion"
}
// Service account dedicated to the Bastion instance.
resource "google_service_account" "bastion" {
  account_id   = "${var.bastion_name}-bastion-sa"
  display_name = "GKE Bastion Service Account"
}
// Ingress firewall rule permitting SSH to instances tagged "bastion".
resource "google_compute_firewall" "bastion-ssh" {
  name      = "${var.bastion_name}-bastion-ssh"
  project   = var.project_id
  network   = var.network_name
  direction = "INGRESS"

  source_ranges = ["0.0.0.0/0"] // TODO: Restrict further.
  target_tags   = ["bastion"]

  allow {
    protocol = "tcp"
    ports    = ["22"]
  }
}
// The user-data script on Bastion instance provisioning.
// Installs tinyproxy so the host can forward HTTP(S) traffic (used by the
// kubectl proxy outputs).
// NOTE(review): the hashicorp/template provider is deprecated; since no
// template variables are interpolated here, this could be replaced by a
// plain local value or the built-in templatefile() function.
data "template_file" "startup_script" {
template = <<-EOF
sudo apt-get update -y
sudo apt-get install -y tinyproxy
EOF
}
// The Bastion host: a small, shielded Debian VM with an ephemeral external
// IP that acts as an SSH jump host / tinyproxy endpoint for the private
// cluster.
resource "google_compute_instance" "bastion" {
  name         = local.hostname
  machine_type = "e2-micro"
  zone         = var.zone
  project      = var.project_id
  tags         = ["bastion"]

  boot_disk {
    initialize_params {
      image = "debian-cloud/debian-10"
    }
  }

  shielded_instance_config {
    enable_secure_boot          = true
    enable_vtpm                 = true
    enable_integrity_monitoring = true
  }

  // Install tinyproxy on startup.
  metadata_startup_script = data.template_file.startup_script.rendered

  network_interface {
    subnetwork = var.subnet_name
    access_config {
      // Not setting "nat_ip", use an ephemeral external IP.
      network_tier = "STANDARD"
    }
  }

  // Allow the instance to be stopped by Terraform when updating configuration.
  allow_stopping_for_update = true

  service_account {
    email  = google_service_account.bastion.email
    scopes = ["cloud-platform"]
  }

  /* local-exec provisioners may run before the host has fully initialized.
  However, they are run sequentially in the order they were defined.
  This provisioner is used to block the subsequent provisioners until the
  instance is available. */
  provisioner "local-exec" {
    // Fixes vs. the original:
    //  - ssh into ${var.zone} (where the instance actually lives) instead of
    //    the hard-coded "${var.region}-a", which breaks for any other zone;
    //  - 'terraform apply' is quoted with single quotes — the original used
    //    backticks inside a double-quoted echo, which the shell would treat
    //    as command substitution and recursively run terraform;
    //  - POSIX "[ -z ... ]" instead of the bash-only "[[ ]]", since
    //    local-exec runs under /bin/sh by default.
    command = <<-EOF
      READY=""
      for i in $(seq 1 20); do
        if gcloud compute ssh ${local.hostname} --project ${var.project_id} --zone ${var.zone} --command uptime; then
          READY="yes"
          break;
        fi
        echo "Waiting for ${local.hostname} to initialize..."
        sleep 10;
      done
      if [ -z "$READY" ]; then
        echo "${local.hostname} failed to start in time."
        echo "Please verify that the instance starts and then re-run 'terraform apply'"
        exit 1
      fi
    EOF
  }

  scheduling {
    preemptible       = true
    automatic_restart = false
  }
}

@ -0,0 +1,14 @@
output "ip" {
  // Use index syntax instead of the legacy ".0." attribute access.
  // "network_ip" is the instance's internal (VPC) address — the one the
  // root module whitelists for control-plane access.
  value       = google_compute_instance.bastion.network_interface[0].network_ip
  description = "The IP address of the Bastion instance."
}
// Ready-made gcloud command that opens an SSH session with a local port
// forward (8888) to the tinyproxy running on the bastion.
output "ssh" {
  description = "GCloud ssh command to connect to the Bastion instance."
  value = format(
    "gcloud compute ssh %s --project %s --zone %s -- -L8888:127.0.0.1:8888",
    google_compute_instance.bastion.name,
    var.project_id,
    google_compute_instance.bastion.zone,
  )
}
// kubectl invocation that routes API traffic through the local tunnel on
// 127.0.0.1:8888 (opened by the "ssh" output's port forward).
output "kubectl_command" {
description = "kubectl command using the local proxy once the Bastion ssh command is running."
value = "HTTPS_PROXY=localhost:8888 kubectl"
}

@ -0,0 +1,29 @@
// Input variables for the bastion module.
variable "project_id" {
type = string
description = "The project ID to host the network in."
}
variable "region" {
type = string
description = "The region to use"
}
variable "zone" {
type = string
description = "The zone where the Bastion host is located in."
}
variable "bastion_name" {
type = string
description = "The name to use for the bastion instance."
}
variable "network_name" {
type = string
description = "The name of the network that should be used."
}
variable "subnet_name" {
type = string
description = "The name of the subnet that should be used."
}

@ -0,0 +1,120 @@
// Private regional GKE cluster for the application workloads.
resource "google_container_cluster" "app_cluster" {
  name     = var.cluster_name
  location = var.region

  # We can't create a cluster with no node pool defined, but we want to only use
  # separately managed node pools. So we create the smallest possible default
  # node pool and immediately delete it.
  # (Fixed: the original used initial_node_count = 2, which contradicts
  # "smallest possible"; 1 is sufficient since the pool is removed anyway.)
  remove_default_node_pool = true
  initial_node_count       = 1

  ip_allocation_policy {
    cluster_ipv4_cidr_block  = var.pods_ipv4_cidr_block
    services_ipv4_cidr_block = var.services_ipv4_cidr_block
  }

  network    = var.network_name
  subnetwork = var.subnet_name

  logging_service    = "logging.googleapis.com/kubernetes"
  monitoring_service = "monitoring.googleapis.com/kubernetes"

  maintenance_policy {
    daily_maintenance_window {
      start_time = "02:00"
    }
  }

  // Only whitelist a control-plane CIDR block when one was supplied.
  dynamic "master_authorized_networks_config" {
    for_each = var.authorized_ipv4_cidr_block != null ? [var.authorized_ipv4_cidr_block] : []
    content {
      cidr_blocks {
        cidr_block   = master_authorized_networks_config.value
        display_name = "External Control Plane access"
      }
    }
  }

  // Fully private cluster: nodes get no public IPs and the control-plane
  // endpoint is private, reachable only from the VPC (e.g. via the bastion).
  private_cluster_config {
    enable_private_endpoint = true
    enable_private_nodes    = true
    master_ipv4_cidr_block  = var.master_ipv4_cidr_block
  }

  release_channel {
    channel = "STABLE"
  }

  addons_config {
    // Enable network policy (Calico)
    network_policy_config {
      disabled = false
    }
  }

  /* Enable network policy configurations (like Calico).
  For some reason this has to be in here twice. */
  network_policy {
    // Fixed: use a real bool instead of the string "true".
    enabled = true
  }

  // Workload Identity lets Kubernetes service accounts impersonate GCP
  // service accounts.
  workload_identity_config {
    workload_pool = format("%s.svc.id.goog", var.project_id)
  }
}
// Autoscaled Linux node pool for the app cluster (1–2 preemptible nodes).
resource "google_container_node_pool" "app_cluster_linux_node_pool" {
  name           = "${google_container_cluster.app_cluster.name}--linux-node-pool"
  location       = google_container_cluster.app_cluster.location
  node_locations = var.node_zones
  cluster        = google_container_cluster.app_cluster.name

  // Fixed: the original set "node_count = 1" together with "autoscaling".
  // Per the provider docs, node_count should not be used alongside
  // autoscaling — Terraform would keep resetting whatever the autoscaler
  // chose, producing a permanent diff. "initial_node_count" only seeds the
  // pool and is thereafter ignored.
  initial_node_count = 1
  autoscaling {
    min_node_count = 1
    max_node_count = 2
  }

  max_pods_per_node = 100

  management {
    auto_repair  = true
    auto_upgrade = true
  }

  node_config {
    preemptible     = true
    disk_size_gb    = 10
    service_account = var.service_account

    oauth_scopes = [
      "https://www.googleapis.com/auth/devstorage.read_only",
      "https://www.googleapis.com/auth/logging.write",
      "https://www.googleapis.com/auth/monitoring",
      "https://www.googleapis.com/auth/servicecontrol",
      "https://www.googleapis.com/auth/service.management.readonly",
      "https://www.googleapis.com/auth/trace.append",
    ]

    labels = {
      cluster = google_container_cluster.app_cluster.name
    }

    shielded_instance_config {
      enable_secure_boot = true
    }

    // Enable workload identity on this node pool.
    workload_metadata_config {
      mode = "GKE_METADATA"
    }

    metadata = {
      // Set metadata on the VM to supply more entropy.
      google-compute-enable-virtio-rng = "true"
      // Explicitly remove GCE legacy metadata API endpoint.
      disable-legacy-endpoints = "true"
    }
  }

  upgrade_settings {
    max_surge       = 1
    max_unavailable = 1
  }
}

@ -0,0 +1,8 @@
// Outputs of the kubernetes_cluster module.
output "name" {
value = google_container_cluster.app_cluster.name
description = "The Kubernetes cluster name."
}
output "id" {
description = "Id of the cluster to be used for other parts of project"
value = google_container_cluster.app_cluster.id
}

@ -0,0 +1,55 @@
// Input variables for the kubernetes_cluster module.

variable "project_id" {
  type        = string
  description = "The project ID to host the network in"
}

variable "region" {
  type        = string
  description = "The region to use"
}

variable "node_zones" {
  type        = list(string)
  description = "The zones where worker nodes are located"
}

variable "network_name" {
  type        = string
  description = "The name of the app VPC"
}

variable "subnet_name" {
  type        = string
  description = "The name of the app subnet"
}

variable "service_account" {
  type        = string
  description = "The service account to use"
}

variable "pods_ipv4_cidr_block" {
  type        = string
  description = "The CIDR block to use for pod IPs"
}

variable "services_ipv4_cidr_block" {
  type        = string
  description = "The CIDR block to use for the service IPs"
}

variable "authorized_ipv4_cidr_block" {
  type        = string
  description = "The CIDR block where HTTPS access is allowed from"
  default     = null
}

variable "master_ipv4_cidr_block" {
  type        = string
  description = "The /28 CIDR block to use for the master IPs"

  // The description already demands a /28 (the size GKE requires for the
  // private control-plane range); enforce it at plan time instead of
  // failing at cluster creation.
  validation {
    condition     = can(regex("/28$", var.master_ipv4_cidr_block)) && can(cidrhost(var.master_ipv4_cidr_block, 0))
    error_message = "The master_ipv4_cidr_block value must be a valid /28 CIDR range."
  }
}

variable "cluster_name" {
  type        = string
  description = "The name of the cluster to create"
}

@ -0,0 +1,58 @@
terraform {
  // required_providers with source addresses needs Terraform 0.13+.
  required_version = ">= 0.13"

  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "3.90.0"
    }
    // The registry module declares its resources against the google-beta
    // provider; pin it to the same version as the GA provider so the two
    // cannot drift apart.
    google-beta = {
      source  = "hashicorp/google-beta"
      version = "3.90.0"
    }
  }
}
// Default GA provider, authenticated with a service-account key file.
provider "google" {
  credentials = file(var.credentials_file_path)
  project     = var.project_id
  region      = var.region
  zone        = var.main_zone
}

// The registry module uses the google-beta provider; without an explicit
// configuration it would run with an empty (credential-less) config, so
// mirror the GA provider's settings here.
provider "google-beta" {
  credentials = file(var.credentials_file_path)
  project     = var.project_id
  region      = var.region
  zone        = var.main_zone
}
// VPC, subnet, Cloud Router and NAT shared by the cluster and the bastion.
module "google_networks" {
source = "./networks"
project_id = var.project_id
region = var.region
}
// Private GKE cluster. The bastion's internal IP is the only CIDR allowed
// to reach the control plane (see authorized_ipv4_cidr_block below).
module "google_kubernetes_cluster" {
source = "./kubernetes_cluster"
project_id = var.project_id
region = var.region
node_zones = var.cluster_node_zones
service_account = var.service_account
cluster_name = var.cluster_name
network_name = module.google_networks.network.name
subnet_name = module.google_networks.subnet.name
master_ipv4_cidr_block = module.google_networks.cluster_master_ip_cidr_range
pods_ipv4_cidr_block = module.google_networks.cluster_pods_ip_cidr_range
services_ipv4_cidr_block = module.google_networks.cluster_services_ip_cidr_range
authorized_ipv4_cidr_block = "${module.bastion.ip}/32"
}
// SSH jump host / proxy used to reach the private cluster endpoint.
module "bastion" {
source = "./bastion"
project_id = var.project_id
region = var.region
zone = var.main_zone
bastion_name = var.cluster_name
network_name = module.google_networks.network.name
subnet_name = module.google_networks.subnet.name
}
// Artifact Registry (Docker) repository for the example application.
module "example_app_registry" {
source = "./registry"
region = var.region
app_name = "example-app"
project_id = var.project_id
}

@ -0,0 +1,53 @@
locals {
// Base name shared by the VPC and resources derived from it.
network_name = "kubernetes-cluster"
subnet_name = "${google_compute_network.vpc.name}--subnet"
// Non-overlapping secondary ranges consumed by the kubernetes_cluster
// module (exported via outputs.tf): /28 for the private control plane,
// /16 each for pods and services.
cluster_master_ip_cidr_range = "10.100.100.0/28"
cluster_pods_ip_cidr_range = "10.101.0.0/16"
cluster_services_ip_cidr_range = "10.102.0.0/16"
}
// Custom-mode VPC. Default routes are dropped on creation so that egress
// is only possible through the explicit route defined below.
resource "google_compute_network" "vpc" {
name = local.network_name
auto_create_subnetworks = false
routing_mode = "GLOBAL"
delete_default_routes_on_create = true
}
// Primary subnet for cluster nodes and the bastion. Private Google Access
// lets instances without external IPs reach Google APIs.
resource "google_compute_subnetwork" "subnet" {
name = local.subnet_name
ip_cidr_range = "10.10.0.0/16"
region = var.region
network = google_compute_network.vpc.name
private_ip_google_access = true
}
// Re-create the default internet egress route that was suppressed by
// delete_default_routes_on_create on the VPC.
resource "google_compute_route" "egress_internet" {
name = "egress-internet"
dest_range = "0.0.0.0/0"
network = google_compute_network.vpc.name
next_hop_gateway = "default-internet-gateway"
}
// Cloud Router in the subnet's region; required by the NAT gateway below.
resource "google_compute_router" "router" {
  name    = format("%s-router", local.network_name)
  region  = google_compute_subnetwork.subnet.region
  network = google_compute_network.vpc.name
}
// Cloud NAT so the private cluster nodes (no external IPs) can reach the
// internet for image pulls and updates. NAT is scoped to the one subnet.
resource "google_compute_router_nat" "nat_router" {
name = "${google_compute_subnetwork.subnet.name}-nat-router"
router = google_compute_router.router.name
region = google_compute_router.router.region
nat_ip_allocate_option = "AUTO_ONLY"
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
subnetwork {
name = google_compute_subnetwork.subnet.name
source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
}
// Only log failed NAT allocations, not every connection.
log_config {
enable = true
filter = "ERRORS_ONLY"
}
}

@ -0,0 +1,24 @@
// Outputs of the networks module. The first two export the whole resource
// objects; callers pick the attributes they need (e.g. ".name").
output "network" {
value = google_compute_network.vpc
description = "The VPC"
}
output "subnet" {
value = google_compute_subnetwork.subnet
description = "The subnet"
}
output "cluster_master_ip_cidr_range" {
value = local.cluster_master_ip_cidr_range
description = "The CIDR range to use for Kubernetes cluster master"
}
output "cluster_pods_ip_cidr_range" {
value = local.cluster_pods_ip_cidr_range
description = "The CIDR range to use for Kubernetes cluster pods"
}
output "cluster_services_ip_cidr_range" {
value = local.cluster_services_ip_cidr_range
description = "The CIDR range to use for Kubernetes cluster services"
}

@ -0,0 +1,9 @@
// Input variables for the networks module.
variable "project_id" {
type = string
description = "The project ID to host the network in"
}
variable "region" {
type = string
description = "The region to use"
}

@ -0,0 +1,9 @@
// Convenience commands for the operator, composed from the bastion module's
// outputs. "-f tail -f /dev/null" keeps the SSH tunnel open in the
// background without an interactive shell.
output "bastion_open_tunnel_command" {
description = "Command that opens an SSH tunnel to the Bastion instance."
value = "${module.bastion.ssh} -f tail -f /dev/null"
}
output "kubectl_alias_command" {
description = "Command that creates an alias for kubectl using Bastion as proxy. Bastion ssh tunnel must be running."
value = "alias kube='${module.bastion.kubectl_command}'"
}

@ -0,0 +1,9 @@
// Docker repository in Artifact Registry for the application images.
// NOTE(review): this targets the google-beta provider, so a configured
// "google-beta" provider (with credentials) must exist in the root module.
resource "google_artifact_registry_repository" "docker_app" {
provider = google-beta
project = var.project_id
location = var.region
repository_id = var.app_name
description = "docker repository"
format = "DOCKER"
}

@ -0,0 +1,4 @@
// Fully-qualified resource id of the Artifact Registry repository.
output "registry" {
value = google_artifact_registry_repository.docker_app.id
description = "registry address"
}

@ -0,0 +1,14 @@
// Input variables for the registry module.
variable "region" {
type = string
description = "The region to use"
}
variable "app_name" {
type = string
description = "The application name"
}
variable "project_id" {
type = string
description = "The ID of the project to create resources in"
}

@ -0,0 +1,7 @@
# Deployment values — presumably placeholders for the course; replace the
# project, service account, and credentials path with your own before
# running terraform apply.
project_id = "my-project-113346"
credentials_file_path = "./service-account-credentials.json"
service_account = "my-project@my-project-113346.iam.gserviceaccount.com"
cluster_name = "app-cluster"
region = "europe-west3"
main_zone = "europe-west3-b"
cluster_node_zones = ["europe-west3-b"]

@ -0,0 +1,33 @@
// Input variables for the root module (values supplied via terraform.tfvars).
variable "project_id" {
type = string
description = "The ID of the project to create resources in"
}
variable "region" {
type = string
description = "The region to use"
}
variable "main_zone" {
type = string
description = "The zone to use as primary"
}
variable "cluster_node_zones" {
type = list(string)
description = "The zones where Kubernetes cluster worker nodes should be located"
}
variable "service_account" {
type = string
description = "The GCP service account"
}
variable "cluster_name" {
type = string # app-cluster
description = "The name of the cluster"
}
variable "credentials_file_path" {
type = string
description = "The credentials JSON file used to authenticate with GCP"
}
Loading…
Cancel
Save