# Name of the S3 bucket that hosts the public OIDC discovery documents.
variable "bucket_name" {
  type        = string
  description = "S3 bucket that serves the OIDC discovery document and JWKS."
}

# Used to namespace the Secrets Manager secret holding the signing key.
variable "cluster_name" {
  type        = string
  description = "Name of the Kubernetes/Talos cluster this signing key belongs to."
}

# Prefix for the Secrets Manager secret name (e.g. an org/environment path).
variable "secret_prefix" {
  type        = string
  description = "Prefix prepended to the cluster name to form the secret's name."
}
# Derived values for the S3-hosted OIDC issuer: the issuer host/path, the
# discovery document, and the JWKS describing the signing key's public half.
locals {
# Path-style S3 host/path used as the OIDC issuer URL.
# NOTE(review): `data.aws_region.current.region` is the AWS provider v6+
# attribute; on v5 and earlier the same value is exposed as `.name` —
# confirm the pinned provider version.
issuer_hostpath = "s3.${data.aws_region.current.region}.amazonaws.com/${var.bucket_name}"
# Minimal OIDC discovery document served at .well-known/openid-configuration.
openid_configuration = jsonencode({
issuer = "https://${local.issuer_hostpath}"
jwks_uri = "https://${local.issuer_hostpath}/keys.json"
authorization_endpoint = "urn:kubernetes:programmatic_authorization"
response_types_supported = ["id_token"]
subject_types_supported = ["public"]
id_token_signing_alg_values_supported = ["RS256"]
claims_supported = ["sub", "iss"]
})
# the JWKS format and encodings are defined in the RFC
# https://datatracker.ietf.org/doc/html/rfc7517
jwks = jsonencode({
keys = [
{
use = "sig"
alg = "RS256"
kty = "RSA"
# Key ID: base64url SHA-256 of the DER-encoded public key (see data.external.pub_der).
kid = data.external.pub_der.result.der
# RSA modulus, base64url-encoded without padding (see data.external.modulus).
n = data.external.modulus.result.modulus
# Public exponent 65537, base64url-encoded.
e = "AQAB"
}]
})
}
# Region of the configured provider; used to build the S3 issuer hostname.
data "aws_region" "current" {}
# Caller identity — not referenced in this file's visible blocks; presumably
# consumed elsewhere in the module. NOTE(review): verify before removing.
data "aws_caller_identity" "current" {}
# generate a key pair that will be used to sign projected service account tokens
# NOTE: changing `algorithm` or `rsa_bits` forces replacement, i.e. a key
# rotation — every issued token would become invalid.
resource "tls_private_key" "key" {
algorithm = "RSA"
rsa_bits = 2048
}
# this needs to go into talos machine configuration under cluster.serviceAccount.key
# Secret name is "<prefix>/<cluster>", so one key per cluster under a shared prefix.
resource "aws_secretsmanager_secret" "signing_key" {
name = "${var.secret_prefix}/${var.cluster_name}"
}
# Stores the PEM private key base64-encoded — presumably the format Talos
# expects for cluster.serviceAccount.key (NOTE(review): confirm against the
# Talos machine-config schema).
resource "aws_secretsmanager_secret_version" "signing_key" {
secret_id = aws_secretsmanager_secret.signing_key.id
secret_string = base64encode(tls_private_key.key.private_key_pem)
}
# bucket that we're using as an OIDC discovery endpoint
resource "aws_s3_bucket" "oidc" {
bucket = var.bucket_name
}
# registering the public bucket host as an IdP
# Client ID "sts.amazonaws.com" lets pods exchange projected service-account
# tokens for AWS credentials via AssumeRoleWithWebIdentity (IRSA pattern).
resource "aws_iam_openid_connect_provider" "oidc" {
url = "https://${local.issuer_hostpath}"
client_id_list = ["sts.amazonaws.com"]
# Thumbprint of the S3 endpoint's TLS certificate (see data.tls_certificate.oidc).
thumbprint_list = [data.tls_certificate.oidc.certificates[0].sha1_fingerprint]
}
# BucketOwnerPreferred re-enables object ACLs, which the public-read objects
# below depend on (ACLs are disabled by default on new buckets).
resource "aws_s3_bucket_ownership_controls" "oidc" {
bucket = aws_s3_bucket.oidc.id
rule {
object_ownership = "BucketOwnerPreferred"
}
}
# we _want_ this bucket to have publicly accessible files
# All four guards are disabled so the public-read ACLs on the discovery
# documents actually take effect.
resource "aws_s3_bucket_public_access_block" "oidc" {
bucket = aws_s3_bucket.oidc.id
block_public_acls = false
block_public_policy = false
ignore_public_acls = false
restrict_public_buckets = false
}
# the two files we need to serve
# JWKS document advertised by the discovery document's jwks_uri.
resource "aws_s3_object" "keys_json" {
  bucket  = aws_s3_bucket.oidc.id
  key     = "keys.json"
  content = local.jwks
  acl     = "public-read"
  # Without this S3 serves the object as binary/octet-stream, which can make
  # strict OIDC clients reject the JWKS.
  content_type = "application/json"
  # Re-upload whenever the rendered JWKS changes.
  etag = md5(local.jwks)
  # ACL/public access must be configured before a public-read object can be put.
  depends_on = [
    aws_s3_bucket_ownership_controls.oidc,
    aws_s3_bucket_public_access_block.oidc,
  ]
}
# OIDC discovery document at the well-known path relative to the issuer URL.
resource "aws_s3_object" "openid-configuration" {
  bucket  = aws_s3_bucket.oidc.id
  key     = ".well-known/openid-configuration"
  content = local.openid_configuration
  acl     = "public-read"
  # Without this S3 serves the object as binary/octet-stream, which can make
  # strict OIDC clients reject the discovery document.
  content_type = "application/json"
  # Re-upload whenever the rendered document changes.
  etag = md5(local.openid_configuration)
  # ACL/public access must be configured before a public-read object can be put.
  depends_on = [
    aws_s3_bucket_ownership_controls.oidc,
    aws_s3_bucket_public_access_block.oidc,
  ]
}
# Fetches the TLS certificate chain of the S3 endpoint so its thumbprint can
# be pinned on the IAM OIDC provider above.
data "tls_certificate" "oidc" {
url = "https://${local.issuer_hostpath}"
}
# This is used for the `kid` Key ID field in the JWKS, which is an arbitrary string that can uniquely
# identify a key.
# This logic comes from https://github.com/kubernetes/kubernetes/pull/78502. It creates unique and
# deterministic outputs across platforms.
# See also https://datatracker.ietf.org/doc/html/rfc4648#section-5 for final base64url encoding
# Computes base64url(SHA-256(DER(public key))) with padding stripped. Despite
# the `der` result key, the value is a digest of the DER encoding, not the DER
# bytes themselves.
data "external" "pub_der" {
program = ["bash", "-c", <<EOF
set -euo pipefail
pem=$(jq -r .pem)
der=$(echo "$pem" | openssl pkey -pubin -inform PEM -outform DER | openssl dgst -sha256 -binary | base64 | tr -d '=' | tr '/+' '_-')
jq -n --arg der "$der" '{"der":$der}'
EOF
]
# Only the public key is needed; passed via stdin as JSON.
query = { pem = tls_private_key.key.public_key_pem }
}
# Extracts the RSA modulus and base64url-encodes it (padding stripped) for the
# JWKS `n` field.
# Fixes: (1) GNU coreutils `base64` wraps at 76 columns, and a 2048-bit modulus
# exceeds that, so embedded newlines leaked into the JSON value on Linux —
# `tr -d '=\n'` now strips them along with the padding. (2) The modulus is
# public information readable from the public key (`-pubin`), so the private
# key is no longer handed to an external process.
data "external" "modulus" {
program = ["bash", "-c", <<EOF
set -euo pipefail
pem=$(jq -r .pem)
modulus=$(echo "$pem" | openssl rsa -pubin -inform PEM -modulus -noout | cut -d'=' -f2 | xxd -r -p | base64 | tr -d '=\n' | tr '/+' '_-')
jq -n --arg modulus "$modulus" '{"modulus":$modulus}'
EOF
]
# Public half only — sufficient for the modulus.
query = { pem = tls_private_key.key.public_key_pem }
}