summaryrefslogtreecommitdiff
path: root/terraform/modules/llm
diff options
context:
space:
mode:
Diffstat (limited to 'terraform/modules/llm')
-rw-r--r--terraform/modules/llm/main.tf99
-rw-r--r--terraform/modules/llm/outputs.tf12
-rw-r--r--terraform/modules/llm/variables.tf112
3 files changed, 223 insertions, 0 deletions
diff --git a/terraform/modules/llm/main.tf b/terraform/modules/llm/main.tf
new file mode 100644
index 0000000..cd22019
--- /dev/null
+++ b/terraform/modules/llm/main.tf
@@ -0,0 +1,99 @@
# Namespace that holds every resource of this LLM release.
# Labelled so cluster-wide tooling can group it with the rest of the platform.
resource "kubernetes_namespace_v1" "this" {
  metadata {
    name = var.namespace
    labels = {
      "app.kubernetes.io/part-of" = "llm-platform"
    }
  }
}
9
# Optional autoscaler for the LLM Deployment, driven by a per-pod custom
# metric (default: vLLM's running-request gauge, see var.hpa).
resource "kubernetes_horizontal_pod_autoscaler_v2" "llm" {
  # Created only when autoscaling is switched on.
  count = var.hpa.enabled ? 1 : 0

  metadata {
    # Must match the Deployment name the llm-app chart renders for this release.
    name      = "${var.release_name}-llm-app"
    namespace = kubernetes_namespace_v1.this.metadata[0].name
  }
  spec {
    scale_target_ref {
      api_version = "apps/v1"
      kind        = "Deployment"
      name        = "${var.release_name}-llm-app"
    }
    min_replicas = var.hpa.min_replicas
    max_replicas = var.hpa.max_replicas

    # Scale on the average per-pod value of the named metric.
    # NOTE(review): a "Pods" metric requires a custom-metrics adapter
    # (e.g. prometheus-adapter) in the cluster — not provisioned here; confirm.
    metric {
      type = "Pods"
      pods {
        metric {
          name = var.hpa.metric_name
        }
        target {
          type          = "AverageValue"
          average_value = var.hpa.target_average_value
        }
      }
    }
  }

  # The target Deployment is created by the chart, so wait for the release.
  depends_on = [helm_release.llm]
}
42
# Installs the local llm-app chart that renders the vLLM
# Deployment/Service/Ingress/ServiceMonitor for this release.
resource "helm_release" "llm" {
  name  = var.release_name
  chart = var.chart_path
  # The namespace is managed by this module, not by Helm.
  namespace        = kubernetes_namespace_v1.this.metadata[0].name
  create_namespace = false
  # atomic=false keeps failed resources around for debugging instead of
  # rolling the release back automatically.
  atomic = false
  wait   = true
  # 30 minutes — presumably sized for slow image pulls / model downloads;
  # TODO(review): confirm against observed cold-start time.
  timeout = 1800

  values = [
    yamlencode({
      replicaCount = var.replicas

      image = {
        repository = var.image_repository
        # When digest is non-empty it takes precedence over tag
        # (see var.image_digest description).
        tag        = var.image_tag
        digest     = var.image_digest
        pullPolicy = "IfNotPresent"
      }

      model = {
        name        = var.model_name  # HuggingFace repo id (vLLM model_tag)
        alias       = var.model_alias # value clients send in the OpenAI 'model' field
        maxModelLen = var.max_model_len
        dtype       = var.dtype
      }

      server = {
        port       = 8000
        ompThreads = var.omp_threads
        extraArgs  = var.extra_args
      }

      resources = var.resources

      ingress = {
        enabled   = true
        className = var.ingress_class
        host      = var.ingress_host
      }

      monitoring = {
        serviceMonitor = {
          enabled  = true
          interval = "15s"
          # Must match the release label the Prometheus Operator selects on.
          labels = {
            release = var.service_monitor_release_label
          }
        }
      }

      modelCache = {
        sizeLimit = var.model_cache_size
      }
    }),
  ]
}
diff --git a/terraform/modules/llm/outputs.tf b/terraform/modules/llm/outputs.tf
new file mode 100644
index 0000000..a953e73
--- /dev/null
+++ b/terraform/modules/llm/outputs.tf
@@ -0,0 +1,12 @@
output "service_dns" {
  # Reference the managed namespace resource rather than var.namespace: the
  # rendered string is identical, but this carries an explicit dependency on
  # the namespace and stays consistent with the "namespace" output below.
  value       = "${var.release_name}-llm-app.${kubernetes_namespace_v1.this.metadata[0].name}.svc.cluster.local"
  description = "In-cluster DNS name for the LLM Service."
}
5
output "ingress_host" {
  value = var.ingress_host
  # Added description for parity with the module's other outputs.
  description = "External hostname the chart's Ingress serves the LLM API on."
}
9
output "namespace" {
  value = kubernetes_namespace_v1.this.metadata[0].name
  # Added description for parity with the module's other outputs.
  description = "Name of the namespace the LLM release is deployed into."
}
diff --git a/terraform/modules/llm/variables.tf b/terraform/modules/llm/variables.tf
new file mode 100644
index 0000000..3a7d8f7
--- /dev/null
+++ b/terraform/modules/llm/variables.tf
@@ -0,0 +1,112 @@
variable "release_name" {
  type        = string
  description = "Helm release name. Also used as the prefix of the rendered Deployment/Service names, so it must be a valid DNS label."

  # Kubernetes object names derived from this value must be RFC 1123 labels.
  validation {
    condition     = can(regex("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", var.release_name))
    error_message = "release_name must be a lowercase RFC 1123 label (alphanumerics and '-', starting/ending alphanumeric)."
  }
}

variable "namespace" {
  type        = string
  description = "Kubernetes namespace to deploy into (created by this module)."

  validation {
    condition     = can(regex("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$", var.namespace)) && length(var.namespace) <= 63
    error_message = "namespace must be a valid RFC 1123 DNS label (max 63 characters)."
  }
}

variable "chart_path" {
  type        = string
  description = "Path to the local llm-app chart."
}
15
variable "replicas" {
  type        = number
  default     = 1
  # Added missing description; rejects negative or fractional counts while
  # still permitting 0 (a deliberate "scaled to zero" state).
  description = "Baseline replica count for the LLM Deployment (chart value replicaCount)."

  validation {
    condition     = var.replicas >= 0 && floor(var.replicas) == var.replicas
    error_message = "replicas must be a non-negative whole number."
  }
}
20
variable "model_name" {
  type        = string
  description = "HuggingFace repo id, passed as vLLM model_tag (positional)."
}

variable "model_alias" {
  type        = string
  description = "Value clients pass in the OpenAI 'model' field (maps to --served-model-name)."
}

variable "max_model_len" {
  type        = number
  default     = 2048
  # Added missing description and a positivity guard.
  description = "Maximum context length in tokens (vLLM --max-model-len)."

  validation {
    condition     = var.max_model_len > 0
    error_message = "max_model_len must be a positive number of tokens."
  }
}

variable "dtype" {
  type        = string
  default     = "bfloat16"
  # Restricted to the values vLLM's --dtype flag accepts, so a typo fails at
  # plan time instead of at pod startup.
  description = "Model weight dtype passed to vLLM (--dtype)."

  validation {
    condition     = contains(["auto", "half", "float16", "bfloat16", "float", "float32"], var.dtype)
    error_message = "dtype must be one of: auto, half, float16, bfloat16, float, float32."
  }
}
40
variable "omp_threads" {
  type        = number
  default     = 0
  description = "OMP_NUM_THREADS for vLLM CPU backend. 0 = autodetect."

  # A negative thread count is never meaningful; catch it at plan time.
  validation {
    condition     = var.omp_threads >= 0
    error_message = "omp_threads must be >= 0 (0 means autodetect)."
  }
}

variable "extra_args" {
  type        = list(string)
  default     = []
  description = "Extra CLI args passed to `vllm serve`, appended after the stock set."
}

variable "resources" {
  type = object({
    requests = object({ cpu = string, memory = string })
    limits   = object({ cpu = string, memory = string })
  })
  # Added missing description.
  description = "Kubernetes resource requests/limits for the LLM container (quantity strings, e.g. cpu = \"4\", memory = \"16Gi\")."
}
59
variable "ingress_host" {
  type = string
  # Added missing description.
  description = "Hostname the chart's Ingress serves the LLM API on."
}

variable "ingress_class" {
  type        = string
  default     = "nginx"
  description = "IngressClass name used by the chart's Ingress."
}

variable "image_repository" {
  type        = string
  default     = "public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo"
  description = "Container image repository for the vLLM server."
}

variable "image_tag" {
  type        = string
  default     = "latest"
  description = "Used only when image_digest is empty."
}

variable "image_digest" {
  type        = string
  default     = ""
  description = "Optional sha256:abc... content-addressable digest. Takes precedence over image_tag."

  # The description promises a sha256 digest; enforce the shape so a pasted
  # tag or truncated digest fails at plan time rather than at image pull.
  validation {
    condition     = var.image_digest == "" || can(regex("^sha256:[0-9a-f]{64}$", var.image_digest))
    error_message = "image_digest must be empty or of the form sha256:<64 lowercase hex chars>."
  }
}
85
variable "service_monitor_release_label" {
  type        = string
  default     = "kube-prometheus-stack"
  description = "Must match the release label the Prometheus Operator selects on."
}

variable "model_cache_size" {
  type        = string
  default     = "10Gi"
  description = "Size limit of the model cache volume (Kubernetes quantity, e.g. 512Mi, 10Gi)."

  # Reject strings Kubernetes would refuse as a resource quantity.
  validation {
    condition     = can(regex("^[0-9]+(\\.[0-9]+)?(Ki|Mi|Gi|Ti|Pi|Ei|m|k|M|G|T|P|E)?$", var.model_cache_size))
    error_message = "model_cache_size must be a valid Kubernetes quantity (e.g. 512Mi, 10Gi)."
  }
}

variable "hpa" {
  type = object({
    enabled              = bool
    min_replicas         = number
    max_replicas         = number
    metric_name          = string
    target_average_value = string
  })
  default = {
    enabled              = false
    min_replicas         = 1
    max_replicas         = 3
    metric_name          = "vllm:num_requests_running"
    target_average_value = "500m"
  }
  # Added missing description.
  description = "Horizontal Pod Autoscaler settings for the LLM Deployment (custom Pods metric, AverageValue target)."

  # The HPA resource silently assumes this invariant; make it explicit.
  validation {
    condition     = var.hpa.min_replicas >= 1 && var.hpa.max_replicas >= var.hpa.min_replicas
    error_message = "hpa.min_replicas must be >= 1 and hpa.max_replicas must be >= hpa.min_replicas."
  }
}