diff --git a/.gitignore b/.gitignore
index 12b624e7..a6e1839d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ node_modules/
.npmrc
kubernetes-mcp-server
+!charts/kubernetes-mcp-server
!cmd/kubernetes-mcp-server
!pkg/kubernetes-mcp-server
npm/kubernetes-mcp-server/README.md
@@ -27,3 +28,5 @@ python/build/
python/dist/
python/kubernetes_mcp_server.egg-info/
!python/kubernetes-mcp-server
+
+config-dev.toml
diff --git a/README.md b/README.md
index 4758c0d9..97317a30 100644
--- a/README.md
+++ b/README.md
@@ -208,12 +208,12 @@ The following sets of tools are available (toolsets marked with ✓ in the Defau
-| Toolset | Description | Default |
-|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
-| config | View and manage the current local Kubernetes configuration (kubeconfig) | ✓ |
-| core | Most common tools for Kubernetes management (Pods, Generic Resources, Events, etc.) | ✓ |
-| helm | Tools for managing Helm charts and releases | ✓ |
-| kiali | Most common tools for managing Kiali, check the [Kiali integration documentation](https://github.com/containers/kubernetes-mcp-server/blob/main/docs/KIALI_INTEGRATION.md) for more details. | |
+| Toolset | Description | Default |
+|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
+| config | View and manage the current local Kubernetes configuration (kubeconfig) | ✓ |
+| core | Most common tools for Kubernetes management (Pods, Generic Resources, Events, etc.) | ✓ |
+| helm | Tools for managing Helm charts and releases | ✓ |
+| kiali | Most common tools for managing Kiali, check the [Kiali documentation](https://github.com/containers/kubernetes-mcp-server/blob/main/docs/KIALI.md) for more details. | |
@@ -348,22 +348,22 @@ In case multi-cluster support is enabled (default) and you have access to multip
kiali
-- **graph** - Check the status of my mesh by querying Kiali graph
+- **kiali_graph** - Check the status of my mesh by querying Kiali graph
- `namespace` (`string`) - Optional single namespace to include in the graph (alternative to namespaces)
- `namespaces` (`string`) - Optional comma-separated list of namespaces to include in the graph
-- **mesh_status** - Get the status of mesh components including Istio, Kiali, Grafana, Prometheus and their interactions, versions, and health status
+- **kiali_mesh_status** - Get the status of mesh components including Istio, Kiali, Grafana, Prometheus and their interactions, versions, and health status
-- **istio_config** - Get all Istio configuration objects in the mesh including their full YAML resources and details
+- **kiali_istio_config** - Get all Istio configuration objects in the mesh including their full YAML resources and details
-- **istio_object_details** - Get detailed information about a specific Istio object including validation and help information
+- **kiali_istio_object_details** - Get detailed information about a specific Istio object including validation and help information
- `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- `name` (`string`) **(required)** - Name of the Istio object
- `namespace` (`string`) **(required)** - Namespace containing the Istio object
- `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-- **istio_object_patch** - Modify an existing Istio object using PATCH method. The JSON patch data will be applied to the existing object.
+- **kiali_istio_object_patch** - Modify an existing Istio object using PATCH method. The JSON patch data will be applied to the existing object.
- `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- `json_patch` (`string`) **(required)** - JSON patch data to apply to the object
- `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
@@ -371,34 +371,34 @@ In case multi-cluster support is enabled (default) and you have access to multip
- `namespace` (`string`) **(required)** - Namespace containing the Istio object
- `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-- **istio_object_create** - Create a new Istio object using POST method. The JSON data will be used to create the new object.
+- **kiali_istio_object_create** - Create a new Istio object using POST method. The JSON data will be used to create the new object.
- `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- `json_data` (`string`) **(required)** - JSON data for the new object
- `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- `namespace` (`string`) **(required)** - Namespace where the Istio object will be created
- `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-- **istio_object_delete** - Delete an existing Istio object using DELETE method.
+- **kiali_istio_object_delete** - Delete an existing Istio object using DELETE method.
- `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- `name` (`string`) **(required)** - Name of the Istio object
- `namespace` (`string`) **(required)** - Namespace containing the Istio object
- `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-- **validations_list** - List all the validations in the current cluster from all namespaces
+- **kiali_validations_list** - List all the validations in the current cluster from all namespaces
- `namespace` (`string`) - Optional single namespace to retrieve validations from (alternative to namespaces)
- `namespaces` (`string`) - Optional comma-separated list of namespaces to retrieve validations from
-- **namespaces** - Get all namespaces in the mesh that the user has access to
+- **kiali_namespaces** - Get all namespaces in the mesh that the user has access to
-- **services_list** - Get all services in the mesh across specified namespaces with health and Istio resource information
+- **kiali_services_list** - Get all services in the mesh across specified namespaces with health and Istio resource information
- `namespaces` (`string`) - Comma-separated list of namespaces to get services from (e.g. 'bookinfo' or 'bookinfo,default'). If not provided, will list services from all accessible namespaces
-- **service_details** - Get detailed information for a specific service in a namespace, including validation, health status, and configuration
+- **kiali_service_details** - Get detailed information for a specific service in a namespace, including validation, health status, and configuration
- `namespace` (`string`) **(required)** - Namespace containing the service
- `service` (`string`) **(required)** - Name of the service to get details for
-- **service_metrics** - Get metrics for a specific service in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters
+- **kiali_service_metrics** - Get metrics for a specific service in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters
- `byLabels` (`string`) - Comma-separated list of labels to group metrics by (e.g., 'source_workload,destination_service'). Optional
- `direction` (`string`) - Traffic direction: 'inbound' or 'outbound'. Optional, defaults to 'outbound'
- `duration` (`string`) - Duration of the query period in seconds (e.g., '1800' for 30 minutes). Optional, defaults to 1800 seconds
@@ -410,14 +410,14 @@ In case multi-cluster support is enabled (default) and you have access to multip
- `service` (`string`) **(required)** - Name of the service to get metrics for
- `step` (`string`) - Step between data points in seconds (e.g., '15'). Optional, defaults to 15 seconds
-- **workloads_list** - Get all workloads in the mesh across specified namespaces with health and Istio resource information
+- **kiali_workloads_list** - Get all workloads in the mesh across specified namespaces with health and Istio resource information
- `namespaces` (`string`) - Comma-separated list of namespaces to get workloads from (e.g. 'bookinfo' or 'bookinfo,default'). If not provided, will list workloads from all accessible namespaces
-- **workload_details** - Get detailed information for a specific workload in a namespace, including validation, health status, and configuration
+- **kiali_workload_details** - Get detailed information for a specific workload in a namespace, including validation, health status, and configuration
- `namespace` (`string`) **(required)** - Namespace containing the workload
- `workload` (`string`) **(required)** - Name of the workload to get details for
-- **workload_metrics** - Get metrics for a specific workload in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters
+- **kiali_workload_metrics** - Get metrics for a specific workload in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters
- `byLabels` (`string`) - Comma-separated list of labels to group metrics by (e.g., 'source_workload,destination_service'). Optional
- `direction` (`string`) - Traffic direction: 'inbound' or 'outbound'. Optional, defaults to 'outbound'
- `duration` (`string`) - Duration of the query period in seconds (e.g., '1800' for 30 minutes). Optional, defaults to 1800 seconds
@@ -429,7 +429,7 @@ In case multi-cluster support is enabled (default) and you have access to multip
- `step` (`string`) - Step between data points in seconds (e.g., '15'). Optional, defaults to 15 seconds
- `workload` (`string`) **(required)** - Name of the workload to get metrics for
-- **health** - Get health status for apps, workloads, and services across specified namespaces in the mesh. Returns health information including error rates and status for the requested resource type
+- **kiali_health** - Get health status for apps, workloads, and services across specified namespaces in the mesh. Returns health information including error rates and status for the requested resource type
- `namespaces` (`string`) - Comma-separated list of namespaces to get health from (e.g. 'bookinfo' or 'bookinfo,default'). If not provided, returns health for all accessible namespaces
- `queryTime` (`string`) - Unix timestamp (in seconds) for the prometheus query. If not provided, uses current time. Optional
- `rateInterval` (`string`) - Rate interval for fetching error rate (e.g., '10m', '5m', '1h'). Default: '10m'
@@ -442,7 +442,7 @@ In case multi-cluster support is enabled (default) and you have access to multip
- `tail` (`integer`) - Number of lines to retrieve from the end of logs (default: 100)
- `workload` (`string`) **(required)** - Name of the workload to get logs for
-- **app_traces** - Get distributed tracing data for a specific app in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
+- **kiali_app_traces** - Get distributed tracing data for a specific app in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
- `app` (`string`) **(required)** - Name of the app to get traces for
- `clusterName` (`string`) - Cluster name for multi-cluster environments (optional)
- `endMicros` (`string`) - End time for traces in microseconds since epoch (optional)
@@ -452,7 +452,7 @@ In case multi-cluster support is enabled (default) and you have access to multip
- `startMicros` (`string`) - Start time for traces in microseconds since epoch (optional)
- `tags` (`string`) - JSON string of tags to filter traces (optional)
-- **service_traces** - Get distributed tracing data for a specific service in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
+- **kiali_service_traces** - Get distributed tracing data for a specific service in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
- `clusterName` (`string`) - Cluster name for multi-cluster environments (optional)
- `endMicros` (`string`) - End time for traces in microseconds since epoch (optional)
- `limit` (`integer`) - Maximum number of traces to return (default: 100)
@@ -462,7 +462,7 @@ In case multi-cluster support is enabled (default) and you have access to multip
- `startMicros` (`string`) - Start time for traces in microseconds since epoch (optional)
- `tags` (`string`) - JSON string of tags to filter traces (optional)
-- **workload_traces** - Get distributed tracing data for a specific workload in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
+- **kiali_workload_traces** - Get distributed tracing data for a specific workload in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
- `clusterName` (`string`) - Cluster name for multi-cluster environments (optional)
- `endMicros` (`string`) - End time for traces in microseconds since epoch (optional)
- `limit` (`integer`) - Maximum number of traces to return (default: 100)
@@ -477,6 +477,10 @@ In case multi-cluster support is enabled (default) and you have access to multip
+## Helm Chart
+
+A [Helm Chart](https://helm.sh) is available to simplify the deployment of the Kubernetes MCP server. Additional details can be found in the [chart README](./charts/kubernetes-mcp-server/README.md).
+
## 🧑💻 Development
### Running with mcp-inspector
diff --git a/charts/kubernetes-mcp-server/.helmignore b/charts/kubernetes-mcp-server/.helmignore
new file mode 100644
index 00000000..0e8a0eb3
--- /dev/null
+++ b/charts/kubernetes-mcp-server/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/kubernetes-mcp-server/Chart.yaml b/charts/kubernetes-mcp-server/Chart.yaml
new file mode 100644
index 00000000..a66650bb
--- /dev/null
+++ b/charts/kubernetes-mcp-server/Chart.yaml
@@ -0,0 +1,13 @@
+apiVersion: v2
+name: kubernetes-mcp-server
+description: Helm Chart for the Kubernetes MCP Server
+home: https://github.com/containers/kubernetes-mcp-server
+keywords:
+ - kubernetes
+ - mcp
+maintainers:
+ - name: Andrew Block
+ email: ablock@redhat.com
+ - name: Marc Nuri
+ email: marc.nuri@redhat.com
+version: 0.1.0
diff --git a/charts/kubernetes-mcp-server/README.md b/charts/kubernetes-mcp-server/README.md
new file mode 100644
index 00000000..49b5759d
--- /dev/null
+++ b/charts/kubernetes-mcp-server/README.md
@@ -0,0 +1,76 @@
+# kubernetes-mcp-server
+
+
+
+Helm Chart for the Kubernetes MCP Server
+
+**Homepage:**
+
+## Maintainers
+
+| Name | Email | Url |
+| ---- | ------ | --- |
+| Andrew Block | | |
+| Marc Nuri | | |
+
+## Installing the Chart
+
+The Chart can be installed quickly and easily to a Kubernetes cluster. Since an _Ingress_ is added as part of the default install of the Chart, the `ingress.host` Value must be specified.
+
+Install the Chart using the following command from the root of this directory:
+
+```shell
+helm upgrade -i -n kubernetes-mcp-server --create-namespace kubernetes-mcp-server . --set openshift=true --set ingress.host=
+```
+
+### Optimized OpenShift Deployment
+
+Functionality has been added to the Chart to simplify the deployment to an OpenShift cluster.
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | |
+| config.port | string | `"{{ .Values.service.port }}"` | |
+| configFilePath | string | `"/etc/kubernetes-mcp-server/config.toml"` | |
+| defaultPodSecurityContext | object | `{"seccompProfile":{"type":"RuntimeDefault"}}` | Default Security Context for the Pod when one is not provided |
+| defaultSecurityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"runAsNonRoot":true}` | Default Security Context for the Container when one is not provided |
+| extraVolumeMounts | list | `[]` | Additional volumeMounts on the output Deployment definition. |
+| extraVolumes | list | `[]` | Additional volumes on the output Deployment definition. |
+| fullnameOverride | string | `""` | |
+| image | object | `{"pullPolicy":"IfNotPresent","registry":"quay.io","repository":"containers/kubernetes_mcp_server","version":"latest"}` | This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/ |
+| image.pullPolicy | string | `"IfNotPresent"` | This sets the pull policy for images. |
+| image.version | string | `"latest"` | This sets the tag or sha digest for the image. |
+| imagePullSecrets | list | `[]` | This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ |
+| ingress | object | `{"annotations":{},"className":"","enabled":true,"host":"","hosts":null,"path":"/","pathType":"ImplementationSpecific","termination":"edge","tls":null}` | This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/ |
+| livenessProbe | object | `{"httpGet":{"path":"/healthz","port":"http"}}` | Liveness and readiness probes for the container. |
+| nameOverride | string | `""` | |
+| nodeSelector | object | `{}` | |
+| openshift | bool | `false` | Enable OpenShift specific features |
+| podAnnotations | object | `{}` | For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ |
+| podLabels | object | `{}` | For more information checkout: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ |
+| podSecurityContext | object | `{}` | Define the Security Context for the Pod |
+| readinessProbe.httpGet.path | string | `"/healthz"` | |
+| readinessProbe.httpGet.port | string | `"http"` | |
+| replicaCount | int | `1` | This will set the replicaset count more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/ |
+| resources | object | `{"limits":{"cpu":"100m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"128Mi"}}` | Resource requests and limits for the container. |
+| securityContext | object | `{}` | Define the Security Context for the Container |
+| service | object | `{"port":8080,"type":"ClusterIP"}` | This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/ |
+| service.port | int | `8080` | This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports |
+| service.type | string | `"ClusterIP"` | This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types |
+| serviceAccount | object | `{"annotations":{},"create":true,"name":""}` | This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/ |
+| serviceAccount.annotations | object | `{}` | Annotations to add to the service account |
+| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| serviceAccount.name | string | `""` | If not set and create is true, a name is generated using the fullname template |
+| tolerations | list | `[]` | |
+
+## Updating the README
+
+The contents of the README.md file are generated using [helm-docs](https://github.com/norwoodj/helm-docs). Whenever changes are introduced to the Chart and its _Values_, the documentation should be regenerated.
+
+Execute the following command to regenerate the documentation from within the Helm Chart directory.
+
+```shell
+helm-docs -t README.md.gotmpl
+```
diff --git a/charts/kubernetes-mcp-server/README.md.gotmpl b/charts/kubernetes-mcp-server/README.md.gotmpl
new file mode 100644
index 00000000..b1b30d68
--- /dev/null
+++ b/charts/kubernetes-mcp-server/README.md.gotmpl
@@ -0,0 +1,40 @@
+{{ template "chart.header" . }}
+{{ template "chart.deprecationWarning" . }}
+
+{{ template "chart.badgesSection" . }}
+
+{{ template "chart.description" . }}
+
+{{ template "chart.homepageLine" . }}
+
+{{ template "chart.maintainersSection" . }}
+
+{{ template "chart.sourcesSection" . }}
+
+{{ template "chart.requirementsSection" . }}
+
+## Installing the Chart
+
+The Chart can be installed quickly and easily to a Kubernetes cluster. Since an _Ingress_ is added as part of the default install of the Chart, the `ingress.host` Value must be specified.
+
+Install the Chart using the following command from the root of this directory:
+
+```shell
+helm upgrade -i -n kubernetes-mcp-server --create-namespace kubernetes-mcp-server . --set openshift=true --set ingress.host=
+```
+
+### Optimized OpenShift Deployment
+
+Functionality has been added to the Chart to simplify the deployment to an OpenShift cluster.
+
+{{ template "chart.valuesSection" . }}
+
+## Updating the README
+
+The contents of the README.md file are generated using [helm-docs](https://github.com/norwoodj/helm-docs). Whenever changes are introduced to the Chart and its _Values_, the documentation should be regenerated.
+
+Execute the following command to regenerate the documentation from within the Helm Chart directory.
+
+```shell
+helm-docs -t README.md.gotmpl
+```
diff --git a/charts/kubernetes-mcp-server/templates/_helpers.tpl b/charts/kubernetes-mcp-server/templates/_helpers.tpl
new file mode 100644
index 00000000..991c9331
--- /dev/null
+++ b/charts/kubernetes-mcp-server/templates/_helpers.tpl
@@ -0,0 +1,73 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kubernetes-mcp-server.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "kubernetes-mcp-server.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kubernetes-mcp-server.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "kubernetes-mcp-server.labels" -}}
+helm.sh/chart: {{ include "kubernetes-mcp-server.chart" . }}
+{{ include "kubernetes-mcp-server.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "kubernetes-mcp-server.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "kubernetes-mcp-server.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "kubernetes-mcp-server.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "kubernetes-mcp-server.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create the image path for the passed in image field
+*/}}
+{{- define "kubernetes-mcp-server.image" -}}
+{{- if eq (substr 0 7 .version) "sha256:" -}}
+{{- printf "%s/%s@%s" .registry .repository .version -}}
+{{- else -}}
+{{- printf "%s/%s:%s" .registry .repository .version -}}
+{{- end -}}
+{{- end -}}
diff --git a/charts/kubernetes-mcp-server/templates/configmap.yaml b/charts/kubernetes-mcp-server/templates/configmap.yaml
new file mode 100644
index 00000000..9d066b35
--- /dev/null
+++ b/charts/kubernetes-mcp-server/templates/configmap.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "kubernetes-mcp-server.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kubernetes-mcp-server.labels" . | nindent 4 }}
+data:
+ config.toml: |
+ {{- tpl (toToml .Values.config) . | replace ".0" "" | nindent 4 }}
diff --git a/charts/kubernetes-mcp-server/templates/deployment.yaml b/charts/kubernetes-mcp-server/templates/deployment.yaml
new file mode 100644
index 00000000..372c7ba7
--- /dev/null
+++ b/charts/kubernetes-mcp-server/templates/deployment.yaml
@@ -0,0 +1,82 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "kubernetes-mcp-server.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kubernetes-mcp-server.labels" . | nindent 4 }}
+spec:
+ replicas: {{ .Values.replicaCount }}
+ selector:
+ matchLabels:
+ {{- include "kubernetes-mcp-server.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ annotations:
+ checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ {{- with .Values.podAnnotations }}
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "kubernetes-mcp-server.labels" . | nindent 8 }}
+ {{- with .Values.podLabels }}
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "kubernetes-mcp-server.serviceAccountName" . }}
+ securityContext:
+ {{- tpl (toYaml (default .Values.defaultPodSecurityContext .Values.podSecurityContext)) . | nindent 8 }}
+ containers:
+ - name: {{ .Chart.Name }}
+ securityContext:
+ {{- tpl (toYaml (default .Values.defaultSecurityContext .Values.securityContext)) . | nindent 12 }}
+ image: "{{ template "kubernetes-mcp-server.image" .Values.image }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ ports:
+ - name: http
+ containerPort: {{ .Values.service.port }}
+ protocol: TCP
+ args:
+ - "--config"
+ - "{{ .Values.configFilePath }}"
+ {{- with .Values.livenessProbe }}
+ livenessProbe:
+ {{- tpl (toYaml .) . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.readinessProbe }}
+ readinessProbe:
+ {{- tpl (toYaml .) . | nindent 12 }}
+ {{- end }}
+ {{- with .Values.resources }}
+ resources:
+ {{- tpl (toYaml .) . | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: config
+ mountPath: {{ .Values.configFilePath | dir }}
+ {{- with .Values.extraVolumeMounts }}
+ {{- tpl (toYaml .) . | nindent 12 }}
+ {{- end }}
+ volumes:
+ - name: config
+ configMap:
+ name: {{ include "kubernetes-mcp-server.fullname" . }}
+ {{- with .Values.extraVolumes }}
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.nodeSelector }}
+ nodeSelector:
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.affinity }}
+ affinity:
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
+ {{- with .Values.tolerations }}
+ tolerations:
+ {{- tpl (toYaml .) . | nindent 8 }}
+ {{- end }}
diff --git a/charts/kubernetes-mcp-server/templates/ingress.yaml b/charts/kubernetes-mcp-server/templates/ingress.yaml
new file mode 100644
index 00000000..e6179fcb
--- /dev/null
+++ b/charts/kubernetes-mcp-server/templates/ingress.yaml
@@ -0,0 +1,38 @@
+{{- if .Values.ingress.enabled -}}
+{{- $host := required "Ingress hostname must be specified" (tpl .Values.ingress.host .) }}
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: {{ include "kubernetes-mcp-server.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kubernetes-mcp-server.labels" . | nindent 4 }}
+ annotations:
+ {{- if eq .Values.openshift true }}
+ route.openshift.io/termination: {{ .Values.ingress.termination }}
+ {{- end }}
+ {{- with .Values.ingress.annotations }}
+ {{- toYaml . | nindent 4 }}
+ {{- end }}
+spec:
+ {{- with .Values.ingress.className }}
+ ingressClassName: {{ . }}
+ {{- end }}
+ {{- if .Values.ingress.tls }}
+ tls:
+ - hosts:
+ - "{{ $host }}"
+ secretName: {{ .Values.ingress.tls.secretName }}
+ {{- end }}
+ rules:
+ - host: "{{ $host }}"
+ http:
+ paths:
+ - path: {{ .Values.ingress.path }}
+ pathType: {{ .Values.ingress.pathType }}
+ backend:
+ service:
+ name: {{ include "kubernetes-mcp-server.fullname" $ }}
+ port:
+ number: {{ $.Values.service.port }}
+{{- end }}
diff --git a/charts/kubernetes-mcp-server/templates/service.yaml b/charts/kubernetes-mcp-server/templates/service.yaml
new file mode 100644
index 00000000..20bccf21
--- /dev/null
+++ b/charts/kubernetes-mcp-server/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "kubernetes-mcp-server.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kubernetes-mcp-server.labels" . | nindent 4 }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: http
+ protocol: TCP
+ name: http
+ selector:
+ {{- include "kubernetes-mcp-server.selectorLabels" . | nindent 4 }}
diff --git a/charts/kubernetes-mcp-server/templates/serviceaccount.yaml b/charts/kubernetes-mcp-server/templates/serviceaccount.yaml
new file mode 100644
index 00000000..b75f47bf
--- /dev/null
+++ b/charts/kubernetes-mcp-server/templates/serviceaccount.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{ include "kubernetes-mcp-server.serviceAccountName" . }}
+ namespace: {{ .Release.Namespace }}
+ labels:
+ {{- include "kubernetes-mcp-server.labels" . | nindent 4 }}
+ {{- with .Values.serviceAccount.annotations }}
+ annotations:
+ {{- tpl (toYaml .) . | nindent 4 }}
+ {{- end }}
+{{- end }}
diff --git a/charts/kubernetes-mcp-server/values.yaml b/charts/kubernetes-mcp-server/values.yaml
new file mode 100644
index 00000000..4e4d3299
--- /dev/null
+++ b/charts/kubernetes-mcp-server/values.yaml
@@ -0,0 +1,121 @@
+# -- Enable OpenShift specific features
+openshift: false
+
+# -- This sets the replica count; more information can be found here: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
+replicaCount: 1
+
+# -- This sets the container image more information can be found here: https://kubernetes.io/docs/concepts/containers/images/
+image:
+ registry: quay.io
+ repository: containers/kubernetes_mcp_server
+ # -- This sets the tag or sha digest for the image.
+ version: latest
+ # -- This sets the pull policy for images.
+ pullPolicy: IfNotPresent
+
+# -- This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+imagePullSecrets: []
+# This is to override the chart name.
+nameOverride: ""
+fullnameOverride: ""
+
+# -- This section builds out the service account more information can be found here: https://kubernetes.io/docs/concepts/security/service-accounts/
+serviceAccount:
+ # -- Specifies whether a service account should be created
+ create: true
+ # -- Annotations to add to the service account
+ annotations: {}
+ # -- The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+ name: ""
+
+# -- This is for setting Kubernetes Annotations to a Pod.
+# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+podAnnotations: {}
+# -- This is for setting Kubernetes Labels to a Pod.
+# For more information check out: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+podLabels: {}
+
+# -- Default Security Context for the Pod when one is not provided
+defaultPodSecurityContext:
+ seccompProfile:
+ type: RuntimeDefault
+
+# -- Define the Security Context for the Pod
+podSecurityContext: {}
+
+# -- Default Security Context for the Container when one is not provided
+defaultSecurityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ runAsNonRoot: true
+
+# -- Define the Security Context for the Container
+securityContext: {}
+
+# -- This is for setting up a service more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/
+service:
+ # -- This sets the service type more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+ type: ClusterIP
+ # -- This sets the ports more information can be found here: https://kubernetes.io/docs/concepts/services-networking/service/#field-spec-ports
+ port: 8080
+
+# -- This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
+ingress:
+ enabled: true
+ className: ""
+ annotations: {}
+ host: ""
+ path: /
+ pathType: ImplementationSpecific
+ termination: edge
+  # hosts: (unused) — the ingress template reads the single 'host' value above
+ tls:
+ #secretName: ""
+
+# -- Resource requests and limits for the container.
+resources:
+ limits:
+ cpu: 100m
+ memory: 128Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
+
+# -- Liveness and readiness probes for the container.
+livenessProbe:
+ httpGet:
+ path: /healthz
+ port: http
+readinessProbe:
+ httpGet:
+ path: /healthz
+ port: http
+
+# -- Additional volumes on the output Deployment definition.
+extraVolumes: []
+# - name: foo
+# secret:
+# secretName: mysecret
+# optional: false
+
+# -- Additional volumeMounts on the output Deployment definition.
+extraVolumeMounts: []
+# - name: foo
+# mountPath: "/etc/foo"
+# readOnly: true
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+# Path to the configuration file inside the container
+configFilePath: /etc/kubernetes-mcp-server/config.toml
+
+# MCP Server configuration options. See https://github.com/containers/kubernetes-mcp-server/blob/main/pkg/config/config.go for details.
+config:
+ port: "{{ .Values.service.port }}"
diff --git a/docs/AUTH_HEADERS_PROVIDER.md b/docs/AUTH_HEADERS_PROVIDER.md
new file mode 100644
index 00000000..c8dd3b9b
--- /dev/null
+++ b/docs/AUTH_HEADERS_PROVIDER.md
@@ -0,0 +1,177 @@
+# Auth-Headers Provider
+
+The `auth-headers` cluster provider strategy enables multi-tenant Kubernetes MCP server deployments where each request provides complete cluster connection details and authentication via HTTP headers or MCP tool parameters.
+
+## Overview
+
+This provider:
+- **Requires cluster connection details per request** via custom headers (server URL, CA certificate)
+- **Requires authentication per request** via bearer token OR client certificates
+- **Does not use kubeconfig** - all configuration comes from request headers
+- **Creates dynamic Kubernetes clients** per request using the provided credentials
+
+## Use Cases
+
+- **Multi-tenant SaaS deployments** - Single MCP server instance serving multiple users/clusters
+- **Zero-trust architectures** - No stored credentials, complete authentication per request
+- **Dynamic cluster access** - Connect to different clusters without server configuration
+- **Auditing & compliance** - Each request uses the user's actual identity for Kubernetes RBAC
+- **Temporary access** - Short-lived credentials without persistent configuration
+
+## Configuration
+
+### Basic Setup
+
+```bash
+kubernetes-mcp-server \
+ --port 8080 \
+ --cluster-provider-strategy auth-headers
+```
+
+The server will:
+1. Accept requests with cluster connection details in headers
+2. Create a Kubernetes client dynamically for each request
+3. Reject any requests without required authentication headers
+
+### TOML Configuration
+
+```toml
+cluster_provider_strategy = "auth-headers"
+# No kubeconfig needed - all details come from request headers
+```
+
+### Required Headers
+
+Each request must include the following custom headers:
+
+**Required for all requests:**
+- `kubernetes-server` - Kubernetes API server URL (e.g., `https://kubernetes.example.com:6443`)
+- `kubernetes-certificate-authority-data` - Base64-encoded CA certificate
+
+**Authentication (choose one):**
+
+Option 1: Bearer Token
+- `kubernetes-authorization` - Bearer token (e.g., `Bearer eyJhbGci...`)
+
+Option 2: Client Certificate
+- `kubernetes-client-certificate-data` - Base64-encoded client certificate
+- `kubernetes-client-key-data` - Base64-encoded client key
+
+**Optional:**
+- `kubernetes-insecure-skip-tls-verify` - Set to `true` to skip TLS verification (not recommended for production)
+
+## How It Works
+
+### 1. Initialization
+
+When the server starts:
+```
+Server starts with auth-headers provider
+ ↓
+No kubeconfig or credentials loaded
+ ↓
+Ready to accept requests with headers
+```
+
+### 2. Request Processing
+
+For each MCP request:
+```
+HTTP Request with custom headers
+ ↓
+Extract kubernetes-server, kubernetes-certificate-authority-data
+ ↓
+Extract authentication (token OR client cert/key)
+ ↓
+Create K8sAuthHeaders struct
+ ↓
+Build rest.Config dynamically
+ ↓
+Create new Kubernetes client
+ ↓
+Execute Kubernetes operation
+ ↓
+Discard client after request
+```
+
+### 3. Header Extraction
+
+Headers can be provided in two ways:
+
+**A. HTTP Request Headers** (standard way):
+```
+POST /mcp HTTP/1.1
+kubernetes-server: https://k8s.example.com:6443
+kubernetes-certificate-authority-data: LS0tLS1CRUdJ...
+kubernetes-authorization: Bearer eyJhbGci...
+```
+
+**B. MCP Tool Parameters Meta** (advanced):
+```json
+{
+ "jsonrpc": "2.0",
+ "method": "tools/call",
+ "params": {
+ "name": "pods_list",
+ "arguments": {"namespace": "default"},
+ "_meta": {
+ "kubernetes-server": "https://k8s.example.com:6443",
+ "kubernetes-certificate-authority-data": "LS0tLS1CRUdJ...",
+ "kubernetes-authorization": "Bearer eyJhbGci..."
+ }
+ }
+}
+```
+
+### 4. Security Model
+
+```
+┌──────────────────┐
+│ MCP Client │
+│ (Claude, etc) │
+└────────┬─────────┘
+ │ All cluster info + auth in headers
+ ↓
+┌──────────────────┐
+│ MCP Server │
+│ (auth-headers) │
+│ NO CREDENTIALS │
+│ STORED │
+└────────┬─────────┘
+ │ Creates temporary client
+ ↓
+┌──────────────────┐
+│ Kubernetes API │
+│ Server │
+└──────────────────┘
+ ↓
+ RBAC enforced with
+ credentials from headers
+```
+
+## Client Usage
+
+### Using the Go MCP Client
+
+```go
+import (
+ "encoding/base64"
+ "github.com/mark3labs/mcp-go/client/transport"
+)
+
+// Get cluster connection details
+serverURL := "https://k8s.example.com:6443"
+caCert := getCACertificate() // PEM-encoded CA certificate
+token := getUserKubernetesToken()
+
+// Encode CA certificate to base64
+caCertBase64 := base64.StdEncoding.EncodeToString(caCert)
+
+client := NewMCPClient(
+ transport.WithHTTPHeaders(map[string]string{
+ "kubernetes-server": serverURL,
+ "kubernetes-certificate-authority-data": caCertBase64,
+ "kubernetes-authorization": "Bearer " + token,
+ })
+)
+```
diff --git a/docs/KIALI.md b/docs/KIALI.md
new file mode 100644
index 00000000..5b8f4b9f
--- /dev/null
+++ b/docs/KIALI.md
@@ -0,0 +1,37 @@
+## Kiali integration
+
+This server can expose Kiali tools so assistants can query mesh information (e.g., mesh status/graph).
+
+### Enable the Kiali toolset
+
+Enable the Kiali tools via the server TOML configuration file.
+
+Config (TOML):
+
+```toml
+toolsets = ["core", "kiali"]
+
+[toolset_configs.kiali]
+url = "https://kiali.example" # Endpoint/route to reach Kiali console
+# insecure = true # optional: allow insecure TLS (not recommended in production)
+# certificate_authority = """-----BEGIN CERTIFICATE-----
+# MIID...
+# -----END CERTIFICATE-----"""
+# When url is https and insecure is false, certificate_authority is required.
+```
+
+When the `kiali` toolset is enabled, a Kiali toolset configuration is required via `[toolset_configs.kiali]`. If missing or invalid, the server will refuse to start.
+
+### How authentication works
+
+- The server uses your existing Kubernetes credentials (from kubeconfig or in-cluster) to set a bearer token for Kiali calls.
+- If you pass an HTTP Authorization header to the MCP HTTP endpoint, that is not required for Kiali; Kiali calls use the server's configured token.
+
+### Troubleshooting
+
+- Missing Kiali configuration when `kiali` toolset is enabled → set `[toolset_configs.kiali].url` in the config TOML.
+- Invalid URL → ensure `[toolset_configs.kiali].url` is a valid `http(s)://host` URL.
+- TLS certificate validation:
+ - If `[toolset_configs.kiali].url` uses HTTPS and `[toolset_configs.kiali].insecure` is false, you must set `[toolset_configs.kiali].certificate_authority` with the PEM-encoded certificate(s) used by the Kiali server. This field expects inline PEM content, not a file path. You may concatenate multiple PEM blocks to include an intermediate chain.
+ - For non-production environments you can set `[toolset_configs.kiali].insecure = true` to skip certificate verification.
+
diff --git a/docs/KIALI_INTEGRATION.md b/docs/KIALI_INTEGRATION.md
deleted file mode 100644
index 00952744..00000000
--- a/docs/KIALI_INTEGRATION.md
+++ /dev/null
@@ -1,170 +0,0 @@
-## Kiali integration
-
-This server can expose Kiali tools so assistants can query mesh information (e.g., mesh status/graph).
-
-### Enable the Kiali toolset
-
-Enable the Kiali tools via the server TOML configuration file.
-
-Config (TOML):
-
-```toml
-toolsets = ["core", "kiali"]
-
-[toolset_configs.kiali]
-url = "https://kiali.example"
-# insecure = true # optional: allow insecure TLS (not recommended in production)
-# certificate_authority = """-----BEGIN CERTIFICATE-----
-# MIID...
-# -----END CERTIFICATE-----"""
-# When url is https and insecure is false, certificate_authority is required.
-```
-
-When the `kiali` toolset is enabled, a Kiali toolset configuration is required via `[toolset_configs.kiali]`. If missing or invalid, the server will refuse to start.
-
-### How authentication works
-
-- The server uses your existing Kubernetes credentials (from kubeconfig or in-cluster) to set a bearer token for Kiali calls.
-- If you pass an HTTP Authorization header to the MCP HTTP endpoint, that is not required for Kiali; Kiali calls use the server's configured token.
-
-### Available tools (initial)
-
-
-
-kiali
-
-- **graph** - Check the status of my mesh by querying Kiali graph
- - `namespace` (`string`) - Optional single namespace to include in the graph (alternative to namespaces)
- - `namespaces` (`string`) - Optional comma-separated list of namespaces to include in the graph
-
-- **mesh_status** - Get the status of mesh components including Istio, Kiali, Grafana, Prometheus and their interactions, versions, and health status
-
-- **istio_config** - Get all Istio configuration objects in the mesh including their full YAML resources and details
-
-- **istio_object_details** - Get detailed information about a specific Istio object including validation and help information
- - `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- - `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- - `name` (`string`) **(required)** - Name of the Istio object
- - `namespace` (`string`) **(required)** - Namespace containing the Istio object
- - `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-
-- **istio_object_patch** - Modify an existing Istio object using PATCH method. The JSON patch data will be applied to the existing object.
- - `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- - `json_patch` (`string`) **(required)** - JSON patch data to apply to the object
- - `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- - `name` (`string`) **(required)** - Name of the Istio object
- - `namespace` (`string`) **(required)** - Namespace containing the Istio object
- - `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-
-- **istio_object_create** - Create a new Istio object using POST method. The JSON data will be used to create the new object.
- - `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- - `json_data` (`string`) **(required)** - JSON data for the new object
- - `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- - `namespace` (`string`) **(required)** - Namespace where the Istio object will be created
- - `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-
-- **istio_object_delete** - Delete an existing Istio object using DELETE method.
- - `group` (`string`) **(required)** - API group of the Istio object (e.g., 'networking.istio.io', 'gateway.networking.k8s.io')
- - `kind` (`string`) **(required)** - Kind of the Istio object (e.g., 'DestinationRule', 'VirtualService', 'HTTPRoute', 'Gateway')
- - `name` (`string`) **(required)** - Name of the Istio object
- - `namespace` (`string`) **(required)** - Namespace containing the Istio object
- - `version` (`string`) **(required)** - API version of the Istio object (e.g., 'v1', 'v1beta1')
-
-- **validations_list** - List all the validations in the current cluster from all namespaces
- - `namespace` (`string`) - Optional single namespace to retrieve validations from (alternative to namespaces)
- - `namespaces` (`string`) - Optional comma-separated list of namespaces to retrieve validations from
-
-- **namespaces** - Get all namespaces in the mesh that the user has access to
-
-- **services_list** - Get all services in the mesh across specified namespaces with health and Istio resource information
- - `namespaces` (`string`) - Comma-separated list of namespaces to get services from (e.g. 'bookinfo' or 'bookinfo,default'). If not provided, will list services from all accessible namespaces
-
-- **service_details** - Get detailed information for a specific service in a namespace, including validation, health status, and configuration
- - `namespace` (`string`) **(required)** - Namespace containing the service
- - `service` (`string`) **(required)** - Name of the service to get details for
-
-- **service_metrics** - Get metrics for a specific service in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters
- - `byLabels` (`string`) - Comma-separated list of labels to group metrics by (e.g., 'source_workload,destination_service'). Optional
- - `direction` (`string`) - Traffic direction: 'inbound' or 'outbound'. Optional, defaults to 'outbound'
- - `duration` (`string`) - Duration of the query period in seconds (e.g., '1800' for 30 minutes). Optional, defaults to 1800 seconds
- - `namespace` (`string`) **(required)** - Namespace containing the service
- - `quantiles` (`string`) - Comma-separated list of quantiles for histogram metrics (e.g., '0.5,0.95,0.99'). Optional
- - `rateInterval` (`string`) - Rate interval for metrics (e.g., '1m', '5m'). Optional, defaults to '1m'
- - `reporter` (`string`) - Metrics reporter: 'source', 'destination', or 'both'. Optional, defaults to 'source'
- - `requestProtocol` (`string`) - Filter by request protocol (e.g., 'http', 'grpc', 'tcp'). Optional
- - `service` (`string`) **(required)** - Name of the service to get metrics for
- - `step` (`string`) - Step between data points in seconds (e.g., '15'). Optional, defaults to 15 seconds
-
-- **workloads_list** - Get all workloads in the mesh across specified namespaces with health and Istio resource information
- - `namespaces` (`string`) - Comma-separated list of namespaces to get workloads from (e.g. 'bookinfo' or 'bookinfo,default'). If not provided, will list workloads from all accessible namespaces
-
-- **workload_details** - Get detailed information for a specific workload in a namespace, including validation, health status, and configuration
- - `namespace` (`string`) **(required)** - Namespace containing the workload
- - `workload` (`string`) **(required)** - Name of the workload to get details for
-
-- **workload_metrics** - Get metrics for a specific workload in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters
- - `byLabels` (`string`) - Comma-separated list of labels to group metrics by (e.g., 'source_workload,destination_service'). Optional
- - `direction` (`string`) - Traffic direction: 'inbound' or 'outbound'. Optional, defaults to 'outbound'
- - `duration` (`string`) - Duration of the query period in seconds (e.g., '1800' for 30 minutes). Optional, defaults to 1800 seconds
- - `namespace` (`string`) **(required)** - Namespace containing the workload
- - `quantiles` (`string`) - Comma-separated list of quantiles for histogram metrics (e.g., '0.5,0.95,0.99'). Optional
- - `rateInterval` (`string`) - Rate interval for metrics (e.g., '1m', '5m'). Optional, defaults to '1m'
- - `reporter` (`string`) - Metrics reporter: 'source', 'destination', or 'both'. Optional, defaults to 'source'
- - `requestProtocol` (`string`) - Filter by request protocol (e.g., 'http', 'grpc', 'tcp'). Optional
- - `step` (`string`) - Step between data points in seconds (e.g., '15'). Optional, defaults to 15 seconds
- - `workload` (`string`) **(required)** - Name of the workload to get metrics for
-
-- **health** - Get health status for apps, workloads, and services across specified namespaces in the mesh. Returns health information including error rates and status for the requested resource type
- - `namespaces` (`string`) - Comma-separated list of namespaces to get health from (e.g. 'bookinfo' or 'bookinfo,default'). If not provided, returns health for all accessible namespaces
- - `queryTime` (`string`) - Unix timestamp (in seconds) for the prometheus query. If not provided, uses current time. Optional
- - `rateInterval` (`string`) - Rate interval for fetching error rate (e.g., '10m', '5m', '1h'). Default: '10m'
- - `type` (`string`) - Type of health to retrieve: 'app', 'service', or 'workload'. Default: 'app'
-
-- **workload_logs** - Get logs for a specific workload's pods in a namespace. Only requires namespace and workload name - automatically discovers pods and containers. Optionally filter by container name, time range, and other parameters. Container is auto-detected if not specified.
- - `container` (`string`) - Optional container name to filter logs. If not provided, automatically detects and uses the main application container (excludes istio-proxy and istio-init)
- - `namespace` (`string`) **(required)** - Namespace containing the workload
- - `since` (`string`) - Time duration to fetch logs from (e.g., '5m', '1h', '30s'). If not provided, returns recent logs
- - `tail` (`integer`) - Number of lines to retrieve from the end of logs (default: 100)
- - `workload` (`string`) **(required)** - Name of the workload to get logs for
-
-- **app_traces** - Get distributed tracing data for a specific app in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
- - `app` (`string`) **(required)** - Name of the app to get traces for
- - `clusterName` (`string`) - Cluster name for multi-cluster environments (optional)
- - `endMicros` (`string`) - End time for traces in microseconds since epoch (optional)
- - `limit` (`integer`) - Maximum number of traces to return (default: 100)
- - `minDuration` (`integer`) - Minimum trace duration in microseconds (optional)
- - `namespace` (`string`) **(required)** - Namespace containing the app
- - `startMicros` (`string`) - Start time for traces in microseconds since epoch (optional)
- - `tags` (`string`) - JSON string of tags to filter traces (optional)
-
-- **service_traces** - Get distributed tracing data for a specific service in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
- - `clusterName` (`string`) - Cluster name for multi-cluster environments (optional)
- - `endMicros` (`string`) - End time for traces in microseconds since epoch (optional)
- - `limit` (`integer`) - Maximum number of traces to return (default: 100)
- - `minDuration` (`integer`) - Minimum trace duration in microseconds (optional)
- - `namespace` (`string`) **(required)** - Namespace containing the service
- - `service` (`string`) **(required)** - Name of the service to get traces for
- - `startMicros` (`string`) - Start time for traces in microseconds since epoch (optional)
- - `tags` (`string`) - JSON string of tags to filter traces (optional)
-
-- **workload_traces** - Get distributed tracing data for a specific workload in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.
- - `clusterName` (`string`) - Cluster name for multi-cluster environments (optional)
- - `endMicros` (`string`) - End time for traces in microseconds since epoch (optional)
- - `limit` (`integer`) - Maximum number of traces to return (default: 100)
- - `minDuration` (`integer`) - Minimum trace duration in microseconds (optional)
- - `namespace` (`string`) **(required)** - Namespace containing the workload
- - `startMicros` (`string`) - Start time for traces in microseconds since epoch (optional)
- - `tags` (`string`) - JSON string of tags to filter traces (optional)
- - `workload` (`string`) **(required)** - Name of the workload to get traces for
-
-
-
-### Troubleshooting
-
-- Missing Kiali configuration when `kiali` toolset is enabled → set `[toolset_configs.kiali].url` in the config TOML.
-- Invalid URL → ensure `[toolset_configs.kiali].url` is a valid `http(s)://host` URL.
-- TLS certificate validation:
- - If `[toolset_configs.kiali].url` uses HTTPS and `[toolset_configs.kiali].insecure` is false, you must set `[toolset_configs.kiali].certificate_authority` with the PEM-encoded certificate(s) used by the Kiali server. This field expects inline PEM content, not a file path. You may concatenate multiple PEM blocks to include an intermediate chain.
- - For non-production environments you can set `[toolset_configs.kiali].insecure = true` to skip certificate verification.
-
-
diff --git a/docs/README.md b/docs/README.md
index 0eaa634e..d33557d8 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -16,7 +16,14 @@ Choose the guide that matches your needs:
1. **Complete the base setup**: Start with [Getting Started with Kubernetes](GETTING_STARTED_KUBERNETES.md) to create a ServiceAccount and kubeconfig file
2. **Configure Claude Code**: Then follow the [Claude Code CLI guide](GETTING_STARTED_CLAUDE_CODE.md)
+## Other toolsets
+
+- **[Kiali](KIALI.md)** - Tools for Kiali ServiceMesh with Istio
+
## Additional Documentation
- **[Keycloak OIDC Setup](KEYCLOAK_OIDC_SETUP.md)** - Developer guide for local Keycloak environment and testing with MCP Inspector
- **[Main README](../README.md)** - Project overview and general information
+
+
+
diff --git a/go.mod b/go.mod
index 53425351..f9f1d232 100644
--- a/go.mod
+++ b/go.mod
@@ -4,11 +4,11 @@ go 1.24.10
require (
github.com/BurntSushi/toml v1.5.0
- github.com/coreos/go-oidc/v3 v3.16.0
+ github.com/coreos/go-oidc/v3 v3.17.0
github.com/fsnotify/fsnotify v1.9.0
github.com/go-jose/go-jose/v4 v4.1.3
github.com/google/jsonschema-go v0.3.0
- github.com/mark3labs/mcp-go v0.43.0
+ github.com/mark3labs/mcp-go v0.43.1
github.com/modelcontextprotocol/go-sdk v1.1.0
github.com/pkg/errors v0.9.1
github.com/spf13/afero v1.15.0
diff --git a/go.sum b/go.sum
index 7cd638b4..2a04ced6 100644
--- a/go.sum
+++ b/go.sum
@@ -48,8 +48,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
-github.com/coreos/go-oidc/v3 v3.16.0 h1:qRQUCFstKpXwmEjDQTIbyY/5jF00+asXzSkmkoa/mow=
-github.com/coreos/go-oidc/v3 v3.16.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
+github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
+github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -187,8 +187,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
-github.com/mark3labs/mcp-go v0.43.0 h1:lgiKcWMddh4sngbU+hoWOZ9iAe/qp/m851RQpj3Y7jA=
-github.com/mark3labs/mcp-go v0.43.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw=
+github.com/mark3labs/mcp-go v0.43.1 h1:WXNVd+bRM/7mOzCM9zulSwn/s9YEdAxbmeh9LoRHEXY=
+github.com/mark3labs/mcp-go v0.43.1/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
diff --git a/internal/test/mcp.go b/internal/test/mcp.go
index 174fe4eb..b330fd13 100644
--- a/internal/test/mcp.go
+++ b/internal/test/mcp.go
@@ -13,7 +13,9 @@ import (
)
func McpInitRequest() mcp.InitializeRequest {
- initRequest := mcp.InitializeRequest{}
+ initRequest := mcp.InitializeRequest{
+ Request: mcp.Request{Method: "initialize"},
+ }
initRequest.Params.ProtocolVersion = mcp.LATEST_PROTOCOL_VERSION
initRequest.Params.ClientInfo = mcp.Implementation{Name: "test", Version: "1.33.7"}
return initRequest
diff --git a/internal/test/test.go b/internal/test/test.go
index c2ccec4e..3fb39626 100644
--- a/internal/test/test.go
+++ b/internal/test/test.go
@@ -3,6 +3,7 @@ package test
import (
"fmt"
"net"
+ "net/http"
"os"
"path/filepath"
"runtime"
@@ -49,3 +50,24 @@ func WaitForServer(tcpAddr *net.TCPAddr) error {
}
return err
}
+
+// WaitForHealthz waits for the /healthz endpoint to return a non-404 response
+func WaitForHealthz(tcpAddr *net.TCPAddr) error {
+ url := fmt.Sprintf("http://%s/healthz", tcpAddr.String())
+ var resp *http.Response
+ var err error
+ for i := 0; i < 100; i++ {
+ resp, err = http.Get(url)
+ if err == nil {
+ _ = resp.Body.Close()
+ if resp.StatusCode != http.StatusNotFound {
+ return nil
+ }
+ }
+ time.Sleep(50 * time.Millisecond)
+ }
+ if err != nil {
+ return err
+ }
+ return fmt.Errorf("healthz endpoint returned 404 after retries")
+}
diff --git a/pkg/config/config.go b/pkg/config/config.go
index 5601e7f0..ce13f14a 100644
--- a/pkg/config/config.go
+++ b/pkg/config/config.go
@@ -11,9 +11,10 @@ import (
)
const (
- ClusterProviderKubeConfig = "kubeconfig"
- ClusterProviderInCluster = "in-cluster"
- ClusterProviderDisabled = "disabled"
+ ClusterProviderKubeConfig = "kubeconfig"
+ ClusterProviderInCluster = "in-cluster"
+ ClusterProviderAuthHeaders = "auth-headers"
+ ClusterProviderDisabled = "disabled"
)
// StaticConfig is the configuration for the server.
@@ -73,9 +74,9 @@ type StaticConfig struct {
ToolsetConfigs map[string]toml.Primitive `toml:"toolset_configs,omitempty"`
// Internal: parsed provider configs (not exposed to TOML package)
- parsedClusterProviderConfigs map[string]ProviderConfig
+ parsedClusterProviderConfigs map[string]Extended
// Internal: parsed toolset configs (not exposed to TOML package)
- parsedToolsetConfigs map[string]ToolsetConfig
+ parsedToolsetConfigs map[string]Extended
// Internal: the config.toml directory, to help resolve relative file paths
configDirPath string
@@ -129,87 +130,28 @@ func ReadToml(configData []byte, opts ...ReadConfigOpt) (*StaticConfig, error) {
opt(config)
}
- if err := config.parseClusterProviderConfigs(md); err != nil {
+ ctx := withConfigDirPath(context.Background(), config.configDirPath)
+
+ config.parsedClusterProviderConfigs, err = providerConfigRegistry.parse(ctx, md, config.ClusterProviderConfigs)
+ if err != nil {
return nil, err
}
- if err := config.parseToolsetConfigs(md); err != nil {
+ config.parsedToolsetConfigs, err = toolsetConfigRegistry.parse(ctx, md, config.ToolsetConfigs)
+ if err != nil {
return nil, err
}
return config, nil
}
-func (c *StaticConfig) GetProviderConfig(strategy string) (ProviderConfig, bool) {
+func (c *StaticConfig) GetProviderConfig(strategy string) (Extended, bool) {
config, ok := c.parsedClusterProviderConfigs[strategy]
return config, ok
}
-func (c *StaticConfig) parseClusterProviderConfigs(md toml.MetaData) error {
- if c.parsedClusterProviderConfigs == nil {
- c.parsedClusterProviderConfigs = make(map[string]ProviderConfig, len(c.ClusterProviderConfigs))
- }
-
- ctx := withConfigDirPath(context.Background(), c.configDirPath)
-
- for strategy, primitive := range c.ClusterProviderConfigs {
- parser, ok := getProviderConfigParser(strategy)
- if !ok {
- continue
- }
-
- providerConfig, err := parser(ctx, primitive, md)
- if err != nil {
- return fmt.Errorf("failed to parse config for ClusterProvider '%s': %w", strategy, err)
- }
-
- if err := providerConfig.Validate(); err != nil {
- return fmt.Errorf("invalid config file for ClusterProvider '%s': %w", strategy, err)
- }
-
- c.parsedClusterProviderConfigs[strategy] = providerConfig
- }
-
- return nil
-}
-
-func (c *StaticConfig) parseToolsetConfigs(md toml.MetaData) error {
- if c.parsedToolsetConfigs == nil {
- c.parsedToolsetConfigs = make(map[string]ToolsetConfig, len(c.ToolsetConfigs))
- }
-
- ctx := withConfigDirPath(context.Background(), c.configDirPath)
-
- for name, primitive := range c.ToolsetConfigs {
- parser, ok := getToolsetConfigParser(name)
- if !ok {
- continue
- }
-
- toolsetConfig, err := parser(ctx, primitive, md)
- if err != nil {
- return fmt.Errorf("failed to parse config for Toolset '%s': %w", name, err)
- }
-
- if err := toolsetConfig.Validate(); err != nil {
- return fmt.Errorf("invalid config file for Toolset '%s': %w", name, err)
- }
-
- c.parsedToolsetConfigs[name] = toolsetConfig
- }
-
- return nil
-}
-
-func (c *StaticConfig) GetToolsetConfig(name string) (ToolsetConfig, bool) {
+func (c *StaticConfig) GetToolsetConfig(name string) (Extended, bool) {
cfg, ok := c.parsedToolsetConfigs[name]
return cfg, ok
}
-
-func (c *StaticConfig) SetToolsetConfig(name string, cfg ToolsetConfig) {
- if c.parsedToolsetConfigs == nil {
- c.parsedToolsetConfigs = make(map[string]ToolsetConfig)
- }
- c.parsedToolsetConfigs[name] = cfg
-}
diff --git a/pkg/config/context.go b/pkg/config/context.go
new file mode 100644
index 00000000..e5dbd8d6
--- /dev/null
+++ b/pkg/config/context.go
@@ -0,0 +1,23 @@
+package config
+
+import "context"
+
+type configDirPathKey struct{}
+
+func withConfigDirPath(ctx context.Context, dirPath string) context.Context {
+ return context.WithValue(ctx, configDirPathKey{}, dirPath)
+}
+
+func ConfigDirPathFromContext(ctx context.Context) string {
+ val := ctx.Value(configDirPathKey{})
+
+ if val == nil {
+ return ""
+ }
+
+ if strVal, ok := val.(string); ok {
+ return strVal
+ }
+
+ return ""
+}
diff --git a/pkg/config/extended.go b/pkg/config/extended.go
new file mode 100644
index 00000000..a1f0598d
--- /dev/null
+++ b/pkg/config/extended.go
@@ -0,0 +1,61 @@
+package config
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/BurntSushi/toml"
+)
+
+// Extended is the interface that all configuration extensions must implement.
+// Each extended config manager registers a factory function to parse its config from TOML primitives.
+type Extended interface {
+ Validate() error
+}
+
+type ExtendedConfigParser func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (Extended, error)
+
+type extendedConfigRegistry struct {
+ parsers map[string]ExtendedConfigParser
+}
+
+func newExtendedConfigRegistry() *extendedConfigRegistry {
+ return &extendedConfigRegistry{
+ parsers: make(map[string]ExtendedConfigParser),
+ }
+}
+
+func (r *extendedConfigRegistry) register(name string, parser ExtendedConfigParser) {
+ if _, exists := r.parsers[name]; exists {
+ panic("extended config parser already registered for name: " + name)
+ }
+
+ r.parsers[name] = parser
+}
+
+func (r *extendedConfigRegistry) parse(ctx context.Context, metaData toml.MetaData, configs map[string]toml.Primitive) (map[string]Extended, error) {
+ if len(configs) == 0 {
+ return make(map[string]Extended), nil
+ }
+ parsedConfigs := make(map[string]Extended, len(configs))
+
+ for name, primitive := range configs {
+ parser, ok := r.parsers[name]
+ if !ok {
+ continue
+ }
+
+ extendedConfig, err := parser(ctx, primitive, metaData)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse extended config for '%s': %w", name, err)
+ }
+
+ if err = extendedConfig.Validate(); err != nil {
+ return nil, fmt.Errorf("failed to validate extended config for '%s': %w", name, err)
+ }
+
+ parsedConfigs[name] = extendedConfig
+ }
+
+ return parsedConfigs, nil
+}
diff --git a/pkg/config/provider_config.go b/pkg/config/provider_config.go
index 45dd2f8d..2e514d0a 100644
--- a/pkg/config/provider_config.go
+++ b/pkg/config/provider_config.go
@@ -1,54 +1,7 @@
package config
-import (
- "context"
- "fmt"
+var providerConfigRegistry = newExtendedConfigRegistry()
- "github.com/BurntSushi/toml"
-)
-
-// ProviderConfig is the interface that all provider-specific configurations must implement.
-// Each provider registers a factory function to parse its config from TOML primitives
-type ProviderConfig interface {
- Validate() error
-}
-
-type ProviderConfigParser func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error)
-
-type configDirPathKey struct{}
-
-func withConfigDirPath(ctx context.Context, dirPath string) context.Context {
- return context.WithValue(ctx, configDirPathKey{}, dirPath)
-}
-
-func ConfigDirPathFromContext(ctx context.Context) string {
- val := ctx.Value(configDirPathKey{})
-
- if val == nil {
- return ""
- }
-
- if strVal, ok := val.(string); ok {
- return strVal
- }
-
- return ""
-}
-
-var (
- providerConfigParsers = make(map[string]ProviderConfigParser)
-)
-
-func RegisterProviderConfig(strategy string, parser ProviderConfigParser) {
- if _, exists := providerConfigParsers[strategy]; exists {
- panic(fmt.Sprintf("provider config parser already registered for strategy '%s'", strategy))
- }
-
- providerConfigParsers[strategy] = parser
-}
-
-func getProviderConfigParser(strategy string) (ProviderConfigParser, bool) {
- provider, ok := providerConfigParsers[strategy]
-
- return provider, ok
+func RegisterProviderConfig(name string, parser ExtendedConfigParser) {
+ providerConfigRegistry.register(name, parser)
}
diff --git a/pkg/config/provider_config_test.go b/pkg/config/provider_config_test.go
index 84902da4..2afbd2d7 100644
--- a/pkg/config/provider_config_test.go
+++ b/pkg/config/provider_config_test.go
@@ -12,21 +12,16 @@ import (
type ProviderConfigSuite struct {
BaseConfigSuite
- originalProviderConfigParsers map[string]ProviderConfigParser
+ originalProviderConfigRegistry *extendedConfigRegistry
}
func (s *ProviderConfigSuite) SetupTest() {
- s.originalProviderConfigParsers = make(map[string]ProviderConfigParser)
- for k, v := range providerConfigParsers {
- s.originalProviderConfigParsers[k] = v
- }
+ s.originalProviderConfigRegistry = providerConfigRegistry
+ providerConfigRegistry = newExtendedConfigRegistry()
}
func (s *ProviderConfigSuite) TearDownTest() {
- providerConfigParsers = make(map[string]ProviderConfigParser)
- for k, v := range s.originalProviderConfigParsers {
- providerConfigParsers[k] = v
- }
+ providerConfigRegistry = s.originalProviderConfigRegistry
}
type ProviderConfigForTest struct {
@@ -35,7 +30,7 @@ type ProviderConfigForTest struct {
IntProp int `toml:"int_prop"`
}
-var _ ProviderConfig = (*ProviderConfigForTest)(nil)
+var _ Extended = (*ProviderConfigForTest)(nil)
func (p *ProviderConfigForTest) Validate() error {
if p.StrProp == "force-error" {
@@ -44,7 +39,7 @@ func (p *ProviderConfigForTest) Validate() error {
return nil
}
-func providerConfigForTestParser(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
+func providerConfigForTestParser(_ context.Context, primitive toml.Primitive, md toml.MetaData) (Extended, error) {
var providerConfigForTest ProviderConfigForTest
if err := md.PrimitiveDecode(primitive, &providerConfigForTest); err != nil {
return nil, err
@@ -133,7 +128,7 @@ func (s *ProviderConfigSuite) TestReadConfigUnregisteredProviderConfig() {
}
func (s *ProviderConfigSuite) TestReadConfigParserError() {
- RegisterProviderConfig("test", func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
+ RegisterProviderConfig("test", func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (Extended, error) {
return nil, errors.New("parser error forced by test")
})
invalidConfigPath := s.writeConfig(`
@@ -156,7 +151,7 @@ func (s *ProviderConfigSuite) TestReadConfigParserError() {
func (s *ProviderConfigSuite) TestConfigDirPathInContext() {
var capturedDirPath string
- RegisterProviderConfig("test", func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ProviderConfig, error) {
+ RegisterProviderConfig("test", func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (Extended, error) {
capturedDirPath = ConfigDirPathFromContext(ctx)
var providerConfigForTest ProviderConfigForTest
if err := md.PrimitiveDecode(primitive, &providerConfigForTest); err != nil {
diff --git a/pkg/config/toolset_config.go b/pkg/config/toolset_config.go
index fb230e71..33af6a38 100644
--- a/pkg/config/toolset_config.go
+++ b/pkg/config/toolset_config.go
@@ -1,34 +1,7 @@
package config
-import (
- "context"
- "fmt"
+var toolsetConfigRegistry = newExtendedConfigRegistry()
- "github.com/BurntSushi/toml"
-)
-
-// ToolsetConfig is the interface that all toolset-specific configurations must implement.
-// Each toolset registers a factory function to parse its config from TOML primitives
-type ToolsetConfig interface {
- Validate() error
-}
-
-type ToolsetConfigParser func(ctx context.Context, primitive toml.Primitive, md toml.MetaData) (ToolsetConfig, error)
-
-var (
- toolsetConfigParsers = make(map[string]ToolsetConfigParser)
-)
-
-func RegisterToolsetConfig(name string, parser ToolsetConfigParser) {
- if _, exists := toolsetConfigParsers[name]; exists {
- panic(fmt.Sprintf("toolset config parser already registered for toolset '%s'", name))
- }
-
- toolsetConfigParsers[name] = parser
-}
-
-func getToolsetConfigParser(name string) (ToolsetConfigParser, bool) {
- parser, ok := toolsetConfigParsers[name]
-
- return parser, ok
+func RegisterToolsetConfig(name string, parser ExtendedConfigParser) {
+ toolsetConfigRegistry.register(name, parser)
}
diff --git a/pkg/config/toolset_config_test.go b/pkg/config/toolset_config_test.go
new file mode 100644
index 00000000..86f79c66
--- /dev/null
+++ b/pkg/config/toolset_config_test.go
@@ -0,0 +1,128 @@
+package config
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ "github.com/stretchr/testify/suite"
+)
+
+type ToolsetConfigSuite struct {
+ BaseConfigSuite
+ originalToolsetConfigRegistry *extendedConfigRegistry
+}
+
+func (s *ToolsetConfigSuite) SetupTest() {
+ s.originalToolsetConfigRegistry = toolsetConfigRegistry
+ toolsetConfigRegistry = newExtendedConfigRegistry()
+}
+
+func (s *ToolsetConfigSuite) TearDownTest() {
+ toolsetConfigRegistry = s.originalToolsetConfigRegistry
+}
+
+type ToolsetConfigForTest struct {
+ Enabled bool `toml:"enabled"`
+ Endpoint string `toml:"endpoint"`
+ Timeout int `toml:"timeout"`
+}
+
+var _ Extended = (*ToolsetConfigForTest)(nil)
+
+func (t *ToolsetConfigForTest) Validate() error {
+ if t.Endpoint == "force-error" {
+ return errors.New("validation error forced by test")
+ }
+ return nil
+}
+
+func toolsetConfigForTestParser(_ context.Context, primitive toml.Primitive, md toml.MetaData) (Extended, error) {
+ var toolsetConfigForTest ToolsetConfigForTest
+ if err := md.PrimitiveDecode(primitive, &toolsetConfigForTest); err != nil {
+ return nil, err
+ }
+ return &toolsetConfigForTest, nil
+}
+
+func (s *ToolsetConfigSuite) TestRegisterToolsetConfig() {
+ s.Run("panics when registering duplicate toolset config parser", func() {
+ s.Panics(func() {
+ RegisterToolsetConfig("test-toolset", toolsetConfigForTestParser)
+ RegisterToolsetConfig("test-toolset", toolsetConfigForTestParser)
+ }, "Expected panic when registering duplicate toolset config parser")
+ })
+}
+
+func (s *ToolsetConfigSuite) TestReadConfigValid() {
+ RegisterToolsetConfig("test-toolset", toolsetConfigForTestParser)
+ validConfigPath := s.writeConfig(`
+ [toolset_configs.test-toolset]
+ enabled = true
+ endpoint = "https://example.com"
+ timeout = 30
+ `)
+
+ config, err := Read(validConfigPath)
+ s.Run("returns no error for valid file with registered toolset config", func() {
+ s.Require().NoError(err, "Expected no error for valid file, got %v", err)
+ })
+ s.Run("returns config for valid file with registered toolset config", func() {
+ s.Require().NotNil(config, "Expected non-nil config for valid file")
+ })
+ s.Run("parses toolset config correctly", func() {
+ toolsetConfig, ok := config.GetToolsetConfig("test-toolset")
+ s.Require().True(ok, "Expected to find toolset config for 'test-toolset'")
+ s.Require().NotNil(toolsetConfig, "Expected non-nil toolset config for 'test-toolset'")
+ testToolsetConfig, ok := toolsetConfig.(*ToolsetConfigForTest)
+ s.Require().True(ok, "Expected toolset config to be of type *ToolsetConfigForTest")
+ s.Equal(true, testToolsetConfig.Enabled, "Expected Enabled to be true")
+ s.Equal("https://example.com", testToolsetConfig.Endpoint, "Expected Endpoint to be 'https://example.com'")
+ s.Equal(30, testToolsetConfig.Timeout, "Expected Timeout to be 30")
+ })
+}
+
+func (s *ToolsetConfigSuite) TestReadConfigInvalidToolsetConfig() {
+ RegisterToolsetConfig("test-toolset", toolsetConfigForTestParser)
+ invalidConfigPath := s.writeConfig(`
+ [toolset_configs.test-toolset]
+ enabled = true
+ endpoint = "force-error"
+ timeout = 30
+ `)
+
+ config, err := Read(invalidConfigPath)
+ s.Run("returns error for invalid toolset config", func() {
+ s.Require().NotNil(err, "Expected error for invalid toolset config, got nil")
+ s.ErrorContains(err, "validation error forced by test", "Expected validation error from toolset config")
+ })
+ s.Run("returns nil config for invalid toolset config", func() {
+ s.Nil(config, "Expected nil config for invalid toolset config")
+ })
+}
+
+func (s *ToolsetConfigSuite) TestReadConfigUnregisteredToolsetConfig() {
+ unregisteredConfigPath := s.writeConfig(`
+ [toolset_configs.unregistered-toolset]
+ enabled = true
+ endpoint = "https://example.com"
+ timeout = 30
+ `)
+
+ config, err := Read(unregisteredConfigPath)
+ s.Run("returns no error for unregistered toolset config", func() {
+ s.Require().NoError(err, "Expected no error for unregistered toolset config, got %v", err)
+ })
+ s.Run("returns config for unregistered toolset config", func() {
+ s.Require().NotNil(config, "Expected non-nil config for unregistered toolset config")
+ })
+ s.Run("does not parse unregistered toolset config", func() {
+ _, ok := config.GetToolsetConfig("unregistered-toolset")
+ s.Require().False(ok, "Expected no toolset config for unregistered toolset")
+ })
+}
+
+func TestToolsetConfig(t *testing.T) {
+ suite.Run(t, new(ToolsetConfigSuite))
+}
diff --git a/pkg/http/http_authorization_test.go b/pkg/http/http_authorization_test.go
index 68987c00..29b1b736 100644
--- a/pkg/http/http_authorization_test.go
+++ b/pkg/http/http_authorization_test.go
@@ -31,6 +31,7 @@ func (s *AuthorizationSuite) SetupTest() {
s.BaseHttpSuite.SetupTest()
// Capture logs
+ s.logBuffer.Reset()
s.klogState = klog.CaptureState()
flags := flag.NewFlagSet("test", flag.ContinueOnError)
klog.InitFlags(flags)
@@ -59,14 +60,14 @@ func (s *AuthorizationSuite) TearDownTest() {
func (s *AuthorizationSuite) StartClient(options ...transport.StreamableHTTPCOption) {
var err error
- s.mcpClient, err = client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), options...)
+ s.mcpClient, err = client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%s/mcp", s.StaticConfig.Port), options...)
s.Require().NoError(err, "Expected no error creating Streamable HTTP MCP client")
err = s.mcpClient.Start(s.T().Context())
s.Require().NoError(err, "Expected no error starting Streamable HTTP MCP client")
}
func (s *AuthorizationSuite) HttpGet(authHeader string) *http.Response {
- req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), nil)
+ req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://127.0.0.1:%s/mcp", s.StaticConfig.Port), nil)
s.Require().NoError(err, "Failed to create request")
if authHeader != "" {
req.Header.Set("Authorization", authHeader)
@@ -324,6 +325,7 @@ func (s *AuthorizationSuite) TestAuthorizationRequireOAuthFalse() {
}
func (s *AuthorizationSuite) TestAuthorizationRawToken() {
+ s.MockServer.ResetHandlers()
tokenReviewHandler := test.NewTokenReviewHandler()
s.MockServer.Handle(tokenReviewHandler)
@@ -339,6 +341,7 @@ func (s *AuthorizationSuite) TestAuthorizationRawToken() {
for _, c := range cases {
s.StaticConfig.OAuthAudience = c.audience
s.StaticConfig.ValidateToken = c.validateToken
+ s.logBuffer.Reset()
s.StartServer()
s.StartClient(transport.WithHTTPHeaders(map[string]string{
"Authorization": "Bearer " + tokenBasicNotExpired,
@@ -362,11 +365,14 @@ func (s *AuthorizationSuite) TestAuthorizationRawToken() {
})
})
_ = s.mcpClient.Close()
+ s.mcpClient = nil
s.StopServer()
+ s.Require().NoError(s.WaitForShutdown())
}
}
func (s *AuthorizationSuite) TestAuthorizationOidcToken() {
+ s.MockServer.ResetHandlers()
tokenReviewHandler := test.NewTokenReviewHandler()
s.MockServer.Handle(tokenReviewHandler)
@@ -407,11 +413,14 @@ func (s *AuthorizationSuite) TestAuthorizationOidcToken() {
})
})
_ = s.mcpClient.Close()
+ s.mcpClient = nil
s.StopServer()
+ s.Require().NoError(s.WaitForShutdown())
}
}
func (s *AuthorizationSuite) TestAuthorizationOidcTokenExchange() {
+ s.MockServer.ResetHandlers()
tokenReviewHandler := test.NewTokenReviewHandler()
s.MockServer.Handle(tokenReviewHandler)
@@ -440,6 +449,7 @@ func (s *AuthorizationSuite) TestAuthorizationOidcTokenExchange() {
s.StaticConfig.StsClientSecret = "test-sts-client-secret"
s.StaticConfig.StsAudience = "backend-audience"
s.StaticConfig.StsScopes = []string{"backend-scope"}
+ s.logBuffer.Reset()
s.StartServer()
s.StartClient(transport.WithHTTPHeaders(map[string]string{
"Authorization": "Bearer " + validOidcClientToken,
@@ -463,7 +473,9 @@ func (s *AuthorizationSuite) TestAuthorizationOidcTokenExchange() {
})
})
_ = s.mcpClient.Close()
+ s.mcpClient = nil
s.StopServer()
+ s.Require().NoError(s.WaitForShutdown())
}
}
diff --git a/pkg/http/http_mcp_test.go b/pkg/http/http_mcp_test.go
index 2a79b4be..0bd5cd9b 100644
--- a/pkg/http/http_mcp_test.go
+++ b/pkg/http/http_mcp_test.go
@@ -25,7 +25,7 @@ func (s *McpTransportSuite) TearDownTest() {
}
func (s *McpTransportSuite) TestSseTransport() {
- sseClient, sseClientErr := client.NewSSEMCPClient(fmt.Sprintf("http://127.0.0.1:%d/sse", s.TcpAddr.Port))
+ sseClient, sseClientErr := client.NewSSEMCPClient(fmt.Sprintf("http://127.0.0.1:%s/sse", s.StaticConfig.Port))
s.Require().NoError(sseClientErr, "Expected no error creating SSE MCP client")
startErr := sseClient.Start(s.T().Context())
s.Require().NoError(startErr, "Expected no error starting SSE MCP client")
@@ -44,7 +44,7 @@ func (s *McpTransportSuite) TestSseTransport() {
}
func (s *McpTransportSuite) TestStreamableHttpTransport() {
- httpClient, httpClientErr := client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%d/mcp", s.TcpAddr.Port), transport.WithContinuousListening())
+ httpClient, httpClientErr := client.NewStreamableHttpClient(fmt.Sprintf("http://127.0.0.1:%s/mcp", s.StaticConfig.Port), transport.WithContinuousListening())
s.Require().NoError(httpClientErr, "Expected no error creating Streamable HTTP MCP client")
startErr := httpClient.Start(s.T().Context())
s.Require().NoError(startErr, "Expected no error starting Streamable HTTP MCP client")
diff --git a/pkg/http/http_test.go b/pkg/http/http_test.go
index 64c3355e..091fca2e 100644
--- a/pkg/http/http_test.go
+++ b/pkg/http/http_test.go
@@ -33,7 +33,6 @@ import (
type BaseHttpSuite struct {
suite.Suite
MockServer *test.MockServer
- TcpAddr *net.TCPAddr
StaticConfig *config.StaticConfig
mcpServer *mcp.Server
OidcProvider *oidc.Provider
@@ -43,18 +42,19 @@ type BaseHttpSuite struct {
}
func (s *BaseHttpSuite) SetupTest() {
- var err error
http.DefaultClient.Timeout = 10 * time.Second
s.MockServer = test.NewMockServer()
- s.TcpAddr, err = test.RandomPortAddress()
- s.Require().NoError(err, "Expected no error getting random port address")
+ s.MockServer.Handle(&test.DiscoveryClientHandler{})
s.StaticConfig = config.Default()
s.StaticConfig.KubeConfig = s.MockServer.KubeconfigFile(s.T())
- s.StaticConfig.Port = strconv.Itoa(s.TcpAddr.Port)
}
func (s *BaseHttpSuite) StartServer() {
- var err error
+
+ tcpAddr, err := test.RandomPortAddress()
+ s.Require().NoError(err, "Expected no error getting random port address")
+ s.StaticConfig.Port = strconv.Itoa(tcpAddr.Port)
+
s.mcpServer, err = mcp.NewServer(mcp.Configuration{StaticConfig: s.StaticConfig})
s.Require().NoError(err, "Expected no error creating MCP server")
s.Require().NotNil(s.mcpServer, "MCP server should not be nil")
@@ -64,7 +64,8 @@ func (s *BaseHttpSuite) StartServer() {
cancelCtx, s.StopServer = context.WithCancel(gc)
group.Go(func() error { return Serve(cancelCtx, s.mcpServer, s.StaticConfig, s.OidcProvider, nil) })
s.WaitForShutdown = group.Wait
- s.Require().NoError(test.WaitForServer(s.TcpAddr), "HTTP server did not start in time")
+ s.Require().NoError(test.WaitForServer(tcpAddr), "HTTP server did not start in time")
+ s.Require().NoError(test.WaitForHealthz(tcpAddr), "HTTP server /healthz endpoint did not respond with non-404 in time")
}
func (s *BaseHttpSuite) TearDownTest() {
diff --git a/pkg/kiali/config.go b/pkg/kiali/config.go
index 82e8d7f3..1e64b4b0 100644
--- a/pkg/kiali/config.go
+++ b/pkg/kiali/config.go
@@ -17,7 +17,7 @@ type Config struct {
CertificateAuthority string `toml:"certificate_authority,omitempty"`
}
-var _ config.ToolsetConfig = (*Config)(nil)
+var _ config.Extended = (*Config)(nil)
func (c *Config) Validate() error {
if c == nil {
@@ -36,7 +36,7 @@ func (c *Config) Validate() error {
return nil
}
-func kialiToolsetParser(_ context.Context, primitive toml.Primitive, md toml.MetaData) (config.ToolsetConfig, error) {
+func kialiToolsetParser(_ context.Context, primitive toml.Primitive, md toml.MetaData) (config.Extended, error) {
var cfg Config
if err := md.PrimitiveDecode(primitive, &cfg); err != nil {
return nil, err
diff --git a/pkg/kiali/kiali.go b/pkg/kiali/kiali.go
index 5d777e8a..2755ddca 100644
--- a/pkg/kiali/kiali.go
+++ b/pkg/kiali/kiali.go
@@ -60,12 +60,19 @@ func (k *Kiali) validateAndGetURL(endpoint string) (string, error) {
if err != nil {
return "", fmt.Errorf("invalid endpoint path: %w", err)
}
+ // Reject absolute URLs - endpoint should be a relative path
+ if endpointURL.Scheme != "" || endpointURL.Host != "" {
+ return "", fmt.Errorf("endpoint must be a relative path, not an absolute URL")
+ }
resultURL, err := url.JoinPath(baseURL.String(), endpointURL.Path)
if err != nil {
return "", fmt.Errorf("failed to join kiali base URL with endpoint path: %w", err)
}
- u, _ := url.Parse(resultURL)
+ u, err := url.Parse(resultURL)
+ if err != nil {
+ return "", fmt.Errorf("failed to parse joined URL: %w", err)
+ }
u.RawQuery = endpointURL.RawQuery
u.Fragment = endpointURL.Fragment
@@ -145,7 +152,10 @@ func (k *Kiali) executeRequest(ctx context.Context, method, endpoint, contentTyp
return "", err
}
defer func() { _ = resp.Body.Close() }()
- respBody, _ := io.ReadAll(resp.Body)
+ respBody, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", fmt.Errorf("failed to read response body: %w", err)
+ }
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
if len(respBody) > 0 {
return "", fmt.Errorf("kiali API error: %s", strings.TrimSpace(string(respBody)))
diff --git a/pkg/kiali/kiali_test.go b/pkg/kiali/kiali_test.go
index 5028c32b..403bfd2d 100644
--- a/pkg/kiali/kiali_test.go
+++ b/pkg/kiali/kiali_test.go
@@ -128,6 +128,49 @@ func (s *KialiSuite) TestValidateAndGetURL() {
s.Equal("true", u.Query().Get("health"), "Unexpected query parameter health")
})
})
+
+ s.Run("Rejects absolute URLs in endpoint", func() {
+ s.Config = test.Must(config.ReadToml([]byte(`
+ [toolset_configs.kiali]
+ url = "https://kiali.example/"
+ insecure = true
+ `)))
+ k := NewKiali(s.Config, s.MockServer.Config())
+
+ s.Run("rejects http URLs", func() {
+ _, err := k.validateAndGetURL("http://other-server.com/api")
+ s.Require().Error(err, "Expected error for absolute URL")
+ s.ErrorContains(err, "endpoint must be a relative path", "Unexpected error message")
+ })
+
+ s.Run("rejects https URLs", func() {
+ _, err := k.validateAndGetURL("https://other-server.com/api")
+ s.Require().Error(err, "Expected error for absolute URL")
+ s.ErrorContains(err, "endpoint must be a relative path", "Unexpected error message")
+ })
+
+ s.Run("rejects URLs with host but no scheme", func() {
+ _, err := k.validateAndGetURL("//other-server.com/api")
+ s.Require().Error(err, "Expected error for URL with host")
+ s.ErrorContains(err, "endpoint must be a relative path", "Unexpected error message")
+ })
+ })
+
+ s.Run("Preserves fragment in endpoint", func() {
+ s.Config = test.Must(config.ReadToml([]byte(`
+ [toolset_configs.kiali]
+ url = "https://kiali.example/"
+ insecure = true
+ `)))
+ k := NewKiali(s.Config, s.MockServer.Config())
+
+ full, err := k.validateAndGetURL("/api/path#section")
+ s.Require().NoError(err, "Expected no error validating URL with fragment")
+ u, err := url.Parse(full)
+ s.Require().NoError(err, "Expected to parse full URL")
+ s.Equal("/api/path", u.Path, "Unexpected path in parsed URL")
+ s.Equal("section", u.Fragment, "Unexpected fragment in parsed URL")
+ })
}
// CurrentAuthorizationHeader behavior is now implicit via executeRequest using Manager.BearerToken
diff --git a/pkg/kubernetes/accesscontrol.go b/pkg/kubernetes/accesscontrol.go
index e35b5dfb..276009a4 100644
--- a/pkg/kubernetes/accesscontrol.go
+++ b/pkg/kubernetes/accesscontrol.go
@@ -1,40 +1 @@
package kubernetes
-
-import (
- "fmt"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
-)
-
-// isAllowed checks the resource is in denied list or not.
-// If it is in denied list, this function returns false.
-func isAllowed(
- staticConfig *config.StaticConfig, // TODO: maybe just use the denied resource slice
- gvk *schema.GroupVersionKind,
-) bool {
- if staticConfig == nil {
- return true
- }
-
- for _, val := range staticConfig.DeniedResources {
- // If kind is empty, that means Group/Version pair is denied entirely
- if val.Kind == "" {
- if gvk.Group == val.Group && gvk.Version == val.Version {
- return false
- }
- }
- if gvk.Group == val.Group &&
- gvk.Version == val.Version &&
- gvk.Kind == val.Kind {
- return false
- }
- }
-
- return true
-}
-
-func isNotAllowedError(gvk *schema.GroupVersionKind) error {
- return fmt.Errorf("resource not allowed: %s", gvk.String())
-}
diff --git a/pkg/kubernetes/accesscontrol_clientset.go b/pkg/kubernetes/accesscontrol_clientset.go
index a6c3fccd..e871bd96 100644
--- a/pkg/kubernetes/accesscontrol_clientset.go
+++ b/pkg/kubernetes/accesscontrol_clientset.go
@@ -1,204 +1,113 @@
package kubernetes
import (
- "context"
"fmt"
+ "net/http"
- authenticationv1api "k8s.io/api/authentication/v1"
- authorizationv1api "k8s.io/api/authorization/v1"
- v1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/apimachinery/pkg/util/httpstream"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
+ "k8s.io/client-go/discovery/cached/memory"
+ "k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1"
authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
- "k8s.io/client-go/tools/remotecommand"
- "k8s.io/metrics/pkg/apis/metrics"
- metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1"
+ "k8s.io/client-go/restmapper"
metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
)
// AccessControlClientset is a limited clientset delegating interface to the standard kubernetes.Clientset
// Only a limited set of functions are implemented with a single point of access to the kubernetes API where
// apiVersion and kinds are checked for allowed access
type AccessControlClientset struct {
- cfg *rest.Config
- delegate kubernetes.Interface
- discoveryClient discovery.DiscoveryInterface
+ cfg *rest.Config
+ kubernetes.Interface
+ restMapper meta.ResettableRESTMapper
+ discoveryClient discovery.CachedDiscoveryInterface
+ dynamicClient dynamic.Interface
metricsV1beta1 *metricsv1beta1.MetricsV1beta1Client
- staticConfig *config.StaticConfig // TODO: maybe just store the denied resource slice
-}
-
-func (a *AccessControlClientset) DiscoveryClient() discovery.DiscoveryInterface {
- return a.discoveryClient
}
-func (a *AccessControlClientset) Nodes() (corev1.NodeInterface, error) {
- gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
+func NewAccessControlClientset(staticConfig *config.StaticConfig, restConfig *rest.Config) (*AccessControlClientset, error) {
+	// Work on a defensive copy so the caller's rest.Config is never mutated.
+ acc := &AccessControlClientset{
+ cfg: rest.CopyConfig(restConfig),
}
- return a.delegate.CoreV1().Nodes(), nil
-}
-
-func (a *AccessControlClientset) NodesLogs(ctx context.Context, name string) (*rest.Request, error) {
- gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
+ if acc.cfg.UserAgent == "" {
+ acc.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
}
-
- if _, err := a.delegate.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil {
- return nil, fmt.Errorf("failed to get node %s: %w", name, err)
+ acc.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
+ return &AccessControlRoundTripper{
+ delegate: original,
+ staticConfig: staticConfig,
+ restMapper: acc.restMapper,
+ }
+ })
+ discoveryClient, err := discovery.NewDiscoveryClientForConfig(acc.cfg)
+ if err != nil {
+		return nil, fmt.Errorf("failed to create discovery client: %w", err)
}
-
- url := []string{"api", "v1", "nodes", name, "proxy", "logs"}
- return a.delegate.CoreV1().RESTClient().
- Get().
- AbsPath(url...), nil
-}
-
-func (a *AccessControlClientset) NodesMetricses(ctx context.Context, name string, listOptions metav1.ListOptions) (*metrics.NodeMetricsList, error) {
- gvk := &schema.GroupVersionKind{Group: metrics.GroupName, Version: metricsv1beta1api.SchemeGroupVersion.Version, Kind: "NodeMetrics"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
+ acc.discoveryClient = memory.NewMemCacheClient(discoveryClient)
+ acc.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(acc.discoveryClient)
+ acc.Interface, err = kubernetes.NewForConfig(acc.cfg)
+ if err != nil {
+ return nil, err
}
- versionedMetrics := &metricsv1beta1api.NodeMetricsList{}
- var err error
- if name != "" {
- m, err := a.metricsV1beta1.NodeMetricses().Get(ctx, name, metav1.GetOptions{})
- if err != nil {
- return nil, fmt.Errorf("failed to get metrics for node %s: %w", name, err)
- }
- versionedMetrics.Items = []metricsv1beta1api.NodeMetrics{*m}
- } else {
- versionedMetrics, err = a.metricsV1beta1.NodeMetricses().List(ctx, listOptions)
- if err != nil {
- return nil, fmt.Errorf("failed to list node metrics: %w", err)
- }
+ acc.dynamicClient, err = dynamic.NewForConfig(acc.cfg)
+ if err != nil {
+ return nil, err
}
- convertedMetrics := &metrics.NodeMetricsList{}
- return convertedMetrics, metricsv1beta1api.Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(versionedMetrics, convertedMetrics, nil)
+ acc.metricsV1beta1, err = metricsv1beta1.NewForConfig(acc.cfg)
+ if err != nil {
+ return nil, err
+ }
+ return acc, nil
}
-func (a *AccessControlClientset) NodesStatsSummary(ctx context.Context, name string) (*rest.Request, error) {
- gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
+func (a *AccessControlClientset) RESTMapper() meta.ResettableRESTMapper {
+ return a.restMapper
+}
- if _, err := a.delegate.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil {
- return nil, fmt.Errorf("failed to get node %s: %w", name, err)
- }
+func (a *AccessControlClientset) DiscoveryClient() discovery.CachedDiscoveryInterface {
+ return a.discoveryClient
+}
- url := []string{"api", "v1", "nodes", name, "proxy", "stats", "summary"}
- return a.delegate.CoreV1().RESTClient().
- Get().
- AbsPath(url...), nil
+func (a *AccessControlClientset) DynamicClient() dynamic.Interface {
+ return a.dynamicClient
}
-func (a *AccessControlClientset) Pods(namespace string) (corev1.PodInterface, error) {
- gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- return a.delegate.CoreV1().Pods(namespace), nil
+func (a *AccessControlClientset) MetricsV1beta1Client() *metricsv1beta1.MetricsV1beta1Client {
+ return a.metricsV1beta1
}
-func (a *AccessControlClientset) PodsExec(namespace, name string, podExecOptions *v1.PodExecOptions) (remotecommand.Executor, error) {
- gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- // Compute URL
- // https://github.com/kubernetes/kubectl/blob/5366de04e168bcbc11f5e340d131a9ca8b7d0df4/pkg/cmd/exec/exec.go#L382-L397
- execRequest := a.delegate.CoreV1().RESTClient().
- Post().
- Resource("pods").
- Namespace(namespace).
- Name(name).
- SubResource("exec")
- execRequest.VersionedParams(podExecOptions, ParameterCodec)
- spdyExec, err := remotecommand.NewSPDYExecutor(a.cfg, "POST", execRequest.URL())
- if err != nil {
- return nil, err
- }
- webSocketExec, err := remotecommand.NewWebSocketExecutor(a.cfg, "GET", execRequest.URL().String())
- if err != nil {
- return nil, err
- }
- return remotecommand.NewFallbackExecutor(webSocketExec, spdyExec, func(err error) bool {
- return httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err)
- })
+// Nodes returns NodeInterface
+// Deprecated: use CoreV1().Nodes() directly
+func (a *AccessControlClientset) Nodes() (corev1.NodeInterface, error) {
+ return a.CoreV1().Nodes(), nil
}
-func (a *AccessControlClientset) PodsMetricses(ctx context.Context, namespace, name string, listOptions metav1.ListOptions) (*metrics.PodMetricsList, error) {
- gvk := &schema.GroupVersionKind{Group: metrics.GroupName, Version: metricsv1beta1api.SchemeGroupVersion.Version, Kind: "PodMetrics"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- versionedMetrics := &metricsv1beta1api.PodMetricsList{}
- var err error
- if name != "" {
- m, err := a.metricsV1beta1.PodMetricses(namespace).Get(ctx, name, metav1.GetOptions{})
- if err != nil {
- return nil, fmt.Errorf("failed to get metrics for pod %s/%s: %w", namespace, name, err)
- }
- versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m}
- } else {
- versionedMetrics, err = a.metricsV1beta1.PodMetricses(namespace).List(ctx, listOptions)
- if err != nil {
- return nil, fmt.Errorf("failed to list pod metrics in namespace %s: %w", namespace, err)
- }
- }
- convertedMetrics := &metrics.PodMetricsList{}
- return convertedMetrics, metricsv1beta1api.Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(versionedMetrics, convertedMetrics, nil)
+// Pods returns PodInterface
+// Deprecated: use CoreV1().Pods(namespace) directly
+func (a *AccessControlClientset) Pods(namespace string) (corev1.PodInterface, error) {
+ return a.CoreV1().Pods(namespace), nil
}
+// Services returns ServiceInterface
+// Deprecated: use CoreV1().Services(namespace) directly
func (a *AccessControlClientset) Services(namespace string) (corev1.ServiceInterface, error) {
- gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- return a.delegate.CoreV1().Services(namespace), nil
+ return a.CoreV1().Services(namespace), nil
}
+// SelfSubjectAccessReviews returns SelfSubjectAccessReviewInterface
+// Deprecated: use AuthorizationV1().SelfSubjectAccessReviews() directly
func (a *AccessControlClientset) SelfSubjectAccessReviews() (authorizationv1.SelfSubjectAccessReviewInterface, error) {
- gvk := &schema.GroupVersionKind{Group: authorizationv1api.GroupName, Version: authorizationv1api.SchemeGroupVersion.Version, Kind: "SelfSubjectAccessReview"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- return a.delegate.AuthorizationV1().SelfSubjectAccessReviews(), nil
+ return a.AuthorizationV1().SelfSubjectAccessReviews(), nil
}
// TokenReview returns TokenReviewInterface
+// Deprecated: use AuthenticationV1().TokenReviews() directly
func (a *AccessControlClientset) TokenReview() (authenticationv1.TokenReviewInterface, error) {
- gvk := &schema.GroupVersionKind{Group: authenticationv1api.GroupName, Version: authorizationv1api.SchemeGroupVersion.Version, Kind: "TokenReview"}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- return a.delegate.AuthenticationV1().TokenReviews(), nil
-}
-
-func NewAccessControlClientset(cfg *rest.Config, staticConfig *config.StaticConfig) (*AccessControlClientset, error) {
- clientSet, err := kubernetes.NewForConfig(cfg)
- if err != nil {
- return nil, err
- }
- metricsClient, err := metricsv1beta1.NewForConfig(cfg)
- if err != nil {
- return nil, err
- }
- return &AccessControlClientset{
- cfg: cfg,
- delegate: clientSet,
- discoveryClient: clientSet.DiscoveryClient,
- metricsV1beta1: metricsClient,
- staticConfig: staticConfig,
- }, nil
+ return a.AuthenticationV1().TokenReviews(), nil
}
diff --git a/pkg/kubernetes/accesscontrol_restmapper.go b/pkg/kubernetes/accesscontrol_restmapper.go
deleted file mode 100644
index 06269480..00000000
--- a/pkg/kubernetes/accesscontrol_restmapper.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package kubernetes
-
-import (
- "k8s.io/apimachinery/pkg/api/meta"
- "k8s.io/apimachinery/pkg/runtime/schema"
- "k8s.io/client-go/restmapper"
-
- "github.com/containers/kubernetes-mcp-server/pkg/config"
-)
-
-type AccessControlRESTMapper struct {
- delegate *restmapper.DeferredDiscoveryRESTMapper
- staticConfig *config.StaticConfig // TODO: maybe just store the denied resource slice
-}
-
-var _ meta.RESTMapper = &AccessControlRESTMapper{}
-
-func (a AccessControlRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
- gvk, err := a.delegate.KindFor(resource)
- if err != nil {
- return schema.GroupVersionKind{}, err
- }
- if !isAllowed(a.staticConfig, &gvk) {
- return schema.GroupVersionKind{}, isNotAllowedError(&gvk)
- }
- return gvk, nil
-}
-
-func (a AccessControlRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
- gvks, err := a.delegate.KindsFor(resource)
- if err != nil {
- return nil, err
- }
- for i := range gvks {
- if !isAllowed(a.staticConfig, &gvks[i]) {
- return nil, isNotAllowedError(&gvks[i])
- }
- }
- return gvks, nil
-}
-
-func (a AccessControlRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
- return a.delegate.ResourceFor(input)
-}
-
-func (a AccessControlRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
- return a.delegate.ResourcesFor(input)
-}
-
-func (a AccessControlRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
- for _, version := range versions {
- gvk := &schema.GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- }
- return a.delegate.RESTMapping(gk, versions...)
-}
-
-func (a AccessControlRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
- for _, version := range versions {
- gvk := &schema.GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind}
- if !isAllowed(a.staticConfig, gvk) {
- return nil, isNotAllowedError(gvk)
- }
- }
- return a.delegate.RESTMappings(gk, versions...)
-}
-
-func (a AccessControlRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
- return a.delegate.ResourceSingularizer(resource)
-}
-
-func (a AccessControlRESTMapper) Reset() {
- a.delegate.Reset()
-}
-
-func NewAccessControlRESTMapper(delegate *restmapper.DeferredDiscoveryRESTMapper, staticConfig *config.StaticConfig) *AccessControlRESTMapper {
- return &AccessControlRESTMapper{delegate: delegate, staticConfig: staticConfig}
-}
diff --git a/pkg/kubernetes/accesscontrol_round_tripper.go b/pkg/kubernetes/accesscontrol_round_tripper.go
new file mode 100644
index 00000000..c818bb71
--- /dev/null
+++ b/pkg/kubernetes/accesscontrol_round_tripper.go
@@ -0,0 +1,96 @@
+package kubernetes
+
+import (
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+type AccessControlRoundTripper struct {
+ delegate http.RoundTripper
+ staticConfig *config.StaticConfig
+ restMapper meta.RESTMapper
+}
+
+func (rt *AccessControlRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ gvr, ok := parseURLToGVR(req.URL.Path)
+ // Not an API resource request, just pass through
+ if !ok {
+ return rt.delegate.RoundTrip(req)
+ }
+
+ gvk, err := rt.restMapper.KindFor(gvr)
+ if err != nil {
+ return nil, fmt.Errorf("failed to make request: AccessControlRoundTripper failed to get kind for gvr %v: %w", gvr, err)
+ }
+ if !rt.isAllowed(gvk) {
+ return nil, fmt.Errorf("resource not allowed: %s", gvk.String())
+ }
+
+ return rt.delegate.RoundTrip(req)
+}
+
+// isAllowed reports whether the given GroupVersionKind is permitted, i.e.
+// it is not present in the configured denied-resources list.
+func (rt *AccessControlRoundTripper) isAllowed(
+ gvk schema.GroupVersionKind,
+) bool {
+ if rt.staticConfig == nil {
+ return true
+ }
+
+ for _, val := range rt.staticConfig.DeniedResources {
+ // If kind is empty, that means Group/Version pair is denied entirely
+ if val.Kind == "" {
+ if gvk.Group == val.Group && gvk.Version == val.Version {
+ return false
+ }
+ }
+ if gvk.Group == val.Group &&
+ gvk.Version == val.Version &&
+ gvk.Kind == val.Kind {
+ return false
+ }
+ }
+
+ return true
+}
+
+func parseURLToGVR(path string) (gvr schema.GroupVersionResource, ok bool) {
+ parts := strings.Split(strings.Trim(path, "/"), "/")
+
+ gvr = schema.GroupVersionResource{}
+ switch parts[0] {
+ case "api":
+ // /api or /api/v1 are discovery endpoints
+ if len(parts) < 3 {
+ return
+ }
+ gvr.Group = ""
+ gvr.Version = parts[1]
+ if parts[2] == "namespaces" && len(parts) > 4 {
+ gvr.Resource = parts[4]
+ } else {
+ gvr.Resource = parts[2]
+ }
+ case "apis":
+ // /apis, /apis/apps, or /apis/apps/v1 are discovery endpoints
+ if len(parts) < 4 {
+ return
+ }
+ gvr.Group = parts[1]
+ gvr.Version = parts[2]
+ if parts[3] == "namespaces" && len(parts) > 5 {
+ gvr.Resource = parts[5]
+ } else {
+ gvr.Resource = parts[3]
+ }
+ default:
+ return
+ }
+ return gvr, true
+}
diff --git a/pkg/kubernetes/accesscontrol_round_tripper_test.go b/pkg/kubernetes/accesscontrol_round_tripper_test.go
new file mode 100644
index 00000000..8706df20
--- /dev/null
+++ b/pkg/kubernetes/accesscontrol_round_tripper_test.go
@@ -0,0 +1,247 @@
+package kubernetes
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/BurntSushi/toml"
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/suite"
+ "k8s.io/client-go/discovery/cached/memory"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/restmapper"
+)
+
+type mockRoundTripper struct {
+ called *bool
+ onRequest func(w http.ResponseWriter, r *http.Request)
+}
+
+func (m *mockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ *m.called = true
+ rec := httptest.NewRecorder()
+ m.onRequest(rec, req)
+ return rec.Result(), nil
+}
+
+type AccessControlRoundTripperTestSuite struct {
+ suite.Suite
+ mockServer *test.MockServer
+ restMapper *restmapper.DeferredDiscoveryRESTMapper
+}
+
+func (s *AccessControlRoundTripperTestSuite) SetupTest() {
+ s.mockServer = test.NewMockServer()
+ s.mockServer.Handle(&test.DiscoveryClientHandler{})
+
+ clientSet, err := kubernetes.NewForConfig(s.mockServer.Config())
+ s.Require().NoError(err, "Expected no error creating clientset")
+
+ s.restMapper = restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(clientSet.Discovery()))
+}
+
+func (s *AccessControlRoundTripperTestSuite) TearDownTest() {
+ s.mockServer.Close()
+}
+
+func (s *AccessControlRoundTripperTestSuite) TestRoundTripForNonAPIResources() {
+ delegateCalled := false
+ mockDelegate := &mockRoundTripper{
+ called: &delegateCalled,
+ onRequest: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ },
+ }
+
+ rt := &AccessControlRoundTripper{
+ delegate: mockDelegate,
+ staticConfig: nil,
+ restMapper: s.restMapper,
+ }
+
+ testCases := []string{"healthz", "readyz", "livez", "metrics", "version"}
+ for _, testCase := range testCases {
+ s.Run("/"+testCase+" check endpoint bypasses access control", func() {
+ delegateCalled = false
+ resp, err := rt.RoundTrip(httptest.NewRequest("GET", "/"+testCase, nil))
+ s.NoError(err)
+ s.NotNil(resp)
+ s.Equal(http.StatusOK, resp.StatusCode)
+ s.Truef(delegateCalled, "Expected delegate to be called for /%s", testCase)
+ })
+ }
+}
+
+func (s *AccessControlRoundTripperTestSuite) TestRoundTripForDiscoveryRequests() {
+ delegateCalled := false
+ mockDelegate := &mockRoundTripper{
+ called: &delegateCalled,
+ onRequest: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ },
+ }
+
+ rt := &AccessControlRoundTripper{
+ delegate: mockDelegate,
+ staticConfig: nil,
+ restMapper: s.restMapper,
+ }
+
+ testCases := []string{"/api", "/apis", "/api/v1", "/api/v1/", "/apis/apps", "/apis/apps/v1", "/apis/batch/v1"}
+ for _, testCase := range testCases {
+ s.Run("API Discovery endpoint "+testCase+" bypasses access control", func() {
+ delegateCalled = false
+ resp, err := rt.RoundTrip(httptest.NewRequest("GET", testCase, nil))
+ s.NoError(err)
+ s.NotNil(resp)
+ s.Equal(http.StatusOK, resp.StatusCode)
+			s.True(delegateCalled, "Expected delegate to be called for discovery endpoint")
+ })
+ }
+}
+
+func (s *AccessControlRoundTripperTestSuite) TestRoundTripForAllowedAPIResources() {
+ delegateCalled := false
+ mockDelegate := &mockRoundTripper{
+ called: &delegateCalled,
+ onRequest: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ },
+ }
+
+ rt := &AccessControlRoundTripper{
+ delegate: mockDelegate,
+ staticConfig: nil, // nil config allows all resources
+ restMapper: s.restMapper,
+ }
+
+ s.Run("List all pods is allowed", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/pods", nil)
+ resp, err := rt.RoundTrip(req)
+ s.NoError(err)
+ s.NotNil(resp)
+ s.Equal(http.StatusOK, resp.StatusCode)
+ s.True(delegateCalled, "Expected delegate to be called for listing pods")
+ })
+
+ s.Run("List pods in namespace is allowed", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/namespaces/default/pods", nil)
+ resp, err := rt.RoundTrip(req)
+ s.NoError(err)
+ s.NotNil(resp)
+ s.True(delegateCalled, "Expected delegate to be called for namespaced pods list")
+ })
+
+ s.Run("Get specific pod is allowed", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/my-pod", nil)
+ resp, err := rt.RoundTrip(req)
+ s.NoError(err)
+ s.NotNil(resp)
+ s.True(delegateCalled, "Expected delegate to be called for getting specific pod")
+ })
+
+ s.Run("Resource path with trailing slash is allowed", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/pods/", nil)
+ resp, err := rt.RoundTrip(req)
+ s.NoError(err)
+ s.NotNil(resp)
+ s.True(delegateCalled, "Expected delegate to be called for path with trailing slash")
+ })
+
+ s.Run("List Deployments is allowed", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/apis/apps/v1/deployments", nil)
+ resp, err := rt.RoundTrip(req)
+ s.NoError(err)
+ s.NotNil(resp)
+ s.True(delegateCalled, "Expected delegate to be called for listing deployments")
+ })
+
+ s.Run("List Deployments in namespace is allowed", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/apis/apps/v1/namespaces/default/deployments", nil)
+ resp, err := rt.RoundTrip(req)
+ s.NoError(err)
+ s.NotNil(resp)
+ s.True(delegateCalled, "Expected delegate to be called for namespaced deployments list")
+ })
+}
+
+func (s *AccessControlRoundTripperTestSuite) TestRoundTripForDeniedAPIResources() {
+ delegateCalled := false
+ mockDelegate := &mockRoundTripper{
+ called: &delegateCalled,
+ onRequest: func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ },
+ }
+ rt := &AccessControlRoundTripper{
+ delegate: mockDelegate,
+ staticConfig: config.Default(),
+ restMapper: s.restMapper,
+ }
+
+ s.Run("Specific resource kind is denied", func() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { version = "v1", kind = "Pod" } ]
+ `), rt.staticConfig), "Expected to parse denied resources config")
+
+ s.Run("List pods is denied", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/pods", nil)
+ resp, err := rt.RoundTrip(req)
+ s.Error(err)
+ s.Nil(resp)
+ s.False(delegateCalled, "Expected delegate not to be called for denied resource")
+ s.Contains(err.Error(), "resource not allowed")
+ s.Contains(err.Error(), "Pod")
+ })
+
+ s.Run("Get specific pod is denied", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/namespaces/default/pods/my-pod", nil)
+ resp, err := rt.RoundTrip(req)
+ s.Error(err)
+ s.Nil(resp)
+ s.False(delegateCalled)
+ s.Contains(err.Error(), "resource not allowed")
+ })
+ })
+
+ s.Run("Entire group/version is denied", func() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { version = "v1", kind = "" } ]
+		`), rt.staticConfig), "Expected to parse v1 denied resources config")
+
+ s.Run("Pods in core/v1 are denied", func() {
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/pods", nil)
+ resp, err := rt.RoundTrip(req)
+ s.Error(err)
+ s.Nil(resp)
+ s.False(delegateCalled)
+ })
+
+ })
+
+ s.Run("RESTMapper error for unknown resource", func() {
+ rt.staticConfig = nil
+ delegateCalled = false
+ req := httptest.NewRequest("GET", "/api/v1/unknownresources", nil)
+ resp, err := rt.RoundTrip(req)
+ s.Error(err)
+ s.Nil(resp)
+ s.False(delegateCalled, "Expected delegate not to be called when RESTMapper fails")
+ s.Contains(err.Error(), "failed to make request")
+ })
+}
+
+func TestAccessControlRoundTripper(t *testing.T) {
+ suite.Run(t, new(AccessControlRoundTripperTestSuite))
+}
diff --git a/pkg/kubernetes/auth_headers.go b/pkg/kubernetes/auth_headers.go
new file mode 100644
index 00000000..5b0c22e3
--- /dev/null
+++ b/pkg/kubernetes/auth_headers.go
@@ -0,0 +1,108 @@
+package kubernetes
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+)
+
+// AuthType represents the type of Kubernetes authentication.
+type AuthType string
+type ContextKey string
+
+const (
+ // AuthHeadersContextKey is the context key for the Kubernetes authentication headers.
+ AuthHeadersContextKey ContextKey = "k8s_auth_headers"
+)
+
+// K8sAuthHeaders represents Kubernetes API authentication headers.
+type K8sAuthHeaders struct {
+ // Server is the Kubernetes cluster URL.
+ Server string
+	// CertificateAuthorityData is the Certificate Authority data.
+ CertificateAuthorityData []byte
+ // AuthorizationToken is the optional bearer token for authentication.
+ AuthorizationToken string
+ // ClientCertificateData is the optional client certificate data.
+ ClientCertificateData []byte
+ // ClientKeyData is the optional client key data.
+ ClientKeyData []byte
+ // InsecureSkipTLSVerify is the optional flag to skip TLS verification.
+ InsecureSkipTLSVerify bool
+}
+
+// GetDecodedData decodes and returns the data.
+func GetDecodedData(data string) ([]byte, error) {
+ return base64.StdEncoding.DecodeString(data)
+}
+
+// NewK8sAuthHeadersFromHeaders creates a new K8sAuthHeaders from the provided headers.
+func NewK8sAuthHeadersFromHeaders(data map[string]any) (*K8sAuthHeaders, error) {
+ var ok bool
+ var err error
+
+ // Initialize auth headers with default values.
+ authHeaders := &K8sAuthHeaders{
+ InsecureSkipTLSVerify: false,
+ }
+
+ // Get cluster URL from headers.
+ authHeaders.Server, ok = data[string(CustomServerHeader)].(string)
+ if !ok || authHeaders.Server == "" {
+ return nil, fmt.Errorf("%s header is required", CustomServerHeader)
+ }
+
+ // Get certificate authority data from headers.
+ certificateAuthorityDataBase64, ok := data[string(CustomCertificateAuthorityDataHeader)].(string)
+ if !ok || certificateAuthorityDataBase64 == "" {
+ return nil, fmt.Errorf("%s header is required", CustomCertificateAuthorityDataHeader)
+ }
+ // Decode certificate authority data.
+ authHeaders.CertificateAuthorityData, err = GetDecodedData(certificateAuthorityDataBase64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid certificate authority data: %w", err)
+ }
+
+ // Get insecure skip TLS verify flag from headers.
+ if data[string(CustomInsecureSkipTLSVerifyHeader)] != nil && strings.ToLower(data[string(CustomInsecureSkipTLSVerifyHeader)].(string)) == "true" {
+ authHeaders.InsecureSkipTLSVerify = true
+ }
+
+ // Get authorization token from headers.
+ authHeaders.AuthorizationToken, _ = data[string(CustomAuthorizationHeader)].(string)
+
+ // Get client certificate data from headers.
+ clientCertificateDataBase64, _ := data[string(CustomClientCertificateDataHeader)].(string)
+ if clientCertificateDataBase64 != "" {
+ authHeaders.ClientCertificateData, err = GetDecodedData(clientCertificateDataBase64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid client certificate data: %w", err)
+ }
+ }
+ // Get client key data from headers.
+ clientKeyDataBase64, _ := data[string(CustomClientKeyDataHeader)].(string)
+ if clientKeyDataBase64 != "" {
+ authHeaders.ClientKeyData, err = GetDecodedData(clientKeyDataBase64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid client key data: %w", err)
+ }
+ }
+
+ // Check if a valid authentication type is provided.
+ if !authHeaders.IsValid() {
+ return nil, fmt.Errorf("either %s header for token authentication or (%s and %s) headers for client certificate authentication required", CustomAuthorizationHeader, CustomClientCertificateDataHeader, CustomClientKeyDataHeader)
+ }
+
+ return authHeaders, nil
+}
+
+// IsValid checks if the authentication headers are valid.
+func (h *K8sAuthHeaders) IsValid() bool {
+ if h.AuthorizationToken != "" {
+ return true
+ }
+ if len(h.ClientCertificateData) > 0 && len(h.ClientKeyData) > 0 {
+ return true
+ }
+ return false
+}
diff --git a/pkg/kubernetes/auth_headers_test.go b/pkg/kubernetes/auth_headers_test.go
new file mode 100644
index 00000000..fb89e96e
--- /dev/null
+++ b/pkg/kubernetes/auth_headers_test.go
@@ -0,0 +1,413 @@
+package kubernetes
+
+import (
+ "encoding/base64"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGetDecodedData(t *testing.T) {
+ t.Run("decodes valid base64 string", func(t *testing.T) {
+ input := "SGVsbG8gV29ybGQ=" // "Hello World" in base64
+ expected := []byte("Hello World")
+
+ result, err := GetDecodedData(input)
+ require.NoError(t, err)
+ assert.Equal(t, expected, result)
+ })
+
+ t.Run("decodes empty string", func(t *testing.T) {
+ input := ""
+ expected := []byte{}
+
+ result, err := GetDecodedData(input)
+ require.NoError(t, err)
+ assert.Equal(t, expected, result)
+ })
+
+ t.Run("returns error for invalid base64", func(t *testing.T) {
+ input := "not-valid-base64!!!"
+
+ _, err := GetDecodedData(input)
+ require.Error(t, err)
+ })
+
+ t.Run("decodes base64 with padding", func(t *testing.T) {
+ input := "dGVzdA==" // "test" in base64
+ expected := []byte("test")
+
+ result, err := GetDecodedData(input)
+ require.NoError(t, err)
+ assert.Equal(t, expected, result)
+ })
+}
+
+func TestNewK8sAuthHeadersFromHeaders(t *testing.T) {
+ serverURL := "https://kubernetes.example.com:6443"
+ caCert := []byte("test-ca-cert")
+ caCertBase64 := base64.StdEncoding.EncodeToString(caCert)
+ token := "Bearer test-token"
+ clientCert := []byte("test-client-cert")
+ clientCertBase64 := base64.StdEncoding.EncodeToString(clientCert)
+ clientKey := []byte("test-client-key")
+ clientKeyBase64 := base64.StdEncoding.EncodeToString(clientKey)
+
+ t.Run("creates auth headers with token authentication", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ require.NotNil(t, authHeaders)
+
+ assert.Equal(t, serverURL, authHeaders.Server)
+ assert.Equal(t, caCert, authHeaders.CertificateAuthorityData)
+ assert.Equal(t, token, authHeaders.AuthorizationToken)
+ assert.Nil(t, authHeaders.ClientCertificateData)
+ assert.Nil(t, authHeaders.ClientKeyData)
+ assert.False(t, authHeaders.InsecureSkipTLSVerify)
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("creates auth headers with client certificate authentication", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomClientCertificateDataHeader): clientCertBase64,
+ string(CustomClientKeyDataHeader): clientKeyBase64,
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ require.NotNil(t, authHeaders)
+
+ assert.Equal(t, serverURL, authHeaders.Server)
+ assert.Equal(t, caCert, authHeaders.CertificateAuthorityData)
+ assert.Equal(t, "", authHeaders.AuthorizationToken)
+ assert.Equal(t, clientCert, authHeaders.ClientCertificateData)
+ assert.Equal(t, clientKey, authHeaders.ClientKeyData)
+ assert.False(t, authHeaders.InsecureSkipTLSVerify)
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("creates auth headers with both token and client certificate", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ string(CustomClientCertificateDataHeader): clientCertBase64,
+ string(CustomClientKeyDataHeader): clientKeyBase64,
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ require.NotNil(t, authHeaders)
+
+ // Should have both auth methods
+ assert.Equal(t, token, authHeaders.AuthorizationToken)
+ assert.Equal(t, clientCert, authHeaders.ClientCertificateData)
+ assert.Equal(t, clientKey, authHeaders.ClientKeyData)
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("sets InsecureSkipTLSVerify to true when header is 'true'", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ string(CustomInsecureSkipTLSVerifyHeader): "true",
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.True(t, authHeaders.InsecureSkipTLSVerify)
+ })
+
+ t.Run("sets InsecureSkipTLSVerify to true when header is 'TRUE' (case insensitive)", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ string(CustomInsecureSkipTLSVerifyHeader): "TRUE",
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.True(t, authHeaders.InsecureSkipTLSVerify)
+ })
+
+ t.Run("sets InsecureSkipTLSVerify to false when header is 'false'", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ string(CustomInsecureSkipTLSVerifyHeader): "false",
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.False(t, authHeaders.InsecureSkipTLSVerify)
+ })
+
+ t.Run("sets InsecureSkipTLSVerify to false when header is missing", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.False(t, authHeaders.InsecureSkipTLSVerify)
+ })
+
+ t.Run("returns error when server header is missing", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-server")
+ assert.Contains(t, err.Error(), "required")
+ })
+
+ t.Run("returns error when server header is empty string", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): "",
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-server")
+ })
+
+ t.Run("returns error when server header is not a string", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): 123,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-server")
+ })
+
+ t.Run("returns error when CA data header is missing", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomAuthorizationHeader): token,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-certificate-authority-data")
+ assert.Contains(t, err.Error(), "required")
+ })
+
+ t.Run("returns error when CA data header is empty string", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): "",
+ string(CustomAuthorizationHeader): token,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-certificate-authority-data")
+ })
+
+ t.Run("returns error when CA data is invalid base64", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): "invalid-base64!!!",
+ string(CustomAuthorizationHeader): token,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid certificate authority data")
+ })
+
+ t.Run("returns error when no authentication method is provided", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "authentication")
+ assert.Contains(t, err.Error(), "kubernetes-authorization")
+ assert.Contains(t, err.Error(), "kubernetes-client-certificate-data")
+ assert.Contains(t, err.Error(), "kubernetes-client-key-data")
+ })
+
+ t.Run("returns error when only client certificate is provided without key", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomClientCertificateDataHeader): clientCertBase64,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "authentication")
+ })
+
+ t.Run("returns error when only client key is provided without certificate", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomClientKeyDataHeader): clientKeyBase64,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "authentication")
+ })
+
+ t.Run("returns error when client certificate is invalid base64", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomClientCertificateDataHeader): "invalid-base64!!!",
+ string(CustomClientKeyDataHeader): clientKeyBase64,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid client certificate data")
+ })
+
+ t.Run("returns error when client key is invalid base64", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomClientCertificateDataHeader): clientCertBase64,
+ string(CustomClientKeyDataHeader): "invalid-base64!!!",
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "invalid client key data")
+ })
+
+ t.Run("handles empty token string gracefully", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): "",
+ string(CustomClientCertificateDataHeader): clientCertBase64,
+ string(CustomClientKeyDataHeader): clientKeyBase64,
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ // Empty token is OK if we have client cert
+ assert.Equal(t, "", authHeaders.AuthorizationToken)
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("handles empty client cert/key strings gracefully when token is provided", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): serverURL,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): token,
+ string(CustomClientCertificateDataHeader): "",
+ string(CustomClientKeyDataHeader): "",
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.Nil(t, authHeaders.ClientCertificateData)
+ assert.Nil(t, authHeaders.ClientKeyData)
+ assert.True(t, authHeaders.IsValid())
+ })
+}
+
+func TestK8sAuthHeaders_IsValid(t *testing.T) {
+ t.Run("returns true when token is provided", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ AuthorizationToken: "Bearer test-token",
+ }
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns true when client certificate and key are provided", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ ClientCertificateData: []byte("cert-data"),
+ ClientKeyData: []byte("key-data"),
+ }
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns true when both token and client cert are provided", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ AuthorizationToken: "Bearer test-token",
+ ClientCertificateData: []byte("cert-data"),
+ ClientKeyData: []byte("key-data"),
+ }
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when no authentication is provided", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{}
+ assert.False(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when only client certificate is provided", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ ClientCertificateData: []byte("cert-data"),
+ }
+ assert.False(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when only client key is provided", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ ClientKeyData: []byte("key-data"),
+ }
+ assert.False(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when token is empty string", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ AuthorizationToken: "",
+ }
+ assert.False(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when client cert and key are empty slices", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ ClientCertificateData: []byte{},
+ ClientKeyData: []byte{},
+ }
+ // Empty slices have length 0, so they're considered invalid
+ assert.False(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when client cert is nil and key has data", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ ClientCertificateData: nil,
+ ClientKeyData: []byte("key-data"),
+ }
+ assert.False(t, authHeaders.IsValid())
+ })
+
+ t.Run("returns false when client cert has data and key is nil", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ ClientCertificateData: []byte("cert-data"),
+ ClientKeyData: nil,
+ }
+ assert.False(t, authHeaders.IsValid())
+ })
+}
diff --git a/pkg/kubernetes/kubernetes.go b/pkg/kubernetes/kubernetes.go
index 7de8d6ff..22442cdb 100644
--- a/pkg/kubernetes/kubernetes.go
+++ b/pkg/kubernetes/kubernetes.go
@@ -2,19 +2,30 @@ package kubernetes
import (
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/kubernetes/scheme"
+ _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"github.com/containers/kubernetes-mcp-server/pkg/helm"
"github.com/containers/kubernetes-mcp-server/pkg/kiali"
- "k8s.io/client-go/kubernetes/scheme"
-
- _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
type HeaderKey string
const (
+ // CustomServerHeader is the Kubernetes cluster URL.
+ CustomServerHeader = HeaderKey("kubernetes-server")
+ // CustomCertificateAuthorityDataHeader is the base64-encoded CA certificate.
+ CustomCertificateAuthorityDataHeader = HeaderKey("kubernetes-certificate-authority-data")
+ // CustomAuthorizationHeader is the optional bearer token for authentication.
CustomAuthorizationHeader = HeaderKey("kubernetes-authorization")
- OAuthAuthorizationHeader = HeaderKey("Authorization")
+ // CustomClientCertificateDataHeader is the base64-encoded client certificate.
+ CustomClientCertificateDataHeader = HeaderKey("kubernetes-client-certificate-data")
+ // CustomClientKeyDataHeader is the base64-encoded client key.
+ CustomClientKeyDataHeader = HeaderKey("kubernetes-client-key-data")
+ // CustomInsecureSkipTLSVerifyHeader is the optional flag to skip TLS verification.
+ CustomInsecureSkipTLSVerifyHeader = HeaderKey("kubernetes-insecure-skip-tls-verify")
+
+ OAuthAuthorizationHeader = HeaderKey("Authorization")
CustomUserAgent = "kubernetes-mcp-server/bearer-token-auth"
)
@@ -42,5 +53,5 @@ func (k *Kubernetes) NewHelm() *helm.Helm {
// NewKiali returns a Kiali client initialized with the same StaticConfig and bearer token
// as the underlying derived Kubernetes manager.
func (k *Kubernetes) NewKiali() *kiali.Kiali {
- return kiali.NewKiali(k.manager.staticConfig, k.manager.cfg)
+ return kiali.NewKiali(k.manager.staticConfig, k.AccessControlClientset().cfg)
}
diff --git a/pkg/kubernetes/kubernetes_derived_test.go b/pkg/kubernetes/kubernetes_derived_test.go
index 69d4ef33..88a39da3 100644
--- a/pkg/kubernetes/kubernetes_derived_test.go
+++ b/pkg/kubernetes/kubernetes_derived_test.go
@@ -82,10 +82,10 @@ users:
s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")
s.Run("RestConfig is correctly copied and sensitive fields are omitted", func() {
- derivedCfg := derived.manager.cfg
+ derivedCfg := derived.manager.accessControlClientSet.cfg
s.Require().NotNil(derivedCfg, "derived config is nil")
- originalCfg := testManager.cfg
+ originalCfg := testManager.accessControlClientSet.cfg
s.Equalf(originalCfg.Host, derivedCfg.Host, "expected Host %s, got %s", originalCfg.Host, derivedCfg.Host)
s.Equalf(originalCfg.APIPath, derivedCfg.APIPath, "expected APIPath %s, got %s", originalCfg.APIPath, derivedCfg.APIPath)
s.Equalf(originalCfg.QPS, derivedCfg.QPS, "expected QPS %f, got %f", originalCfg.QPS, derivedCfg.QPS)
@@ -121,12 +121,11 @@ users:
})
s.Run("derived manager has initialized clients", func() {
// Verify that the derived manager has proper clients initialized
- s.NotNilf(derived.manager.accessControlClientSet, "expected accessControlClientSet to be initialized")
- s.Equalf(testStaticConfig, derived.manager.accessControlClientSet.staticConfig, "staticConfig not properly wired to derived manager")
- s.NotNilf(derived.manager.discoveryClient, "expected discoveryClient to be initialized")
- s.NotNilf(derived.manager.accessControlRESTMapper, "expected accessControlRESTMapper to be initialized")
- s.Equalf(testStaticConfig, derived.manager.accessControlRESTMapper.staticConfig, "staticConfig not properly wired to derived manager")
- s.NotNilf(derived.manager.dynamicClient, "expected dynamicClient to be initialized")
+ s.NotNilf(derived.AccessControlClientset(), "expected accessControlClientSet to be initialized")
+ s.Equalf(testStaticConfig, derived.manager.staticConfig, "staticConfig not properly wired to derived manager")
+ s.NotNilf(derived.AccessControlClientset().DiscoveryClient(), "expected discoveryClient to be initialized")
+ s.NotNilf(derived.AccessControlClientset().RESTMapper(), "expected accessControlRESTMapper to be initialized")
+ s.NotNilf(derived.AccessControlClientset().DynamicClient(), "expected dynamicClient to be initialized")
})
})
})
@@ -172,7 +171,7 @@ users:
s.NotEqual(derived.manager, testManager, "expected new derived manager, got original manager")
s.Equal(derived.manager.staticConfig, testStaticConfig, "staticConfig not properly wired to derived manager")
- derivedCfg := derived.manager.cfg
+ derivedCfg := derived.manager.accessControlClientSet.cfg
s.Require().NotNil(derivedCfg, "derived config is nil")
s.Equalf("aiTana-julIA", derivedCfg.BearerToken, "expected BearerToken %s, got %s", "aiTana-julIA", derivedCfg.BearerToken)
diff --git a/pkg/kubernetes/manager.go b/pkg/kubernetes/manager.go
index d09b8790..b92bc55b 100644
--- a/pkg/kubernetes/manager.go
+++ b/pkg/kubernetes/manager.go
@@ -13,22 +13,15 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/discovery"
- "k8s.io/client-go/discovery/cached/memory"
- "k8s.io/client-go/dynamic"
"k8s.io/client-go/rest"
- "k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/klog/v2"
)
type Manager struct {
- cfg *rest.Config
- clientCmdConfig clientcmd.ClientConfig
- discoveryClient discovery.CachedDiscoveryInterface
- accessControlClientSet *AccessControlClientset
- accessControlRESTMapper *AccessControlRESTMapper
- dynamicClient *dynamic.DynamicClient
+ clientCmdConfig clientcmd.ClientConfig
+ accessControlClientSet *AccessControlClientset
staticConfig *config.StaticConfig
CloseWatchKubeConfig CloseWatchKubeConfig
@@ -98,30 +91,54 @@ func NewInClusterManager(config *config.StaticConfig) (*Manager, error) {
return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil))
}
+func NewAuthHeadersClusterManager(authHeaders *K8sAuthHeaders, config *config.StaticConfig) (*Manager, error) {
+
+ var certData []byte = nil
+ if len(authHeaders.ClientCertificateData) > 0 {
+ certData = authHeaders.ClientCertificateData
+ }
+
+ var keyData []byte = nil
+ if len(authHeaders.ClientKeyData) > 0 {
+ keyData = authHeaders.ClientKeyData
+ }
+
+ restConfig := &rest.Config{
+ Host: authHeaders.Server,
+ BearerToken: authHeaders.AuthorizationToken,
+ TLSClientConfig: rest.TLSClientConfig{
+ Insecure: authHeaders.InsecureSkipTLSVerify,
+ CAData: authHeaders.CertificateAuthorityData,
+ CertData: certData,
+ KeyData: keyData,
+ },
+ }
+ // Create a dummy kubeconfig clientcmdapi.Config to be used in places where clientcmd.ClientConfig is required.
+ clientCmdConfig := clientcmdapi.NewConfig()
+ clientCmdConfig.Clusters["cluster"] = &clientcmdapi.Cluster{
+ Server: authHeaders.Server,
+ InsecureSkipTLSVerify: authHeaders.InsecureSkipTLSVerify,
+ }
+ clientCmdConfig.AuthInfos["user"] = &clientcmdapi.AuthInfo{
+ Token: authHeaders.AuthorizationToken,
+ ClientCertificateData: certData,
+ ClientKeyData: keyData,
+ }
+
+ return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil))
+}
+
func newManager(config *config.StaticConfig, restConfig *rest.Config, clientCmdConfig clientcmd.ClientConfig) (*Manager, error) {
k8s := &Manager{
staticConfig: config,
- cfg: restConfig,
clientCmdConfig: clientCmdConfig,
}
- if k8s.cfg.UserAgent == "" {
- k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent()
- }
var err error
// TODO: Won't work because not all client-go clients use the shared context (e.g. discovery client uses context.TODO())
//k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper {
// return &impersonateRoundTripper{original}
//})
- k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.cfg, k8s.staticConfig)
- if err != nil {
- return nil, err
- }
- k8s.discoveryClient = memory.NewMemCacheClient(k8s.accessControlClientSet.DiscoveryClient())
- k8s.accessControlRESTMapper = NewAccessControlRESTMapper(
- restmapper.NewDeferredDiscoveryRESTMapper(k8s.discoveryClient),
- k8s.staticConfig,
- )
- k8s.dynamicClient, err = dynamic.NewForConfig(k8s.cfg)
+ k8s.accessControlClientSet, err = NewAccessControlClientset(k8s.staticConfig, restConfig)
if err != nil {
return nil, err
}
@@ -185,16 +202,16 @@ func (m *Manager) NamespaceOrDefault(namespace string) string {
}
func (m *Manager) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
- return m.discoveryClient, nil
+ return m.accessControlClientSet.DiscoveryClient(), nil
}
func (m *Manager) ToRESTMapper() (meta.RESTMapper, error) {
- return m.accessControlRESTMapper, nil
+ return m.accessControlClientSet.RESTMapper(), nil
}
// ToRESTConfig returns the rest.Config object (genericclioptions.RESTClientGetter)
func (m *Manager) ToRESTConfig() (*rest.Config, error) {
- return m.cfg, nil
+ return m.accessControlClientSet.cfg, nil
}
// ToRawKubeConfigLoader returns the clientcmd.ClientConfig object (genericclioptions.RESTClientGetter)
@@ -203,10 +220,7 @@ func (m *Manager) ToRawKubeConfigLoader() clientcmd.ClientConfig {
}
func (m *Manager) VerifyToken(ctx context.Context, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
- tokenReviewClient, err := m.accessControlClientSet.TokenReview()
- if err != nil {
- return nil, nil, err
- }
+ tokenReviewClient := m.accessControlClientSet.AuthenticationV1().TokenReviews()
tokenReview := &authenticationv1api.TokenReview{
TypeMeta: metav1.TypeMeta{
APIVersion: "authentication.k8s.io/v1",
@@ -243,21 +257,22 @@ func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
}
klog.V(5).Infof("%s header found (Bearer), using provided bearer token", OAuthAuthorizationHeader)
derivedCfg := &rest.Config{
- Host: m.cfg.Host,
- APIPath: m.cfg.APIPath,
+ Host: m.accessControlClientSet.cfg.Host,
+ APIPath: m.accessControlClientSet.cfg.APIPath,
+ WrapTransport: m.accessControlClientSet.cfg.WrapTransport,
// Copy only server verification TLS settings (CA bundle and server name)
TLSClientConfig: rest.TLSClientConfig{
- Insecure: m.cfg.Insecure,
- ServerName: m.cfg.ServerName,
- CAFile: m.cfg.CAFile,
- CAData: m.cfg.CAData,
+ Insecure: m.accessControlClientSet.cfg.Insecure,
+ ServerName: m.accessControlClientSet.cfg.ServerName,
+ CAFile: m.accessControlClientSet.cfg.CAFile,
+ CAData: m.accessControlClientSet.cfg.CAData,
},
BearerToken: strings.TrimPrefix(authorization, "Bearer "),
// pass custom UserAgent to identify the client
UserAgent: CustomUserAgent,
- QPS: m.cfg.QPS,
- Burst: m.cfg.Burst,
- Timeout: m.cfg.Timeout,
+ QPS: m.accessControlClientSet.cfg.QPS,
+ Burst: m.accessControlClientSet.cfg.Burst,
+ Timeout: m.accessControlClientSet.cfg.Timeout,
Impersonate: rest.ImpersonationConfig{},
}
clientCmdApiConfig, err := m.clientCmdConfig.RawConfig()
@@ -272,11 +287,10 @@ func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
derived := &Kubernetes{
manager: &Manager{
clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil),
- cfg: derivedCfg,
staticConfig: m.staticConfig,
},
}
- derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.cfg, derived.manager.staticConfig)
+ derived.manager.accessControlClientSet, err = NewAccessControlClientset(derived.manager.staticConfig, derivedCfg)
if err != nil {
if m.staticConfig.RequireOAuth {
klog.Errorf("failed to get kubeconfig: %v", err)
@@ -284,18 +298,5 @@ func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) {
}
return &Kubernetes{manager: m}, nil
}
- derived.manager.discoveryClient = memory.NewMemCacheClient(derived.manager.accessControlClientSet.DiscoveryClient())
- derived.manager.accessControlRESTMapper = NewAccessControlRESTMapper(
- restmapper.NewDeferredDiscoveryRESTMapper(derived.manager.discoveryClient),
- derived.manager.staticConfig,
- )
- derived.manager.dynamicClient, err = dynamic.NewForConfig(derived.manager.cfg)
- if err != nil {
- if m.staticConfig.RequireOAuth {
- klog.Errorf("failed to initialize dynamic client: %v", err)
- return nil, errors.New("failed to initialize dynamic client")
- }
- return &Kubernetes{manager: m}, nil
- }
return derived, nil
}
diff --git a/pkg/kubernetes/manager_test.go b/pkg/kubernetes/manager_test.go
index 63241fa9..d351e79a 100644
--- a/pkg/kubernetes/manager_test.go
+++ b/pkg/kubernetes/manager_test.go
@@ -49,7 +49,7 @@ func (s *ManagerTestSuite) TestNewInClusterManager() {
s.Equal("in-cluster", rawConfig.CurrentContext, "expected current context to be 'in-cluster'")
})
s.Run("sets default user-agent", func() {
- s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
+ s.Contains(manager.accessControlClientSet.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
})
})
s.Run("with explicit kubeconfig", func() {
@@ -98,10 +98,10 @@ func (s *ManagerTestSuite) TestNewKubeconfigManager() {
s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match")
})
s.Run("sets default user-agent", func() {
- s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
+ s.Contains(manager.accessControlClientSet.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")")
})
s.Run("rest config host points to mock server", func() {
- s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+ s.Equal(s.mockServer.Config().Host, manager.accessControlClientSet.cfg.Host, "expected rest config host to match mock server")
})
})
s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() {
@@ -124,7 +124,7 @@ func (s *ManagerTestSuite) TestNewKubeconfigManager() {
s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit")
})
s.Run("rest config host points to mock server", func() {
- s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+ s.Equal(s.mockServer.Config().Host, manager.accessControlClientSet.cfg.Host, "expected rest config host to match mock server")
})
})
s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() {
@@ -149,7 +149,7 @@ func (s *ManagerTestSuite) TestNewKubeconfigManager() {
s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match")
})
s.Run("rest config host points to mock server", func() {
- s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server")
+ s.Equal(s.mockServer.Config().Host, manager.accessControlClientSet.cfg.Host, "expected rest config host to match mock server")
})
})
s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() {
@@ -197,6 +197,166 @@ func (s *ManagerTestSuite) TestNewKubeconfigManager() {
})
}
+func (s *ManagerTestSuite) TestNewAuthHeadersClusterManager() {
+ serverURL := s.mockServer.Config().Host
+ token := "test-token"
+
+ s.Run("creates manager with token authentication", func() {
+ authHeaders := &K8sAuthHeaders{
+ Server: serverURL,
+ CertificateAuthorityData: nil, // Use insecure for testing
+ AuthorizationToken: token,
+ InsecureSkipTLSVerify: true,
+ }
+
+ cfg := &config.StaticConfig{}
+ manager, err := NewAuthHeadersClusterManager(authHeaders, cfg)
+
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+
+ s.Run("rest config is properly configured", func() {
+ restConfig, err := manager.ToRESTConfig()
+ s.Require().NoError(err)
+ s.Equal(serverURL, restConfig.Host)
+ s.Equal(token, restConfig.BearerToken)
+ s.Nil(restConfig.CAData)
+ s.Nil(restConfig.CertData)
+ s.Nil(restConfig.KeyData)
+ s.True(restConfig.Insecure)
+ })
+
+ s.Run("client cmd config is properly configured", func() {
+ rawConfig, err := manager.ToRawKubeConfigLoader().RawConfig()
+ s.Require().NoError(err)
+ s.NotNil(rawConfig.Clusters["cluster"])
+ s.Equal(serverURL, rawConfig.Clusters["cluster"].Server)
+ s.True(rawConfig.Clusters["cluster"].InsecureSkipTLSVerify)
+ s.NotNil(rawConfig.AuthInfos["user"])
+ s.Equal(token, rawConfig.AuthInfos["user"].Token)
+ s.Nil(rawConfig.AuthInfos["user"].ClientCertificateData)
+ s.Nil(rawConfig.AuthInfos["user"].ClientKeyData)
+ })
+
+ s.Run("manager can create discovery client", func() {
+ discoveryClient, err := manager.ToDiscoveryClient()
+ s.Require().NoError(err)
+ s.NotNil(discoveryClient)
+ })
+
+ s.Run("manager can create REST mapper", func() {
+ restMapper, err := manager.ToRESTMapper()
+ s.Require().NoError(err)
+ s.NotNil(restMapper)
+ })
+ })
+
+ // Note: Client certificate tests are omitted because they require valid PEM-encoded certificates
+ // to pass Kubernetes client initialization. The logic for setting cert data is covered by
+ // the tests for empty/nil certificate handling below.
+
+ s.Run("creates manager with InsecureSkipTLSVerify enabled and no CA", func() {
+ authHeaders := &K8sAuthHeaders{
+ Server: serverURL,
+ CertificateAuthorityData: nil, // No CA data when using insecure
+ AuthorizationToken: token,
+ InsecureSkipTLSVerify: true,
+ }
+
+ cfg := &config.StaticConfig{}
+ manager, err := NewAuthHeadersClusterManager(authHeaders, cfg)
+
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+
+ s.Run("rest config has insecure flag enabled", func() {
+ restConfig, err := manager.ToRESTConfig()
+ s.Require().NoError(err)
+ s.True(restConfig.Insecure)
+ s.Nil(restConfig.CAData)
+ })
+
+ s.Run("client cmd config has insecure flag enabled", func() {
+ rawConfig, err := manager.ToRawKubeConfigLoader().RawConfig()
+ s.Require().NoError(err)
+ s.True(rawConfig.Clusters["cluster"].InsecureSkipTLSVerify)
+ })
+ })
+
+ s.Run("creates manager with empty client certificate slices", func() {
+ authHeaders := &K8sAuthHeaders{
+ Server: serverURL,
+ CertificateAuthorityData: nil, // Use insecure for testing
+ AuthorizationToken: token,
+ ClientCertificateData: []byte{},
+ ClientKeyData: []byte{},
+ InsecureSkipTLSVerify: true,
+ }
+
+ cfg := &config.StaticConfig{}
+ manager, err := NewAuthHeadersClusterManager(authHeaders, cfg)
+
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+
+ s.Run("rest config has nil cert data for empty slices", func() {
+ restConfig, err := manager.ToRESTConfig()
+ s.Require().NoError(err)
+ s.Nil(restConfig.CertData)
+ s.Nil(restConfig.KeyData)
+ })
+ })
+
+ s.Run("creates manager with nil client certificate data", func() {
+ authHeaders := &K8sAuthHeaders{
+ Server: serverURL,
+ CertificateAuthorityData: nil, // Use insecure for testing
+ AuthorizationToken: token,
+ ClientCertificateData: nil,
+ ClientKeyData: nil,
+ InsecureSkipTLSVerify: true,
+ }
+
+ cfg := &config.StaticConfig{}
+ manager, err := NewAuthHeadersClusterManager(authHeaders, cfg)
+
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+
+ s.Run("rest config has nil cert data", func() {
+ restConfig, err := manager.ToRESTConfig()
+ s.Require().NoError(err)
+ s.Nil(restConfig.CertData)
+ s.Nil(restConfig.KeyData)
+ })
+ })
+
+ s.Run("creates manager with custom static config", func() {
+ authHeaders := &K8sAuthHeaders{
+ Server: serverURL,
+ CertificateAuthorityData: nil, // Use insecure for testing
+ AuthorizationToken: token,
+ InsecureSkipTLSVerify: true,
+ }
+
+ cfg := &config.StaticConfig{
+ DeniedResources: []config.GroupVersionKind{
+ {Group: "", Version: "v1", Kind: "Secret"},
+ },
+ }
+ manager, err := NewAuthHeadersClusterManager(authHeaders, cfg)
+
+ s.Require().NoError(err)
+ s.Require().NotNil(manager)
+
+ s.Run("manager is created successfully with denied resources", func() {
+ // We can't directly access staticConfig, but we can verify the manager was created
+ // The access control will be tested when actually using the manager
+ s.NotNil(manager)
+ })
+ })
+}
+
func TestManager(t *testing.T) {
suite.Run(t, new(ManagerTestSuite))
}
diff --git a/pkg/kubernetes/nodes.go b/pkg/kubernetes/nodes.go
index a4321a9f..152f84cf 100644
--- a/pkg/kubernetes/nodes.go
+++ b/pkg/kubernetes/nodes.go
@@ -18,11 +18,13 @@ func (k *Kubernetes) NodesLog(ctx context.Context, name string, query string, ta
// - /var/log/kube-proxy.log - kube-proxy logs
// - /var/log/containers/ - container logs
- req, err := k.AccessControlClientset().NodesLogs(ctx, name)
- if err != nil {
- return "", err
+ if _, err := k.AccessControlClientset().CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil {
+ return "", fmt.Errorf("failed to get node %s: %w", name, err)
}
+ req := k.AccessControlClientset().CoreV1().RESTClient().
+ Get().
+ AbsPath("api", "v1", "nodes", name, "proxy", "logs")
req.Param("query", query)
// Query parameters for tail
if tailLines > 0 {
@@ -47,12 +49,14 @@ func (k *Kubernetes) NodesStatsSummary(ctx context.Context, name string) (string
// https://kubernetes.io/docs/reference/instrumentation/understand-psi-metrics/
// This endpoint provides CPU, memory, filesystem, and network statistics
- req, err := k.AccessControlClientset().NodesStatsSummary(ctx, name)
- if err != nil {
- return "", err
+ if _, err := k.AccessControlClientset().CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil {
+ return "", fmt.Errorf("failed to get node %s: %w", name, err)
}
- result := req.Do(ctx)
+ result := k.AccessControlClientset().CoreV1().RESTClient().
+ Get().
+ AbsPath("api", "v1", "nodes", name, "proxy", "stats", "summary").
+ Do(ctx)
if result.Error() != nil {
return "", fmt.Errorf("failed to get node stats summary: %w", result.Error())
}
@@ -75,5 +79,20 @@ func (k *Kubernetes) NodesTop(ctx context.Context, options NodesTopOptions) (*me
if !k.supportsGroupVersion(metrics.GroupName + "/" + metricsv1beta1api.SchemeGroupVersion.Version) {
return nil, errors.New("metrics API is not available")
}
- return k.manager.accessControlClientSet.NodesMetricses(ctx, options.Name, options.ListOptions)
+ versionedMetrics := &metricsv1beta1api.NodeMetricsList{}
+ var err error
+ if options.Name != "" {
+ m, err := k.AccessControlClientset().MetricsV1beta1Client().NodeMetricses().Get(ctx, options.Name, metav1.GetOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metrics for node %s: %w", options.Name, err)
+ }
+ versionedMetrics.Items = []metricsv1beta1api.NodeMetrics{*m}
+ } else {
+ versionedMetrics, err = k.AccessControlClientset().MetricsV1beta1Client().NodeMetricses().List(ctx, options.ListOptions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list node metrics: %w", err)
+ }
+ }
+ convertedMetrics := &metrics.NodeMetricsList{}
+ return convertedMetrics, metricsv1beta1api.Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(versionedMetrics, convertedMetrics, nil)
}
diff --git a/pkg/kubernetes/openshift.go b/pkg/kubernetes/openshift.go
index 7cb3e273..cc6558cc 100644
--- a/pkg/kubernetes/openshift.go
+++ b/pkg/kubernetes/openshift.go
@@ -10,9 +10,13 @@ type Openshift interface {
IsOpenShift(context.Context) bool
}
-func (m *Manager) IsOpenShift(_ context.Context) bool {
+func (m *Manager) IsOpenShift(ctx context.Context) bool {
// This method should be fast and not block (it's called at startup)
- _, err := m.discoveryClient.ServerResourcesForGroupVersion(schema.GroupVersion{
+ k, err := m.Derived(ctx)
+ if err != nil {
+ return false
+ }
+ _, err = k.AccessControlClientset().DiscoveryClient().ServerResourcesForGroupVersion(schema.GroupVersion{
Group: "project.openshift.io",
Version: "v1",
}.String())
diff --git a/pkg/kubernetes/pods.go b/pkg/kubernetes/pods.go
index 4d333ea8..f36f1bee 100644
--- a/pkg/kubernetes/pods.go
+++ b/pkg/kubernetes/pods.go
@@ -12,6 +12,7 @@ import (
labelutil "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/httpstream"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/client-go/tools/remotecommand"
@@ -22,7 +23,7 @@ import (
"github.com/containers/kubernetes-mcp-server/pkg/version"
)
-// Default number of lines to retrieve from the end of the logs
+// DefaultTailLines is the default number of lines to retrieve from the end of the logs
const DefaultTailLines = int64(100)
type PodsTopOptions struct {
@@ -65,10 +66,7 @@ func (k *Kubernetes) PodsDelete(ctx context.Context, namespace, name string) (st
// Delete managed service
if isManaged {
- services, err := k.manager.accessControlClientSet.Services(namespace)
- if err != nil {
- return "", err
- }
+ services := k.AccessControlClientset().CoreV1().Services(namespace)
if sl, _ := services.List(ctx, metav1.ListOptions{
LabelSelector: managedLabelSelector.String(),
}); sl != nil {
@@ -80,7 +78,7 @@ func (k *Kubernetes) PodsDelete(ctx context.Context, namespace, name string) (st
// Delete managed Route
if isManaged && k.supportsGroupVersion("route.openshift.io/v1") {
- routeResources := k.manager.dynamicClient.
+ routeResources := k.AccessControlClientset().DynamicClient().
Resource(schema.GroupVersionResource{Group: "route.openshift.io", Version: "v1", Resource: "routes"}).
Namespace(namespace)
if rl, _ := routeResources.List(ctx, metav1.ListOptions{
@@ -97,10 +95,7 @@ func (k *Kubernetes) PodsDelete(ctx context.Context, namespace, name string) (st
}
func (k *Kubernetes) PodsLog(ctx context.Context, namespace, name, container string, previous bool, tail int64) (string, error) {
- pods, err := k.manager.accessControlClientSet.Pods(k.NamespaceOrDefault(namespace))
- if err != nil {
- return "", err
- }
+ pods := k.AccessControlClientset().CoreV1().Pods(k.NamespaceOrDefault(namespace))
logOptions := &v1.PodLogOptions{
Container: container,
@@ -218,15 +213,27 @@ func (k *Kubernetes) PodsTop(ctx context.Context, options PodsTopOptions) (*metr
} else {
namespace = k.NamespaceOrDefault(namespace)
}
- return k.manager.accessControlClientSet.PodsMetricses(ctx, namespace, options.Name, options.ListOptions)
+ var err error
+ versionedMetrics := &metricsv1beta1api.PodMetricsList{}
+ if options.Name != "" {
+ m, err := k.AccessControlClientset().MetricsV1beta1Client().PodMetricses(namespace).Get(ctx, options.Name, metav1.GetOptions{})
+ if err != nil {
+ return nil, fmt.Errorf("failed to get metrics for pod %s/%s: %w", namespace, options.Name, err)
+ }
+ versionedMetrics.Items = []metricsv1beta1api.PodMetrics{*m}
+ } else {
+ versionedMetrics, err = k.AccessControlClientset().MetricsV1beta1Client().PodMetricses(namespace).List(ctx, options.ListOptions)
+ if err != nil {
+ return nil, fmt.Errorf("failed to list pod metrics in namespace %s: %w", namespace, err)
+ }
+ }
+ convertedMetrics := &metrics.PodMetricsList{}
+ return convertedMetrics, metricsv1beta1api.Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(versionedMetrics, convertedMetrics, nil)
}
func (k *Kubernetes) PodsExec(ctx context.Context, namespace, name, container string, command []string) (string, error) {
namespace = k.NamespaceOrDefault(namespace)
- pods, err := k.manager.accessControlClientSet.Pods(namespace)
- if err != nil {
- return "", err
- }
+ pods := k.AccessControlClientset().CoreV1().Pods(namespace)
pod, err := pods.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return "", err
@@ -244,7 +251,26 @@ func (k *Kubernetes) PodsExec(ctx context.Context, namespace, name, container st
Stdout: true,
Stderr: true,
}
- executor, err := k.manager.accessControlClientSet.PodsExec(namespace, name, podExecOptions)
+ // Compute URL
+ // https://github.com/kubernetes/kubectl/blob/5366de04e168bcbc11f5e340d131a9ca8b7d0df4/pkg/cmd/exec/exec.go#L382-L397
+ execRequest := k.AccessControlClientset().CoreV1().RESTClient().
+ Post().
+ Resource("pods").
+ Namespace(namespace).
+ Name(name).
+ SubResource("exec")
+ execRequest.VersionedParams(podExecOptions, ParameterCodec)
+ spdyExec, err := remotecommand.NewSPDYExecutor(k.AccessControlClientset().cfg, "POST", execRequest.URL())
+ if err != nil {
+ return "", err
+ }
+ webSocketExec, err := remotecommand.NewWebSocketExecutor(k.AccessControlClientset().cfg, "GET", execRequest.URL().String())
+ if err != nil {
+ return "", err
+ }
+ executor, err := remotecommand.NewFallbackExecutor(webSocketExec, spdyExec, func(err error) bool {
+ return httpstream.IsUpgradeFailure(err) || httpstream.IsHTTPSProxyError(err)
+ })
if err != nil {
return "", err
}
diff --git a/pkg/kubernetes/provider_auth_headers.go b/pkg/kubernetes/provider_auth_headers.go
new file mode 100644
index 00000000..1bc183db
--- /dev/null
+++ b/pkg/kubernetes/provider_auth_headers.go
@@ -0,0 +1,78 @@
+package kubernetes
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ authenticationv1api "k8s.io/api/authentication/v1"
+ "k8s.io/klog/v2"
+)
+
+// AuthHeadersClusterProvider implements Provider for authentication via request headers.
+// This provider requires users to provide authentication tokens via request headers.
+// It uses cluster connection details from configuration but does not use any
+// authentication credentials from kubeconfig files.
+type AuthHeadersClusterProvider struct {
+ staticConfig *config.StaticConfig
+}
+
+var _ Provider = &AuthHeadersClusterProvider{}
+
+func init() {
+ RegisterProvider(config.ClusterProviderAuthHeaders, newAuthHeadersClusterProvider)
+}
+
+// newAuthHeadersClusterProvider creates a provider that requires header-based authentication.
+// Users must provide credentials via request headers (server URL, and either a token or a client certificate and key).
+func newAuthHeadersClusterProvider(cfg *config.StaticConfig) (Provider, error) {
+ klog.V(1).Infof("Auth-headers provider initialized - all requests must include valid k8s auth headers")
+
+ return &AuthHeadersClusterProvider{staticConfig: cfg}, nil
+}
+
+func (p *AuthHeadersClusterProvider) IsOpenShift(ctx context.Context) bool {
+ klog.V(1).Infof("IsOpenShift not supported for auth-headers provider. Returning false.")
+ return false
+}
+
+func (p *AuthHeadersClusterProvider) VerifyToken(ctx context.Context, target, token, audience string) (*authenticationv1api.UserInfo, []string, error) {
+ return nil, nil, fmt.Errorf("VerifyToken not supported for auth-headers provider")
+}
+
+func (p *AuthHeadersClusterProvider) GetTargets(_ context.Context) ([]string, error) {
+ klog.V(1).Infof("GetTargets not supported for auth-headers provider. Returning empty list.")
+ return []string{""}, nil
+}
+
+func (p *AuthHeadersClusterProvider) GetTargetParameterName() string {
+ klog.V(1).Infof("GetTargetParameterName not supported for auth-headers provider. Returning empty name.")
+ return ""
+}
+
+func (p *AuthHeadersClusterProvider) GetDerivedKubernetes(ctx context.Context, target string) (*Kubernetes, error) {
+ authHeaders, ok := ctx.Value(AuthHeadersContextKey).(*K8sAuthHeaders)
+ if !ok {
+ return nil, errors.New("authHeaders required")
+ }
+
+ manager, err := NewAuthHeadersClusterManager(authHeaders, p.staticConfig)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create auth headers cluster manager: %w", err)
+ }
+
+ return &Kubernetes{manager: manager}, nil
+}
+
+func (p *AuthHeadersClusterProvider) GetDefaultTarget() string {
+ klog.V(1).Infof("GetDefaultTarget not supported for auth-headers provider. Returning empty name.")
+ return ""
+}
+
+func (p *AuthHeadersClusterProvider) WatchTargets(watch func() error) {
+ klog.V(1).Infof("WatchTargets not supported for auth-headers provider. Ignoring watch function.")
+}
+
+func (p *AuthHeadersClusterProvider) Close() {
+}
diff --git a/pkg/kubernetes/provider_auth_headers_test.go b/pkg/kubernetes/provider_auth_headers_test.go
new file mode 100644
index 00000000..6f637b45
--- /dev/null
+++ b/pkg/kubernetes/provider_auth_headers_test.go
@@ -0,0 +1,265 @@
+package kubernetes
+
+import (
+ "context"
+ "encoding/base64"
+ "testing"
+
+ "github.com/containers/kubernetes-mcp-server/internal/test"
+ "github.com/containers/kubernetes-mcp-server/pkg/config"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestAuthHeadersProviderFactory(t *testing.T) {
+ t.Run("auth-headers provider initializes without kubeconfig", func(t *testing.T) {
+ cfg := &config.StaticConfig{
+ ClusterProviderStrategy: config.ClusterProviderAuthHeaders,
+ }
+
+ provider, err := newAuthHeadersClusterProvider(cfg)
+ require.NoError(t, err)
+ require.NotNil(t, provider)
+ assert.IsType(t, &AuthHeadersClusterProvider{}, provider)
+ })
+
+ t.Run("auth-headers provider initializes with minimal config", func(t *testing.T) {
+ cfg := &config.StaticConfig{
+ ClusterProviderStrategy: config.ClusterProviderAuthHeaders,
+ }
+
+ provider, err := newAuthHeadersClusterProvider(cfg)
+ require.NoError(t, err)
+ require.NotNil(t, provider)
+ })
+}
+
+func TestAuthHeadersProviderInterface(t *testing.T) {
+ cfg := &config.StaticConfig{
+ ClusterProviderStrategy: config.ClusterProviderAuthHeaders,
+ }
+
+ provider, err := newAuthHeadersClusterProvider(cfg)
+ require.NoError(t, err)
+
+ t.Run("GetTargets returns single empty target", func(t *testing.T) {
+ targets, err := provider.GetTargets(context.Background())
+ require.NoError(t, err)
+ assert.Equal(t, []string{""}, targets)
+ })
+
+ t.Run("GetTargetParameterName returns empty string", func(t *testing.T) {
+ assert.Equal(t, "", provider.GetTargetParameterName())
+ })
+
+ t.Run("GetDefaultTarget returns empty string", func(t *testing.T) {
+ assert.Equal(t, "", provider.GetDefaultTarget())
+ })
+
+ t.Run("IsOpenShift returns false", func(t *testing.T) {
+ assert.False(t, provider.IsOpenShift(context.Background()))
+ })
+
+ t.Run("VerifyToken not supported", func(t *testing.T) {
+ _, _, err := provider.VerifyToken(context.Background(), "", "token", "audience")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "not supported")
+ })
+
+ t.Run("WatchTargets does nothing", func(t *testing.T) {
+ called := false
+ provider.WatchTargets(func() error {
+ called = true
+ return nil
+ })
+		// WatchTargets is a no-op for this provider and must never invoke the supplied callback
+ assert.False(t, called)
+ })
+
+ t.Run("Close does nothing", func(t *testing.T) {
+ // Should not panic
+ provider.Close()
+ })
+}
+
+func TestAuthHeadersProviderGetDerivedKubernetes(t *testing.T) {
+ mockServer := test.NewMockServer()
+ defer mockServer.Close()
+
+ cfg := &config.StaticConfig{
+ ClusterProviderStrategy: config.ClusterProviderAuthHeaders,
+ }
+
+ provider, err := newAuthHeadersClusterProvider(cfg)
+ require.NoError(t, err)
+
+	// Hardcoded test CA certificate in valid PEM format (content is arbitrary; only its well-formed shape matters to these tests)
+ caCert := []byte(`-----BEGIN CERTIFICATE-----
+MIIBkTCB+wIJAKHHCgVZU8BiMA0GCSqGSIb3DQEBBQUAMA0xCzAJBgNVBAYTAlVT
+MB4XDTA5MDUxOTE1MTc1N1oXDTEwMDUxOTE1MTc1N1owDTELMAkGA1UEBhMCVVMw
+gZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANLJhPHhITqQbPklG3ibCVxwGMRf
+p/v4XqhfdQHdcVfHap6NQ5Wok/9X5gK7d1ONlGjn/Ut9Pz4xwqGy3nLxVz1CsE2k
+TqQxdqEQBVNvFrAB4OlD9K9wQ3R+0S1wPPQ9yg9i6vF2JlOvD1HFJzIGcz1kLZU2
+wj5FqYY5SHmXF2YbAgMBAAEwDQYJKoZIhvcNAQEFBQADgYEAc9NQIv8J/cqV0zBX
+c6d5Wm1NJdTxYwG/+xHDaLDK8R3W5Y1e7YwNg7nN8K2GqMh3YYxmDJCLDhGdKDEV
+V5qHcKhFCFPxTmKgzVjy8vhR7VqZU4dJhC8sDbE/IkKH7hBo7CLHH/T2Ly9LcDY0
+9C2zNtDN3KEzGW3V7/J7IvVBDy0=
+-----END CERTIFICATE-----`)
+ caCertBase64 := base64.StdEncoding.EncodeToString(caCert)
+
+ t.Run("GetDerivedKubernetes requires auth headers in context", func(t *testing.T) {
+ ctx := context.Background()
+ _, err := provider.GetDerivedKubernetes(ctx, "")
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "authHeaders required")
+ })
+
+ t.Run("GetDerivedKubernetes works with token authentication", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ Server: mockServer.Config().Host,
+ CertificateAuthorityData: nil,
+ AuthorizationToken: "test-token",
+ InsecureSkipTLSVerify: true,
+ }
+
+ ctx := context.WithValue(context.Background(), AuthHeadersContextKey, authHeaders)
+ k, err := provider.GetDerivedKubernetes(ctx, "")
+ require.NoError(t, err)
+ require.NotNil(t, k)
+ assert.NotNil(t, k.manager)
+ })
+
+ t.Run("GetDerivedKubernetes accepts client certificate authentication", func(t *testing.T) {
+ // Note: We use dummy cert/key data since we can't easily create valid certificates for testing.
+ // The actual validation happens when connecting to the cluster, not during manager creation.
+ clientCert := []byte("dummy-cert")
+ clientKey := []byte("dummy-key")
+
+ authHeaders := &K8sAuthHeaders{
+ Server: mockServer.Config().Host,
+ CertificateAuthorityData: nil,
+ ClientCertificateData: clientCert,
+ ClientKeyData: clientKey,
+ InsecureSkipTLSVerify: true,
+ AuthorizationToken: "", // No token when using client cert
+ }
+
+ // This should fail because the certificates are invalid, but we're testing that the provider
+ // accepts the auth headers and attempts to create the manager
+ ctx := context.WithValue(context.Background(), AuthHeadersContextKey, authHeaders)
+ _, err := provider.GetDerivedKubernetes(ctx, "")
+ // Expect an error about invalid certificates, which means the provider accepted the headers
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "failed to create auth headers cluster manager")
+ })
+
+ t.Run("GetDerivedKubernetes works with insecure skip TLS verify", func(t *testing.T) {
+ authHeaders := &K8sAuthHeaders{
+ Server: mockServer.Config().Host,
+ CertificateAuthorityData: nil, // Don't provide CA data when skipping TLS verification
+ AuthorizationToken: "test-token",
+ InsecureSkipTLSVerify: true,
+ }
+
+ ctx := context.WithValue(context.Background(), AuthHeadersContextKey, authHeaders)
+ k, err := provider.GetDerivedKubernetes(ctx, "")
+ require.NoError(t, err)
+ require.NotNil(t, k)
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders parses token auth correctly", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): mockServer.Config().Host,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): "Bearer test-token",
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.Equal(t, mockServer.Config().Host, authHeaders.Server)
+ assert.Equal(t, caCert, authHeaders.CertificateAuthorityData)
+ assert.Equal(t, "Bearer test-token", authHeaders.AuthorizationToken)
+ assert.False(t, authHeaders.InsecureSkipTLSVerify)
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders parses cert auth correctly", func(t *testing.T) {
+ clientCert := []byte("test-client-cert")
+ clientKey := []byte("test-client-key")
+ clientCertBase64 := base64.StdEncoding.EncodeToString(clientCert)
+ clientKeyBase64 := base64.StdEncoding.EncodeToString(clientKey)
+
+ headers := map[string]any{
+ string(CustomServerHeader): mockServer.Config().Host,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomClientCertificateDataHeader): clientCertBase64,
+ string(CustomClientKeyDataHeader): clientKeyBase64,
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.Equal(t, mockServer.Config().Host, authHeaders.Server)
+ assert.Equal(t, caCert, authHeaders.CertificateAuthorityData)
+ assert.Equal(t, clientCert, authHeaders.ClientCertificateData)
+ assert.Equal(t, clientKey, authHeaders.ClientKeyData)
+ assert.True(t, authHeaders.IsValid())
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders requires server header", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): "Bearer test-token",
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-server")
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders requires CA data header", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): mockServer.Config().Host,
+ string(CustomAuthorizationHeader): "Bearer test-token",
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "kubernetes-certificate-authority-data")
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders requires valid auth method", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): mockServer.Config().Host,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "authentication")
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders handles insecure skip TLS verify", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): mockServer.Config().Host,
+ string(CustomCertificateAuthorityDataHeader): caCertBase64,
+ string(CustomAuthorizationHeader): "Bearer test-token",
+ string(CustomInsecureSkipTLSVerifyHeader): "true",
+ }
+
+ authHeaders, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.NoError(t, err)
+ assert.True(t, authHeaders.InsecureSkipTLSVerify)
+ })
+
+ t.Run("NewK8sAuthHeadersFromHeaders handles invalid base64 CA data", func(t *testing.T) {
+ headers := map[string]any{
+ string(CustomServerHeader): mockServer.Config().Host,
+ string(CustomCertificateAuthorityDataHeader): "invalid-base64!!!",
+ string(CustomAuthorizationHeader): "Bearer test-token",
+ }
+
+ _, err := NewK8sAuthHeadersFromHeaders(headers)
+ require.Error(t, err)
+ assert.Contains(t, err.Error(), "certificate authority data")
+ })
+}
diff --git a/pkg/kubernetes/resources.go b/pkg/kubernetes/resources.go
index 1f559e12..c73cc0f4 100644
--- a/pkg/kubernetes/resources.go
+++ b/pkg/kubernetes/resources.go
@@ -3,10 +3,11 @@ package kubernetes
import (
"context"
"fmt"
- "k8s.io/apimachinery/pkg/runtime"
"regexp"
"strings"
+ "k8s.io/apimachinery/pkg/runtime"
+
"github.com/containers/kubernetes-mcp-server/pkg/version"
authv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -42,7 +43,7 @@ func (k *Kubernetes) ResourcesList(ctx context.Context, gvk *schema.GroupVersion
if options.AsTable {
return k.resourcesListAsTable(ctx, gvk, gvr, namespace, options)
}
- return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).List(ctx, options.ListOptions)
+ return k.AccessControlClientset().DynamicClient().Resource(*gvr).Namespace(namespace).List(ctx, options.ListOptions)
}
func (k *Kubernetes) ResourcesGet(ctx context.Context, gvk *schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) {
@@ -55,7 +56,7 @@ func (k *Kubernetes) ResourcesGet(ctx context.Context, gvk *schema.GroupVersionK
if namespaced, nsErr := k.isNamespaced(gvk); nsErr == nil && namespaced {
namespace = k.NamespaceOrDefault(namespace)
}
- return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
+ return k.AccessControlClientset().DynamicClient().Resource(*gvr).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
}
func (k *Kubernetes) ResourcesCreateOrUpdate(ctx context.Context, resource string) ([]*unstructured.Unstructured, error) {
@@ -82,7 +83,7 @@ func (k *Kubernetes) ResourcesDelete(ctx context.Context, gvk *schema.GroupVersi
if namespaced, nsErr := k.isNamespaced(gvk); nsErr == nil && namespaced {
namespace = k.NamespaceOrDefault(namespace)
}
- return k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
+ return k.AccessControlClientset().DynamicClient().Resource(*gvr).Namespace(namespace).Delete(ctx, name, metav1.DeleteOptions{})
}
// resourcesListAsTable retrieves a list of resources in a table format.
@@ -101,7 +102,7 @@ func (k *Kubernetes) resourcesListAsTable(ctx context.Context, gvk *schema.Group
}
url = append(url, gvr.Resource)
var table metav1.Table
- err := k.manager.discoveryClient.RESTClient().
+ err := k.AccessControlClientset().CoreV1().RESTClient().
Get().
SetHeader("Accept", strings.Join([]string{
fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName),
@@ -145,7 +146,7 @@ func (k *Kubernetes) resourcesCreateOrUpdate(ctx context.Context, resources []*u
if namespaced, nsErr := k.isNamespaced(&gvk); nsErr == nil && namespaced {
namespace = k.NamespaceOrDefault(namespace)
}
- resources[i], rErr = k.manager.dynamicClient.Resource(*gvr).Namespace(namespace).Apply(ctx, obj.GetName(), obj, metav1.ApplyOptions{
+ resources[i], rErr = k.AccessControlClientset().DynamicClient().Resource(*gvr).Namespace(namespace).Apply(ctx, obj.GetName(), obj, metav1.ApplyOptions{
FieldManager: version.BinaryName,
})
if rErr != nil {
@@ -153,14 +154,14 @@ func (k *Kubernetes) resourcesCreateOrUpdate(ctx context.Context, resources []*u
}
// Clear the cache to ensure the next operation is performed on the latest exposed APIs (will change after the CRD creation)
if gvk.Kind == "CustomResourceDefinition" {
- k.manager.accessControlRESTMapper.Reset()
+ k.AccessControlClientset().RESTMapper().Reset()
}
}
return resources, nil
}
func (k *Kubernetes) resourceFor(gvk *schema.GroupVersionKind) (*schema.GroupVersionResource, error) {
- m, err := k.manager.accessControlRESTMapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version)
+ m, err := k.AccessControlClientset().RESTMapper().RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version)
if err != nil {
return nil, err
}
@@ -168,7 +169,7 @@ func (k *Kubernetes) resourceFor(gvk *schema.GroupVersionKind) (*schema.GroupVer
}
func (k *Kubernetes) isNamespaced(gvk *schema.GroupVersionKind) (bool, error) {
- apiResourceList, err := k.manager.discoveryClient.ServerResourcesForGroupVersion(gvk.GroupVersion().String())
+ apiResourceList, err := k.AccessControlClientset().DiscoveryClient().ServerResourcesForGroupVersion(gvk.GroupVersion().String())
if err != nil {
return false, err
}
@@ -181,17 +182,14 @@ func (k *Kubernetes) isNamespaced(gvk *schema.GroupVersionKind) (bool, error) {
}
func (k *Kubernetes) supportsGroupVersion(groupVersion string) bool {
- if _, err := k.manager.discoveryClient.ServerResourcesForGroupVersion(groupVersion); err != nil {
+ if _, err := k.AccessControlClientset().DiscoveryClient().ServerResourcesForGroupVersion(groupVersion); err != nil {
return false
}
return true
}
func (k *Kubernetes) canIUse(ctx context.Context, gvr *schema.GroupVersionResource, namespace, verb string) bool {
- accessReviews, err := k.manager.accessControlClientSet.SelfSubjectAccessReviews()
- if err != nil {
- return false
- }
+ accessReviews := k.AccessControlClientset().AuthorizationV1().SelfSubjectAccessReviews()
response, err := accessReviews.Create(ctx, &authv1.SelfSubjectAccessReview{
Spec: authv1.SelfSubjectAccessReviewSpec{ResourceAttributes: &authv1.ResourceAttributes{
Namespace: namespace,
diff --git a/pkg/mcp/helm_test.go b/pkg/mcp/helm_test.go
index f2af3d23..b2b67a08 100644
--- a/pkg/mcp/helm_test.go
+++ b/pkg/mcp/helm_test.go
@@ -202,6 +202,41 @@ func (s *HelmSuite) TestHelmList() {
})
}
+func (s *HelmSuite) TestHelmListDenied() {
+ s.Require().NoError(toml.Unmarshal([]byte(`
+ denied_resources = [ { version = "v1", kind = "Secret" } ]
+ `), s.Cfg), "Expected to parse denied resources config")
+ kc := kubernetes.NewForConfigOrDie(envTestRestConfig)
+ _, err := kc.CoreV1().Secrets("default").Create(s.T().Context(), &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "sh.helm.release.v1.release-to-list-denied",
+ Labels: map[string]string{"owner": "helm", "name": "release-to-list-denied"},
+ },
+ Data: map[string][]byte{
+ "release": []byte(base64.StdEncoding.EncodeToString([]byte("{" +
+ "\"name\":\"release-to-list-denied\"," +
+ "\"info\":{\"status\":\"deployed\"}" +
+ "}"))),
+ },
+ }, metav1.CreateOptions{})
+ s.Require().NoError(err)
+ s.InitMcpClient()
+ s.Run("helm_list() with deployed release (denied)", func() {
+ toolResult, err := s.CallTool("helm_list", map[string]interface{}{})
+ s.Run("has error", func() {
+ s.Truef(toolResult.IsError, "call tool should fail")
+ s.Nilf(err, "call tool should not return error object")
+ })
+ s.Run("describes denial", func() {
+ msg := toolResult.Content[0].(mcp.TextContent).Text
+ s.Contains(msg, "resource not allowed:")
+ s.Truef(strings.HasPrefix(msg, "failed to list helm releases"), "expected descriptive error, got %v", toolResult.Content[0].(mcp.TextContent).Text)
+ expectedMessage := ": resource not allowed: /v1, Kind=Secret"
+ s.Truef(strings.HasSuffix(msg, expectedMessage), "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text)
+ })
+ })
+}
+
func (s *HelmSuite) TestHelmUninstallNoReleases() {
s.InitMcpClient()
s.Run("helm_uninstall(name=release-to-uninstall) with no releases", func() {
diff --git a/pkg/mcp/mcp.go b/pkg/mcp/mcp.go
index 6a4a6d2f..06e5a78d 100644
--- a/pkg/mcp/mcp.go
+++ b/pkg/mcp/mcp.go
@@ -82,6 +82,9 @@ func NewServer(configuration Configuration) (*Server, error) {
}),
}
+ if configuration.ClusterProviderStrategy == config.ClusterProviderAuthHeaders {
+ s.server.AddReceivingMiddleware(customAuthHeadersPropagationMiddleware)
+ }
s.server.AddReceivingMiddleware(authHeaderPropagationMiddleware)
s.server.AddReceivingMiddleware(toolCallLoggingMiddleware)
if configuration.RequireOAuth && false { // TODO: Disabled scope auth validation for now
diff --git a/pkg/mcp/mcp_middleware_test.go b/pkg/mcp/mcp_middleware_test.go
index ce88e7b4..2b150ae5 100644
--- a/pkg/mcp/mcp_middleware_test.go
+++ b/pkg/mcp/mcp_middleware_test.go
@@ -85,3 +85,65 @@ func (s *McpLoggingSuite) TestLogsToolCallHeaders() {
func TestMcpLogging(t *testing.T) {
suite.Run(t, new(McpLoggingSuite))
}
+
+type CustomAuthHeadersMiddlewareSuite struct {
+ BaseMcpSuite
+}
+
+func (s *CustomAuthHeadersMiddlewareSuite) TestParsesAuthHeadersFromHTTPHeaders() {
+ caCertBase64 := "dGVzdC1jYS1jZXJ0" // base64 of "test-ca-cert"
+ serverURL := "https://k8s.example.com:6443"
+ token := "Bearer test-token"
+
+ s.InitMcpClient(transport.WithHTTPHeaders(map[string]string{
+ "kubernetes-server": serverURL,
+ "kubernetes-certificate-authority-data": caCertBase64,
+ "kubernetes-authorization": token,
+ }))
+
+ _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false})
+ s.Require().NoError(err, "call to tool configuration_view failed")
+
+ // The middleware should have successfully parsed and added auth headers to context
+ // This is validated indirectly by the tool call succeeding
+}
+
+func (s *CustomAuthHeadersMiddlewareSuite) TestHeadersAreLowercased() {
+ caCertBase64 := "dGVzdC1jYS1jZXJ0" // base64 of "test-ca-cert"
+ serverURL := "https://k8s.example.com:6443"
+ token := "Bearer test-token"
+
+ // Use uppercase header names
+ s.InitMcpClient(transport.WithHTTPHeaders(map[string]string{
+ "Kubernetes-Server": serverURL, // uppercase K
+ "KUBERNETES-CERTIFICATE-AUTHORITY-DATA": caCertBase64, // all uppercase
+ "Kubernetes-Authorization": token, // mixed case
+ }))
+
+ _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false})
+ s.Require().NoError(err, "call should succeed even with uppercase headers")
+}
+
+func (s *CustomAuthHeadersMiddlewareSuite) TestIgnoresInvalidAuthHeadersWhenNotUsingAuthHeadersProvider() {
+ // When not using auth-headers provider, invalid custom headers are ignored
+ // and the default kubeconfig provider is used instead
+ s.InitMcpClient(transport.WithHTTPHeaders(map[string]string{
+ "kubernetes-server": "https://k8s.example.com:6443",
+ // Missing CA cert and authorization - will be ignored
+ }))
+
+ _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false})
+ s.Require().NoError(err, "call should succeed using default kubeconfig provider")
+}
+
+func (s *CustomAuthHeadersMiddlewareSuite) TestPassesThroughWithNoHeaders() {
+ // No custom headers provided - should work with default kubeconfig
+ s.InitMcpClient()
+
+ _, err := s.CallTool("configuration_view", map[string]interface{}{"minified": false})
+ s.Require().NoError(err, "call should succeed without custom headers")
+}
+
+func TestCustomAuthHeadersMiddleware(t *testing.T) {
+ suite.Run(t, new(CustomAuthHeadersMiddlewareSuite))
+}
diff --git a/pkg/mcp/middleware.go b/pkg/mcp/middleware.go
index ec6f4d42..7a85a3fc 100644
--- a/pkg/mcp/middleware.go
+++ b/pkg/mcp/middleware.go
@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"slices"
+ "strings"
internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
"github.com/modelcontextprotocol/go-sdk/mcp"
@@ -30,6 +31,46 @@ func authHeaderPropagationMiddleware(next mcp.MethodHandler) mcp.MethodHandler {
}
}
+func customAuthHeadersPropagationMiddleware(next mcp.MethodHandler) mcp.MethodHandler {
+ return func(ctx context.Context, method string, req mcp.Request) (mcp.Result, error) {
+
+ var authHeaders *internalk8s.K8sAuthHeaders = nil
+ var err error
+ // Try to parse auth headers from tool params meta.
+ if req.GetParams() != nil {
+ if toolParams, ok := req.GetParams().(*mcp.CallToolParamsRaw); ok {
+ toolParamsMeta := toolParams.GetMeta()
+ authHeaders, err = internalk8s.NewK8sAuthHeadersFromHeaders(toolParamsMeta)
+ if err != nil {
+ klog.V(4).ErrorS(err, "failed to parse custom auth headers from tool params meta", "tool", req.GetParams().(*mcp.CallToolParamsRaw).Name)
+ }
+ }
+ }
+
+ // If auth headers are not found in tool params meta, try to parse from request extra.
+ if authHeaders == nil && req.GetExtra() != nil && req.GetExtra().Header != nil {
+ // Convert http.Header to map[string]any with lowercased keys.
+ headerMap := make(map[string]any)
+ for key, values := range req.GetExtra().Header {
+ if len(values) > 0 {
+ headerMap[strings.ToLower(key)] = values[0]
+ }
+ }
+			// Parse and validate the Kubernetes auth headers from the HTTP request headers (errors if required headers are missing or malformed).
+ authHeaders, err = internalk8s.NewK8sAuthHeadersFromHeaders(headerMap)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Add auth headers to context
+ if authHeaders != nil {
+ ctx = context.WithValue(ctx, internalk8s.AuthHeadersContextKey, authHeaders)
+ }
+ return next(ctx, method, req)
+ }
+}
+
func toolCallLoggingMiddleware(next mcp.MethodHandler) mcp.MethodHandler {
return func(ctx context.Context, method string, req mcp.Request) (mcp.Result, error) {
switch params := req.GetParams().(type) {
diff --git a/pkg/toolsets/kiali/graph.go b/pkg/toolsets/kiali/graph.go
index 6bf32d47..a0a50f92 100644
--- a/pkg/toolsets/kiali/graph.go
+++ b/pkg/toolsets/kiali/graph.go
@@ -14,7 +14,7 @@ func initGraph() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "graph",
+ Name: "kiali_graph",
Description: "Check the status of my mesh by querying Kiali graph",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/health.go b/pkg/toolsets/kiali/health.go
index 01b86e1e..dd9f98fb 100644
--- a/pkg/toolsets/kiali/health.go
+++ b/pkg/toolsets/kiali/health.go
@@ -15,7 +15,7 @@ func initHealth() []api.ServerTool {
// Cluster health tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "health",
+ Name: "kiali_health",
Description: "Get health status for apps, workloads, and services across specified namespaces in the mesh. Returns health information including error rates and status for the requested resource type",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/istio_config.go b/pkg/toolsets/kiali/istio_config.go
index df8a97c6..79fb75c5 100644
--- a/pkg/toolsets/kiali/istio_config.go
+++ b/pkg/toolsets/kiali/istio_config.go
@@ -13,7 +13,7 @@ func initIstioConfig() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "istio_config",
+ Name: "kiali_istio_config",
Description: "Get all Istio configuration objects in the mesh including their full YAML resources and details",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -45,7 +45,7 @@ func initIstioObjectDetails() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "istio_object_details",
+ Name: "kiali_istio_object_details",
Description: "Get detailed information about a specific Istio object including validation and help information",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -105,7 +105,7 @@ func initIstioObjectPatch() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "istio_object_patch",
+ Name: "kiali_istio_object_patch",
Description: "Modify an existing Istio object using PATCH method. The JSON patch data will be applied to the existing object.",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -170,7 +170,7 @@ func initIstioObjectCreate() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "istio_object_create",
+ Name: "kiali_istio_object_create",
Description: "Create a new Istio object using POST method. The JSON data will be used to create the new object.",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -230,7 +230,7 @@ func initIstioObjectDelete() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "istio_object_delete",
+ Name: "kiali_istio_object_delete",
Description: "Delete an existing Istio object using DELETE method.",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/mesh.go b/pkg/toolsets/kiali/mesh.go
index d13fa48b..6e134f8a 100644
--- a/pkg/toolsets/kiali/mesh.go
+++ b/pkg/toolsets/kiali/mesh.go
@@ -13,7 +13,7 @@ func initMeshStatus() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "mesh_status",
+ Name: "kiali_mesh_status",
Description: "Get the status of mesh components including Istio, Kiali, Grafana, Prometheus and their interactions, versions, and health status",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/namespaces.go b/pkg/toolsets/kiali/namespaces.go
index a006f2b1..a380cc82 100644
--- a/pkg/toolsets/kiali/namespaces.go
+++ b/pkg/toolsets/kiali/namespaces.go
@@ -13,7 +13,7 @@ func initNamespaces() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "namespaces",
+ Name: "kiali_namespaces",
Description: "Get all namespaces in the mesh that the user has access to",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/services.go b/pkg/toolsets/kiali/services.go
index 1fd2018c..30ff6557 100644
--- a/pkg/toolsets/kiali/services.go
+++ b/pkg/toolsets/kiali/services.go
@@ -15,7 +15,7 @@ func initServices() []api.ServerTool {
// Services list tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "services_list",
+ Name: "kiali_services_list",
Description: "Get all services in the mesh across specified namespaces with health and Istio resource information",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -39,7 +39,7 @@ func initServices() []api.ServerTool {
// Service details tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "service_details",
+ Name: "kiali_service_details",
Description: "Get detailed information for a specific service in a namespace, including validation, health status, and configuration",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -68,7 +68,7 @@ func initServices() []api.ServerTool {
// Service metrics tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "service_metrics",
+ Name: "kiali_service_metrics",
Description: "Get metrics for a specific service in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/toolset.go b/pkg/toolsets/kiali/toolset.go
index c89afcd4..7467992c 100644
--- a/pkg/toolsets/kiali/toolset.go
+++ b/pkg/toolsets/kiali/toolset.go
@@ -17,7 +17,7 @@ func (t *Toolset) GetName() string {
}
func (t *Toolset) GetDescription() string {
- return "Most common tools for managing Kiali, check the [Kiali integration documentation](https://github.com/containers/kubernetes-mcp-server/blob/main/docs/KIALI_INTEGRATION.md) for more details."
+ return "Most common tools for managing Kiali, check the [Kiali documentation](https://github.com/containers/kubernetes-mcp-server/blob/main/docs/KIALI.md) for more details."
}
func (t *Toolset) GetTools(_ internalk8s.Openshift) []api.ServerTool {
diff --git a/pkg/toolsets/kiali/traces.go b/pkg/toolsets/kiali/traces.go
index fd5aacc9..e9169505 100644
--- a/pkg/toolsets/kiali/traces.go
+++ b/pkg/toolsets/kiali/traces.go
@@ -15,7 +15,7 @@ func initTraces() []api.ServerTool {
// App traces tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "app_traces",
+ Name: "kiali_app_traces",
Description: "Get distributed tracing data for a specific app in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -71,7 +71,7 @@ func initTraces() []api.ServerTool {
// Service traces tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "service_traces",
+ Name: "kiali_service_traces",
Description: "Get distributed tracing data for a specific service in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -127,7 +127,7 @@ func initTraces() []api.ServerTool {
// Workload traces tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "workload_traces",
+ Name: "kiali_workload_traces",
Description: "Get distributed tracing data for a specific workload in a namespace. Returns trace information including spans, duration, and error details for troubleshooting and performance analysis.",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/validations.go b/pkg/toolsets/kiali/validations.go
index 898f7d03..6201da9a 100644
--- a/pkg/toolsets/kiali/validations.go
+++ b/pkg/toolsets/kiali/validations.go
@@ -14,7 +14,7 @@ func initValidations() []api.ServerTool {
ret := make([]api.ServerTool, 0)
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "validations_list",
+ Name: "kiali_validations_list",
Description: "List all the validations in the current cluster from all namespaces",
InputSchema: &jsonschema.Schema{
Type: "object",
diff --git a/pkg/toolsets/kiali/workloads.go b/pkg/toolsets/kiali/workloads.go
index f8d03a28..6d2b30cb 100644
--- a/pkg/toolsets/kiali/workloads.go
+++ b/pkg/toolsets/kiali/workloads.go
@@ -15,7 +15,7 @@ func initWorkloads() []api.ServerTool {
// Workloads list tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "workloads_list",
+ Name: "kiali_workloads_list",
Description: "Get all workloads in the mesh across specified namespaces with health and Istio resource information",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -39,7 +39,7 @@ func initWorkloads() []api.ServerTool {
// Workload details tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "workload_details",
+ Name: "kiali_workload_details",
Description: "Get detailed information for a specific workload in a namespace, including validation, health status, and configuration",
InputSchema: &jsonschema.Schema{
Type: "object",
@@ -68,7 +68,7 @@ func initWorkloads() []api.ServerTool {
// Workload metrics tool
ret = append(ret, api.ServerTool{
Tool: api.Tool{
- Name: "workload_metrics",
+ Name: "kiali_workload_metrics",
Description: "Get metrics for a specific workload in a namespace. Supports filtering by time range, direction (inbound/outbound), reporter, and other query parameters",
InputSchema: &jsonschema.Schema{
Type: "object",