Initial commit

Caleb Xu
2022-12-28 01:54:25 -05:00
commit 3932ca2b77
46 changed files with 22259 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,8 @@
dist/
node_modules/
.DS_Store
yarn-error.log
coverage/
tmp/
locales/**/**.js
lens.log

LICENSE Normal file

@@ -0,0 +1,18 @@
Copyright (c) 2022 OpenLens Authors.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

Makefile Normal file

@@ -0,0 +1,19 @@
install-deps:
	npm ci --prefix src/kube-object-event-status
	npm ci --prefix src/metrics-cluster-feature
	npm ci --prefix src/node-menu
	npm ci --prefix src/pod-menu

build: install-deps
	mkdir -p dist
	npm run --prefix src/kube-object-event-status build ${PWD}/dist
	npm run --prefix src/metrics-cluster-feature build ${PWD}/dist
	npm run --prefix src/node-menu build ${PWD}/dist
	npm run --prefix src/pod-menu build ${PWD}/dist

clean:
	rm -rf dist \
		src/kube-object-event-status/dist src/kube-object-event-status/node_modules \
		src/metrics-cluster-feature/dist src/metrics-cluster-feature/node_modules \
		src/node-menu/dist src/node-menu/node_modules \
		src/pod-menu/dist src/pod-menu/node_modules

README.md Normal file

@@ -0,0 +1,27 @@
# OpenLens Extensions

This repository contains the last versions of the MIT-licensed extensions from the OpenLens repository (https://github.com/lensapp/lens) prior to their removal in version 6.3.0. Some minor changes have been made to allow these extensions to be built outside of the OpenLens tree (e.g. adding some `devDependencies`).

# How to use these extensions

From the root of this repository:

```sh
# Choose the same version of Node that is used by the Electron version
# that OpenLens ships with. It might work with other (newer) versions of
# Node, but I haven't tested that.
nvm install 16.14.2
make build
```

Tarballs for the four extensions will be written to the `dist` subdirectory. In OpenLens, navigate to the Extensions list and provide the path to the tarball of each extension to be loaded, or drag and drop the extension tarball into the OpenLens window. After loading for a moment, the extension should appear in the list of enabled extensions. Verify that the features provided by each extension work as expected.
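For reference, a successful build should leave one tarball per extension in `dist`, named by npm's `<name>-<version>.tgz` convention. The versions below are taken from each extension's `package.json` in this commit; the `pod-menu` version is an assumption for illustration (check `src/pod-menu/package.json`):

```sh
$ ls dist
openlens-kube-object-event-status-6.1.1.tgz
openlens-metrics-cluster-feature-6.1.0.tgz
openlens-node-menu-6.1.0.tgz
openlens-pod-menu-6.1.0.tgz
```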
# License

Like the OpenLens repository itself at the point from which these extensions were extracted, the content of this repository is released under the MIT license. See the file `LICENSE` for details.

# Disclaimer

I don't use all of the features provided by these extensions on a regular basis. My evaluation and testing of them extends only as far as using them to get work done during my day job. I will try to maintain them as best I can, but on a best-effort basis for now.

File diff suppressed because it is too large

@@ -0,0 +1,23 @@
{
"name": "openlens-kube-object-event-status",
"version": "6.1.1",
"description": "Adds kube object status from events into OpenLens",
"renderer": "dist/renderer.js",
"engines": {
"node": "^16.14.2",
"lens": "^6.3.0"
},
"scripts": {
"build": "npx webpack && npm pack --pack-destination $@",
"dev": "npx webpack -- --watch",
"test": "echo NO TESTS"
},
"files": [
"dist/**/*"
],
"devDependencies": {
"@k8slens/extensions": "^6.3.0",
"ts-loader": "^9.4.2",
"webpack-cli": "^5.0.1"
}
}


@@ -0,0 +1,53 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import { Renderer } from "@k8slens/extensions";
import { resolveStatus, resolveStatusForCronJobs, resolveStatusForPods } from "./src/resolver";
export default class EventResourceStatusRendererExtension extends Renderer.LensExtension {
kubeObjectStatusTexts = [
{
kind: "Pod",
apiVersions: ["v1"],
resolve: (pod: Renderer.K8sApi.Pod) => resolveStatusForPods(pod),
},
{
kind: "ReplicaSet",
apiVersions: ["v1"],
resolve: (replicaSet: Renderer.K8sApi.ReplicaSet) => resolveStatus(replicaSet),
},
{
kind: "Deployment",
apiVersions: ["apps/v1"],
resolve: (deployment: Renderer.K8sApi.Deployment) => resolveStatus(deployment),
},
{
kind: "StatefulSet",
apiVersions: ["apps/v1"],
resolve: (statefulSet: Renderer.K8sApi.StatefulSet) => resolveStatus(statefulSet),
},
{
kind: "DaemonSet",
apiVersions: ["apps/v1"],
resolve: (daemonSet: Renderer.K8sApi.DaemonSet) => resolveStatus(daemonSet),
},
{
kind: "Job",
apiVersions: [
"batch/v1",
"batch/v1beta1",
],
resolve: (job: Renderer.K8sApi.Job) => resolveStatus(job),
},
{
kind: "CronJob",
apiVersions: [
"batch/v1",
"batch/v1beta1",
],
resolve: (cronJob: Renderer.K8sApi.CronJob) => resolveStatusForCronJobs(cronJob),
},
];
}


@@ -0,0 +1,72 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import { Renderer } from "@k8slens/extensions";
const { apiManager, eventApi, KubeObjectStatusLevel } = Renderer.K8sApi;
type KubeObject = Renderer.K8sApi.KubeObject;
type Pod = Renderer.K8sApi.Pod;
type CronJob = Renderer.K8sApi.CronJob;
type KubeObjectStatus = Renderer.K8sApi.KubeObjectStatus;
type EventStore = Renderer.K8sApi.EventStore;
export function resolveStatus(object: KubeObject): KubeObjectStatus {
const eventStore = apiManager.getStore(eventApi);
const events = (eventStore as EventStore).getEventsByObject(object);
const warnings = events.filter(evt => evt.isWarning());
if (!events.length || !warnings.length) {
return null;
}
const event = [...warnings, ...events][0]; // lists are sorted newest first, so this is the most recent warning
return {
level: KubeObjectStatusLevel.WARNING,
text: `${event.message}`,
timestamp: event.metadata.creationTimestamp,
};
}
export function resolveStatusForPods(pod: Pod): KubeObjectStatus {
if (!pod.hasIssues()) {
return null;
}
const eventStore = apiManager.getStore(eventApi);
const events = (eventStore as EventStore).getEventsByObject(pod);
const warnings = events.filter(evt => evt.isWarning());
if (!events.length || !warnings.length) {
return null;
}
const event = [...warnings, ...events][0]; // lists are sorted newest first, so this is the most recent warning
return {
level: KubeObjectStatusLevel.WARNING,
text: `${event.message}`,
timestamp: event.metadata.creationTimestamp,
};
}
export function resolveStatusForCronJobs(cronJob: CronJob): KubeObjectStatus {
const eventStore = apiManager.getStore(eventApi);
let events = (eventStore as EventStore).getEventsByObject(cronJob);
const warnings = events.filter(evt => evt.isWarning());
if (cronJob.isNeverRun()) {
// Ignore "FailedNeedsStart" noise for CronJobs that have never run
events = events.filter(event => event.reason !== "FailedNeedsStart");
}
if (!events.length || !warnings.length) {
return null;
}
const event = [...warnings, ...events][0]; // lists are sorted newest first, so this is the most recent warning
return {
level: KubeObjectStatusLevel.WARNING,
text: `${event.message}`,
timestamp: event.metadata.creationTimestamp,
};
}


@@ -0,0 +1,27 @@
{
"compilerOptions": {
"outDir": "dist",
"module": "CommonJS",
"target": "ES2017",
"lib": ["ESNext", "DOM", "DOM.Iterable"],
"moduleResolution": "Node",
"sourceMap": false,
"declaration": false,
"strict": false,
"noImplicitAny": true,
"skipLibCheck": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"experimentalDecorators": true,
"useDefineForClassFields": true,
"jsx": "react"
},
"include": [
"./*.ts",
"./*.tsx"
],
"exclude": [
"node_modules",
"*.js"
]
}


@@ -0,0 +1,44 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
const path = require("path");
module.exports = [
{
entry: "./renderer.tsx",
context: __dirname,
target: "electron-renderer",
mode: "production",
optimization: {
minimize: false,
},
module: {
rules: [
{
test: /\.tsx?$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
externals: [
{
"@k8slens/extensions": "var global.LensExtensions",
"react": "var global.React",
"react-dom": "var global.ReactDOM",
"mobx": "var global.Mobx",
"mobx-react": "var global.MobxReact",
},
],
resolve: {
extensions: [ ".tsx", ".ts", ".js" ],
},
output: {
libraryTarget: "commonjs2",
globalObject: "this",
filename: "renderer.js",
path: path.resolve(__dirname, "dist"),
},
},
];

File diff suppressed because it is too large

@@ -0,0 +1,30 @@
{
"name": "openlens-metrics-cluster-feature",
"version": "6.1.0",
"description": "OpenLens metrics cluster feature",
"renderer": "dist/renderer.js",
"engines": {
"node": "^16.14.2",
"lens": "^6.3.0"
},
"scripts": {
"build": "npx webpack && npm pack --pack-destination $@",
"dev": "npx webpack -- --watch",
"test": "npx jest --passWithNoTests --env=jsdom src $@",
"clean": "rm -rf dist/ && rm *.tgz"
},
"files": [
"dist/**/*",
"resources/**/*"
],
"devDependencies": {
"@k8slens/extensions": "^6.3.0",
"@types/react": "^17.0.52",
"@types/semver": "^7.3.13",
"mobx": "^6.7.0",
"mobx-react": "^7.6.0",
"semver": "^7.3.2",
"ts-loader": "^9.4.2",
"webpack-cli": "^5.0.1"
}
}


@@ -0,0 +1,27 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import React from "react";
import type { Common } from "@k8slens/extensions";
import { Renderer } from "@k8slens/extensions";
import { MetricsSettings } from "./src/metrics-settings";
export default class ClusterMetricsFeatureExtension extends Renderer.LensExtension {
entitySettings = [
{
apiVersions: ["entity.k8slens.dev/v1alpha1"],
kind: "KubernetesCluster",
title: "Lens Metrics",
priority: 5,
components: {
View: ({ entity = null }: { entity: Common.Catalog.KubernetesCluster }) => {
return (
<MetricsSettings cluster={entity} />
);
},
},
},
];
}


@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
name: lens-metrics
annotations:
extensionVersion: "{{ version }}"


@@ -0,0 +1,281 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-config
namespace: lens-metrics
data:
prometheus.yaml: |-
# Global config
global:
scrape_interval: 15s
{{#if alertManagers}}
# AlertManager
alerting:
alertmanagers:
- static_configs:
- targets:
{{#each alertManagers}}
- {{this}}
{{/each}}
{{/if}}
# Scrape configs for running Prometheus on a Kubernetes cluster.
# This uses separate scrape configs for cluster components (i.e. API server, node)
# and services to allow each to use different authentication configs.
#
# Kubernetes labels will be added as Prometheus labels on metrics via the
# `labelmap` relabeling action.
scrape_configs:
# Scrape config for API servers.
#
# Kubernetes exposes API servers as endpoints to the default/kubernetes
# service so this uses `endpoints` role and uses relabelling to only keep
# the endpoints associated with the default/kubernetes service using the
# default named port `https`. This works for single API server deployments as
# well as HA API server deployments.
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# Using endpoints to discover kube-apiserver targets finds the pod IP
# (host IP since apiserver uses host network) which is not used in
# the server certificate.
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
# Keep only the default/kubernetes service endpoints for the https port. This
# will add targets for each API server for which Kubernetes adds an endpoint
# to the default/kubernetes service.
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
- replacement: apiserver
action: replace
target_label: job
# Scrape config for node (i.e. kubelet) /metrics (e.g. 'kubelet_'). Explore
# metrics from a node by scraping kubelet (127.0.0.1:10250/metrics).
- job_name: 'kubelet'
kubernetes_sd_configs:
- role: node
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
# Kubelet certs don't have any fixed IP SANs
insecure_skip_verify: true
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- replacement: 'lens-metrics'
target_label: kubernetes_namespace
metric_relabel_configs:
- source_labels:
- namespace
action: replace
regex: (.+)
target_label: kubernetes_namespace
# Scrape config for Kubelet cAdvisor. Explore metrics from a node by
# scraping kubelet (127.0.0.1:10250/metrics/cadvisor).
- job_name: 'kubernetes-cadvisor'
kubernetes_sd_configs:
- role: node
scheme: https
metrics_path: /metrics/cadvisor
tls_config:
# Kubelet certs don't have any fixed IP SANs
insecure_skip_verify: true
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
metric_relabel_configs:
- source_labels:
- namespace
action: replace
target_label: kubernetes_namespace
- source_labels:
- pod
regex: (.*)
replacement: $1
action: replace
target_label: pod_name
- source_labels:
- container
regex: (.*)
replacement: $1
action: replace
target_label: container_name
# Scrape etcd metrics from masters via etcd-scraper-proxy
- job_name: 'etcd'
kubernetes_sd_configs:
- role: pod
scheme: http
relabel_configs:
- source_labels: [__meta_kubernetes_namespace]
action: keep
regex: 'kube-system'
- source_labels: [__meta_kubernetes_pod_label_component]
action: keep
regex: 'etcd-scraper-proxy'
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
# Scrape config for service endpoints.
#
# The relabeling allows the actual service scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
# to set this to `https` & most likely set the `tls_config` of the scrape config.
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
# * `prometheus.io/port`: If the metrics are exposed on a different port to the
# service then set this appropriately.
- job_name: 'kubernetes-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
namespaces:
names:
- lens-metrics
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
action: replace
target_label: __scheme__
regex: (https?)
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
action: replace
target_label: __address__
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: job
- action: replace
source_labels:
- __meta_kubernetes_pod_node_name
target_label: kubernetes_node
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
metric_relabel_configs:
- source_labels:
- namespace
action: replace
regex: (.+)
target_label: kubernetes_namespace
# Example scrape config for probing services via the Blackbox Exporter.
#
# The relabeling allows the actual service scrape endpoint to be configured
# via the following annotations:
#
# * `prometheus.io/probe`: Only probe services that have a value of `true`
- job_name: 'kubernetes-services'
metrics_path: /probe
params:
module: [http_2xx]
kubernetes_sd_configs:
- role: service
namespaces:
names:
- lens-metrics
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_service_name]
target_label: job
metric_relabel_configs:
- source_labels:
- namespace
action: replace
regex: (.+)
target_label: kubernetes_namespace
# Example scrape config for pods
#
# The relabeling allows the actual pod scrape endpoint to be configured via the
# following annotations:
#
# * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
# * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
# pod's declared ports (default is a port-free target if none are declared).
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
namespaces:
names:
- lens-metrics
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
metric_relabel_configs:
- source_labels:
- namespace
action: replace
regex: (.+)
target_label: kubernetes_namespace
# Rule files
rule_files:
- "/etc/prometheus/rules/*.rules"
- "/etc/prometheus/rules/*.yaml"
- "/etc/prometheus/rules/*.yml"


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
namespace: lens-metrics


@@ -0,0 +1,18 @@
{{#if prometheus.enabled}}
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: lens-metrics
annotations:
prometheus.io/scrape: 'true'
spec:
type: ClusterIP
selector:
name: prometheus
ports:
- name: web
protocol: TCP
port: 80
targetPort: 9090
{{/if}}


@@ -0,0 +1,113 @@
{{#if prometheus.enabled}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: prometheus
namespace: lens-metrics
spec:
replicas: {{replicas}}
serviceName: prometheus
selector:
matchLabels:
name: prometheus
template:
metadata:
labels:
name: prometheus
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
# <%- if config.node_selector -%>
# nodeSelector:
# <%- node_selector.to_h.each do |key, value| -%>
# <%= key %>: <%= value %>
# <%- end -%>
# <%- end -%>
# <%- unless config.tolerations.empty? -%>
# tolerations:
# <%- config.tolerations.each do |t| -%>
# -
# <%- t.each do |k,v| -%>
# <%= k %>: <%= v %>
# <%- end -%>
# <%- end -%>
# <%- end -%>
serviceAccountName: prometheus
initContainers:
- name: chown
image: docker.io/alpine:3.12
command: ["chown", "-R", "65534:65534", "/var/lib/prometheus"]
volumeMounts:
- name: data
mountPath: /var/lib/prometheus
containers:
- name: prometheus
image: quay.io/prometheus/prometheus:v2.27.1
args:
- --web.listen-address=0.0.0.0:9090
- --config.file=/etc/prometheus/prometheus.yaml
- --storage.tsdb.path=/var/lib/prometheus
- --storage.tsdb.retention.time={{retention.time}}
- --storage.tsdb.retention.size={{retention.size}}
- --storage.tsdb.min-block-duration=2h
- --storage.tsdb.max-block-duration=2h
ports:
- name: web
containerPort: 9090
volumeMounts:
- name: config
mountPath: /etc/prometheus
- name: rules
mountPath: /etc/prometheus/rules
- name: data
mountPath: /var/lib/prometheus
readinessProbe:
httpGet:
path: /-/ready
port: 9090
initialDelaySeconds: 10
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
initialDelaySeconds: 10
timeoutSeconds: 10
resources:
requests:
cpu: 100m
memory: 512Mi
terminationGracePeriodSeconds: 30
volumes:
- name: config
configMap:
name: prometheus-config
- name: rules
configMap:
name: prometheus-rules
{{#unless persistence.enabled}}
- name: data
emptyDir: {}
{{/unless}}
{{#if persistence.enabled}}
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
{{#if persistence.storageClass}}
storageClassName: "{{persistence.storageClass}}"
{{/if}}
resources:
requests:
storage: {{persistence.size}}
{{/if}}
{{/if}}


@@ -0,0 +1,514 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-rules
namespace: lens-metrics
data:
alertmanager.rules.yaml: |
groups:
- name: alertmanager.rules
rules:
- alert: AlertmanagerConfigInconsistent
expr: count_values("config_hash", alertmanager_config_hash) BY (service) / ON(service)
GROUP_LEFT() label_replace(prometheus_operator_alertmanager_spec_replicas, "service",
"alertmanager-$1", "alertmanager", "(.*)") != 1
for: 5m
labels:
severity: critical
annotations:
description: The configuration of the instances of the Alertmanager cluster
`{{$labels.service}}` are out of sync.
- alert: AlertmanagerDownOrMissing
expr: label_replace(prometheus_operator_alertmanager_spec_replicas, "job", "alertmanager-$1",
"alertmanager", "(.*)") / ON(job) GROUP_RIGHT() sum(up) BY (job) != 1
for: 5m
labels:
severity: warning
annotations:
description: An unexpected number of Alertmanagers are scraped or Alertmanagers
disappeared from discovery.
- alert: AlertmanagerFailedReload
expr: alertmanager_config_last_reload_successful == 0
for: 10m
labels:
severity: warning
annotations:
description: Reloading Alertmanager's configuration has failed for {{ $labels.namespace
}}/{{ $labels.pod}}.
etcd3.rules.yaml: |
groups:
- name: ./etcd3.rules
rules:
- alert: InsufficientMembers
expr: count(up{job="etcd"} == 0) > (count(up{job="etcd"}) / 2 - 1)
for: 3m
labels:
severity: critical
annotations:
description: If one more etcd member goes down the cluster will be unavailable
summary: etcd cluster insufficient members
- alert: NoLeader
expr: etcd_server_has_leader{job="etcd"} == 0
for: 1m
labels:
severity: critical
annotations:
description: etcd member {{ $labels.instance }} has no leader
summary: etcd member has no leader
- alert: HighNumberOfLeaderChanges
expr: increase(etcd_server_leader_changes_seen_total{job="etcd"}[1h]) > 3
labels:
severity: warning
annotations:
description: etcd instance {{ $labels.instance }} has seen {{ $value }} leader
changes within the last hour
summary: a high number of leader changes within the etcd cluster are happening
- alert: GRPCRequestsSlow
expr: histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job="etcd",grpc_type="unary"}[5m])) by (grpc_service, grpc_method, le))
> 0.15
for: 10m
labels:
severity: critical
annotations:
description: on etcd instance {{ $labels.instance }} gRPC requests to {{ $labels.grpc_method
}} are slow
summary: slow gRPC requests
- alert: HighNumberOfFailedHTTPRequests
expr: sum(rate(etcd_http_failed_total{job="etcd"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job="etcd"}[5m]))
BY (method) > 0.01
for: 10m
labels:
severity: warning
annotations:
description: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd
instance {{ $labels.instance }}'
summary: a high number of HTTP requests are failing
- alert: HighNumberOfFailedHTTPRequests
expr: sum(rate(etcd_http_failed_total{job="etcd"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job="etcd"}[5m]))
BY (method) > 0.05
for: 5m
labels:
severity: critical
annotations:
description: '{{ $value }}% of requests for {{ $labels.method }} failed on etcd
instance {{ $labels.instance }}'
summary: a high number of HTTP requests are failing
- alert: HTTPRequestsSlow
expr: histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m]))
> 0.15
for: 10m
labels:
severity: warning
annotations:
description: on etcd instance {{ $labels.instance }} HTTP requests to {{ $labels.method
}} are slow
summary: slow HTTP requests
- alert: EtcdMemberCommunicationSlow
expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
> 0.15
for: 10m
labels:
severity: warning
annotations:
description: etcd instance {{ $labels.instance }} member communication with
{{ $labels.To }} is slow
summary: etcd member communication is slow
- alert: HighNumberOfFailedProposals
expr: increase(etcd_server_proposals_failed_total{job="etcd"}[1h]) > 5
labels:
severity: warning
annotations:
description: etcd instance {{ $labels.instance }} has seen {{ $value }} proposal
failures within the last hour
summary: a high number of proposals within the etcd cluster are failing
- alert: HighFsyncDurations
expr: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[5m]))
> 0.5
for: 10m
labels:
severity: warning
annotations:
description: etcd instance {{ $labels.instance }} fsync durations are high
summary: high fsync durations
- alert: HighCommitDurations
expr: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[5m]))
> 0.25
for: 10m
labels:
severity: warning
annotations:
description: etcd instance {{ $labels.instance }} commit durations are high
summary: high commit durations
general.rules.yaml: |
groups:
- name: general.rules
rules:
- alert: TargetDown
expr: 100 * (count(up == 0) BY (job) / count(up) BY (job)) > 10
for: 10m
labels:
severity: warning
annotations:
description: '{{ $value }}% of {{ $labels.job }} targets are down.'
summary: Targets are down
- record: fd_utilization
expr: process_open_fds / process_max_fds
- alert: FdExhaustionClose
expr: predict_linear(fd_utilization[1h], 3600 * 4) > 1
for: 10m
labels:
severity: warning
annotations:
description: '{{ $labels.job }}: {{ $labels.namespace }}/{{ $labels.pod }} instance
will exhaust its file/socket descriptors within the next 4 hours'
summary: file descriptors soon exhausted
- alert: FdExhaustionClose
expr: predict_linear(fd_utilization[10m], 3600) > 1
for: 10m
labels:
severity: critical
annotations:
description: '{{ $labels.job }}: {{ $labels.namespace }}/{{ $labels.pod }} instance
will exhaust its file/socket descriptors within the next hour'
summary: file descriptors soon exhausted
kube-state-metrics.rules.yaml: |
groups:
- name: kube-state-metrics.rules
rules:
- alert: DeploymentGenerationMismatch
expr: kube_deployment_status_observed_generation != kube_deployment_metadata_generation
for: 15m
labels:
severity: warning
annotations:
description: Observed deployment generation does not match expected one for
deployment {{$labels.namespaces}}/{{$labels.deployment}}
summary: Deployment is outdated
- alert: DeploymentReplicasNotUpdated
expr: ((kube_deployment_status_replicas_updated != kube_deployment_spec_replicas)
or (kube_deployment_status_replicas_available != kube_deployment_spec_replicas))
unless (kube_deployment_spec_paused == 1)
for: 15m
labels:
severity: warning
annotations:
description: Replicas are not updated and available for deployment {{$labels.namespaces}}/{{$labels.deployment}}
summary: Deployment replicas are outdated
- alert: DaemonSetRolloutStuck
expr: kube_daemonset_status_number_ready / kube_daemonset_status_desired_number_scheduled
* 100 < 100
for: 15m
labels:
severity: warning
annotations:
description: Only {{$value}}% of desired pods scheduled and ready for daemon
set {{$labels.namespaces}}/{{$labels.daemonset}}
summary: DaemonSet is missing pods
- alert: K8SDaemonSetsNotScheduled
expr: kube_daemonset_status_desired_number_scheduled - kube_daemonset_status_current_number_scheduled
> 0
for: 10m
labels:
severity: warning
annotations:
description: A number of daemonsets are not scheduled.
summary: Daemonsets are not scheduled correctly
- alert: DaemonSetsMissScheduled
expr: kube_daemonset_status_number_misscheduled > 0
for: 10m
labels:
severity: warning
annotations:
description: A number of daemonsets are running where they are not supposed
to run.
summary: Daemonsets are not scheduled correctly
- alert: PodFrequentlyRestarting
expr: increase(kube_pod_container_status_restarts_total[1h]) > 5
for: 10m
labels:
severity: warning
annotations:
description: Pod {{$labels.namespaces}}/{{$labels.pod}} restarted {{$value}}
times within the last hour
summary: Pod is restarting frequently
kubelet.rules.yaml: |
groups:
- name: kubelet.rules
rules:
- alert: K8SNodeNotReady
expr: kube_node_status_condition{condition="Ready",status="true"} == 0
for: 1h
labels:
severity: warning
annotations:
description: The Kubelet on {{ $labels.node }} has not checked in with the API,
or has set itself to NotReady, for more than an hour
summary: Node status is NotReady
- alert: K8SManyNodesNotReady
expr: count(kube_node_status_condition{condition="Ready",status="true"} == 0)
> 1 and (count(kube_node_status_condition{condition="Ready",status="true"} ==
0) / count(kube_node_status_condition{condition="Ready",status="true"})) > 0.2
for: 1m
labels:
severity: critical
annotations:
description: '{{ $value }}% of Kubernetes nodes are not ready'
- alert: K8SKubeletDown
expr: count(up{job="kubelet"} == 0) / count(up{job="kubelet"}) * 100 > 3
for: 1h
labels:
severity: warning
annotations:
description: Prometheus failed to scrape {{ $value }}% of kubelets.
- alert: K8SKubeletDown
expr: (absent(up{job="kubelet"} == 1) or count(up{job="kubelet"} == 0) / count(up{job="kubelet"}))
* 100 > 10
for: 1h
labels:
severity: critical
annotations:
description: Prometheus failed to scrape {{ $value }}% of kubelets, or all Kubelets
have disappeared from service discovery.
summary: Many Kubelets cannot be scraped
- alert: K8SKubeletTooManyPods
expr: kubelet_running_pod_count > 100
for: 10m
labels:
severity: warning
annotations:
description: Kubelet {{$labels.instance}} is running {{$value}} pods, close
to the limit of 110
summary: Kubelet is close to pod limit
kubernetes.rules.yaml: |
groups:
- name: kubernetes.rules
rules:
- record: pod_name:container_memory_usage_bytes:sum
expr: sum(container_memory_usage_bytes{container_name!="POD",pod_name!=""}) BY
(pod_name)
- record: pod_name:container_spec_cpu_shares:sum
expr: sum(container_spec_cpu_shares{container_name!="POD",pod_name!=""}) BY (pod_name)
- record: pod_name:container_cpu_usage:sum
expr: sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name!=""}[5m]))
BY (pod_name)
- record: pod_name:container_fs_usage_bytes:sum
expr: sum(container_fs_usage_bytes{container_name!="POD",pod_name!=""}) BY (pod_name)
- record: namespace:container_memory_usage_bytes:sum
expr: sum(container_memory_usage_bytes{container_name!=""}) BY (namespace)
- record: namespace:container_spec_cpu_shares:sum
expr: sum(container_spec_cpu_shares{container_name!=""}) BY (namespace)
- record: namespace:container_cpu_usage:sum
expr: sum(rate(container_cpu_usage_seconds_total{container_name!="POD"}[5m]))
BY (namespace)
- record: cluster:memory_usage:ratio
expr: sum(container_memory_usage_bytes{container_name!="POD",pod_name!=""}) BY
(cluster) / sum(machine_memory_bytes) BY (cluster)
- record: cluster:container_spec_cpu_shares:ratio
expr: sum(container_spec_cpu_shares{container_name!="POD",pod_name!=""}) / 1000
/ sum(machine_cpu_cores)
- record: cluster:container_cpu_usage:ratio
expr: sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name!=""}[5m]))
/ sum(machine_cpu_cores)
- record: apiserver_latency_seconds:quantile
expr: histogram_quantile(0.99, rate(apiserver_request_latencies_bucket[5m])) /
1e+06
labels:
quantile: "0.99"
- record: apiserver_latency:quantile_seconds
expr: histogram_quantile(0.9, rate(apiserver_request_latencies_bucket[5m])) /
1e+06
labels:
quantile: "0.9"
- record: apiserver_latency_seconds:quantile
expr: histogram_quantile(0.5, rate(apiserver_request_latencies_bucket[5m])) /
1e+06
labels:
quantile: "0.5"
- alert: APIServerLatencyHigh
expr: apiserver_latency_seconds:quantile{quantile="0.99",subresource!="log",verb!~"^(?:WATCH|WATCHLIST|PROXY|CONNECT)$"}
> 1
for: 10m
labels:
severity: warning
annotations:
description: the API server has a 99th percentile latency of {{ $value }} seconds
for {{$labels.verb}} {{$labels.resource}}
- alert: APIServerLatencyHigh
expr: apiserver_latency_seconds:quantile{quantile="0.99",subresource!="log",verb!~"^(?:WATCH|WATCHLIST|PROXY|CONNECT)$"}
> 4
for: 10m
labels:
severity: critical
annotations:
description: the API server has a 99th percentile latency of {{ $value }} seconds
for {{$labels.verb}} {{$labels.resource}}
- alert: APIServerErrorsHigh
expr: rate(apiserver_request_count{code=~"^(?:5..)$"}[5m]) / rate(apiserver_request_count[5m])
* 100 > 2
for: 10m
labels:
severity: warning
annotations:
description: API server returns errors for {{ $value }}% of requests
- alert: APIServerErrorsHigh
expr: rate(apiserver_request_count{code=~"^(?:5..)$"}[5m]) / rate(apiserver_request_count[5m])
* 100 > 5
for: 10m
labels:
severity: critical
annotations:
description: API server returns errors for {{ $value }}% of requests
- alert: K8SApiserverDown
expr: absent(up{job="apiserver"} == 1)
for: 20m
labels:
severity: critical
annotations:
description: No API servers are reachable or all have disappeared from service
discovery
- alert: K8sCertificateExpirationNotice
labels:
severity: warning
annotations:
description: Kubernetes API Certificate is expiring soon (less than 7 days)
expr: sum(apiserver_client_certificate_expiration_seconds_bucket{le="604800"}) > 0
- alert: K8sCertificateExpirationNotice
labels:
severity: critical
annotations:
description: Kubernetes API Certificate is expiring in less than 1 day
expr: sum(apiserver_client_certificate_expiration_seconds_bucket{le="86400"}) > 0
node.rules.yaml: |
groups:
- name: node.rules
rules:
- record: instance:node_cpu:rate:sum
expr: sum(rate(node_cpu{mode!="idle",mode!="iowait",mode!~"^(?:guest.*)$"}[3m]))
BY (instance)
- record: instance:node_filesystem_usage:sum
expr: sum((node_filesystem_size{mountpoint="/"} - node_filesystem_free{mountpoint="/"}))
BY (instance)
- record: instance:node_network_receive_bytes:rate:sum
expr: sum(rate(node_network_receive_bytes[3m])) BY (instance)
- record: instance:node_network_transmit_bytes:rate:sum
expr: sum(rate(node_network_transmit_bytes[3m])) BY (instance)
- record: instance:node_cpu:ratio
expr: sum(rate(node_cpu{mode!="idle"}[5m])) WITHOUT (cpu, mode) / ON(instance)
GROUP_LEFT() count(sum(node_cpu) BY (instance, cpu)) BY (instance)
- record: cluster:node_cpu:sum_rate5m
expr: sum(rate(node_cpu{mode!="idle"}[5m]))
- record: cluster:node_cpu:ratio
expr: cluster:node_cpu:sum_rate5m / count(sum(node_cpu) BY (instance, cpu))
- alert: NodeExporterDown
expr: absent(up{job="node-exporter"} == 1)
for: 10m
labels:
severity: warning
annotations:
description: Prometheus could not scrape a node-exporter for more than 10m,
or node-exporters have disappeared from discovery
- alert: NodeDiskRunningFull
expr: predict_linear(node_filesystem_free[6h], 3600 * 24) < 0
for: 30m
labels:
severity: warning
annotations:
description: device {{$labels.device}} on node {{$labels.instance}} is running
full within the next 24 hours (mounted at {{$labels.mountpoint}})
- alert: NodeDiskRunningFull
expr: predict_linear(node_filesystem_free[30m], 3600 * 2) < 0
for: 10m
labels:
severity: critical
annotations:
description: device {{$labels.device}} on node {{$labels.instance}} is running
full within the next 2 hours (mounted at {{$labels.mountpoint}})
- alert: InactiveRAIDDisk
expr: node_md_disks - node_md_disks_active > 0
for: 10m
labels:
severity: warning
annotations:
description: '{{$value}} RAID disk(s) on node {{$labels.instance}} are inactive'
prometheus.rules.yaml: |
groups:
- name: prometheus.rules
rules:
- alert: PrometheusConfigReloadFailed
expr: prometheus_config_last_reload_successful == 0
for: 10m
labels:
severity: warning
annotations:
description: Reloading Prometheus' configuration has failed for {{$labels.namespace}}/{{$labels.pod}}
- alert: PrometheusNotificationQueueRunningFull
expr: predict_linear(prometheus_notifications_queue_length[5m], 60 * 30) > prometheus_notifications_queue_capacity
for: 10m
labels:
severity: warning
annotations:
description: Prometheus' alert notification queue is running full for {{$labels.namespace}}/{{
$labels.pod}}
- alert: PrometheusErrorSendingAlerts
expr: rate(prometheus_notifications_errors_total[5m]) / rate(prometheus_notifications_sent_total[5m])
> 0.01
for: 10m
labels:
severity: warning
annotations:
description: Errors while sending alerts from Prometheus {{$labels.namespace}}/{{
$labels.pod}} to Alertmanager {{$labels.Alertmanager}}
- alert: PrometheusErrorSendingAlerts
expr: rate(prometheus_notifications_errors_total[5m]) / rate(prometheus_notifications_sent_total[5m])
> 0.03
for: 10m
labels:
severity: critical
annotations:
description: Errors while sending alerts from Prometheus {{$labels.namespace}}/{{
$labels.pod}} to Alertmanager {{$labels.Alertmanager}}
- alert: PrometheusNotConnectedToAlertmanagers
expr: prometheus_notifications_alertmanagers_discovered < 1
for: 10m
labels:
severity: warning
annotations:
description: Prometheus {{ $labels.namespace }}/{{ $labels.pod}} is not connected
to any Alertmanagers
- alert: PrometheusTSDBReloadsFailing
expr: increase(prometheus_tsdb_reloads_failures_total[2h]) > 0
for: 12h
labels:
severity: warning
annotations:
description: '{{$labels.job}} at {{$labels.instance}} had {{$value | humanize}}
reload failures over the last two hours.'
summary: Prometheus has issues reloading data blocks from disk
- alert: PrometheusTSDBCompactionsFailing
expr: increase(prometheus_tsdb_compactions_failed_total[2h]) > 0
for: 12h
labels:
severity: warning
annotations:
description: '{{$labels.job}} at {{$labels.instance}} had {{$value | humanize}}
compaction failures over the last two hours.'
summary: Prometheus has issues compacting sample blocks
- alert: PrometheusTSDBWALCorruptions
expr: tsdb_wal_corruptions_total > 0
for: 4h
labels:
severity: warning
annotations:
description: '{{$labels.job}} at {{$labels.instance}} has a corrupted write-ahead
log (WAL).'
summary: Prometheus write-ahead log is corrupted
- alert: PrometheusNotIngestingSamples
expr: rate(prometheus_tsdb_head_samples_appended_total[5m]) <= 0
for: 10m
labels:
severity: warning
annotations:
description: "Prometheus {{ $labels.namespace }}/{{ $labels.pod}} isn't ingesting samples."
summary: "Prometheus isn't ingesting samples"


@@ -0,0 +1,18 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: lens-prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/proxy
- nodes/metrics
- services
- endpoints
- pods
- ingresses
- configmaps
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: lens-prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: lens-prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: lens-metrics


@@ -0,0 +1,79 @@
{{#if nodeExporter.enabled}}
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-exporter
namespace: lens-metrics
spec:
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
name: node-exporter
phase: prod
template:
metadata:
labels:
name: node-exporter
phase: prod
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
securityContext:
runAsNonRoot: true
runAsUser: 65534
hostPID: true
containers:
- name: node-exporter
image: quay.io/prometheus/node-exporter:v1.1.2
args:
- --path.procfs=/host/proc
- --path.sysfs=/host/sys
- --path.rootfs=/host/root
- --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker|var/lib/containerd|var/lib/containers/.+)($|/)
- --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
ports:
- name: metrics
containerPort: 9100
resources:
requests:
cpu: 10m
memory: 24Mi
limits:
cpu: 200m
memory: 100Mi
volumeMounts:
- name: proc
mountPath: /host/proc
readOnly: true
- name: sys
mountPath: /host/sys
readOnly: true
- name: root
mountPath: /host/root
readOnly: true
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: proc
hostPath:
path: /proc
- name: sys
hostPath:
path: /sys
- name: root
hostPath:
path: /
{{/if}}


@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: node-exporter
namespace: lens-metrics
annotations:
prometheus.io/scrape: 'true'
spec:
type: ClusterIP
clusterIP: None
selector:
name: node-exporter
phase: prod
ports:
- name: metrics
protocol: TCP
port: 80
targetPort: 9100


@@ -0,0 +1,128 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: lens-kube-state-metrics
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
- nodes
- pods
- services
- resourcequotas
- replicationcontrollers
- limitranges
- persistentvolumeclaims
- persistentvolumes
- namespaces
- endpoints
verbs:
- list
- watch
- apiGroups:
- extensions
resources:
- daemonsets
- deployments
- replicasets
- ingresses
verbs:
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- list
- watch
- apiGroups:
- apps
resources:
- statefulsets
- daemonsets
- deployments
- replicasets
verbs:
- list
- watch
- apiGroups:
- batch
resources:
- cronjobs
- jobs
verbs:
- list
- watch
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- list
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- list
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
verbs:
- list
- watch
- apiGroups:
- storage.k8s.io
resources:
- storageclasses
- volumeattachments
verbs:
- list
- watch
- apiGroups:
- admissionregistration.k8s.io
resources:
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
verbs:
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- list
- watch
- apiGroups:
- scheduling.k8s.io
resources:
- priorityclasses
verbs:
- list
- watch


@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-state-metrics
namespace: lens-metrics


@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: lens-kube-state-metrics
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: lens-kube-state-metrics
subjects:
- kind: ServiceAccount
name: kube-state-metrics
namespace: lens-metrics


@@ -0,0 +1,46 @@
{{#if kubeStateMetrics.enabled}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-state-metrics
namespace: lens-metrics
spec:
selector:
matchLabels:
name: kube-state-metrics
replicas: 1
template:
metadata:
labels:
name: kube-state-metrics
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
serviceAccountName: kube-state-metrics
containers:
- name: kube-state-metrics
image: k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.0.0
ports:
- name: metrics
containerPort: 8080
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
requests:
cpu: 10m
memory: 32Mi
limits:
cpu: 200m
memory: 150Mi
{{/if}}


@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: kube-state-metrics
namespace: lens-metrics
labels:
name: kube-state-metrics
annotations:
prometheus.io/scrape: 'true'
spec:
ports:
- name: metrics
port: 8080
targetPort: 8080
protocol: TCP
selector:
name: kube-state-metrics


@@ -0,0 +1,108 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import type { Common } from "@k8slens/extensions";
import { Renderer } from "@k8slens/extensions";
import semver from "semver";
import * as path from "path";
const { ResourceStack, forCluster, StorageClass, Namespace } = Renderer.K8sApi;
type ResourceStack = Renderer.K8sApi.ResourceStack;
export interface MetricsConfiguration {
// Placeholder for Metrics config structure
prometheus: {
enabled: boolean;
};
persistence: {
enabled: boolean;
storageClass: string;
size: string;
};
nodeExporter: {
enabled: boolean;
};
kubeStateMetrics: {
enabled: boolean;
};
retention: {
time: string;
size: string;
};
alertManagers: string[];
replicas: number;
storageClass: string;
version?: string;
}
export interface MetricsStatus {
installed: boolean;
canUpgrade: boolean;
}
export class MetricsFeature {
name = "lens-metrics";
latestVersion = "v2.26.0-lens1";
protected stack: ResourceStack;
constructor(protected cluster: Common.Catalog.KubernetesCluster) {
this.stack = new ResourceStack(cluster, this.name);
}
get resourceFolder() {
return path.join(__dirname, "../resources/");
}
async install(config: MetricsConfiguration): Promise<string> {
// Check if there are storageclasses
const storageClassApi = forCluster(this.cluster, StorageClass);
const scs = await storageClassApi.list();
config.persistence.enabled = scs.some(sc => (
sc.metadata?.annotations?.["storageclass.kubernetes.io/is-default-class"] === "true" ||
sc.metadata?.annotations?.["storageclass.beta.kubernetes.io/is-default-class"] === "true"
));
config.version = this.latestVersion;
return this.stack.kubectlApplyFolder(this.resourceFolder, config, ["--prune"]);
}
async upgrade(config: MetricsConfiguration): Promise<string> {
return this.install(config);
}
async getStatus(): Promise<MetricsStatus> {
const status: MetricsStatus = { installed: false, canUpgrade: false };
try {
const namespaceApi = forCluster(this.cluster, Namespace);
const namespace = await namespaceApi.get({ name: "lens-metrics" });
if (namespace?.kind) {
const currentVersion = namespace.metadata.annotations?.extensionVersion || "0.0.0";
status.installed = true;
status.canUpgrade = semver.lt(currentVersion, this.latestVersion, true);
} else {
status.installed = false;
}
} catch(e) {
if (e?.error?.code === 404) {
status.installed = false;
} else {
console.warn("[LENS-METRICS] failed to resolve install state", e);
}
}
return status;
}
async uninstall(config: MetricsConfiguration): Promise<string> {
return this.stack.kubectlDeleteFolder(this.resourceFolder, config);
}
}
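`getStatus()` keys the installed/upgradeable decision off the `extensionVersion` annotation that the templates write onto the `lens-metrics` namespace (see `namespace.yml` above). A quick way to inspect that marker by hand, assuming `kubectl` is pointed at the same cluster:

```sh
# Prints the installed stack version (empty if the annotation or namespace is missing)
kubectl get namespace lens-metrics \
  -o jsonpath='{.metadata.annotations.extensionVersion}'
```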


@@ -0,0 +1,276 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import React from "react";
import type { Common } from "@k8slens/extensions";
import { Renderer } from "@k8slens/extensions";
import { observer } from "mobx-react";
import { computed, observable, makeObservable } from "mobx";
import type { MetricsConfiguration } from "./metrics-feature";
import { MetricsFeature } from "./metrics-feature";
const {
K8sApi: {
forCluster, StatefulSet, DaemonSet, Deployment,
},
Component: {
SubTitle, Switch, Button,
},
} = Renderer;
export interface MetricsSettingsProps {
cluster: Common.Catalog.KubernetesCluster;
}
@observer
export class MetricsSettings extends React.Component<MetricsSettingsProps> {
constructor(props: MetricsSettingsProps) {
super(props);
makeObservable(this);
}
@observable featureStates = {
prometheus: false,
kubeStateMetrics: false,
nodeExporter: false,
};
@observable canUpgrade = false;
@observable upgrading = false;
@observable changed = false;
@observable inProgress = false;
config: MetricsConfiguration = {
prometheus: {
enabled: false,
},
persistence: {
enabled: false,
storageClass: null,
size: "20Gi", // kubernetes yaml value (no B suffix)
},
nodeExporter: {
enabled: false,
},
retention: {
time: "2d",
size: "5GiB", // argument for prometheus (requires B suffix)
},
kubeStateMetrics: {
enabled: false,
},
alertManagers: null,
replicas: 1,
storageClass: null,
};
feature: MetricsFeature;
@computed get isTogglable() {
if (this.inProgress) return false;
if (this.props.cluster.status.phase !== "connected") return false;
if (this.canUpgrade) return false;
if (!this.isActiveMetricsProvider) return false;
return true;
}
get metricsProvider() {
return this.props.cluster.spec?.metrics?.prometheus?.type || "";
}
get isActiveMetricsProvider() {
return (!this.metricsProvider || this.metricsProvider === "lens");
}
async componentDidMount() {
this.feature = new MetricsFeature(this.props.cluster);
await this.updateFeatureStates();
}
async updateFeatureStates() {
const status = await this.feature.getStatus();
this.canUpgrade = status.canUpgrade;
if (this.canUpgrade) {
this.changed = true;
}
const statefulSet = forCluster(this.props.cluster, StatefulSet);
try {
await statefulSet.get({ name: "prometheus", namespace: "lens-metrics" });
this.featureStates.prometheus = true;
} catch(e) {
if (e?.error?.code === 404) {
this.featureStates.prometheus = false;
} else {
this.featureStates.prometheus = undefined;
}
}
const deployment = forCluster(this.props.cluster, Deployment);
try {
await deployment.get({ name: "kube-state-metrics", namespace: "lens-metrics" });
this.featureStates.kubeStateMetrics = true;
} catch(e) {
if (e?.error?.code === 404) {
this.featureStates.kubeStateMetrics = false;
} else {
this.featureStates.kubeStateMetrics = undefined;
}
}
const daemonSet = forCluster(this.props.cluster, DaemonSet);
try {
await daemonSet.get({ name: "node-exporter", namespace: "lens-metrics" });
this.featureStates.nodeExporter = true;
} catch(e) {
if (e?.error?.code === 404) {
this.featureStates.nodeExporter = false;
} else {
this.featureStates.nodeExporter = undefined;
}
}
}
async save() {
this.config.prometheus.enabled = !!this.featureStates.prometheus;
this.config.kubeStateMetrics.enabled = !!this.featureStates.kubeStateMetrics;
this.config.nodeExporter.enabled = !!this.featureStates.nodeExporter;
this.inProgress = true;
try {
if (!this.config.prometheus.enabled && !this.config.kubeStateMetrics.enabled && !this.config.nodeExporter.enabled) {
await this.feature.uninstall(this.config);
} else {
await this.feature.install(this.config);
}
} finally {
this.inProgress = false;
this.changed = false;
await this.updateFeatureStates();
}
}
async togglePrometheus(enabled: boolean) {
this.featureStates.prometheus = enabled;
this.changed = true;
}
async toggleKubeStateMetrics(enabled: boolean) {
this.featureStates.kubeStateMetrics = enabled;
this.changed = true;
}
async toggleNodeExporter(enabled: boolean) {
this.featureStates.nodeExporter = enabled;
this.changed = true;
}
@computed get buttonLabel() {
const allDisabled = !this.featureStates.kubeStateMetrics && !this.featureStates.nodeExporter && !this.featureStates.prometheus;
if (this.inProgress && this.canUpgrade) return "Upgrading ...";
if (this.inProgress && allDisabled) return "Uninstalling ...";
if (this.inProgress) return "Applying ...";
if (this.canUpgrade) return "Upgrade";
if (this.changed && allDisabled) {
return "Uninstall";
}
return "Apply";
}
render() {
return (
<section style={{ display: "flex", flexDirection: "column", rowGap: "1.5rem" }}>
{ this.props.cluster.status.phase !== "connected" && (
<section>
<p style={ { color: "var(--colorError)" } }>
The Lens Metrics settings require an established connection to the cluster.
</p>
</section>
)}
{ !this.isActiveMetricsProvider && (
<section>
<p style={ { color: "var(--colorError)" } }>
Another metrics provider is currently active. See &quot;Metrics&quot; tab for details.
</p>
</section>
)}
<section>
<SubTitle title="Prometheus" />
<Switch
disabled={this.featureStates.prometheus === undefined || !this.isTogglable}
checked={!!this.featureStates.prometheus && this.props.cluster.status.phase == "connected"}
onChange={checked => this.togglePrometheus(checked)}
name="prometheus"
>
Enable bundled Prometheus metrics stack
</Switch>
<small className="hint">
Enable timeseries data visualization (Prometheus stack) for your cluster.
</small>
</section>
<section>
<SubTitle title="Kube State Metrics" />
<Switch
disabled={this.featureStates.kubeStateMetrics === undefined || !this.isTogglable}
checked={!!this.featureStates.kubeStateMetrics && this.props.cluster.status.phase == "connected"}
onChange={checked => this.toggleKubeStateMetrics(checked)}
name="kube-state-metrics"
>
Enable bundled kube-state-metrics stack
</Switch>
<small className="hint">
Enable Kubernetes API object metrics for your cluster.
Enable this only if you don&apos;t have existing kube-state-metrics stack installed.
</small>
</section>
<section>
<SubTitle title="Node Exporter" />
<Switch
disabled={this.featureStates.nodeExporter === undefined || !this.isTogglable}
checked={!!this.featureStates.nodeExporter && this.props.cluster.status.phase == "connected"}
onChange={checked => this.toggleNodeExporter(checked)}
name="node-exporter"
>
Enable bundled node-exporter stack
</Switch>
<small className="hint">
Enable node level metrics for your cluster.
Enable this only if you don&apos;t have existing node-exporter stack installed.
</small>
</section>
<section>
<div>
<Button
primary
label={this.buttonLabel}
waiting={this.inProgress}
onClick={() => this.save()}
disabled={!this.changed}
style={{ width: "20ch", padding: "0.5rem" }}
/>
{this.canUpgrade && (
<small className="hint">
An update is available for enabled metrics components.
</small>
)}
</div>
</section>
</section>
);
}
}


@@ -0,0 +1,27 @@
{
"compilerOptions": {
"outDir": "dist",
"module": "CommonJS",
"target": "ES2017",
"lib": ["ESNext", "DOM", "DOM.Iterable"],
"moduleResolution": "Node",
"sourceMap": false,
"declaration": false,
"strict": false,
"noImplicitAny": true,
"skipLibCheck": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"experimentalDecorators": true,
"useDefineForClassFields": true,
"jsx": "react"
},
"include": [
"./**/*.ts",
"./**/*.tsx"
],
"exclude": [
"node_modules",
"*.js"
]
}


@@ -0,0 +1,47 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
const path = require("path");
module.exports = [
{
entry: "./renderer.tsx",
context: __dirname,
target: "electron-renderer",
mode: "production",
optimization: {
minimize: false,
},
module: {
rules: [
{
test: /\.tsx?$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
externals: [
{
"@k8slens/extensions": "var global.LensExtensions",
"react": "var global.React",
"react-dom": "var global.ReactDOM",
"mobx": "var global.Mobx",
"mobx-react": "var global.MobxReact",
},
],
resolve: {
extensions: [ ".tsx", ".ts", ".js" ],
},
output: {
libraryTarget: "commonjs2",
globalObject: "this",
filename: "renderer.js",
path: path.resolve(__dirname, "dist"),
},
node: {
__dirname: false,
},
},
];

src/node-menu/package-lock.json generated Normal file
File diff suppressed because it is too large


@@ -0,0 +1,24 @@
{
"name": "openlens-node-menu",
"version": "6.1.0",
"description": "OpenLens node menu",
"renderer": "dist/renderer.js",
"engines": {
"node": "^16.14.2",
"lens": "^6.3.0"
},
"scripts": {
"build": "npx webpack && npm pack --pack-destination $@",
"dev": "npx webpack -- --watch",
"test": "npx jest --passWithNoTests --env=jsdom src $@"
},
"files": [
"dist/**/*"
],
"devDependencies": {
"@k8slens/extensions": "^6.3.0",
"@types/react": "^17.0.52",
"ts-loader": "^9.4.2",
"webpack-cli": "^5.0.1"
}
}


@@ -0,0 +1,21 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import { Renderer } from "@k8slens/extensions";
import React from "react";
import type { NodeMenuProps } from "./src/node-menu";
import { NodeMenu } from "./src/node-menu";
export default class NodeMenuRendererExtension extends Renderer.LensExtension {
kubeObjectMenuItems = [
{
kind: "Node",
apiVersions: ["v1"],
components: {
MenuItem: (props: NodeMenuProps) => <NodeMenu {...props} />,
},
},
];
}

122
src/node-menu/src/node-menu.tsx Normal file
View File

@@ -0,0 +1,122 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import React from "react";
import { Common, Renderer } from "@k8slens/extensions";
type Node = Renderer.K8sApi.Node;
const {
Component: {
terminalStore,
createTerminalTab,
ConfirmDialog,
MenuItem,
Icon,
},
Navigation,
} = Renderer;
const {
App,
} = Common;
export interface NodeMenuProps extends Renderer.Component.KubeObjectMenuProps<Node> {
}
export function NodeMenu(props: NodeMenuProps) {
const { object: node, toolbar } = props;
if (!node) {
return null;
}
const nodeName = node.getName();
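// Prefer the kubectl binary configured in Preferences; fall back to `kubectl` on PATH.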
const kubectlPath = App.Preferences.getKubectlPath() || "kubectl";
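// Runs a command in a fresh terminal tab, then closes the details panel.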
const sendToTerminal = (command: string) => {
terminalStore.sendCommand(command, {
enter: true,
newTab: true,
});
Navigation.hideDetails();
};
const shell = () => {
createTerminalTab({
title: `Node: ${nodeName}`,
node: nodeName,
});
Navigation.hideDetails();
};
const cordon = () => {
sendToTerminal(`${kubectlPath} cordon ${nodeName}`);
};
const unCordon = () => {
sendToTerminal(`${kubectlPath} uncordon ${nodeName}`);
};
const drain = () => {
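// --delete-emptydir-data is the successor of the deprecated
// --delete-local-data flag (renamed in kubectl 1.20).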
const command = `${kubectlPath} drain ${nodeName} --delete-emptydir-data --ignore-daemonsets --force`;
ConfirmDialog.open({
ok: () => sendToTerminal(command),
labelOk: `Drain Node`,
message: (
<p>
{"Are you sure you want to drain "}
<b>{nodeName}</b>
?
</p>
),
});
};
return (
<>
<MenuItem onClick={shell}>
<Icon
svg="ssh"
interactive={toolbar}
tooltip={toolbar && "Node shell"}
/>
<span className="title">Shell</span>
</MenuItem>
{
node.isUnschedulable()
? (
<MenuItem onClick={unCordon}>
<Icon
material="play_circle_filled"
tooltip={toolbar && "Uncordon"}
interactive={toolbar}
/>
<span className="title">Uncordon</span>
</MenuItem>
)
: (
<MenuItem onClick={cordon}>
<Icon
material="pause_circle_filled"
tooltip={toolbar && "Cordon"}
interactive={toolbar}
/>
<span className="title">Cordon</span>
</MenuItem>
)
}
<MenuItem onClick={drain}>
<Icon
material="delete_sweep"
tooltip={toolbar && "Drain"}
interactive={toolbar}
/>
<span className="title">Drain</span>
</MenuItem>
</>
);
}

27
src/node-menu/tsconfig.json Normal file
View File

@@ -0,0 +1,27 @@
{
"compilerOptions": {
"outDir": "dist",
"module": "CommonJS",
"target": "ES2017",
"lib": ["ESNext", "DOM", "DOM.Iterable"],
"moduleResolution": "Node",
"sourceMap": false,
"declaration": false,
"strict": false,
"noImplicitAny": true,
"skipLibCheck": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"experimentalDecorators": true,
"useDefineForClassFields": true,
"jsx": "react"
},
"include": [
"./*.ts",
"./*.tsx"
],
"exclude": [
"node_modules",
"*.js"
]
}

44
src/node-menu/webpack.config.js Normal file
View File

@@ -0,0 +1,44 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
const path = require("path");
module.exports = [
{
entry: "./renderer.tsx",
context: __dirname,
target: "electron-renderer",
mode: "production",
optimization: {
minimize: false,
},
module: {
rules: [
{
test: /\.tsx?$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
externals: [
{
"@k8slens/extensions": "var global.LensExtensions",
"react": "var global.React",
"react-dom": "var global.ReactDOM",
"mobx": "var global.Mobx",
"mobx-react": "var global.MobxReact",
},
],
resolve: {
extensions: [ ".tsx", ".ts", ".js" ],
},
output: {
libraryTarget: "commonjs2",
globalObject: "this",
filename: "renderer.js",
path: path.resolve(__dirname, "dist"),
},
},
];

5426
src/pod-menu/package-lock.json generated Normal file

File diff suppressed because it is too large

24
src/pod-menu/package.json Normal file
View File

@@ -0,0 +1,24 @@
{
"name": "openlens-pod-menu",
"version": "6.1.0",
"description": "OpenLens pod menu",
"renderer": "dist/renderer.js",
"engines": {
"node": "^16.14.2",
"lens": "^6.3.0"
},
"scripts": {
"build": "npx webpack && npm pack --pack-destination $@",
"dev": "npx webpack -- --watch",
"test": "npx jest --passWithNoTests --env=jsdom src $@"
},
"files": [
"dist/**/*"
],
"devDependencies": {
"@k8slens/extensions": "^6.3.0",
"@types/react": "^17.0.52",
"ts-loader": "^9.4.2",
"webpack-cli": "^5.0.1"
}
}

39
src/pod-menu/renderer.tsx Normal file
View File

@@ -0,0 +1,39 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import { Renderer } from "@k8slens/extensions";
import type { PodAttachMenuProps } from "./src/attach-menu";
import { PodAttachMenu } from "./src/attach-menu";
import type { PodShellMenuProps } from "./src/shell-menu";
import { PodShellMenu } from "./src/shell-menu";
import type { PodLogsMenuProps } from "./src/logs-menu";
import { PodLogsMenu } from "./src/logs-menu";
import React from "react";
export default class PodMenuRendererExtension extends Renderer.LensExtension {
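// Three independent menu items are registered for core/v1 Pods; each renders
// its own component (attach, shell, logs) in the Pod context menu.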
kubeObjectMenuItems = [
{
kind: "Pod",
apiVersions: ["v1"],
components: {
MenuItem: (props: PodAttachMenuProps) => <PodAttachMenu {...props} />,
},
},
{
kind: "Pod",
apiVersions: ["v1"],
components: {
MenuItem: (props: PodShellMenuProps) => <PodShellMenu {...props} />,
},
},
{
kind: "Pod",
apiVersions: ["v1"],
components: {
MenuItem: (props: PodLogsMenuProps) => <PodLogsMenu {...props} />,
},
},
];
}

106
src/pod-menu/src/attach-menu.tsx Normal file
View File

@@ -0,0 +1,106 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import React from "react";
import { Renderer, Common } from "@k8slens/extensions";
type Pod = Renderer.K8sApi.Pod;
const {
Component: {
createTerminalTab,
terminalStore,
MenuItem,
Icon,
SubMenu,
StatusBrick,
},
Navigation,
} = Renderer;
const {
Util,
App,
} = Common;
export interface PodAttachMenuProps extends Renderer.Component.KubeObjectMenuProps<Pod> {
}
export class PodAttachMenu extends React.Component<PodAttachMenuProps> {
async attachToPod(container?: string) {
const { object: pod } = this.props;
const kubectlPath = App.Preferences.getKubectlPath() || "kubectl";
const commandParts = [
kubectlPath,
"attach",
"-i",
"-t",
"-n", pod.getNs(),
pod.getName(),
];
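// On non-Windows hosts, prefix the command with the shell builtin `exec` so
// kubectl replaces the terminal's shell process.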
if (window.navigator.platform !== "Win32") {
commandParts.unshift("exec");
}
if (container) {
commandParts.push("-c", container);
}
const shell = createTerminalTab({
title: `Pod: ${pod.getName()} (namespace: ${pod.getNs()}) [Attached]`,
});
terminalStore.sendCommand(commandParts.join(" "), {
enter: true,
tabId: shell.id,
});
Navigation.hideDetails();
}
render() {
const { object, toolbar } = this.props;
const containers = object.getRunningContainers();
if (!containers.length) return null;
return (
<MenuItem onClick={Util.prevDefault(() => this.attachToPod(containers[0].name))}>
<Icon
material="pageview"
interactive={toolbar}
tooltip={toolbar && "Attach to Pod"}
/>
<span className="title">Attach Pod</span>
{containers.length > 1 && (
<>
<Icon className="arrow" material="keyboard_arrow_right"/>
<SubMenu>
{
containers.map(container => {
const { name } = container;
return (
<MenuItem
key={name}
onClick={Util.prevDefault(() => this.attachToPod(name))}
className="flex align-center"
>
<StatusBrick/>
<span>{name}</span>
</MenuItem>
);
})
}
</SubMenu>
</>
)}
</MenuItem>
);
}
}

87
src/pod-menu/src/logs-menu.tsx Normal file
View File

@@ -0,0 +1,87 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import React from "react";
import { Renderer, Common } from "@k8slens/extensions";
type Pod = Renderer.K8sApi.Pod;
type IPodContainer = Renderer.K8sApi.IPodContainer;
const {
Component: {
logTabStore,
MenuItem,
Icon,
SubMenu,
StatusBrick,
},
Navigation,
} = Renderer;
const {
Util,
} = Common;
export interface PodLogsMenuProps extends Renderer.Component.KubeObjectMenuProps<Pod> {
}
export class PodLogsMenu extends React.Component<PodLogsMenuProps> {
showLogs(container: IPodContainer) {
Navigation.hideDetails();
const pod = this.props.object;
logTabStore.createPodTab({
selectedPod: pod,
selectedContainer: container,
});
}
render() {
const { object: pod, toolbar } = this.props;
const containers = pod.getAllContainers();
const statuses = pod.getContainerStatuses();
if (!containers.length) return null;
return (
<MenuItem onClick={Util.prevDefault(() => this.showLogs(containers[0]))}>
<Icon
material="subject"
interactive={toolbar}
tooltip={toolbar && "Pod Logs"}
/>
<span className="title">Logs</span>
{containers.length > 1 && (
<>
<Icon className="arrow" material="keyboard_arrow_right"/>
<SubMenu>
{
containers.map(container => {
const { name } = container;
const status = statuses.find(status => status.name === name);
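// Derive the brick's CSS classes from the container's current state
// (running/waiting/terminated) and readiness.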
const brick = status ? (
<StatusBrick
className={Util.cssNames(Object.keys(status.state)[0], { ready: status.ready })}
/>
) : null;
return (
<MenuItem
key={name}
onClick={Util.prevDefault(() => this.showLogs(container))}
className="flex align-center"
>
{brick}
<span>{name}</span>
</MenuItem>
);
})
}
</SubMenu>
</>
)}
</MenuItem>
);
}
}

114
src/pod-menu/src/shell-menu.tsx Normal file
View File

@@ -0,0 +1,114 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
import React from "react";
import { Renderer, Common } from "@k8slens/extensions";
type Pod = Renderer.K8sApi.Pod;
const {
Component: {
createTerminalTab,
terminalStore,
MenuItem,
Icon,
SubMenu,
StatusBrick,
},
Navigation,
} = Renderer;
const {
Util,
App,
} = Common;
export interface PodShellMenuProps extends Renderer.Component.KubeObjectMenuProps<Pod> {
}
export class PodShellMenu extends React.Component<PodShellMenuProps> {
async execShell(container?: string) {
const { object: pod } = this.props;
const kubectlPath = App.Preferences.getKubectlPath() || "kubectl";
const commandParts = [
kubectlPath,
"exec",
"-i",
"-t",
"-n", pod.getNs(),
pod.getName(),
];
if (window.navigator.platform !== "Win32") {
commandParts.unshift("exec");
}
if (container) {
commandParts.push("-c", container);
}
commandParts.push("--");
if (pod.getSelectedNodeOs() === "windows") {
commandParts.push("powershell");
} else {
commandParts.push('sh -c "clear; (bash || ash || sh)"');
}
const shell = createTerminalTab({
title: `Pod: ${pod.getName()} (namespace: ${pod.getNs()})`,
});
terminalStore.sendCommand(commandParts.join(" "), {
enter: true,
tabId: shell.id,
});
Navigation.hideDetails();
}
render() {
const { object, toolbar } = this.props;
const containers = object.getRunningContainers();
if (!containers.length) return null;
return (
<MenuItem onClick={Util.prevDefault(() => this.execShell(containers[0].name))}>
<Icon
svg="ssh"
interactive={toolbar}
tooltip={toolbar && "Pod Shell"}
/>
<span className="title">Shell</span>
{containers.length > 1 && (
<>
<Icon className="arrow" material="keyboard_arrow_right"/>
<SubMenu>
{
containers.map(container => {
const { name } = container;
return (
<MenuItem
key={name}
onClick={Util.prevDefault(() => this.execShell(name))}
className="flex align-center"
>
<StatusBrick/>
<span>{name}</span>
</MenuItem>
);
})
}
</SubMenu>
</>
)}
</MenuItem>
);
}
}

27
src/pod-menu/tsconfig.json Normal file
View File

@@ -0,0 +1,27 @@
{
"compilerOptions": {
"outDir": "dist",
"module": "CommonJS",
"target": "ES2017",
"lib": ["ESNext", "DOM", "DOM.Iterable"],
"moduleResolution": "Node",
"sourceMap": false,
"declaration": false,
"strict": false,
"noImplicitAny": true,
"skipLibCheck": true,
"esModuleInterop": true,
"allowSyntheticDefaultImports": true,
"experimentalDecorators": true,
"useDefineForClassFields": true,
"jsx": "react"
},
"include": [
"./*.ts",
"./*.tsx"
],
"exclude": [
"node_modules",
"*.js"
]
}

44
src/pod-menu/webpack.config.js Normal file
View File

@@ -0,0 +1,44 @@
/**
* Copyright (c) OpenLens Authors. All rights reserved.
* Licensed under MIT License. See LICENSE in root directory for more information.
*/
const path = require("path");
module.exports = [
{
entry: "./renderer.tsx",
context: __dirname,
target: "electron-renderer",
mode: "production",
optimization: {
minimize: false,
},
module: {
rules: [
{
test: /\.tsx?$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
externals: [
{
"@k8slens/extensions": "var global.LensExtensions",
"react": "var global.React",
"react-dom": "var global.ReactDOM",
"mobx": "var global.Mobx",
"mobx-react": "var global.MobxReact",
},
],
resolve: {
extensions: [ ".tsx", ".ts", ".js" ],
},
output: {
libraryTarget: "commonjs2",
globalObject: "this",
filename: "renderer.js",
path: path.resolve(__dirname, "dist"),
},
},
];