Mirror of https://github.com/nikdoof/helm-charts.git, synced 2025-12-16 19:42:15 +00:00
Compare commits: aaisp-to-m ... zigbee2mqt (31 commits)

Commit SHA1s: f99101d00b, 1548f606e7, f33c104b1f, 1007982a78, bea73339d5, 644cf70b31, d929f18571, ff5fb746ad, 6aec140b79, 2bb2c4a7d3, a29c9d7cff, 343a41b2e7, c38645c597, c38382eb5f, 083e8bd0e6, 4fcba6be6c, 9fa7c5254d, 1c8b3a7a94, f67ffc9a81, 5e16f5a805, 8b67ed8628, 0da018df2f, 4dc7b0f9f2, 4f8a41ab16, 10a68e25b9, c1461dd083, 338beeead8, 365626339a, 9a55b3f92c, 67b78606aa, c9e3c402ad
16  .editorconfig  Normal file
@@ -0,0 +1,16 @@
# EditorConfig helps us maintain consistent formatting on non-source files.
# Visit https://editorconfig.org/ for details on how to configure your editor to respect these settings.

# This is the terminal .editorconfig in this repository.
root = true

[*]
indent_style = space
trim_trailing_whitespace = true
insert_final_newline = true
end_of_line = lf

[*.{yaml,yml}]
indent_size = 2
43  .github/renovate.json5  vendored  Normal file
@@ -0,0 +1,43 @@
{
  "enabled": true,
  "dependencyDashboard": true,
  "dependencyDashboardTitle": "Renovate Dashboard",
  "suppressNotifications": ["prIgnoreNotification"],
  "rebaseWhen": "conflicted",
  "prConcurrentLimit": 5,
  "helm-values": {
    "enabled": false
  },
  "helmv3": {
    "fileMatch": ["charts/.+/Chart\\.yaml$"]
  },
  "packageRules": [
    {
      "datasources": ["helm"],
      "commitMessageTopic": "Helm chart {{depName}}",
      "separateMinorPatch": true
    },
    {
      "commitMessagePrefix": "[{{{parentDir}}}]",
      "branchTopic": "{{{parentDir}}}-{{{depNameSanitized}}}-{{{newMajor}}}{{#if isPatch}}.{{{newMinor}}}{{/if}}.x{{#if isLockfileUpdate}}-lockfile{{/if}}",
      "updateTypes": ["major"],
      "bumpVersion": "major",
      "labels": ["dependency/major"],
      "excludePackageNames": ["common"],
    },
    {
      "updateTypes": ["minor"],
      "bumpVersion": "minor",
      "labels": ["dependency/minor"],
      "excludePackageNames": ["common"],
      "groupName": ["external minor dep"],
    },
    {
      "updateTypes": ["patch"],
      "bumpVersion": "patch",
      "labels": ["dependency/patch"],
      "excludePackageNames": ["common"],
      "groupName": ["external patch dep"],
    }
  ]
}
39  .github/workflows/lint-test.yaml  vendored
@@ -7,21 +7,36 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v1
        with:
          version: v3.4.0

      - uses: actions/setup-python@v2
        with:
          python-version: 3.7

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.1.0

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed)
          if [[ -n "$changed" ]]; then
            echo "::set-output name=changed::true"
          fi

      - name: Run chart-testing (lint)
        id: lint
        uses: helm/chart-testing-action@v1.0.0-alpha.3
        with:
          command: lint
        run: ct lint

      - name: Create kind cluster
        uses: helm/kind-action@v1.0.0-alpha.3
        with:
          install_local_path_provisioner: true
        if: steps.lint.outputs.changed == 'true'
        uses: helm/kind-action@v1.2.0
        if: steps.list-changed.outputs.changed == 'true'

      - name: Run chart-testing (install)
        uses: helm/chart-testing-action@v1.0.0-alpha.3
        with:
          command: install
        run: ct install
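The lint and install steps above are thin wrappers around the chart-testing CLI; as a rough local equivalent (assuming `ct`, Helm 3 and a kind cluster are already available), something like:

```console
$ ct lint --chart-dirs charts
$ ct install --chart-dirs charts
```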
22  .github/workflows/release.yaml  vendored
@@ -10,29 +10,23 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      # See https://github.com/helm/chart-releaser-action/issues/6
      - name: Install Helm
        run: |
          curl -sSLo get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get
          chmod 700 get_helm.sh
          ./get_helm.sh
          helm init --client-only

      - name: Add dependency chart repos
        run: |
          helm repo add stable https://kubernetes-charts.storage.googleapis.com/
          helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
        uses: azure/setup-helm@v1
        with:
          version: v3.4.0

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.0.0-alpha.2
        uses: helm/chart-releaser-action@v1.2.1
        with:
          charts_repo_url: https://nikdoof.github.io/helm-charts
        env:
          CR_TOKEN: "${{ secrets.CR_TOKEN }}"
          CR_TOKEN: "${{ secrets.CR_TOKEN }}"
32  README.md  Normal file
@@ -0,0 +1,32 @@
# Helm Charts

A small set of custom Helm charts to cover some smaller applications not covered by Stable and other repos.

[MIT license](https://opensource.org/licenses/MIT)
[GitHub Actions](https://github.com/nikdoof/helm-charts/actions)

## Usage

[Helm](https://helm.sh) must be installed to use the charts.
Please refer to Helm's [documentation](https://helm.sh/docs/) to get started.

Once Helm is set up properly, add the repo as follows:

```console
helm repo add nikdoof https://nikdoof.github.io/helm-charts/
```

You can then run `helm search nikdoof` to see the charts.

## Charts

See [charts folder](./charts) for a complete list.

* [aaisp2mqtt](./charts/aaisp2mqtt) - A tool to pull information from [Andrews & Arnold](https://www.aa.net.uk/) CHAOSv2 API and output to MQTT
* [calibre-web](./charts/calibre-web) - Web app for browsing, reading and downloading eBooks stored in a Calibre database
* [deluge](./charts/deluge) - Deluge torrent client

## License

[MIT License](./LICENSE)
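As a hedged usage sketch building on the README above (release name and chart choice are arbitrary, Helm 3 syntax assumed):

```console
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm repo update
$ helm install my-deluge nikdoof/deluge
```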
@@ -2,4 +2,14 @@ apiVersion: v1
appVersion: "0.2"
description: Pulls data from the AAISP CHAOSv2 API into MQTT
name: aaisp-to-mqtt
version: 0.2.4
version: 0.2.5
keywords:
- aaisp
- mqtt
home: https://github.com/nikdoof/aaisp-to-mqtt
sources:
- https://hub.docker.com/r/nikdoof/aaisp-to-mqtt/
- https://github.com/natm/aaisp-to-mqtt
maintainers:
- name: nikdoof
  email: andy@tensixtyone.com
4  charts/aaisp-to-mqtt/ci/existingsecret-values.yaml  Normal file
@@ -0,0 +1,4 @@
mqtt:
  broker: localhost

existingSecretName: aaisp-to-mqtt-secret
6  charts/aaisp-to-mqtt/ci/test-values.yaml  Normal file
@@ -0,0 +1,6 @@
aaisp:
  username: test1@a
  password: TesttestTest

mqtt:
  broker: localhost
22  charts/aaisp2mqtt/.helmignore  Normal file
@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
15  charts/aaisp2mqtt/Chart.yaml  Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
appVersion: "0.3.1"
description: Pulls data from the AAISP CHAOSv2 API into MQTT
name: aaisp2mqtt
version: 0.3.2
keywords:
- aaisp
- mqtt
home: https://github.com/nikdoof/aaisp2mqtt
sources:
- https://hub.docker.com/r/nikdoof/aaisp2mqtt/
- https://github.com/natm/aaisp2mqtt
maintainers:
- name: nikdoof
  email: andy@tensixtyone.com
4  charts/aaisp2mqtt/ci/existingsecret-values.yaml  Normal file
@@ -0,0 +1,4 @@
mqtt:
  broker: localhost

existingSecretName: aaisp2mqtt-secret
9  charts/aaisp2mqtt/ci/homeassistant-values.yaml  Normal file
@@ -0,0 +1,9 @@
aaisp:
  username: test1@a
  password: TesttestTest

mqtt:
  broker: localhost

homeassistant:
  enabled: true
6  charts/aaisp2mqtt/ci/test-values.yaml  Normal file
@@ -0,0 +1,6 @@
aaisp:
  username: test1@a
  password: TesttestTest

mqtt:
  broker: localhost
56  charts/aaisp2mqtt/templates/_helpers.tpl  Normal file
@@ -0,0 +1,56 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aaisp2mqtt.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aaisp2mqtt.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aaisp2mqtt.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "aaisp2mqtt.labels" -}}
app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "aaisp2mqtt.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aaisp2mqtt.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
97  charts/aaisp2mqtt/templates/cronjob.yaml  Normal file
@@ -0,0 +1,97 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: {{ include "aaisp2mqtt.fullname" . }}-cronjob
  {{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
    helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ include "aaisp2mqtt.name" . }}
spec:
  schedule: {{ .Values.cronjob.schedule | quote }}
  successfulJobsHistoryLimit: {{ .Values.cronjob.successfulJobsHistoryLimit }}
  failedJobsHistoryLimit: {{ .Values.cronjob.failedJobsHistoryLimit }}
  concurrencyPolicy: {{ .Values.cronjob.concurrencyPolicy }}
  {{- if .Values.cronjob.startingDeadlineSeconds }}
  startingDeadlineSeconds: {{ .Values.cronjob.startingDeadlineSeconds }}
  {{- end }}
  jobTemplate:
    spec:
      backoffLimit: 0
      template:
        metadata:
          labels:
            app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
            helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
            app.kubernetes.io/instance: {{ .Release.Name }}
            app.kubernetes.io/managed-by: {{ include "aaisp2mqtt.name" . }}
        spec:
          restartPolicy: Never
          {{- if .Values.image.pullSecrets }}
          imagePullSecrets:
          {{- range .Values.image.pullSecrets }}
            - name: {{ . }}
          {{- end }}
          {{- end }}
          containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              resources:
{{ toYaml .Values.resources | indent 16 }}
              env:
                - name: AAISP_USERNAME
                  valueFrom:
                    secretKeyRef:
                      name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
                      key: aaisp.username
                - name: AAISP_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
                      key: aaisp.password
                - name: MQTT_BROKER
                  value: {{ .Values.mqtt.broker }}
                - name: MQTT_PORT
                  value: "{{ default 1883 .Values.mqtt.port }}"
                {{- if .Values.mqtt.authenticated }}
                - name: MQTT_USERNAME
                  valueFrom:
                    secretKeyRef:
                      name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
                      key: mqtt.username
                - name: MQTT_PASSWORD
                  valueFrom:
                    secretKeyRef:
                      name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
                      key: mqtt.password
                {{- end }}
                - name: MQTT_TOPIC_PREFIX
                  value: {{ default "aaisp" .Values.mqtt.topicPrefix }}
                {{- if .Values.homeassistant.enabled }}
                - name: HOMEASSISTANT_ENABLED
                  value: '{{ .Values.homeassistant.enabled }}'
                {{- if .Values.homeassistant.discoveryPrefix }}
                - name: HOMEASSISTANT_DISCOVERY_PREFIX
                  value: {{ .Values.homeassistant.discoveryPrefix }}
                {{- end }}
                {{- end }}
          {{- with .Values.nodeSelector }}
          nodeSelector:
{{ toYaml . | indent 12 }}
          {{- end }}
          {{- with .Values.affinity }}
          affinity:
{{ toYaml . | indent 12 }}
          {{- end }}
          {{- with .Values.tolerations }}
          tolerations:
{{ toYaml . | indent 12 }}
          {{- end }}
20  charts/aaisp2mqtt/templates/secrets.yaml  Normal file
@@ -0,0 +1,20 @@
---
{{- if not (.Values.existingSecretName) }}
apiVersion: v1
kind: Secret
metadata:
  name: aaisp2mqtt-secret
  labels:
    app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
    helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ include "aaisp2mqtt.name" . }}
type: Opaque
data:
  aaisp.username: {{ .Values.aaisp.username | b64enc }}
  aaisp.password: {{ .Values.aaisp.password | b64enc }}
  {{- if .Values.mqtt.authenticated }}
  mqtt.username: {{ .Values.mqtt.username | b64enc }}
  mqtt.password: {{ .Values.mqtt.password | b64enc }}
  {{- end }}
{{- end }}
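When `existingSecretName` is set in the values, the template above is skipped entirely; a rough sketch of creating a compatible Secret by hand (all values are placeholders, the key names come from the cronjob's `secretKeyRef`s):

```console
$ kubectl create secret generic aaisp2mqtt-secret \
    --from-literal=aaisp.username=user1@a \
    --from-literal=aaisp.password=changeme \
    --from-literal=mqtt.username=mqtt-user \
    --from-literal=mqtt.password=changeme
```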
61  charts/aaisp2mqtt/values.yaml  Normal file
@@ -0,0 +1,61 @@
# Default values for aaisp2mqtt.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: nikdoof/aaisp2mqtt
  tag: 0.3.1
  pullPolicy: IfNotPresent
  # imagePullSecrets: []

nameOverride: ""
fullnameOverride: ""

## Use a pre-existing secret for login information
##
# existingSecretName: existing-secret

## Connection details
##
aaisp: {}
  # username: user1@a
  # password: password

mqtt:
  # broker: localhost
  port: 1883
  authenticated: false
  # username: kube
  # password: kube
  # topicPrefix: aaisp

homeassistant:
  enabled: false
  # discoveryPrefix: homeassistant

cronjob:
  schedule: "*/10 * * * *"
  successfulJobsHistoryLimit: 3
  failedJobsHistoryLimit: 1
  # startingDeadlineSeconds: 10
  concurrencyPolicy: Allow

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
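A hedged example of overriding these defaults at install time (credentials and broker are placeholders):

```console
$ helm install aaisp2mqtt nikdoof/aaisp2mqtt \
    --set aaisp.username=user1@a \
    --set aaisp.password=changeme \
    --set mqtt.broker=mqtt.example.com
```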
@@ -2,15 +2,14 @@ apiVersion: v1
appVersion: 0.6.6-ls58
description: A simple web viewer for Calibre libraries
name: calibre-web
version: 1.1.2
version: 1.1.3
keywords:
- calibre-web
- calibre
home: https://github.com/nikdoof/home-k8s-flux/tree/master/charts/calibre-web
home: https://github.com/janeczku/calibre-web
icon: https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/calibre-web-icon.png
sources:
- https://hub.docker.com/r/linuxserver/calibre-web/
- https://github.com/janeczku/calibre-web
maintainers:
- name: nikdoof
  email: andy@tensixtyone.com
@@ -5,7 +5,8 @@ This is a helm chart for [calibre-web](https://calibre-web.com/) leveraging the
## TL;DR;

```shell
$ helm install ./calibre-web
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install calibre-web
```

## Installing the Chart
@@ -13,7 +14,7 @@ $ helm install ./calibre-web
To install the chart with the release name `my-release`:

```console
helm install --name my-release ./calibre-web
helm install --name my-release calibre-web
```

## Uninstalling the Chart
@@ -101,4 +102,4 @@ If you get `Error: rendered manifests contain a resource that already exists. Un

---

Read through the [values.yaml](https://github.com/nikdoof/home-k8s-flux/blob/master/charts/calibre-web/values.yaml) file. It has several commented out suggested values.
Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/calibre-web/values.yaml) file. It has several commented out suggested values.
@@ -1,17 +1,16 @@
apiVersion: v1
description: Deluge is a Python BitTorrent client based on libtorrent
name: deluge
version: 1.1.0
version: 1.2.0
keywords:
- deluge
- libtorrent
home: https://github.com/nikdoof/helm-charts/charts/deluge
home: https://github.com/deluge-torrent/deluge
sources:
- https://hub.docker.com/r/linuxserver/deluge/
- https://github.com/janeczku/deluge
dependencies: []
maintainers:
- name: nikdoof
  email: andy@tensixtyone.com
icon: https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/deluge-icon.png
appVersion: 2.0.3-2201906121747ubuntu18.04.1-ls57
appVersion: 2.0.3-2201906121747ubuntu18.04.1-ls57
@@ -5,7 +5,8 @@ This is a helm chart for [deluge](https://deluge.com/) leveraging the [Linuxserv
## TL;DR;

```shell
$ helm install ./deluge
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install deluge
```

## Installing the Chart
@@ -13,7 +14,7 @@ $ helm install ./deluge
To install the chart with the release name `my-release`:

```console
helm install --name my-release ./deluge
helm install --name my-release deluge
```

## Uninstalling the Chart
@@ -105,4 +106,4 @@ If you get `Error: rendered manifests contain a resource that already exists. Un

---

Read through the [values.yaml](https://github.com/nikdoof/home-k8s-flux/blob/master/charts/deluge/values.yaml) file. It has several commented out suggested values.
Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/deluge/values.yaml) file. It has several commented out suggested values.
1  charts/deluge/ci/test-values.yaml  Normal file
@@ -0,0 +1 @@
---
@@ -1,3 +1,5 @@
---
{{ if .Values.btservice.enabled }}
apiVersion: v1
kind: Service
metadata:
@@ -40,21 +42,14 @@ spec:
  externalTrafficPolicy: {{ .Values.btservice.externalTrafficPolicy }}
  {{- end }}
  ports:
    - name: daemon
      port: 58846
      protocol: TCP
      targetPort: daemon
      {{ if (eq .Values.btservice.type "NodePort") }}
      nodePort: 58846
      {{ end }}
    - name: bt-tcp
      port: 58946
      protocol: TCP
      targetPort: bt-tcp
      {{ if (eq .Values.btservice.type "NodePort") }}
      nodePort: 58946
      nodePort: {{ default 30846 .Values.btservice.nodePort }}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "deluge.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

{{ end }}
@@ -1,3 +1,5 @@
---
{{ if .Values.btservice.enabled }}
apiVersion: v1
kind: Service
metadata:
@@ -45,9 +47,9 @@ spec:
      protocol: UDP
      targetPort: bt-udp
      {{ if (eq .Values.btservice.type "NodePort") }}
      nodePort: 58946
      nodePort: {{ default 30846 .Values.btservice.nodePort }}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "deluge.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}

{{ end }}
53  charts/deluge/templates/daemon-service.yaml  Normal file
@@ -0,0 +1,53 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "deluge.fullname" . }}-daemon
  labels:
    app.kubernetes.io/name: {{ include "deluge.name" . }}
    helm.sh/chart: {{ include "deluge.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.daemonservice.labels }}
{{ toYaml .Values.daemonservice.labels | indent 4 }}
{{- end }}
{{- with .Values.daemonservice.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.daemonservice.type "ClusterIP") (empty .Values.daemonservice.type)) }}
  type: ClusterIP
  {{- if .Values.daemonservice.clusterIP }}
  clusterIP: {{ .Values.daemonservice.clusterIP }}
  {{end}}
{{- else if eq .Values.daemonservice.type "LoadBalancer" }}
  type: {{ .Values.daemonservice.type }}
  {{- if .Values.daemonservice.loadBalancerIP }}
  loadBalancerIP: {{ .Values.daemonservice.loadBalancerIP }}
  {{- end }}
  {{- if .Values.daemonservice.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.daemonservice.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.daemonservice.type }}
{{- end }}
{{- if .Values.daemonservice.externalIPs }}
  externalIPs:
{{ toYaml .Values.daemonservice.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.daemonservice.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.daemonservice.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: daemon
      port: 58846
      protocol: TCP
      targetPort: daemon
      {{ if (eq .Values.daemonservice.type "NodePort") }}
      nodePort: {{ default 30846 .Values.daemonservice.nodePort }}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "deluge.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
@@ -70,7 +70,7 @@
          value: "{{ .Values.puid }}"
        - name: PGID
          value: "{{ .Values.pgid }}"
        {{- if .values.dockerMods }}
        {{- if .Values.dockerMods }}
        - name: DOCKER_MODS
          value: {{ .Values.dockerMods }}
        {{- end }}
@@ -32,6 +32,7 @@ pgid: 1001
##
# dockerMods: linuxserver/deluge:ssh

## Service for the WebUI port
webuiservice:
  type: ClusterIP
  port: 8112
@@ -53,8 +54,10 @@ webuiservice:
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

btservice:
  type: NodePort
## Service for the Daemon port
daemonservice:
  type: ClusterIP
  port:
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
@@ -73,6 +76,31 @@ btservice:
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

## Service for the BT traffic port - ideally these should be a LB due to a high port
## and also sharing TCP and UDP services on the same port.
btservice:
  enabled: false
  type: LoadBalancer
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  # annotations:
  #   metallb.universe.tf/allow-shared-ip: deluge
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations: {}
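A hedged sketch of enabling the new BT service from these values (the flags only mirror keys defined above; release name is arbitrary):

```console
$ helm upgrade --install deluge nikdoof/deluge \
    --set btservice.enabled=true \
    --set btservice.type=LoadBalancer
```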
23  charts/idrac6/.helmignore  Normal file
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
14  charts/idrac6/Chart.yaml  Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: '0.5'
description: iDRAC 6 web interface and VNC proxy
name: idrac6
version: 0.0.2
keywords:
- dell
- idrac
home: https://github.com/DomiStyle/docker-idrac6
sources:
- https://hub.docker.com/r/linuxserver/calibre-web/
maintainers:
- name: nikdoof
  email: andy@tensixtyone.com
4  charts/idrac6/OWNERS  Normal file
@@ -0,0 +1,4 @@
approvers:
- nikdoof
reviewers:
- nikdoof
32  charts/idrac6/README.md  Normal file
@@ -0,0 +1,32 @@
# idrac6

This is a helm chart for iDRAC6 Proxy docker image. This Chart is heavily based on the format used by [billimek](https://github.com/billimek/) for his collection of media related [charts](https://github.com/billimek/billimek-charts/).

## TL;DR;

```shell
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install --set idrac.host=192.168.1.2 --set idrac.user=root --set idrac.password=calvin idrac6
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install --name my-release idrac6
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/idrac6/values.yaml) file. It has several commented out suggested values.
5  charts/idrac6/ci/test-values.yaml  Normal file
@@ -0,0 +1,5 @@
---
idrac:
  host: test
  username: root
  password: calvin
32  charts/idrac6/templates/_helpers.tpl  Normal file
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "idrac6.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "idrac6.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "idrac6.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
28  charts/idrac6/templates/app-pvc.yaml  Normal file
@@ -0,0 +1,28 @@
{{- if and .Values.persistence.app.enabled (not .Values.persistence.app.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "idrac6.fullname" . }}-app
  {{- if .Values.persistence.app.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.app.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.app.size | quote }}
  {{- if .Values.persistence.app.storageClass }}
  {{- if (eq "-" .Values.persistence.app.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.app.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
144  charts/idrac6/templates/deployment.yaml  Normal file
@@ -0,0 +1,144 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "idrac6.fullname" . }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "idrac6.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "idrac6.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
      {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 5800
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: USER_ID
              value: "{{ .Values.puid }}"
            - name: GROUP_ID
              value: "{{ .Values.pgid }}"
            - name: IDRAC_HOST
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
                  key: idrac.host
            - name: IDRAC_USER
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
                  key: idrac.username
            - name: IDRAC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
                  key: idrac.password
            - name: IDRAC_PORT
              value: "{{ .Values.idrac.port }}"
            - name: IDRAC_KEYCODE_HACK
              value: "{{ .Values.idrac.keycode_hack }}"
          volumeMounts:
            - mountPath: /app
              name: app
              {{- if .Values.persistence.app.subPath }}
              subPath: "{{ .Values.persistence.config.subPath }}"
              {{- end }}
            - mountPath: /vmedia
              name: vmedia
              {{- if .Values.persistence.vmedia.subPath }}
              subPath: {{ .Values.persistence.vmedia.subPath }}
              {{- end }}
            - mountPath: /screenshots
              name: screenshots
              {{- if .Values.persistence.screenshots.subPath }}
              subPath: {{ .Values.persistence.screenshots.subPath }}
              {{- end }}
            {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: app
          {{- if .Values.persistence.app.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.app.existingClaim }}{{ .Values.persistence.app.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-app{{- end }}
          {{- else }}
          emptyDir: {}
          {{ end }}
        - name: vmedia
          {{- if .Values.persistence.vmedia.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.vmedia.existingClaim }}{{ .Values.persistence.vmedia.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-vmedia{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: screenshots
          {{- if .Values.persistence.screenshots.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.screenshots.existingClaim }}{{ .Values.persistence.screenshots.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-screenshots{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
38  charts/idrac6/templates/ingress.yaml  Normal file
@@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "idrac6.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}
28  charts/idrac6/templates/screenshots-pvc.yaml  Normal file
@@ -0,0 +1,28 @@
{{- if and .Values.persistence.screenshots.enabled (not .Values.persistence.screenshots.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "idrac6.fullname" . }}-app
  {{- if .Values.persistence.screenshots.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.screenshots.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.screenshots.size | quote }}
  {{- if .Values.persistence.screenshots.storageClass }}
  {{- if (eq "-" .Values.persistence.screenshots.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.screenshots.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
17  charts/idrac6/templates/secrets.yaml  Normal file
@@ -0,0 +1,17 @@
---
{{- if not (.Values.existingSecretName) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "idrac6.fullname" . }}-secret
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ include "idrac6.name" . }}
type: Opaque
data:
  idrac.host: {{ .Values.idrac.host | b64enc }}
  idrac.username: {{ .Values.idrac.username | b64enc }}
  idrac.password: {{ .Values.idrac.password | b64enc }}
{{- end }}
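As with the aaisp2mqtt chart, a pre-existing Secret can be supplied instead of rendering the one above; a rough sketch (placeholder values, key names taken from the deployment's `secretKeyRef`s):

```console
$ kubectl create secret generic my-idrac6-secret \
    --from-literal=idrac.host=192.168.1.2 \
    --from-literal=idrac.username=root \
    --from-literal=idrac.password=calvin
$ helm install --name my-release --set existingSecretName=my-idrac6-secret idrac6
```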
52  charts/idrac6/templates/service.yaml  Normal file
@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "idrac6.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
{{- else }}
  type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
      {{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{.Values.service.nodePort}}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
28  charts/idrac6/templates/vmedia-pvc.yaml  Normal file
@@ -0,0 +1,28 @@
{{- if and .Values.persistence.vmedia.enabled (not .Values.persistence.vmedia.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "idrac6.fullname" . }}-app
  {{- if .Values.persistence.vmedia.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.vmedia.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.vmedia.size | quote }}
  {{- if .Values.persistence.vmedia.storageClass }}
  {{- if (eq "-" .Values.persistence.vmedia.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.vmedia.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
171  charts/idrac6/values.yaml  Normal file
@@ -0,0 +1,171 @@
# Default values for idrac6.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: domistyle/idrac6
  tag: v0.5
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1000
pgid: 1000

# Existing secret, overrides idrac values
# existingSecret: test

# iDRAC connection details
idrac:
  host:
  username: root
  password: calvin
  port: 443
  keycode_hack: false

service:
  type: ClusterIP
  port: 5800
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations:
    {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  app:
    enabled: false
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  vmedia:
    enabled: false
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  screenshots:
    enabled: false
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner. (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  extraExistingClaimMounts: []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    #   existingClaim:
    #   readOnly: true

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}
21  charts/nfs-client-provisioner/.helmignore  Normal file
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
14  charts/nfs-client-provisioner/Chart.yaml  Normal file
@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: 3.1.0
description: nfs-client is an automatic provisioner that uses your *already configured* NFS server, automatically creating Persistent Volumes.
name: nfs-client-provisioner
home: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
version: 1.2.13
sources:
- https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
keywords:
- nfs
- storage
maintainers:
- name: nikdoof
  email: andy@tensixtyone.com
73
charts/nfs-client-provisioner/README.md
Normal file
73
charts/nfs-client-provisioner/README.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# nfs-client-provisioner
|
||||
|
||||
The [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) is an automatic provisioner for Kubernetes that uses your *already configured* NFS server, automatically creating Persistent Volumes.
|
||||
|
||||
## TL;DR;
|
||||
|
||||
```console
|
||||
$ helm install --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
|
||||
```
|
||||
|
||||
For **arm** deployments set `image.repository` to `--set image.repository=quay.io/external_storage/nfs-client-provisioner-arm`
|
||||
|
||||
## Introduction
|
||||
|
||||
This charts installs custom [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) into a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It also installs a [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) into the cluster which dynamically creates persistent volumes from single NFS share.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Kubernetes 1.9+
|
||||
- Existing NFS Share
|
||||
|
||||
## Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-release`:
|
||||
|
||||
```console
|
||||
$ helm install --name my-release --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
|
||||
```
|
||||
|
||||
The command deploys the given storage class in the default configuration. It can be used afterswards to provision persistent volumes. The [configuration](#configuration) section lists the parameters that can be configured during installation.
|
||||
|
||||
> **Tip**: List all releases using `helm list`
|
||||
|
||||
## Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-release` deployment:
|
||||
|
||||
```console
|
||||
$ helm delete my-release
|
||||
```
|
||||
|
||||
The command removes all the Kubernetes components associated with the chart and deletes the release.
|
||||
|
||||

## Configuration

The following table lists the configurable parameters of this chart and their default values.

| Parameter                           | Description                                                  | Default                                            |
| ----------------------------------- | ------------------------------------------------------------ | -------------------------------------------------- |
| `replicaCount`                      | Number of provisioner instances to deploy                    | `1`                                                |
| `strategyType`                      | Strategy used to replace old Pods with new ones              | `Recreate`                                         |
| `image.repository`                  | Provisioner image                                             | `quay.io/external_storage/nfs-client-provisioner`  |
| `image.tag`                         | Version of provisioner image                                  | `v3.1.0-k8s1.11`                                   |
| `image.pullPolicy`                  | Image pull policy                                             | `IfNotPresent`                                     |
| `storageClass.name`                 | Name of the StorageClass                                      | `nfs-client`                                       |
| `storageClass.defaultClass`         | Set as the default StorageClass                               | `false`                                            |
| `storageClass.allowVolumeExpansion` | Allow expanding the volume                                    | `true`                                             |
| `storageClass.reclaimPolicy`        | Method used to reclaim an obsoleted volume                    | `Delete`                                           |
| `storageClass.provisionerName`      | Name of the provisioner (generated if unset)                  | null                                               |
| `storageClass.archiveOnDelete`      | Archive PVC data when the claim is deleted                    | `true`                                             |
| `storageClass.accessModes`          | Access mode for the PV                                        | `ReadWriteOnce`                                    |
| `nfs.server`                        | Hostname or IP address of the NFS server                      | null                                               |
| `nfs.path`                          | Base path of the mount point to be used                       | `/ifs/kubernetes`                                  |
| `nfs.mountOptions`                  | Mount options (e.g. `nfsvers=3`)                              | null                                               |
| `resources`                         | Resources required (e.g. CPU, memory)                         | `{}`                                               |
| `rbac.create`                       | Use Role-based Access Control                                 | `true`                                             |
| `podSecurityPolicy.enabled`         | Create & use Pod Security Policy resources                    | `false`                                            |
| `priorityClassName`                 | Pod priorityClassName                                         | null                                               |
| `serviceAccount.create`             | Whether a ServiceAccount should be created                    | `true`                                             |
| `serviceAccount.name`               | Name of the ServiceAccount to use                             | null                                               |
| `nodeSelector`                      | Node labels for pod assignment                                | `{}`                                               |
| `affinity`                          | Affinity settings                                             | `{}`                                               |
| `tolerations`                       | List of node taints to tolerate                               | `[]`                                               |
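
For example, to point the provisioner at a share that needs specific mount options, note that `nfs.mountOptions` takes a list (the server address and options below are illustrative):

```yaml
nfs:
  server: 192.168.1.10
  path: /exported/path
  mountOptions:
    - nfsvers=3
    - nolock
```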
5
charts/nfs-client-provisioner/ci/test-values.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
nfs:
|
||||
server: 127.0.0.1
|
||||
podSecurityPolicy:
|
||||
enabled: true
|
||||
buildMode: true
|
||||
62
charts/nfs-client-provisioner/templates/_helpers.tpl
Normal file
@@ -0,0 +1,62 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "nfs-client-provisioner.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "nfs-client-provisioner.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "nfs-client-provisioner.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{- define "nfs-client-provisioner.provisionerName" -}}
|
||||
{{- if .Values.storageClass.provisionerName -}}
|
||||
{{- printf .Values.storageClass.provisionerName -}}
|
||||
{{- else -}}
|
||||
cluster.local/{{ template "nfs-client-provisioner.fullname" . -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "nfs-client-provisioner.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "nfs-client-provisioner.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Return the appropriate apiVersion for podSecurityPolicy.
|
||||
*/}}
|
||||
{{- define "podSecurityPolicy.apiVersion" -}}
|
||||
{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
{{- print "policy/v1beta1" -}}
|
||||
{{- else -}}
|
||||
{{- print "extensions/v1beta1" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
30
charts/nfs-client-provisioner/templates/clusterrole.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
{{- if .Values.rbac.create }}
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
name: {{ template "nfs-client-provisioner.fullname" . }}-runner
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "update", "patch"]
|
||||
{{- if .Values.podSecurityPolicy.enabled }}
|
||||
- apiGroups: ['extensions']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,19 @@
|
||||
{{- if .Values.rbac.create }}
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
name: run-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: {{ template "nfs-client-provisioner.fullname" . }}-runner
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
77
charts/nfs-client-provisioner/templates/deployment.yaml
Normal file
@@ -0,0 +1,77 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ template "nfs-client-provisioner.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
strategy:
|
||||
type: {{ .Values.strategyType }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
{{- if and (.Values.tolerations) (semverCompare "<1.6-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
scheduler.alpha.kubernetes.io/tolerations: '{{ toJson .Values.tolerations }}'
|
||||
{{- end }}
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
serviceAccountName: {{ template "nfs-client-provisioner.serviceAccountName" . }}
|
||||
{{- if .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{ toYaml .Values.nodeSelector | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.affinity }}
|
||||
affinity:
|
||||
{{ toYaml .Values.affinity | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.priorityClassName }}
|
||||
priorityClassName: {{ .Values.priorityClassName | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{ toYaml .Values.imagePullSecrets | indent 8 }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
volumeMounts:
|
||||
- name: nfs-client-root
|
||||
mountPath: /persistentvolumes
|
||||
env:
|
||||
- name: PROVISIONER_NAME
|
||||
value: {{ template "nfs-client-provisioner.provisionerName" . }}
|
||||
- name: NFS_SERVER
|
||||
value: {{ .Values.nfs.server }}
|
||||
- name: NFS_PATH
|
||||
value: {{ .Values.nfs.path }}
|
||||
{{- with .Values.resources }}
|
||||
resources:
|
||||
{{ toYaml . | indent 12 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: nfs-client-root
|
||||
{{- if .Values.buildMode }}
|
||||
emptyDir: {}
|
||||
{{- else if .Values.nfs.mountOptions }}
|
||||
persistentVolumeClaim:
|
||||
claimName: pvc-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
{{- else }}
|
||||
nfs:
|
||||
server: {{ .Values.nfs.server }}
|
||||
path: {{ .Values.nfs.path }}
|
||||
{{- end }}
|
||||
{{- if and (.Values.tolerations) (semverCompare "^1.6-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
tolerations:
|
||||
{{ toYaml .Values.tolerations | indent 6 }}
|
||||
{{- end }}
|
||||
@@ -0,0 +1,25 @@
|
||||
{{ if .Values.nfs.mountOptions -}}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: pv-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
labels:
|
||||
nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }}
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Mi
|
||||
volumeMode: Filesystem
|
||||
accessModes:
|
||||
- {{ .Values.storageClass.accessModes }}
|
||||
persistentVolumeReclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
|
||||
storageClassName: ""
|
||||
{{- if .Values.nfs.mountOptions }}
|
||||
mountOptions:
|
||||
{{- range .Values.nfs.mountOptions }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
nfs:
|
||||
server: {{ .Values.nfs.server }}
|
||||
path: {{ .Values.nfs.path }}
|
||||
{{ end -}}
|
||||
@@ -0,0 +1,17 @@
|
||||
{{ if .Values.nfs.mountOptions -}}
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: pvc-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
spec:
|
||||
accessModes:
|
||||
- {{ .Values.storageClass.accessModes }}
|
||||
volumeMode: Filesystem
|
||||
storageClassName: ""
|
||||
selector:
|
||||
matchLabels:
|
||||
nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }}
|
||||
resources:
|
||||
requests:
|
||||
storage: 10Mi
|
||||
{{ end -}}
|
||||
@@ -0,0 +1,31 @@
|
||||
{{- if .Values.podSecurityPolicy.enabled }}
|
||||
apiVersion: {{ template "podSecurityPolicy.apiVersion" . }}
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: {{ template "nfs-client-provisioner.fullname" . }}
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
spec:
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
requiredDropCapabilities:
|
||||
- ALL
|
||||
volumes:
|
||||
- 'secret'
|
||||
- 'nfs'
|
||||
hostNetwork: false
|
||||
hostIPC: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: 'RunAsAny'
|
||||
seLinux:
|
||||
rule: 'RunAsAny'
|
||||
supplementalGroups:
|
||||
rule: 'RunAsAny'
|
||||
fsGroup:
|
||||
rule: 'RunAsAny'
|
||||
readOnlyRootFilesystem: false
|
||||
{{- end }}
|
||||
21
charts/nfs-client-provisioner/templates/role.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
{{- if .Values.rbac.create }}
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
{{- if .Values.podSecurityPolicy.enabled }}
|
||||
- apiGroups: ['extensions']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}]
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
19
charts/nfs-client-provisioner/templates/rolebinding.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
{{- if .Values.rbac.create }}
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
11
charts/nfs-client-provisioner/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
{{ if .Values.serviceAccount.create }}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
|
||||
{{- end -}}
|
||||
26
charts/nfs-client-provisioner/templates/storageclass.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
{{ if .Values.storageClass.create -}}
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ template "nfs-client-provisioner.name" . }}
|
||||
chart: {{ template "nfs-client-provisioner.chart" . }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
name: {{ .Values.storageClass.name }}
|
||||
{{- if .Values.storageClass.defaultClass }}
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
{{- end }}
|
||||
provisioner: {{ template "nfs-client-provisioner.provisionerName" . }}
|
||||
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
|
||||
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
|
||||
parameters:
|
||||
archiveOnDelete: "{{ .Values.storageClass.archiveOnDelete }}"
|
||||
{{- if .Values.nfs.mountOptions }}
|
||||
mountOptions:
|
||||
{{- range .Values.nfs.mountOptions }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{ end -}}
|
||||
78
charts/nfs-client-provisioner/values.yaml
Normal file
@@ -0,0 +1,78 @@
|
||||
# Default values for nfs-client-provisioner.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
strategyType: Recreate
|
||||
|
||||
image:
|
||||
repository: quay.io/external_storage/nfs-client-provisioner
|
||||
tag: v3.1.0-k8s1.11
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
nfs:
|
||||
server:
|
||||
path: /ifs/kubernetes
|
||||
mountOptions:
|
||||
|
||||
# For creating the StorageClass automatically:
|
||||
storageClass:
|
||||
create: true
|
||||
|
||||
# Set a provisioner name. If unset, a name will be generated.
|
||||
# provisionerName:
|
||||
|
||||
# Set StorageClass as the default StorageClass
|
||||
# Ignored if storageClass.create is false
|
||||
defaultClass: false
|
||||
|
||||
# Set a StorageClass name
|
||||
# Ignored if storageClass.create is false
|
||||
name: nfs-client
|
||||
|
||||
# Allow volume to be expanded dynamically
|
||||
allowVolumeExpansion: true
|
||||
|
||||
# Method used to reclaim an obsoleted volume
|
||||
reclaimPolicy: Delete
|
||||
|
||||
# When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
|
||||
archiveOnDelete: true
|
||||
|
||||
# Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
|
||||
accessModes: ReadWriteOnce
|
||||
|
||||
## For RBAC support:
|
||||
rbac:
|
||||
# Specifies whether RBAC resources should be created
|
||||
create: true
|
||||
|
||||
# If true, create & use Pod Security Policy resources
|
||||
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
|
||||
podSecurityPolicy:
|
||||
enabled: false
|
||||
|
||||
## Set pod priorityClassName
|
||||
# priorityClassName: ""
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a ServiceAccount should be created
|
||||
create: true
|
||||
|
||||
# The name of the ServiceAccount to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name:
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
@@ -2,4 +2,15 @@ apiVersion: v1
|
||||
appVersion: "0.8.5"
|
||||
description: Collect VMware vCenter and ESXi performance metrics and send them to InfluxDB
|
||||
name: vsphere-influxdb-go
|
||||
version: 0.1.0
|
||||
version: 0.2.1
|
||||
keywords:
|
||||
- vsphere
|
||||
- influxdb
|
||||
- esx
|
||||
home: https://github.com/Oxalide/vsphere-influxdb-go
|
||||
sources:
|
||||
- https://hub.docker.com/r/nikdoof/vsphere-influxdb-go/
|
||||
- https://github.com/janeczku/calibre-web
|
||||
maintainers:
|
||||
- name: nikdoof
|
||||
email: andy@tensixtyone.com
|
||||
|
||||
7
charts/vsphere-influxdb-go/ci/test-values.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
vsphere:
|
||||
hostname: vcenter
|
||||
username: test
|
||||
password: test
|
||||
|
||||
influxdb:
|
||||
hostname: test
|
||||
@@ -1,80 +1,5 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
config.json: "{\r\n \"Domain\": \".lab\",\r\n \"RemoveHostDomainName\": false,\r\n
|
||||
\ \"Interval\": 60,\r\n \"VCenters\": [],\r\n \"InfluxDB\": {\r\n \"Prefix\":
|
||||
\"vsphere_\",\r\n \"Hostname\": \"http://influxdb:8086\",\r\n \"Database\":
|
||||
\"vmware_performance\"\r\n },\r\n \"Metrics\": [\r\n {\r\n \"ObjectType\":
|
||||
[\r\n \"VirtualMachine\",\r\n \"HostSystem\"\r\n
|
||||
\ ],\r\n \"Definition\": [\r\n {\r\n \"Metric\":
|
||||
\"cpu.usage.average\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"cpu.usage.maximum\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"cpu.usagemhz.average\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"cpu.usagemhz.maximum\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"cpu.wait.summation\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"cpu.system.summation\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"cpu.ready.summation\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"mem.usage.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"mem.usage.maximum\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"mem.consumed.average\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"mem.consumed.maximum\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"mem.active.average\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"mem.active.maximum\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"mem.vmmemctl.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"mem.vmmemctl.maximum\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"mem.totalCapacity.average\",\r\n \"Instances\": \"*\"\r\n
|
||||
\ },\r\n {\r\n \"Metric\": \"net.packetsRx.summation\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"net.packetsTx.summation\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"net.throughput.usage.average\",\r\n \"Instances\": \"*\"\r\n
|
||||
\ },\r\n {\r\n \"Metric\": \"net.received.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"net.transmitted.average\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"net.throughput.usage.nfs.average\",\r\n \"Instances\": \"*\"\r\n
|
||||
\ },\r\n {\r\n \"Metric\": \"datastore.numberReadAveraged.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"datastore.numberWriteAveraged.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"datastore.read.average\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"datastore.write.average\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"datastore.totalReadLatency.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"datastore.totalWriteLatency.average\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"mem.capacity.provisioned.average\",\r\n \"Instances\": \"*\"\r\n
|
||||
\ },\r\n {\r\n \"Metric\": \"cpu.corecount.provisioned.average\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n }\r\n ]\r\n
|
||||
\ },\r\n {\r\n \"ObjectType\": [\r\n \"VirtualMachine\"\r\n
|
||||
\ ],\r\n \"Definition\": [\r\n {\r\n \"Metric\":
|
||||
\"datastore.datastoreVMObservedLatency.latest\",\r\n \"Instances\":
|
||||
\"*\"\r\n }\r\n ]\r\n },\r\n {\r\n \"ObjectType\":
|
||||
[\r\n \"HostSystem\"\r\n ],\r\n \"Definition\":
|
||||
[\r\n {\r\n \"Metric\": \"disk.maxTotalLatency.latest\",\r\n
|
||||
\ \"Instances\": \"\"\r\n },\r\n {\r\n
|
||||
\ \"Metric\": \"disk.numberReadAveraged.average\",\r\n \"Instances\":
|
||||
\"*\"\r\n },\r\n {\r\n \"Metric\":
|
||||
\"disk.numberWriteAveraged.average\",\r\n \"Instances\": \"*\"\r\n
|
||||
\ },\r\n {\r\n \"Metric\": \"net.throughput.contention.summation\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n }\r\n ]\r\n
|
||||
\ },\r\n {\r\n \"ObjectType\": [\r\n \"Datastore\"\r\n
|
||||
\ ],\r\n \"Definition\": [\r\n {\r\n \"Metric\":
|
||||
\"disk.capacity.latest\",\r\n \"Instances\": \"*\"\r\n },\r\n
|
||||
\ {\r\n \"Metric\": \"disk.used.latest\",\r\n
|
||||
\ \"Instances\": \"*\"\r\n }\r\n ]\r\n
|
||||
\ }\r\n ]\r\n}"
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: vsphere-influxdb-go-config
|
||||
@@ -83,4 +8,6 @@ metadata:
|
||||
helm.sh/chart: {{ include "vsphere-influxdb-go.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ include "vsphere-influxdb-go.name" . }}
|
||||
|
||||
data:
|
||||
config.json: |-
|
||||
{{ toJson .Values.config | indent 4}}
|
||||
|
||||
@@ -37,6 +37,105 @@ cronjob:
|
||||
failedJobsHistoryLimit: 1
|
||||
# startingDeadlineSeconds: 10
|
||||
|
||||
## Configuration data that is written to vsphere-influxdb-go.
## If you need additional metrics then this is where you can
## add them.
##
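## As a sketch (illustrative only, not part of the defaults; check the vSphere
## performance counter reference for valid counter names), one more block
## appended under Metrics would look like:
##   - ObjectType:
##       - VirtualMachine
##       - HostSystem
##     Definition:
##       - Metric: disk.usage.average
##         Instances: "*"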
config:
|
||||
Domain: ".lab"
|
||||
RemoveHostDomainName: false
|
||||
Interval: 60
|
||||
VCenters: []
|
||||
InfluxDB: []
|
||||
Metrics:
|
||||
- ObjectType:
|
||||
- VirtualMachine
|
||||
- HostSystem
|
||||
Definition:
|
||||
- Metric: cpu.usage.average
|
||||
Instances: "*"
|
||||
- Metric: cpu.usage.maximum
|
||||
Instances: "*"
|
||||
- Metric: cpu.usagemhz.average
|
||||
Instances: "*"
|
||||
- Metric: cpu.usagemhz.maximum
|
||||
Instances: "*"
|
||||
- Metric: cpu.wait.summation
|
||||
Instances: "*"
|
||||
- Metric: cpu.system.summation
|
||||
Instances: "*"
|
||||
- Metric: cpu.ready.summation
|
||||
Instances: "*"
|
||||
- Metric: mem.usage.average
|
||||
Instances: "*"
|
||||
- Metric: mem.usage.maximum
|
||||
Instances: "*"
|
||||
- Metric: mem.consumed.average
|
||||
Instances: "*"
|
||||
- Metric: mem.consumed.maximum
|
||||
Instances: "*"
|
||||
- Metric: mem.active.average
|
||||
Instances: "*"
|
||||
- Metric: mem.active.maximum
|
||||
Instances: "*"
|
||||
- Metric: mem.vmmemctl.average
|
||||
Instances: "*"
|
||||
- Metric: mem.vmmemctl.maximum
|
||||
Instances: "*"
|
||||
- Metric: mem.totalCapacity.average
|
||||
Instances: "*"
|
||||
- Metric: net.packetsRx.summation
|
||||
Instances: "*"
|
||||
- Metric: net.packetsTx.summation
|
||||
Instances: "*"
|
||||
- Metric: net.throughput.usage.average
|
||||
Instances: "*"
|
||||
- Metric: net.received.average
|
||||
Instances: "*"
|
||||
- Metric: net.transmitted.average
|
||||
Instances: "*"
|
||||
- Metric: net.throughput.usage.nfs.average
|
||||
Instances: "*"
|
||||
- Metric: datastore.numberReadAveraged.average
|
||||
Instances: "*"
|
||||
- Metric: datastore.numberWriteAveraged.average
|
||||
Instances: "*"
|
||||
- Metric: datastore.read.average
|
||||
Instances: "*"
|
||||
- Metric: datastore.write.average
|
||||
Instances: "*"
|
||||
- Metric: datastore.totalReadLatency.average
|
||||
Instances: "*"
|
||||
- Metric: datastore.totalWriteLatency.average
|
||||
Instances: "*"
|
||||
- Metric: mem.capacity.provisioned.average
|
||||
Instances: "*"
|
||||
- Metric: cpu.corecount.provisioned.average
|
||||
Instances: "*"
|
||||
- ObjectType:
|
||||
- VirtualMachine
|
||||
Definition:
|
||||
- Metric: datastore.datastoreVMObservedLatency.latest
|
||||
Instances: "*"
|
||||
- ObjectType:
|
||||
- HostSystem
|
||||
Definition:
|
||||
- Metric: disk.maxTotalLatency.latest
|
||||
Instances: ''
|
||||
- Metric: disk.numberReadAveraged.average
|
||||
Instances: "*"
|
||||
- Metric: disk.numberWriteAveraged.average
|
||||
Instances: "*"
|
||||
- Metric: net.throughput.contention.summation
|
||||
Instances: "*"
|
||||
- ObjectType:
|
||||
- Datastore
|
||||
Definition:
|
||||
- Metric: disk.capacity.latest
|
||||
Instances: "*"
|
||||
- Metric: disk.used.latest
|
||||
Instances: "*"
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
|
||||
22
charts/zigbee2mqttassistant/.helmignore
Normal file
@@ -0,0 +1,22 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
15
charts/zigbee2mqttassistant/Chart.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
appVersion: "0.3.178"
|
||||
description: A web GUI for Zigbee2Mqtt
|
||||
name: zigbee2mqttassistant
|
||||
version: 0.1.1
|
||||
keywords:
|
||||
- zigbee
|
||||
- mqtt
|
||||
- homeassistant
|
||||
home: https://github.com/yllibed/Zigbee2MqttAssistant
|
||||
sources:
|
||||
- https://hub.docker.com/r/carldebilly/zigbee2mqttassistant
|
||||
maintainers:
|
||||
- name: nikdoof
|
||||
email: andy@tensixtyone.com
|
||||
3
charts/zigbee2mqttassistant/ci/default-values.yaml
Normal file
@@ -0,0 +1,3 @@
|
||||
---
|
||||
z2ma:
|
||||
broker: mqtt.svc.cluster.local
|
||||
4
charts/zigbee2mqttassistant/ci/mqtt-existing.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
---
|
||||
z2ma:
|
||||
broker: mqtt.svc.cluster.local
|
||||
existingSecretName: test
|
||||
5
charts/zigbee2mqttassistant/ci/mqtt-userpass-values.yaml
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
z2ma:
|
||||
broker: mqtt.svc.cluster.local
|
||||
username: test
|
||||
password: test2
|
||||
21
charts/zigbee2mqttassistant/templates/NOTES.txt
Normal file
@@ -0,0 +1,21 @@
|
||||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "zigbee2mqttassistant.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "zigbee2mqttassistant.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "zigbee2mqttassistant.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "zigbee2mqttassistant.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl port-forward $POD_NAME 8080:80
|
||||
{{- end }}
|
||||
56
charts/zigbee2mqttassistant/templates/_helpers.tpl
Normal file
@@ -0,0 +1,56 @@
|
||||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "zigbee2mqttassistant.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "zigbee2mqttassistant.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "zigbee2mqttassistant.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "zigbee2mqttassistant.labels" -}}
|
||||
app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
|
||||
helm.sh/chart: {{ include "zigbee2mqttassistant.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "zigbee2mqttassistant.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
{{ default (include "zigbee2mqttassistant.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else -}}
|
||||
{{ default "default" .Values.serviceAccount.name }}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
73
charts/zigbee2mqttassistant/templates/deployment.yaml
Normal file
@@ -0,0 +1,73 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "zigbee2mqttassistant.fullname" . }}
|
||||
labels:
|
||||
{{ include "zigbee2mqttassistant.labels" . | indent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
protocol: TCP
|
||||
#livenessProbe:
|
||||
# httpGet:
|
||||
# path: /
|
||||
# port: http
|
||||
#readinessProbe:
|
||||
# httpGet:
|
||||
# path: /
|
||||
# port: http
|
||||
env:
|
||||
- name: TZ
|
||||
value: {{ .Values.TZ }}
|
||||
- name: Z2MA_SETTINGS__MQTTSERVER
|
||||
value: {{ .Values.z2ma.broker }}
|
||||
{{- if and (.Values.z2ma.username) (.Values.z2ma.password) }}
|
||||
- name: Z2MA_SETTINGS__MQTTUSERNAME
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ default "zigbee2mqttassistant-secret" .Values.z2ma.existingSecretName }}
|
||||
key: username
|
||||
- name: Z2MA_SETTINGS__MQTTPASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ default "zigbee2mqttassistant-secret" .Values.z2ma.existingSecretName }}
|
||||
key: password
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
41
charts/zigbee2mqttassistant/templates/ingress.yaml
Normal file
@@ -0,0 +1,41 @@
|
||||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "zigbee2mqttassistant.fullname" . -}}
|
||||
{{- $svcPort := .Values.service.port -}}
|
||||
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
labels:
|
||||
{{ include "zigbee2mqttassistant.labels" . | indent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ . }}
|
||||
backend:
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $svcPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
15
charts/zigbee2mqttassistant/templates/secrets.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
{{- if and (and (not (.Values.z2ma.existingSecretName)) (.Values.z2ma.username)) (.Values.z2ma.password) }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "zigbee2mqttassistant.name" . }}-secret
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
|
||||
helm.sh/chart: {{ include "zigbee2mqttassistant.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ include "zigbee2mqttassistant.name" . }}
|
||||
type: Opaque
|
||||
data:
|
||||
username: {{ .Values.z2ma.username | b64enc }}
|
||||
password: {{ .Values.z2ma.password | b64enc }}
|
||||
{{- end }}
|
||||
16
charts/zigbee2mqttassistant/templates/service.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "zigbee2mqttassistant.fullname" . }}
|
||||
labels:
|
||||
{{ include "zigbee2mqttassistant.labels" . | indent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: http
|
||||
protocol: TCP
|
||||
name: http
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
77
charts/zigbee2mqttassistant/values.yaml
Normal file
@@ -0,0 +1,77 @@
|
||||
# Default values for zigbee2mqttassistant.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: carldebilly/zigbee2mqttassistant
|
||||
tag: 0.3.178
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
TZ: UTC
|
||||
|
||||
## Z2MA Settings (more details: https://github.com/yllibed/Zigbee2MqttAssistant/blob/master/README.md)
|
||||
z2ma: {}
|
||||
# broker: mqtt.svc.cluster.local
|
||||
|
||||
## You can provide your MQTT username and password and a secret will be created...
|
||||
##
|
||||
# username: user
|
||||
# password: pass
|
||||
|
||||
## ... or you can provide an existing secret to pull these values from
|
||||
##
|
||||
# existingSecretName: mqtt-secret
|
||||
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 8800
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: chart-example.local
|
||||
paths: []
|
||||
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}