Compare commits


31 Commits

Author SHA1 Message Date
renovate[bot]
f99101d00b [zigbee2mqttassistant] Update to v0.3.178 (#19)
* Update carldebilly/zigbee2mqttassistant Docker tag to v0.3.178

* [zigbee2mqttassistant] Bump chart version

* Fix secret name

Co-authored-by: Renovate Bot <bot@renovateapp.com>
Co-authored-by: Andrew Williams <andy@tensixtyone.com>
2021-07-02 16:38:23 +01:00
renovate[bot]
1548f606e7 Update external minor dep (#24)
Co-authored-by: Renovate Bot <bot@renovateapp.com>
2021-07-02 16:15:21 +01:00
f33c104b1f Update renovate config 2021-07-02 16:14:07 +01:00
1007982a78 [vsphere-influxdb-go] Fix default configmap (#23) 2021-07-02 14:09:40 +01:00
renovate[bot]
bea73339d5 Update helm/chart-releaser-action action to v1.2.1 (#20)
Co-authored-by: Renovate Bot <bot@renovateapp.com>
2021-07-02 09:24:41 +01:00
renovate[bot]
644cf70b31 Add renovate.json (#18)
Co-authored-by: Renovate Bot <bot@renovateapp.com>
2021-07-02 09:23:27 +01:00
d929f18571 [idrac6] Use secrets for iDRAC login details (#16) 2021-05-21 12:31:30 +01:00
ff5fb746ad Add 'idrac6' chart (#14)
* Add idrac6 chart

* Add test values
2021-05-21 11:39:37 +01:00
6aec140b79 [nfs-client-provisioner] Remove deprecation tag (#13)
* [nfs-client-provisioner] Remove deprecation tag

* [nfs-client-provisioner] Add maintainers
2021-01-01 16:18:57 +00:00
2bb2c4a7d3 Update release.yaml 2021-01-01 13:47:48 +00:00
a29c9d7cff [nfs-client-provisioner] Import from helm/stable repo (#11)
* Import nfs-client-provisioner from helm/stable repo

* Update chart testing

* Correct testing
2021-01-01 13:46:14 +00:00
343a41b2e7 [aaisp2mqtt] Bump default version (#10) 2020-10-01 07:20:40 +01:00
c38645c597 [aaisp2mqtt] Fix concurrency for the cronjob (#9)
* [aaisp2mqtt] Fix cronjob concurrency

* [aaisp2mqtt] Bump chart version
2020-08-30 07:16:57 +01:00
c38382eb5f Merge pull request #7 from nikdoof/zigbee2mqttassistant
New Module - zigbee2mqttassistant
2020-05-24 11:07:36 +01:00
083e8bd0e6 [zigbee2mqttassistant] Correct lint issues 2020-05-22 21:55:33 +01:00
4fcba6be6c Merge pull request #6 from nikdoof/aaisp2mqtt
Add aaisp2mqtt
2020-05-16 16:54:25 +01:00
9fa7c5254d Add aaisp2mqtt 2020-05-16 16:45:13 +01:00
1c8b3a7a94 New Module - zigbee2mqttassistant 2020-05-16 16:26:07 +01:00
f67ffc9a81 Merge pull request #5 from nikdoof/config-improvements
vsphere-influxdb-go: Improve configuration
2020-05-08 10:53:34 +01:00
5e16f5a805 [vsphere-influxdb-go] Build config.json from values 2020-05-08 10:43:58 +01:00
8b67ed8628 Merge pull request #4 from nikdoof/vsphere-influxdb-go-testing
Add Chart Testing and fixes for Deluge
2020-05-07 21:55:29 +01:00
0da018df2f Deluge fixes for #3 2020-05-07 21:47:48 +01:00
4dc7b0f9f2 Bump versions 2020-05-07 21:20:04 +01:00
4f8a41ab16 Fix all lint issues 2020-05-07 21:17:52 +01:00
10a68e25b9 Add missing newlines 2020-05-07 21:04:29 +01:00
c1461dd083 Update Chart.yaml files 2020-05-07 21:02:53 +01:00
338beeead8 Add chart testing 2020-05-07 20:46:16 +01:00
365626339a Update documentation 2020-05-07 19:55:03 +01:00
9a55b3f92c Merge pull request #1 from nikdoof/deluge-dockermods-fix
[deluge] Fix DockerMods
2020-05-07 17:13:36 +01:00
67b78606aa [deluge] Add missing CR at the end of Chart.yaml 2020-05-07 16:42:38 +01:00
c9e3c402ad [deluge] Fix DockerMods 2020-05-07 16:38:26 +01:00
74 changed files with 2159 additions and 131 deletions

.editorconfig Normal file

@@ -0,0 +1,16 @@
# EditorConfig helps us maintain consistent formatting on non-source files.
# Visit https://editorconfig.org/ for details on how to configure your editor to respect these settings.
# This is the top-most .editorconfig in this repository.
root = true
[*]
indent_style = space
trim_trailing_whitespace = true
insert_final_newline = true
end_of_line = lf
[*.{yaml,yml}]
indent_size = 2

.github/renovate.json5 vendored Normal file

@@ -0,0 +1,43 @@
{
"enabled": true,
"dependencyDashboard": true,
"dependencyDashboardTitle": "Renovate Dashboard",
"suppressNotifications": ["prIgnoreNotification"],
"rebaseWhen": "conflicted",
"prConcurrentLimit": 5,
"helm-values": {
"enabled": false
},
"helmv3": {
"fileMatch": ["charts/.+/Chart\\.yaml$"]
},
"packageRules": [
{
"datasources": ["helm"],
"commitMessageTopic": "Helm chart {{depName}}",
"separateMinorPatch": true
},
{
"commitMessagePrefix": "[{{{parentDir}}}]",
"branchTopic": "{{{parentDir}}}-{{{depNameSanitized}}}-{{{newMajor}}}{{#if isPatch}}.{{{newMinor}}}{{/if}}.x{{#if isLockfileUpdate}}-lockfile{{/if}}",
"updateTypes": ["major"],
"bumpVersion": "major",
"labels": ["dependency/major"],
"excludePackageNames": ["common"],
},
{
"updateTypes": ["minor"],
"bumpVersion": "minor",
"labels": ["dependency/minor"],
"excludePackageNames": ["common"],
"groupName": ["external minor dep"],
},
{
"updateTypes": ["patch"],
"bumpVersion": "patch",
"labels": ["dependency/patch"],
"excludePackageNames": ["common"],
"groupName": ["external patch dep"],
}
]
}


@@ -7,21 +7,36 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v1
with:
version: v3.4.0
- uses: actions/setup-python@v2
with:
python-version: 3.7
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.1.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
id: lint
uses: helm/chart-testing-action@v1.0.0-alpha.3
with:
command: lint
run: ct lint
- name: Create kind cluster
uses: helm/kind-action@v1.0.0-alpha.3
with:
install_local_path_provisioner: true
if: steps.lint.outputs.changed == 'true'
uses: helm/kind-action@v1.2.0
if: steps.list-changed.outputs.changed == 'true'
- name: Run chart-testing (install)
uses: helm/chart-testing-action@v1.0.0-alpha.3
with:
command: install
run: ct install


@@ -10,28 +10,22 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
# See https://github.com/helm/chart-releaser-action/issues/6
- name: Install Helm
run: |
curl -sSLo get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get
chmod 700 get_helm.sh
./get_helm.sh
helm init --client-only
- name: Add dependency chart repos
run: |
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
uses: azure/setup-helm@v1
with:
version: v3.4.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.0.0-alpha.2
uses: helm/chart-releaser-action@v1.2.1
with:
charts_repo_url: https://nikdoof.github.io/helm-charts
env:

README.md Normal file

@@ -0,0 +1,32 @@
# Helm Charts
A small set of custom Helm charts for smaller applications not already covered by Stable and other repos.
[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![](https://github.com/nikdoof/helm-charts/workflows/Release%20Charts/badge.svg?branch=master)](https://github.com/nikdoof/helm-charts/actions)
## Usage
[Helm](https://helm.sh) must be installed to use the charts.
Please refer to Helm's [documentation](https://helm.sh/docs/) to get started.
Once Helm is set up properly, add the repo as follows:
```console
helm repo add nikdoof https://nikdoof.github.io/helm-charts/
```
You can then run `helm search nikdoof` to see the charts.
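For example, to refresh the local index and install one of the charts listed below (the release name is illustrative; `--name` follows the Helm 2-style syntax used throughout this repo's chart READMEs):
```console
helm repo update
helm install --name my-deluge nikdoof/deluge
```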
## Charts
See [charts folder](./charts) for a complete list.
* [aaisp2mqtt](./charts/aaisp2mqtt) - A tool to pull information from [Andrews & Arnold](https://www.aa.net.uk/) CHAOSv2 API and output to MQTT
* [calibre-web](./charts/calibre-web) - Web app for browsing, reading and downloading eBooks stored in a Calibre database
* [deluge](./charts/deluge) - Deluge torrent client
## License
[MIT License](./LICENSE)


@@ -2,4 +2,14 @@ apiVersion: v1
appVersion: "0.2"
description: Pulls data from the AAISP CHAOSv2 API into MQTT
name: aaisp-to-mqtt
version: 0.2.4
version: 0.2.5
keywords:
- aaisp
- mqtt
home: https://github.com/nikdoof/aaisp-to-mqtt
sources:
- https://hub.docker.com/r/nikdoof/aaisp-to-mqtt/
- https://github.com/natm/aaisp-to-mqtt
maintainers:
- name: nikdoof
email: andy@tensixtyone.com


@@ -0,0 +1,4 @@
mqtt:
broker: localhost
existingSecretName: aaisp-to-mqtt-secret


@@ -0,0 +1,6 @@
aaisp:
username: test1@a
password: TesttestTest
mqtt:
broker: localhost


@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,15 @@
apiVersion: v1
appVersion: "0.3.1"
description: Pulls data from the AAISP CHAOSv2 API into MQTT
name: aaisp2mqtt
version: 0.3.2
keywords:
- aaisp
- mqtt
home: https://github.com/nikdoof/aaisp2mqtt
sources:
- https://hub.docker.com/r/nikdoof/aaisp2mqtt/
- https://github.com/natm/aaisp2mqtt
maintainers:
- name: nikdoof
email: andy@tensixtyone.com


@@ -0,0 +1,4 @@
mqtt:
broker: localhost
existingSecretName: aaisp2mqtt-secret


@@ -0,0 +1,9 @@
aaisp:
username: test1@a
password: TesttestTest
mqtt:
broker: localhost
homeassistant:
enabled: true


@@ -0,0 +1,6 @@
aaisp:
username: test1@a
password: TesttestTest
mqtt:
broker: localhost


@@ -0,0 +1,56 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "aaisp2mqtt.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "aaisp2mqtt.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "aaisp2mqtt.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "aaisp2mqtt.labels" -}}
app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "aaisp2mqtt.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "aaisp2mqtt.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}


@@ -0,0 +1,97 @@
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: {{ include "aaisp2mqtt.fullname" . }}-cronjob
{{- if .Values.deploymentAnnotations }}
annotations:
{{- range $key, $value := .Values.deploymentAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ include "aaisp2mqtt.name" . }}
spec:
schedule: {{ .Values.cronjob.schedule | quote }}
successfulJobsHistoryLimit: {{ .Values.cronjob.successfulJobsHistoryLimit }}
failedJobsHistoryLimit: {{ .Values.cronjob.failedJobsHistoryLimit }}
concurrencyPolicy: {{ .Values.cronjob.concurrencyPolicy }}
{{- if .Values.cronjob.startingDeadlineSeconds }}
startingDeadlineSeconds: {{ .Values.cronjob.startingDeadlineSeconds }}
{{- end }}
jobTemplate:
spec:
backoffLimit: 0
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ include "aaisp2mqtt.name" . }}
spec:
restartPolicy: Never
{{- if .Values.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{ toYaml .Values.resources | indent 16 }}
env:
- name: AAISP_USERNAME
valueFrom:
secretKeyRef:
name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
key: aaisp.username
- name: AAISP_PASSWORD
valueFrom:
secretKeyRef:
name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
key: aaisp.password
- name: MQTT_BROKER
value: {{ .Values.mqtt.broker }}
- name: MQTT_PORT
value: "{{ default 1883 .Values.mqtt.port }}"
{{- if .Values.mqtt.authenticated }}
- name: MQTT_USERNAME
valueFrom:
secretKeyRef:
name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
key: mqtt.username
- name: MQTT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ default "aaisp2mqtt-secret" .Values.existingSecretName }}
key: mqtt.password
{{- end }}
- name: MQTT_TOPIC_PREFIX
value: {{ default "aaisp" .Values.mqtt.topicPrefix }}
{{- if .Values.homeassistant.enabled }}
- name: HOMEASSISTANT_ENABLED
value: '{{ .Values.homeassistant.enabled }}'
{{- if .Values.homeassistant.discoveryPrefix }}
- name: HOMEASSISTANT_DISCOVERY_PREFIX
value: {{ .Values.homeassistant.discoveryPrefix }}
{{- end }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 12 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 12 }}
{{- end }}


@@ -0,0 +1,20 @@
---
{{- if not (.Values.existingSecretName) }}
apiVersion: v1
kind: Secret
metadata:
name: aaisp2mqtt-secret
labels:
app.kubernetes.io/name: {{ include "aaisp2mqtt.name" . }}
helm.sh/chart: {{ include "aaisp2mqtt.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ include "aaisp2mqtt.name" . }}
type: Opaque
data:
aaisp.username: {{ .Values.aaisp.username | b64enc }}
aaisp.password: {{ .Values.aaisp.password | b64enc }}
{{- if .Values.mqtt.authenticated }}
mqtt.username: {{ .Values.mqtt.username | b64enc }}
mqtt.password: {{ .Values.mqtt.password | b64enc }}
{{- end }}
{{- end }}


@@ -0,0 +1,61 @@
# Default values for aaisp2mqtt.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nikdoof/aaisp2mqtt
tag: 0.3.1
pullPolicy: IfNotPresent
# pullSecrets: []
nameOverride: ""
fullnameOverride: ""
## Use a pre-existing secret for login information
##
# existingSecretName: existing-secret
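## If supplying one out of band, the key names are assumed to match those
## written by this chart's templates/secret.yaml, e.g.:
##   kubectl create secret generic existing-secret \
##     --from-literal=aaisp.username=user1@a \
##     --from-literal=aaisp.password=changeme
## (add mqtt.username / mqtt.password keys when mqtt.authenticated is true)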
## Connection details
##
aaisp: {}
# username: user1@a
# password: password
mqtt:
# broker: localhost
port: 1883
authenticated: false
# username: kube
# password: kube
# topicPrefix: aaisp
homeassistant:
enabled: false
# discoveryPrefix: homeassistant
cronjob:
schedule: "*/10 * * * *"
successfulJobsHistoryLimit: 3
failedJobsHistoryLimit: 1
# startingDeadlineSeconds: 10
concurrencyPolicy: Allow
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}


@@ -2,15 +2,14 @@ apiVersion: v1
appVersion: 0.6.6-ls58
description: A simple web viewer for Calibre libraries
name: calibre-web
version: 1.1.2
version: 1.1.3
keywords:
- calibre-web
- calibre
home: https://github.com/nikdoof/home-k8s-flux/tree/master/charts/calibre-web
home: https://github.com/janeczku/calibre-web
icon: https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/calibre-web-icon.png
sources:
- https://hub.docker.com/r/linuxserver/calibre-web/
- https://github.com/janeczku/calibre-web
maintainers:
- name: nikdoof
email: andy@tensixtyone.com


@@ -5,7 +5,8 @@ This is a helm chart for [calibre-web](https://calibre-web.com/) leveraging the
## TL;DR;
```shell
$ helm install ./calibre-web
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install calibre-web
```
## Installing the Chart
@@ -13,7 +14,7 @@ $ helm install ./calibre-web
To install the chart with the release name `my-release`:
```console
helm install --name my-release ./calibre-web
helm install --name my-release calibre-web
```
## Uninstalling the Chart
@@ -101,4 +102,4 @@ If you get `Error: rendered manifests contain a resource that already exists. Un
---
Read through the [values.yaml](https://github.com/nikdoof/home-k8s-flux/blob/master/charts/calibre-web/values.yaml) file. It has several commented out suggested values.
Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/calibre-web/values.yaml) file. It has several commented out suggested values.


@@ -1,14 +1,13 @@
apiVersion: v1
description: Deluge is a Python BitTorrent client based on libtorrent
name: deluge
version: 1.1.0
version: 1.2.0
keywords:
- deluge
- libtorrent
home: https://github.com/nikdoof/helm-charts/charts/deluge
home: https://github.com/deluge-torrent/deluge
sources:
- https://hub.docker.com/r/linuxserver/deluge/
- https://github.com/deluge-torrent/deluge
dependencies: []
maintainers:
- name: nikdoof


@@ -5,7 +5,8 @@ This is a helm chart for [deluge](https://deluge.com/) leveraging the [Linuxserv
## TL;DR;
```shell
$ helm install ./deluge
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install deluge
```
## Installing the Chart
@@ -13,7 +14,7 @@ $ helm install ./deluge
To install the chart with the release name `my-release`:
```console
helm install --name my-release ./deluge
helm install --name my-release deluge
```
## Uninstalling the Chart
@@ -105,4 +106,4 @@ If you get `Error: rendered manifests contain a resource that already exists. Un
---
Read through the [values.yaml](https://github.com/nikdoof/home-k8s-flux/blob/master/charts/deluge/values.yaml) file. It has several commented out suggested values.
Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/deluge/values.yaml) file. It has several commented out suggested values.
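For instance, a minimal override file might enable the BitTorrent service and a DockerMod (keys taken from this chart's values.yaml; the mod name is the one suggested there):
```console
cat <<EOF > my-values.yaml
dockerMods: linuxserver/deluge:ssh
btservice:
  enabled: true
EOF
helm install --name my-release -f my-values.yaml deluge
```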


@@ -0,0 +1 @@
---


@@ -1,3 +1,5 @@
---
{{ if .Values.btservice.enabled }}
apiVersion: v1
kind: Service
metadata:
@@ -40,21 +42,14 @@ spec:
externalTrafficPolicy: {{ .Values.btservice.externalTrafficPolicy }}
{{- end }}
ports:
- name: daemon
port: 58846
protocol: TCP
targetPort: daemon
{{ if (eq .Values.btservice.type "NodePort") }}
nodePort: 58846
{{ end }}
- name: bt-tcp
port: 58946
protocol: TCP
targetPort: bt-tcp
{{ if (eq .Values.btservice.type "NodePort") }}
nodePort: 58946
nodePort: {{ default 30846 .Values.btservice.nodePort }}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "deluge.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{ end }}


@@ -1,3 +1,5 @@
---
{{ if .Values.btservice.enabled }}
apiVersion: v1
kind: Service
metadata:
@@ -45,9 +47,9 @@ spec:
protocol: UDP
targetPort: bt-udp
{{ if (eq .Values.btservice.type "NodePort") }}
nodePort: 58946
nodePort: {{ default 30846 .Values.btservice.nodePort }}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "deluge.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{ end }}


@@ -0,0 +1,53 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "deluge.fullname" . }}-daemon
labels:
app.kubernetes.io/name: {{ include "deluge.name" . }}
helm.sh/chart: {{ include "deluge.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.daemonservice.labels }}
{{ toYaml .Values.daemonservice.labels | indent 4 }}
{{- end }}
{{- with .Values.daemonservice.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.daemonservice.type "ClusterIP") (empty .Values.daemonservice.type)) }}
type: ClusterIP
{{- if .Values.daemonservice.clusterIP }}
clusterIP: {{ .Values.daemonservice.clusterIP }}
{{end}}
{{- else if eq .Values.daemonservice.type "LoadBalancer" }}
type: {{ .Values.daemonservice.type }}
{{- if .Values.daemonservice.loadBalancerIP }}
loadBalancerIP: {{ .Values.daemonservice.loadBalancerIP }}
{{- end }}
{{- if .Values.daemonservice.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.daemonservice.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.daemonservice.type }}
{{- end }}
{{- if .Values.daemonservice.externalIPs }}
externalIPs:
{{ toYaml .Values.daemonservice.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.daemonservice.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.daemonservice.externalTrafficPolicy }}
{{- end }}
ports:
- name: daemon
port: 58846
protocol: TCP
targetPort: daemon
{{ if (eq .Values.daemonservice.type "NodePort") }}
nodePort: {{ default 30846 .Values.daemonservice.nodePort }}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "deluge.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}


@@ -70,7 +70,7 @@ spec:
value: "{{ .Values.puid }}"
- name: PGID
value: "{{ .Values.pgid }}"
{{- if .values.dockerMods }}
{{- if .Values.dockerMods }}
- name: DOCKER_MODS
value: {{ .Values.dockerMods }}
{{- end }}


@@ -32,6 +32,7 @@ pgid: 1001
##
# dockerMods: linuxserver/deluge:ssh
## Service for the WebUI port
webuiservice:
type: ClusterIP
port: 8112
@@ -53,8 +54,10 @@ webuiservice:
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
btservice:
type: NodePort
## Service for the Daemon port
daemonservice:
type: ClusterIP
port:
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
@@ -73,6 +76,31 @@ btservice:
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
## Service for the BT traffic port - ideally this should be a LoadBalancer due to the high port
## and the need to share TCP and UDP services on the same port.
btservice:
enabled: false
type: LoadBalancer
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
# annotations:
# metallb.universe.tf/allow-shared-ip: deluge
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
ingress:
enabled: false
annotations: {}

charts/idrac6/.helmignore Normal file

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS

charts/idrac6/Chart.yaml Normal file

@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: '0.5'
description: iDRAC 6 web interface and VNC proxy
name: idrac6
version: 0.0.2
keywords:
- dell
- idrac
home: https://github.com/DomiStyle/docker-idrac6
sources:
- https://hub.docker.com/r/domistyle/idrac6/
maintainers:
- name: nikdoof
email: andy@tensixtyone.com

charts/idrac6/OWNERS Normal file

@@ -0,0 +1,4 @@
approvers:
- nikdoof
reviewers:
- nikdoof

charts/idrac6/README.md Normal file

@@ -0,0 +1,32 @@
# idrac6
This is a helm chart for the iDRAC6 proxy Docker image. This chart is heavily based on the format used by [billimek](https://github.com/billimek/) for his collection of media-related [charts](https://github.com/billimek/billimek-charts/).
## TL;DR;
```shell
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install --set idrac.host=192.168.1.2 --set idrac.username=root --set idrac.password=calvin idrac6
```
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm install --name my-release idrac6
```
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release --purge
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/idrac6/values.yaml) file. It has several commented out suggested values.
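As a sketch, the connection details can also live in a values file rather than `--set` flags (keys taken from this chart's values.yaml; the host and credentials are placeholders):
```console
cat <<EOF > my-values.yaml
idrac:
  host: 192.168.1.2
  username: root
  password: calvin
EOF
helm install --name my-release -f my-values.yaml idrac6
```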


@@ -0,0 +1,5 @@
---
idrac:
host: test
username: root
password: calvin


@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "idrac6.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "idrac6.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "idrac6.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}


@@ -0,0 +1,28 @@
{{- if and .Values.persistence.app.enabled (not .Values.persistence.app.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "idrac6.fullname" . }}-app
{{- if .Values.persistence.app.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.app.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.app.size | quote }}
{{- if .Values.persistence.app.storageClass }}
{{- if (eq "-" .Values.persistence.app.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.app.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}


@@ -0,0 +1,144 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "idrac6.fullname" . }}
{{- if .Values.deploymentAnnotations }}
annotations:
{{- range $key, $value := .Values.deploymentAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
replicas: 1
revisionHistoryLimit: 3
strategy:
type: {{ .Values.strategyType }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Values.podAnnotations }}
annotations:
{{- range $key, $value := .Values.podAnnotations }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 5800
protocol: TCP
livenessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
readinessProbe:
tcpSocket:
port: http
initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
env:
- name: TZ
value: "{{ .Values.timezone }}"
- name: USER_ID
value: "{{ .Values.puid }}"
- name: GROUP_ID
value: "{{ .Values.pgid }}"
- name: IDRAC_HOST
valueFrom:
secretKeyRef:
name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
key: idrac.host
- name: IDRAC_USER
valueFrom:
secretKeyRef:
name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
key: idrac.username
- name: IDRAC_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
key: idrac.password
- name: IDRAC_PORT
value: "{{ .Values.idrac.port }}"
- name: IDRAC_KEYCODE_HACK
value: "{{ .Values.idrac.keycode_hack }}"
volumeMounts:
- mountPath: /app
name: app
{{- if .Values.persistence.app.subPath }}
subPath: "{{ .Values.persistence.app.subPath }}"
{{- end }}
- mountPath: /vmedia
name: vmedia
{{- if .Values.persistence.vmedia.subPath }}
subPath: {{ .Values.persistence.vmedia.subPath }}
{{- end }}
- mountPath: /screenshots
name: screenshots
{{- if .Values.persistence.screenshots.subPath }}
subPath: {{ .Values.persistence.screenshots.subPath }}
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
mountPath: {{ .mountPath }}
readOnly: {{ .readOnly }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 12 }}
volumes:
- name: app
{{- if .Values.persistence.app.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.app.existingClaim }}{{ .Values.persistence.app.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-app{{- end }}
{{- else }}
emptyDir: {}
{{ end }}
- name: vmedia
{{- if .Values.persistence.vmedia.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.vmedia.existingClaim }}{{ .Values.persistence.vmedia.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-vmedia{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
- name: screenshots
{{- if .Values.persistence.screenshots.enabled }}
persistentVolumeClaim:
claimName: {{ if .Values.persistence.screenshots.existingClaim }}{{ .Values.persistence.screenshots.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-screenshots{{- end }}
{{- else }}
emptyDir: {}
{{- end }}
{{- range .Values.persistence.extraExistingClaimMounts }}
- name: {{ .name }}
persistentVolumeClaim:
claimName: {{ .existingClaim }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}


@@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "idrac6.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- with .Values.ingress.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ . | quote }}
http:
paths:
- path: {{ $ingressPath }}
backend:
serviceName: {{ $fullName }}
servicePort: http
{{- end }}
{{- end }}


@@ -0,0 +1,28 @@
{{- if and .Values.persistence.screenshots.enabled (not .Values.persistence.screenshots.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "idrac6.fullname" . }}-screenshots
{{- if .Values.persistence.screenshots.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.screenshots.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.screenshots.size | quote }}
{{- if .Values.persistence.screenshots.storageClass }}
{{- if (eq "-" .Values.persistence.screenshots.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.screenshots.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}


@@ -0,0 +1,17 @@
---
{{- if not (.Values.existingSecretName) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "idrac6.fullname" . }}-secret
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ include "idrac6.name" . }}
type: Opaque
data:
idrac.host: {{ .Values.idrac.host | b64enc }}
idrac.username: {{ .Values.idrac.username | b64enc }}
idrac.password: {{ .Values.idrac.password | b64enc }}
{{- end }}


@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "idrac6.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
{{- end }}
{{- with .Values.service.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
type: ClusterIP
{{- if .Values.service.clusterIP }}
clusterIP: {{ .Values.service.clusterIP }}
{{end}}
{{- else if eq .Values.service.type "LoadBalancer" }}
type: {{ .Values.service.type }}
{{- if .Values.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
{{- if .Values.service.loadBalancerSourceRanges }}
loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
{{- end -}}
{{- else }}
type: {{ .Values.service.type }}
{{- end }}
{{- if .Values.service.externalIPs }}
externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
{{- end }}
{{- if .Values.service.externalTrafficPolicy }}
externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
{{- end }}
ports:
- name: http
port: {{ .Values.service.port }}
protocol: TCP
targetPort: http
{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
nodePort: {{.Values.service.nodePort}}
{{ end }}
selector:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}


@@ -0,0 +1,28 @@
{{- if and .Values.persistence.vmedia.enabled (not .Values.persistence.vmedia.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "idrac6.fullname" . }}-vmedia
{{- if .Values.persistence.vmedia.skipuninstall }}
annotations:
"helm.sh/resource-policy": keep
{{- end }}
labels:
app.kubernetes.io/name: {{ include "idrac6.name" . }}
helm.sh/chart: {{ include "idrac6.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
accessModes:
- {{ .Values.persistence.vmedia.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.vmedia.size | quote }}
{{- if .Values.persistence.vmedia.storageClass }}
{{- if (eq "-" .Values.persistence.vmedia.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.vmedia.storageClass }}"
{{- end }}
{{- end }}
{{- end -}}

charts/idrac6/values.yaml Normal file

@@ -0,0 +1,171 @@
# Default values for idrac6.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: domistyle/idrac6
tag: v0.5
pullPolicy: IfNotPresent
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
# Probes configuration
probes:
liveness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
nameOverride: ""
fullnameOverride: ""
timezone: UTC
puid: 1000
pgid: 1000
# Existing secret, overrides idrac values
# existingSecretName: test
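# A supplied secret is assumed to carry the same keys this chart's
# templates/secret.yaml writes (idrac.host, idrac.username, idrac.password), e.g.:
#   kubectl create secret generic test \
#     --from-literal=idrac.host=192.168.1.2 \
#     --from-literal=idrac.username=root \
#     --from-literal=idrac.password=calvin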
# iDRAC connection details
idrac:
host:
username: root
password: calvin
port: 443
keycode_hack: false
service:
type: ClusterIP
port: 5800
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
ingress:
enabled: false
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
persistence:
app:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
vmedia:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
screenshots:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
extraExistingClaimMounts:
[]
# - name: external-mount
# mountPath: /srv/external-mount
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# readOnly: true
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
deploymentAnnotations: {}


@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj


@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: 3.1.0
description: nfs-client is an automatic provisioner that uses your *already configured* NFS server, automatically creating Persistent Volumes.
name: nfs-client-provisioner
home: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
version: 1.2.13
sources:
- https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
keywords:
- nfs
- storage
maintainers:
- name: nikdoof
email: andy@tensixtyone.com


@@ -0,0 +1,73 @@
# nfs-client-provisioner
The [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) is an automatic provisioner for Kubernetes that uses your *already configured* NFS server, automatically creating Persistent Volumes.
## TL;DR;
```console
$ helm install --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
```
For **arm** deployments, override the image repository: `--set image.repository=quay.io/external_storage/nfs-client-provisioner-arm`
## Introduction
This chart installs a custom [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) into a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It also installs an [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) into the cluster, which dynamically creates persistent volumes from a single NFS share.
## Prerequisites
- Kubernetes 1.9+
- Existing NFS Share
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm install --name my-release --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
```
The command deploys the given storage class in the default configuration. It can be used afterwards to provision persistent volumes. The [configuration](#configuration) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of this chart and their default values.
| Parameter | Description | Default |
| ----------------------------------- | ----------------------------------------------------------- | ------------------------------------------------- |
| `replicaCount` | Number of provisioner instances to deploy | `1` |
| `strategyType` | Specifies the strategy used to replace old Pods by new ones | `Recreate` |
| `image.repository` | Provisioner image | `quay.io/external_storage/nfs-client-provisioner` |
| `image.tag` | Version of provisioner image | `v3.1.0-k8s1.11` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `storageClass.name` | Name of the storageClass | `nfs-client` |
| `storageClass.defaultClass` | Set as the default StorageClass | `false` |
| `storageClass.allowVolumeExpansion` | Allow expanding the volume | `true` |
| `storageClass.reclaimPolicy` | Method used to reclaim an obsoleted volume | `Delete` |
| `storageClass.provisionerName` | Name of the provisioner | null |
| `storageClass.archiveOnDelete` | Archive pvc when deleting | `true` |
| `storageClass.accessModes` | Set access mode for PV | `ReadWriteOnce` |
| `nfs.server` | Hostname of the NFS server | null (ip or hostname) |
| `nfs.path` | Basepath of the mount point to be used | `/ifs/kubernetes` |
| `nfs.mountOptions` | Mount options (e.g. 'nfsvers=3') | null |
| `resources` | Resources required (e.g. CPU, memory) | `{}` |
| `rbac.create` | Use Role-based Access Control | `true` |
| `podSecurityPolicy.enabled` | Create & use Pod Security Policy resources | `false` |
| `priorityClassName` | Set pod priorityClassName | null |
| `serviceAccount.create` | Should we create a ServiceAccount | `true` |
| `serviceAccount.name` | Name of the ServiceAccount to use | null |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `affinity` | Affinity settings | `{}` |
| `tolerations` | List of node taints to tolerate | `[]` |
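As a quick smoke test after installation, a claim that requests the class by name (assuming `storageClass.name` was left at its default of `nfs-client`) should be provisioned and bound automatically:
```console
$ cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Mi
EOF
$ kubectl get pvc test-claim
```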


@@ -0,0 +1,5 @@
nfs:
server: 127.0.0.1
podSecurityPolicy:
enabled: true
buildMode: true


@@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "nfs-client-provisioner.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "nfs-client-provisioner.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nfs-client-provisioner.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "nfs-client-provisioner.provisionerName" -}}
{{- if .Values.storageClass.provisionerName -}}
{{- printf .Values.storageClass.provisionerName -}}
{{- else -}}
cluster.local/{{ template "nfs-client-provisioner.fullname" . -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "nfs-client-provisioner.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "nfs-client-provisioner.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the appropriate apiVersion for podSecurityPolicy.
*/}}
{{- define "podSecurityPolicy.apiVersion" -}}
{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "policy/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}


@@ -0,0 +1,30 @@
{{- if .Values.rbac.create }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ template "nfs-client-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "nfs-client-provisioner.fullname" . }}-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
{{- if .Values.podSecurityPolicy.enabled }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}]
{{- end }}
{{- end }}


@@ -0,0 +1,19 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ template "nfs-client-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: run-{{ template "nfs-client-provisioner.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: ClusterRole
name: {{ template "nfs-client-provisioner.fullname" . }}-runner
apiGroup: rbac.authorization.k8s.io
{{- end }}


@@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "nfs-client-provisioner.fullname" . }}
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ template "nfs-client-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
replicas: {{ .Values.replicaCount }}
strategy:
type: {{ .Values.strategyType }}
selector:
matchLabels:
app: {{ template "nfs-client-provisioner.name" . }}
release: {{ .Release.Name }}
template:
metadata:
annotations:
{{- if and (.Values.tolerations) (semverCompare "<1.6-0" .Capabilities.KubeVersion.GitVersion) }}
scheduler.alpha.kubernetes.io/tolerations: '{{ toJson .Values.tolerations }}'
{{- end }}
labels:
app: {{ template "nfs-client-provisioner.name" . }}
release: {{ .Release.Name }}
spec:
serviceAccountName: {{ template "nfs-client-provisioner.serviceAccountName" . }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName | quote }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: {{ template "nfs-client-provisioner.provisionerName" . }}
- name: NFS_SERVER
value: {{ .Values.nfs.server }}
- name: NFS_PATH
value: {{ .Values.nfs.path }}
{{- with .Values.resources }}
resources:
{{ toYaml . | indent 12 }}
{{- end }}
volumes:
- name: nfs-client-root
{{- if .Values.buildMode }}
emptyDir: {}
{{- else if .Values.nfs.mountOptions }}
persistentVolumeClaim:
claimName: pvc-{{ template "nfs-client-provisioner.fullname" . }}
{{- else }}
nfs:
server: {{ .Values.nfs.server }}
path: {{ .Values.nfs.path }}
{{- end }}
{{- if and (.Values.tolerations) (semverCompare "^1.6-0" .Capabilities.KubeVersion.GitVersion) }}
tolerations:
{{ toYaml .Values.tolerations | indent 6 }}
{{- end }}


@@ -0,0 +1,25 @@
{{ if .Values.nfs.mountOptions -}}
apiVersion: v1
kind: PersistentVolume
metadata:
name: pv-{{ template "nfs-client-provisioner.fullname" . }}
labels:
nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }}
spec:
capacity:
storage: 10Mi
volumeMode: Filesystem
accessModes:
- {{ .Values.storageClass.accessModes }}
persistentVolumeReclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
storageClassName: ""
{{- if .Values.nfs.mountOptions }}
mountOptions:
{{- range .Values.nfs.mountOptions }}
- {{ . }}
{{- end }}
{{- end }}
nfs:
server: {{ .Values.nfs.server }}
path: {{ .Values.nfs.path }}
{{ end -}}


@@ -0,0 +1,17 @@
{{ if .Values.nfs.mountOptions -}}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: pvc-{{ template "nfs-client-provisioner.fullname" . }}
spec:
accessModes:
- {{ .Values.storageClass.accessModes }}
volumeMode: Filesystem
storageClassName: ""
selector:
matchLabels:
nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }}
resources:
requests:
storage: 10Mi
{{ end -}}


@@ -0,0 +1,31 @@
{{- if .Values.podSecurityPolicy.enabled }}
apiVersion: {{ template "podSecurityPolicy.apiVersion" . }}
kind: PodSecurityPolicy
metadata:
name: {{ template "nfs-client-provisioner.fullname" . }}
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
spec:
privileged: false
allowPrivilegeEscalation: false
requiredDropCapabilities:
- ALL
volumes:
- 'secret'
- 'nfs'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'RunAsAny'
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'RunAsAny'
fsGroup:
rule: 'RunAsAny'
readOnlyRootFilesystem: false
{{- end }}


@@ -0,0 +1,21 @@
{{- if .Values.rbac.create }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ template "nfs-client-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
{{- if .Values.podSecurityPolicy.enabled }}
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}]
{{- end }}
{{- end }}


@@ -0,0 +1,19 @@
{{- if .Values.rbac.create }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ template "nfs-client-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
apiGroup: rbac.authorization.k8s.io
{{- end }}


@@ -0,0 +1,11 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: {{ template "nfs-client-provisioner.name" . }}
chart: {{ template "nfs-client-provisioner.chart" . }}
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
{{- end -}}


@@ -0,0 +1,26 @@
{{ if .Values.storageClass.create -}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: {{ .Values.storageClass.name }}
{{- if .Values.storageClass.defaultClass }}
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
{{- end }}
provisioner: {{ template "nfs-client-provisioner.provisionerName" . }}
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
parameters:
  archiveOnDelete: "{{ .Values.storageClass.archiveOnDelete }}"
{{- if .Values.nfs.mountOptions }}
mountOptions:
  {{- range .Values.nfs.mountOptions }}
  - {{ . }}
  {{- end }}
{{- end }}
{{ end -}}


@@ -0,0 +1,78 @@
# Default values for nfs-client-provisioner.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1
strategyType: Recreate

image:
  repository: quay.io/external_storage/nfs-client-provisioner
  tag: v3.1.0-k8s1.11
  pullPolicy: IfNotPresent

nfs:
  server:
  path: /ifs/kubernetes
  mountOptions:

# For creating the StorageClass automatically:
storageClass:
  create: true

  # Set a provisioner name. If unset, a name will be generated.
  # provisionerName:

  # Set StorageClass as the default StorageClass
  # Ignored if storageClass.create is false
  defaultClass: false

  # Set a StorageClass name
  # Ignored if storageClass.create is false
  name: nfs-client

  # Allow volume to be expanded dynamically
  allowVolumeExpansion: true

  # Method used to reclaim an obsoleted volume
  reclaimPolicy: Delete

  # When set to false, your PVs will not be archived by the provisioner upon deletion of the PVC.
  archiveOnDelete: true

  # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
  accessModes: ReadWriteOnce

## For RBAC support:
rbac:
  # Specifies whether RBAC resources should be created
  create: true

# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
  enabled: false

## Set pod priorityClassName
# priorityClassName: ""

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true
  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
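With these defaults, dynamically provisioned volumes are requested through the `nfs-client` StorageClass created above. A minimal sketch of a claim that exercises the provisioner (the claim name and size are arbitrary):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: test-claim          # arbitrary name
    spec:
      storageClassName: nfs-client
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 1Mi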


@@ -2,4 +2,15 @@ apiVersion: v1
appVersion: "0.8.5"
description: Collect VMware vCenter and ESXi performance metrics and send them to InfluxDB
name: vsphere-influxdb-go
version: 0.1.0
version: 0.2.1
keywords:
  - vsphere
  - influxdb
  - esx
home: https://github.com/Oxalide/vsphere-influxdb-go
sources:
  - https://hub.docker.com/r/nikdoof/vsphere-influxdb-go/
  - https://github.com/Oxalide/vsphere-influxdb-go
maintainers:
  - name: nikdoof
    email: andy@tensixtyone.com


@@ -0,0 +1,7 @@
vsphere:
  hostname: vcenter
  username: test
  password: test
influxdb:
  hostname: test


@@ -1,80 +1,5 @@
---
apiVersion: v1
data:
config.json: "{\r\n \"Domain\": \".lab\",\r\n \"RemoveHostDomainName\": false,\r\n
\ \"Interval\": 60,\r\n \"VCenters\": [],\r\n \"InfluxDB\": {\r\n \"Prefix\":
\"vsphere_\",\r\n \"Hostname\": \"http://influxdb:8086\",\r\n \"Database\":
\"vmware_performance\"\r\n },\r\n \"Metrics\": [\r\n {\r\n \"ObjectType\":
[\r\n \"VirtualMachine\",\r\n \"HostSystem\"\r\n
\ ],\r\n \"Definition\": [\r\n {\r\n \"Metric\":
\"cpu.usage.average\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"cpu.usage.maximum\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"cpu.usagemhz.average\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"cpu.usagemhz.maximum\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"cpu.wait.summation\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"cpu.system.summation\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"cpu.ready.summation\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"mem.usage.average\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"mem.usage.maximum\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"mem.consumed.average\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"mem.consumed.maximum\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"mem.active.average\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"mem.active.maximum\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"mem.vmmemctl.average\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"mem.vmmemctl.maximum\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"mem.totalCapacity.average\",\r\n \"Instances\": \"*\"\r\n
\ },\r\n {\r\n \"Metric\": \"net.packetsRx.summation\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"net.packetsTx.summation\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"net.throughput.usage.average\",\r\n \"Instances\": \"*\"\r\n
\ },\r\n {\r\n \"Metric\": \"net.received.average\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"net.transmitted.average\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"net.throughput.usage.nfs.average\",\r\n \"Instances\": \"*\"\r\n
\ },\r\n {\r\n \"Metric\": \"datastore.numberReadAveraged.average\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"datastore.numberWriteAveraged.average\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"datastore.read.average\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"datastore.write.average\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"datastore.totalReadLatency.average\",\r\n
\ \"Instances\": \"*\"\r\n },\r\n {\r\n
\ \"Metric\": \"datastore.totalWriteLatency.average\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"mem.capacity.provisioned.average\",\r\n \"Instances\": \"*\"\r\n
\ },\r\n {\r\n \"Metric\": \"cpu.corecount.provisioned.average\",\r\n
\ \"Instances\": \"*\"\r\n }\r\n ]\r\n
\ },\r\n {\r\n \"ObjectType\": [\r\n \"VirtualMachine\"\r\n
\ ],\r\n \"Definition\": [\r\n {\r\n \"Metric\":
\"datastore.datastoreVMObservedLatency.latest\",\r\n \"Instances\":
\"*\"\r\n }\r\n ]\r\n },\r\n {\r\n \"ObjectType\":
[\r\n \"HostSystem\"\r\n ],\r\n \"Definition\":
[\r\n {\r\n \"Metric\": \"disk.maxTotalLatency.latest\",\r\n
\ \"Instances\": \"\"\r\n },\r\n {\r\n
\ \"Metric\": \"disk.numberReadAveraged.average\",\r\n \"Instances\":
\"*\"\r\n },\r\n {\r\n \"Metric\":
\"disk.numberWriteAveraged.average\",\r\n \"Instances\": \"*\"\r\n
\ },\r\n {\r\n \"Metric\": \"net.throughput.contention.summation\",\r\n
\ \"Instances\": \"*\"\r\n }\r\n ]\r\n
\ },\r\n {\r\n \"ObjectType\": [\r\n \"Datastore\"\r\n
\ ],\r\n \"Definition\": [\r\n {\r\n \"Metric\":
\"disk.capacity.latest\",\r\n \"Instances\": \"*\"\r\n },\r\n
\ {\r\n \"Metric\": \"disk.used.latest\",\r\n
\ \"Instances\": \"*\"\r\n }\r\n ]\r\n
\ }\r\n ]\r\n}"
kind: ConfigMap
metadata:
  name: vsphere-influxdb-go-config
@@ -83,4 +8,6 @@ metadata:
    helm.sh/chart: {{ include "vsphere-influxdb-go.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ include "vsphere-influxdb-go.name" . }}
data:
  config.json: |-
{{ toJson .Values.config | indent 4}}
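Because the ConfigMap now just serializes `.Values.config` with `toJson`, retargeting the collector is a plain values override; Helm deep-merges maps, so only the keys being changed need restating. A sketch, with a placeholder in-cluster InfluxDB endpoint:

    config:
      InfluxDB:
        Hostname: http://influxdb.monitoring.svc:8086   # placeholder endpoint
        Database: vmware_performance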


@@ -37,6 +37,105 @@ cronjob:
  failedJobsHistoryLimit: 1
  # startingDeadlineSeconds: 10

## Configuration data that is written to vsphere-influxdb-go.
## If you need additional metrics, this is where you can add them
## (see the example after this file).
##
config:
  Domain: ".lab"
  RemoveHostDomainName: false
  Interval: 60
  VCenters: []
  InfluxDB:
    Prefix: vsphere_
    Hostname: http://influxdb:8086
    Database: vmware_performance
  Metrics:
    - ObjectType:
        - VirtualMachine
        - HostSystem
      Definition:
        - Metric: cpu.usage.average
          Instances: "*"
        - Metric: cpu.usage.maximum
          Instances: "*"
        - Metric: cpu.usagemhz.average
          Instances: "*"
        - Metric: cpu.usagemhz.maximum
          Instances: "*"
        - Metric: cpu.wait.summation
          Instances: "*"
        - Metric: cpu.system.summation
          Instances: "*"
        - Metric: cpu.ready.summation
          Instances: "*"
        - Metric: mem.usage.average
          Instances: "*"
        - Metric: mem.usage.maximum
          Instances: "*"
        - Metric: mem.consumed.average
          Instances: "*"
        - Metric: mem.consumed.maximum
          Instances: "*"
        - Metric: mem.active.average
          Instances: "*"
        - Metric: mem.active.maximum
          Instances: "*"
        - Metric: mem.vmmemctl.average
          Instances: "*"
        - Metric: mem.vmmemctl.maximum
          Instances: "*"
        - Metric: mem.totalCapacity.average
          Instances: "*"
        - Metric: net.packetsRx.summation
          Instances: "*"
        - Metric: net.packetsTx.summation
          Instances: "*"
        - Metric: net.throughput.usage.average
          Instances: "*"
        - Metric: net.received.average
          Instances: "*"
        - Metric: net.transmitted.average
          Instances: "*"
        - Metric: net.throughput.usage.nfs.average
          Instances: "*"
        - Metric: datastore.numberReadAveraged.average
          Instances: "*"
        - Metric: datastore.numberWriteAveraged.average
          Instances: "*"
        - Metric: datastore.read.average
          Instances: "*"
        - Metric: datastore.write.average
          Instances: "*"
        - Metric: datastore.totalReadLatency.average
          Instances: "*"
        - Metric: datastore.totalWriteLatency.average
          Instances: "*"
        - Metric: mem.capacity.provisioned.average
          Instances: "*"
        - Metric: cpu.corecount.provisioned.average
          Instances: "*"
    - ObjectType:
        - VirtualMachine
      Definition:
        - Metric: datastore.datastoreVMObservedLatency.latest
          Instances: "*"
    - ObjectType:
        - HostSystem
      Definition:
        - Metric: disk.maxTotalLatency.latest
          Instances: ''
        - Metric: disk.numberReadAveraged.average
          Instances: "*"
        - Metric: disk.numberWriteAveraged.average
          Instances: "*"
        - Metric: net.throughput.contention.summation
          Instances: "*"
    - ObjectType:
        - Datastore
      Definition:
        - Metric: disk.capacity.latest
          Instances: "*"
        - Metric: disk.used.latest
          Instances: "*"

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
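The example promised in the comment above: Helm replaces lists wholesale rather than merging them, so an override that adds a metric must restate the entire `Metrics` list. Only the appended entry is shown here for brevity, and `disk.provisioned.latest` is an illustrative metric name, not something the chart ships with:

    config:
      Metrics:
        # ...restate all of the default entries above, then append:
        - ObjectType:
            - Datastore
          Definition:
            - Metric: disk.provisioned.latest   # illustrative extra metric
              Instances: "*"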


@@ -0,0 +1,22 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -0,0 +1,15 @@
apiVersion: v1
appVersion: "0.3.178"
description: A web GUI for Zigbee2Mqtt
name: zigbee2mqttassistant
version: 0.1.1
keywords:
  - zigbee
  - mqtt
  - homeassistant
home: https://github.com/yllibed/Zigbee2MqttAssistant
sources:
  - https://hub.docker.com/r/carldebilly/zigbee2mqttassistant
maintainers:
  - name: nikdoof
    email: andy@tensixtyone.com


@@ -0,0 +1,3 @@
---
z2ma:
  broker: mqtt.svc.cluster.local


@@ -0,0 +1,4 @@
---
z2ma:
  broker: mqtt.svc.cluster.local
  existingSecretName: test


@@ -0,0 +1,5 @@
---
z2ma:
  broker: mqtt.svc.cluster.local
  username: test
  password: test2


@@ -0,0 +1,21 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "zigbee2mqttassistant.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "zigbee2mqttassistant.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "zigbee2mqttassistant.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "zigbee2mqttassistant.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl port-forward $POD_NAME 8080:80
{{- end }}


@@ -0,0 +1,56 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "zigbee2mqttassistant.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "zigbee2mqttassistant.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "zigbee2mqttassistant.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "zigbee2mqttassistant.labels" -}}
app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
helm.sh/chart: {{ include "zigbee2mqttassistant.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "zigbee2mqttassistant.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "zigbee2mqttassistant.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
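For reference, hypothetical renderings of the fullname helper above:

    # fullnameOverride: "z2ma"                       -> "z2ma"
    # release "home", chart "zigbee2mqttassistant"   -> "home-zigbee2mqttassistant"
    # release "my-zigbee2mqttassistant"              -> "my-zigbee2mqttassistant" (release already contains the chart name)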


@@ -0,0 +1,73 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "zigbee2mqttassistant.fullname" . }}
  labels:
{{ include "zigbee2mqttassistant.labels" . | indent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
          #livenessProbe:
          #  httpGet:
          #    path: /
          #    port: http
          #readinessProbe:
          #  httpGet:
          #    path: /
          #    port: http
          env:
            - name: TZ
              value: {{ .Values.TZ }}
            - name: Z2MA_SETTINGS__MQTTSERVER
              value: {{ .Values.z2ma.broker }}
            {{- if or .Values.z2ma.existingSecretName (and .Values.z2ma.username .Values.z2ma.password) }}
            - name: Z2MA_SETTINGS__MQTTUSERNAME
              valueFrom:
                secretKeyRef:
                  name: {{ default "zigbee2mqttassistant-secret" .Values.z2ma.existingSecretName }}
                  key: username
            - name: Z2MA_SETTINGS__MQTTPASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ default "zigbee2mqttassistant-secret" .Values.z2ma.existingSecretName }}
                  key: password
            {{- end }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
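When `z2ma.existingSecretName` is set, the `secretKeyRef`s above expect that secret to expose `username` and `password` keys. A hand-written equivalent of what the chart's own secret template produces (the name and credentials are placeholders):

    apiVersion: v1
    kind: Secret
    metadata:
      name: mqtt-secret        # referenced via z2ma.existingSecretName
    type: Opaque
    stringData:                # stringData avoids manual base64 encoding
      username: mqtt-user
      password: mqtt-pass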


@@ -0,0 +1,41 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "zigbee2mqttassistant.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
{{ include "zigbee2mqttassistant.labels" . | indent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
{{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
{{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
        {{- range .paths }}
          - path: {{ . }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
        {{- end }}
  {{- end }}
{{- end }}
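Enabling this ingress is a values change; a sketch matching the `hosts`/`paths` shape defined in values.yaml (the hostname and controller class are placeholders):

    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: nginx   # placeholder controller class
      hosts:
        - host: z2ma.example.local
          paths:
            - /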


@@ -0,0 +1,15 @@
{{- if and (not .Values.z2ma.existingSecretName) .Values.z2ma.username .Values.z2ma.password }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "zigbee2mqttassistant.name" . }}-secret
  labels:
    app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
    helm.sh/chart: {{ include "zigbee2mqttassistant.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ include "zigbee2mqttassistant.name" . }}
type: Opaque
data:
  username: {{ .Values.z2ma.username | b64enc }}
  password: {{ .Values.z2ma.password | b64enc }}
{{- end }}


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "zigbee2mqttassistant.fullname" . }}
  labels:
{{ include "zigbee2mqttassistant.labels" . | indent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}


@@ -0,0 +1,77 @@
# Default values for zigbee2mqttassistant.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: carldebilly/zigbee2mqttassistant
  tag: 0.3.178
  pullPolicy: IfNotPresent

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

TZ: UTC

## Z2MA Settings (more details: https://github.com/yllibed/Zigbee2MqttAssistant/blob/master/README.md)
z2ma: {}
  # broker: mqtt.svc.cluster.local
  ## You can provide your MQTT username and password and a secret will be created...
  ##
  # username: user
  # password: pass
  ## ... or you can provide an existing secret to pull these values from
  ##
  # existingSecretName: mqtt-secret

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 8800

ingress:
  enabled: false
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths: []
  tls: []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

ct.yaml

@@ -0,0 +1,2 @@
chart-dirs:
- charts
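This is the chart-testing (ct) configuration, presumably consumed by the repository's lint/test workflow; `chart-dirs` points it at the charts/ tree. A slightly fuller config is possible; the extra keys below are assumptions based on ct's documented options, not part of this repository:

    chart-dirs:
      - charts
    target-branch: master          # assumed default branch for change detection
    check-version-increment: true  # require a chart version bump with every change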