Mirror of https://github.com/nikdoof/helm-charts.git (synced 2025-12-17 11:59:21 +00:00)

Compare commits: zigbee2mqt...zigbee2mqt (13 commits)

| SHA1 |
|---|
| f99101d00b |
| 1548f606e7 |
| f33c104b1f |
| 1007982a78 |
| bea73339d5 |
| 644cf70b31 |
| d929f18571 |
| ff5fb746ad |
| 6aec140b79 |
| 2bb2c4a7d3 |
| a29c9d7cff |
| 343a41b2e7 |
| c38645c597 |
.github/renovate.json5 (vendored, new file, 43 lines)
@@ -0,0 +1,43 @@
{
  "enabled": true,
  "dependencyDashboard": true,
  "dependencyDashboardTitle": "Renovate Dashboard",
  "suppressNotifications": ["prIgnoreNotification"],
  "rebaseWhen": "conflicted",
  "prConcurrentLimit": 5,
  "helm-values": {
    "enabled": false
  },
  "helmv3": {
    "fileMatch": ["charts/.+/Chart\\.yaml$"]
  },
  "packageRules": [
    {
      "datasources": ["helm"],
      "commitMessageTopic": "Helm chart {{depName}}",
      "separateMinorPatch": true
    },
    {
      "commitMessagePrefix": "[{{{parentDir}}}]",
      "branchTopic": "{{{parentDir}}}-{{{depNameSanitized}}}-{{{newMajor}}}{{#if isPatch}}.{{{newMinor}}}{{/if}}.x{{#if isLockfileUpdate}}-lockfile{{/if}}",
      "updateTypes": ["major"],
      "bumpVersion": "major",
      "labels": ["dependency/major"],
      "excludePackageNames": ["common"],
    },
    {
      "updateTypes": ["minor"],
      "bumpVersion": "minor",
      "labels": ["dependency/minor"],
      "excludePackageNames": ["common"],
      "groupName": ["external minor dep"],
    },
    {
      "updateTypes": ["patch"],
      "bumpVersion": "patch",
      "labels": ["dependency/patch"],
      "excludePackageNames": ["common"],
      "groupName": ["external patch dep"],
    }
  ]
}
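The configuration above can be sanity-checked before it is pushed. A minimal sketch, assuming Node.js is available and that the `renovate-config-validator` helper bundled with the `renovate` npm package behaves as in recent releases:

```shell
# From the repository root: validate .github/renovate.json5 against Renovate's option schema.
# The command exits non-zero if an option is unknown or malformed.
npx --yes --package renovate renovate-config-validator
```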
.github/workflows/lint-test.yaml (vendored, 39 lines changed)
@@ -7,21 +7,36 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
 
+      - name: Set up Helm
+        uses: azure/setup-helm@v1
+        with:
+          version: v3.4.0
 
+      - uses: actions/setup-python@v2
+        with:
+          python-version: 3.7
 
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2.1.0
 
+      - name: Run chart-testing (list-changed)
+        id: list-changed
+        run: |
+          changed=$(ct list-changed)
+          if [[ -n "$changed" ]]; then
+            echo "::set-output name=changed::true"
+          fi
 
       - name: Run chart-testing (lint)
-        id: lint
-        uses: helm/chart-testing-action@v1.0.0-alpha.3
-        with:
-          command: lint
+        run: ct lint
 
       - name: Create kind cluster
-        uses: helm/kind-action@v1.0.0-alpha.3
-        with:
-          install_local_path_provisioner: true
-        if: steps.lint.outputs.changed == 'true'
+        uses: helm/kind-action@v1.2.0
+        if: steps.list-changed.outputs.changed == 'true'
 
       - name: Run chart-testing (install)
-        uses: helm/chart-testing-action@v1.0.0-alpha.3
-        with:
-          command: install
+        run: ct install
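The checks this workflow runs can be reproduced locally before pushing. A minimal sketch, assuming the chart-testing (`ct`) CLI and kind are installed; the target branch name is an assumption:

```shell
# Lint only the charts that changed relative to the target branch.
ct lint --target-branch master

# Install the changed charts into a throwaway kind cluster, mirroring the CI job.
kind create cluster --name chart-testing
ct install --target-branch master
kind delete cluster --name chart-testing
```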
.github/workflows/release.yaml (vendored, 20 lines changed)
@@ -10,28 +10,22 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v1
+        uses: actions/checkout@v2
+        with:
+          fetch-depth: 0
 
       - name: Configure Git
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
 
-      # See https://github.com/helm/chart-releaser-action/issues/6
       - name: Install Helm
-        run: |
-          curl -sSLo get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get
-          chmod 700 get_helm.sh
-          ./get_helm.sh
-          helm init --client-only
-
-      - name: Add dependency chart repos
-        run: |
-          helm repo add stable https://kubernetes-charts.storage.googleapis.com/
-          helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/
+        uses: azure/setup-helm@v1
+        with:
+          version: v3.4.0
 
       - name: Run chart-releaser
-        uses: helm/chart-releaser-action@v1.0.0-alpha.2
+        uses: helm/chart-releaser-action@v1.2.1
         with:
           charts_repo_url: https://nikdoof.github.io/helm-charts
         env:
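Once chart-releaser publishes packages to the GitHub Pages index configured above, the repository is consumed like any other Helm repo; for example:

```shell
# Add the published index and list the charts it serves.
helm repo add nikdoof https://nikdoof.github.io/helm-charts
helm repo update
helm search repo nikdoof
```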
@@ -1,8 +1,8 @@
 apiVersion: v1
-appVersion: "0.3.0"
+appVersion: "0.3.1"
 description: Pulls data from the AAISP CHAOSv2 API into MQTT
 name: aaisp2mqtt
-version: 0.3.0
+version: 0.3.2
 keywords:
   - aaisp
   - mqtt
@@ -18,7 +18,7 @@ spec:
   schedule: {{ .Values.cronjob.schedule | quote }}
   successfulJobsHistoryLimit: {{ .Values.cronjob.successfulJobsHistoryLimit }}
   failedJobsHistoryLimit: {{ .Values.cronjob.failedJobsHistoryLimit }}
-  concurrencyPolicy: Forbid
+  concurrencyPolicy: {{ .Values.cronjob.concurrencyPolicy }}
   {{- if .Values.cronjob.startingDeadlineSeconds }}
   startingDeadlineSeconds: {{ .Values.cronjob.startingDeadlineSeconds }}
   {{- end }}
@@ -6,7 +6,7 @@ replicaCount: 1
 
 image:
   repository: nikdoof/aaisp2mqtt
-  tag: 0.3.0
+  tag: 0.3.1
   pullPolicy: IfNotPresent
 # imagePullSecrets: []
@@ -40,6 +40,7 @@ cronjob:
   successfulJobsHistoryLimit: 3
   failedJobsHistoryLimit: 1
   # startingDeadlineSeconds: 10
+  concurrencyPolicy: Allow
 
 resources: {}
   # We usually recommend not to specify default resources and to leave this as a conscious
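The new `cronjob.concurrencyPolicy` default, like the other cronjob settings touched here, can be overridden per release. A minimal sketch; the repository alias and override values are illustrative:

```shell
# Install aaisp2mqtt with a stricter concurrency policy and a custom schedule.
helm install aaisp2mqtt nikdoof/aaisp2mqtt \
  --set cronjob.concurrencyPolicy=Forbid \
  --set cronjob.schedule="*/15 * * * *" \
  --set cronjob.startingDeadlineSeconds=30
```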
charts/idrac6/.helmignore (new file, 23 lines)
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
# OWNERS file for Kubernetes
OWNERS
charts/idrac6/Chart.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: '0.5'
description: iDRAC 6 web interface and VNC proxy
name: idrac6
version: 0.0.2
keywords:
  - dell
  - idrac
home: https://github.com/DomiStyle/docker-idrac6
sources:
  - https://hub.docker.com/r/linuxserver/calibre-web/
maintainers:
  - name: nikdoof
    email: andy@tensixtyone.com
charts/idrac6/OWNERS (new file, 4 lines)
@@ -0,0 +1,4 @@
approvers:
  - nikdoof
reviewers:
  - nikdoof
charts/idrac6/README.md (new file, 32 lines)
@@ -0,0 +1,32 @@
# idrac6

This is a Helm chart for the iDRAC6 proxy Docker image. This chart is heavily based on the format used by [billimek](https://github.com/billimek/) for his collection of media-related [charts](https://github.com/billimek/billimek-charts/).

## TL;DR;

```shell
$ helm repo add nikdoof https://nikdoof.github.io/helm-charts/
$ helm install --set idrac.host=192.168.1.2 --set idrac.user=root --set idrac.password=calvin idrac6
```

## Installing the Chart

To install the chart with the release name `my-release`:

```console
helm install --name my-release idrac6
```

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
helm delete my-release --purge
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

Read through the [values.yaml](https://github.com/nikdoof/helm-charts/tree/master/charts/idrac6/values.yaml) file. It has several commented-out suggested values.
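The templates below also accept a pre-created credentials secret through `existingSecretName` (keys `idrac.host`, `idrac.username` and `idrac.password`), which keeps the iDRAC password out of values files. A minimal sketch; the secret and release names are placeholders:

```shell
# Create the credentials secret out of band, then point the chart at it.
kubectl create secret generic idrac6-creds \
  --from-literal=idrac.host=192.168.1.2 \
  --from-literal=idrac.username=root \
  --from-literal=idrac.password=calvin

helm install --name my-release nikdoof/idrac6 --set existingSecretName=idrac6-creds
```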
charts/idrac6/ci/test-values.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
idrac:
  host: test
  username: root
  password: calvin
charts/idrac6/templates/_helpers.tpl (new file, 32 lines)
@@ -0,0 +1,32 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "idrac6.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "idrac6.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "idrac6.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
charts/idrac6/templates/app-pvc.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
{{- if and .Values.persistence.app.enabled (not .Values.persistence.app.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "idrac6.fullname" . }}-app
  {{- if .Values.persistence.app.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.app.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.app.size | quote }}
  {{- if .Values.persistence.app.storageClass }}
  {{- if (eq "-" .Values.persistence.app.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.app.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
charts/idrac6/templates/deployment.yaml (new file, 144 lines)
@@ -0,0 +1,144 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "idrac6.fullname" . }}
  {{- if .Values.deploymentAnnotations }}
  annotations:
  {{- range $key, $value := .Values.deploymentAnnotations }}
    {{ $key }}: {{ $value | quote }}
  {{- end }}
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  replicas: 1
  revisionHistoryLimit: 3
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "idrac6.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "idrac6.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      {{- if .Values.podAnnotations }}
      annotations:
      {{- range $key, $value := .Values.podAnnotations }}
        {{ $key }}: {{ $value | quote }}
      {{- end }}
      {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 5800
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.liveness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds }}
          readinessProbe:
            tcpSocket:
              port: http
            initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds }}
            failureThreshold: {{ .Values.probes.readiness.failureThreshold }}
            timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds }}
          env:
            - name: TZ
              value: "{{ .Values.timezone }}"
            - name: USER_ID
              value: "{{ .Values.puid }}"
            - name: GROUP_ID
              value: "{{ .Values.pgid }}"
            - name: IDRAC_HOST
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
                  key: idrac.host
            - name: IDRAC_USER
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
                  key: idrac.username
            - name: IDRAC_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ .Values.existingSecretName | default (printf "%s-secret" (include "idrac6.fullname" .)) }}
                  key: idrac.password
            - name: IDRAC_PORT
              value: "{{ .Values.idrac.port }}"
            - name: IDRAC_KEYCODE_HACK
              value: "{{ .Values.idrac.keycode_hack }}"
          volumeMounts:
            - mountPath: /app
              name: app
              {{- if .Values.persistence.app.subPath }}
              subPath: "{{ .Values.persistence.config.subPath }}"
              {{- end }}
            - mountPath: /vmedia
              name: vmedia
              {{- if .Values.persistence.vmedia.subPath }}
              subPath: {{ .Values.persistence.vmedia.subPath }}
              {{- end }}
            - mountPath: /screenshots
              name: screenshots
              {{- if .Values.persistence.screenshots.subPath }}
              subPath: {{ .Values.persistence.screenshots.subPath }}
              {{- end }}
            {{- range .Values.persistence.extraExistingClaimMounts }}
            - name: {{ .name }}
              mountPath: {{ .mountPath }}
              readOnly: {{ .readOnly }}
            {{- end }}
          resources:
{{ toYaml .Values.resources | indent 12 }}
      volumes:
        - name: app
          {{- if .Values.persistence.app.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.app.existingClaim }}{{ .Values.persistence.app.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-app{{- end }}
          {{- else }}
          emptyDir: {}
          {{ end }}
        - name: vmedia
          {{- if .Values.persistence.vmedia.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.vmedia.existingClaim }}{{ .Values.persistence.vmedia.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-vmedia{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        - name: screenshots
          {{- if .Values.persistence.screenshots.enabled }}
          persistentVolumeClaim:
            claimName: {{ if .Values.persistence.screenshots.existingClaim }}{{ .Values.persistence.screenshots.existingClaim }}{{- else }}{{ template "idrac6.fullname" . }}-screenshots{{- end }}
          {{- else }}
          emptyDir: {}
          {{- end }}
        {{- range .Values.persistence.extraExistingClaimMounts }}
        - name: {{ .name }}
          persistentVolumeClaim:
            claimName: {{ .existingClaim }}
        {{- end }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
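The secret and volume wiring in this Deployment can be inspected without a cluster by rendering the chart locally. A minimal sketch, run from a checkout of this repository with illustrative overrides:

```shell
# Render the idrac6 chart and review the generated manifests.
helm template my-idrac charts/idrac6 \
  --set idrac.host=192.168.1.2 \
  --set persistence.app.enabled=true
```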
charts/idrac6/templates/ingress.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "idrac6.fullname" . -}}
{{- $ingressPath := .Values.ingress.path -}}
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
  {{- with .Values.ingress.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  tls:
  {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
  {{- end }}
  {{- end }}
  rules:
  {{- range .Values.ingress.hosts }}
    - host: {{ . | quote }}
      http:
        paths:
          - path: {{ $ingressPath }}
            backend:
              serviceName: {{ $fullName }}
              servicePort: http
  {{- end }}
{{- end }}
charts/idrac6/templates/screenshots-pvc.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
{{- if and .Values.persistence.screenshots.enabled (not .Values.persistence.screenshots.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "idrac6.fullname" . }}-app
  {{- if .Values.persistence.screenshots.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.screenshots.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.screenshots.size | quote }}
  {{- if .Values.persistence.screenshots.storageClass }}
  {{- if (eq "-" .Values.persistence.screenshots.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.screenshots.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
charts/idrac6/templates/secrets.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
---
{{- if not (.Values.existingSecretName) }}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "idrac6.fullname" . }}-secret
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ include "idrac6.name" . }}
type: Opaque
data:
  idrac.host: {{ .Values.idrac.host | b64enc }}
  idrac.username: {{ .Values.idrac.username | b64enc }}
  idrac.password: {{ .Values.idrac.password | b64enc }}
{{- end }}
charts/idrac6/templates/service.yaml (new file, 52 lines)
@@ -0,0 +1,52 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "idrac6.fullname" . }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    {{- if .Values.service.labels }}
{{ toYaml .Values.service.labels | indent 4 }}
    {{- end }}
  {{- with .Values.service.annotations }}
  annotations:
{{ toYaml . | indent 4 }}
  {{- end }}
spec:
  {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }}
  type: ClusterIP
  {{- if .Values.service.clusterIP }}
  clusterIP: {{ .Values.service.clusterIP }}
  {{end}}
  {{- else if eq .Values.service.type "LoadBalancer" }}
  type: {{ .Values.service.type }}
  {{- if .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  {{- if .Values.service.loadBalancerSourceRanges }}
  loadBalancerSourceRanges:
{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }}
  {{- end -}}
  {{- else }}
  type: {{ .Values.service.type }}
  {{- end }}
  {{- if .Values.service.externalIPs }}
  externalIPs:
{{ toYaml .Values.service.externalIPs | indent 4 }}
  {{- end }}
  {{- if .Values.service.externalTrafficPolicy }}
  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }}
  {{- end }}
  ports:
    - name: http
      port: {{ .Values.service.port }}
      protocol: TCP
      targetPort: http
      {{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }}
      nodePort: {{.Values.service.nodePort}}
      {{ end }}
  selector:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
charts/idrac6/templates/vmedia-pvc.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
{{- if and .Values.persistence.vmedia.enabled (not .Values.persistence.vmedia.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "idrac6.fullname" . }}-app
  {{- if .Values.persistence.vmedia.skipuninstall }}
  annotations:
    "helm.sh/resource-policy": keep
  {{- end }}
  labels:
    app.kubernetes.io/name: {{ include "idrac6.name" . }}
    helm.sh/chart: {{ include "idrac6.chart" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
  accessModes:
    - {{ .Values.persistence.vmedia.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.vmedia.size | quote }}
  {{- if .Values.persistence.vmedia.storageClass }}
  {{- if (eq "-" .Values.persistence.vmedia.storageClass) }}
  storageClassName: ""
  {{- else }}
  storageClassName: "{{ .Values.persistence.vmedia.storageClass }}"
  {{- end }}
  {{- end }}
{{- end -}}
charts/idrac6/values.yaml (new file, 171 lines)
@@ -0,0 +1,171 @@
# Default values for calibre-web.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: domistyle/idrac6
  tag: v0.5
  pullPolicy: IfNotPresent

# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate

# Probes configuration
probes:
  liveness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10
  readiness:
    initialDelaySeconds: 60
    failureThreshold: 5
    timeoutSeconds: 10

nameOverride: ""
fullnameOverride: ""

timezone: UTC
puid: 1000
pgid: 1000

# Existing secret, overrides idrac values
# existingSecret: test

# iDRAC connection details
idrac:
  host:
  username: root
  password: calvin
  port: 443
  keycode_hack: false

service:
  type: ClusterIP
  port: 5800
  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
  ##
  # nodePort:
  ## Provide any additional annotations which may be required. This can be used to
  ## set the LoadBalancer service type to internal only.
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
  ##
  annotations: {}
  labels: {}
  ## Use loadBalancerIP to request a specific static IP,
  ## otherwise leave blank
  ##
  loadBalancerIP:
  # loadBalancerSourceRanges: []
  ## Set the externalTrafficPolicy in the Service to either Cluster or Local
  # externalTrafficPolicy: Cluster

ingress:
  enabled: false
  annotations:
    {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  path: /
  hosts:
    - chart-example.local
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

persistence:
  app:
    enabled: false
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  vmedia:
    enabled: false
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  screenshots:
    enabled: false
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClass: "-"
    ##
    ## If you want to reuse an existing claim, you can pass the name of the PVC using
    ## the existingClaim variable
    # existingClaim: your-claim
    accessMode: ReadWriteOnce
    size: 1Gi
    ## If subPath is set mount a sub folder of a volume instead of the root of the volume.
    ## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
    ##
    subPath: ""
    ## Do not delete the pvc upon helm uninstall
    skipuninstall: false
  extraExistingClaimMounts:
    []
    # - name: external-mount
    #   mountPath: /srv/external-mount
    ## A manually managed Persistent Volume and Claim
    ## If defined, PVC must be created manually before volume will be bound
    #   existingClaim:
    #   readOnly: true

resources:
  {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}

podAnnotations: {}

deploymentAnnotations: {}
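As an illustration of the values above, ingress and app persistence can be switched on for an existing release; the release name and hostname are placeholders:

```shell
helm upgrade my-release nikdoof/idrac6 \
  --set ingress.enabled=true \
  --set ingress.hosts[0]=idrac.example.com \
  --set persistence.app.enabled=true \
  --set persistence.app.size=2Gi
```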
charts/nfs-client-provisioner/.helmignore (new file, 21 lines)
@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
charts/nfs-client-provisioner/Chart.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: v1
appVersion: 3.1.0
description: nfs-client is an automatic provisioner that uses your *already configured* NFS server, automatically creating Persistent Volumes.
name: nfs-client-provisioner
home: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
version: 1.2.13
sources:
  - https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client
keywords:
  - nfs
  - storage
maintainers:
  - name: nikdoof
    email: andy@tensixtyone.com
charts/nfs-client-provisioner/README.md (new file, 73 lines)
@@ -0,0 +1,73 @@
# nfs-client-provisioner

The [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) is an automatic provisioner for Kubernetes that uses your *already configured* NFS server, automatically creating Persistent Volumes.

## TL;DR;

```console
$ helm install --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
```

For **arm** deployments, set the image with `--set image.repository=quay.io/external_storage/nfs-client-provisioner-arm`.

## Introduction

This chart installs a custom [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/) into a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. It also installs an [NFS client provisioner](https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client) into the cluster, which dynamically creates persistent volumes from a single NFS share.

## Prerequisites

- Kubernetes 1.9+
- Existing NFS Share

## Installing the Chart

To install the chart with the release name `my-release`:

```console
$ helm install --name my-release --set nfs.server=x.x.x.x --set nfs.path=/exported/path stable/nfs-client-provisioner
```

The command deploys the given storage class in the default configuration. It can be used afterwards to provision persistent volumes. The [configuration](#configuration) section lists the parameters that can be configured during installation.

> **Tip**: List all releases using `helm list`

## Uninstalling the Chart

To uninstall/delete the `my-release` deployment:

```console
$ helm delete my-release
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of this chart and their default values.

| Parameter | Description | Default |
| --- | --- | --- |
| `replicaCount` | Number of provisioner instances to be deployed | `1` |
| `strategyType` | Specifies the strategy used to replace old Pods with new ones | `Recreate` |
| `image.repository` | Provisioner image | `quay.io/external_storage/nfs-client-provisioner` |
| `image.tag` | Version of provisioner image | `v3.1.0-k8s1.11` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `storageClass.name` | Name of the storageClass | `nfs-client` |
| `storageClass.defaultClass` | Set as the default StorageClass | `false` |
| `storageClass.allowVolumeExpansion` | Allow expanding the volume | `true` |
| `storageClass.reclaimPolicy` | Method used to reclaim an obsoleted volume | `Delete` |
| `storageClass.provisionerName` | Name of the provisionerName | null |
| `storageClass.archiveOnDelete` | Archive pvc when deleting | `true` |
| `storageClass.accessModes` | Set access mode for PV | `ReadWriteOnce` |
| `nfs.server` | Hostname of the NFS server | null (ip or hostname) |
| `nfs.path` | Basepath of the mount point to be used | `/ifs/kubernetes` |
| `nfs.mountOptions` | Mount options (e.g. 'nfsvers=3') | null |
| `resources` | Resources required (e.g. CPU, memory) | `{}` |
| `rbac.create` | Use Role-based Access Control | `true` |
| `podSecurityPolicy.enabled` | Create & use Pod Security Policy resources | `false` |
| `priorityClassName` | Set pod priorityClassName | null |
| `serviceAccount.create` | Should we create a ServiceAccount | `true` |
| `serviceAccount.name` | Name of the ServiceAccount to use | null |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `affinity` | Affinity settings | `{}` |
| `tolerations` | List of node taints to tolerate | `[]` |
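A minimal sketch of installing this copy of the chart from the nikdoof repository (added earlier) instead of `stable`; the NFS server address and export path are placeholders:

```shell
# Install the provisioner against an existing NFS export and make its
# StorageClass the cluster default.
helm install --name nfs-client nikdoof/nfs-client-provisioner \
  --set nfs.server=192.168.1.10 \
  --set nfs.path=/exported/path \
  --set storageClass.defaultClass=true

# Confirm the StorageClass exists.
kubectl get storageclass nfs-client
```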
charts/nfs-client-provisioner/ci/test-values.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
nfs:
  server: 127.0.0.1
podSecurityPolicy:
  enabled: true
buildMode: true
charts/nfs-client-provisioner/templates/_helpers.tpl (new file, 62 lines)
@@ -0,0 +1,62 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "nfs-client-provisioner.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "nfs-client-provisioner.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "nfs-client-provisioner.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{- define "nfs-client-provisioner.provisionerName" -}}
{{- if .Values.storageClass.provisionerName -}}
{{- printf .Values.storageClass.provisionerName -}}
{{- else -}}
cluster.local/{{ template "nfs-client-provisioner.fullname" . -}}
{{- end -}}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "nfs-client-provisioner.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "nfs-client-provisioner.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

{{/*
Return the appropriate apiVersion for podSecurityPolicy.
*/}}
{{- define "podSecurityPolicy.apiVersion" -}}
{{- if semverCompare ">=1.10-0" .Capabilities.KubeVersion.GitVersion -}}
{{- print "policy/v1beta1" -}}
{{- else -}}
{{- print "extensions/v1beta1" -}}
{{- end -}}
{{- end -}}
charts/nfs-client-provisioner/templates/clusterrole.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
{{- if .Values.rbac.create }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: {{ template "nfs-client-provisioner.fullname" . }}-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
{{- if .Values.podSecurityPolicy.enabled }}
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}]
{{- end }}
{{- end }}
@@ -0,0 +1,19 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: run-{{ template "nfs-client-provisioner.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: ClusterRole
  name: {{ template "nfs-client-provisioner.fullname" . }}-runner
  apiGroup: rbac.authorization.k8s.io
{{- end }}
charts/nfs-client-provisioner/templates/deployment.yaml (new file, 77 lines)
@@ -0,0 +1,77 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "nfs-client-provisioner.fullname" . }}
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  replicas: {{ .Values.replicaCount }}
  strategy:
    type: {{ .Values.strategyType }}
  selector:
    matchLabels:
      app: {{ template "nfs-client-provisioner.name" . }}
      release: {{ .Release.Name }}
  template:
    metadata:
      annotations:
      {{- if and (.Values.tolerations) (semverCompare "<1.6-0" .Capabilities.KubeVersion.GitVersion) }}
        scheduler.alpha.kubernetes.io/tolerations: '{{ toJson .Values.tolerations }}'
      {{- end }}
      labels:
        app: {{ template "nfs-client-provisioner.name" . }}
        release: {{ .Release.Name }}
    spec:
      serviceAccountName: {{ template "nfs-client-provisioner.serviceAccountName" . }}
      {{- if .Values.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
      {{- end }}
      {{- if .Values.affinity }}
      affinity:
{{ toYaml .Values.affinity | indent 8 }}
      {{- end }}
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName | quote }}
      {{- end }}
      {{- if .Values.imagePullSecrets }}
      imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
      {{- end }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: {{ template "nfs-client-provisioner.provisionerName" . }}
            - name: NFS_SERVER
              value: {{ .Values.nfs.server }}
            - name: NFS_PATH
              value: {{ .Values.nfs.path }}
          {{- with .Values.resources }}
          resources:
{{ toYaml . | indent 12 }}
          {{- end }}
      volumes:
        - name: nfs-client-root
        {{- if .Values.buildMode }}
          emptyDir: {}
        {{- else if .Values.nfs.mountOptions }}
          persistentVolumeClaim:
            claimName: pvc-{{ template "nfs-client-provisioner.fullname" . }}
        {{- else }}
          nfs:
            server: {{ .Values.nfs.server }}
            path: {{ .Values.nfs.path }}
        {{- end }}
      {{- if and (.Values.tolerations) (semverCompare "^1.6-0" .Capabilities.KubeVersion.GitVersion) }}
      tolerations:
{{ toYaml .Values.tolerations | indent 6 }}
      {{- end }}
@@ -0,0 +1,25 @@
{{ if .Values.nfs.mountOptions -}}
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv-{{ template "nfs-client-provisioner.fullname" . }}
  labels:
    nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }}
spec:
  capacity:
    storage: 10Mi
  volumeMode: Filesystem
  accessModes:
    - {{ .Values.storageClass.accessModes }}
  persistentVolumeReclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
  storageClassName: ""
  {{- if .Values.nfs.mountOptions }}
  mountOptions:
  {{- range .Values.nfs.mountOptions }}
    - {{ . }}
  {{- end }}
  {{- end }}
  nfs:
    server: {{ .Values.nfs.server }}
    path: {{ .Values.nfs.path }}
{{ end -}}
@@ -0,0 +1,17 @@
{{ if .Values.nfs.mountOptions -}}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-{{ template "nfs-client-provisioner.fullname" . }}
spec:
  accessModes:
    - {{ .Values.storageClass.accessModes }}
  volumeMode: Filesystem
  storageClassName: ""
  selector:
    matchLabels:
      nfs-client-provisioner: {{ template "nfs-client-provisioner.fullname" . }}
  resources:
    requests:
      storage: 10Mi
{{ end -}}
@@ -0,0 +1,31 @@
{{- if .Values.podSecurityPolicy.enabled }}
apiVersion: {{ template "podSecurityPolicy.apiVersion" . }}
kind: PodSecurityPolicy
metadata:
  name: {{ template "nfs-client-provisioner.fullname" . }}
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
spec:
  privileged: false
  allowPrivilegeEscalation: false
  requiredDropCapabilities:
    - ALL
  volumes:
    - 'secret'
    - 'nfs'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
{{- end }}
charts/nfs-client-provisioner/templates/role.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
{{- if .Values.rbac.create }}
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
{{- if .Values.podSecurityPolicy.enabled }}
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: [{{ template "nfs-client-provisioner.fullname" . }}]
{{- end }}
{{- end }}
charts/nfs-client-provisioner/templates/rolebinding.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
{{- if .Values.rbac.create }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
subjects:
  - kind: ServiceAccount
    name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
roleRef:
  kind: Role
  name: leader-locking-{{ template "nfs-client-provisioner.fullname" . }}
  apiGroup: rbac.authorization.k8s.io
{{- end }}
charts/nfs-client-provisioner/templates/serviceaccount.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
{{ if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: {{ template "nfs-client-provisioner.serviceAccountName" . }}
{{- end -}}
charts/nfs-client-provisioner/templates/storageclass.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
{{ if .Values.storageClass.create -}}
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  labels:
    app: {{ template "nfs-client-provisioner.name" . }}
    chart: {{ template "nfs-client-provisioner.chart" . }}
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
  name: {{ .Values.storageClass.name }}
  {{- if .Values.storageClass.defaultClass }}
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
  {{- end }}
provisioner: {{ template "nfs-client-provisioner.provisionerName" . }}
allowVolumeExpansion: {{ .Values.storageClass.allowVolumeExpansion }}
reclaimPolicy: {{ .Values.storageClass.reclaimPolicy }}
parameters:
  archiveOnDelete: "{{ .Values.storageClass.archiveOnDelete }}"
{{- if .Values.nfs.mountOptions }}
mountOptions:
  {{- range .Values.nfs.mountOptions }}
  - {{ . }}
  {{- end }}
{{- end }}
{{ end -}}
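To see the StorageClass above in action, a small claim can be created against it; the provisioner should bind it to a dynamically created volume on the NFS share. A minimal sketch with a hypothetical claim name:

```shell
# Create a test PVC against the nfs-client class, then check that it binds.
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test
spec:
  storageClassName: nfs-client
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc nfs-test
```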
charts/nfs-client-provisioner/values.yaml (new file, 78 lines)
@@ -0,0 +1,78 @@
# Default values for nfs-client-provisioner.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1
strategyType: Recreate

image:
  repository: quay.io/external_storage/nfs-client-provisioner
  tag: v3.1.0-k8s1.11
  pullPolicy: IfNotPresent

nfs:
  server:
  path: /ifs/kubernetes
  mountOptions:

# For creating the StorageClass automatically:
storageClass:
  create: true

  # Set a provisioner name. If unset, a name will be generated.
  # provisionerName:

  # Set StorageClass as the default StorageClass
  # Ignored if storageClass.create is false
  defaultClass: false

  # Set a StorageClass name
  # Ignored if storageClass.create is false
  name: nfs-client

  # Allow volume to be expanded dynamically
  allowVolumeExpansion: true

  # Method used to reclaim an obsoleted volume
  reclaimPolicy: Delete

  # When set to false your PVs will not be archived by the provisioner upon deletion of the PVC.
  archiveOnDelete: true

  # Set access mode - ReadWriteOnce, ReadOnlyMany or ReadWriteMany
  accessModes: ReadWriteOnce

## For RBAC support:
rbac:
  # Specifies whether RBAC resources should be created
  create: true

# If true, create & use Pod Security Policy resources
# https://kubernetes.io/docs/concepts/policy/pod-security-policy/
podSecurityPolicy:
  enabled: false

## Set pod priorityClassName
# priorityClassName: ""

serviceAccount:
  # Specifies whether a ServiceAccount should be created
  create: true

  # The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name:

resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@@ -2,7 +2,7 @@ apiVersion: v1
 appVersion: "0.8.5"
 description: Collect VMware vCenter and ESXi performance metrics and send them to InfluxDB
 name: vsphere-influxdb-go
-version: 0.2.0
+version: 0.2.1
 keywords:
   - vsphere
   - influxdb
@@ -45,8 +45,8 @@ config:
   Domain: ".lab"
   RemoveHostDomainName: false
   Interval: 60
-  VCenters: {}
-  InfluxDB: {}
+  VCenters: []
+  InfluxDB: []
   Metrics:
     - ObjectType:
       - VirtualMachine
@@ -1,8 +1,8 @@
 apiVersion: v1
-appVersion: "0.3.165"
+appVersion: "0.3.178"
 description: A web GUI for Zigbee2Mqtt
 name: zigbee2mqttassistant
-version: 0.1.0
+version: 0.1.1
 keywords:
   - zigbee
   - mqtt
@@ -1,9 +1,8 @@
 ---
 {{- if and (and (not (.Values.existingSecretName)) (.Values.z2ma.username)) (.Values.z2ma.password) }}
 apiVersion: v1
 kind: Secret
 metadata:
-  name: zigbee2mqttassistant-secret
+  name: {{ include "zigbee2mqttassistant.name" . }}-secret
   labels:
     app.kubernetes.io/name: {{ include "zigbee2mqttassistant.name" . }}
     helm.sh/chart: {{ include "zigbee2mqttassistant.chart" . }}
@@ -6,7 +6,7 @@ replicaCount: 1
 
 image:
   repository: carldebilly/zigbee2mqttassistant
-  tag: 0.3.165
+  tag: 0.3.178
   pullPolicy: IfNotPresent
 
 imagePullSecrets: []