Switch iDRAC6 chart to k8s-at-home common chart (#27)

* Switch iDRAC6 chart to k8s-at-home common chart base

* Add test values and CT updates

* Update build workflows

* Switch to k3d

* Try to work out the testing issue

* Try explicitly specifying the config

* Move ct config to the .github folder (see the ct.yaml sketch below)

* Remove timeout

* Correct config file path
Committed by GitHub on 2021-12-04 20:17:00 +00:00
parent bc3c1fa17b
commit e259457f30
21 changed files with 362 additions and 554 deletions
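
The bullets above track the chart-testing (ct) setup: its config moves into the .github folder and the install tests run against a k3d cluster. As a rough, hedged sketch only (the file name, chart directory, and repository URL are assumptions rather than contents of this commit), such a ct config could look like:

# .github/ct.yaml -- hypothetical chart-testing configuration; paths and repo URL are assumptions
chart-dirs:
  - charts
chart-repos:
  - k8s-at-home=https://k8s-at-home.com/charts/
target-branch: main
check-version-increment: true
validate-maintainers: false

The workflows would then invoke it with something like ct lint --config .github/ct.yaml and ct install --config .github/ct.yaml against the k3d cluster; the exact workflow steps and the action used to create that cluster are not shown in this excerpt.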


@@ -1,171 +1,63 @@
# Default values for idrac6.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
#
# IMPORTANT NOTE
#
# This chart inherits from our common library chart. You can check the default values/options here:
# https://github.com/k8s-at-home/library-charts/tree/main/charts/stable/common/values.yaml
#
image:
# -- image repository
repository: domistyle/idrac6
# -- image tag
tag: v0.5
# -- image pull policy
pullPolicy: IfNotPresent
# upgrade strategy type (e.g. Recreate or RollingUpdate)
strategyType: Recreate
# Probes configuration
probes:
liveness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
readiness:
initialDelaySeconds: 60
failureThreshold: 5
timeoutSeconds: 10
nameOverride: ""
fullnameOverride: ""
timezone: UTC
puid: 1000
pgid: 1000
# Existing secret, overrides idrac values
# existingSecret: test
# iDRAC connection details
idrac:
host:
username: root
password: calvin
port: 443
keycode_hack: false
# -- environment variables. See more environment variables in the [idrac6 documentation](https://github.com/DomiStyle/docker-idrac6).
# @default -- See below
env:
# -- iDRAC hostname to connect to
IDRAC_HOST:
# -- iDRAC Username to use
IDRAC_USER: root
# -- iDRAC password to use
IDRAC_PASSWORD: calvin
# -- iDRAC HTTP port
IDRAC_PORT: 443
# -- Enable keycode hack
IDRAC_KEYCODE_HACK: 'false'
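# With the common library chart, the env keys above are rendered directly as container
# environment variables. Hedged example of overriding them at install time (the release
# name and chart reference below are placeholders, not defined by this commit):
#   helm install idrac6 <repo>/idrac6 \
#     --set env.IDRAC_HOST=192.0.2.10 \
#     --set env.IDRAC_PASSWORD=calvin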
# -- Configures service settings for the chart.
# @default -- See values.yaml
service:
type: ClusterIP
port: 5800
## Specify the nodePort value for the LoadBalancer and NodePort service types.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
##
# nodePort:
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
annotations: {}
labels: {}
## Use loadBalancerIP to request a specific static IP,
## otherwise leave blank
##
loadBalancerIP:
# loadBalancerSourceRanges: []
## Set the externalTrafficPolicy in the Service to either Cluster or Local
# externalTrafficPolicy: Cluster
main:
ports:
http:
port: 5800
vnc:
port: 5900
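# The "main" service above publishes the web interface (http, 5800) and the VNC endpoint (5900).
# Hedged sketch of exposing it via NodePort instead of ClusterIP (the nodePort number is illustrative):
#   service:
#     main:
#       type: NodePort
#       ports:
#         http:
#           port: 5800
#           nodePort: 30800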
ingress:
enabled: false
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
path: /
hosts:
- chart-example.local
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
# -- Enable and configure ingress settings for the chart under this key.
# @default -- See values.yaml
main:
enabled: false
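# Hedged example of enabling ingress in the common-chart layout (the hostname and TLS
# secret name are placeholders, not chart defaults):
#   ingress:
#     main:
#       enabled: true
#       hosts:
#         - host: idrac.example.com
#           paths:
#             - path: /
#       tls:
#         - secretName: idrac6-tls
#           hosts:
#             - idrac.example.com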
# -- Configure persistence settings for the chart under this key.
# @default -- See values.yaml
persistence:
# -- Allows caching of the required JARs in persistent storage, enabling quicker connections.
# @default -- See values.yaml
app:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set, mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
mountPath: /app
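# The common chart can also mount a pre-existing PVC here instead of provisioning one.
# Hedged example (the claim name is a placeholder):
#   persistence:
#     app:
#       enabled: true
#       existingClaim: idrac6-app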
# -- Virtual media that can be connected to the iDRAC.
# @default -- See values.yaml
vmedia:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set, mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
mountPath: /vmedia
# -- Screenshots taken from the iDRAC client.
# @default -- See values.yaml
screenshots:
enabled: false
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
# storageClass: "-"
##
## If you want to reuse an existing claim, you can pass the name of the PVC using
## the existingClaim variable
# existingClaim: your-claim
accessMode: ReadWriteOnce
size: 1Gi
## If subPath is set, mount a sub folder of a volume instead of the root of the volume.
## This is especially handy for volume plugins that don't natively support sub mounting (like glusterfs).
##
subPath: ""
## Do not delete the pvc upon helm uninstall
skipuninstall: false
extraExistingClaimMounts:
[]
# - name: external-mount
# mountPath: /srv/external-mount
## A manually managed Persistent Volume and Claim
## If defined, PVC must be created manually before volume will be bound
# existingClaim:
# readOnly: true
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
podAnnotations: {}
deploymentAnnotations: {}
mountPath: /screenshots
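
Pulled together, a minimal user-supplied values override for the reworked chart might look like the sketch below; the host address and the enabled persistence entries are illustrative placeholders, not values introduced by this commit.

# values-override.yaml -- hypothetical example
env:
  IDRAC_HOST: 192.0.2.10
  IDRAC_USER: root
  IDRAC_PASSWORD: calvin
persistence:
  app:
    enabled: true
  screenshots:
    enabled: true
ingress:
  main:
    enabled: false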