feat: add v1.23.13

Loïc Kalbermatter 2024-06-10 04:34:13 +02:00
commit 18e5b7903a
Signed by: PulseDev
GPG Key ID: 0516267FEC58F5F3
15 changed files with 819 additions and 0 deletions

23
.helmignore Normal file

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

14
Chart.yaml Normal file

@@ -0,0 +1,14 @@
apiVersion: v2
appVersion: '1.23.13'
deprecated: false
description: A self-hosted Monitoring tool like "Uptime-Robot".
home: https://code.pulseflow.ch/pulsedev/uptime-kuma
icon: https://raw.githubusercontent.com/louislam/uptime-kuma/master/public/icon.png
maintainers:
- name: PulseDev
email: loic.kalbermatter@pulseflow.ch
name: uptime-kuma
sources:
- https://github.com/louislam/uptime-kuma
type: application
version: 1.23.13

84
README.md Normal file

@@ -0,0 +1,84 @@
# uptime-kuma
![Version: 1.23.13](https://img.shields.io/badge/Version-1.23.13-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.23.13](https://img.shields.io/badge/AppVersion-1.23.13-informational?style=flat-square)
A self-hosted Monitoring tool like "Uptime-Robot".
**Homepage:** <https://code.pulseflow.ch/pulsedev/uptime-kuma>
## Maintainers
| Name | Email | Url |
| -------- | -------------------------------- | --- |
| PulseDev | <loic.kalbermatter@pulseflow.ch> | |
## Source Code
- <https://github.com/louislam/uptime-kuma>
## Values
| Key | Type | Default | Description |
| -------------------------------------------------------------------- | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ |
| additionalVolumeMounts | list | `[]` | A list of additional volumeMounts to be added to the pod |
| additionalVolumes | list | `[]` | A list of additional volumes to be added to the pod |
| affinity | object | `{}` | |
| dnsConfig | object | `{}` | Use this option to set custom DNS configurations to the created deployment |
| dnsPolicy | string | `""` | Use this option to set a custom DNS policy to the created deployment |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
| image.repository | string | `"louislam/uptime-kuma"` | |
| image.tag | string | `"1.23.13-debian"` | |
| imagePullSecrets | list | `[]` | |
| ingress.annotations."nginx.ingress.kubernetes.io/proxy-read-timeout" | string | `"3600"` | |
| ingress.annotations."nginx.ingress.kubernetes.io/proxy-send-timeout" | string | `"3600"` | |
| ingress.annotations."nginx.ingress.kubernetes.io/server-snippets" | string | `"location / {\n proxy_set_header Upgrade $http_upgrade;\n proxy_http_version 1.1;\n proxy_set_header X-Forwarded-Host $http_host;\n proxy_set_header X-Forwarded-Proto $scheme;\n proxy_set_header X-Forwarded-For $remote_addr;\n proxy_set_header Host $host;\n proxy_set_header Connection \"upgrade\";\n proxy_set_header X-Real-IP $remote_addr;\n proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;\n proxy_set_header Upgrade $http_upgrade;\n proxy_cache_bypass $http_upgrade;\n}\n"` | |
| ingress.enabled | bool | `false` | |
| ingress.extraLabels | object | `{}` | |
| ingress.hosts[0].host | string | `"chart-example.local"` | |
| ingress.hosts[0].paths[0].path | string | `"/"` | |
| ingress.hosts[0].paths[0].pathType | string | `"ImplementationSpecific"` | |
| ingress.tls | list | `[]` | |
| livenessProbe.enabled | bool | `true` | |
| livenessProbe.initialDelaySeconds | int | `15` | |
| livenessProbe.timeoutSeconds | int | `2` | |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| podAnnotations | object | `{}` | |
| podEnv[0].name | string | `"UPTIME_KUMA_PORT"` | |
| podEnv[0].value | string | `"3001"` | |
| podLabels | object | `{}` | |
| podSecurityContext | object | `{}` | |
| readinessProbe.enabled | bool | `true` | |
| readinessProbe.initialDelaySeconds | int | `5` | |
| resources | object | `{}` | |
| securityContext | object | `{}` | |
| service.annotations | object | `{}` | |
| service.nodePort | string | `nil` | |
| service.port | int | `3001` | |
| service.type | string | `"ClusterIP"` | |
| serviceAccount.annotations | object | `{}` | |
| serviceAccount.create | bool | `false` | |
| serviceAccount.name | string | `""` | |
| serviceMonitor.additionalLabels | object | `{}` | Additional labels to add to the ServiceMonitor |
| serviceMonitor.annotations | object | `{}` | Additional annotations to add to the ServiceMonitor |
| serviceMonitor.enabled | bool | `false` | |
| serviceMonitor.interval | string | `"60s"` | Scrape interval. If not set, the Prometheus default scrape interval is used. |
| serviceMonitor.metricRelabelings | list | `[]` | Prometheus [MetricRelabelConfigs] to apply to samples before ingestion |
| serviceMonitor.namespace | string | `nil` | Namespace where the ServiceMonitor resource should be created, default is the same as the release namespace |
| serviceMonitor.relabelings | list | `[]` | Prometheus [RelabelConfigs] to apply to samples before scraping |
| serviceMonitor.scheme | string | `nil` | Scheme to use when scraping, e.g. http (default) or https. |
| serviceMonitor.scrapeTimeout | string | `"10s"` | Timeout if metrics can't be retrieved in given time interval |
| serviceMonitor.selector | object | `{}` | Prometheus ServiceMonitor selector, only select Prometheus's with these labels (if not set, select any Prometheus) |
| serviceMonitor.tlsConfig | object | `{}` | TLS configuration to use when scraping, only applicable for scheme https. |
| strategy.type | string | `"Recreate"` | |
| tolerations | list | `[]` | |
| useDeploy | bool | `true` | |
| volume.accessMode | string | `"ReadWriteOnce"` | |
| volume.enabled | bool | `true` | |
| volume.existingClaim | string | `""` | |
| volume.size | string | `"4Gi"` | |
---
Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1)
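
A chart of this shape is usually installed from a checkout with something like "helm install uptime-kuma . -f my-values.yaml" (release name and file name are placeholders). A minimal override file, using only keys documented in the table above, might look like this; every value is illustrative:

image:
  tag: 1.23.13-debian
service:
  type: ClusterIP
  port: 3001
volume:
  enabled: true
  size: 8Gi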

23
templates/NOTES.txt Normal file

@@ -0,0 +1,23 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "uptime-kuma.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "uptime-kuma.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "uptime-kuma.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "uptime-kuma.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:3001 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 3001:$CONTAINER_PORT
{{- end }}

62
templates/_helpers.tpl Normal file

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "uptime-kuma.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "uptime-kuma.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "uptime-kuma.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "uptime-kuma.labels" -}}
helm.sh/chart: {{ include "uptime-kuma.chart" . }}
{{ include "uptime-kuma.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "uptime-kuma.selectorLabels" -}}
app.kubernetes.io/name: {{ include "uptime-kuma.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "uptime-kuma.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "uptime-kuma.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
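
To illustrate the naming logic above, here are hypothetical results of the "uptime-kuma.fullname" helper:

# release "monitoring"        -> monitoring-uptime-kuma   (release name does not contain the chart name)
# release "uptime-kuma-prod"  -> uptime-kuma-prod         (release name already contains the chart name)
# fullnameOverride "status"   -> status                   (an explicit override always wins)
# Every result is truncated to 63 characters and stripped of a trailing "-".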

115
templates/deployment.yaml Normal file

@@ -0,0 +1,115 @@
{{- if .Values.useDeploy -}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "uptime-kuma.fullname" . }}
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
{{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 6 }}
{{- end }}
{{- with .Values.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "uptime-kuma.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "uptime-kuma.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- if .Values.dnsPolicy }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- toYaml .Values.dnsConfig | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.podEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: 3001
protocol: TCP
{{ if or .Values.volume.enabled .Values.additionalVolumeMounts -}}
volumeMounts:
{{- if .Values.volume.enabled }}
- mountPath: /app/data
name: storage
{{- end -}}
{{ with .Values.additionalVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- extra/healthcheck
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds}}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds}}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /
port: 3001
scheme: HTTP
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds}}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{ if or .Values.volume.enabled .Values.additionalVolumes -}}
volumes:
{{- if .Values.volume.enabled }}
- name: storage
persistentVolumeClaim:
{{- if not .Values.volume.existingClaim }}
claimName: {{ include "uptime-kuma.fullname" . }}-pvc
{{- else }}
claimName: {{ .Values.volume.existingClaim }}
{{- end }}
{{- end -}}
{{- with .Values.additionalVolumes }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end -}}
{{- end -}}
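
Because additionalVolumes and additionalVolumeMounts are rendered verbatim into the pod spec above, the volume name has to match between the two lists. A sketch of such a values override, with hypothetical names and paths:

additionalVolumes:
  - name: extra-ca                 # hypothetical ConfigMap holding a CA bundle
    configMap:
      name: extra-ca
additionalVolumeMounts:
  - name: extra-ca                 # must match the volume name above
    mountPath: /etc/ssl/certs/extra-ca.pem
    subPath: ca.pem
    readOnly: true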

64
templates/ingress.yaml Normal file

@@ -0,0 +1,64 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "uptime-kuma.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
{{- if .Values.ingress.extraLabels }}
{{- toYaml .Values.ingress.extraLabels | nindent 4 }}
{{- end }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
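
A sketch of a values override that drives the template above into a TLS-terminated ingress; host, ingress class and secret names are placeholders (ingress.className is read by the template even though the default values file only shows it commented out):

ingress:
  enabled: true
  className: nginx                  # placeholder ingress class
  hosts:
    - host: uptime.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: uptime-example-tls
      hosts:
        - uptime.example.com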

19
templates/pvc.yaml Normal file

@@ -0,0 +1,19 @@
{{- if and .Values.useDeploy (not .Values.volume.existingClaim) }}
{{- if and .Values.volume.enabled (not .Values.volume.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ include "uptime-kuma.fullname" . }}-pvc
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.volume.size | quote }}
{{- with .Values.volume.storageClassName }}
storageClassName: {{ . }}
{{- end }}
{{- end -}}
{{- end -}}
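
To reuse an already existing claim instead of letting the chart create this PVC, an override like the following should be enough; the claim name is a placeholder and must exist in the release namespace:

volume:
  enabled: true
  existingClaim: uptime-kuma-data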

22
templates/service.yaml Normal file

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "uptime-kuma.fullname" . }}
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
{{- with .Values.service.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: 3001
protocol: TCP
{{- with .Values.service.nodePort }}
nodePort: {{ . }}
{{- end }}
name: http
selector:
{{- include "uptime-kuma.selectorLabels" . | nindent 4 }}
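
The optional nodePort block above only renders when service.nodePort is set, for example with an override such as this (port number illustrative):

service:
  type: NodePort
  port: 3001
  nodePort: 30001    # must fall inside the cluster's NodePort range (30000-32767 by default)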


@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "uptime-kuma.serviceAccountName" . }}
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -0,0 +1,12 @@
{{- if and .Values.serviceMonitor.enabled .Values.serviceMonitor.basicAuth }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "uptime-kuma.fullname" . }}-metrics-basic-auth
namespace: {{ default .Release.Namespace .Values.serviceMonitor.namespace }}
type: kubernetes.io/basic-auth
stringData:
{{- range $key, $value := .Values.serviceMonitor.basicAuth }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}


@@ -0,0 +1,54 @@
{{- if and .Values.serviceMonitor.enabled (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "uptime-kuma.fullname" . }}
namespace: {{ default .Release.Namespace .Values.serviceMonitor.namespace }}
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
{{- with .Values.serviceMonitor.selector }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceMonitor.additionalLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceMonitor.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels:
{{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
endpoints:
- port: http
path: /metrics
interval: {{ .Values.serviceMonitor.interval }}
scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }}
{{- with .Values.serviceMonitor.scheme }}
scheme: {{ . }}
{{- end }}
{{- with .Values.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.serviceMonitor.basicAuth }}
basicAuth:
{{- range $key, $value := . }}
{{ $key }}:
name: {{ include "uptime-kuma.fullname" $ }}-metrics-basic-auth
key: {{ $key }}
{{- end }}
{{- end }}
{{- end }}
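
Assuming the Prometheus Operator CRDs are present in the cluster, a values override along these lines enables scraping; the label and credentials are placeholders, and setting basicAuth additionally renders the basic-auth Secret shown above:

serviceMonitor:
  enabled: true
  interval: 60s
  additionalLabels:
    release: prometheus        # placeholder; match your Prometheus instance's ServiceMonitor selector
  basicAuth:
    username: metrics          # any string, per the values.yaml comment
    password: "<api-token>"    # placeholder for the Uptime Kuma API token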

116
templates/statefulset.yaml Normal file

@@ -0,0 +1,116 @@
{{- if not .Values.useDeploy -}}
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "uptime-kuma.fullname" . }}
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
spec:
serviceName: {{ include "uptime-kuma.fullname" . }}
replicas: 1
selector:
matchLabels:
{{- include "uptime-kuma.selectorLabels" . | nindent 6 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 6 }}
{{- end }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "uptime-kuma.selectorLabels" . | nindent 8 }}
{{- if .Values.podLabels }}
{{- toYaml .Values.podLabels | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
enableServiceLinks: false
serviceAccountName: {{ include "uptime-kuma.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
{{- if .Values.dnsPolicy }}
dnsPolicy: {{ .Values.dnsPolicy }}
{{- end }}
{{- if .Values.dnsConfig }}
dnsConfig:
{{- toYaml .Values.dnsConfig | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.podEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
ports:
- name: http
containerPort: 3001
protocol: TCP
{{ if or .Values.volume.enabled .Values.additionalVolumeMounts -}}
volumeMounts:
{{- if .Values.volume.enabled }}
- mountPath: /app/data
name: storage
{{- end -}}
{{ with .Values.additionalVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if .Values.livenessProbe.enabled }}
livenessProbe:
exec:
command:
- extra/healthcheck
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds}}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds}}
{{- end }}
{{- if .Values.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: /
port: 3001
scheme: HTTP
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds}}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.additionalVolumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{ if .Values.volume.enabled -}}
volumeClaimTemplates:
- metadata:
name: storage
spec:
accessModes:
- {{ .Values.volume.accessMode }}
resources:
requests:
storage: {{ .Values.volume.size }}
{{- with .Values.volume.storageClassName }}
storageClassName: {{ . }}
{{- end }}
{{- end -}}
{{- end -}}
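
To run this StatefulSet variant with its own volumeClaimTemplate instead of the Deployment plus PVC pair, an override along these lines should work; the storage class name is a placeholder and may be omitted to use the cluster default:

useDeploy: false
volume:
  enabled: true
  accessMode: ReadWriteOnce
  size: 4Gi
  storageClassName: fast-ssd   # placeholder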


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "uptime-kuma.fullname" . }}-test-connection"
labels:
{{- include "uptime-kuma.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "uptime-kuma.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

184
values.yaml Normal file

@@ -0,0 +1,184 @@
# Default values for uptime-kuma.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: louislam/uptime-kuma
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: '1.23.13-debian'
imagePullSecrets: []
nameOverride: ''
fullnameOverride: ''
# If this option is set to false, a StatefulSet is used instead of a Deployment
useDeploy: true
serviceAccount:
# Specifies whether a service account should be created
create: false
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ''
podAnnotations: {}
podLabels:
{}
# app: uptime-kuma
podEnv:
# A default port must be set; it is required by the container
- name: 'UPTIME_KUMA_PORT'
value: '3001'
podSecurityContext:
{}
# fsGroup: 2000
securityContext:
{}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 3001
nodePort:
annotations: {}
ingress:
enabled: false
# className: ""
extraLabels:
{}
# vhost: uptime-kuma.company.corp
annotations:
nginx.ingress.kubernetes.io/proxy-read-timeout: '3600'
nginx.ingress.kubernetes.io/proxy-send-timeout: '3600'
nginx.ingress.kubernetes.io/server-snippets: |
location / {
proxy_set_header Upgrade $http_upgrade;
proxy_http_version 1.1;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header Host $host;
proxy_set_header Connection "upgrade";
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Upgrade $http_upgrade;
proxy_cache_bypass $http_upgrade;
}
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls:
[]
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
livenessProbe:
enabled: true
timeoutSeconds: 2
initialDelaySeconds: 15
readinessProbe:
enabled: true
initialDelaySeconds: 5
volume:
enabled: true
accessMode: ReadWriteOnce
size: 4Gi
# If you want to use a storage class other than the default, uncomment this
# line and define the storage class name
# storageClassName:
# Reuse your own pre-existing PVC.
existingClaim: ''
# -- A list of additional volumes to be added to the pod
additionalVolumes:
[]
# - name: "additional-certificates"
# configMap:
# name: "additional-certificates"
# optional: true
# defaultMode: 420
# -- A list of additional volumeMounts to be added to the pod
additionalVolumeMounts:
[]
# - name: "additional-certificates"
# mountPath: "/etc/ssl/certs/additional/additional-ca.pem"
# readOnly: true
# subPath: "additional-ca.pem"
strategy:
type: Recreate
# Prometheus ServiceMonitor configuration
serviceMonitor:
enabled: false
# -- Scrape interval. If not set, the Prometheus default scrape interval is used.
interval: 60s
# -- Timeout if metrics can't be retrieved in given time interval
scrapeTimeout: 10s
# -- Scheme to use when scraping, e.g. http (default) or https.
scheme: ~
# -- TLS configuration to use when scraping, only applicable for scheme https.
tlsConfig: {}
# -- Prometheus [RelabelConfigs] to apply to samples before scraping
relabelings: []
# -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
metricRelabelings: []
# -- Prometheus ServiceMonitor selector, only select Prometheus's with these
# labels (if not set, select any Prometheus)
selector: {}
# -- Namespace where the ServiceMonitor resource should be created, default is
# the same as the release namespace
namespace: ~
# -- Additional labels to add to the ServiceMonitor
additionalLabels: {}
# -- Additional annotations to add to the ServiceMonitor
annotations: {}
# -- BasicAuth credentials for scraping metrics, use API token and any string for username
# basicAuth:
# username: "metrics"
# password: ""
# -- Use this option to set a custom DNS policy to the created deployment
dnsPolicy: ''
# -- Use this option to set custom DNS configurations to the created deployment
dnsConfig: {}