Update helm values cloudflare/cloudflared to v2022.5.3 - autoclosed #3

Closed
Renovate wants to merge 0 commits from renovate/cloudflare-cloudflared-2022.x into main
36 changed files with 554 additions and 1145 deletions

2
.gitignore vendored
View File

@ -1,2 +0,0 @@
**/_hold
ignore/

512
.helms/traefik-values.yaml Normal file
View File

@ -0,0 +1,512 @@
# Default values for Traefik
image:
name: library/traefik
# defaults to appVersion
tag: "2.6.0"
pullPolicy: IfNotPresent
cloudflaredImage:
image: &cloudflaredImage cloudflare/cloudflared:2022.5.3
# defaults to appVersion
#
# Configure the deployment
#
deployment:
enabled: true
# Can be either Deployment or DaemonSet
kind: Deployment
# Number of pods of the deployment (only applies when kind == Deployment)
replicas: 1
# Amount of time (in seconds) before Kubernetes will send the SIGKILL signal if Traefik does not shut down
terminationGracePeriodSeconds: 60
# The minimum number of seconds Traefik needs to be up and running before the DaemonSet/Deployment controller considers it available
minReadySeconds: 0
# Additional deployment annotations (e.g. for jaeger-operator sidecar injection)
annotations: {}
# Additional deployment labels (e.g. for filtering deployment by custom labels)
labels: {}
# Additional pod annotations (e.g. for mesh injection or prometheus scraping)
podAnnotations: {}
# Additional Pod labels (e.g. for filtering Pod by custom labels)
podLabels: {}
# Additional containers (e.g. for metric offloading sidecars)
additionalContainers:
# https://docs.datadoghq.com/developers/dogstatsd/unix_socket/?tab=host
- name: cloudflare
image: *cloudflaredImage
# args: ["-s", "-u", "udp-recv:8125", "unix-sendto:/socket/socket"]
# volumeMounts:
# - name: dsdsocket
# mountPath: /socket
# Additional volumes available for use with initContainers and additionalContainers
additionalVolumes:
[]
# - name: dsdsocket
# hostPath:
# path: /var/run/statsd-exporter
# Additional initContainers (e.g. for setting file permission as shown below)
initContainers:
[]
# The "volume-permissions" init container is required if you run into permission issues.
# Related issue: https://github.com/traefik/traefik/issues/6972
# - name: volume-permissions
# image: busybox:1.31.1
# command: ["sh", "-c", "chmod -Rv 600 /data/*"]
# volumeMounts:
# - name: data
# mountPath: /data
# Use process namespace sharing
shareProcessNamespace: false
# Custom pod DNS policy. Apply if `hostNetwork: true`
# dnsPolicy: ClusterFirstWithHostNet
# Additional imagePullSecrets
imagePullSecrets:
[]
# - name: myRegistryKeySecretName
# Pod disruption budget
podDisruptionBudget:
enabled: false
# maxUnavailable: 1
# maxUnavailable: 33%
# minAvailable: 0
# minAvailable: 25%
# Use ingressClass. Ignored if Traefik version < 2.3 / kubernetes < 1.18.x
ingressClass:
# true is not unit-testable yet, pending https://github.com/rancher/helm-unittest/pull/12
enabled: false
isDefaultClass: false
# Use to force a networking.k8s.io API Version for certain CI/CD applications. E.g. "v1beta1"
fallbackApiVersion: ""
# Activate Pilot integration
pilot:
enabled: true
# Enable experimental features
experimental:
http3:
enabled: true
plugins:
enabled: false
kubernetesGateway:
enabled: false
# Create an IngressRoute for the dashboard
ingressRoute:
dashboard:
enabled: true
# Additional ingressRoute annotations (e.g. for kubernetes.io/ingress.class)
annotations: {}
# Additional ingressRoute labels (e.g. for filtering IngressRoute by custom labels)
labels: {}
rollingUpdate:
maxUnavailable: 1
maxSurge: 1
# Customize liveness and readiness probe values.
readinessProbe:
failureThreshold: 1
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
#
# Configure providers
#
providers:
kubernetesCRD:
enabled: true
allowCrossNamespace: false
allowExternalNameServices: false
# ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik
namespaces:
[]
# - "default"
kubernetesIngress:
enabled: true
allowExternalNameServices: false
allowEmptyServices: false
# ingressClass: traefik-internal
# labelSelector: environment=production,method=traefik
namespaces:
- "default"
# IP used for Kubernetes Ingress endpoints
publishedService:
enabled: false
# Published Kubernetes Service to copy status from. Format: namespace/servicename
# By default this Traefik service
# pathOverride: ""
#
# Add volumes to the traefik pod. The volume name will be passed to tpl.
# This can be used to mount a cert pair or a configmap that holds a config.toml file.
# After the volume has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--providers.file.filename=/config/dynamic.toml"
# - "--ping"
# - "--ping.entrypoint=web"
volumes:
[]
# - name: public-cert
# mountPath: "/certs"
# type: emptyDir
# - name: '{{ printf "%s-configs" .Release.Name }}'
# mountPath: "/config"
# type: configMap
# Additional volumeMounts to add to the Traefik container
additionalVolumeMounts:
[]
# For instance when using a logshipper for access logs
# - name: traefik-logs
# mountPath: /var/log/traefik
# Logs
# https://docs.traefik.io/observability/logs/
logs:
# Traefik logs concern everything that happens to Traefik itself (startup, configuration, events, shutdown, and so on).
general:
# By default, the logs use a text format (common), but you can
# also ask for the json format in the format option
# format: json
# By default, the level is set to ERROR. Alternative logging levels are DEBUG, PANIC, FATAL, ERROR, WARN, and INFO.
level: ERROR
access:
# To enable access logs
enabled: false
# By default, logs are written using the Common Log Format (CLF).
# To write logs in JSON, use json in the format option.
# If the given format is unsupported, the default (CLF) is used instead.
# format: json
# To write the logs in an asynchronous fashion, specify a bufferingSize option.
# This option represents the number of log lines Traefik will keep in memory before writing
# them to the selected output. In some cases, this option can greatly help performances.
# bufferingSize: 100
# Filtering https://docs.traefik.io/observability/access-logs/#filtering
filters:
{}
# statuscodes: "200,300-302"
# retryattempts: true
# minduration: 10ms
# Fields
# https://docs.traefik.io/observability/access-logs/#limiting-the-fieldsincluding-headers
fields:
general:
defaultmode: keep
names:
{}
# Examples:
# ClientUsername: drop
headers:
defaultmode: drop
names:
{}
# Examples:
# User-Agent: redact
# Authorization: drop
# Content-Type: keep
metrics:
# datadog:
# address: 127.0.0.1:8125
# influxdb:
# address: localhost:8089
# protocol: udp
prometheus:
entryPoint: metrics
# addRoutersLabels: true
# statsd:
# address: localhost:8125
tracing:
{}
# instana:
# enabled: true
# datadog:
# localAgentHostPort: 127.0.0.1:8126
# debug: false
# globalTag: ""
# prioritySampling: false
globalArguments:
- "--global.checknewversion"
#
# Configure Traefik static configuration
# Additional arguments to be passed at Traefik's binary
# All available options available on https://docs.traefik.io/reference/static-configuration/cli/
## Use curly braces to pass values: `helm install --set="additionalArguments={--providers.kubernetesingress.ingressclass=traefik-internal,--log.level=DEBUG}"`
additionalArguments:
- --certificatesresolvers.cf.acme.dnschallenge.provider=cloudflare
- --certificatesresolvers.cf.acme.dnschallenge.resolvers=1.1.1.1:53,1.0.0.1:53
- --certificatesresolvers.cf.acme.storage=/data/acme.json
# - "--providers.kubernetesingress.ingressclass=traefik-internal"
# - "--log.level=DEBUG"
# Environment variables to be passed to Traefik's binary
env: []
# - name: SOME_VAR
# value: some-var-value
# - name: SOME_VAR_FROM_CONFIG_MAP
# valueFrom:
# configMapRef:
# name: configmap-name
# key: config-key
# - name: SOME_SECRET
# valueFrom:
# secretKeyRef:
# name: secret-name
# key: secret-key
envFrom:
# - configMapRef:
# name: config-map-name
- secretRef:
name: traefik-secrets
# Configure ports
ports:
# The name of this one can't be changed as it is used for the readiness and
# liveness probes, but you can adjust its config to your liking
traefik:
port: 9000
# Use hostPort if set.
# hostPort: 9000
#
# Use hostIP if set. If not set, Kubernetes will default to 0.0.0.0, which
# means it's listening on all your interfaces and all your IPs. You may want
# to set this value if you need traefik to listen on specific interface
# only.
# hostIP: 192.168.100.10
# Override the liveness/readiness port. This is useful to integrate traefik
# with an external Load Balancer that performs healthchecks.
# healthchecksPort: 9000
# Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort.
#
# You SHOULD NOT expose the traefik port on production deployments.
# If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress
expose: false
# The exposed port for this service
exposedPort: 9000
# The port protocol (TCP/UDP)
protocol: TCP
web:
port: 8000
# hostPort: 8000
expose: true
exposedPort: 80
# The port protocol (TCP/UDP)
protocol: TCP
# Use nodeport if set. This is useful if you have configured Traefik in a
# LoadBalancer
# nodePort: 32080
# Port Redirections
# Added in 2.2, you can make permanent redirects via entrypoints.
# https://docs.traefik.io/routing/entrypoints/#redirection
# redirectTo: websecure
websecure:
port: 8443
# hostPort: 8443
expose: true
exposedPort: 443
# The port protocol (TCP/UDP)
protocol: TCP
# nodePort: 32443
# Enable HTTP/3.
# Requires enabling experimental http3 feature and tls.
# Note that you cannot have a UDP entrypoint with the same port.
http3: true
# Set TLS at the entrypoint
# https://doc.traefik.io/traefik/routing/entrypoints/#tls
tls:
enabled: true
# this is the name of a TLSOption definition
options: ""
certResolver: cf
domains:
- main: roxedus.com
sans:
- "*.roxedus.com"
# - bar.example.com
metrics:
port: 9100
# hostPort: 9100
# Defines whether the port is exposed if service.type is LoadBalancer or
# NodePort.
#
# You may not want to expose the metrics port on production deployments.
# If you want to access it from outside of your cluster,
# use `kubectl port-forward` or create a secure ingress
expose: false
# The exposed port for this service
exposedPort: 9100
# The port protocol (TCP/UDP)
protocol: TCP
# TLS Options are created as TLSOption CRDs
# https://doc.traefik.io/traefik/https/tls/#tls-options
# Example:
tlsOptions:
default:
sniStrict: true
minVersion: VersionTLS12
# preferServerCipherSuites: true
# foobar:
# curvePreferences:
# - CurveP521
# - CurveP384
# Options for the main traefik service, where the entrypoints traffic comes
# from.
service:
enabled: true
type: NodePort
# Additional annotations applied to both TCP and UDP services (e.g. for cloud provider specific config)
annotations: {}
# Additional annotations for TCP service only
annotationsTCP: {}
# Additional annotations for UDP service only
annotationsUDP: {}
# Additional service labels (e.g. for filtering Service by custom labels)
labels: {}
# Additional entries here will be added to the service spec.
# Cannot contain type, selector or ports entries.
spec:
{}
# externalTrafficPolicy: Cluster
# loadBalancerIP: "1.2.3.4"
# clusterIP: "2.3.4.5"
loadBalancerSourceRanges:
[]
# - 192.168.0.1/32
# - 172.16.0.0/16
externalIPs:
[]
# - 1.2.3.4
# One of SingleStack, PreferDualStack, or RequireDualStack.
# ipFamilyPolicy: SingleStack
# List of IP families (e.g. IPv4 and/or IPv6).
# ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services
# ipFamilies:
# - IPv4
# - IPv6
## Create HorizontalPodAutoscaler object.
##
autoscaling:
enabled: false
# minReplicas: 1
# maxReplicas: 10
# metrics:
# - type: Resource
# resource:
# name: cpu
# targetAverageUtilization: 60
# - type: Resource
# resource:
# name: memory
# targetAverageUtilization: 60
# Enable persistence using Persistent Volume Claims
# ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
# After the pvc has been mounted, add the configs into traefik by using the `additionalArguments` list below, eg:
# additionalArguments:
# - "--certificatesresolvers.le.acme.storage=/data/acme.json"
# It will persist TLS certificates.
persistence:
enabled: false
name: data
# existingClaim: ""
accessMode: ReadWriteOnce
size: 128Mi
# storageClass: ""
path: /data
annotations: {}
# subPath: "" # only mount a subpath of the Volume into the pod
# If hostNetwork is true, runs traefik in the host network namespace
# To prevent unschedulable pods due to port collisions, if hostNetwork=true
# and replicas>1, a pod anti-affinity is recommended and will be set if the
# affinity is left as default.
hostNetwork: false
# Whether Role Based Access Control objects like roles and rolebindings should be created
rbac:
enabled: true
# If set to false, installs ClusterRole and ClusterRoleBinding so Traefik can be used across namespaces.
# If set to true, installs namespace-specific Role and RoleBinding and requires provider configuration be set to that same namespace
namespaced: false
# Enable to create a PodSecurityPolicy and assign it to the Service Account via RoleBinding or ClusterRoleBinding
podSecurityPolicy:
enabled: false
# The service account the pods will use to interact with the Kubernetes API
serviceAccount:
# If set, an existing service account is used
# If not set, a service account is created automatically using the fullname template
name: ""
# Additional serviceAccount annotations (e.g. for oidc authentication)
serviceAccountAnnotations: {}
resources:
{}
# requests:
# cpu: "100m"
# memory: "50Mi"
# limits:
# cpu: "300m"
# memory: "150Mi"
affinity: {}
# # This example pod anti-affinity forces the scheduler to put traefik pods
# # on nodes where no other traefik pods are scheduled.
# # It should be used when hostNetwork: true to prevent port conflicts
# podAntiAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# - labelSelector:
# matchExpressions:
# - key: app.kubernetes.io/name
# operator: In
# values:
# - {{ template "traefik.name" . }}
# topologyKey: kubernetes.io/hostname
nodeSelector: {}
tolerations: []
# Pods can have priority.
# Priority indicates the importance of a Pod relative to other Pods.
priorityClassName: ""
# Set the container security context
# To run the container with ports below 1024 this will need to be adjust to run as root
securityContext:
capabilities:
drop: [ALL]
readOnlyRootFilesystem: true
runAsGroup: 65532
runAsNonRoot: true
runAsUser: 65532
podSecurityContext:
fsGroup: 65532

View File

@ -4,13 +4,12 @@
],
"kubernetes": {
"fileMatch": [
"Deployments/.+\\.yaml$",
"CI/.+\\.yaml$"
"deployment\\.yaml$"
]
},
"argocd": {
"helm-values": {
"fileMatch": [
"apps/.+\\.yaml$"
"-values\\.yaml$"
]
},
"packageRules": [

1
Charts/.gitignore vendored
View File

@ -1 +0,0 @@
**/charts/

View File

@ -1,6 +0,0 @@
dependencies:
- name: argo-cd
repository: https://argoproj.github.io/argo-helm
version: 5.25.0
digest: sha256:4dfa4a29330c4987747d06ba01f719a155d00baf6adbbcb7e03c76118643afac
generated: "2023-03-09T13:00:39.375996286Z"

View File

@ -1,7 +0,0 @@
apiVersion: v2
name: argo-cd
version: 1.0.0
dependencies:
- name: argo-cd
version: 5.25.0
repository: https://argoproj.github.io/argo-helm

View File

@ -1,86 +0,0 @@
argo-cd:
dex:
enabled: true
server:
rbacConfig:
policy.csv: |
g, roxedus, role:admin
g, ArgoCD Admins, role:admin
#service:
# type: NodePort
extraArgs:
- --insecure
ingress:
enabled: true
ingressClassName: traefik
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/cluster-issuer: roxedus.com-cloudflare
hosts:
- argo.roxedus.com
tls:
- hosts:
- argo.roxedus.com
secretName: argo-roxedus-com-cert
config:
accounts.roxedus: apiKey, login
accounts.admin.enabled: "true"
repositories: |
- type: helm
name: argo-cd
url: https://argoproj.github.io/argo-helm
configs:
cm:
url: https://argo.roxedus.com
resource.customizations.health.networking.k8s.io_Ingress: |
hs = {}
hs.status = "Healthy"
return hs
? resource.customizations.ignoreDifferences.admissionregistration.k8s.io_MutatingWebhookConfiguration
: |
jqPathExpressions:
- '.webhooks[]?.clientConfig.caBundle'
resource.customizations.health.cert-manager.io_Certificate: |
hs = {}
if obj.status ~= nil then
if obj.status.conditions ~= nil then
for i, condition in ipairs(obj.status.conditions) do
if condition.type == "Ready" and condition.status == "False" then
hs.status = "Degraded"
hs.message = condition.message
return hs
end
if condition.type == "Ready" and condition.status == "True" then
hs.status = "Healthy"
hs.message = condition.message
return hs
end
end
end
end
hs.status = "Progressing"
hs.message = "Waiting for certificate"
return hs
dex.config: |
connectors:
- config:
issuer: https://authentik.roxedus.com/application/o/argocd/
clientID: $argo-cd-sso:clientID
clientSecret: $argo-cd-sso:clientSecret
insecureEnableGroups: true
scopes:
- openid
- profile
- email
- groups
name: authentik
type: oidc
id: authentik

View File

@ -1,92 +0,0 @@
# Organizr dashboard: Deployment + ClusterIP Service + Traefik Ingress + Longhorn PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: organizr
spec:
  replicas: 1
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: organizr
  template:
    metadata:
      labels:
        app: organizr
    spec:
      containers:
        - name: organizr
          # NOTE(review): no image tag — resolves to :latest; pin a tag for reproducible rollouts
          image: docker.roxedus.net/roxedus/org-less
          resources:
            limits:
              # NOTE(review): "2G" is decimal (2*10^9 bytes) while the PVC below uses
              # binary "2Gi" — confirm which unit is intended
              memory: "2G"
              cpu: "1000m"
          env:
            - name: TZ
              value: Europe/Oslo
            # UID/GID quoted: env values must be strings
            - name: PUID
              value: "1000"
            - name: PGID
              value: "1000"
          ports:
            - containerPort: 80
          volumeMounts:
            - mountPath: /var/www/data
              name: config
      volumes:
        - name: config
          persistentVolumeClaim:
            claimName: organizr-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: organizr
spec:
  type: ClusterIP
  selector:
    app: organizr
  ports:
    - port: 80
      targetPort: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations:
    # add an annotation indicating the issuer to use.
    cert-manager.io/acme-challenge-type: dns01
    # route through the Authentik embedded-outpost forward-auth middleware
    traefik.ingress.kubernetes.io/router.middlewares: authentik-ak-outpost-authentik-embedded-outpost@kubernetescrd
    cert-manager.io/cluster-issuer: roxedus.com-cloudflare
  name: organizr
  namespace: default
spec:
  ingressClassName: traefik
  rules:
    - host: organizr.roxedus.com
      http:
        paths:
          - pathType: Prefix
            path: /
            backend:
              service:
                name: organizr
                port:
                  number: 80
  tls:
    - hosts:
        - organizr.roxedus.com
      # cert-manager materializes the certificate into this secret
      secretName: organizr-roxedus-com-cert
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: organizr-pvc
  labels:
    app: organizr
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn
  resources:
    requests:
      storage: 2Gi

View File

@ -1,219 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: searxng
spec:
replicas: 1
revisionHistoryLimit: 3
selector:
matchLabels:
app: searxng
template:
metadata:
labels:
app: searxng
spec:
containers:
- name: searxng
image: searxng/searxng:2022.11.11-3a765113
resources:
limits:
memory: "2G"
cpu: "1000m"
env:
- name: BASE_URL
value: https://searxng.roxedus.com
- name: INSTANCE_NAME
value: Roxedus.com
envFrom:
- secretRef:
name: searxng
ports:
- containerPort: 8080
volumeMounts:
- mountPath: /etc/searxng
name: config
readOnly: true
volumes:
- name: config
configMap:
name: searxng
items:
- key: "settings"
path: "settings.yml"
- key: "uwsgi"
path: "uwsgi.ini"
---
apiVersion: v1
kind: Service
metadata:
name: searxng
spec:
type: ClusterIP
selector:
app: searxng
ports:
- port: 8080
targetPort: 8080
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: searxng
namespace: default
spec:
secretStoreRef:
name: secret-store
kind: ClusterSecretStore
target:
name: searxng
dataFrom:
- extract:
key: searxng
conversionStrategy: Default
decodingStrategy: Auto
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
# add an annotation indicating the issuer to use.
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/cluster-issuer: roxedus.com-cloudflare
name: searxng
namespace: default
spec:
ingressClassName: traefik
rules:
- host: searxng.roxedus.com
http:
paths:
- pathType: Prefix
path: /
backend:
service:
name: searxng
port:
number: 8080
tls:
- hosts:
- searxng.roxedus.com
secretName: searxng-roxedus-com-cert
---
apiVersion: v1
kind: ConfigMap
metadata:
name: searxng
data:
settings: |
# see https://docs.searxng.org/admin/engines/settings.html#use-default-settings
use_default_settings: true
server:
limiter: false
image_proxy: true
ui:
static_use_hash: true
default_theme: simple
theme_args:
simple_style: dark
infinite_scroll: true
results_on_new_tab: true
query_in_title: true
search:
autocomplete: "google"
enabled_plugins:
- "Hash plugin"
- "Search on category select"
- "Self Informations"
- "Tracker URL remover"
- "Hostname replace"
- "Open Access DOI rewrite"
hostname_replace:
'(.*\.)?codegrepper\.com': false
'(.*\.)?w3schools\.com': false
'(.*\.)?geeksforgeeks\.org': false
'(.*\.)?stackshare\.io': false
'(.*\.)?tutorialspoint\.com': false
'(.*\.)?answeright\.com': false
'(.*\.)?askdev\.info': false
'(.*\.)?askdev\.io': false
'(.*\.)?blogmepost\.com': false
'(.*\.)?c-sharpcorner\.com': false
'(.*\.)?code-examples\.net': false
'(.*\.)?codeflow\.site': false
'(.*\.)?gitmemory\.cn': false
'(.*\.)?gitmemory\.com': false
'(.*\.)?intellipaat\.com': false
'(.*\.)?javaer101\.com': false
'(.*\.)?programmerstart\.com': false
'(.*\.)?programmersought\.com': false
'(.*\.)?qastack\.com': false
'(.*\.)?roboflow\.ai': false
'(.*\.)?stackanswers\.net': false
'(.*\.)?stackoom\.com': false
'(.*\.)?stackovernet\.com': false
'(.*\.)?stackovernet\.xyz': false
'(.*\.)?stackoverrun\.com': false
'(.*\.)?thetopsites\.net': false
'(.*\.)?ubuntugeeks\.com': false
'(.*\.)?cyberciti\.biz': false
'(.*\.)?ispycode\.com': false
'(.*\.)?reposhub\.com': false
'(.*\.)?githubmemory\.com': false
'(.*\.)?issueexplorer\.com': false
'(.*\.)?tabnine\.com': false
'(.*\.)?gitcode\.net': false
'(.*\.)?command-not-found\.com': false
'(.*\.)?im-coder\.com': false
'(.*\.)?i-harness\.com': false
uwsgi: |
[uwsgi]
# Who will run the code
uid = searxng
gid = searxng
# Number of workers (usually CPU count)
workers = %k
threads = 4
# The right granted on the created socket
chmod-socket = 666
# Plugin to use and interpreter config
single-interpreter = true
master = true
plugin = python3
lazy-apps = true
enable-threads = true
# Module to import
module = searx.webapp
# Virtualenv and python path
pythonpath = /usr/local/searxng/
chdir = /usr/local/searxng/searx/
# automatically set processes name to something meaningful
auto-procname = true
# Disable request logging for privacy
disable-logging = true
log-5xx = true
# Set the max size of a request (request-body excluded)
buffer-size = 8192
# No keep alive
# See https://github.com/searx/searx-docker/issues/24
add-header = Connection: close
# uwsgi serves the static files
# expires set to one year since there are hashes
static-map = /static=/usr/local/searxng/searx/static
static-expires = /* 31557600
static-gzip-all = True
offload-threads = %k
# Cache
cache2 = name=searxngcache,items=2000,blocks=2000,blocksize=4096,bitmap=1

View File

@ -1,21 +0,0 @@
---
# Pulls the Argo CD SSO client credentials out of Vault (via external-secrets)
# into the argo-cd namespace.
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: argo-cd-sso
  namespace: argo-cd
spec:
  secretStoreRef:
    name: secret-store
    kind: ClusterSecretStore
  target:
    name: argo-cd-sso
    template:
      metadata:
        labels:
          # label so Argo CD treats the generated Secret as part of its own config
          app.kubernetes.io/part-of: argocd
  dataFrom:
    - extract:
        key: argo-cd-sso
        conversionStrategy: Default
        decodingStrategy: Auto

View File

@ -1,33 +0,0 @@
# Authentik secrets sourced from Vault: the app secret key and the
# PostgreSQL password, landed as two separate Kubernetes Secrets.
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: ext-authentik
  namespace: authentik
spec:
  secretStoreRef:
    name: secret-store
    kind: ClusterSecretStore
  target:
    name: authentik-secret
  data:
    - secretKey: AUTHENTIK_SECRET_KEY
      remoteRef:
        key: authentik/authentik
        property: secret_key
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: ext-authentik-psql
  namespace: authentik
spec:
  secretStoreRef:
    name: secret-store
    kind: ClusterSecretStore
  target:
    name: postgres-secret
  data:
    - secretKey: password
      remoteRef:
        key: authentik/postgres
        property: password

View File

@ -1,16 +0,0 @@
# Let's Encrypt issuer using Cloudflare DNS-01 challenges.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: roxedus.com-cloudflare
  # NOTE(review): ClusterIssuer is cluster-scoped — this namespace field is
  # ignored by the API server and can be dropped
  namespace: cert-manager
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      # ACME account key, stored in cert-manager's cluster-resource namespace
      name: cloudflare-issuer-account-key
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token
              key: CLOUDFLARE_API_KEY

View File

@ -1,16 +0,0 @@
# Lands the Cloudflare API token from Vault into cert-manager's namespace,
# where the ClusterIssuer's dns01 solver reads it.
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: ext-cloudflare
  namespace: cert-manager
spec:
  secretStoreRef:
    name: secret-store
    kind: ClusterSecretStore
  target:
    name: cloudflare-api-token
  data:
    - secretKey: CLOUDFLARE_API_KEY
      remoteRef:
        key: cloudflare-api-token-secret
        property: CLOUDFLARE_API_KEY

View File

@ -1,21 +0,0 @@
---
# Grafana OAuth client credentials from Vault, for the kube-prometheus-stack
# Grafana in the prometheus namespace.
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: grafana-oauth
  namespace: prometheus
spec:
  secretStoreRef:
    name: secret-store
    kind: ClusterSecretStore
  target:
    name: grafana-oauth
    template:
      metadata:
        labels:
          app.kubernetes.io/part-of: grafana
  dataFrom:
    - extract:
        key: prometheus/grafana-sso
        conversionStrategy: Default
        # values stored in plain text — no base64 decoding on extraction
        decodingStrategy: None

View File

@ -1,15 +0,0 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: vlan2-pool
namespace: metallb-system
spec:
addresses:
- 10.0.2.40-10.0.2.50
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: vlan2-pool-advertisement
namespace: metallb-system

View File

@ -1,20 +0,0 @@
# Renovate bot tokens from Vault: a github.com read token plus the
# platform token Renovate authenticates with.
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: ext-renovate
  namespace: ci
spec:
  secretStoreRef:
    name: secret-store
    kind: ClusterSecretStore
  target:
    name: renovate-secret
  data:
    - secretKey: GITHUB_COM_TOKEN
      remoteRef:
        key: ci/renovate
        property: github
    - secretKey: RENOVATE_TOKEN
      remoteRef:
        key: ci/renovate
        property: token

View File

@ -1,20 +0,0 @@
apiVersion: external-secrets.io/v1beta1
kind: ClusterSecretStore
metadata:
name: secret-store
namespace: external-secrets
spec:
provider:
vault:
server: "http://vault.vault:8200"
path: "kv"
version: "v2"
auth:
kubernetes:
mountPath: "kubernetes"
role: "kube-role"
# conditions:
# - namespaceSelector:
# matchLabels:
# secret.roxedus.com/global-store: "true"

View File

@ -1,3 +0,0 @@
apiVersion: v2
name: root
version: 1.0.0

View File

@ -1,22 +0,0 @@
# App-of-apps entry point: syncs every manifest under Deployments/ recursively.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: applications
  namespace: argo-cd
  finalizers:
    # cascade-delete managed resources when this Application is removed
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  project: default
  source:
    path: Deployments/
    repoURL: https://git.roxedus.dev/Roxedus/Argo.git
    targetRevision: HEAD
    directory:
      recurse: true
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View File

@ -1,20 +0,0 @@
# Argo CD managing itself from the wrapper chart in Charts/argo-cd.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: argo-cd
  namespace: argo-cd
  finalizers:
    # cascade-delete managed resources when this Application is removed
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc
    namespace: argo-cd
  project: default
  source:
    path: Charts/argo-cd
    repoURL: https://git.roxedus.dev/Roxedus/Argo.git
    targetRevision: HEAD
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

View File

@ -1,82 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: authentik
namespace: argo-cd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
server: https://kubernetes.default.svc
namespace: authentik
project: default
source:
chart: authentik
helm:
values: |
image:
repository: ghcr.io/goauthentik/server
authentik:
error_reporting:
enabled: true
ingress:
enabled: true
ingressClassName: traefik
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/cluster-issuer: roxedus.com-cloudflare
hosts:
- host: authentik.roxedus.com
paths:
- path: "/"
pathType: Prefix
tls:
- hosts:
- authentik.roxedus.com
secretName: authentik-roxedus-com-cert
envValueFrom:
AUTHENTIK_SECRET_KEY:
secretKeyRef:
key: AUTHENTIK_SECRET_KEY
name: authentik-secret
AUTHENTIK_POSTGRESQL__PASSWORD:
secretKeyRef:
key: password
name: postgres-secret
prometheus.rules.create: true
postgresql:
image:
registry: ghcr.io
repository: zcube/bitnami-compat/postgresql
tag: 11.18.0-debian-11-r39
enabled: true
# auth:
# existingSecret: authentik-postgresql
persistence:
enabled: true
storageClass: longhorn
accessModes:
- ReadWriteOnce
redis:
enabled: true
image:
registry: ghcr.io
repository: zcube/bitnami-compat/redis
tag: 6.2.7-debian-11-r39
repoURL: https://charts.goauthentik.io
targetRevision: 2023.2.4
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -1,34 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
namespace: argo-cd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
server: https://kubernetes.default.svc
namespace: cert-manager
project: default
source:
chart: cert-manager
helm:
values: |
prometheus:
enabled: true
extraArgs:
- --enable-certificate-owner-ref=true
- --dns01-recursive-nameservers-only
- --dns01-recursive-nameservers=8.8.8.8:53,1.1.1.1:53
repoURL: https://charts.jetstack.io
targetRevision: v1.11.0
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -1,24 +0,0 @@
# Syncs every manifest under CI/ recursively into the ci namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ci
  namespace: argo-cd
  finalizers:
    # cascade-delete managed resources when this Application is removed
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc
    namespace: ci
  project: default
  source:
    path: CI/
    repoURL: https://git.roxedus.dev/Roxedus/Argo.git
    targetRevision: HEAD
    directory:
      recurse: true
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View File

@ -1,28 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-secrets
namespace: argo-cd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
server: https://kubernetes.default.svc
namespace: external-secrets
project: default
source:
chart: external-secrets
helm:
values: |
prometheus.enabled: true
repoURL: https://charts.external-secrets.io
targetRevision: 0.7.2
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -1,29 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: loki
namespace: argo-cd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
server: https://kubernetes.default.svc
namespace: prometheus
project: default
source:
chart: loki-stack
helm:
values: |
loki.isDefault: false
test_pod: {}
prometheus.enabled: true
repoURL: https://grafana.github.io/helm-charts
targetRevision: 2.9.9
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -1,56 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: longhorn
namespace: argo-cd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
destination:
server: https://kubernetes.default.svc
namespace: longhorn-system
project: default
source:
chart: longhorn
helm:
values: |
persistence:
defaultClassReplicaCount: 2
longhornManager:
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
operator: "Exists"
longhornDriver:
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
operator: "Exists"
ingress:
enabled: true
ingressClassName: traefik
secureBackends: true
annotations:
cert-manager.io/acme-challenge-type: dns01
cert-manager.io/cluster-issuer: roxedus.com-cloudflare
# traefik.ingress.kubernetes.io/router.middlewares: authentik-ak-outpost-localcluster@kubernetescrd
traefik.ingress.kubernetes.io/router.priority: "1"
host: longhorn.roxedus.com
tls: true
tlsSecret: longhorn-roxedus-com-cert
repoURL: https://charts.longhorn.io
targetRevision: v1.4.0
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true

View File

@ -1,24 +0,0 @@
---
# Argo CD Application: MetalLB from its official chart, deployed into
# metallb-system with default chart values.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: metallb
  namespace: argo-cd  # namespace where Argo CD itself runs
  finalizers:
    # Cascade-delete the deployed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc  # in-cluster API server
    namespace: metallb-system
  project: default
  source:
    chart: metallb
    repoURL: https://metallb.github.io/metallb
    targetRevision: 0.13.9  # chart version
  syncPolicy:
    automated:
      prune: true     # delete resources removed from the desired state
      selfHeal: true  # revert manual drift in the cluster
    syncOptions:
      - CreateNamespace=true

View File

@ -1,22 +0,0 @@
---
# Argo CD Application: raw manifests from the MetaObjects/ directory of the
# Argo Git repo, applied recursively into the default namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: metaobjects
  namespace: argo-cd  # namespace where Argo CD itself runs
  finalizers:
    # Cascade-delete the deployed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc  # in-cluster API server
    namespace: default
  project: default
  source:
    path: MetaObjects/
    repoURL: https://git.roxedus.dev/Roxedus/Argo.git
    targetRevision: HEAD  # track the default branch tip
    directory:
      recurse: true  # include manifests in subdirectories
  syncPolicy:
    automated:
      prune: true     # delete resources removed from the desired state
      selfHeal: true  # revert manual drift in the cluster

View File

@ -1,25 +0,0 @@
---
# Argo CD Application: metrics-server from the kubernetes-sigs chart, deployed
# into kube-system.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: metrics-server
  namespace: argo-cd  # namespace where Argo CD itself runs
  finalizers:
    # Cascade-delete the deployed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc  # in-cluster API server
    namespace: kube-system
  project: default
  source:
    chart: metrics-server
    helm:
      # Inline chart values, passed verbatim to Helm.
      values: |
        args:
          # Skip kubelet serving-cert verification — presumably the kubelet
          # certs here are not signed by the cluster CA; confirm before removing.
          - --kubelet-insecure-tls
    repoURL: https://kubernetes-sigs.github.io/metrics-server/
    targetRevision: 3.8.4  # chart version
  syncPolicy:
    automated:
      prune: true     # delete resources removed from the desired state
      selfHeal: true  # revert manual drift in the cluster

View File

@ -1,71 +0,0 @@
---
# Argo CD Application: kube-prometheus-stack from prometheus-community,
# deployed into the "prometheus" namespace, with Grafana enabled behind
# Authentik OAuth and a Traefik ingress.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: prometheus
  namespace: argo-cd  # namespace where Argo CD itself runs
  finalizers:
    # Cascade-delete the deployed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc  # in-cluster API server
    namespace: prometheus
  project: default
  source:
    chart: kube-prometheus-stack
    helm:
      # Inline chart values. NOTE: Helm values files do NOT expand dotted keys —
      # "alertmanager.enabled: true" is one literal flat key the chart never
      # reads. All toggles below are therefore written as nested maps.
      values: |
        namespaceOverride: prometheus
        alertmanager:
          enabled: true
        # Control-plane component scraping disabled below — TODO confirm this
        # is intentional (it is common when those endpoints are unreachable).
        kubeApiServer:
          enabled: false
        kubelet:
          enabled: false
        kubeControllerManager:
          enabled: false
        coreDns:
          enabled: false
        kubeDns:
          enabled: false
        kubeEtcd:
          enabled: false
        kubeScheduler:
          enabled: false
        kubeProxy:
          enabled: false
        kubeStateMetrics:
          enabled: false
        grafana:
          enabled: true
          sidecar:
            datasources:
              isDefaultDatasource: false
          persistence:
            enabled: true
            storageClassName: longhorn
          env:
            GF_SERVER_ROOT_URL: https://%(domain)s/
            # Generic OAuth against Authentik; client id/secret come from the
            # "grafana-oauth" secret referenced in envFromSecrets.
            GF_AUTH_GENERIC_OAUTH_ENABLED: "true"
            GF_AUTH_GENERIC_OAUTH_NAME: authentik
            GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
            GF_AUTH_GENERIC_OAUTH_AUTH_URL: https://authentik.roxedus.com/application/o/authorize/
            GF_AUTH_GENERIC_OAUTH_TOKEN_URL: https://authentik.roxedus.com/application/o/token/
            GF_AUTH_GENERIC_OAUTH_API_URL: https://authentik.roxedus.com/application/o/userinfo/
            GF_AUTH_SIGNOUT_REDIRECT_URL: https://authentik.roxedus.com/application/o/grafana/
            GF_AUTH_OAUTH_AUTO_LOGIN: "true"
            # Map Authentik groups to Grafana roles; anyone else is a Viewer.
            GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH: "contains(groups[*], 'Grafana Admins') && 'Admin' || contains(groups[*], 'Grafana Editors') && 'Editor' || 'Viewer'"
          envFromSecrets:
            - name: grafana-oauth
          ingress:
            enabled: true
            ingressClassName: traefik
            annotations:
              cert-manager.io/acme-challenge-type: dns01
              cert-manager.io/cluster-issuer: roxedus.com-cloudflare
            hosts:
              - grafana.roxedus.com
            tls:
              - hosts:
                  - grafana.roxedus.com
                secretName: grafana-roxedus-com-cert
    repoURL: https://prometheus-community.github.io/helm-charts
    targetRevision: 45.7.1  # chart version
  syncPolicy:
    automated:
      prune: true     # delete resources removed from the desired state
      selfHeal: true  # revert manual drift in the cluster
    syncOptions:
      - CreateNamespace=true
      # Large CRDs in this chart exceed the client-side apply annotation limit.
      - ServerSideApply=true

View File

@ -1,85 +0,0 @@
---
# Argo CD Application: Traefik ingress controller from the official chart,
# deployed into the "traefik" namespace as a LoadBalancer service.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: traefik
  namespace: argo-cd  # namespace where Argo CD itself runs
  finalizers:
    # Cascade-delete the deployed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc  # in-cluster API server
    namespace: traefik
  project: default
  source:
    chart: traefik
    helm:
      # Inline chart values, passed verbatim to Helm.
      values: |
        experimental:
          http3:
            enabled: true
          plugins:
            enabled: false
          kubernetesGateway:
            enabled: false
        additionalArguments:
          # NOTE(review): --api.insecure serves the dashboard/API without auth
          # on the "traefik" entrypoint — confirm it is not reachable externally.
          - "--api.insecure=true"
          - "--ping"
          - "--ping.entrypoint=traefik"
        ports:
          traefik:
            port: 9000        # container port
            expose: true
            exposedPort: 9900  # service port
            protocol: TCP
          web:
            port: 8080
            exposedPort: 80
            expose: true
            protocol: TCP
            redirectTo: websecure  # force HTTP -> HTTPS
          websecure:
            port: 4443
            exposedPort: 443
            expose: true
            protocol: TCP
            tls:
              enabled: true
          metrics:
            port: 9102
            expose: false  # scraped in-cluster only
          udp:
            port: 6666
            protocol: UDP
            expose: true
        tlsOptions:
          # Default TLS options applied to routers without explicit options.
          default:
            sniStrict: true
            minVersion: VersionTLS12
        service:
          enabled: true
          type: LoadBalancer
        providers:
          kubernetesCRD:
            allowCrossNamespace: true  # IngressRoutes may reference services in other namespaces
          kubernetesIngress:
            publishedService:
              enabled: true  # report the LB address on Ingress status
        ingressClass:
          enabled: true
          isDefaultClass: true
    repoURL: https://helm.traefik.io/traefik
    targetRevision: 21.2.0  # chart version
  syncPolicy:
    automated:
      prune: true     # delete resources removed from the desired state
      selfHeal: true  # revert manual drift in the cluster
    syncOptions:
      - CreateNamespace=true

View File

@ -1,33 +0,0 @@
---
# Argo CD Application: HashiCorp Vault from the official chart, deployed into
# the "vault" namespace with the UI exposed via NodePort.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: vault
  namespace: argo-cd  # namespace where Argo CD itself runs
  finalizers:
    # Cascade-delete the deployed resources when this Application is removed.
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    server: https://kubernetes.default.svc  # in-cluster API server
    namespace: vault
  project: default
  source:
    chart: vault
    helm:
      # Inline chart values. NOTE: Helm values files do NOT expand dotted keys —
      # "server.dataStorage.storageClass: longhorn" is a single literal key the
      # chart never reads. Keys must be nested maps, as below.
      values: |
        server:
          dataStorage:
            storageClass: longhorn
        ui:
          enabled: true
          serviceType: NodePort
        global:
          serverTelemetry:
            prometheusOperator: true
    repoURL: https://helm.releases.hashicorp.com
    targetRevision: 0.23.0  # chart version
  syncPolicy:
    automated:
      prune: true     # delete resources removed from the desired state
      selfHeal: true  # revert manual drift in the cluster
    syncOptions:
      - CreateNamespace=true

View File

View File

@ -0,0 +1,27 @@
---
# Deployment: single py-kms replica, image pinned by digest for
# reproducible rollouts.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: py-kms
spec:
  replicas: 1
  revisionHistoryLimit: 3  # keep only the last 3 old ReplicaSets for rollback
  selector:
    matchLabels:
      app: py-kms
  template:
    metadata:
      labels:
        app: py-kms
    spec:
      containers:
        - image: ghcr.io/thespad/py-kms@sha256:48f2a58b03eb84da40d2be79eb49eb4c14978ef2c2d4a4f8d63a0c1f1d9b23c3
          name: py-kms
          resources:
            # Quantities quoted so YAML keeps them as strings.
            limits:
              cpu: "800m"
              memory: "100Mi"
            requests:
              cpu: "300m"
              memory: "40Mi"
          ports:
            - containerPort: 1688  # port the matching py-kms Service targets

10
py-kms/py-kms-svc.yaml Normal file
View File

@ -0,0 +1,10 @@
---
# Service: exposes the py-kms Deployment in-cluster on port 1688
# (default type ClusterIP).
apiVersion: v1
kind: Service
metadata:
  name: py-kms
spec:
  ports:
    - port: 1688
      targetPort: 1688  # matches the containerPort in the py-kms Deployment
  selector:
    app: py-kms

View File

@ -17,10 +17,9 @@ spec:
# Update this to the latest available and then enable Renovate on
# the manifest
image: renovate/renovate:35.1.2
image: renovate/renovate:32.0.0
args:
- Roxedus/Argo
- Roxedus/Infra
# Environment Variables
env:
- name: LOG_LEVEL
@ -33,16 +32,11 @@ spec:
value: "https://git.roxedus.dev/api/v1"
envFrom:
- secretRef:
name: renovate-secret
name: renovate-pat
volumeMounts:
- name: work-volume
mountPath: /tmp/renovate/
restartPolicy: Never
tolerations:
- key: "node-role.kubernetes.io/control-plane"
operator: "Exists"
- key: "node-role.kubernetes.io/master"
operator: "Exists"
volumes:
- name: work-volume
emptyDir: {}