# k0s-cluster/k0sctl.yaml
---
# k0sctl cluster definition: 1 controller + 3 workers, kube-proxy disabled in
# favor of Cilium (kubeProxyReplacement), charts deployed via the k0s helm
# extension in dependency order (order: 0 cilium ... order: 4 seaweedfs).
apiVersion: k0sctl.k0sproject.io/v1beta1
kind: Cluster
metadata:
  name: k0s-cluster
  user: admin
spec:
  hosts:
    - role: controller
      openSSH:
        user: smig
        port: 22
        address: k1.lab.smig.tech
        options:
          # k0sctl renders boolean false as "no": -o StrictHostkeyChecking=no
          StrictHostkeyChecking: false
      installFlags:
        - --enable-metrics-scraper
      files:
        # Manifests dropped into /var/lib/k0s/manifests/<dir> are auto-applied
        # by the k0s manifest controller.
        - name: prometheus-config
          # NOTE(review): "prometheues" looks like a typo — left as-is because
          # it presumably matches the actual file name on disk; confirm.
          src: manifests/prometheues-service-monitor.yaml
          dstDir: /var/lib/k0s/manifests/prometheus/
          perm: "0644"  # quoted: unquoted 0644 is YAML 1.1 octal int 420
        - name: weed-namespace
          src: manifests/weed-namespace.yaml
          dstDir: /var/lib/k0s/manifests/weed/
          perm: "0644"
        - name: weed-cnpg-config
          src: manifests/weed-pg.yaml
          dstDir: /var/lib/k0s/manifests/weed/
          perm: "0644"
        - name: weed-secret-config
          src: manifests/weed-secret.yaml
          dstDir: /var/lib/k0s/manifests/weed/
          perm: "0644"
        - name: selinux-stuff
          src: selinux.conf
          dstDir: /etc/containerd.d
          perm: "0644"
    - role: worker
      openSSH:
        address: k2.lab.smig.tech
        user: smig
        port: 22
        options:
          StrictHostkeyChecking: false  # -o StrictHostkeyChecking=no
      files:
        - name: selinux-script
          src: ./selinux-script.sh
          dstDir: /home/smig/
          perm: "0700"
          user: smig
          group: smig
        - name: selinux-stuff
          src: selinux.conf
          dstDir: /etc/containerd.d
          perm: "0644"
      hooks:
        apply:
          after:
            # Log file lands in the SSH user's cwd — presumably /home/smig,
            # matching the rm path in the reset hook; confirm.
            - date > k0s-selinux.log
            - echo "Starting SELinux Script" >> k0s-selinux.log
            - bash /home/smig/selinux-script.sh &>> k0s-selinux.log
        reset:
          after:
            - rm /home/smig/k0s-selinux.log /home/smig/selinux-script.sh /home/smig/.k0s-selinuxsetup-complete
    - role: worker
      openSSH:
        address: k3.lab.smig.tech
        user: smig
        options:
          StrictHostkeyChecking: false  # -o StrictHostkeyChecking=no
      files:
        - name: selinux-script
          src: ./selinux-script.sh
          dstDir: /home/smig/
          perm: "0700"
          user: smig
          group: smig
        - name: selinux-stuff
          src: selinux.conf
          dstDir: /etc/containerd.d
          perm: "0644"
      hooks:
        apply:
          after:
            - date > k0s-selinux.log
            - echo "Starting SELinux Script" >> k0s-selinux.log
            - bash /home/smig/selinux-script.sh &>> k0s-selinux.log
        reset:
          after:
            - rm /home/smig/k0s-selinux.log /home/smig/selinux-script.sh /home/smig/.k0s-selinuxsetup-complete
    - role: worker
      openSSH:
        address: k4.lab.smig.tech
        user: smig
        options:
          StrictHostkeyChecking: false  # -o StrictHostkeyChecking=no
      files:
        - name: selinux-script
          src: ./selinux-script.sh
          dstDir: /home/smig/
          perm: "0700"
          user: smig
          group: smig
        - name: selinux-stuff
          src: selinux.conf
          dstDir: /etc/containerd.d
          perm: "0644"
      hooks:
        apply:
          after:
            - date > k0s-selinux.log
            - echo "Starting SELinux Script" >> k0s-selinux.log
            - bash /home/smig/selinux-script.sh &>> k0s-selinux.log
        reset:
          after:
            - rm /home/smig/k0s-selinux.log /home/smig/selinux-script.sh /home/smig/.k0s-selinuxsetup-complete
  k0s:
    # Embedded k0s cluster config (written to the controller).
    config:
      apiVersion: k0s.k0sproject.io/v1beta1
      kind: Cluster
      metadata:
        name: k0s
      spec:
        api:
          k0sApiPort: 9443
          port: 6443
        installConfig:
          users:
            etcdUser: etcd
            kineUser: kube-apiserver
            konnectivityUser: konnectivity-server
            kubeAPIserverUser: kube-apiserver
            kubeSchedulerUser: kube-scheduler
        konnectivity:
          adminPort: 8133
          agentPort: 8132
        network:
          # kube-proxy disabled: Cilium below runs with
          # kubeProxyReplacement: true.
          kubeProxy:
            disabled: true
            # mode: iptables
          kuberouter:
            autoMTU: true
            mtu: 0
            peerRouterASNs: ""
            peerRouterIPs: ""
          podCIDR: 10.244.0.0/16
          provider: custom
          serviceCIDR: 10.96.0.0/12
        podSecurityPolicy:
          defaultPolicy: 00-k0s-privileged
        storage:
          type: etcd
        telemetry:
          enabled: false
        extensions:
          helm:
            repositories:
              - name: prometheus
                url: https://prometheus-community.github.io/helm-charts
              - name: cilium
                url: https://helm.cilium.io/
              - name: cert-manager
                url: https://charts.jetstack.io
              - name: openebs-internal
                url: https://openebs.github.io/charts
              - name: cloudnative-pg
                url: https://cloudnative-pg.github.io/charts
              # - name: seaweedfs
              #   url: oci://git.thecodedom.com/smig/seaweedfs:4.0.392
            charts:
              - name: seaweedfs
                namespace: weed
                chartname: oci://git.thecodedom.com/smig/seaweedfs
                order: 4
                version: 4.0.392
                timeout: 20m
                values: |
                  global:
                    logginglevel: 3
                  master:
                    affinity: null
                    data:
                      type: "persistentVolumeClaim"
                      storageClass: openebs-hostpath
                      size: 1Gi
                    logs:
                      type: "emptyDir"
                    nodeSelector: null
                    resources:
                      requests:
                        cpu: 100m
                        memory: 512Mi
                      limits:
                        cpu: 200m
                        memory: 1Gi
                  volume:
                    affinity: null
                    dataDirs:
                      - name: data
                        type: "persistentVolumeClaim"
                        storageClass: openebs-hostpath
                        size: 30Gi
                        maxVolumes: 0
                    nodeSelector: null
                    logs:
                      type: "emptyDir"
                    resources:
                      requests:
                        cpu: 200m
                        memory: 1Gi
                      limits:
                        cpu: 500m
                        memory: 2Gi
                  filer:
                    enabled: true
                    affinity: null
                    nodeSelector: null
                    data:
                      type: "persistentVolumeClaim"
                      size: "1Gi"
                      storageClass: "openebs-hostpath"
                    logs:
                      type: "emptyDir"
                    resources:
                      requests:
                        cpu: 100m
                        memory: 512Mi
                      limits:
                        cpu: 400m
                        memory: 1Gi
                    extraEnvironmentVars:
                      WEED_LEVELDB2_ENABLED: "false"
                      WEED_POSTGRES_ENABLED: "true"
                      WEED_POSTGRES_HOSTNAME: "weed-pg-rw.weed.svc.cluster.local"
                      WEED_POSTGRES_PORT: "5432"
                      WEED_POSTGRES_DATABASE: "weed"
                    secretExtraEnvironmentVars:
                      WEED_POSTGRES_USERNAME:
                        secretKeyRef:
                          name: weed-pg-secret
                          key: username
                      WEED_POSTGRES_PASSWORD:
                        secretKeyRef:
                          name: weed-pg-secret
                          key: password
                  s3:
                    enabled: true
                    enableAuth: true
                    existingConfigSecret: weed-creds
                    domainName: k0s-s3.lab.smig.tech
                    nodeSelector: null
                    httpsPort: null
                    logs:
                      type: "emptyDir"
                    ingress:
                      enabled: true
                      className: "cilium"
                      host: k0s-s3.lab.smig.tech
                    createBuckets:
                      - name: registry
                        anonymousRead: false
              - name: prometheus
                chartname: prometheus/kube-prometheus-stack
                version: "75.1.0"
                namespace: monitoring
                order: 3
                values: |
                  prometheus:
                    prometheusSpec:
                      maximumStartupDurationSeconds: null
                      serviceMonitorSelectorNilUsesHelmValues: false
                  additionalPrometheusRulesMap:
                    k0s-control-plane-alerts:
                      groups:
                        - name: control-plane-activity
                          rules:
                            - alert: KubeSchedulerDown
                              expr: absent(apiserver_audit_event_total{job="kube-scheduler"})
                              for: 15m
                              labels:
                                severity: critical
                            - alert: KubeControllerManagerDown
                              expr: absent(apiserver_audit_event_total{job="kube-controller-manager"})
                              for: 15m
                              labels:
                                severity: critical
                  alertmanager:
                    alertmanagerSpec:
                      replicas: 1
                  grafana:
                    initChownData:
                      enabled: false
                    persistence:
                      enabled: true
                      storageClassName: openebs-hostpath
                    ingress:
                      enabled: true
                      ingressClassName: cilium
                      hosts:
                        - grafana-k0s.lab.smig.tech
              - name: cloudnative-pg
                namespace: cnpg-system
                version: 0.24.0
                chartname: cloudnative-pg/cloudnative-pg
                order: 2
              - name: cert-manager
                chartname: cert-manager/cert-manager
                version: "v1.17.2"
                order: 2
                namespace: cert-manager
                values: |
                  crds:
                    enabled: true
              - name: openebs
                chartname: openebs-internal/openebs
                version: "3.9.0"
                namespace: openebs
                order: 1
                values: |
                  localprovisioner:
                    hostpathClass:
                      enabled: true
                      isDefaultClass: false
              - name: cilium
                chartname: cilium/cilium
                namespace: kube-system
                version: "1.18.0-pre.3"
                order: 0
                values: |
                  hubble:
                    enabled: true
                  cluster:
                    name: k0s-cluster
                  envoy:
                    enabled: true
                    image:
                      digest: sha256:bb73643e4b8c95d852bf25fc0e2f44e6d77617a809b63b119aba9edc001f4ea4
                      repository: git.thecodedom.com/smig/cilium-envoy
                      tag: latest
                  k8sServiceHost: k1.lab.smig.tech
                  k8sServicePort: 6443
                  kubeProxyReplacement: true
                  operator:
                    replicas: 1
                  routingMode: tunnel
                  tunnelProtocol: vxlan
                  nodeIPAM:
                    enabled: true
                  defaultLBServiceIPAM: nodeipam
                  gatewayAPI:
                    enabled: true
                  ingressController:
                    enabled: true
                    enforceHttps: false
                    loadbalancerMode: shared
                    service:
                      externalTrafficPolicy: Cluster
  options:
    wait:
      enabled: true
    drain:
      enabled: true
      gracePeriod: 2m0s
      timeout: 5m0s
      force: true
      ignoreDaemonSets: true
      deleteEmptyDirData: true
      podSelector: ""
      skipWaitForDeleteTimeout: 0s
    concurrency:
      limit: 30
      workerDisruptionPercent: 10
      uploads: 5
    evictTaint:
      enabled: false
      taint: k0sctl.k0sproject.io/evict=true
      effect: NoExecute
      controllerWorkers: false