Add loadbalancer

master
guppy 2023-05-14 23:17:13 +02:00
parent 9156341792
commit 78a9db4489
12 changed files with 84 additions and 26 deletions

View File

@ -1,5 +1,5 @@
# K8s cluster on Debian 11
Tested successfully on 14/05/2023 with kubernetes 1.27.1, containerd 1.6.21 and flannel 1.1.2
Tested successfully on 14/05/2023 with kubernetes 1.27.1, containerd 1.6.21, flannel 1.1.2, ingress-nginx and PureLB
Durée approximative du deployment: 8 minutes
kubeadm version
@ -14,7 +14,7 @@ cf https://git.metatux.fr:3001/GRETA/TP-IaC-Ansible
## Prepare les noeuds et fait l'installation de kubernetes
> ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i ../terraform/hosts.ini pre-start.yaml
## Initialisation du cluster + join des workes, ajout de flannel, openebs, ingress-nginx, helm.
## Initialisation du cluster + join des workers, ajout de flannel, openebs, ingress-nginx, purelb, helm.
## Mise en service de Gitea
> ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i ../terraform/hosts.ini start.yaml
## Si besoin de reset le cluster

View File

@ -12,3 +12,8 @@ gitea:
replicacount: 3
servicetype: LoadBalancer
externalips: 192.168.230.10
metallb:
iprange: 192.168.230.100-192.168.230.200
purelb:
pool: 192.168.230.100-192.168.230.200
subnet: 192.168.230.0/24

View File

@ -1,6 +1,6 @@
---
- hosts: control-plane
name: "Install OpenEBS Storagee provider"
name: "Install OpenEBS Storage provider"
roles:
- role: openebs
run_once: True

View File

@ -5,7 +5,20 @@
src: values.yaml.j2
dest: /tmp/values.yaml
# TODO: Add a wait_for condition to test if ingress-nginx avalaible
- name: get Ingress-nginx internal IP
shell: >
kubectl get svc -A | grep ingress-nginx-controller-admission | awk '{print $4}'
register: nginxip
delay: 10
# Add a wait_for condition to test if ingress-nginx available
- name: Attente du démarrage d'Ingress Nginx
retries: 6
wait_for:
host: "{{ nginxip.stdout }}"
port: 443
delay: 10
state: present
- name: install gitea via helm
shell: |
@ -13,14 +26,39 @@
helm repo update
helm install -f /tmp/values.yaml gitea gitea-charts/gitea
- name: Attente du démarrage de Gitea
- name: Is pod gitea-0 running
shell: kubectl get pods | grep gitea-0 | awk '{print $3}'
register: gitearunning
until: "'Running' in gitearunning.stdout"
retries: 6
delay: 10
- name: get Gitea http IP
shell: >
kubectl get svc | grep gitea-http | awk '{print $4}'
register: giteaip
- name: Affiche l'IP du serveur http Gitea
debug:
msg: IP du serveur http Gitea {{ giteaip.stdout }}
- name: get Gitea http port
shell: >
kubectl get svc | grep gitea-http | awk '{print $5}' | sed 's/3000://;s/\/TCP//'
register: giteaport
- name: Affiche le port du serveur http Gitea
debug:
msg: Port du serveur http Gitea {{ giteaport.stdout }}
- name: Attente du démarrage du serveur web Gitea
retries: 6
wait_for:
host: "{{ gitea.externalips }}"
port: 3000
host: "{{ giteaip.stdout }}"
port: "{{ giteaport.stdout }}"
delay: 10
state: present
- name: Affiche l'URL du serveur Gitea
debug:
msg: Le serveur Gitea est operationel est joignable sur http://{{ gitea.externalips }}:3000
msg: Le serveur Gitea est opérationnel et joignable sur http://{{ giteaip.stdout }}:{{ giteaport.stdout }}

View File

@ -95,7 +95,7 @@ service:
nodePort:
externalTrafficPolicy:
externalIPs:
- {{ gitea.externalips }}
# - {{ gitea.externalips }}
ipFamilyPolicy:
ipFamilies:
loadBalancerSourceRanges: []
@ -114,13 +114,13 @@ service:
## @param service.ssh.annotations SSH service annotations
ssh:
type: {{ gitea.servicetype }}
port: 22
port: 2222
clusterIP: None
loadBalancerIP:
nodePort:
externalTrafficPolicy:
externalIPs:
- {{ gitea.externalips }}
# - {{ gitea.externalips }}
ipFamilyPolicy:
ipFamilies:
hostPort:
@ -344,12 +344,12 @@ gitea:
# customEmailUrl:
## @param gitea.config Configuration for the Gitea server,ref: [config-cheat-sheet](https://docs.gitea.io/en-us/config-cheat-sheet/)
config: {}
config:
# APP_NAME: "Gitea: Git with a cup of tea"
# RUN_MODE: dev
#
# server:
# SSH_PORT: 22
server:
SSH_PORT: 2222
#
# security:
# PASSWORD_COMPLEXITY: spec

View File

@ -1,11 +1,19 @@
- name: Enable strictARP on kube-proxy
shell: |
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl apply -f - -n kube-system
- name: install MetalLB
shell: |
helm repo add metallb https://metallb.github.io/metallb
helm install metallb metallb/metallb --create-namespace --namespace metallb-system
- name: copy MetalLB config
become: yes
template:
src: metallb-config.yaml
dest: /etc/metallb-config.yaml
- name: install MetalLB
command: kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.9/config/manifests/metallb-native.yaml
dest: /tmp/metallb-config.yaml
- name: install MetalLB config
command: kubectl apply -f /etc/metallb-config.yaml
command: kubectl apply -f /tmp/metallb-config.yaml

View File

@ -2,11 +2,11 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: nat
name: first-pool
namespace: metallb-system
spec:
addresses:
- "{{ metalrange }}"
- "{{ metallb.iprange }}"
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement

View File

@ -0,0 +1,6 @@
configInline:
address-pools:
- name: default
protocol: layer2
addresses:
- {{ metallb.iprange }}

View File

@ -7,7 +7,7 @@
- name: install PureLB
command: kubectl apply -f https://gitlab.com/api/v4/projects/purelb%2Fpurelb/packages/generic/manifest/0.0.1/purelb-complete.yaml
retries: 2
delays: 5
delay: 5
- name: install PureLB config
command: kubectl apply -f /etc/purelb-config.yaml

View File

@ -6,6 +6,6 @@ metadata:
spec:
local:
v4pools:
- subnet: '{{ lbsubnet }}'
pool: '{{ lbpool }}'
aggregation: /25
- subnet: '{{ purelb.subnet }}'
pool: '{{ purelb.pool }}'
aggregation: default

View File

@ -6,6 +6,7 @@
- import_playbook: kubernetes-worker.yaml
- import_playbook: openebs.yaml
- import_playbook: ingress-nginx.yaml
- import_playbook: purelb.yaml
#- import_playbook: metallb.yaml
- import_playbook: helm.yaml
- import_playbook: gitea-helm.yaml
#- import_playbook: purelb.yaml