Initial commit

master
guppy 2023-05-10 07:46:51 +02:00
commit 7828ed0fe7
33 changed files with 6330 additions and 0 deletions

12
bootstrap.yaml 100644
View File

@ -0,0 +1,12 @@
---
# Bootstrap every inventory host: base setup, container runtime, and the
# Kubernetes packages, each provided by its own role.
- hosts: all
  name: "Bootstrapping hosts"
  roles:
    - name: base setup
      role: commons
    - name: installing container runtime
      role: container-runtime
    - name: installing Kubernetes packages
      role: kubernetes-packages

6
gitea.yaml 100644
View File

@ -0,0 +1,6 @@
---
# Deploy Gitea; the role only needs to run on one control-plane host.
- hosts: control-plane
  name: "Install Gitea"
  roles:
    - role: gitea
      # lowercase boolean per YAML 1.2 / yamllint "truthy" rule
      run_once: true

10
group_vars/all 100644
View File

@ -0,0 +1,10 @@
---
# Inventory-wide variables shared by every play and role.
ansible_user: ansuser
# Cluster CIDRs passed to `kubeadm init` (pod network / service network).
pods_subnet: "192.168.64.0/20"
services_subnet: "10.96.0.0/12"
# Distribution and Kubernetes release strings used to build the OBS
# (download.opensuse.org) repository URLs in the container-runtime role.
os: "Debian_11"
version: "1.27"
dns: "192.168.230.254"
# Address range handed to the bare-metal load balancer (MetalLB/PureLB).
metalrange: "192.168.230.100-192.168.230.200"
gitea:
  fqdn: gitea.domain.test

6
helm.yaml 100644
View File

@ -0,0 +1,6 @@
---
# Install Helm; the role only needs to run on one control-plane host.
- hosts: control-plane
  name: "Install Helm"
  roles:
    - role: helm
      # lowercase boolean per YAML 1.2 / yamllint "truthy" rule
      run_once: true

46
hostname.yaml 100644
View File

@ -0,0 +1,46 @@
---
# Rename every node to "K8s-<group><index>" (index is the host's 1-based
# position within its first group) and regenerate /etc/hosts on all hosts.
- hosts: control-plane
  name: "rename CP"
  tasks:
    - name: compute 1-based index of this host within its group
      set_fact:
        hostnameid: "{{ groups[group_names[0]].index(inventory_hostname) | int + 1 }}"
    - name: show the hostname that will be applied
      debug:
        msg: "K8s-{{ group_names[0] }}{{ hostnameid }}"
    - name: set hostname
      become: true
      hostname:
        name: "K8s-{{ group_names[0] }}{{ hostnameid }}"
        use: systemd
- hosts: worker-node
  name: "rename WN"
  tasks:
    - name: compute 1-based index of this host within its group
      set_fact:
        hostnameid: "{{ groups[group_names[0]].index(inventory_hostname) | int + 1 }}"
    - name: show the hostname that will be applied
      debug:
        msg: "K8s-{{ group_names[0] }}{{ hostnameid }}"
    - name: set hostname
      become: true
      hostname:
        name: "K8s-{{ group_names[0] }}{{ hostnameid }}"
        use: systemd
- hosts: all
  name: "change host file"
  tasks:
    # NOTE(review): relies on gathered facts exposing an 'eth0' interface;
    # hosts with a different primary NIC name will fail here — confirm.
    - name: generate file
      become: true
      blockinfile:
        backup: true
        path: /etc/hosts
        block: |
          {% for host in groups['all'] %}
          {{ hostvars[host]['ansible_facts']['eth0']['ipv4']['address'] }} {{ hostvars[host]['ansible_facts']['fqdn'] }} {{ hostvars[host]['ansible_facts']['hostname'] }}
          {% endfor %}

View File

@ -0,0 +1,109 @@
---
# Initialise the Kubernetes control plane with kubeadm, install the Calico
# CNI, and export the join token / CA hash / API endpoint to the dummy host
# K8S_TOKEN_HOLDER so the worker-join playbook can read them via hostvars.
# NOTE(review): with more than one control-plane host, every host in the
# group runs `kubeadm init` — confirm this playbook targets a single host
# or add run_once/first-host handling.
- hosts: control-plane
  name: "Initialize Kubernetes"
  tasks:
    - name: kubeadm config
      become: true
      command: kubeadm config images pull
      register: kubeadmconfig
    - name: show pulled control-plane images
      debug: var=kubeadmconfig.stdout_lines
    - name: crictl image
      become: true
      command: crictl image
      register: crictl
      changed_when: false  # read-only listing
    - name: show images known to CRI-O
      debug: var=crictl.stdout_lines
    - name: kubeadm init
      become: true
      command: >-
        kubeadm init
        --pod-network-cidr={{ pods_subnet }}
        --service-cidr={{ services_subnet }}
        --apiserver-advertise-address={{ ansible_default_ipv4.address }}
        --cri-socket=unix:///var/run/crio/crio.sock
      register: kubeadminit
    - name: show kubeadm init output
      debug: var=kubeadminit.stdout_lines
    - name: Set Up Kubernetes credential
      block:
        - name: create ~/.kube folder
          file:
            path: $HOME/.kube
            state: directory
            mode: '0755'
        - name: get current user
          command: whoami
          register: c_user
          changed_when: false
        - name: get current group
          command: id -g
          register: c_group
          changed_when: false
        - name: Copy admin.conf to .kube
          become: true
          copy:
            src: /etc/kubernetes/admin.conf
            dest: /home/{{ c_user.stdout }}/.kube/config
            remote_src: true
            owner: "{{ c_user.stdout }}"
            group: "{{ c_group.stdout }}"
            mode: '0600'
    - name: kubectl cluster-info
      command: kubectl cluster-info
      register: kubectl
      changed_when: false
    - name: show cluster-info
      debug: var=kubectl.stdout_lines
    - name: get calico conf
      template:
        src: calico.yaml.j2
        dest: $HOME/calico.yaml
    - name: apply calico conf1
      command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/tigera-operator.yaml
      register: apply1
    - name: show tigera-operator result
      debug: var=apply1.stdout_lines
    - name: apply calico conf2
      command: kubectl create -f https://raw.githubusercontent.com/projectcalico/calico/v3.25.1/manifests/custom-resources.yaml
      register: apply2
    - name: show custom-resources result
      debug: var=apply2.stdout_lines
    - name: apply calico conf3
      command: kubectl apply -f calico.yaml
      register: apply3
    - name: show calico apply result
      debug: var=apply3.stdout_lines
    - name: show pods
      command: kubectl get pods --all-namespaces
      register: pods
      changed_when: false
    - name: list all pods
      debug: var=pods.stdout_lines
    # The next three tasks harvest the values a worker needs for
    # `kubeadm join` (token, CA cert hash, API server host:port).
    - name: "Cluster token"
      shell: kubeadm token list | cut -d ' ' -f1 | sed -n '2p'
      register: K8S_TOKEN
      changed_when: false
    - name: "CA Hash"
      shell: openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
      register: K8S_MASTER_CA_HASH
      changed_when: false
    - name: "IP and port"
      # sed strips ANSI colour codes from kubectl output before cutting
      shell: kubectl cluster-info | sed "s,\x1B\[[0-9;]*[a-zA-Z],,g" | cut -d ' ' -f 7 | sed -n '1p' | cut -c 9-
      register: K8S_IP_PORT
      changed_when: false
    - name: "Add K8S Token and Hash to dummy host"
      add_host:
        name: "K8S_TOKEN_HOLDER"
        token: "{{ K8S_TOKEN.stdout }}"
        hash: "{{ K8S_MASTER_CA_HASH.stdout }}"
        ipport: "{{ K8S_IP_PORT.stdout }}"
    - name: show exported join token
      debug:
        msg: "[Master] K8S_TOKEN_HOLDER K8S token is {{ hostvars['K8S_TOKEN_HOLDER']['token'] }}"
    - name: show exported CA hash
      debug:
        msg: "[Master] K8S_TOKEN_HOLDER K8S Hash is {{ hostvars['K8S_TOKEN_HOLDER']['hash'] }}"
    - name: show exported API endpoint
      debug:
        msg: "[Master] K8S_TOKEN_HOLDER K8S IP and port is {{ hostvars['K8S_TOKEN_HOLDER']['ipport'] }}"

View File

@ -0,0 +1,13 @@
---
# Tear the cluster down by running the kubeadm-reset role on the worker
# nodes first, then on the control-plane nodes.
- name: "Resetting worker nodes"
  hosts: worker-node
  roles:
    - role: kubeadm-reset
      name: run kubeadm reset
- name: "Resetting control plane nodes"
  hosts: control-plane
  roles:
    - role: kubeadm-reset
      name: run kubeadm reset

View File

@ -0,0 +1,23 @@
---
# Join each worker node to the cluster using the token / CA hash / endpoint
# exported by the control-plane play via the K8S_TOKEN_HOLDER dummy host.
- hosts: worker-node
  name: add the worker
  tasks:
    - name: show join token
      debug:
        msg: "[Worker] K8S_TOKEN_HOLDER K8S token is {{ hostvars['K8S_TOKEN_HOLDER']['token'] }}"
    - name: show CA cert hash
      debug:
        msg: "[Worker] K8S_TOKEN_HOLDER K8S Hash is {{ hostvars['K8S_TOKEN_HOLDER']['hash'] }}"
    - name: show API endpoint
      debug:
        msg: "[Worker] K8S_TOKEN_HOLDER K8S IP and Port is {{ hostvars['K8S_TOKEN_HOLDER']['ipport'] }}"
    - name: "Kubeadm join"
      become: true
      shell: >
        kubeadm join --token={{ hostvars['K8S_TOKEN_HOLDER']['token'] }}
        --discovery-token-ca-cert-hash sha256:{{ hostvars['K8S_TOKEN_HOLDER']['hash'] }}
        {{ hostvars['K8S_TOKEN_HOLDER']['ipport'] }}

6
metallb.yaml 100644
View File

@ -0,0 +1,6 @@
---
# Install MetalLB; the role only needs to run on one control-plane host.
- hosts: control-plane
  name: "Install MetalLB"
  roles:
    - role: metallb
      # lowercase boolean per YAML 1.2 / yamllint "truthy" rule
      run_once: true

15
openebs.yaml 100644
View File

@ -0,0 +1,15 @@
---
# Install the OpenEBS lite operator and apply the local configuration.
- hosts: control-plane
  name: "Start OpenEBS"
  tasks:
    # command (not shell) — no pipes/redirects/globbing are used
    - name: Install openEBS
      command: kubectl apply -f https://openebs.github.io/charts/openebs-operator-lite.yaml
    - name: Copy conf
      become: true
      template:
        src: openebs.yaml
        dest: /etc/openebs.yaml
    - name: Apply Conf
      command: kubectl apply -f /etc/openebs.yaml

3
pre-start.yaml 100644
View File

@ -0,0 +1,3 @@
---
# Entry-point playbook: bootstrap packages and roles first, then apply
# hostnames and regenerate /etc/hosts.
- import_playbook: bootstrap.yaml
- import_playbook: hostname.yaml

6
purelb.yml 100644
View File

@ -0,0 +1,6 @@
---
# Install PureLB; the role only needs to run on one control-plane host.
- hosts: control-plane
  name: "Install PureLB"
  roles:
    - role: purelb
      # lowercase boolean per YAML 1.2 / yamllint "truthy" rule
      run_once: true

View File

@ -0,0 +1,37 @@
---
# Tasks for the "commons" role: base packages plus the kernel/sysctl
# prerequisites for Kubernetes (swap off, br_netfilter, IP forwarding).
- name: install common packages
  become: true
  apt:
    pkg:
      - apt-transport-https
      - curl
      - gnupg2
    update_cache: true
- name: disable swap
  become: true
  command: swapoff -a
- name: disable swap in fstab
  become: true
  replace:
    path: /etc/fstab
    regexp: '^([^#].*?\sswap\s+sw\s+.*)$'
    replace: '# \1'
# NOTE(review): modprobe only loads the module for the current boot; a
# /etc/modules-load.d entry would make it persistent — confirm intent.
- name: enable br_netfilter
  become: true
  command: modprobe br_netfilter
- name: ensure iptables enabled
  become: true
  template:
    src: k8s.iptables.conf
    dest: /etc/sysctl.d/k8s.iptables.conf
- name: enable port forward
  become: true
  sysctl:
    name: net.ipv4.ip_forward
    value: "1"  # the sysctl module expects a string value
    sysctl_set: true
    reload: true

View File

@ -0,0 +1,2 @@
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

View File

@ -0,0 +1,68 @@
---
# Tasks for the container-runtime role: add the openSUSE "kubic" apt
# repositories for CRI-O, install the packages, template the CRI-O and CNI
# configuration, and restart the crio service.
- name: install container runtime
  become: true
  block:
    - name: Create a /usr/share/keyrings
      ansible.builtin.file:
        path: /usr/share/keyrings
        state: directory
        mode: '0755'
    - name: get key libcontainer
      get_url:
        url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ os }}/Release.key
        dest: /usr/share/keyrings/libcontainer.key
    # gpg --dearmor converts the ASCII-armored key; `creates` keeps the
    # shell task idempotent.
    - name: get gpg libcontainer
      shell: cat /usr/share/keyrings/libcontainer.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-archive-keyring.gpg
      args:
        chdir: /usr/share/keyrings
        creates: /usr/share/keyrings/libcontainers-archive-keyring.gpg
    - name: get key crio
      get_url:
        url: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ version }}/{{ os }}/Release.key
        dest: /usr/share/keyrings/crio.key
    - name: get gpg crio
      shell: cat /usr/share/keyrings/crio.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg
      args:
        chdir: /usr/share/keyrings
        creates: /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg
    - name: libcontainer | apt source
      ansible.builtin.apt_repository:
        repo: "deb [signed-by=/usr/share/keyrings/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/{{ os }}/ /"
        state: present
    - name: crio | apt source
      ansible.builtin.apt_repository:
        repo: "deb [signed-by=/usr/share/keyrings/libcontainers-crio-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/{{ version }}/{{ os }}/ /"
        state: present
# Renamed from "install container runtime" to avoid a duplicate task name
# with the block above.
- name: install container runtime packages
  ansible.builtin.apt:
    pkg:
      - cri-o
      - cri-o-runc
      - cri-tools
  become: true
- name: change crio.conf
  become: true
  template:
    src: etc/crio/crio.conf
    dest: /etc/crio/crio.conf
- name: change 100-crio-bridge.conflist
  become: true
  template:
    src: etc/cni/net.d/100-crio-bridge.conflist.j2
    dest: /etc/cni/net.d/100-crio-bridge.conflist
- name: Restart service crio
  become: true
  ansible.builtin.systemd:
    enabled: true
    state: restarted
    daemon_reload: true
    name: crio

View File

@ -0,0 +1,24 @@
{
"cniVersion": "1.0.0",
"name": "crio",
"plugins": [
{
"type": "bridge",
"bridge": "cni0",
"isGateway": true,
"ipMasq": true,
"hairpinMode": true,
"ipam": {
"type": "host-local",
"routes": [
{ "dst": "0.0.0.0/0" },
{ "dst": "::/0" }
],
"ranges": [
[{ "subnet": "{{ pods_subnet }}" }],
[{ "subnet": "1100:200::/24" }]
]
}
}
]
}

View File

@ -0,0 +1,634 @@
# The CRI-O configuration file specifies all of the available configuration
# options and command-line flags for the crio(8) OCI Kubernetes Container Runtime
# daemon, but in a TOML format that can be more easily modified and versioned.
#
# Please refer to crio.conf(5) for details of all configuration options.
# CRI-O supports partial configuration reload during runtime, which can be
# done by sending SIGHUP to the running process. Currently supported options
# are explicitly mentioned with: 'This option supports live configuration
# reload'.
# CRI-O reads its storage defaults from the containers-storage.conf(5) file
# located at /etc/containers/storage.conf. Modify this storage configuration if
# you want to change the system's defaults. If you want to modify storage just
# for CRI-O, you can change the storage configuration options here.
[crio]
# Path to the "root directory". CRI-O stores all of its data, including
# containers images, in this directory.
# root = "/home/abuild/.local/share/containers/storage"
# Path to the "run directory". CRI-O stores all of its state in this directory.
# runroot = "/tmp/containers-user-399/containers"
# Storage driver used to manage the storage of images and containers. Please
# refer to containers-storage.conf(5) to see all available storage drivers.
# storage_driver = "vfs"
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
# storage_option = [
# ]
# The default log directory where all logs will go unless directly specified by
# the kubelet. The log directory specified must be an absolute directory.
# log_dir = "/var/log/crio/pods"
# Location for CRI-O to lay down the temporary version file.
# It is used to check if crio wipe should wipe containers, which should
# always happen on a node reboot
# version_file = "/var/run/crio/version"
# Location for CRI-O to lay down the persistent version file.
# It is used to check if crio wipe should wipe images, which should
# only happen when CRI-O has been upgraded
# version_file_persist = ""
# InternalWipe is whether CRI-O should wipe containers and images after a reboot when the server starts.
# If set to false, one must use the external command 'crio wipe' to wipe the containers and images in these situations.
# internal_wipe = true
# Location for CRI-O to lay down the clean shutdown file.
# It is used to check whether crio had time to sync before shutting down.
# If not found, crio wipe will clear the storage directory.
# clean_shutdown_file = "/var/lib/crio/clean.shutdown"
# The crio.api table contains settings for the kubelet/gRPC interface.
[crio.api]
# Path to AF_LOCAL socket on which CRI-O will listen.
# listen = "/var/run/crio/crio.sock"
# IP address on which the stream server will listen.
# stream_address = "127.0.0.1"
# The port on which the stream server will listen. If the port is set to "0", then
# CRI-O will allocate a random free port number.
# stream_port = "0"
# Enable encrypted TLS transport of the stream server.
# stream_enable_tls = false
# Length of time until open streams terminate due to lack of activity
# stream_idle_timeout = ""
# Path to the x509 certificate file used to serve the encrypted stream. This
# file can change, and CRI-O will automatically pick up the changes within 5
# minutes.
# stream_tls_cert = ""
# Path to the key file used to serve the encrypted stream. This file can
# change and CRI-O will automatically pick up the changes within 5 minutes.
# stream_tls_key = ""
# Path to the x509 CA(s) file used to verify and authenticate client
# communication with the encrypted stream. This file can change and CRI-O will
# automatically pick up the changes within 5 minutes.
# stream_tls_ca = ""
# Maximum grpc send message size in bytes. If not set or <=0, then CRI-O will default to 80 * 1024 * 1024.
# grpc_max_send_msg_size = 83886080
# Maximum grpc receive message size. If not set or <= 0, then CRI-O will default to 80 * 1024 * 1024.
# grpc_max_recv_msg_size = 83886080
# The crio.runtime table contains settings pertaining to the OCI runtime used
# and options for how to set up and manage the OCI runtime.
[crio.runtime]
# A list of ulimits to be set in containers by default, specified as
# "<ulimit name>=<soft limit>:<hard limit>", for example:
# "nofile=1024:2048"
# If nothing is set here, settings will be inherited from the CRI-O daemon
# default_ulimits = [
# ]
# If true, the runtime will not use pivot_root, but instead use MS_MOVE.
# no_pivot = false
# decryption_keys_path is the path where the keys required for
# image decryption are stored. This option supports live configuration reload.
# decryption_keys_path = "/etc/crio/keys/"
# Path to the conmon binary, used for monitoring the OCI runtime.
# Will be searched for using $PATH if empty.
# This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
# conmon = ""
# Cgroup setting for conmon
# This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorCgroup.
# conmon_cgroup = ""
# Environment variable list for the conmon process, used for passing necessary
# environment variables to conmon or the runtime.
# This option is currently deprecated, and will be replaced with RuntimeHandler.MonitorEnv.
# conmon_env = [
# ]
# Additional environment variables to set for all the
# containers. These are overridden if set in the
# container image spec or in the container runtime configuration.
# default_env = [
# ]
# If true, SELinux will be used for pod separation on the host.
# selinux = false
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime. If not specified, then the internal default seccomp profile
# will be used. This option supports live configuration reload.
# seccomp_profile = ""
# Changes the meaning of an empty seccomp profile. By default
# (and according to CRI spec), an empty profile means unconfined.
# This option tells CRI-O to treat an empty profile as the default profile,
# which might increase security.
# This option is currently deprecated,
# and will be replaced by the SeccompDefault FeatureGate in Kubernetes.
# seccomp_use_default_when_empty = true
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default". This profile only takes effect if the user
# does not specify a profile via the Kubernetes Pod's metadata annotation. If
# the profile is set to "unconfined", then this equals to disabling AppArmor.
# This option supports live configuration reload.
# apparmor_profile = "crio-default"
# Path to the blockio class configuration file for configuring
# the cgroup blockio controller.
# blockio_config_file = ""
# Used to change irqbalance service config file path which is used for configuring
# irqbalance daemon.
# irqbalance_config_file = "/etc/sysconfig/irqbalance"
# irqbalance_config_restore_file allows to set a cpu mask CRI-O should
# restore as irqbalance config at startup. Set to empty string to disable this flow entirely.
# By default, CRI-O manages the irqbalance configuration to enable dynamic IRQ pinning.
# irqbalance_config_restore_file = "/etc/sysconfig/orig_irq_banned_cpus"
# Path to the RDT configuration file for configuring the resctrl pseudo-filesystem.
# This option supports live configuration reload.
# rdt_config_file = ""
# Cgroup management implementation used for the runtime.
# cgroup_manager = "systemd"
# Specify whether the image pull must be performed in a separate cgroup.
# separate_pull_cgroup = ""
# List of default capabilities for containers. If it is empty or commented out,
# only the capabilities defined in the containers json file by the user/kube
# will be added.
# default_capabilities = [
# "CHOWN",
# "DAC_OVERRIDE",
# "FSETID",
# "FOWNER",
# "SETGID",
# "SETUID",
# "SETPCAP",
# "NET_BIND_SERVICE",
# "KILL",
# ]
# Add capabilities to the inheritable set, as well as the default group of permitted, bounding and effective.
# If capabilities are expected to work for non-root users, this option should be set.
# add_inheritable_capabilities = false
# List of default sysctls. If it is empty or commented out, only the sysctls
# defined in the container json file by the user/kube will be added.
# default_sysctls = [
# ]
# List of devices on the host that a
# user can specify with the "io.kubernetes.cri-o.Devices" allowed annotation.
# allowed_devices = [
# "/dev/fuse",
# ]
# List of additional devices. specified as
# "<device-on-host>:<device-on-container>:<permissions>", for example: "--device=/dev/sdc:/dev/xvdc:rwm".
# If it is empty or commented out, only the devices
# defined in the container json file by the user/kube will be added.
# additional_devices = [
# ]
# List of directories to scan for CDI Spec files.
# cdi_spec_dirs = [
# "/etc/cdi",
# "/var/run/cdi",
# ]
# Change the default behavior of setting container devices uid/gid from CRI's
# SecurityContext (RunAsUser/RunAsGroup) instead of taking host's uid/gid.
# Defaults to false.
# device_ownership_from_security_context = false
# Path to OCI hooks directories for automatically executed hooks. If one of the
# directories does not exist, then CRI-O will automatically skip them.
# hooks_dir = [
# "/usr/share/containers/oci/hooks.d",
# ]
# Path to the file specifying the defaults mounts for each container. The
# format of the config is /SRC:/DST, one mount per line. Notice that CRI-O reads
# its default mounts from the following two files:
#
# 1) /etc/containers/mounts.conf (i.e., default_mounts_file): This is the
# override file, where users can either add in their own default mounts, or
# override the default mounts shipped with the package.
#
# 2) /usr/share/containers/mounts.conf: This is the default file read for
# mounts. If you want CRI-O to read from a different, specific mounts file,
# you can change the default_mounts_file. Note, if this is done, CRI-O will
# only add mounts it finds in this file.
#
# default_mounts_file = ""
# Maximum number of processes allowed in a container.
# This option is deprecated. The Kubelet flag '--pod-pids-limit' should be used instead.
# pids_limit = 0
# Maximum sized allowed for the container log file. Negative numbers indicate
# that no size limit is imposed. If it is positive, it must be >= 8192 to
# match/exceed conmon's read buffer. The file is truncated and re-opened so the
# limit is never exceeded. This option is deprecated. The Kubelet flag '--container-log-max-size' should be used instead.
# log_size_max = -1
# Whether container output should be logged to journald in addition to the kuberentes log file
# log_to_journald = false
# Path to directory in which container exit files are written to by conmon.
# container_exits_dir = "/var/run/crio/exits"
# Path to directory for container attach sockets.
# container_attach_socket_dir = "/var/run/crio"
# The prefix to use for the source of the bind mounts.
# bind_mount_prefix = ""
# If set to true, all containers will run in read-only mode.
# read_only = false
# Changes the verbosity of the logs based on the level it is set to. Options
# are fatal, panic, error, warn, info, debug and trace. This option supports
# live configuration reload.
# log_level = "info"
# Filter the log messages by the provided regular expression.
# This option supports live configuration reload.
# log_filter = ""
# The UID mappings for the user namespace of each container. A range is
# specified in the form containerUID:HostUID:Size. Multiple ranges must be
# separated by comma.
# uid_mappings = ""
# The GID mappings for the user namespace of each container. A range is
# specified in the form containerGID:HostGID:Size. Multiple ranges must be
# separated by comma.
# gid_mappings = ""
# If set, CRI-O will reject any attempt to map host UIDs below this value
# into user namespaces. A negative value indicates that no minimum is set,
# so specifying mappings will only be allowed for pods that run as UID 0.
# minimum_mappable_uid = -1
# If set, CRI-O will reject any attempt to map host GIDs below this value
# into user namespaces. A negative value indicates that no minimum is set,
# so specifying mappings will only be allowed for pods that run as UID 0.
# minimum_mappable_gid = -1
# The minimal amount of time in seconds to wait before issuing a timeout
# regarding the proper termination of the container. The lowest possible
# value is 30s, whereas lower values are not considered by CRI-O.
# ctr_stop_timeout = 30
# drop_infra_ctr determines whether CRI-O drops the infra container
# when a pod does not have a private PID namespace, and does not use
# a kernel separating runtime (like kata).
# It requires manage_ns_lifecycle to be true.
# drop_infra_ctr = true
# infra_ctr_cpuset determines what CPUs will be used to run infra containers.
# You can use linux CPU list format to specify desired CPUs.
# To get better isolation for guaranteed pods, set this parameter to be equal to kubelet reserved-cpus.
# infra_ctr_cpuset = ""
# The directory where the state of the managed namespaces gets tracked.
# Only used when manage_ns_lifecycle is true.
# namespaces_dir = "/var/run"
# pinns_path is the path to find the pinns binary, which is needed to manage namespace lifecycle
# pinns_path = ""
# Globally enable/disable CRIU support which is necessary to
# checkpoint and restore container or pods (even if CRIU is found in $PATH).
# enable_criu_support = false
# Enable/disable the generation of the container,
# sandbox lifecycle events to be sent to the Kubelet to optimize the PLEG
# enable_pod_events = false
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# default_runtime is the _name_ of the OCI runtime to be used as the default.
# The name is matched against the runtimes map below.
# default_runtime = "runc"
# A list of paths that, when absent from the host,
# will cause a container creation to fail (as opposed to the current behavior being created as a directory).
# This option is to protect from source locations whose existence as a directory could jepordize the health of the node, and whose
# creation as a file is not desired either.
# An example is /etc/hostname, which will cause failures on reboot if it's created as a directory, but often doesn't exist because
# the hostname is being managed dynamically.
# absent_mount_sources_to_reject = [
# ]
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime handler provided by the CRI.
# If no runtime handler is provided, the "default_runtime" will be used.
# Each entry in the table should follow the format:
#
# [crio.runtime.runtimes.runtime-handler]
# runtime_path = "/path/to/the/executable"
# runtime_type = "oci"
# runtime_root = "/path/to/the/root"
# monitor_path = "/path/to/container/monitor"
# monitor_cgroup = "/cgroup/path"
# monitor_exec_cgroup = "/cgroup/path"
# monitor_env = []
# privileged_without_host_devices = false
# allowed_annotations = []
# Where:
# - runtime-handler: Name used to identify the runtime.
# - runtime_path (optional, string): Absolute path to the runtime executable in
# the host filesystem. If omitted, the runtime-handler identifier should match
# the runtime executable name, and the runtime executable should be placed
# in $PATH.
# - runtime_type (optional, string): Type of runtime, one of: "oci", "vm". If
# omitted, an "oci" runtime is assumed.
# - runtime_root (optional, string): Root directory for storage of containers
# state.
# - runtime_config_path (optional, string): the path for the runtime configuration
# file. This can only be used with when using the VM runtime_type.
# - privileged_without_host_devices (optional, bool): an option for restricting
# host devices from being passed to privileged containers.
# - allowed_annotations (optional, array of strings): an option for specifying
# a list of experimental annotations that this runtime handler is allowed to process.
# The currently recognized values are:
# "io.kubernetes.cri-o.userns-mode" for configuring a user namespace for the pod.
# "io.kubernetes.cri-o.cgroup2-mount-hierarchy-rw" for mounting cgroups writably when set to "true".
# "io.kubernetes.cri-o.Devices" for configuring devices for the pod.
# "io.kubernetes.cri-o.ShmSize" for configuring the size of /dev/shm.
# "io.kubernetes.cri-o.UnifiedCgroup.$CTR_NAME" for configuring the cgroup v2 unified block for a container.
# "io.containers.trace-syscall" for tracing syscalls via the OCI seccomp BPF hook.
# "io.kubernetes.cri.rdt-class" for setting the RDT class of a container
# - monitor_path (optional, string): The path of the monitor binary. Replaces
# deprecated option "conmon".
# - monitor_cgroup (optional, string): The cgroup the container monitor process will be put in.
# Replaces deprecated option "conmon_cgroup".
# - monitor_exec_cgroup (optional, string): If set to "container", indicates exec probes
# should be moved to the container's cgroup
# - monitor_env (optional, array of strings): Environment variables to pass to the montior.
# Replaces deprecated option "conmon_env".
#
# Using the seccomp notifier feature:
#
# This feature can help you to debug seccomp related issues, for example if
# blocked syscalls (permission denied errors) have negative impact on the workload.
#
# To be able to use this feature, configure a runtime which has the annotation
# "io.kubernetes.cri-o.seccompNotifierAction" in the allowed_annotations array.
#
# It also requires at least runc 1.1.0 or crun 0.19 which support the notifier
# feature.
#
# If everything is setup, CRI-O will modify chosen seccomp profiles for
# containers if the annotation "io.kubernetes.cri-o.seccompNotifierAction" is
# set on the Pod sandbox. CRI-O will then get notified if a container is using
# a blocked syscall and then terminate the workload after a timeout of 5
# seconds if the value of "io.kubernetes.cri-o.seccompNotifierAction=stop".
#
# This also means that multiple syscalls can be captured during that period,
# while the timeout will get reset once a new syscall has been discovered.
#
# This also means that the Pods "restartPolicy" has to be set to "Never",
# otherwise the kubelet will restart the container immediately.
#
# Please be aware that CRI-O is not able to get notified if a syscall gets
# blocked based on the seccomp defaultAction, which is a general runtime
# limitation.
# [crio.runtime.runtimes.runc]
# runtime_path = ""
# runtime_type = "oci"
# runtime_root = "/run/runc"
# runtime_config_path = ""
# monitor_path = ""
# monitor_cgroup = "system.slice"
# monitor_exec_cgroup = ""
# monitor_env = [
# "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
# ]
# allowed_annotations = [
# "io.containers.trace-syscall",
# ]
# privileged_without_host_devices = false
# The workloads table defines ways to customize containers with different resources
# that work based on annotations, rather than the CRI.
# Note, the behavior of this table is EXPERIMENTAL and may change at any time.
# Each workload, has a name, activation_annotation, annotation_prefix and set of resources it supports mutating.
# The currently supported resources are "cpu" (to configure the cpu shares) and "cpuset" to configure the cpuset.
# Each resource can have a default value specified, or be empty.
# For a container to opt-into this workload, the pod should be configured with the annotation $activation_annotation (key only, value is ignored).
# To customize per-container, an annotation of the form $annotation_prefix.$resource/$ctrName = "value" can be specified
# signifying for that resource type to override the default value.
# If the annotation_prefix is not present, every container in the pod will be given the default values.
# Example:
# [crio.runtime.workloads.workload-type]
# activation_annotation = "io.crio/workload"
# annotation_prefix = "io.crio.workload-type"
# [crio.runtime.workloads.workload-type.resources]
# cpuset = 0
# cpushares = "0-1"
# Where:
# The workload name is workload-type.
# To specify, the pod must have the "io.crio.workload" annotation (this is a precise string match).
# This workload supports setting cpuset and cpu resources.
# annotation_prefix is used to customize the different resources.
# To configure the cpu shares a container gets in the example above, the pod would have to have the following annotation:
# "io.crio.workload-type/$container_name = {"cpushares": "value"}"
# hostnetwork_disable_selinux determines whether
# SELinux should be disabled within a pod when it is running in the host network namespace
# Default value is set to true
# hostnetwork_disable_selinux = true
# The crio.image table contains settings pertaining to the management of OCI images.
#
# CRI-O reads its configured registries defaults from the system wide
# containers-registries.conf(5) located in /etc/containers/registries.conf. If
# you want to modify just CRI-O, you can change the registries configuration in
# this file. Otherwise, leave insecure_registries and registries commented out to
# use the system's defaults from /etc/containers/registries.conf.
[crio.image]
# Default transport for pulling images from a remote container storage.
# default_transport = "docker://"
# The path to a file containing credentials necessary for pulling images from
# secure registries. The file is similar to that of /var/lib/kubelet/config.json
# global_auth_file = ""
# The image used to instantiate infra containers.
# This option supports live configuration reload.
# pause_image = "registry.k8s.io/pause:3.6"
# The path to a file containing credentials specific for pulling the pause_image from
# above. The file is similar to that of /var/lib/kubelet/config.json
# This option supports live configuration reload.
# pause_image_auth_file = ""
# The command to run to have a container stay in the paused state.
# When explicitly set to "", it will fallback to the entrypoint and command
# specified in the pause image. When commented out, it will fallback to the
# default: "/pause". This option supports live configuration reload.
# pause_command = "/pause"
# Path to the file which decides what sort of policy we use when deciding
# whether or not to trust an image that we've pulled. It is not recommended that
# this option be used, as the default behavior of using the system-wide default
# policy (i.e., /etc/containers/policy.json) is most often preferred. Please
# refer to containers-policy.json(5) for more details.
# signature_policy = ""
# List of registries to skip TLS verification for pulling images. Please
# consider configuring the registries via /etc/containers/registries.conf before
# changing them here.
# insecure_registries = [
# ]
# Controls how image volumes are handled. The valid values are mkdir, bind and
# ignore; the latter will ignore volumes entirely.
# image_volumes = "mkdir"
# Temporary directory to use for storing big files
# big_files_temporary_dir = ""
# The crio.network table containers settings pertaining to the management of
# CNI plugins.
[crio.network]
# The default CNI network name to be selected. If not set or "", then
# CRI-O will pick-up the first one found in network_dir.
# cni_default_network = ""
# Path to the directory where CNI configuration files are located.
network_dir = "/etc/cni/net.d/"
# Paths to directories where CNI plugin binaries are located.
plugin_dirs = [
"/opt/cni/bin/",
]
# A necessary configuration for Prometheus based metrics retrieval
[crio.metrics]
# Globally enable or disable metrics support.
# enable_metrics = false
# Specify enabled metrics collectors.
# Per default all metrics are enabled.
# It is possible, to prefix the metrics with "container_runtime_" and "crio_".
# For example, the metrics collector "operations" would be treated in the same
# way as "crio_operations" and "container_runtime_crio_operations".
# metrics_collectors = [
# "operations",
# "operations_latency_microseconds_total",
# "operations_latency_microseconds",
# "operations_errors",
# "image_pulls_by_digest",
# "image_pulls_by_name",
# "image_pulls_by_name_skipped",
# "image_pulls_failures",
# "image_pulls_successes",
# "image_pulls_layer_size",
# "image_layer_reuse",
# "containers_oom_total",
# "containers_oom",
# "processes_defunct",
# "operations_total",
# "operations_latency_seconds",
# "operations_latency_seconds_total",
# "operations_errors_total",
# "image_pulls_bytes_total",
# "image_pulls_skipped_bytes_total",
# "image_pulls_failure_total",
# "image_pulls_success_total",
# "image_layer_reuse_total",
# "containers_oom_count_total",
# "containers_seccomp_notifier_count_total",
# ]
# The port on which the metrics server will listen.
# metrics_port = 9090
# Local socket path to bind the metrics server to
# metrics_socket = ""
# The certificate for the secure metrics server.
# If the certificate is not available on disk, then CRI-O will generate a
# self-signed one. CRI-O also watches for changes of this path and reloads the
# certificate on any modification event.
# metrics_cert = ""
# The certificate key for the secure metrics server.
# Behaves in the same way as the metrics_cert.
# metrics_key = ""
# A necessary configuration for OpenTelemetry trace data exporting
[crio.tracing]
# Globally enable or disable exporting OpenTelemetry traces.
# enable_tracing = false
# Address on which the gRPC trace collector listens on.
# tracing_endpoint = "0.0.0.0:4317"
# Number of samples to collect per million spans. Set to 1000000 to always sample.
# tracing_sampling_rate_per_million = 0
# CRI-O NRI configuration.
[crio.nri]
# Globally enable or disable NRI.
# enable_nri = false
# NRI socket to listen on.
# nri_listen = "/var/run/nri/nri.sock"
# NRI plugin directory to use.
# nri_plugin_dir = "/opt/nri/plugins"
# NRI plugin configuration directory to use.
# nri_plugin_config_dir = "/etc/nri/conf.d"
# Disable connections from externally launched NRI plugins.
# nri_disable_connections = false
# Timeout for a plugin to register itself with NRI.
# nri_plugin_registration_timeout = "5s"
# Timeout for a plugin to handle an NRI request.
# nri_plugin_request_timeout = "2s"
# Necessary information pertaining to container and pod stats reporting.
[crio.stats]
# The number of seconds between collecting pod and container stats.
# If set to 0, the stats are collected on-demand instead.
# stats_collection_period = 0

View File

@ -0,0 +1,26 @@
---
# Gitea role: stage the rendered manifests under /etc/gitea on the host,
# then apply the whole directory with kubectl in one shot.

- name: create /etc/gitea
  become: yes
  file:
    path: /etc/gitea
    state: directory

# Render all three manifests in a single looped task; the .j2 suffix is
# stripped from the network template's destination, matching the originals.
- name: copy gitea manifests
  become: yes
  template:
    src: "{{ item.src }}"
    dest: "/etc/gitea/{{ item.dest }}"
  loop:
    - { src: 010-deployment.yaml, dest: 010-deployment.yaml }
    - { src: 020-volumes.yaml, dest: 020-volumes.yaml }
    - { src: 030-network.yaml.j2, dest: 030-network.yaml }

- name: Apply gitea config
  command: kubectl apply -f /etc/gitea/

View File

@ -0,0 +1,44 @@
###################################################
# Namespace Gitea
###################################################
apiVersion: v1
kind: Namespace
metadata:
  name: gitea-repo
---
###############################
# Deployment Gitea
###############################
apiVersion: apps/v1
kind: Deployment
metadata:
  name: gitea
  namespace: gitea-repo
  labels:
    app: gitea
spec:
  # The PVC mounted below (git-pvc, see 020-volumes) is ReadWriteOnce:
  # only pods on a single node can attach it, so the original replicas: 3
  # left extra pods stuck unable to mount the volume. Gitea with its
  # default embedded DB is single-instance anyway.
  replicas: 1
  selector:
    matchLabels:
      app: gitea
  template:
    metadata:
      labels:
        app: gitea
    spec:
      containers:
        - name: gitea
          image: gitea/gitea:1.13.2
          ports:
            - containerPort: 3000
              name: gitea
            - containerPort: 22
              name: git-ssh
          volumeMounts:
            - mountPath: /data
              name: git-data
      volumes:
        - name: git-data
          persistentVolumeClaim:
            claimName: git-pvc

View File

@ -0,0 +1,39 @@
---
###################################################
# Persistent Volume + Claim for the Gitea data dir
###################################################
# NOTE(review): PersistentVolumes are cluster-scoped; the original manifest
# set metadata.namespace on the PV, which is invalid for cluster-scoped
# resources — removed. The claimRef.namespace below is the correct place
# to reference the claim's namespace.
kind: PersistentVolume
apiVersion: v1
metadata:
  name: git-pv
spec:
  capacity:
    storage: 5Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  # Static provisioning: pre-bind this PV to the claim defined below.
  claimRef:
    namespace: gitea-repo
    name: git-pvc
  csi:
    driver: driver.longhorn.io
    fsType: ext4
    volumeHandle: git-data
  storageClassName: longhorn-durable
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: git-pvc
  namespace: gitea-repo
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: longhorn-durable
  resources:
    requests:
      storage: 5Gi
  # Bind explicitly to the PV above instead of waiting for a dynamically
  # provisioned volume.
  volumeName: "git-pv"

View File

@ -0,0 +1,40 @@
###############################
# Service
###############################
---
kind: Service
apiVersion: v1
metadata:
  name: gitea-service
  namespace: gitea-repo
spec:
  selector:
    app: gitea
  ports:
    # targetPort defaults to port, which matches the containerPorts
    # (3000 http, 22 ssh) declared in 010-deployment.
    - name: gitea-http
      port: 3000
    - name: gitea-ssh
      port: 22
---
###################################################
# Ingress
###################################################
kind: Ingress
apiVersion: networking.k8s.io/v1
metadata:
  name: git
  namespace: gitea-repo
spec:
  # NOTE(review): no ingressClassName is set — this relies on the installed
  # controller claiming unclassed Ingresses; confirm against the controller.
  rules:
    # Quoted so the rendered manifest stays a valid string even if the
    # variable expands to something YAML would re-type or to a special char.
    - host: "{{ gitea.fqdn }}"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: gitea-service
                port:
                  number: 3000

View File

@ -0,0 +1,29 @@
# Install Helm from the official baltocdn apt repository:
# fetch the armored key, dearmor it into the keyring dir, add the source,
# then install the package.
- name: install helm repo
  become: yes
  block:
    - name: Create a /usr/share/keyrings
      ansible.builtin.file:
        path: /usr/share/keyrings
        state: directory
        mode: '0755'
    - name: get asc helm
      get_url:
        url: https://baltocdn.com/helm/signing.asc
        dest: /usr/share/keyrings/helm.asc
    - name: get gpg helm
      # Dearmor the key file directly — the original `cat file | gpg` pipeline
      # was a useless use of cat; gpg reads the input file itself.
      # `creates` keeps the task idempotent across runs.
      shell: gpg --dearmor -o /usr/share/keyrings/helm.gpg /usr/share/keyrings/helm.asc
      args:
        creates: /usr/share/keyrings/helm.gpg
    - name: helm | apt source
      # apt_repository refreshes the apt cache by default after adding the repo.
      ansible.builtin.apt_repository:
        repo: "deb [signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main"
        state: present

- name: install helm
  ansible.builtin.apt:
    pkg: helm
    state: present
  become: yes

View File

@ -0,0 +1,8 @@
# Render the ingress controller manifest onto the host, then apply it.
- name: copy ingress config
  become: yes
  template:
    src: ingress-config.yaml.j2
    dest: /etc/ingress-config.yaml
# NOTE(review): `command` always reports "changed" and is not idempotent
# from Ansible's point of view; `kubectl apply` itself is safe to re-run.
- name: Apply Ingress config
  command: kubectl apply -f /etc/ingress-config.yaml

View File

@ -0,0 +1,36 @@
# nginx ingress controller as a host-network DaemonSet: every node binds
# :80/:443 directly, so no Service of type LoadBalancer is required.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: nginx-ingress-controller
spec:
  selector:
    matchLabels:
      component: ingress-controller
  template:
    metadata:
      labels:
        component: ingress-controller
    spec:
      restartPolicy: Always
      hostNetwork: true
      containers:
        - name: nginx-ingress-lb
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
          ports:
            - name: http
              hostPort: 80
              containerPort: 80
              protocol: TCP
            - name: https
              hostPort: 443
              containerPort: 443
              protocol: TCP
          env:
            # The controller expects an env var literally named POD_NAMESPACE.
            # The original templated the *name* of the variable
            # (`- name: {{ pod_namespace }}`), producing an env var whose name
            # was the namespace value — the downward-API value was then
            # unreachable under the expected name.
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
          args:
            - /nginx-ingress-controller
            - '--default-backend-service={{ pod_namespace }}/default-http-backend'

View File

@ -0,0 +1,3 @@
# Tear down whatever kubeadm set up on this node; -f skips the interactive
# confirmation prompt so the task runs unattended.
- name: run kubeadm reset
  become: yes
  command: kubeadm reset -f

View File

@ -0,0 +1,26 @@
# Add the upstream Kubernetes apt repository (key + source) and install the
# node packages kubelet, kubeadm and kubectl.
- name: install kubernetes packages
  become: yes
  block:
    - name: Create a /usr/share/keyrings
      ansible.builtin.file:
        path: /usr/share/keyrings
        state: directory
        mode: '0755'
    - name: get gpg google
      get_url:
        url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
        dest: /usr/share/keyrings/kubernetes-archive-keyring.gpg
    - name: kubernetes | apt source
      # NOTE(review): apt.kubernetes.io has since been deprecated upstream in
      # favour of pkgs.k8s.io — confirm this repo still resolves.
      ansible.builtin.apt_repository:
        repo: "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main"
        state: present
    # Renamed from "install container runtime" — a copy-paste leftover from
    # the container-runtime role; this task installs the Kubernetes packages.
    - name: install kubelet, kubeadm and kubectl
      ansible.builtin.apt:
        pkg:
          - kubelet
          - kubeadm
          - kubectl
      become: yes

View File

@ -0,0 +1,11 @@
# Install MetalLB (native manifest) and then apply the address-pool config
# rendered from the group_vars `metalrange` variable.
- name: copy MetalLB config
  become: yes
  template:
    src: metallb-config.yaml
    dest: /etc/metallb-config.yaml

- name: install MetalLB
  command: kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.9/config/manifests/metallb-native.yaml

# The IPAddressPool/L2Advertisement CRs are validated by MetalLB's admission
# webhook, which takes a moment to come up after the manifest above is
# applied — retry instead of failing on the first refused connection.
- name: install MetalLB config
  command: kubectl apply -f /etc/metallb-config.yaml
  register: metallb_config_result
  until: metallb_config_result.rc == 0
  retries: 5
  delay: 10

View File

@ -0,0 +1,15 @@
---
# MetalLB address pool: the LAN IP range MetalLB may hand out to Services of
# type LoadBalancer. The range comes from the `metalrange` group var.
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: nat
  namespace: metallb-system
spec:
  addresses:
    - "{{ metalrange }}"
---
# An L2Advertisement with an empty spec announces all pools via layer 2
# (ARP/NDP) from the node currently holding each address.
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: empty
  namespace: metallb-system

View File

@ -0,0 +1,13 @@
# Install PureLB from its published manifest, then apply the rendered
# ServiceGroup config.
- name: copy PureLB config
  become: yes
  template:
    src: purelb-config.yaml.j2
    dest: /etc/purelb-config.yaml

# Fixes two defects in the original retry setup: the keyword is `delay`
# (not `delays`), and `retries` only takes effect together with `until` —
# without it the task never actually retried.
- name: install PureLB
  command: kubectl apply -f https://gitlab.com/api/v4/projects/purelb%2Fpurelb/packages/generic/manifest/0.0.1/purelb-complete.yaml
  register: purelb_install_result
  until: purelb_install_result.rc == 0
  retries: 2
  delay: 5

- name: install PureLB config
  command: kubectl apply -f /etc/purelb-config.yaml

View File

@ -0,0 +1,11 @@
# PureLB ServiceGroup: the default pool of addresses allocated to Services
# of type LoadBalancer. Subnet and pool come from the `lbsubnet` and
# `lbpool` inventory variables (not defined in group_vars/all — confirm
# they are set where this role is used).
apiVersion: purelb.io/v1
kind: ServiceGroup
metadata:
  name: default
  namespace: purelb
spec:
  local:
    v4pools:
      - subnet: '{{ lbsubnet }}'
        pool: '{{ lbpool }}'
        aggregation: /25

5
start.yaml 100644
View File

@ -0,0 +1,5 @@
---
# Entry-point playbook: initialize the control plane, then join the workers.
- import_playbook: initialize-kubernetes.yaml
- import_playbook: kubernetes-worker.yaml
# Optional add-ons, left disabled by default:
#- import_playbook: openebs.yaml
#- import_playbook: purelb.yaml

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,16 @@
---
# StorageClass backed by OpenEBS local hostpath volumes, marked as the
# cluster-wide default class.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: openebs-hostpath
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
    openebs.io/cas-type: local
    # OpenEBS CAS config: provision hostpath volumes under /var/openebs/local/.
    cas.openebs.io/config: |
      - name: StorageType
        value: "hostpath"
      - name: BasePath
        value: "/var/openebs/local/"
provisioner: openebs.io/local
# Delay binding until a pod is scheduled, so the volume lands on that node;
# Delete removes the hostpath data when the PVC is deleted.
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete