
Install Kubernetes on CentOS 7 with kubeadm

1. Environment

Kubernetes version: 1.23.6

Docker version: 20.10.24

Host            IP
k8s-master01    172.10.10.10
k8s-node01      172.10.10.11
k8s-node02      172.10.10.12
VIP             172.10.10.9
2. Upgrade the kernel (all nodes)

Install the ELRepo yum repository

yum -y install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm

Install the latest long-term-support kernel (kernel-lt)

yum -y --enablerepo=elrepo-kernel  install kernel-lt

Set the new kernel as the default boot entry

# List the available kernel boot entries and their order
[root@k8s-master01 ~]# awk -F\' '$1=="menuentry " {print i++ " : " $2}' /boot/grub2/grub.cfg
# Set the default boot entry (0 is the newly installed kernel)
[root@k8s-master01 ~]# grub2-set-default 0
# Edit /etc/default/grub
[root@k8s-master01 ~]# vim /etc/default/grub
# Change GRUB_DEFAULT=saved to 0
GRUB_DEFAULT=0
# Regenerate the grub configuration
[root@k8s-master01 ~]# grub2-mkconfig -o /boot/grub2/grub.cfg
# Reboot
[root@k8s-master01 ~]# reboot
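
After the reboot, confirm that the new kernel is active (the exact version depends on what ELRepo ships at install time):

[root@k8s-master01 ~]# uname -r
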
3. Adjust system configuration

Set the hostname; adjust it on each node according to its role

[root@k8s-master01 ~]# hostnamectl set-hostname k8s-master01

Add the hostnames and their IP addresses to /etc/hosts

172.10.10.10 k8s-master01
172.10.10.11 k8s-node01
172.10.10.12 k8s-node02
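
For example, the entries can be appended on every node like this (a minimal sketch; run it on each machine):

[root@k8s-master01 ~]# cat >> /etc/hosts << EOF
172.10.10.10 k8s-master01
172.10.10.11 k8s-node01
172.10.10.12 k8s-node02
EOF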

Disable firewalld, SELinux, and swap

# Stop and disable firewalld
[root@k8s-master01 ~]# systemctl stop firewalld.service 
[root@k8s-master01 ~]# systemctl disable firewalld.service 
# Disable swap temporarily
[root@k8s-master01 ~]# swapoff -a 
# Disable swap permanently
[root@k8s-master01 ~]# cp /etc/fstab /etc/fstab_bak 
[root@k8s-master01 ~]# cat /etc/fstab_bak | grep -v swap > /etc/fstab
[root@k8s-master01 ~]# cat /etc/fstab
# Disable SELinux temporarily
[root@k8s-master01 ~]# setenforce 0
# Disable SELinux permanently
[root@k8s-master01 ~]# sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
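
Verify that both changes took effect (swap usage should be 0, and SELinux reports Permissive until the next reboot):

[root@k8s-master01 ~]# free -m | grep -i swap
[root@k8s-master01 ~]# getenforce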

Upgrade system packages

# This can take a while
[root@k8s-master01 ~]# yum upgrade

Install IPVS-related packages

[root@k8s-master01 ~]# yum -y install ipvsadm ipset sysstat conntrack libseccomp
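
If kube-proxy is later switched to IPVS mode, the ip_vs kernel modules also need to be loaded. A minimal sketch, assuming the upgraded kernel (>= 4.19, where nf_conntrack replaces nf_conntrack_ipv4):

[root@k8s-master01 ~]# cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
[root@k8s-master01 ~]# modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack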

Tune kernel parameters

cat <<EOF >  /etc/sysctl.d/k8s.conf  
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv4.neigh.default.gc_stale_time=120
net.ipv4.conf.all.rp_filter=0 
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1
net.ipv4.tcp_max_tw_buckets=6000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.netfilter.nf_conntrack_max=2310720
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
net.core.netdev_max_backlog=16384
net.core.rmem_max = 16777216 
net.core.wmem_max = 16777216
net.ipv4.tcp_max_syn_backlog = 8096 
net.core.somaxconn = 32768 
fs.inotify.max_user_instances=8192 
fs.inotify.max_user_watches=524288 
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
vm.swappiness=0 
vm.overcommit_memory=1 
vm.panic_on_oom=0 
vm.max_map_count = 262144
EOF
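
Apply the new settings so they take effect without a reboot. Warnings about the net.bridge.* keys can be ignored until the br_netfilter module is loaded (see the module-loading step below):

[root@k8s-master01 ~]# sysctl --system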

Explanation of the kernel parameters

net.ipv4.tcp_keepalive_time=600 # idle time (seconds) before TCP starts sending keepalive probes
net.ipv4.tcp_keepalive_intvl=30 # interval between keepalive probes
net.ipv4.tcp_keepalive_probes=10 # number of unanswered probes before the connection is considered dead
net.ipv6.conf.all.disable_ipv6=1 # disable IPv6; set to 0 to enable it
net.ipv6.conf.default.disable_ipv6=1 # disable IPv6; set to 0 to enable it
net.ipv6.conf.lo.disable_ipv6=1 # disable IPv6; set to 0 to enable it
net.ipv4.neigh.default.gc_stale_time=120 # ARP cache entry staleness timeout
net.ipv4.conf.all.rp_filter=0 # default is 1 (strict reverse-path validation), which can cause packet drops
net.ipv4.conf.default.rp_filter=0 # disable source address validation
net.ipv4.conf.default.arp_announce=2 # always use the best local address for the target as the ARP source address
net.ipv4.conf.lo.arp_announce=2 # always use the best local address for the target as the ARP source address
net.ipv4.conf.all.arp_announce=2 # always use the best local address for the target as the ARP source address
net.ipv4.ip_local_port_range= 45001 65000 # minimum and maximum ports usable as source (local) ports, for both TCP and UDP
net.ipv4.ip_forward=1 # 0 disables IP forwarding; 1 enables it
net.ipv4.tcp_max_tw_buckets=6000 # maximum number of TIME_WAIT sockets
net.ipv4.tcp_syncookies=1 # should be 1 to protect against SYN flood attacks
net.ipv4.tcp_synack_retries=2 # retries of the SYN+ACK when the final ACK of the handshake is not received (default 5)
net.bridge.bridge-nf-call-ip6tables=1 # pass bridged IPv6 traffic through ip6tables
net.bridge.bridge-nf-call-iptables=1 # bridged (L2) frames are also filtered by iptables FORWARD rules, so L3 rules can match L2 traffic
net.netfilter.nf_conntrack_max=2310720 # size of the connection tracking table; a common sizing rule is CONNTRACK_MAX = RAMSIZE (in bytes) / 16384 / (x / 32), with nf_conntrack_max = 4 * nf_conntrack_buckets; default 262144

net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536

# gc_thresh3 is the absolute limit on the neighbor table size
# gc_thresh2 should be set to the maximum expected number of neighbor entries
# gc_thresh3 should then be somewhat higher than gc_thresh2, e.g. 25%-50% higher, as surge capacity
# gc_thresh1 can be raised to a large value; the kernel never removes (times out) stale entries while the table holds fewer than gc_thresh1 entries
net.core.netdev_max_backlog=16384 # per-CPU backlog queue length for incoming packets
net.core.rmem_max = 16777216 # maximum receive buffer size for all socket types
net.core.wmem_max = 16777216 # maximum send buffer size for all socket types
net.ipv4.tcp_max_syn_backlog = 8096 # first (SYN) backlog queue length
net.core.somaxconn = 32768 # second (accept) backlog queue length
fs.inotify.max_user_instances=8192 # maximum number of inotify instances per real user ID, default 128
fs.inotify.max_user_watches=524288 # maximum number of watches a single user can add, default 8192
fs.file-max=52706963 # system-wide maximum number of file descriptors
fs.nr_open=52706963 # maximum number of file handles a single process can open
kernel.pid_max = 4194303 # maximum number of process IDs
net.bridge.bridge-nf-call-arptables=1 # filter bridged ARP packets in the arptables FORWARD chain
vm.swappiness=0 # avoid using swap unless the system is close to OOM
vm.overcommit_memory=1 # do not check whether enough physical memory is available before allocating
vm.panic_on_oom=0 # do not panic on OOM; let the OOM killer handle it
vm.max_map_count = 262144

Load kernel modules

# Load now
[root@k8s-master01 ~]# modprobe overlay
[root@k8s-master01 ~]# modprobe br_netfilter
# Load on every boot
[root@k8s-master01 ~]# cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
ip_conntrack
EOF
# Enable the service that loads them at boot
[root@k8s-master01 ~]# systemctl enable --now systemd-modules-load.service
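
Confirm that the modules are loaded and the bridge sysctls are now visible:

[root@k8s-master01 ~]# lsmod | grep -e overlay -e br_netfilter
[root@k8s-master01 ~]# sysctl net.bridge.bridge-nf-call-iptables
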
4. Install Docker

Remove any old Docker installation first

# Clean up containers and images from an old Docker installation (skip if Docker was never installed)
[root@k8s-master01 ~]# docker stop `docker ps -a -q`
[root@k8s-master01 ~]# docker rm `docker ps -a -q`
[root@k8s-master01 ~]# docker rmi -f `docker images -a -q`  # force-removes all images
# Remove the old packages
[root@k8s-master01 ~]# yum -y remove docker docker-common container-selinux

Install Docker

# Add the Docker yum repository
[root@k8s-master01 ~]# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# Install Docker 20.10.*; this is the newest release supported by this Kubernetes version
[root@k8s-master01 ~]# yum -y install docker-ce-20.10.24 docker-ce-cli-20.10.24 docker-ce-rootless-extras-20.10.24
# Start Docker and enable it at boot
[root@k8s-master01 ~]# systemctl start docker
[root@k8s-master01 ~]# systemctl enable docker
# Edit the Docker configuration file /etc/docker/daemon.json
[root@k8s-master01 ~]# vim /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size":"200m", "max-file":"3"}
}
# Restart Docker after the change
[root@k8s-master01 ~]# systemctl restart docker
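
The kubelet expects the systemd cgroup driver, so it is worth checking that the setting took effect:

[root@k8s-master01 ~]# docker info | grep -i "cgroup driver"
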
5. Install the load balancer

Install it on the master nodes only

[root@k8s-master01 ~]# yum -y install haproxy keepalived

After installing keepalived, configure it on every master node

# k8s-master01
[root@k8s-master01 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id LVS_DEVEL
}

vrrp_script check-haproxy {
    script "killall -0 haproxy"
    interval 5
    weight -30
}

vrrp_instance VI-kube-master {
    state MASTER    # MASTER or BACKUP
    priority 100    # priority
    dont_track_primary
    interface ens192    # network interface name
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        172.10.10.9 #vip
    }
}
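
If additional master nodes are added later, their keepalived configuration would differ only in state and priority (a sketch, assuming the same interface name):

vrrp_instance VI-kube-master {
    state BACKUP    # backup node
    priority 90     # lower than the MASTER
    dont_track_primary
    interface ens192
    virtual_router_id 68
    advert_int 3
    track_script {
        check-haproxy
    }
    virtual_ipaddress {
        172.10.10.9
    }
}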

HAProxy configuration; likewise, configure it on every master node

[root@k8s-master01 ~]# cat /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443
    option               tcplog
    default_backend      kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server  k8s-master1 172.10.10.10:6443 check
    # only one master in this setup, so only one backend server is listed
    #server  k8s-master2 192.168.8.11:6443 check
    #server  k8s-master3 192.168.8.12:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats

Start keepalived and haproxy

[root@k8s-master01 ~]# systemctl daemon-reload
[root@k8s-master01 ~]# systemctl enable --now haproxy
[root@k8s-master01 ~]# systemctl enable --now keepalived
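
Once both services are running, the VIP should be bound to the interface on the active master and HAProxy should be listening on port 16443:

[root@k8s-master01 ~]# ip addr show ens192 | grep 172.10.10.9
[root@k8s-master01 ~]# ss -lntp | grep 16443
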
6. Install kubeadm, kubectl, and kubelet

The master nodes need:

kubeadm, kubelet, kubectl

The worker nodes need:

kubeadm, kubelet

Pay attention to versions: newer releases can use the yum repository from the official documentation, while versions no longer published there require a third-party mirror. Here we use the Alibaba Cloud mirror.

[root@k8s-master01 ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube* 
EOF

Install the specified versions of kubelet, kubeadm, and kubectl

[root@k8s-master01 ~]# yum -y install kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6 --disableexcludes=kubernetes
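
Enable the kubelet so it starts at boot. It will restart in a loop until kubeadm init or join provides its configuration, which is expected:

[root@k8s-master01 ~]# systemctl enable --now kubelet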

After the installation, initialize the cluster. Run this on one master node only:

[root@k8s-master01 ~]# kubeadm init \
        --kubernetes-version v1.23.6 \
        --image-repository registry.aliyuncs.com/google_containers \
        --service-cidr=10.96.0.0/12 \
        --pod-network-cidr=10.244.0.0/16 \
        --control-plane-endpoint=172.10.10.9:16443 \
        --upload-certs \
        --v=5

If output like the following appears, the initialization succeeded:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 172.10.10.9:16443 --token 5om5p3.svbtbhdoonij7p7d \
        --discovery-token-ca-cert-hash sha256:c17bb1e268e827b49dba86524e23d31e2dbb388f3a1ad78dcaa4af57f60a4d28 \
        --control-plane --certificate-key 24dfa2ab5dd9a9c2b0df1d233d6d03f79b359d91c836dad4fb75957ff9327336

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.10.10.9:16443 --token 5om5p3.svbtbhdoonij7p7d \
        --discovery-token-ca-cert-hash sha256:c17bb1e268e827b49dba86524e23d31e2dbb388f3a1ad78dcaa4af57f60a4d28

Next, configure kubectl:

# sudo is not needed when running as root
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Following the hints above, additional control-plane nodes are joined with this command (worker nodes use the shorter join command without --control-plane and the certificate key):

kubeadm join 172.10.10.9:16443 --token 5om5p3.svbtbhdoonij7p7d \
        --discovery-token-ca-cert-hash sha256:c17bb1e268e827b49dba86524e23d31e2dbb388f3a1ad78dcaa4af57f60a4d28 \
        --control-plane --certificate-key 24dfa2ab5dd9a9c2b0df1d233d6d03f79b359d91c836dad4fb75957ff9327336

The join token is valid for 24 hours. If it has expired, generate a new one:

# Regenerate the join command with a fresh token
[root@k8s-master01 ~]# kubeadm token create --print-join-command
# List existing tokens
[root@k8s-master01 ~]# kubeadm token list

After the nodes have joined, check their status:

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS   ROLES                  AGE     VERSION
k8s-master01   Ready    control-plane,master   2d1h    v1.23.6
k8s-node01     Ready    <none>                 5h11m   v1.23.6
k8s-node02     Ready    <none>                 2d      v1.23.6
[root@k8s-master01 ~]# 
7. Install the network add-on

This setup uses the Calico network plugin. First, download the manifest:

[root@k8s-master01 ~]# wget https://docs.projectcalico.org/manifests/calico.yaml
# Change the CIDR under CALICO_IPV4POOL_CIDR to match the --pod-network-cidr passed to kubeadm init
[root@k8s-master01 ~]# vim calico.yaml
# The IP Pool CIDR for this installation
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"

Save the file, then apply it to install the network plugin:

[root@k8s-master01 ~]# kubectl apply -f calico.yaml
# check the status of the system pods
[root@k8s-master01 ~]# kubectl get pod -n kube-system
NAME                                       READY   STATUS    RESTARTS        AGE
calico-kube-controllers-64cc74d646-bsqzc   1/1     Running   0               5h57m
calico-node-6fxrp                          1/1     Running   3 (107m ago)    5h42m
calico-node-t5k2r                          1/1     Running   0               5h57m
calico-node-vlrpx                          1/1     Running   0               5h57m
coredns-6d8c4cb4d-n9xvk                    1/1     Running   0               2d1h
coredns-6d8c4cb4d-rpqhc                    1/1     Running   0               2d1h
etcd-k8s-master01                          1/1     Running   26              2d1h
etcd-k8s-node01                            1/1     Running   5 (107m ago)    5h42m
etcd-k8s-node02                            1/1     Running   3 (7h24m ago)   2d1h
kube-apiserver-k8s-master01                1/1     Running   34              2d1h
kube-apiserver-k8s-node01                  1/1     Running   6 (107m ago)    5h42m
kube-apiserver-k8s-node02                  1/1     Running   2 (7h24m ago)   2d1h
kube-controller-manager-k8s-master01       1/1     Running   5 (8h ago)      2d1h
kube-controller-manager-k8s-node01         1/1     Running   6 (107m ago)    5h42m
kube-controller-manager-k8s-node02         1/1     Running   3 (7h24m ago)   2d1h
kube-proxy-287qf                           1/1     Running   3 (107m ago)    5h42m
kube-proxy-4ng2v                           1/1     Running   0               2d1h
kube-proxy-wl6k4                           1/1     Running   3 (7h24m ago)   2d1h
kube-scheduler-k8s-master01                1/1     Running   5 (8h ago)      2d1h
kube-scheduler-k8s-node01                  1/1     Running   5 (107m ago)    5h42m
kube-scheduler-k8s-node02                  1/1     Running   2 (7h24m ago)   2d1h
[root@k8s-master01 ~]#

Barring surprises, you should see output like the above, which means all the components are up and running.
