# Building a Kubernetes Cluster (1.26.1) with Keepalived and HAProxy, Using containerd

## 1. Component Selection

### Base Components
| Component      | Version    | Deployment Method |
| -------------- | ---------- | ----------------- |
| Virtual System | CentOS 7.6 | OS                |
| Kubernetes     | 1.26.1     | Cluster           |
| Etcd           | 3.4.23     | Cluster           |
| Calico         | 3.25.0     | Container         |
| CoreDNS        |            | Container         |
| Traefik        |            | Container         |
| containerd     | 1.6.14     | OS                |
| keepalived     | 2.2.7      | Cluster           |
| haproxy        | 2.7.1      | Cluster           |
## 2. Deployment Architecture

| Role            | IP Address   | Spec                 | Components |
|-----------------|--------------|----------------------|------------|
| Master_1        | 10.62.130.67 | 2 vCPU 4 GiB 80 GB   | 1. etcd<br/>2. kube-apiserver<br/>3. kube-controller-manager<br/>4. kube-scheduler<br/>5. kubelet<br/>6. kube-proxy<br/>7. containerd |
| Master_2        | 10.62.130.68 | 2 vCPU 4 GiB 80 GB   | 1. etcd<br/>2. kube-apiserver<br/>3. kube-controller-manager<br/>4. kube-scheduler<br/>5. kubelet<br/>6. kube-proxy<br/>7. containerd |
| Master_3        | 10.62.130.69 | 2 vCPU 4 GiB 80 GB   | 1. etcd<br/>2. kube-apiserver<br/>3. kube-controller-manager<br/>4. kube-scheduler<br/>5. kubelet<br/>6. kube-proxy<br/>7. containerd |
| node_1          | 10.62.130.70 | 4 vCPU 16 GiB 150 GB | 1. kubelet<br/>2. kube-proxy<br/>3. containerd |
| node_2          | 10.62.130.71 | 4 vCPU 16 GiB 150 GB | 1. kubelet<br/>2. kube-proxy<br/>3. containerd |
| node_3          | 10.62.130.72 | 4 vCPU 16 GiB 150 GB | 1. kubelet<br/>2. kube-proxy<br/>3. containerd |
| Database server | 10.62.130.73 | 4 vCPU 16 GiB 200 GB | 1. nginx<br/>2. MySQL<br/>3. Redis |
## 3. Server Setup
Add the node names (all nodes; the names resolved here must match the node names used in the cluster)

```sh
[root@master ~]# cat >> /etc/hosts <<EOF
>10.62.130.67 k8s-master1
>10.62.130.68 k8s-master2
>10.62.130.69 k8s-master3
>10.62.130.70 k8s-node1
>10.62.130.71 k8s-node2
>10.62.130.72 k8s-node3
>EOF
```
Disable SELinux (all nodes)

```sh
[root@master ~]# setenforce 0
[root@master ~]# sed -i '/^SELINUX=/c SELINUX=disabled' /etc/selinux/config
```
Disable the firewall (all nodes)

```sh
[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
```
Disable swap (all nodes; if swap stays enabled, add the fail-swap-on=false flag to the kubelet configuration)

```sh
[root@master ~]# swapoff -a
[root@master ~]# echo vm.swappiness=0 >> /etc/sysctl.conf
[root@master ~]# sed -i 's/.*swap.*/#&/' /etc/fstab
##comment out the swap entry so it is not mounted at boot
  #/dev/mapper/centos-swap swap                    swap    defaults        0 0
```
Enable IPVS (all nodes; IPVS scales better than iptables, especially on clusters with many nodes)
```sh
[root@master ~]# yum install -y ipvsadm ipset sysstat conntrack libseccomp 
[root@master ~]# cat > /etc/modules-load.d/ipvs.conf <<EOF
>ip_vs
>ip_vs_lc
>ip_vs_wlc
>ip_vs_rr
>ip_vs_wrr
>ip_vs_lblc
>ip_vs_lblcr
>ip_vs_dh
>ip_vs_sh
>ip_vs_fo   # remove this entry on kernels older than 4.19
>ip_vs_nq
>ip_vs_sed
>ip_vs_ftp
>nf_conntrack # use nf_conntrack_ipv4 instead on kernels older than 4.19
>nf_net # remove this entry on kernels older than 4.19
>ip_tables
>ip_set
>xt_set
>ipt_set
>ipt_rpfilter
>ipt_REJECT
>ipip
EOF
[root@master ~]# systemctl enable --now systemd-modules-load.service
```
Pass bridged IPv4 traffic to the iptables chains (all nodes)

```sh
[root@master ~]# cat >> /etc/sysctl.d/k8s.conf << EOF
>net.bridge.bridge-nf-call-ip6tables = 1
>net.bridge.bridge-nf-call-iptables = 1
>net.ipv4.ip_forward = 1
EOF
[root@master ~]# sysctl --system
```
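If `sysctl --system` reports that the `net.bridge.*` keys do not exist, the `br_netfilter` module is not loaded yet; a minimal sketch to load it now and on every boot:

```sh
[root@master ~]# modprobe br_netfilter
[root@master ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
[root@master ~]# sysctl --system
```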
Kernel parameter tuning (all nodes)

```sh
[root@master ~]# cat >> /etc/sysctl.conf << EOF    
# fs.file-max is the system-wide limit on open file handles; raise it so it is not hit under load
>fs.file-max=6553500
# ARP cache sizing
>net.ipv4.neigh.default.gc_thresh1=1024
>net.ipv4.neigh.default.gc_thresh2=4096
>net.ipv4.neigh.default.gc_thresh3=8192
# maximum number of connection-tracking entries netfilter can handle at the same time
>net.netfilter.nf_conntrack_max=10485760
>net.netfilter.nf_conntrack_tcp_timeout_established=300
# conntrack hash table size (read-only; default 65536 on a 64-bit system with 8 GB RAM, doubled for 16 GB, and so on)
>net.netfilter.nf_conntrack_buckets=1310720
# maximum number of packets queued when an interface receives packets faster than the kernel can process them
>net.core.netdev_max_backlog=262144
# default 128; upper limit of inotify instances per real user ID
>fs.inotify.max_user_instances=209715
# default 8192; upper limit of watches per inotify instance
>fs.inotify.max_user_watches=209715
>net.ipv4.tcp_fin_timeout = 30
>net.ipv4.tcp_keepalive_time = 60
>net.ipv4.tcp_keepalive_intvl = 30
>net.ipv4.tcp_keepalive_probes = 5
>net.ipv4.tcp_timestamps = 0
>net.ipv4.tcp_tw_reuse = 1 # allow TIME-WAIT sockets to be reused for new TCP connections (default 0, disabled)
>net.ipv4.tcp_tw_recycle = 1 # enable fast recycling of TIME-WAIT sockets (default 0, disabled)
>net.core.somaxconn=65535
EOF
[root@master ~]# sysctl --system
```
Adjust ulimit (all nodes)

```sh
[root@master ~]# vi /etc/security/limits.conf 
#raise the limits below to 655350
root soft nofile 655350
root hard nofile 655350
* soft nofile 655350
* hard nofile 655350
```

Disable Transparent Huge Pages (Redis node)

```sh
[root@master ~]# echo never > /sys/kernel/mm/transparent_hugepage/enabled
[root@master ~]# vi /etc/rc.local
##add the following entry
echo never> /sys/kernel/mm/transparent_hugepage/enabled
```
## 4. Container Runtime Deployment (containerd)

Create the working directories (adjust to your environment). Some of the later steps build from source, so install the gcc toolchain first (all nodes)

```sh
[root@master ~]# mkdir -p /u01/install # directory for installation packages
[root@master ~]# mkdir -p /u01/ssl/  # directory for certificates
```
### Install containerd
Upload the containerd offline archive, extract it, and create the systemd unit (all nodes)
```sh
[root@master install]# tar -zxvf containerd-1.6.14-linux-amd64.tar.gz
[root@master install]# cp /u01/install/bin/containerd /usr/local/bin/
[root@master install]# cp /u01/install/bin/ctr /usr/local/bin/
[root@master install]# mkdir -p /etc/containerd
[root@master install]# vi /etc/systemd/system/containerd.service 
######contents######
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
#uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration
#Environment="ENABLE_CRI_SANDBOXES=sandboxed"
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
###################
:wq # save and exit
[root@master install]# systemctl daemon-reload
[root@master install]# systemctl enable --now containerd
```
Generate the default configuration file, switch the cgroup driver to systemd, and change the sandbox image to registry.aliyuncs.com/google_containers/pause:3.8
```sh
[root@master install]# containerd config default > /etc/containerd/config.toml
[root@master install]# sed -i 's/SystemdCgroup\ =\ false/SystemdCgroup\ =\ true/g' /etc/containerd/config.toml
[root@master install]# sed -i 's/sandbox_image\ =.*/sandbox_image\ =\ "registry.aliyuncs.com\/google_containers\/pause:3.8"/g' /etc/containerd/config.toml
[root@master install]# grep sandbox_image /etc/containerd/config.toml
[root@master install]# systemctl daemon-reload 
[root@master install]# systemctl restart containerd
# verify the version
[root@master install]# ctr version
```
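A quick check that both edits actually landed in the configuration file (a verification step only, not part of the original procedure):

```sh
[root@master install]# grep -E 'SystemdCgroup|sandbox_image' /etc/containerd/config.toml
```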
### Install runc (the OCI container runtime)
Upload runc
```sh
[root@master install]# install -m 755 /u01/install/runc.amd64 /usr/local/sbin/runc
# verify the version
[root@master install]# runc -v
```
### Install the CNI plugins (Container Network Interface)
Upload the CNI plugins offline archive
```sh
[root@master install]# mkdir -p /u01/cni/bin
[root@master install]# tar -xvf cni-plugins-linux-amd64-v1.1.1.tgz -C /u01/cni/bin/
[root@master install]# cp /u01/cni/bin/* /usr/local/bin/
```
### Install crictl (the Kubernetes CRI command-line tool)
Upload the crictl offline archive
```sh
[root@master install]# tar -zxvf crictl-v1.26.0-linux-amd64.tar.gz
[root@master install]# cp crictl /usr/local/bin/
# verify the version
[root@master install]# crictl -v 
[root@master install]# vi /etc/crictl.yaml
######contents######
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
###################
:wq # save and exit
```
### Install nerdctl (a Docker-style CLI for containerd)
```sh
[root@master install]# tar -zxvf nerdctl-1.1.0-linux-amd64.tar.gz
[root@master install]# cp nerdctl /usr/local/bin/
# verify the version
[root@master install]# nerdctl -v
[root@master install]# nerdctl -n k8s.io ps
```
## 5. Database Deployment (etcd)

etcd cluster deployment (master nodes)

etcd cluster certificate creation (primary master node)
```sh
[root@master ~]# mkdir -p /u01/ssl/etcd_ssl  # directory for the etcd certificates
# upload the cfssl toolchain and make it executable
[root@master ~]# chmod u+x /usr/local/bin/cf*
```
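The upload step above assumes the cfssl and cfssljson binaries were downloaded beforehand (for example from the cloudflare/cfssl GitHub releases page); a minimal sketch of putting them in place, with illustrative file names:

```sh
# file names are examples; use whichever release you downloaded
[root@master ~]# cp /u01/install/cfssl_linux-amd64 /usr/local/bin/cfssl
[root@master ~]# cp /u01/install/cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@master ~]# chmod u+x /usr/local/bin/cf*
[root@master ~]# cfssl version
```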
Create the etcd CA (primary master node)
```sh
[root@master etcd_ssl]# cd /u01/ssl/etcd_ssl
[root@master etcd_ssl]# vi etcd-root-ca-csr.json
######contents######
{
  "CN": "etcd-root-ca",
  "key": {
    "algo": "rsa",
    "size": 4096
  },
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "etcd",
      "OU": "etcd Security"
    }
  ]
}
###################
:wq # save and exit
```
Create the CA signing profile (primary master node)
```sh
[root@master etcd_ssl]# vi etcd-gencert.json
#######contents#####
{
    "signing":{
        "default":{
            "expiry":"876000h"
        },
        "profiles":{
            "etcd":{
                "usages":[
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry":"876000h"
            }
        }
    }
}
###############
:wq # save and exit
```
Create the etcd certificate signing request (primary master node)
```sh
[root@master etcd_ssl]# vi etcd-csr.json
#######contents (replace the IP addresses with your own cluster addresses)#####
{
  "CN": "etcd",
  "key": {
    "algo": "rsa",
    "size": 4096
  },
  "hosts": [
    "127.0.0.1",
    "10.62.130.67",
    "10.62.130.68",
    "10.62.130.69"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "etcd",
      "OU": "etcd Security"
    }
  ]
}
###############
:wq # save and exit
```
Initialize the CA (primary master node)
```sh
[root@master etcd_ssl]# /usr/local/bin/cfssl gencert --initca=true etcd-root-ca-csr.json | cfssljson --bare etcd-root-ca
```
Generate the etcd certificates (primary master node)
```sh
[root@master etcd_ssl]# /usr/local/bin/cfssl gencert --ca etcd-root-ca.pem --ca-key etcd-root-ca-key.pem --config etcd-gencert.json --profile=etcd etcd-csr.json | cfssljson --bare etcd
```
Distribute the certificates (primary master node)
```sh
[root@master etcd_ssl]# scp -r /u01/ssl/etcd_ssl root@10.62.130.68:/u01/ssl/
[root@master etcd_ssl]# scp -r /u01/ssl/etcd_ssl root@10.62.130.69:/u01/ssl/
```
Install etcd (all etcd nodes)
```sh
[root@master ~]# cd  /u01/install
[root@master install]# tar -zxvf etcd-v3.4.23-linux-amd64.tar.gz
[root@master install]# cp etcd-v3.4.23-linux-amd64/etcd* /usr/local/bin
# verify the version
[root@master install]# etcdctl version
```
Create the etcd user and group (all etcd nodes)
```sh
[root@master ~]# groupadd etcd
[root@master ~]# useradd -c "Etcd user" -g etcd -s /sbin/nologin -r etcd
[root@master ~]# mkdir -p /etc/etcd/ && mkdir -p /u01/etcd/
[root@master ~]# chown -R etcd:etcd /etc/etcd/ && chown -R etcd:etcd /u01/etcd/&& chown -R etcd:etcd /u01/ssl/etcd_ssl && chmod -R 775 /u01/ssl/
```
Create the etcd systemd service (all etcd nodes)
```sh
[root@master ~]# vi /usr/lib/systemd/system/etcd.service
#######contents######
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/u01/etcd/
EnvironmentFile=/etc/etcd/etcd-conf.yml
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/local/bin/etcd --config-file=/etc/etcd/etcd-conf.yml"
Restart=on-failure
LimitNOFILE=655350

[Install]       
WantedBy=multi-user.target

####################
:wq # save and exit
[root@master ~]# systemctl daemon-reload
```
Create the etcd configuration file (fill in each node's own IP addresses)
node1
```sh
[root@master ~]# vi /etc/etcd/etcd-conf.yml
#######contents#####
name: 'etcd1'
data-dir: /u01/etcd/data
wal-dir: /u01/etcd/data/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.62.130.67:2380'
listen-client-urls: 'https://10.62.130.67:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.62.130.67:2380'
advertise-client-urls: 'https://10.62.130.67:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd1=https://10.62.130.67:2380,etcd2=https://10.62.130.68:2380,etcd3=https://10.62.130.69:2380'
initial-cluster-token: 'etcd-caec'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/u01/ssl/etcd_ssl/etcd.pem'
  key-file: '/u01/ssl/etcd_ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/u01/ssl/etcd_ssl/etcd-root-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/u01/ssl/etcd_ssl/etcd.pem'
  key-file: '/u01/ssl/etcd_ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/u01/ssl/etcd_ssl/etcd-root-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
###############
:wq # save and exit
```
node2
```sh
[root@node1 ~]# vi /etc/etcd/etcd-conf.yml
#######contents#####
name: 'etcd2'
data-dir: /u01/etcd/data
wal-dir: /u01/etcd/data/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.62.130.68:2380'
listen-client-urls: 'https://10.62.130.68:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.62.130.68:2380'
advertise-client-urls: 'https://10.62.130.68:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd1=https://10.62.130.67:2380,etcd2=https://10.62.130.68:2380,etcd3=https://10.62.130.69:2380'
initial-cluster-token: 'etcd-caec'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/u01/ssl/etcd_ssl/etcd.pem'
  key-file: '/u01/ssl/etcd_ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/u01/ssl/etcd_ssl/etcd-root-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/u01/ssl/etcd_ssl/etcd.pem'
  key-file: '/u01/ssl/etcd_ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/u01/ssl/etcd_ssl/etcd-root-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
###############
:wq # save and exit
```
node3
```sh
[root@node2 ~]# vi /etc/etcd/etcd-conf.yml
#######contents#####
name: 'etcd3'
data-dir: /u01/etcd/data
wal-dir: /u01/etcd/data/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.62.130.69:2380'
listen-client-urls: 'https://10.62.130.69:2379,http://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.62.130.69:2380'
advertise-client-urls: 'https://10.62.130.69:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd1=https://10.62.130.67:2380,etcd2=https://10.62.130.68:2380,etcd3=https://10.62.130.69:2380'
initial-cluster-token: 'etcd-caec'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
  cert-file: '/u01/ssl/etcd_ssl/etcd.pem'
  key-file: '/u01/ssl/etcd_ssl/etcd-key.pem'
  client-cert-auth: true
  trusted-ca-file: '/u01/ssl/etcd_ssl/etcd-root-ca.pem'
  auto-tls: true
peer-transport-security:
  cert-file: '/u01/ssl/etcd_ssl/etcd.pem'
  key-file: '/u01/ssl/etcd_ssl/etcd-key.pem'
  peer-client-cert-auth: true
  trusted-ca-file: '/u01/ssl/etcd_ssl/etcd-root-ca.pem'
  auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
###############
:wq # save and exit
```

Start the etcd cluster (start all members at the same time)
```sh
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start etcd
[root@master ~]# systemctl enable etcd
```
Check the cluster status
```sh
[root@master ~]# export ETCDCTL_API=3  # use the v3 etcdctl API
[root@master ~]# etcdctl --endpoints="https://10.62.130.67:2379,https://10.62.130.68:2379,https://10.62.130.69:2379" --cacert=/u01/ssl/etcd_ssl/etcd-root-ca.pem --cert=/u01/ssl/etcd_ssl/etcd.pem --key=/u01/ssl/etcd_ssl/etcd-key.pem endpoint health --write-out=table
# After automatic compaction of the key space, the backend database becomes fragmented; the fragments still consume disk space, so run defrag periodically (this can be a cron job)
[root@master ~]# export ETCDCTL_API=3 
[root@master ~]# etcdctl defrag
```
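A hedged sketch of wrapping the defrag into a weekly cron job (the schedule and script path are illustrative; endpoints and certificate paths match the ones used above):

```sh
[root@master ~]# cat > /etc/cron.weekly/etcd-defrag <<'EOF'
#!/bin/bash
# defragment every etcd member in turn
export ETCDCTL_API=3
/usr/local/bin/etcdctl \
  --endpoints="https://10.62.130.67:2379,https://10.62.130.68:2379,https://10.62.130.69:2379" \
  --cacert=/u01/ssl/etcd_ssl/etcd-root-ca.pem \
  --cert=/u01/ssl/etcd_ssl/etcd.pem \
  --key=/u01/ssl/etcd_ssl/etcd-key.pem \
  defrag
EOF
[root@master ~]# chmod +x /etc/cron.weekly/etcd-defrag
```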
## 6. High-Availability Load Balancer Deployment (Keepalived and HAProxy)
### Deploy Keepalived
```sh
#upload the offline source archive and extract it
[root@master install]# tar -zxvf keepalived-2.2.7.tar.gz
[root@master install]# cd keepalived-2.2.7
[root@master keepalived-2.2.7]# ./configure --prefix=/u01/keepalived # install any missing build dependencies first
# compile and install
[root@master keepalived-2.2.7]# make && make install
[root@master keepalived-2.2.7]# vi /u01/keepalived/etc/sysconfig/keepalived # point the service at the relocated configuration
###contents after the change####
KEEPALIVED_OPTIONS="-f /u01/keepalived/etc/keepalived/keepalived.conf -D -S 0"
###################
```
Edit the configuration file (node 1)
```sh
[root@master keepalived]# cp /u01/keepalived/etc/keepalived/keepalived.conf.sample  keepalived.conf
[root@master keepalived]# vi keepalived.conf
#######contents#####
global_defs {
  notification_email {
  }
  router_id LVS_DEVEL
  vrrp_skip_check_adv_addr
  vrrp_garp_interval 0
  vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}

vrrp_instance haproxy-vip {
  state MASTER
  priority 100
  interface ens192                       # Network card
  virtual_router_id 60
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass changan@123
  }
  unicast_src_ip 10.62.130.72      # The IP address of this machine
  unicast_peer {
    10.62.130.73                         # The IP address of peer machines
  }

  virtual_ipaddress {
    10.62.130.74/24                  # The VIP address
  }

  track_script {
    chk_haproxy
  }
}
###################
```
Edit the configuration file (node 2)
```sh
[root@master keepalived]# cp /u01/keepalived/etc/keepalived/keepalived.conf.sample  keepalived.conf
[root@master keepalived]# vi keepalived.conf
#######contents#####
global_defs {
  notification_email {
  }
  router_id LVS_DEVEL
  vrrp_skip_check_adv_addr
  vrrp_garp_interval 0
  vrrp_gna_interval 0
}

vrrp_script chk_haproxy {
  script "killall -0 haproxy"
  interval 2
  weight 2
}

vrrp_instance haproxy-vip {
  state BACKUP
  priority 100
  interface ens192                       # Network card
  virtual_router_id 60
  advert_int 1
  authentication {
    auth_type PASS
    auth_pass changan@123
  }
  unicast_src_ip 10.62.130.73      # The IP address of this machine
  unicast_peer {
    10.62.130.72                         # The IP address of peer machines
  }

  virtual_ipaddress {
    10.62.130.74/24                  # The VIP address
  }

  track_script {
    chk_haproxy
  }
}
###################
```
Start Keepalived (both load-balancer nodes)
```sh
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start keepalived
[root@master ~]# systemctl enable keepalived
```
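To confirm that the MASTER node holds the VIP (interface name and addresses taken from the configuration above):

```sh
[root@master ~]# ip addr show ens192 | grep 10.62.130.74   # the VIP should appear on the MASTER node
[root@master ~]# ping -c 3 10.62.130.74
```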
### Deploy HAProxy
```sh
#upload the offline source archive and extract it
[root@master install]# tar -zxvf haproxy-2.7.1.tar.gz
[root@master install]# cd haproxy-2.7.1
# compile and install
[root@master haproxy-2.7.1]# make TARGET=linux310 PREFIX=/u01/haproxy ARCH=x86_64
[root@master haproxy-2.7.1]# make install PREFIX=/u01/haproxy
# create the working directories
[root@master haproxy-2.7.1]# mkdir -p /u01/haproxy/data
[root@master haproxy-2.7.1]# mkdir -p /u01/haproxy/etc
[root@master haproxy-2.7.1]# mkdir -p /u01/haproxy/logs
```
Edit the configuration file (node 1)
```sh
[root@master install]# vi /u01/haproxy/etc/haproxy.conf
#######contents#####
global
    log /u01/haproxy/logs  local0 warning
    chroot      /u01/haproxy
    pidfile     /u01/haproxy/data/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

   stats socket /u01/haproxy/stats

defaults
  log global
  option  httplog
  option  dontlognull
        timeout connect 5000
        timeout client 50000
        timeout server 50000

frontend kube-apiserver
  bind *:6443
  mode tcp
  option tcplog
  default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 10.62.130.67:6443 check # Replace the IP address with your own.
    server kube-apiserver-2 10.62.130.68:6443 check # Replace the IP address with your own.
    server kube-apiserver-3 10.62.130.69:6443 check # Replace the IP address with your own.
#stats/admin page configuration
frontend admin_stats
  bind :8080
     mode http
     stats enable
     option httplog
     maxconn 10
     stats refresh 30s
     #reachable at http://<ip>:8080/admin
     stats uri /admin
     stats auth admin:changan@123
     stats hide-version
     stats admin if TRUE
###################
```
Edit the configuration file (node 2)
```sh
[root@master install]# vi /u01/haproxy/etc/haproxy.conf
#######contents#####
global
    log /u01/haproxy/logs  local0 warning
    chroot      /u01/haproxy
    pidfile     /u01/haproxy/data/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

   stats socket /u01/haproxy/stats

defaults
  log global
  option  httplog
  option  dontlognull
        timeout connect 5000
        timeout client 50000
        timeout server 50000

frontend kube-apiserver
  bind *:6443
  mode tcp
  option tcplog
  default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server kube-apiserver-1 10.62.130.67:6443 check # Replace the IP address with your own.
    server kube-apiserver-2 10.62.130.68:6443 check # Replace the IP address with your own.
    server kube-apiserver-3 10.62.130.69:6443 check # Replace the IP address with your own.
#stats/admin page configuration
frontend admin_stats
  bind :8080
     mode http
     stats enable
     option httplog
     maxconn 10
     stats refresh 30s
     #reachable at http://<ip>:8080/admin
     stats uri /admin
     stats auth admin:changan@123
     stats hide-version
     stats admin if TRUE
###################
```
Start HAProxy (both load-balancer nodes)
```sh
[root@master ~]# /u01/haproxy/sbin/haproxy  -f /u01/haproxy/etc/haproxy.conf
# enable start at boot
[root@master ~]# vi /etc/rc.local
###append the following line####
/u01/haproxy/sbin/haproxy  -f /u01/haproxy/etc/haproxy.conf
###################
```
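As an alternative to rc.local, HAProxy can also be supervised by systemd like the other services in this guide; a minimal sketch (the unit name and the -db foreground flag are choices made here, not part of the original setup). Stop any manually started instance before enabling the unit.

```sh
[root@master ~]# cat > /etc/systemd/system/haproxy.service <<EOF
[Unit]
Description=HAProxy Load Balancer
After=network.target

[Service]
# -db keeps HAProxy in the foreground so systemd can supervise it
ExecStart=/u01/haproxy/sbin/haproxy -db -f /u01/haproxy/etc/haproxy.conf
Restart=on-failure

[Install]
WantedBy=multi-user.target
EOF
[root@master ~]# systemctl daemon-reload && systemctl enable --now haproxy
```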
## 7. Kubernetes Cluster Deployment
### Master Node Deployment
Create the Kubernetes CA (master node 1)
```sh
[root@master ~]# mkdir -p /u01/ssl/kubernets_ssl
[root@master ~]# cd /u01/ssl/kubernets_ssl
[root@master kubernets_ssl]# vi k8s-root-ca-csr.json
#######contents#####
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 4096
  },
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
##################
:wq # save and exit
```
Create the API aggregation CA (master node 1)
```sh
[root@master kubernets_ssl]# vi front-proxy-ca-csr.json
#######contents#####
{
  "CN": "kubernetes",
  "key": {
      "algo": "rsa",
      "size": 2048
  },
  "names": [{
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "k8s",
      "OU": "system"
    }]
}
##################
:wq # save and exit
[root@master kubernets_ssl]# cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca  # generate the CA certificate
```
Create the CA signing profile (master node 1)
```sh
[root@master kubernets_ssl]# vi k8s-gencert.json
#######contents#####
{
    "signing":{
        "default":{
            "expiry":"876000h"
        },
        "profiles":{
            "kubernetes":{
                "usages":[
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry":"876000h"
            }
        }
    }
}
##################
:wq # save and exit
```
Create the API aggregation client certificate signing request
```sh
[root@master kubernets_ssl]# vi front-proxy-client-csr.json
#######contents#####
{
  "CN": "front-proxy-client",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [{
    "C": "CN",
    "ST": "Chongqing",
    "L": "Chongqing",
    "O": "k8s",
    "OU": "system"
  }]
}
##################
:wq # save and exit
[root@master kubernets_ssl]# cfssl gencert --ca=/u01/ssl/kubernets_ssl/front-proxy-ca.pem --ca-key=/u01/ssl/kubernets_ssl/front-proxy-ca-key.pem --config /u01/ssl/kubernets_ssl/k8s-gencert.json --profile kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client  # generate the client certificate
```
Create the kube-apiserver certificate signing request (master node 1)
```sh
[root@master kubernets_ssl]# vi kubernetes-csr.json
#######contents######
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
"127.0.0.1",
"10.254.0.1",
"10.62.130.67",
"10.62.130.68",
"10.62.130.69",
"10.62.130.70",
"10.62.130.71",
"10.62.130.72",
"10.62.130.73",
"10.62.130.74",
"k8s-master1",
"k8s-master2",
"k8s-master3",
"k8s-node1",
"k8s-node2",
"k8s-node3"
],
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "kubernetes",
      "OU": "System"
    }
  ]
}
##################
:wq # save and exit
```
Initialize the Kubernetes root CA (master node 1)

```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --initca=true k8s-root-ca-csr.json | cfssljson --bare k8s-root-ca
```
Generate the kube-apiserver certificate (master node 1)

```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --ca=k8s-root-ca.pem --ca-key=k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes kubernetes-csr.json | cfssljson --bare kubernetes
```
Create the admin (kubectl) client certificate signing request (master node 1)

```sh
[root@master kubernets_ssl]# vi admin-csr.json
#######contents#####
{
  "CN": "admin",
  "hosts": [
"127.0.0.1",
"10.254.0.1",
"10.62.130.67",
"10.62.130.68",
"10.62.130.69",
"10.62.130.70",
"10.62.130.71",
"10.62.130.72",
"10.62.130.73",
"10.62.130.74",
"k8s-master1",
"k8s-master2",
"k8s-master3",
"k8s-node1",
"k8s-node2",
"k8s-node3"
],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Beijing",
      "L": "Beijing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
} 
###############
:wq # save and exit
```
Generate the admin client certificate (master node 1)

```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --ca=k8s-root-ca.pem --ca-key=k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes admin-csr.json | cfssljson --bare admin
```
Create the kube-proxy certificate signing request (master node 1)

```sh
[root@master kubernets_ssl]# vi  kube-proxy-csr.json
#######contents#####
{
  "CN": "system:kube-proxy",
  "hosts": [
  "127.0.0.1",
"10.254.0.1",
"10.62.130.67",
"10.62.130.68",
"10.62.130.69",
"10.62.130.70",
"10.62.130.71",
"10.62.130.72",
"10.62.130.73",
"10.62.130.74",
"k8s-master1",
"k8s-master2",
"k8s-master3",
"k8s-node1",
"k8s-node2",
"k8s-node3"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "K8s",
      "OU": "System"
    }
  ]
}
###############
:wq # save and exit
```
Generate the kube-proxy certificate (master node 1)
```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --ca=k8s-root-ca.pem --ca-key=k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes kube-proxy-csr.json | cfssljson --bare kube-proxy 
```
Create the kube-controller-manager certificate signing request (master node 1)

```sh
[root@master kubernets_ssl]# vi  kube-controller-manager-csr.json
#######contents#####
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
    "127.0.0.1",
    "10.254.0.1",
    "10.62.130.67",
    "10.62.130.68",
    "10.62.130.69",
    "10.62.130.70",
    "10.62.130.71",
    "10.62.130.72",
    "10.62.130.73",
    "10.62.130.74",
    "k8s-master1",
    "k8s-master2",
    "k8s-master3",
    "k8s-node1",
    "k8s-node2",
    "k8s-node3"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "Chongqing",
        "L": "Chongqing",
        "O": "system:kube-controller-manager",
        "OU": "system"
      }
    ]
}
###############
:wq # save and exit
```
Generate the kube-controller-manager certificate (master node 1)
```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --ca=k8s-root-ca.pem --ca-key=k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager 
```
Create the kube-scheduler certificate signing request (master node 1)

```sh
[root@master kubernets_ssl]# vi  kube-scheduler-csr.json
#######contents#####
{
    "CN": "system:kube-scheduler",
    "hosts": [
    "127.0.0.1",
    "10.254.0.1",
    "10.62.130.67",
    "10.62.130.68",
    "10.62.130.69",
    "10.62.130.70",
    "10.62.130.71",
    "10.62.130.72",
    "10.62.130.73",
    "10.62.130.74",
    "k8s-master1",
    "k8s-master2",
    "k8s-master3",
    "k8s-node1",
    "k8s-node2",
    "k8s-node3"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "Chongqing",
        "L": "Chongqing",
        "O": "system:kube-scheduler",
        "OU": "system"
      }
    ]
}
###############
:wq # save and exit
```
Generate the kube-scheduler certificate (master node 1)
```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --ca=k8s-root-ca.pem --ca-key=k8s-root-ca-key.pem --config k8s-gencert.json --profile kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
```
Create the metrics-server client certificate signing request (master node 1)
```sh
[root@master kubernets_ssl]# vi  metrics-proxy-csr.json 
#######contents#####
{
  "CN": "metrics-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "hosts": [
    "127.0.0.1",
    "10.254.0.1",
    "10.62.130.67",
    "10.62.130.68",
    "10.62.130.69",
    "10.62.130.70",
    "10.62.130.71",
    "10.62.130.72",
    "10.62.130.73",
    "10.62.130.74",
    "k8s-master1",
    "k8s-master2",
    "k8s-master3",
    "k8s-node1",
    "k8s-node2",
    "k8s-node3"
  ],
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "metrics-proxy",
      "OU": "System"
    }
  ]
}
###############
:wq # save and exit
```
Generate the metrics-server certificate (master node 1)
```sh
[root@master kubernets_ssl]# /usr/local/bin/cfssl gencert --ca=k8s-root-ca.pem --ca-key=k8s-root-ca-key.pem --config k8s-gencert.json --profile=kubernetes metrics-proxy-csr.json | cfssljson -bare metrics-proxy
```
Create the service account key pair (master node 1)
```sh
# generate the private key
[root@master kubernets_ssl]# openssl genrsa -out ./service.key 2048
# generate the public key
[root@master kubernets_ssl]# openssl rsa -in ./service.key -pubout -out ./service.pub
```
Set permissions on the certificates and distribute them to the other nodes (master node 1)

```sh
[root@master kubernets_ssl]# chmod -R 775 /u01/ssl/kubernets_ssl
[root@master kubernets_ssl]# scp -r /u01/ssl/kubernets_ssl/ root@10.62.130.68:/u01/ssl/
[root@master kubernets_ssl]# scp -r /u01/ssl/kubernets_ssl/ root@10.62.130.69:/u01/ssl/
[root@master kubernets_ssl]# scp -r /u01/ssl/kubernets_ssl/ root@10.62.130.70:/u01/ssl/
[root@master kubernets_ssl]# scp -r /u01/ssl/kubernets_ssl/ root@10.62.130.71:/u01/ssl/
[root@master kubernets_ssl]# scp -r /u01/ssl/kubernets_ssl/ root@10.62.130.72:/u01/ssl/
```
Upload the Kubernetes binaries (all nodes); a sketch of the typical layout follows.
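A minimal sketch of what the upload usually looks like, assuming the kubernetes-server v1.26.1 tarball was copied into /u01/install (the file name is illustrative):

```sh
[root@master ~]# cd /u01/install
[root@master install]# tar -zxvf kubernetes-server-linux-amd64.tar.gz
# control-plane binaries are only needed on the master nodes
[root@master install]# cp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} /usr/local/bin/
# kubelet and kube-proxy are needed on every node
[root@master install]# cp kubernetes/server/bin/{kubelet,kube-proxy} /usr/local/bin/
[root@master install]# kubectl version --client
```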

Generate the kubectl kubeconfig (master nodes)
```sh
[root@master ~]# kubectl config set-cluster kubernetes --certificate-authority=/u01/ssl/kubernets_ssl/k8s-root-ca.pem --embed-certs=true --server=https://10.62.130.74:6443  # set the cluster parameters
[root@master ~]# kubectl config set-credentials admin --client-certificate=/u01/ssl/kubernets_ssl/admin.pem --embed-certs=true --client-key=/u01/ssl/kubernets_ssl/admin-key.pem  # set the client credentials
[root@master ~]# kubectl config set-context kubernetes --cluster=kubernetes --user=admin  # set the context
[root@master ~]# kubectl config use-context kubernetes # switch to this context
[root@master ~]# mkdir -p /etc/kubernetes
```
Create the kubelet bootstrap kubeconfig (master node 1)
```sh
[root@master ~]# cd /etc/kubernetes
[root@master kubernetes]# kubectl config set-cluster kubernetes --certificate-authority=/u01/ssl/kubernets_ssl/k8s-root-ca.pem --embed-certs=true --server=https://10.62.130.74:6443 --kubeconfig=bootstrap.kubeconfig # the server address is the VIP
[root@master kubernetes]# kubectl config set-credentials kubelet-bootstrap --token="b8e44f5c1109ea7edd29d69de6666be9,65513a0dc7996a75544c45d9c01e7e37,bde2cafc4a29960a22d26bde1ec40388" --kubeconfig=bootstrap.kubeconfig # the ${BOOTSTRAP_TOKEN} values (see the token generation step below)
[root@master kubernetes]# kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=bootstrap.kubeconfig
[root@master kubernetes]# kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
```
Create the kube-proxy kubeconfig (master node 1)
```sh
[root@master ~]# cd /etc/kubernetes
[root@master kubernetes]# kubectl config set-cluster kubernetes --certificate-authority=/u01/ssl/kubernets_ssl/k8s-root-ca.pem --embed-certs=true --server=https://10.62.130.74:6443 --kubeconfig=kube-proxy.kubeconfig
[root@master kubernetes]# kubectl config set-credentials kube-proxy --client-certificate=/u01/ssl/kubernets_ssl/kube-proxy.pem --client-key=/u01/ssl/kubernets_ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
[root@master kubernetes]# kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
[root@master kubernetes]# kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
```
Distribute the kubeconfig files (master node 1)
```sh
[root@master kubernetes]# scp /etc/kubernetes/*.kubeconfig root@10.62.130.68:/etc/kubernetes/
[root@master kubernetes]# scp /etc/kubernetes/*.kubeconfig root@10.62.130.69:/etc/kubernetes/
[root@master kubernetes]# scp /etc/kubernetes/*.kubeconfig root@10.62.130.70:/etc/kubernetes/
[root@master kubernetes]# scp /etc/kubernetes/*.kubeconfig root@10.62.130.71:/etc/kubernetes/
[root@master kubernetes]# scp /etc/kubernetes/*.kubeconfig root@10.62.130.72:/etc/kubernetes/
```
Create the audit policy and the audit log directory (master nodes)
```sh
[root@master kubernetes]# vi /etc/kubernetes/audit-policy.yaml
#######contents#####
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
###############
:wq # save and exit
[root@master kubernetes]# mkdir -p  /var/log/kube-audit/
[root@master kubernetes]# chmod -R 775  /var/log/kube-audit/
```
Generate a random bootstrap token (master nodes)
```sh
[root@master ~]# export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
[root@master ~]# echo "Token: ${BOOTSTRAP_TOKEN}"    # print the generated token
```

Create the token file (master nodes)
```sh
[root@master ~]# vi /etc/kubernetes/token.csv
#######contents#####
"b8e44f5c1109ea7edd29d69de6666be9,65513a0dc7996a75544c45d9c01e7e37,bde2cafc4a29960a22d26bde1ec40388",kubelet-bootstrap,10001,system:kubelet-bootstrap   # the ${BOOTSTRAP_TOKEN} values generated above; with a single apiserver the quotes are not needed, but with this HA apiserver setup keep the quotes, separate the three tokens with commas, and set the token count accordingly in the apiserver configuration
###############
:wq # save and exit
```
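A hedged sketch that generates three fresh tokens and writes token.csv in the same format as above; if you use it, put the same comma-separated token string into the --token argument of the bootstrap.kubeconfig step earlier:

```sh
[root@master ~]# T1=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' \n')
[root@master ~]# T2=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' \n')
[root@master ~]# T3=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' \n')
[root@master ~]# echo "\"${T1},${T2},${T3}\",kubelet-bootstrap,10001,system:kubelet-bootstrap" > /etc/kubernetes/token.csv
[root@master ~]# cat /etc/kubernetes/token.csv
```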
Create the kube-apiserver systemd service (master nodes)
```sh
[root@master ~]# vi /usr/lib/systemd/system/kube-apiserver.service
#######contents#####
[Unit]
Description=Kubernetes API Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
[Service]
EnvironmentFile=-/etc/kubernetes/apiserver
ExecStart=/usr/local/bin/kube-apiserver  $KUBE_ETCD_SERVERS $KUBE_API_ADDRESS $KUBE_API_PORT $KUBELET_PORT $KUBE_ALLOW_PRIV $KUBE_SERVICE_ADDRESSES $KUBE_ADMISSION_CONTROL $KUBE_API_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
###############
:wq # save and exit
```
Create the kube-apiserver configuration (master nodes)
```sh
[root@master ~]# vi /etc/kubernetes/apiserver
#######contents (change --advertise-address to each node's own IP address)#####
KUBE_API_ADDRESS="--advertise-address=10.62.130.67 --bind-address=0.0.0.0"
KUBE_API_PORT="--secure-port=6443"
KUBE_ETCD_SERVERS="--etcd-servers=https://10.62.130.67:2379,https://10.62.130.68:2379,https://10.62.130.69:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction"
KUBE_API_ARGS=" \
--allow-privileged=true \
--runtime-config=api/all=true \
--authorization-mode=RBAC,Node \
--anonymous-auth=false \
--enable-bootstrap-token-auth \
--token-auth-file=/etc/kubernetes/token.csv \
--service-node-port-range=1-65535 \
--tls-cert-file=/u01/ssl/kubernets_ssl/kubernetes.pem \
--tls-private-key-file=/u01/ssl/kubernets_ssl/kubernetes-key.pem \
--client-ca-file=/u01/ssl/kubernets_ssl/k8s-root-ca.pem \
--service-account-key-file=/u01/ssl/kubernets_ssl/service.pub \
--service-account-issuer=https://10.62.130.74:6443 \
--service-account-signing-key-file=/u01/ssl/kubernets_ssl/service.key \
--storage-backend=etcd3 \
--etcd-cafile=/u01/ssl/etcd_ssl/etcd-root-ca.pem \
--etcd-certfile=/u01/ssl/etcd_ssl/etcd.pem \
--etcd-keyfile=/u01/ssl/etcd_ssl/etcd-key.pem \
--apiserver-count=3 \
--audit-policy-file=/etc/kubernetes/audit-policy.yaml \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/var/log/kube-audit/audit.log \
--event-ttl=1h \
--max-mutating-requests-inflight=3000 \
--max-requests-inflight=1000 \
--proxy-client-cert-file=/u01/ssl/kubernets_ssl/front-proxy-client.pem \
--proxy-client-key-file=/u01/ssl/kubernets_ssl/front-proxy-client-key.pem \
--requestheader-client-ca-file=/u01/ssl/kubernets_ssl/front-proxy-ca.pem \
--requestheader-allowed-names= \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
"
###############
:wq # save and exit
```
Create the kube-controller-manager systemd service (master nodes)
```sh
[root@master kubernetes]# vi /usr/lib/systemd/system/kube-controller-manager.service
#######contents#####
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/controller-manager
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=655350
[Install]
WantedBy=multi-user.target
###############
:wq # save and exit
```
Create the kube-controller-manager configuration (master nodes)
```sh
[root@master kubernetes]# vi /etc/kubernetes/controller-manager
#######contents#####
# The following values are used to configure the kubernetes controller-manager
# defaults from config and apiserver should be adequate
# Add your own!
KUBE_CONTROLLER_MANAGER_ARGS=" \
--master=https://10.62.130.74:6443 \
--service-cluster-ip-range=10.254.0.0/16 \
--cluster-cidr=10.254.0.0/16 \
--kubeconfig=/root/.kube/config \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/u01/ssl/kubernets_ssl/k8s-root-ca.pem \
--cluster-signing-key-file=/u01/ssl/kubernets_ssl/k8s-root-ca-key.pem \
--service-account-private-key-file=/u01/ssl/kubernets_ssl/service.key \
--root-ca-file=/u01/ssl/kubernets_ssl/k8s-root-ca.pem \
--tls-cert-file=/u01/ssl/kubernets_ssl/kube-controller-manager.pem \
--tls-private-key-file=/u01/ssl/kubernets_ssl/kube-controller-manager-key.pem \
--requestheader-client-ca-file=/u01/ssl/kubernets_ssl/front-proxy-ca.pem \
--requestheader-allowed-names=front-proxy-client \
--requestheader-extra-headers-prefix=X-Remote-Extra-  \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--leader-elect=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=5m0s \
--kube-api-qps=100 \
--kube-api-burst=100 \
"
###############
:wq # save and exit
```
Create the kube-scheduler systemd service (master nodes)
```sh
[root@master kubernetes]# vi /usr/lib/systemd/system/kube-scheduler.service
#######contents#####
[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
[Service]
EnvironmentFile=-/etc/kubernetes/scheduler
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=655350
[Install]
WantedBy=multi-user.target
###############
:wq # save and exit
```
Create the kube-scheduler configuration (master nodes)
```sh
[root@master kubernetes]# vi /etc/kubernetes/scheduler
#######contents#####
# kubernetes scheduler config
# default config should be adequate
# Add your own!
KUBE_SCHEDULER_ARGS=" \
--master=https://10.62.130.74:6443 \
--kubeconfig=/root/.kube/config  \
--feature-gates=EphemeralContainers=true \
--tls-cert-file=/u01/ssl/kubernets_ssl/kube-scheduler.pem \
--tls-private-key-file=/u01/ssl/kubernets_ssl/kube-scheduler-key.pem \
--requestheader-client-ca-file=/u01/ssl/kubernets_ssl/front-proxy-ca.pem \
--requestheader-allowed-names=front-proxy-client \
--requestheader-extra-headers-prefix=X-Remote-Extra-  \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--leader-elect=true \
"
###############
:wq # save and exit
```
Start the Kubernetes control-plane services (master nodes)
```sh
[root@master kubernetes]# systemctl daemon-reload
[root@master kubernetes]# systemctl start kube-apiserver
[root@master kubernetes]# systemctl start kube-controller-manager
[root@master kubernetes]# systemctl start kube-scheduler
[root@master kubernetes]# systemctl enable kube-apiserver
[root@master kubernetes]# systemctl enable kube-controller-manager
[root@master kubernetes]# systemctl enable kube-scheduler
```
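A quick health check once the three services are up (the /readyz endpoint is served by kube-apiserver itself):

```sh
[root@master kubernetes]# systemctl status kube-apiserver kube-controller-manager kube-scheduler --no-pager
[root@master kubernetes]# kubectl cluster-info
[root@master kubernetes]# kubectl get --raw='/readyz?verbose'
```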
### Node Deployment
Create the kubelet systemd service (all nodes)
```sh
[root@master ~]# mkdir -p /u01/kubelet
[root@master ~]# vi /usr/lib/systemd/system/kubelet.service
#######contents#####
[Unit]
Description=Kubernetes Kubelet Service
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/local/bin/kubelet $KUBELET_ARGS
Restart=on-failure
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
###############
:wq # save and exit
```
Create the kubelet configuration (all nodes)
```sh
[root@master ~]# vi /etc/kubernetes/kubelet
#######contents (change --hostname-override to each node's own name)#####
KUBELET_ARGS=" \
--cgroup-driver=systemd \
--hostname-override=k8s-master1 \
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
--cert-dir=/u01/kubernetes/ssl \
--cluster-dns=10.254.0.2 \
--cluster-domain=cluster.local. \
--fail-swap-on=false \
--pod-infra-container-image=10.62.130.73/common/pause:3.8 \
--container-runtime=remote \
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \
"
###############
:wq # save and exit
```
Create the kube-proxy systemd service (all nodes)

```sh
[root@master ~]# mkdir -p /u01/kube-proxy
[root@master ~]# vi /usr/lib/systemd/system/kube-proxy.service
#######contents (change --hostname-override to each node's own name)#####
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
[Service]
WorkingDirectory=/u01/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --master=https://10.62.130.74:6443 \
  --cluster-cidr=10.254.0.0/16 \
  --hostname-override=k8s-master1 \
  --config=/etc/kubernetes/kube-proxy.yaml
Restart=on-failure
LimitNOFILE=655350
[Install]
WantedBy=multi-user.target
###############
:wq # save and exit
```
Create the kube-proxy configuration (all nodes)
```sh
[root@master kubernetes]# vi /etc/kubernetes/kube-proxy.yaml
#######contents (node 1)#####
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: 0.0.0.0
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
  qps: 5
clusterCIDR: 10.254.0.0/16
configSyncPeriod: 15m0s
conntrack:
  max: null
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
healthzBindAddress: 0.0.0.0:10256
metricsBindAddress: 0.0.0.0:10249
enableProfiling: false
hostnameOverride: ""
iptables:
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 0s
  syncPeriod: 30s 
ipvs:
  masqueradeAll: true
  minSyncPeriod: 5s
  scheduler: "rr"
  syncPeriod: 30s 
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
udpIdleTimeout: 250ms
###############
:wq # save and exit
```
Grant the bootstrap permission (primary master node)

```sh
[root@master ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
```
Start kubelet (all nodes)

```sh
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl start kubelet
[root@master ~]# systemctl enable kubelet
```

Approve the node certificate signing requests (primary master node)

```sh
[root@master ~]# kubectl get csr   ## list pending certificate signing requests
[root@master ~]# kubectl certificate approve node-csr-TcJZDfKMnQbwvXPf-_Jy3ClRbjveJGuaYzn0CChgtIo  ## approve a request
[root@master ~]# kubectl get nodes  ## check node status (nodes stay NotReady until the network plugin is installed)
```
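When several nodes bootstrap at the same time, every pending request can be approved in one go (a convenience one-liner, not part of the original flow):

```sh
[root@master ~]# kubectl get csr -o name | xargs kubectl certificate approve
```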

Start kube-proxy (all nodes)

```sh
[root@master ~]# systemctl start kube-proxy
[root@master ~]# systemctl enable kube-proxy
```
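To confirm kube-proxy really came up in IPVS mode, check the virtual server table and the proxier mode endpoint (ipvsadm was installed in the server-setup step; port 10249 is the metricsBindAddress configured above):

```sh
[root@master ~]# ipvsadm -Ln | head
[root@master ~]# curl -s http://127.0.0.1:10249/proxyMode
```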
## 8. Add-on Installation
### Deploy Harbor
Download the offline installer from the official site; make sure the bundled docker and docker-compose versions match (Harbor node)
```sh
[root@master install]# tar -xvf harbor-offline-installer-v1.10.17.tgz
```
Create the SSL signing profile (certificate-issuing node)
```sh
[root@master ~]# mkdir -p /u01/ssl/harbor_ssl
[root@master ~]# vi /u01/ssl/harbor_ssl/k8s-gencert.json
#######contents#####
{
    "signing":{
        "default":{
            "expiry":"876000h"
        },
        "profiles":{
            "kubernetes":{
                "usages":[
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ],
                "expiry":"876000h"
            }
        }
    }
}
###############
:wq # save and exit
```
Create the SSL certificate signing request (certificate-issuing node)
```sh
[root@master harbor_ssl]# vi /u01/ssl/harbor_ssl/harbor-csr.json
#######contents#####
{
  "CN": "harbori",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 4096
  },
  "names": [
    {
      "C": "CN",
      "ST": "Chongqing",
      "L": "Chongqing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

###############
:wq # save and exit
```
Generate the certificates (certificate-issuing node)
```sh
[root@master harbor_ssl]#  cfssl gencert -ca=/u01/ssl/kubernets_ssl/k8s-root-ca.pem -ca-key=/u01/ssl/kubernets_ssl/k8s-root-ca-key.pem -config=k8s-gencert.json -profile=kubernetes harbor-csr.json | cfssljson -bare harbor
[root@master harbor_ssl]# scp -r  /u01/ssl/harbor_ssl root@10.62.130.73:/u01/ssl/harbor_ssl # distribute the certificates
```
Edit the configuration
```sh
[root@master install]# mkdir -p  /u01/harbor/data/
[root@master install]# vi harbor.yml
#######changed contents#####
hostname: 10.62.130.73
certificate: /u01/ssl/harbor_ssl/harbor.pem
private_key: /u01/ssl/harbor_ssl/harbor-key.pem
data_volume: /u01/harbor/data/
###############
:wq # save and exit
```
Run the installer
```sh
[root@master harbor]# ./install.sh
[root@master harbor]# docker ps -a # check that the Harbor containers are running
```
Client login
```sh
[root@master ~]# mkdir -p /etc/containerd/certs.d/10.62.130.73
[root@master ~]# scp  /u01/ssl/kubernets_ssl/k8s-root-ca.pem   root@IP:/etc/containerd/certs.d/10.62.130.73/ca.crt # copy the CA certificate to every client
[root@master ~]# vi /etc/containerd/config.toml
#######changed contents#####
sandbox_image = "10.62.130.73/common/pause:3.8"
      [plugins."io.containerd.grpc.v1.cri".registry.configs]
        [plugins."io.containerd.grpc.v1.cri".registry.configs."10.62.130.73".tls]
          insecure_skip_verify = true
        [plugins."io.containerd.grpc.v1.cri".registry.configs."10.62.130.73".auth]
          username = "admin"
          password = "Changan@123"
###############
:wq # save and exit
[root@master ~]# nerdctl login 10.62.130.73 # log in
```
### Deploy Calico (master node 1)
Deploy Calico in etcd-datastore mode; download the YAML manifest for the matching version.
Pull the required images (in an offline environment, download them on an Internet-connected host first, then import them into the cluster and push them to Harbor)
```sh
[root@k8smaster1app002 calico]# cat calico.yaml |grep image
          image: 10.62.130.73/common/calico-cni:v3.25.0
          imagePullPolicy: IfNotPresent
          image: 10.62.130.73/common/calico-node:v3.25.0 
          imagePullPolicy: IfNotPresent
          image: 10.62.130.73/common/calico-node:v3.25.0 
          imagePullPolicy: IfNotPresent
          image: 10.62.130.73/common/calico-kube-controllers:v3.25.0 
          imagePullPolicy: IfNotPresent
[root@k8smaster1app002 calico]# nerdctl save -o calico-cni-v3.25.0.tar docker.io/calico/cni:v3.25.0              # export the image (on the Internet-connected host)
[root@k8smaster1app002 calico]# nerdctl load -i calico-cni-v3.25.0.tar                                           # import the image on the cluster side
[root@k8smaster1app002 calico]# nerdctl tag docker.io/calico/cni:v3.25.0 10.62.130.73/common/calico-cni:v3.25.0  # retag for the local Harbor
[root@k8smaster1app002 calico]# nerdctl push 10.62.130.73/common/calico-cni:v3.25.0                              # push to Harbor (repeat for the node and kube-controllers images)
```
Edit the configuration
```yaml
[root@k8smaster1app002 calico]# vi calico.yaml
#######changed contents#####
etcd_endpoints: "https://10.62.130.67:2379,https://10.62.130.68:2379,https://10.62.130.69:2379"
##fill in the etcd certificates, converted to base64
# encoded contents of each file that would be associated with the TLS data.
# Example command for encoding a file contents: cat <file> | base64 -w 0
etcd-key:  cat /u01/ssl/etcd_ssl/etcd-key.pem | base64 | tr -d '\n'
etcd-cert: cat /u01/ssl/etcd_ssl/etcd.pem | base64 | tr -d '\n'
etcd-ca:   cat /u01/ssl/etcd_ssl/etcd-root-ca.pem | base64 | tr -d '\n'
##uncomment the file locations below
# If you're using TLS enabled etcd uncomment the following.
# You must also populate the Secret below with these files.
etcd_ca: "/calico-secrets/etcd-ca"
etcd_cert: "/calico-secrets/etcd-cert"
etcd_key: "/calico-secrets/etcd-key"
##set the pod network CIDR and the API server address
# no effect. This should fall within `--cluster-cidr`.
- name: CALICO_IPV4POOL_CIDR
value: "10.254.0.0/16"
- name: IP_AUTODETECTION_METHOD
value: can-reach=10.62.130.74
- name: KUBERNETES_SERVICE_HOST
value: "10.62.130.74"
- name: KUBERNETES_SERVICE_PORT
value: "6443"
- name: KUBERNETES_SERVICE_PORT_HTTPS
value: "6443"

###############
:wq # save and exit
```
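A small helper for producing the three base64 strings to paste into the calico-etcd-secrets Secret in calico.yaml (paths follow the etcd section above):

```sh
[root@k8smaster1app002 calico]# echo "etcd-key:  $(base64 -w 0 < /u01/ssl/etcd_ssl/etcd-key.pem)"
[root@k8smaster1app002 calico]# echo "etcd-cert: $(base64 -w 0 < /u01/ssl/etcd_ssl/etcd.pem)"
[root@k8smaster1app002 calico]# echo "etcd-ca:   $(base64 -w 0 < /u01/ssl/etcd_ssl/etcd-root-ca.pem)"
```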
Create the Calico resources
```sh
[root@k8smaster1app002 calico]# kubectl  create -f calico.yaml
[root@k8smaster1app002 calico]# kubectl  get pods -A -o wide # check the pod status
[root@k8smaster1app002 calico]# kubectl  get node # check that the nodes have become Ready
NAME          STATUS   ROLES    AGE   VERSION
k8s-master1   Ready    <none>   16d   v1.26.1
k8s-master2   Ready    <none>   12d   v1.26.1
k8s-master3   Ready    <none>   12d   v1.26.1
k8s-node1     Ready    <none>   12d   v1.26.1
k8s-node2     Ready    <none>   12d   v1.26.1
k8s-node3     Ready    <none>   12d   v1.26.1
```
### Deploy CoreDNS (master node 1)
Create the YAML manifest
```sh
[root@k8smaster1app002 coredns]# vi coredns.yaml
```
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
      app.kubernetes.io/name: coredns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        app.kubernetes.io/name: coredns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
           - labelSelector:
               matchExpressions:
               - key: k8s-app
                 operator: In
                 values: ["kube-dns"]
             topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: 10.62.130.73/common/coredns:1.9.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 1024Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
    app.kubernetes.io/name: coredns
spec:
  selector:
    k8s-app: kube-dns
    app.kubernetes.io/name: coredns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
```
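The manifest still needs to be applied and checked; a short verification sketch (the busybox image path below is an assumption, any image that provides nslookup works):

```sh
[root@k8smaster1app002 coredns]# kubectl apply -f coredns.yaml
[root@k8smaster1app002 coredns]# kubectl get pods -n kube-system -l k8s-app=kube-dns -o wide
# resolution test; the busybox image location is an assumption
[root@k8smaster1app002 coredns]# kubectl run dns-test --rm -it --image=10.62.130.73/common/busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local
```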
### Deploy Traefik (master node 1)
Download the third-party CRD definition YAML from the official site
```sh
[root@k8smaster1app002 traefik]# vi traefik-crd-definition.yml # change the namespace
namespace: kube-system
```
Create the RBAC manifest
```sh
[root@k8smaster1app002 traefik]# vi traefik-crd-rbac.yml
```
```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses
      - ingressclasses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
      - networking.k8s.io
    resources:
      - ingresses/status
    verbs:
      - update
  - apiGroups:
      - traefik.containo.us
    resources:
      - middlewares
      - middlewaretcps
      - ingressroutes
      - traefikservices
      - ingressroutetcps
      - ingressrouteudps
      - tlsoptions
      - tlsstores
      - serverstransports
    verbs:
      - get
      - list
      - watch

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
  - kind: ServiceAccount
    name: traefik-ingress-controller
    namespace: kube-system
```
Create the Traefik configuration file
```sh
[root@k8smaster1app002 traefik]# vi traefik-config.yml
```
```yaml
kind: ConfigMap
apiVersion: v1
metadata:
  name: traefik-config
  namespace: kube-system
data:
  traefik.yaml: |-
    global:
      checkNewVersion: false    # periodically check for new releases
      sendAnonymousUsage: false # periodically send anonymous usage statistics
    serversTransport:
      insecureSkipVerify: true  # skip TLS certificate verification for proxied backends
    api:
      insecure: true            # allow access to the API over plain HTTP
      dashboard: true           # enable the dashboard
      debug: false              # enable debug mode
    metrics:
      prometheus:               # expose Prometheus metrics with the default settings
        addRoutersLabels: true  # add router metrics
        entryPoint: "metrics"   # entry point used for metrics
    entryPoints:
      web:
        address: ":80"          # port 80, entry point named web
        forwardedHeaders: 
          insecure: true        # trust all forwarded headers
      websecure:
        address: ":443"         # port 443, entry point named websecure
        forwardedHeaders: 
          insecure: true
      traefik:
        address: ":9000"        # port 9000, used for the dashboard
      metrics:
        address: ":9101"        # port 9101, used as the metrics entry point
      tcpep:
        address: ":9200"        # port 9200, generic TCP entry point
      udpep:
        address: ":9300/udp"    # port 9300, generic UDP entry point
    providers:
      kubernetesCRD:            # configure routing rules through Kubernetes CRDs
        ingressClass: ""
        allowCrossNamespace: true   #allow cross-namespace references
        allowEmptyServices: true    #allow services with empty endpoints
    log:
      filePath: "/etc/traefik/logs/traefik.log" # log file path; empty means log to the console
      level: "INFO"             # log level
      format: "common"          # log format
    accessLog:
      filePath: "/etc/traefik/logs/access.log" # access log path; empty means log to the console
      format: "common"          # access log format
      bufferingSize: 0          # number of access log lines to buffer
      filters:
        statusCodes: ["200"]   # only keep access log entries with these status codes
        retryAttempts: true     # keep access log entries when a proxy retry fails
        minDuration: 20         # keep access log entries for requests longer than this duration
      fields:                   # which access log fields to keep (keep) or drop (drop)
        defaultMode: keep       # keep fields by default
        names:                  # per-field overrides
          ClientUsername: drop
          StartUTC: drop        # do not log timestamps in UTC
        headers:                # which header fields to keep
          defaultMode: keep     # keep header fields by default
          names:                # per-header overrides
            #User-Agent: redact # a specific header can also be redacted
            Authorization: drop
            Content-Type: keep
```
Create the Traefik deployment manifest
```sh
[root@k8smaster1app002 traefik]# vi traefik-deployment.yaml
```
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  namespace: kube-system
  name: traefik-ingress-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    app: traefik
spec:
  replicas: 1 
  selector:
    matchLabels:
      app: traefik
  template:
    metadata:
      name: traefik
      labels:
        app: traefik
    spec:
      nodeName: k8s-master1
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 1
      containers:
      - name: traefik
        image: 10.62.130.73/common/traefik:v2.9
        env:
        - name: KUBERNETES_SERVICE_HOST       # point directly at the API VIP so an unstable network add-on does not break API access
          value: "10.62.130.74"
        - name: KUBERNETES_SERVICE_PORT_HTTPS # API server port
          value: "6443"
        - name: KUBERNETES_SERVICE_PORT       # API server port
          value: "6443"
        - name: TZ                            # time zone
          value: "Asia/Shanghai"
        ports:
          - name: web
            containerPort: 80
            hostPort: 80                      # bind the container port to port 80 on the host
          - name: websecure
            containerPort: 443
            hostPort: 443                     # bind the container port to port 443 on the host
          - name: admin
            containerPort: 9000               # Traefik dashboard port
          - name: metrics
            containerPort: 9101               # metrics port
          - name: tcpep
            containerPort: 9200               # TCP entry point port
          - name: udpep
            containerPort: 9300               # UDP entry point port
        securityContext:                      # grant only the network binding capability
          capabilities:
            drop:
              - ALL
            add:
              - NET_BIND_SERVICE
        args:
          - --configfile=/etc/traefik/config/traefik.yaml
        volumeMounts:
        - mountPath: /etc/traefik/config
          name: config
        - mountPath: /etc/traefik/logs
          name: logdir
        - mountPath: /etc/localtime
          name: timezone
          readOnly: true
      volumes:
        - name: config
          configMap:
            name: traefik-config 
        - name: logdir
          hostPath:
            path: /u01/traefik/logs
            type: "DirectoryOrCreate"
        - name: timezone                       #mount the host time zone file
          hostPath:
            path: /etc/localtime
            type: File
      tolerations:                             # tolerate all taints so tainted nodes cannot evict the pod
        - operator: "Exists"
      hostNetwork: true                        # use the host network for better ingress performance
---
apiVersion: v1
kind: Service
metadata:
  name: traefik
  namespace: kube-system
spec:
  type: NodePort    # the official example uses ClusterIP; NodePort is used here
  selector:
    app: traefik
  ports:
    - name: web
      protocol: TCP
      port: 80
      targetPort: 80
    - name: websecure
      protocol: TCP
      port: 443
      targetPort: 443
    - name: admin
      protocol: TCP
      port: 9000
      targetPort: 9000
    - name: metrics
      protocol: TCP
      port: 9101
      targetPort: 9101
    - name: tcpep
      protocol: TCP
      port: 9200
      targetPort: 9200
    - name: udpep
      protocol: UDP
      port: 9300
      targetPort: 9300
---
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
  name: traefik-dashboard-route
  namespace: kube-system
spec:
  entryPoints:
    - web
  routes:
    - match: Host(`traefik.test.mall.changan.com`)
      kind: Rule
      services:
        - name: api@internal
          kind: TraefikService
```
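Apply the four manifests created in this section and check the pod before testing the dashboard:

```sh
[root@k8smaster1app002 traefik]# kubectl apply -f traefik-crd-definition.yml
[root@k8smaster1app002 traefik]# kubectl apply -f traefik-crd-rbac.yml
[root@k8smaster1app002 traefik]# kubectl apply -f traefik-config.yml
[root@k8smaster1app002 traefik]# kubectl apply -f traefik-deployment.yaml
[root@k8smaster1app002 traefik]# kubectl get pods -n kube-system -l app=traefik -o wide
```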
After adding the hostname to your local hosts file, open http://traefik.test.mall.changan.com/dashboard/#/ to verify the dashboard.

