Kubernetes V1.10 Cluster Setup

Kubernetes V1.10 cluster documentation

1. Server info: CentOS-7-x86_64-Minimal-1804 (keepalived floating IP: 192.168.0.10)
2. lab1, lab2 and lab3 are control-plane (master) nodes; lab4, lab5 and lab6 are worker nodes

Hostname  IP            Notes
lab1 192.168.0.11 master, etcd, keepalived, kubeadm, kubectl, kubelet, docker-ce, cfssl
lab2 192.168.0.22 master, etcd, keepalived, kubeadm, kubectl, kubelet, docker-ce
lab3 192.168.0.33 master, etcd, keepalived, kubeadm, kubectl, kubelet, docker-ce
lab4 192.168.0.44 node, kubeadm, kubectl, kubelet, docker-ce
lab5 192.168.0.55 node, kubeadm, kubectl, kubelet, docker-ce
lab6 192.168.0.66 node, kubeadm, kubectl, kubelet, docker-ce

All of the following commands are run on lab1. (Note: lab1 must be able to reach the other nodes over SSH with passwordless key login; this is configured below.)

Initialize configuration variables

cat <<'EOF' >./list
192.168.0.11  lab1
192.168.0.22  lab2
192.168.0.33  lab3
192.168.0.44  lab4
192.168.0.55  lab5
192.168.0.66  lab6
EOF

# keepalived floating IP
export vip=192.168.0.10

# export one variable per host, e.g. lab1=192.168.0.11
while read -r LINE; do
    IP=$(echo $LINE | awk '{print $1}')
    HOST=$(echo $LINE | awk '{print $2}')
    export "$HOST=$IP"
done < ./list
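
A quick sanity check of the loaded variables, assuming the loop above ran in the current shell; `${!h}` is bash indirect expansion:

# every hostname from ./list should print its IP
echo "vip=$vip"
for h in lab1 lab2 lab3 lab4 lab5 lab6; do
    echo "$h=${!h}"
done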

Configure passwordless SSH login

# root password on the remote hosts
passwd='redhat'

# hosts
if ! grep -q '# kubernetes' /etc/hosts; then
    echo '# kubernetes' >>/etc/hosts
    cat ./list >>/etc/hosts
fi

# generate an SSH key pair
[ -f ~/.ssh/id_dsa ] || { ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa; }

# install expect
[ -f /usr/bin/expect ] || { yum -y install expect; }

# loop over each line in list
while read -r LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    # push the public key to the remote host
    /usr/bin/expect <<EOF
    set timeout 30
    spawn ssh-copy-id -i /root/.ssh/id_dsa.pub root@$HOST
    expect {
        "(yes/no)?" { send "yes\r"; exp_continue }
        "password:" { send "$passwd\r" }
    }
    expect eof
EOF
done < ./list
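
An optional verification; `BatchMode=yes` makes ssh fail instead of prompting for a password:

# each node should print its hostname without a password prompt
while read -r LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    ssh -o BatchMode=yes -o ConnectTimeout=5 root@$HOST hostname || echo "FAILED: $HOST"
done < ./list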

Initialize the environment

while read LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    echo "--------------- $HOST ---------------"
    scp /etc/hosts $HOST:/etc/hosts
    ssh -T $HOST <<EOF
        # set the hostname
        hostnamectl set-hostname $HOST

        # configure the Aliyun yum mirrors
        if [ ! -f "/etc/yum.repos.d/Centos-7.repo" ];then
            yum -y install epel-release >/dev/null 2>&1
            rm -f /etc/yum.repos.d/*
            curl -o /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
            curl -o /etc/yum.repos.d/epel-7.repo http://mirrors.aliyun.com/repo/epel-7.repo
            sed -i '/aliyuncs.com/d' /etc/yum.repos.d/*.repo
            yum makecache >/dev/null 2>&1
        fi

        # Selinux, Firewalld
        systemctl stop firewalld
        systemctl disable firewalld
        firewall-cmd --state
        setenforce 0
        sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
        grep --color=auto '^SELINUX' /etc/selinux/config

        # common tools
        yum -y install wget vim ntpdate net-tools tree lrzsz lsof >/dev/null 2>&1 && echo "packages installed"

        # time synchronization
        ntpdate ntp.aliyun.com && hwclock -w >/dev/null 2>&1 && echo "time synchronized"
        echo "*/20 * * * * /usr/sbin/ntpdate pool.ntp.org >/dev/null 2>&1 && /usr/sbin/hwclock -w" >/tmp/crontab
        crontab /tmp/crontab

        # disable swap
        swapoff -a
        sed -i 's/.*swap.*/#&/' /etc/fstab
        sysctl -w vm.swappiness=0

        # kernel parameter tuning
        # the net.bridge.* keys only exist once br_netfilter is loaded
        modprobe br_netfilter
        echo 'net.ipv4.ip_forward = 1' > /etc/sysctl.d/k8s.conf
        echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.d/k8s.conf
        echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.d/k8s.conf
        echo 'vm.swappiness=0' >> /etc/sysctl.d/k8s.conf
        sysctl -p /etc/sysctl.d/k8s.conf >/dev/null 2>&1

        # raise resource limits
        echo '# myset' >> /etc/security/limits.conf
        echo '* soft nofile 65536' >> /etc/security/limits.conf
        echo '* hard nofile 65536' >> /etc/security/limits.conf
        echo '* soft nproc 65536' >> /etc/security/limits.conf
        echo '* hard nproc 65536' >> /etc/security/limits.conf
        echo '* soft memlock unlimited' >> /etc/security/limits.conf
        echo '* hard memlock unlimited' >> /etc/security/limits.conf
EOF
done < ./list
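
A quick spot check of the initialization (SELinux permissive, swap off, bridge sysctl applied), reusing the ./list file from above:

while read -r LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    echo "--------------- $HOST ---------------"
    # swapon -s prints nothing once swap is disabled
    ssh $HOST "getenforce; swapon -s; sysctl net.bridge.bridge-nf-call-iptables"
done < ./list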

Install and configure keepalived

Floating IP priority is lab1 > lab2 > lab3; change the priority value in the config files to adjust the order.

lab1

ssh lab1 <<EEOOFF
# install keepalived
yum install -y keepalived

# lab1 keepalived.conf
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://$vip:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    # keepalived only honors nopreempt with state BACKUP;
    # lab1 still wins the first election since it has the highest priority
    state BACKUP
    interface ens33
    virtual_router_id 61
    priority 100
    advert_int 1
    mcast_src_ip $lab1
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        $lab2
        $lab3
    }
    virtual_ipaddress {
        $vip/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF
systemctl enable keepalived
EEOOFF

lab2

ssh lab2 <<EEOOFF
# install keepalived
yum install -y keepalived

# lab2 keepalived.conf
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://$vip:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 61
    priority 90
    advert_int 1
    mcast_src_ip $lab2
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        $lab1
        $lab3
    }
    virtual_ipaddress {
        $vip/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF
systemctl enable keepalived
EEOOFF

lab3

ssh lab3 <<EEOOFF
# install keepalived
yum install -y keepalived

# lab3 keepalived.conf
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
   router_id LVS_k8s
}

vrrp_script CheckK8sMaster {
    script "curl -k https://$vip:6443"
    interval 3
    timeout 9
    fall 2
    rise 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 61
    priority 80
    advert_int 1
    mcast_src_ip $lab3
    nopreempt
    authentication {
        auth_type PASS
        auth_pass sqP05dQgMSlzrxHj
    }
    unicast_peer {
        $lab1
        $lab2
    }
    virtual_ipaddress {
        $vip/24
    }
    track_script {
        CheckK8sMaster
    }

}
EOF
systemctl enable keepalived
EEOOFF

Check status

for NODE in lab1 lab2 lab3; do
    echo "---------- $NODE ----------"
    ssh $NODE "systemctl restart keepalived; systemctl status keepalived"
done
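
Optionally confirm that exactly one node holds the floating IP (lab1 when healthy); this assumes the ens33 interface name used in the configs above:

for NODE in lab1 lab2 lab3; do
    echo "---------- $NODE ----------"
    # the holder shows the VIP on ens33; the others print nothing
    ssh $NODE "ip addr show ens33 | grep -w $vip || true"
done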

Generate certificates

Install the certificate generation tools

wget -c https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget -c https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
wget -c https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -O /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
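
A quick check that the downloaded binary runs:

cfssl version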

Generate the certificate files

# create a temporary working directory
mkdir /root/ssl && cd /root/ssl

# create the CA signing policy
cat <<EOF >ca-config.json
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes-Soulmate": {
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ],
        "expiry": "8760h"
      }
    }
  }
}
EOF

# create the CA certificate signing request (region and key settings)
cat <<EOF >ca-csr.json
{
  "CN": "kubernetes-Soulmate",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
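
Optionally inspect the new CA; cfssl-certinfo (installed above) prints the parsed certificate as JSON:

# ca.pem, ca-key.pem and ca.csr were created in the current directory
cfssl-certinfo -cert ca.pem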

# create the etcd certificate signing request
cat <<EOF >etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "$lab1",
    "$lab2",
    "$lab3",
    "$vip"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "shanghai",
      "L": "shanghai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF

# generate the etcd certificate
cfssl gencert -ca=ca.pem \
    -ca-key=ca-key.pem \
    -config=ca-config.json \
    -profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd

# return to the home directory
cd
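
Before distributing the files, it can be worth confirming the SANs cover every etcd endpoint; a sketch using openssl, which ships with CentOS:

# should list 127.0.0.1, the three lab IPs and the VIP
openssl x509 -in /root/ssl/etcd.pem -noout -text | grep -A1 'Subject Alternative Name'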

Distribute the certificate files

for NODE in lab1 lab2 lab3; do
    echo "---------- SCP Etcd certificate -=> $NODE ----------"
    ssh $NODE "mkdir -p /etc/etcd/ssl/"
    scp /root/ssl/{etcd.pem,etcd-key.pem,ca.pem} $NODE:/etc/etcd/ssl/
done

Install and configure etcd

lab1

ssh lab1 <<EEOOFF
yum -y install etcd
mkdir -p /var/lib/etcd
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name lab1 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://$lab1:2380 \
  --listen-peer-urls https://$lab1:2380 \
  --listen-client-urls https://$lab1:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://$lab1:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster lab1=https://$lab1:2380,lab2=https://$lab2:2380,lab3=https://$lab3:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable etcd
EEOOFF

lab2

ssh lab2 <<EEOOFF
yum -y install etcd
mkdir -p /var/lib/etcd
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name lab2 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://$lab2:2380 \
  --listen-peer-urls https://$lab2:2380 \
  --listen-client-urls https://$lab2:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://$lab2:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster lab1=https://$lab1:2380,lab2=https://$lab2:2380,lab3=https://$lab3:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable etcd
EEOOFF

lab3

ssh lab3 <<EEOOFF
yum -y install etcd
mkdir -p /var/lib/etcd
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd \
  --name lab3 \
  --cert-file=/etc/etcd/ssl/etcd.pem \
  --key-file=/etc/etcd/ssl/etcd-key.pem \
  --peer-cert-file=/etc/etcd/ssl/etcd.pem \
  --peer-key-file=/etc/etcd/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem \
  --initial-advertise-peer-urls https://$lab3:2380 \
  --listen-peer-urls https://$lab3:2380 \
  --listen-client-urls https://$lab3:2379,http://127.0.0.1:2379 \
  --advertise-client-urls https://$lab3:2379 \
  --initial-cluster-token etcd-cluster-0 \
  --initial-cluster lab1=https://$lab1:2380,lab2=https://$lab2:2380,lab3=https://$lab3:2380 \
  --initial-cluster-state new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable etcd
EEOOFF

Start the service

# start etcd on all three nodes in parallel;
# etcd blocks until a quorum of peers is up, so the starts must overlap
for NODE in lab1 lab2 lab3; do
    {
    ssh $NODE <<EOF
        systemctl start etcd
EOF
    } &
done
wait

# check cluster health
etcdctl --endpoints=https://$lab1:2379,https://$lab2:2379,https://$lab3:2379 \
    --ca-file=/etc/etcd/ssl/ca.pem \
    --cert-file=/etc/etcd/ssl/etcd.pem \
    --key-file=/etc/etcd/ssl/etcd-key.pem  cluster-health
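
The member list is a useful second check, with the same TLS flags:

etcdctl --endpoints=https://$lab1:2379,https://$lab2:2379,https://$lab3:2379 \
    --ca-file=/etc/etcd/ssl/ca.pem \
    --cert-file=/etc/etcd/ssl/etcd.pem \
    --key-file=/etc/etcd/ssl/etcd-key.pem member list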

Install and configure Docker

while read LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    echo "--------------- $HOST ---------------"
    ssh -T $HOST <<EOF
    # Docker install
    yum -y install yum-utils device-mapper-persistent-data lvm2
    yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    yum makecache fast
    yum -y install docker-ce

    # configure a Docker registry mirror
    mkdir -p /etc/docker
    if [ ! -f "/etc/docker/daemon.json" ];then
        echo '{' >/etc/docker/daemon.json
        echo '    "registry-mirrors": ["http://3272dd08.m.daocloud.io"]' >>/etc/docker/daemon.json
        echo '}' >>/etc/docker/daemon.json
    fi

    # start and enable the service
    systemctl start docker
    systemctl enable docker

    # verify
    docker info
    docker -v
EOF
done < ./list
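
Optionally record each node's Docker version and cgroup driver; the driver value feeds the kubelet configuration in the next step:

while read -r LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    echo "--------------- $HOST ---------------"
    ssh $HOST "docker info 2>/dev/null | grep -E 'Server Version|Cgroup Driver'"
done < ./list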

Install and configure kubelet, kubeadm and kubectl

while read LINE; do
    IP=$(echo $LINE | awk '{print $1}')
    NODE=$(echo $LINE | awk '{print $2}')
    ssh $NODE <<EOF
        # configure the Kubernetes yum repo (Aliyun mirror)
        echo '[kubernetes]' > /etc/yum.repos.d/kubernetes.repo
        echo 'name=Kubernetes' >> /etc/yum.repos.d/kubernetes.repo
        echo 'baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64' >> /etc/yum.repos.d/kubernetes.repo
        echo 'enabled=1' >> /etc/yum.repos.d/kubernetes.repo
        echo 'gpgcheck=1' >> /etc/yum.repos.d/kubernetes.repo
        echo 'repo_gpgcheck=1' >> /etc/yum.repos.d/kubernetes.repo
        echo 'gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg' >> /etc/yum.repos.d/kubernetes.repo

        # install (versions pinned to 1.10.1)
        # yum install -y kubelet kubeadm kubectl
        yum -y install kubelet-1.10.1-0.x86_64 kubeadm-1.10.1-0.x86_64 kubectl-1.10.1-0.x86_64 ipvsadm
        systemctl enable kubelet

        # match the kubelet cgroup driver to Docker's
        # (the backslashes defer expansion to the remote host)
        DOCKER_CGROUPS=\$(docker info | grep 'Cgroup' | cut -d' ' -f3)
        echo "KUBELET_EXTRA_ARGS=\"--cgroup-driver=\$DOCKER_CGROUPS --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1\"" > /etc/sysconfig/kubelet

        # switch the kubelet cgroup driver in the kubeadm drop-in
        sed -i 's/driver=systemd/driver=cgroupfs/' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

        # point kubelet at a pause image mirror reachable from China by
        # editing /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
        # sed -i '/ExecStart=$/i Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image=registry.cn-shanghai.aliyuncs.com/gcr-k8s/pause-amd64:3.0"'  /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
        sed -i '/ExecStart=$/i Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"'  /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

        # reload systemd so the drop-in changes take effect
        systemctl daemon-reload

        # shell command completion
        yum install -y bash-completion
        source /usr/share/bash-completion/bash_completion
        source <(kubectl completion bash)
        echo "source <(kubectl completion bash)" >> ~/.bashrc
EOF
done < ./list
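
Optionally confirm every node got the pinned 1.10.1 packages:

while read -r LINE; do
    HOST=$(echo $LINE | awk '{print $2}')
    echo "--------------- $HOST ---------------"
    ssh $HOST "kubelet --version; kubeadm version"
done < ./list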

Initialize the cluster

Generate the configuration file

token=$(kubeadm token generate)

cat <<EOF > config.yaml 
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
etcd:
  endpoints:
  - https://$lab1:2379
  - https://$lab2:2379
  - https://$lab3:2379
  caFile: /etc/etcd/ssl/ca.pem
  certFile: /etc/etcd/ssl/etcd.pem
  keyFile: /etc/etcd/ssl/etcd-key.pem
  dataDir: /var/lib/etcd
networking:
  podSubnet: 10.244.0.0/16
kubernetesVersion: 1.10.0
api:
  advertiseAddress: "$vip"
token: "$token"
tokenTTL: "0"
apiServerCertSANs:
  - lab1
  - lab2
  - lab3
  - $lab1
  - $lab2
  - $lab3
  - $lab4
  - $vip
featureGates:
  CoreDNS: true
imageRepository: "registry.cn-hangzhou.aliyuncs.com/k8sth"
EOF
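
Since the heredoc above expands shell variables, a leftover literal `$` in config.yaml would mean one of them was unset; a quick check:

# no output other than the OK line means every variable expanded
grep -n '\$' config.yaml || echo "config.yaml OK"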

Initialize lab1

# kubeadm reset
kubeadm init --config config.yaml | tee add.node.log

# configure kubectl access
mkdir -p $HOME/.kube
/usr/bin/cp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

# coredns errors here are expected until the kube-flannel pods start
kubectl get pods --all-namespaces

Deploy the flannel network

# download kube-flannel.yml
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# switch to a mirror image
# image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64
sed -i 's#image:.*#image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64#' kube-flannel.yml

# create flannel
kubectl create -f  kube-flannel.yml

# check status
kubectl get pods --all-namespaces -o wide
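
The flannel pods should reach Running before continuing, after which coredns leaves Pending; a simple filter:

# expect one kube-flannel pod per joined node
kubectl -n kube-system get pods -o wide | grep -E 'flannel|dns'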

Deploy the dashboard

# download kubernetes-dashboard.yaml
wget https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

# switch to a mirror image
sed -i 's#image:.*#image: registry.cn-hangzhou.aliyuncs.com/k8sth/kubernetes-dashboard-amd64:v1.8.3#' kubernetes-dashboard.yaml

# expose NodePort 30000 (the line numbers below target this specific yaml revision)
sed -i "162a\  type: NodePort" kubernetes-dashboard.yaml
sed -i "166a\      nodePort: 30000" kubernetes-dashboard.yaml

# create the dashboard
kubectl create -f kubernetes-dashboard.yaml

# check status
kubectl get pods --all-namespaces -o wide
kubectl -n kube-system get po,svc -l k8s-app=kubernetes-dashboard

# create a service account and bind it to the cluster-admin role
kubectl -n kube-system create sa dashboard
kubectl create clusterrolebinding dashboard --clusterrole cluster-admin --serviceaccount=kube-system:dashboard

# fetch the browser login token
# kubectl -n kube-system describe secrets $(kubectl -n kube-system get sa dashboard -o yaml | awk '/dashboard-token/ {print $3}')  | awk '/token:/{print $2}'
SECRET=$(kubectl -n kube-system get sa dashboard -o yaml | awk '/dashboard-token/ {print $3}')
kubectl -n kube-system describe secrets ${SECRET} | awk '/token:/{print $2}' | tee browser-token.log

# browser login URL and token
echo https://$vip:30000/#'!'/login
echo '#######################################################################'
cat browser-token.log
echo '#######################################################################'

Initialize lab2 and lab3

# distribute the certificates and config file to lab2 and lab3
scp -r /etc/kubernetes/pki  lab2:/etc/kubernetes/
scp -r /etc/kubernetes/pki  lab3:/etc/kubernetes/
scp /root/config.yaml lab2:/root
scp /root/config.yaml lab3:/root

# initialize lab2 and lab3
# (the backslashes defer expansion to the remote host)
for NODE in lab2 lab3; do
    ssh $NODE <<EOF
        kubeadm init --config config.yaml
        mkdir -p \$HOME/.kube
        /usr/bin/cp /etc/kubernetes/admin.conf \$HOME/.kube/config
        chown \$(id -u):\$(id -g) \$HOME/.kube/config
EOF
done

# check status
kubectl get svc --namespace kube-system
kubectl get pods --all-namespaces -o wide

# check the status of all nodes
kubectl get nodes
kubectl get nodes -o wide

Install heapster (test)

# download the deployment files
wget -c https://github.com/kubernetes/heapster/archive/v1.5.3.tar.gz -O heapster-1.5.3.tar.gz
tar xvf heapster-1.5.3.tar.gz -C /tmp
mkdir kube-heapster
/usr/bin/cp -r /tmp/heapster-1.5.3/deploy/kube-config/{influxdb,rbac} kube-heapster/

# switch to mirror images
sed -i "s#image:.*#image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-influxdb-amd64:v1.3.3#" kube-heapster/influxdb/influxdb.yaml
sed -i "s#image:.*#image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-grafana-amd64:v4.4.3#" kube-heapster/influxdb/grafana.yaml
sed -i "s#image:.*#image: registry.cn-hangzhou.aliyuncs.com/k8sth/heapster-amd64:v1.4.2#" kube-heapster/influxdb/heapster.yaml

# expose NodePort 30001
sed -i "s/# type: NodePort/type: NodePort/"  kube-heapster/influxdb/grafana.yaml
sed -i "70a\    nodePort: 30001"   kube-heapster/influxdb/grafana.yaml

# deploy influxdb, grafana and rbac
kubectl create -f kube-heapster/influxdb/
kubectl create -f kube-heapster/rbac/

# check pod status
kubectl get pods --all-namespaces -o wide

# check the status of all nodes
kubectl get nodes -o wide

# to remove:
# kubectl delete -f kube-heapster/influxdb/
# kubectl delete -f kube-heapster/rbac/

# open in a browser (grafana is exposed on NodePort 30001 above)
echo http://$vip:30001
echo 'USER: admin  PASSWD: admin'

Join the worker nodes lab4, lab5 and lab6 to the cluster

export sha=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //')
# the second line of `kubeadm token list` is the first token after the header
export token=$(kubeadm token list | awk '{print $1}' | head -n2 | tail -n1)
for NODE in lab4 lab5 lab6; do
    ssh $NODE "kubeadm join $vip:6443 --token $token --discovery-token-ca-cert-hash sha256:$sha"
done
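
The workers can stay NotReady for a minute while the flannel image pulls; verify from lab1:

kubectl get nodes -o wide
# any pod not yet Running shows up here
kubectl get pods --all-namespaces | grep -v Running || true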

Regenerate the kubeadm join command (expired token)

# list tokens
kubeadm token list

# create a new token
token=$(kubeadm token create)

# compute the sha256 hash of the CA certificate
sha=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //')

# print the new kubeadm join command
# token=$(kubeadm token list | awk '{print $1}' | head -n2 | tail -n1)
echo kubeadm join $vip:6443 --token $token --discovery-token-ca-cert-hash sha256:$sha