1. Environment Preparation
IP          | HOSTNAME
----------- | --------
<node-1-ip> | k8s1
<node-2-ip> | k8s2
<node-3-ip> | k8s3
Note: hostnames must not contain underscores, dots, or uppercase letters.
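Before continuing, set each node's hostname and make all three names resolvable from every node. A minimal sketch, assuming the hostnames above and placeholder IPs:
# run on each node with its own name (k8s1 / k8s2 / k8s3)
hostnamectl set-hostname k8s1
# append name resolution on every node, substituting the real IPs
cat <<EOF >> /etc/hosts
<node-1-ip> k8s1
<node-2-ip> k8s2
<node-3-ip> k8s3
EOF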
2. Environment Configuration (all nodes)
# stop firewalld
systemctl stop firewalld
systemctl disable firewalld
# disable selinux
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0
# disable swap
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# enable route forwarding
cat <<EOF >> /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
EOF
# bridge and ipv6 settings
echo 'net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.d/k8s.conf
echo 'net.bridge.bridge-nf-call-iptables = 1' >> /etc/sysctl.d/k8s.conf
echo "net.ipv6.conf.all.disable_ipv6 = 1" >> /etc/sysctl.d/k8s.conf
echo "net.ipv6.conf.default.disable_ipv6 = 1" >> /etc/sysctl.d/k8s.conf
echo "net.ipv6.conf.lo.disable_ipv6 = 1" >> /etc/sysctl.d/k8s.conf
echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.d/k8s.conf
modprobe br_netfilter
sysctl --system
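To confirm the kernel settings took effect, the following quick checks can be run:
# the module must be loaded for the bridge sysctls to exist
lsmod | grep br_netfilter
# both values should print 1
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward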
# config ipvs: load the kernel modules required by kube-proxy in ipvs mode
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
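A quick check that the ipvs modules actually loaded:
lsmod | grep -e ip_vs -e nf_conntrack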
3. Install Docker (all nodes)
# remove any docker packages shipped with the system
yum -y remove docker*
# install prerequisite tooling
yum -y install yum-utils
# configure the docker yum mirror
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# list installable versions
yum list docker-ce --showduplicates | sort -r
# pin a concrete version; it must be compatible with the Kubernetes version installed below
yum install -y docker-ce-<version> docker-ce-cli-<version> containerd.io
# start and enable the service
systemctl start docker
systemctl enable docker
# configure registry mirrors and set the cgroup driver
vi /etc/docker/daemon.json
{
"registry-mirrors": [
"https://registry.docker-cn.com",
"http://hub-mirror.c..com",
"https://docker.mirrors.ustc.edu.cn"
],
"exec-opts": ["native.cgroupdriver=systemd"]
}
# restart the service
systemctl daemon-reload
systemctl restart docker
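It is worth confirming that Docker picked up the systemd cgroup driver, since kubeadm init fails later if kubelet and Docker disagree on it:
# should print: Cgroup Driver: systemd
docker info | grep -i 'cgroup driver'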
4. Install Kubernetes Components (all nodes)
# remove any old kubernetes packages
yum remove -y kubelet kubeadm kubectl
# configure the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# list installable versions
yum list kubelet --showduplicates | sort -r
# install kubelet, kubeadm and kubectl, pinned to a version compatible with the Docker version above
yum install -y kubelet-<version> kubeadm-<version> kubectl-<version>
# enable kubelet at boot
systemctl enable kubelet
systemctl start kubelet
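At this point kubelet restarts in a loop because no cluster exists yet; that is expected and resolves after kubeadm init. Its state can be watched with:
systemctl status kubelet
journalctl -u kubelet -f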
5. Configure the Master (master node only)
1) List the required images
[root@k8s1 ~]# kubeadm config images list
k8s.gcr.io/kube-apiserver:v1.
k8s.gcr.io/kube-controller-manager:v1.
k8s.gcr.io/kube-scheduler:v1.
k8s.gcr.io/kube-proxy:v1.
k8s.gcr.io/pause:
k8s.gcr.io/etcd:
k8s.gcr.io/coredns/coredns:v1.
2) Pull the images
# create a script
vi images.sh
# script contents:
images=(
kube-apiserver:v1.
kube-controller-manager:v1.
kube-scheduler:v1.
kube-proxy:v1.
pause:
etcd:
coredns:v1.
)
for imageName in "${images[@]}" ;
do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
# run the script
chmod +x images.sh && ./images.sh
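A quick check that all the images arrived locally:
docker images | grep 'registry.cn-hangzhou.aliyuncs.com/google_containers'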
If the image tags need to be rewritten to k8s.gcr.io, use the commands below (normally unnecessary: the init command below points at the Aliyun repository):
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
3) Run the initialization command on the master
# initialize the control plane; substitute the placeholders with real values
kubeadm init \
  --apiserver-advertise-address=<master-ip> \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.<version> \
  --service-cidr=<service-cidr> \
  --pod-network-cidr=<pod-cidr>
# add --ignore-preflight-errors=Swap to suppress the swap preflight error
Note: if anything goes wrong, run kubeadm reset to roll back.
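If a failed init leaves stale state behind, a fuller cleanup sketch (adjust to your environment) is:
kubeadm reset -f
rm -rf $HOME/.kube /etc/cni/net.d
iptables -F && iptables -t nat -F && iptables -X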
If initialization fails with an error like:
[kubelet-check] The HTTP call equal to 'curl -sSL http://localhost:10248/healthz' failed with error: Get "http://localhost:10248/healthz": dial tcp ...: connect: connection refused.
run kubeadm reset, reapply the cgroupdriver fix to /etc/docker/daemon.json exactly as shown in section 3, and restart docker:
systemctl daemon-reload
systemctl restart docker
When initialization succeeds, output like the following appears:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join <master-ip>:6443 --token 6vmxut.lb3hvlxhux5suugx \
--discovery-token-ca-cert-hash sha256:0f2b3e95ecee06bc40eca641548c3ca8afb86ebc2279f3fe2a75960330b0dbd1
4) Apply the kubeconfig on the master
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
export KUBECONFIG=/etc/kubernetes/admin.conf
Then, on each worker node (not on the master), run the join command printed by kubeadm init:
kubeadm join <master-ip>:6443 --token 6vmxut.lb3hvlxhux5suugx \
    --discovery-token-ca-cert-hash sha256:0f2b3e95ecee06bc40eca641548c3ca8afb86ebc2279f3fe2a75960330b0dbd1
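Once the workers have joined, membership can be verified from the master:
# all three nodes should be listed; they remain NotReady until the network plugin is installed
kubectl get nodes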
5) Install Helm
wget https://get.helm.sh/helm-v3.-linux-amd64.tar.gz
tar -zxvf helm-v3.-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/
Run helm list to check that it works.
6) Install the Calico network plugin
Download the tigera-operator chart from:
https://github.com/projectcalico/calico/releases
wget https://github.com/projectcalico/calico/releases/download/v3./tigera-operator-v3..tgz
Install calico:
helm install calico tigera-operator-v3..tgz
Wait until the related pods are all in the Running state:
watch kubectl get pods -n calico-system
Sample output:
Every 2.0s: kubectl get pods -n calico-system

NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-7bbdbd789c-jcvhz   1/1     Running   0          62m
calico-node-z6t9g                          1/1     Running   0          62m
calico-typha-9d5f49b9f-7xvw5               1/1     Running   0          62m
Calico's API resources should not be managed with plain kubectl; install calicoctl as a kubectl plugin instead:
wget https://github.com/projectcalico/calico/releases/download/v3./calicoctl-linux-amd64
mv calicoctl-linux-amd64 kubectl-calico
chmod +x kubectl-calico
# the plugin must be on PATH for kubectl to discover it
mv kubectl-calico /usr/local/bin/
Verify the plugin works:
kubectl calico -h
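As a quick smoke test, the plugin can list the nodes Calico knows about:
kubectl calico get nodes -o wide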
7) Verify cluster DNS
Run a curl container:
kubectl run curl --image=radial/busyboxplus:curl -it
Check its status with kubectl describe pod curl. If the pod cannot be scheduled, events like the following appear:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 38m default-scheduler 0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.
Warning FailedScheduling 23m (x14 over 37m) default-scheduler 0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.
Make the master schedulable by removing its taint:
kubectl taint nodes --all node-role.kubernetes.io/master-
Enter the curl container:
kubectl exec -it curl -- /bin/sh
Run a lookup; output like the following confirms DNS is working:
[ root@curl:/ ]$ nslookup kubernetes.default
Server:
Address 1: kube-dns.kube-system.svc.cluster.local
Name: kubernetes.default
Address 1: kubernetes.default.svc.cluster.local
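The same check can also be run non-interactively, which is handy for scripting (assuming the curl pod above is still running):
kubectl exec curl -- nslookup kubernetes.default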
6. Deploy an Nginx Application
kubectl create deployment my-nginx --image=nginx
Check the pod IP addresses:
[root@k8s1 bin]# kubectl get pod -owide
NAME                               READY   STATUS    RESTARTS       AGE     IP         NODE   NOMINATED NODE   READINESS GATES
curl                               1/1     Running   0              121m    <pod-ip>   k8s1   <none>           <none>
my-nginx-c54945c55-lhmlt           1/1     Running   0              8m27s   <pod-ip>   k8s3   <none>           <none>
tigera-operator-56d4765449-tw7bv   1/1     Running   1 (136m ago)   140m    <pod-ip>   k8s1   <none>           <none>
Access it (substitute the nginx pod IP from the table above):
[root@k8s1 bin]# curl <pod-ip>
Welcome to nginx!
If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.
For online documentation and support please refer to nginx.org.
Commercial support is available at nginx.com.
Thank you for using nginx.
The application is deployed and reachable.
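To reach nginx from outside the cluster, the deployment can also be exposed as a NodePort service (a sketch; the port mapping is illustrative):
kubectl expose deployment my-nginx --port=80 --type=NodePort
# find the allocated node port, then browse to http://<any-node-ip>:<node-port>
kubectl get svc my-nginx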
Note: choose a Calico release that supports the installed Kubernetes version; the compatibility matrix is at:
https://projectcalico.docs.tigera.io/archive/v3./getting-started/kubernetes/requirements
Alternatively, Calico can be installed from manifests instead of the Helm chart:
kubectl apply -f https://projectcalico.docs.tigera.io/archive/v3./manifests/tigera-operator.yaml
kubectl apply -f https://projectcalico.docs.tigera.io/archive/v3./manifests/custom-resources.yaml
7. Deploy the Dashboard
Download the manifest:
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2./aio/deploy/recommended.yaml
To expose the dashboard externally, edit recommended.yaml and add type: NodePort to the kubernetes-dashboard Service (port 443 / targetPort 8443 are the values already present in the manifest; only the type line is added):
...
spec:
  ports:
    - port: 443
      targetPort: 8443
  type: NodePort
...
Run kubectl get svc -A to find the dashboard's NodePort, then open https://<node-ip>:<node-port> in Chrome.
If Chrome refuses the self-signed certificate, click anywhere on the blank warning page and blindly type thisisunsafe to proceed.
Optionally, expose the dashboard through an Ingress. Configure dashboard-config.yaml:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kubernetes-dashboard
  namespace: kube-system
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - k8s.example.com
      secretName: example-com-tls-secret
  rules:
    - host: k8s.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kubernetes-dashboard
                port:
                  number: 443
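The Ingress can then be applied and checked, assuming an nginx ingress controller is already running in the cluster:
kubectl apply -f dashboard-config.yaml
kubectl get ingress -n kube-system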
Create an admin service account:
kubectl create serviceaccount kube-dashboard-admin-sa -n kube-system
kubectl create clusterrolebinding kube-dashboard-admin-sa \
--clusterrole=cluster-admin --serviceaccount=kube-system:kube-dashboard-admin-sa
Get the login token:
[root@k8s1 ~]# kubectl -n kube-system get secret | grep kube-dashboard-admin-sa-token
kube-dashboard-admin-sa-token-jtfxk kubernetes.io/service-account-token 3 78s
Run describe to read the token:
[root@k8s1 ~]# kubectl describe -n kube-system secret/kube-dashboard-admin-sa-token-jtfxk
Name: kube-dashboard-admin-sa-token-jtfxk
Namespace: kube-system
Labels:
Annotations: kubernetes.io/service-account.name: kube-dashboard-admin-sa
kubernetes.io/service-account.uid: 85dabe0c-dd93-457e-b077-6c732e72a009
Type: kubernetes.io/service-account-token
Data
====
ca.crt: bytes
namespace: bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IlBJV1FfTDN2ZWNIUTBMWVU2NGJ0WnRTVzF6QVNjZXlNWDNuY1o4S3B0V2MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlLWRhc2hib2FyZC1hZG1pbi1zYS10b2tlbi1qdGZ4ayIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlLWRhc2hib2FyZC1hZG1pbi1zYSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6Ijg1ZGFiZTBjLWRkOTMtNDU3ZS1iMDc3LTZjNzMyZTcyYTAwOSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTprdWJlLWRhc2hib2FyZC1hZG1pbi1zYSJ9.YPke1H3fdH_Vmw2980e-Kn2yRWklcvOt3o9ryfedmD5SLR_lkkUZb996SwZPb0mxReZi7Gjws5JdDYKnskIgvTTp8encsQ2UpLiC0myyzPUg6KP_3IHiTJ52n40mFNaZ7BzdyyYizatDWB89LruE2QrhEXdgOFxe-Z1GvzMdUpeAzrhV_a_bfE5iCkWmiw1jmaVba3X_MLiDoVPdUsQRovk6oZCNAzs9ElS0Hvb-vt4Ye6zI68Z0q3An36QFRk1CIE2RZfysq92QRSKgvRf8SgKN1UqyGFr9ICQcZTeiL0wNFGk04t6Z83RTK5n0BzojTrZwT-r0OGJb5coBJIkLPA
Log in with this token; the dashboard displays the cluster information correctly.
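One caveat: on Kubernetes 1.24 and later, token secrets are no longer created automatically for service accounts, so the grep above returns nothing; a short-lived token can be requested explicitly instead:
kubectl -n kube-system create token kube-dashboard-admin-sa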