1. 安装环境准备
1.1 配置IP
vi /etc/sysconfig/network-scripts/ifcfg-enp0s3
## 配置如下
TYPE="Ethernet"
PROXY_METHOD="none"
BROWSER_ONLY="no"
BOOTPROTO="static"
DEFROUTE="yes"
IPADDR=192.168.19.170
NETMASK=255.255.255.0
GATEWAY=192.168.19.1
IPV4_FAILURE_FATAL="no"
IPV6INIT="yes"
IPV6_AUTOCONF="yes"
IPV6_DEFROUTE="yes"
IPV6_FAILURE_FATAL="no"
IPV6_ADDR_GEN_MODE="stable-privacy"
NAME="enp0s3"
DEVICE="enp0s3"
ONBOOT="yes"
1.2 配置dns
vi /etc/resolv.conf
## 配置如下
nameserver 8.8.8.8
nameserver 114.114.114.114
1.3 设置时区
timedatectl set-timezone Asia/Shanghai
1.4 关闭selinux以及firewalld
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
systemctl disable firewalld
systemctl stop firewalld
1.5 配置yum源
添加repo文件
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
repo文件再追加如下内容:
[k8s]
name=k8s
failovermethod=priority
baseurl=http://mirrors.aliyun.com/centos/$releasever/virt/$basearch/kubernetes110/
        http://mirrors.aliyuncs.com/centos/$releasever/virt/$basearch/kubernetes110/
gpgcheck=0
设置kubeadm的yum源
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
更新yum缓存
sudo yum makecache fast
1.6 更新操作系统
此步为可选步骤
yum update
1.7 安装必要的基础工具
yum install -y wget vim yum-utils device-mapper-persistent-data lvm2 tcpdump
1.8 安装必要的网络组件
yum install -y conntrack ipvsadm ipset jq sysstat curl iptables libseccomp socat
1.9 添加docker的yum源:
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
1.10 开启IPVS
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
1.11 开启br_netfilter
modprobe br_netfilter
1.12 开启bridge和ipv4.ip_forward
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
/usr/lib/sysctl.d/00-system.conf文件中也有bridge-nf-call参数,需要同时修改
vi /usr/lib/sysctl.d/00-system.conf
## 修改以下参数
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
sysctl --system
1.13 关闭swap
swapoff -a
#再编辑 /etc/fstab 文件把swap一行注释 (vi /etc/fstab #swap)
1.14 安装docker-ce
sudo yum -y install docker-ce
配置cgroupfs
cat << EOF > /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=cgroupfs"],
"registry-mirrors": ["https://registry.docker-cn.com"]
}
EOF
启动docker,并设置为开机自启
systemctl start docker && systemctl enable docker
1.15 安装kubelet组件
#如果是1.17安装命令
yum install kubeadm-1.17.0-0.x86_64 kubectl-1.17.0-0.x86_64 kubelet-1.17.0-0.x86_64 kubernetes-cni-0.7.5-0.x86_64 --disableexcludes=kubernetes
# 如果是1.18.2的安装命令
yum install kubeadm-1.18.2-0.x86_64 kubectl-1.18.2-0.x86_64 kubelet-1.18.2-0.x86_64 kubernetes-cni-0.7.5-0.x86_64 --disableexcludes=kubernetes
# 启动命令
systemctl enable --now kubelet
2. 初始化master1主节点
2.1 编辑ipvs模式安装的初始化配置文件(如果用iptables模式安装,则不需要配置这个文件)
文件可以通过"kubeadm config print init-defaults --component-configs KubeletConfiguration,KubeProxyConfiguration > kube-init.yaml"命令生成,再修改里面几个参数即可。涉及修改的地方如下:
同时修改imageRepository的配置为:
imageRepository: registry.aliyuncs.com/google_containers
修改后的最终文件参考如下:
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 192.168.19.170
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: ipvs-master1
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
certSANs:
- 192.168.19.170
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.18.2
networking:
dnsDomain: cluster.local
podSubnet: 10.244.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 0s
enabled: true
x509:
clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 0s
cacheUnauthorizedTTL: 0s
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
clientConnection:
acceptContentTypes: ""
burst: 0
contentType: ""
kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
qps: 0
clusterCIDR: 10.244.0.0/16
configSyncPeriod: 0s
conntrack:
maxPerCore: null
min: null
tcpCloseWaitTimeout: null
tcpEstablishedTimeout: null
enableProfiling: false
healthzBindAddress: ""
hostnameOverride: ""
iptables:
masqueradeAll: false
masqueradeBit: null
minSyncPeriod: 0s
syncPeriod: 0s
ipvs:
excludeCIDRs: null
minSyncPeriod: 0s
scheduler: ""
strictARP: false
syncPeriod: 0s
kind: KubeProxyConfiguration
metricsBindAddress: ""
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: null
portRange: ""
udpIdleTimeout: 0s
winkernel:
enableDSR: false
networkName: ""
sourceVip: ""
2.2 kubeadm init主节点,在master1节点执行
如果以ipvs模式安装,则执行:
kubeadm init --config kube-init.yaml
2.3 配置kubectl客户端
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
2.4 kubectl自动提示
yum install bash-completion
source /usr/share/bash-completion/bash_completion
kubectl completion bash >/etc/bash_completion.d/kubectl
2.5 检查所有的pod状态
检查所有的pod状态(kubedns也依赖于容器网络,此时pending是正常的)
kubectl get pod --all-namespaces
4. 安装网络插件(1.17.0安装flannel网络插件出现service跨节点网络不通问题,安装calico网络插件正常)
4.1 如果安装calico网络插件
4.1.1 下载并配置文件
curl -O https://docs.projectcalico.org/v3.8/manifests/calico.yaml
#或
curl -O https://docs.projectcalico.org/v3.11/manifests/calico.yaml
#再编辑配置文件
vi calico.yaml
# 修改CALICO_IPV4POOL_CIDR为10.244.0.0/16
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
# 你之前下载的calico镜像是什么版本,就把calico.yaml中的镜像改成对应的版本
4.1.2 执行安装
kubectl create -f calico.yaml
5. 去除master污点
去除后pod才能安装到master节点
kubectl taint nodes --all node-role.kubernetes.io/master-
6. 修改apiserver配置的basic认证
6.1 修改basic-auth.csv
vi /etc/kubernetes/pki/basic-auth.csv
#增加如下内容
admin,admin,1
6.2 修改kube-apiserver.yaml
vi /etc/kubernetes/manifests/kube-apiserver.yaml
#新增以下内容
- --service-node-port-range=8000-40000
- --insecure-port=8080
- --insecure-bind-address=0.0.0.0
- --basic-auth-file=/etc/kubernetes/pki/basic-auth.csv
#删除以下内容不然前面设置的8080端口不能用
- --insecure-port=0
6.3. 配置admin用的访问权限
vim java-admin-rbac.yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: admin-javaclient
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- '*'
- nonResourceURLs:
- '*'
verbs:
- '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: admin-javaclient
subjects:
- kind: User
name: admin
namespace: default
roleRef:
kind: ClusterRole
name: admin-javaclient
apiGroup: rbac.authorization.k8s.io
配置生效:
kubectl create -f java-admin-rbac.yaml
7. 安装验证
安装完成后可以创建个nginx的deployment和service来验证
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.12.2
ports:
- containerPort: 80
---
kind: Service
apiVersion: v1
metadata:
name: nginx
spec:
selector:
app: nginx
ports:
- protocol: TCP
port: 1080
targetPort: 80
然后找到service的ip,通过"curl http://service的ip:1080/"测试服务是否可用。
8. 安装harbor
8.1 安装docker-compose-Linux-x86_64
cp docker-compose-Linux-x86_64 /usr/bin/docker-compose
8.2 解压harbor-offline
解压/mnt/soft/harbor-offline-installer-v2.1.1.tgz
8.3 配置harbor.yml
# 配置主机名
hostname: 192.168.0.42
# 配置http端口
http:
port: 8888
# 注释掉所有https下的配置
#https:
# port: 443
# certificate: /your/certificate/path
# private_key: /your/private/key/path
# 配置data路径
data_volume: /harbor_data
8.4 安装harbor
./install.sh
8.5 常用命令
## 后台启动
docker-compose up -d
## 查看状态
docker-compose ps
## 停止
docker-compose stop