CentOS 7 部署 Kubernetes + Rancher + Harbor + Jenkins 笔记

1、基本环境

# Disable firewalld and SELinux — both interfere with kubeadm's preflight checks.
systemctl stop firewalld && systemctl disable firewalld
# Fixed: the original `s/enforcing/disabled/g` also rewrote the word "enforcing"
# inside the explanatory comments of /etc/selinux/config; anchor to the config key.
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config; setenforce 0

2、安装docker

安装必要的一些系统工具

# Prerequisite tools: yum-config-manager plus devicemapper storage libraries.
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

添加软件源信息

# Register the Aliyun mirror of the docker-ce yum repository.
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

更新并安装 Docker-CE

# Refresh yum metadata, then install Docker CE from the repo added above.
sudo yum makecache fast
sudo yum -y install docker-ce
或
# Alternative: Docker's convenience script using the Aliyun mirror.
# NOTE(review): piping curl to bash executes unreviewed remote code — lab use only.
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

可选 #添加当前用户到docker组中允许普通用户运行docker命令,此步需要登出linux环境重新再登录方能生效

# Let the current user run docker without sudo (takes effect after re-login).
sudo usermod -aG docker $USER

添加开机启动

# Start docker automatically at boot.
sudo systemctl enable docker.service

启动docker

# Start the docker daemon now.
sudo systemctl start docker.service

查看docker版本

# Verify client and daemon versions (also confirms the daemon is reachable).
docker version

修改配置

# Switch docker's cgroup driver to systemd (must match the kubelet's driver),
# relocate the data directory to /data/docker, and add the USTC registry mirror.
# Fixed: the "graph" key is deprecated and removed in Docker 23+ — use "data-root".
vim /etc/docker/daemon.json
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "data-root": "/data/docker"
}
systemctl daemon-reload
systemctl restart docker

验证docker环境是否能运行镜像

# Smoke test: pull and run the hello-world image.
docker run hello-world

3、安装k8s

添加kubernetes源

# ARM (aarch64) repository.
# Fixed: the original had `exclude=kube*`, which blocks installing
# kubelet/kubeadm/kubectl unless yum is invoked with --disableexcludes=kubernetes
# (which no command in this guide does). repo_gpgcheck is set to 0 to match the
# x86_64 repo below — the Aliyun mirror's repo metadata signature check often fails.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-aarch64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# x86_64 repository (repo_gpgcheck disabled: Aliyun mirror metadata signatures
# frequently fail verification).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

加载源

执行yum clean all清除原有yum缓存。
执行yum makecache(刷新缓存)或者yum repolist all(查看所有配置可以使用的文件,会自动刷新缓存)。
#master、node 的主机名映射加入 /etc/hosts
# Map hostnames for all cluster members; replace {ip1}/{ip2} with real addresses.
vim /etc/hosts

{ip1} master 
{ip2} node1 

安装kubelet kubeadm kubectl

# Either install the latest versions (only safe while the repo serves < 1.24) ...
#yum install -y kubelet kubeadm kubectl
# kubeadm changed substantially in 1.24 (dockershim removal); with Docker as the
# container runtime, stay on a pre-1.24 release.
yum --showduplicates list kubelet
# Fixed: the original install line listed kubelet twice, omitted kubectl, and was
# left commented out even though every later step needs these packages installed.
yum install -y kubelet-1.23.1-0 kubeadm-1.23.1-0 kubectl-1.23.1-0

配置内核参数

# Fixed: without br_netfilter loaded, the net.bridge.* keys below do not exist
# and `sysctl --system` reports errors. Load it now and persist across reboots.
modprobe br_netfilter
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF

# ip_forward is persisted in the file instead of the original one-shot
# `sysctl -w net.ipv4.ip_forward=1`, which was lost on reboot.
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

sysctl --system
swapoff -a                            # turn swap off now (kubelet refuses to start with swap on)
sed -ri 's/.*swap.*/#&/' /etc/fstab   # keep swap off across reboots

初始化master节点

# Enable kubelet at boot; it will crash-loop until `kubeadm init` writes its
# config — that is expected.
systemctl enable kubelet.service
systemctl start kubelet.service
# --pod-network-cidr=10.244.0.0/16 matches the flannel default used below.
kubeadm init --kubernetes-version=v1.23.1 --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16

--pod-network-cidr:选择一个 Pod 网络插件,并检查是否在 kubeadm 初始化过程中需要传入什么参数。这取决于您选择的网络插件:Kubernetes 支持多种网络方案,不同方案对 --pod-network-cidr 有各自的要求,flannel 设置为 10.244.0.0/16,calico 设置为 192.168.0.0/16。

--image-repository:Kubernetes 默认 Registry 地址是 k8s.gcr.io,国内无法访问;1.13 版本后可以增加 --image-repository 参数,将其指定为可访问的镜像地址,这里使用 registry.aliyuncs.com/google_containers。

配置 kubectl

# Give the current user a kubeconfig so kubectl can reach the new cluster.
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Print the `kubeadm join` command a worker node runs to join this master.
kubeadm token create --print-join-command
# Example output:
kubeadm join 192.168.0.104:6443 --token 4ual51.gbgeu5r5ujfj17mr \
    --discovery-token-ca-cert-hash sha256:14c2a981f770ec06c8ee68cb00b15ac0a799b89d1ddbca33b98da5e8d199b5c9

calico.yaml 【附件】calico.yaml

安装flannel网络插件

# Pick ONE of the two commands below.
# NOTE(review): the first URL is Weave Net (not flannel), served by Weave Cloud
# which has since been shut down — verify the URL still resolves before relying on it.
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=v1.19.3

kubectl apply -f  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

【附件】flannel.yaml

允许master节点调度

# Remove the NoSchedule taint so workloads can run on the master (single-node setups).
kubectl taint nodes --all node-role.kubernetes.io/master-

查看master节点状态为Ready

# Pods may take a few seconds to go from Created to Running — wait briefly and re-check.
kubectl get nodes
kubectl -n kube-system get pods

至此,Kubernetes 的 Master 节点就部署完成了。如果只需要一个单节点的 Kubernetes,现在你就可以使用了。部署参考:https://blog.csdn.net/networken/article/details/84991940

# kubectl bash completion for the current and future shells.
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
#解决node节点加入集群失败,Node节点出现这个报错:The connection to the server localhost:8080 was refused - did you specify the right host or port?
解决如下:
1、#在Master节点运行下面命令将admin.conf文件拷贝到从节点:
sudo scp /etc/kubernetes/admin.conf root@{node_ip}:~
2、#在Node节点运行下面命令配置环境变量:
export KUBECONFIG=$HOME/admin.conf

4、部署rancher并导入k8s

rancher安装

# Run Rancher, exposing HTTP on host port 8088 and HTTPS on 8443.
docker pull rancher/rancher:stable
# NOTE(review): --privileged grants the container full host access (needed by
# Rancher's embedded k3s) — keep this host off untrusted networks.
docker run -d --restart=unless-stopped --name rancher-server -v /data/rancher/:/var/lib/rancher/ -p 8088:80 -p 8443:443 --privileged rancher/rancher:stable

页面访问

设置初始密码

# Read the initial bootstrap password from the rancher container log
# (replace container-id with the real ID from `docker ps`).
docker logs  container-id  2>&1 | grep "Bootstrap Password:"
[root@localhost /]# docker logs  7a808eb955ac  2>&1 | grep "Bootstrap Password:"
2022/03/13 13:28:53 [INFO] Bootstrap Password: qst8b2ld7t5fgnt9bvl644ccfgznln8srkvxfqpcv7bpwr5nbd9h79
[root@localhost /]#

登陆到k8s Master主机

# Import the cluster using the registration manifest URL generated by the Rancher
# UI (--insecure because Rancher serves a self-signed certificate).
curl --insecure -sfL https://172.16.215.133/v3/import/jtlznlcjmdvp9rtvgk75mzzqxsw8bwfwhld7hwrgbcxt5vvjxc64cp_c-8kxk5.yaml | kubectl apply -f -

一些k8s命令

查看所有pod

kubectl get pods --all-namespaces -o wide

查看某个pod日志

kubectl logs cattle-cluster-agent-65b74b775c-b2q5l -n cattle-system

k8s删除pod方式

# List pods in the cattle-system namespace.

kubectl get pods -n cattle-system

# Delete a pod (its Deployment's ReplicaSet recreates it immediately).

kubectl delete pod cattle-cluster-agent-6d9b5d97b9-fspbr -n cattle-system

# List deployments.

kubectl get deployment -n cattle-system

# Delete the deployment (removes its pods permanently).

kubectl delete deployment cattle-cluster-agent -n cattle-system

5、基于k8s部署gitlab

# Create the gitlab namespace.

kubectl create namespace gitlab

# 编辑 gitlab_delpoyment.yaml(文件名与下方命令保持一致)

# GitLab CE Deployment.
# NOTE(review): no volumes are defined, so GitLab data does not survive a pod
# restart — add a PVC-backed volume before real use.
vim gitlab_delpoyment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: gitlab
  name: gitlab-deployment
spec:
  selector:
    matchLabels:
      app: gitlab
  template:
    metadata:
      labels:
        app: gitlab
    spec:
      containers:
      - name: gitlab
        image: gitlab/gitlab-ce:14.0.7-ce.0 # image name
        ports:
        - containerPort: 80

# Apply the deployment manifest.

kubectl apply -f gitlab_delpoyment.yaml

部署service

# NodePort Service exposing GitLab's HTTP port on 30880 of every node.
vim gitlab_service.yaml
apiVersion: v1
kind: Service
metadata:
  namespace: gitlab
  name: gitlab-service
spec:
  selector:
    app: gitlab
  type: NodePort
  ports:

  - protocol: TCP
    port: 80
    targetPort: 80
    nodePort: 30880
# Apply the service manifest.
kubectl apply -f gitlab_service.yaml

6、安装helm

# Install helm v3 from the official release tarball.
wget https://get.helm.sh/helm-v3.4.0-linux-amd64.tar.gz
# Modern option syntax instead of legacy bundled `tar fx`.
tar -xzf helm-v3.4.0-linux-amd64.tar.gz
# install(1) sets the executable mode in one step, unlike plain cp.
install -m 0755 linux-amd64/helm /usr/local/bin/helm
helm version

7、安装harbor

# Fetch the harbor helm chart.
kubectl create namespace harbor
wget https://github.com/goharbor/harbor-helm/archive/v1.5.0.tar.gz
tar -xzf v1.5.0.tar.gz
cd harbor-helm-1.5.0/

# Create a self-signed CA and a server certificate for core.harbor.domain.
mkdir harbor
cd harbor
# CA certificate. Fixed: -subj avoids the interactive subject prompts the
# original commands triggered.
openssl req -newkey rsa:4096 -nodes -sha256 -keyout ca.key -x509 -days 3650 -out ca.crt \
  -subj "/CN=harbor-ca"
# Server key + CSR; the CN must match the chart's ingress hostname.
openssl req -newkey rsa:4096 -nodes -sha256 -keyout tls.key -out tls.csr \
  -subj "/CN=core.harbor.domain"
# Sign with the CA; modern TLS clients also require a subjectAltName.
openssl x509 -req -days 3650 -in tls.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt \
  -extfile <(printf 'subjectAltName=DNS:core.harbor.domain')

# "harbor-harbor-ingress" is the secret name referenced by values.yaml.
kubectl create secret generic harbor-harbor-ingress --from-file=tls.crt --from-file=tls.key --from-file=ca.crt -n harbor
# Check certificate validity dates.
openssl x509 -in ca.crt  -noout -dates


kubectl get secrets/harbor-harbor-ingress -n harbor -o jsonpath="{.data.ca\.crt}" | base64 --decode
-----BEGIN CERTIFICATE-----
MIIC9DCCAdygAwIBAgIQS7zjdWHqFMC7RsAd3jYT/DANBgkqhkiG9w0BAQsFADAU
MRIwEAYDVQQDEwloYXJib3ItY2EwHhcNMjAxMDI4MDc1MDAwWhcNMjExMDI4MDc1
MDAwWjAUMRIwEAYDVQQDEwloYXJib3ItY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCuu51lc9wZ2n1cXjn36cyNsUgZmDBJbJ9GMBjNHXa7ctEZQJdo
cMhFr315qxteeeZicNgnmlf00ETz6q9mRSXvfezY82yBdsYz2ZeLLcHfLdl5yu7J
RCSwTOAvOGPUvZIkkCU4L/2WCsfZrHLLMzQTnrT2LVYyzI9QkZMW2biafjfPdAC3
XVY+FPd6jUYpLXb5pEKuRusFTJWSP4Lu6Jw1f+ZwgMhyRWVmDVZlBPtcX8s108Fb
R097n3bvZ4YujM4HYTZ8dEKdj5jwxbNWntMLNYPkK7lgxWuwZJ0tzuWGZQI0qQzg
s9DFnetUeUnbMZ0F65iKaQW1CEBO+UajPyQDAgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwICpDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw
AwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAK6ii5qshCaUednjcvOieHoCu1CsCIURK
D6Zq5RoCdvU0v70s9fK5XohYMjw5InwYEYSTB05H2kbePjk+D4BgRmmg7Svc6Dr0
rsRfFCkG+cQFQ6/lf5flerAGCHGIsxJ2MLSG5P5s81wOPMRtwSF2bhZIcvjchPZE
1ExCQFeRkqgEP1zy3++XW5BL77k8pR0uK2NI16VM6GzT4IR7lEfrlYgjGEPjWma+
ZxL5UprZofuxkfiFzTcEUHqvmwMZo2Iwros5ARPtJikbeHn7BbSMBX2U4HOqHih9
6661XWyZAhdCC2NlPYeF+1Bd7rRLWyH7MU5BDVodvX7WesXmmbkFsg==
-----END CERTIFICATE-----

# Trust the self-signed CA on each docker client host so docker login/push to
# core.harbor.domain succeeds without TLS errors.
# NOTE(review): the certificate below is the author's example — replace it with
# the ca.crt generated in the previous step.
mkdir -pv /etc/docker/certs.d/core.harbor.domain/

cat <<EOF > /etc/docker/certs.d/core.harbor.domain/ca.crt
-----BEGIN CERTIFICATE-----
MIIC9DCCAdygAwIBAgIQS7zjdWHqFMC7RsAd3jYT/DANBgkqhkiG9w0BAQsFADAU
MRIwEAYDVQQDEwloYXJib3ItY2EwHhcNMjAxMDI4MDc1MDAwWhcNMjExMDI4MDc1
MDAwWjAUMRIwEAYDVQQDEwloYXJib3ItY2EwggEiMA0GCSqGSIb3DQEBAQUAA4IB
DwAwggEKAoIBAQCuu51lc9wZ2n1cXjn36cyNsUgZmDBJbJ9GMBjNHXa7ctEZQJdo
cMhFr315qxteeeZicNgnmlf00ETz6q9mRSXvfezY82yBdsYz2ZeLLcHfLdl5yu7J
RCSwTOAvOGPUvZIkkCU4L/2WCsfZrHLLMzQTnrT2LVYyzI9QkZMW2biafjfPdAC3
XVY+FPd6jUYpLXb5pEKuRusFTJWSP4Lu6Jw1f+ZwgMhyRWVmDVZlBPtcX8s108Fb
R097n3bvZ4YujM4HYTZ8dEKdj5jwxbNWntMLNYPkK7lgxWuwZJ0tzuWGZQI0qQzg
s9DFnetUeUnbMZ0F65iKaQW1CEBO+UajPyQDAgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwICpDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUw
AwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAK6ii5qshCaUednjcvOieHoCu1CsCIURK
D6Zq5RoCdvU0v70s9fK5XohYMjw5InwYEYSTB05H2kbePjk+D4BgRmmg7Svc6Dr0
rsRfFCkG+cQFQ6/lf5flerAGCHGIsxJ2MLSG5P5s81wOPMRtwSF2bhZIcvjchPZE
1ExCQFeRkqgEP1zy3++XW5BL77k8pR0uK2NI16VM6GzT4IR7lEfrlYgjGEPjWma+
ZxL5UprZofuxkfiFzTcEUHqvmwMZo2Iwros5ARPtJikbeHn7BbSMBX2U4HOqHih9
6661XWyZAhdCC2NlPYeF+1Bd7rRLWyH7MU5BDVodvX7WesXmmbkFsg==
-----END CERTIFICATE-----
EOF

# Export six NFS directories as backing storage for the harbor PVs.
yum install nfs-utils -y
# Create the directories before exporting them.
mkdir -p /data/harbor/data0{1..6}
chmod 777 /data/harbor/*   # NOTE(review): world-writable — fine for a lab, not production
cat <<EOF > /etc/exports
/data/harbor/data01     192.168.0.0/24(rw,no_root_squash)
/data/harbor/data02     192.168.0.0/24(rw,no_root_squash)
/data/harbor/data03     192.168.0.0/24(rw,no_root_squash)
/data/harbor/data04     192.168.0.0/24(rw,no_root_squash)
/data/harbor/data05     192.168.0.0/24(rw,no_root_squash)
/data/harbor/data06     192.168.0.0/24(rw,no_root_squash)
EOF
# `enable --now` replaces the legacy `chkconfig nfs on` + start + restart trio.
systemctl enable --now nfs-server
exportfs -arv

# Generate AND create a 2Gi NFS-backed PV plus matching PVC for each of the six
# harbor data directories.
for i in {1..6}; do
cat <<EOF > /data/harbor/data0${i}.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: harbor-data0${i}
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete
  nfs:
    path: /data/harbor/data0${i}
    server: 192.168.0.104 # NFS server address — change to this host's IP
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: harbor-data0${i}
  namespace: harbor
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
EOF
# Fixed: the original only wrote the manifests and never submitted them.
kubectl create -f /data/harbor/data0${i}.yaml
done
#修改values.yaml
[root@ecs-50b0 harbor-helm-1.5.0]# diff values.yaml values1.yaml
5c5
<   type: nodePort
---
>   type: ingress
18,19c18,19
<     # tls certificatcore.harbor.domaine is configured in the ingress controller, choose this option
<     certSource: secret
---
>     # tls certificate is configured in the ingress controller, choose this option
>     certSource: auto
28c28
<       secretName: "harbor-harbor-ingress"
---
>       secretName: ""
36c36
<       core: core.harbor.domain
---
>       core: core.harbor.domain
108c108
< externalURL: https://119.13.83.82:30003
---
> externalURL: https://core.harbor.domain
197c197
<       existingClaim: "harbor-data01"
---
>       existingClaim: ""
206c206
<       existingClaim: "harbor-data02"
---
>       existingClaim: ""
212c212
<       existingClaim: "harbor-data03"
---
>       existingClaim: ""
220c220
<       existingClaim: "harbor-data04"
---
>       existingClaim: ""
228c228
<       existingClaim: "harbor-data05"
---
>       existingClaim: ""
234c234
<       existingClaim: "harbor-data06"
---
>       existingClaim: ""


# Install harbor from the chart in the current directory into the harbor namespace.
helm install harbor --debug --namespace harbor .
# Uninstall harbor.
helm uninstall harbor --debug --namespace harbor

8、Jenkins

创建Jenkins-k8s命令空间

kubectl create namespace jenkins-k8s

配置Jenkins的nfs共享存储目录

# Export /data/jenkins via NFS for the Jenkins PV below.
mkdir -p /data/jenkins
# Add the following line to /etc/exports:
vim /etc/exports
/data/jenkins   *(rw,no_root_squash)
exportfs -arv
systemctl restart nfs

创建pv和pvc的yaml文件

# Create the Jenkins PV (10Gi, NFS-backed) and its PVC in jenkins-k8s.
# Fixed: the original ended with a stray `done` (no matching loop), which is a
# bash syntax error if this section is executed as a script.
mkdir jenkins && cd jenkins
cat <<EOF > jenkins-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-k8s-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteMany
  nfs:
    server: 192.168.0.104
    path: /data/jenkins
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: jenkins-k8s-pvc
  namespace: jenkins-k8s
spec:
  resources:
    requests:
      storage: 10Gi
  accessModes:
  - ReadWriteMany
EOF

创建Jenkins-deployment.yaml

# Jenkins Deployment: one replica, NFS-backed home, web UI on 8080, JNLP agent
# port on 50000.
# Fixed: stray trailing `done` removed (bash syntax error), and the deprecated
# `serviceAccount` field replaced with `serviceAccountName`.
cat <<EOF > jenkins-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins
  namespace: jenkins-k8s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: jenkins
  template:
    metadata:
      labels:
        app: jenkins
    spec:
      # NOTE(review): the jenkins-k8s-sa ServiceAccount is not created anywhere
      # in this guide — create it (with RBAC) before applying this manifest.
      serviceAccountName: jenkins-k8s-sa
      containers:
      - name: jenkins
        image: jenkins/jenkins:centos7
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        - containerPort: 50000
          name: agent
          protocol: TCP
        resources:
          limits:
            cpu: 1000m
            memory: 1Gi
          requests:
            cpu: 500m
            memory: 512Mi
        livenessProbe:
          httpGet:
            path: /login
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 5
          failureThreshold: 12
        readinessProbe:
          httpGet:
            path: /login
            port: 8080
          initialDelaySeconds: 60
          timeoutSeconds: 5
          failureThreshold: 12
        volumeMounts:
        - name: jenkins-volume
          subPath: jenkins-home
          mountPath: /var/jenkins_home
      volumes:
      - name: jenkins-volume
        persistentVolumeClaim:
          claimName: jenkins-k8s-pvc
EOF

创建Jenkins-service.yaml

# NodePort Service: web UI pinned to node port 30001; the agent port gets an
# auto-assigned NodePort.
# Fixed: stray trailing `done` removed (bash syntax error — no matching loop).
cat <<EOF > jenkins-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: jenkins-service
  namespace: jenkins-k8s
  labels:
    app: jenkins
spec:
  selector:
    app: jenkins
  type: NodePort
  ports:
  - name: web
    port: 8080
    targetPort: web
    nodePort: 30001
  - name: agent
    port: 50000
    targetPort: agent
EOF

运行yaml文件

# Create the PV/PVC, the Deployment, and the Service.
kubectl apply -f jenkins-pv.yaml
kubectl apply -f jenkins-deployment.yaml
kubectl apply -f jenkins-service.yaml

查看 Jenkins Pod 启动日志

# Tail the Jenkins pod log. Selecting by label is more robust than the original
# `get pods | awk | grep` pipeline (which passes an empty argument to
# `kubectl logs` when no pod matches).
kubectl logs -n jenkins-k8s -l app=jenkins