Skip to content

迁移案例

案例一 svn服务迁移到k8s平台

基本信息 旧环境 新环境
IP地址 1.1.1.32 2.2.2.238
端口 1022 22
登录 账号:aaa 密钥:aaa -
服务部署方式 docker k8s

一、获取镜像

注意:这里重新制作了svn-server的镜像,由于未找到使用http插件的以alpine为基础镜像的镜像包,且alpine没有svn的http插件包。

在238上做如下操作

[root@ecs-arm-k8s-master01 svn-server]# docker pull garethflowers/svn-server:latest
说明:以上镜像没有http服务插件,alpine没有支持apache+svn的组件,需要重新制作镜像。
[root@ecs-arm-k8s-master01 svn-server]# vim Dockerfile
FROM centos:7

CMD [ "/usr/bin/svnserve", "--daemon", "--foreground", "--root", "/var/opt/svn" ]
EXPOSE 3690
HEALTHCHECK CMD netstat -ln | grep 3690 || exit 1
VOLUME [ "/var/opt/svn" ]
WORKDIR /var/opt/svn

RUN yum repolist && \
    yum -y install subversion wget vim httpd mod_dav_svn 

[root@ecs-arm-k8s-master01 svn-server]# docker build -t cocl666/svn-httpd:zl .

补充:以上镜像需要在启动后(通过k8s启动容器,参考步骤二、三、四)执行以下步骤,这里启动指的是http方式访问的启动配置:
进入启动后的容器
cd /var/opt/svn
创建svn的repo
svnadmin create Kunpeng_Technology_Center
修改repo权限
chown -R apache:apache Kunpeng_Technology_Center
修改httpd和svn配置文件
vim /etc/httpd/conf/httpd.conf
添加ServerName localhost:80
vim /etc/httpd/conf.d/svn.conf
LoadModule dav_module modules/mod_dav.so
LoadModule dav_svn_module modules/mod_dav_svn.so
LoadModule authz_svn_module modules/mod_authz_svn.so
<Location /Kunpeng_Technology_Center>
   DAV svn
   SVNPath /var/opt/svn/Kunpeng_Technology_Center
   # SVNParentPath /var/opt/svn

   AuthType Basic
   AuthName "Authorization Realm"
   AuthUserFile /var/opt/svn/Kunpeng_Technology_Center/conf/passwd
   AuthzSVNAccessFile /var/opt/svn/Kunpeng_Technology_Center/conf/authz
   Satisfy all
   Require valid-user
</Location>
使用/usr/sbin/httpd启动http服务

注意:由于以上步骤限制需要在svn的repo创建后才能配置http服务,因此不能一次性写入镜像中;

二、建立storageclass

搭建nfs服务器,并在k8s中创建并配置默认storageclass。

在238上做如下操作

NFS服务搭建
yum install -y nfs-utils rpcbind
systemctl start rpcbind
systemctl enable rpcbind
systemctl start nfs-server
systemctl start nfs-secure.service
systemctl enable nfs-server nfs-secure.service
mkdir /data/nfs_data
vim /etc/exports

/data/nfs_data 192.168.0.0/24(rw,sync,no_root_squash,no_subtree_check)

systemctl reload nfs-server

k8s中创建storageclass
1、安装nfs-client-provisioner
helm repo add az-stable http://mirror.azure.cn/kubernetes/charts/
helm repo update
由于默认镜像是amd64的,架构需要改变
docker pull kopkop/nfs-client-provisioner-arm64:v3.1.0-k8s1.11
docker tag kopkop/nfs-client-provisioner-arm64:v3.1.0-k8s1.11 cocl666/nfs-client-provisioner:v3.1.0-k8s1.11
helm pull az-stable/nfs-client-provisioner
tar -xvf nfs-client-provisioner-1.2.11.tgz
cd nfs-client-provisioner/
vim values.yaml
改变镜像仓地址为:cocl666/nfs-client-provisioner
版本不变。

cd ..
helm install nfs-client-provisioner /data/download/zl/nfs-client-provisioner --set nfs.server=192.168.0.156 --set nfs.path=/data/nfs_data

2、查看是否创建成功
kubectl get pods
NAME                                      READY   STATUS    RESTARTS   AGE
nfs-client-provisioner-84dc4599fb-7nsgz   2/2     Running   1          7s

kubectl get sc
NAME         PROVISIONER                            RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   cluster.local/nfs-client-provisioner   Delete          Immediate           true                   56s

3、安装成功后会自动创建名为“nfs-client”的storageclass,将这个sc设为默认sc
kubectl patch storageclass nfs-client -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'

4、再次查看
kubectl get sc
NAME                   PROVISIONER                            RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client (default)   cluster.local/nfs-client-provisioner   Delete          Immediate           true                   116s

注意:
由于Kubernetes v1.20 (opens new window)开始,(我们使用的是1.21.5版本)默认删除了 metadata.selfLink 字段,然而,部分应用仍然依赖于这个字段,例如:nfs-client-provisioner。
启用 selfLink 字段
vim /etc/kubernetes/manifests/kube-apiserver.yaml
kube-apiserver参数配置下增加:
第44行 - --feature-gates=RemoveSelfLink=false

三、建立svn命名空间和pvc

在238上做如下操作

[root@ecs-arm-k8s-master01 svn-server]# kubectl create namespace svn
[root@ecs-arm-k8s-master01 svn-server]# vim svn-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: svn-pvc
  namespace: svn
  annotations:
    volume.beta.kubernetes.io/storage-class: "nfs-client"   #与kubectl edit storageclasses.storage.k8s.io查到的metadata.name保持一致
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 50Gi #最新版本12192达到了16G,暂存所有版本达到43G,这里只迁移最新版本;

[root@ecs-arm-k8s-master01 svn-server]# kubectl apply -f svn-pvc.yaml
[root@ecs-arm-k8s-master01 svn-server]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                 STORAGECLASS   REASON   AGE
pvc-bfa9333d-97b3-4aa4-915e-4a5424598100   50Gi       RWX            Delete           Bound    svn/svn-pvc           nfs-client              6s
[root@ecs-arm-k8s-master01 svn-server]# kubectl get pvc --namespace svn
NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
svn-pvc   Bound    pvc-bfa9333d-97b3-4aa4-915e-4a5424598100   50Gi       RWX            nfs-client     14s

四、拉起服务

在238上做如下操作

vim svn-server.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: svn-server
  namespace: svn
  labels:
    app: svn-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: svn-server
  template:
    metadata:
      labels:
        app: svn-server
    spec:
      containers:
      - name: svn
        image: cocl666/svn-httpd:zl #制作的镜像名
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nfs-pvc
          mountPath: "/var/opt/svn" #地址同容器中svn数据地址
      volumes:
        - name: nfs-pvc
          persistentVolumeClaim:
            claimName: svn-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: svn-service
  namespace: svn
spec:
  ports:
  - port: 80   #http配置时80,svn访问的话统一改为3690
    protocol: TCP
    targetPort: 80
    nodePort: 30690
  selector:
    app: svn-server
  type: NodePort
[root@ecs-arm-k8s-master01 svn-server]# kubectl apply -f svn-server.yaml
[root@ecs-arm-k8s-master01 svn-server]# kubectl get pod --namespace svn 
NAME                          READY   STATUS    RESTARTS   AGE
svn-server-85c6977d69-7dk6l   1/1     Running   0          5m58s
[root@ecs-arm-k8s-master01 svn-server]# kubectl get svc --namespace svn 
NAME          TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
svn-service   NodePort   10.103.151.16   <none>        80:30690/TCP   6m5s

五、迁移旧服务器最新版本到新服务

在32上做如下操作

docker exec -it svn bash
cd /var/lib/svn
svnlook youngest Kunpeng_Technology_Center
12192
备份最新版本
svnadmin dump Kunpeng_Technology_Center -r 12192 > 12192.dumpfile
exit
docker cp svn:/var/lib/svn/12192.dumpfile .
同步备份包到238的svn-pvc数据目录下
rsync -avzP 12192.dumpfile root@2.2.2.2:/data/nfs_data/svn-svn-pvc-pvc-bfa9333d-97b3-4aa4-915e-4a5424598100
参数avzP解释:参数a是归档传输,保留文件属性,v是显示详细过程,z是压缩传输,P是显示进度并支持断点续传。

在238上做如下操作

[root@ecs-arm-k8s-master01 svn-server]# kubectl exec -it --namespace svn svn-server-776fdc9fd8-7qt8n -- sh
/var/opt/svn # ls
12192.dumpfile
/var/opt/svn # svnadmin create Kunpeng_Technology_Center
/var/opt/svn # ls
12192.dumpfile             Kunpeng_Technology_Center
将备份好的版本导入新库
/var/opt/svn # svnadmin load Kunpeng_Technology_Center < 12192.dumpfile
配置authz  passwd  svnserve.conf与32一样
/var/opt/svn # chown -R apache:apache Kunpeng_Technology_Center
/var/opt/svn # vim /etc/httpd/conf/httpd.conf
添加ServerName localhost:80
vim /etc/httpd/conf.d/svn.conf配置文件
LoadModule dav_module modules/mod_dav.so
LoadModule dav_svn_module modules/mod_dav_svn.so
LoadModule authz_svn_module modules/mod_authz_svn.so
<Location /Kunpeng_Technology_Center>
   DAV svn
   SVNPath /var/opt/svn/Kunpeng_Technology_Center
   # SVNParentPath /var/opt/svn

   AuthType Basic
   AuthName "Authorization Realm"
   AuthUserFile /var/opt/svn/Kunpeng_Technology_Center/conf/passwd
   AuthzSVNAccessFile /var/opt/svn/Kunpeng_Technology_Center/conf/authz
   Satisfy all
   Require valid-user
</Location>

注意:svn访问需要禁掉svnserve.conf中的use-sasl = true,并使用passwd.default中的明文密码访问,且svn-server.yaml中端口改为3690;
http访问需要打开use-sasl = true,并使用passwd中的加密密码访问,且svn-server.yaml中端口改为80;

使用/usr/sbin/httpd启动http服务
验证
svn客户端连接方式为:svn://2.2.2.2:30690/Kunpeng_Technology_Center 或 http://2.2.2.2:30690/Kunpeng_Technology_Center;由于两种访问方式的配置不同(端口与use-sasl设置),两者不能同时生效,只能二选一。

案例二 gitlab-ce服务迁移

一、备份并传送数据文件

备份
1、确保gitlab-ce处于正常运行状态,直接执行gitlab-rake gitlab:backup:create进行备份;
2、默认的备份目录是:/var/opt/gitlab/backups/,会创建xxx.tar文件,迁移的话还要手动备份gitlab.rb和gitlab-secrets.json;
3、开启备份并配置备份目录:
vim /etc/gitlab/gitlab.rb
gitlab_rails['manage_backup_path'] = true #开启备份功能
gitlab_rails['backup_path'] = "/var/opt/gitlab/backups" #设置备份文件存储地址
4、设置备份权限和过期时间:
vim /etc/gitlab/gitlab.rb
gitlab_rails['backup_archive_permissions']=0644 #生成的备份文件权限
gitlab_rails['backup_keep_time'] = 604800 #以秒为单位,7天
使用gitlab-ctl reconfigure重载配置文件
5、定时任务配置自动备份:
crontab -e
30 17 * * 5 /opt/gitlab/bin/gitlab-rake gitlab:backup:create
6、docker部署,定时任务如何使用?
写好任务脚本如下:
vim /data/zl/shell/backups_formal.sh
#!/bin/bash
DOCKER_ID_formal=0149f7c4c7c6
docker exec $DOCKER_ID_formal /bin/bash -c '/opt/gitlab/bin/gitlab-rake gitlab:backup:create'
配置定时任务:
crontab -e
30 17 * * 5 /bin/bash /data/zl/shell/backups_formal.sh #每周五下午5:30分

传送备份好的文件到238
rsync -avP 1642301279_2022_01_16_13.8.3_gitlab_backup.tar root@2.2.2.2:/data/download/zl/gitlab-ce/

二、应用迁移

1、使用docker-compose部署
docker pull cocl666/gitlab-ce:13.8.3 #必须与旧服务版本相同,若迁移有版本限制建议先在旧版本升级到可迁移版本,根据官方升级途径;
cd /data/gitlab-ce
vim docker-compose.yaml
version: "3.6"
services:
    gitlab-ce:
        container_name: gitlab-ce
        image: 'cocl666/gitlab-ce:13.8.3'
        restart: always
        hostname: 'gitlab.example.com'
        environment:
          GITLAB_OMNIBUS_CONFIG: | #这个|不能少,会报格式错误,以下配置属于精简优化配置,可酌情删减;
            external_url 'http://2.2.2.2:80' # 配置本机ip
            nginx['redirect_http_to_https'] = true
            unicorn['worker_processes'] = 4
            postgresql['max_worker_processes'] = 4
            nginx['worker_processes'] = 4
            postgresql['shared_buffers'] = "256MB"
            sidekiq['concurrency'] = 20 #配置不可与gitlab-rb中min和max冲突,这里建议注释掉
            gitlab_pages['enable'] = false
            pages_nginx['enable'] = false
            prometheus_monitoring['enable'] = false
            alertmanager['enable'] = false
            node_exporter['enable'] = false
            redis_exporter['enable'] = false
            postgres_exporter['enable'] = false
            pgbouncer_exporter['enable'] = false
            gitlab_exporter['enable'] = false
            grafana['enable'] = false
            sidekiq['metrics_enabled'] = false
            gitlab_rails['usage_ping_enabled'] = false
            gitlab_rails['sentry_enabled'] = false
            grafana['reporting_enabled'] = false
            gitlab_rails['smtp_enable'] = false
            gitlab_rails['gitlab_email_enabled'] = false
            gitlab_rails['incoming_email_enabled'] = false
            gitlab_rails['gitlab_default_projects_features_container_registry'] = false
            gitlab_rails['registry_enabled'] = false
            registry['enable'] = false
            registry_nginx['enable'] = false
            gitlab_rails['manage_backup_path'] = true
            gitlab_rails['backup_path'] = "/var/opt/gitlab/backups"
            gitlab_rails['backup_archive_permissions'] = 0644
        ports:
            - "8083:80"
        volumes:
            - '/data/gitlab-ce/conf:/etc/gitlab'
            - '/data/gitlab-ce/logs:/var/log/gitlab'
            - '/data/gitlab-ce/data:/var/opt/gitlab'
安装新版docker-compose,具体版本可以从github网站查找
curl -L https://download.fastgit.org/docker/compose/releases/download/v2.0.1/docker-compose-linux-aarch64 -o /usr/bin/docker-compose
chmod +x /usr/bin/docker-compose

2、拉起容器
docker-compose up -d
报错一
Error executing action `run` on resource 'execute[reload all sysctl conf]'
解决
cd /opt/gitlab/embedded/cookbooks/gitlab/recipes
vim selinux.rb
注释以下,保存后gitlab-ctl reconfigure、gitlab-ctl start,每次重启容器都需要修改。
#if RedhatHelper.system_is_rhel7? || RedhatHelper.system_is_rhel8?
#  ssh_keygen_module = 'gitlab-7.2.0-ssh-keygen'
#  execute "semodule -i /opt/gitlab/embedded/selinux/rhel/7/#{ssh_keygen_module}.pp" do
#    not_if "getenforce | grep Disabled"
#    not_if "semodule -l | grep '^#{ssh_keygen_module}\\s'"
#  end

#  authorized_keys_module = 'gitlab-10.5.0-ssh-authorized-keys'
#  execute "semodule -i /opt/gitlab/embedded/selinux/rhel/7/#{authorized_keys_module}.pp" do
#    not_if "getenforce | grep Disabled"
#    not_if "semodule -l | grep '^#{authorized_keys_module}\\s'"
#  end

#  gitlab_shell_module = 'gitlab-13.5.0-gitlab-shell'
#  execute "semodule -i /opt/gitlab/embedded/selinux/rhel/7/#{gitlab_shell_module}.pp" do
#    not_if "getenforce | grep Disabled"
#    not_if "semodule -l | grep '^#{gitlab_shell_module}\\s'"
#  end
#end

3、迁移服务配置,还原备份文件
进入容器内部
gitlab-ctl stop puma
gitlab-ctl stop sidekiq
gitlab-ctl status 以上两个服务为down

chmod 777 /var/opt/gitlab/backups/xxx_gitlab_backup.tar

gitlab-rake gitlab:backup:restore /var/opt/gitlab/backups/xxx_gitlab_backup.tar

输入两次yes,回车后,完成恢复

手动备份gitlab.rb(配置优化)和gitlab-secrets.json到conf目录下,若gitlab.rb和docker-compose.yaml中配置有冲突,则修改其中之一即可;

gitlab-ctl reconfigure
gitlab-ctl start

验证http://2.2.2.2:8083