
Merge pull request 'Deployment service automation scripts' (#71) from auto_deploy_script into master

dev-lhz · fanshuai · 1 year ago · commit 7c1fc4f017
18 changed files with 856 additions and 1 deletion
  1. k8s/build-java.sh (+25, -0)
  2. k8s/build-node.sh (+21, -0)
  3. k8s/build.sh (+143, -0)
  4. k8s/build_and_deploy.sh (+60, -0)
  5. k8s/deploy.sh (+148, -0)
  6. k8s/k8s-3nacos.yaml (+1, -1)
  7. k8s/template-yaml/deploy/k8s-12front.yaml (+36, -0)
  8. k8s/template-yaml/deploy/k8s-7management.yaml (+36, -0)
  9. k8s/template-yaml/k8s-10gen.yaml (+36, -0)
  10. k8s/template-yaml/k8s-11visual.yaml (+36, -0)
  11. k8s/template-yaml/k8s-12front.yaml (+36, -0)
  12. k8s/template-yaml/k8s-3nacos.yaml (+62, -0)
  13. k8s/template-yaml/k8s-4gateway.yaml (+36, -0)
  14. k8s/template-yaml/k8s-5auth.yaml (+36, -0)
  15. k8s/template-yaml/k8s-6system.yaml (+36, -0)
  16. k8s/template-yaml/k8s-7management.yaml (+36, -0)
  17. k8s/template-yaml/k8s-8file.yaml (+36, -0)
  18. k8s/template-yaml/k8s-9job.yaml (+36, -0)

k8s/build-java.sh (+25, -0)

@@ -0,0 +1,25 @@
#!/bin/bash

baseDir="/home/somuns/ci4s"
# If $1 is "all", build every module; otherwise build only the management-platform module
if [ "$1" == "all" ]; then
    buildDir=$baseDir
else
    buildDir="$baseDir/ruoyi-modules/management-platform"
fi

echo "Building $buildDir"
cd $buildDir && mvn clean install

if [ $? -ne 0 ]; then
    echo "Failed to build ruoyi-modules"
    exit 1
fi
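A brief usage sketch for this script; the standalone invocation below is an assumption, since build.sh normally runs it inside the 172.20.32.187/ci4s/build:v1 container:

    # build only ruoyi-modules/management-platform (the default)
    bash k8s/build-java.sh
    # build every module from the repository root
    bash k8s/build-java.sh all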









k8s/build-node.sh (+21, -0)

@@ -0,0 +1,21 @@
#!/bin/bash

baseDir="/home/somuns/ci4s"
cd ${baseDir}/react-ui

npm install

if [ $? -ne 0 ]; then
    echo "Failed to install npm dependencies"
    exit 1
fi

npm run build
if [ $? -ne 0 ]; then
    echo "Failed to build react-ui"
    exit 1
fi
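A note on invocation: the script assumes node and npm are already on the PATH; build.sh provides them by running it inside the 172.20.32.187/ci4s/node:16.16.0 image. A standalone run would be:

    bash k8s/build-node.sh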




k8s/build.sh (+143, -0)

@@ -0,0 +1,143 @@
#!/bin/bash

# Record the start time
start=$(date +%s)

# Default parameters
branch="master"
service="manage-front"

show_help() {
    echo "Usage: $0 [-b branch] [-s service]"
    echo
    echo "Options:"
    echo "  -b  Branch to deploy, default is master"
    echo "  -s  Service to deploy (manage-front, manage, front, all; default is manage-front)"
    echo "  -h  Show this help message"
}

# Parse command-line options
while getopts "b:s:h" opt; do
    case $opt in
        b) branch=$OPTARG ;;
        s) service=$OPTARG ;;
        h) show_help; exit 0 ;;
        \?) echo "Invalid option -$OPTARG" >&2; show_help; exit 1 ;;
    esac
done

echo "branch: $branch"
echo "service: $service"

# Work from the project directory on the build host
baseDir="/home/somuns/ci4s"
cd ${baseDir}

# Pull the latest code of the specified branch
echo "Checking out and pulling branch $branch..."
git checkout $branch
if [ $? -ne 0 ]; then
    echo "Failed to check out branch $branch; please check that the branch name is correct!"
    exit 1
fi

git pull origin $branch
if [ $? -ne 0 ]; then
    echo "Failed to pull the code; please check the network or contact the administrator!"
    exit 1
fi

# Create output directories
mkdir -p ${baseDir}/k8s/dockerfiles/jar
mkdir -p ${baseDir}/k8s/dockerfiles/html

compile_front() {
    # Clean up previous front-end build output
    if [ -d "${baseDir}/react-ui/dist" ]; then
        rm -rf ${baseDir}/react-ui/dist
    fi

    # Build the front end inside a Node container
    docker run -v ${baseDir}:${baseDir} \
        -e http_proxy=http://172.20.32.253:3128 -e https_proxy=http://172.20.32.253:3128 \
        172.20.32.187/ci4s/node:16.16.0 ${baseDir}/k8s/build-node.sh
    if [ $? -ne 0 ]; then
        echo "Front-end build failed; please check the code!"
        exit 1
    fi

    # Copy the front-end files
    cp -rf ${baseDir}/react-ui/dist/ ${baseDir}/k8s/dockerfiles/html
    if [ $? -ne 0 ]; then
        echo "Failed to copy the html files; please check the code!"
        exit 1
    fi
}

compile_java() {
    param=$1
    # Build the Java modules inside the Maven build container
    docker run -v ${baseDir}:${baseDir} -v /home/maven:/home/maven \
        -e http_proxy=http://172.20.32.253:3128 -e https_proxy=http://172.20.32.253:3128 \
        172.20.32.187/ci4s/build:v1 ${baseDir}/k8s/build-java.sh $param
    if [ $? -ne 0 ]; then
        echo "Java build failed; please check the code!"
        exit 1
    fi

    # Copy the jar packages
    cp -rf ${baseDir}/ruoyi-modules/management-platform/target/management-platform.jar ${baseDir}/k8s/dockerfiles/jar/management-platform.jar
    if [ $? -ne 0 ]; then
        echo "Failed to copy the jar package; please check the code!"
        exit 1
    fi

    if [ "$param" == "all" ]; then
        cp -rf ${baseDir}/ruoyi-modules/ruoyi-system/target/ruoyi-modules-system.jar ${baseDir}/k8s/dockerfiles/jar/ruoyi-modules-system.jar
        if [ $? -ne 0 ]; then
            echo "Failed to copy the jar package; please check the code!"
            exit 1
        fi

        cp -rf ${baseDir}/ruoyi-auth/target/ruoyi-auth.jar ${baseDir}/k8s/dockerfiles/jar/ruoyi-auth.jar
        if [ $? -ne 0 ]; then
            echo "Failed to copy the jar package; please check the code!"
            exit 1
        fi

        cp -rf ${baseDir}/ruoyi-gateway/target/ruoyi-gateway.jar ${baseDir}/k8s/dockerfiles/jar/ruoyi-gateway.jar
        if [ $? -ne 0 ]; then
            echo "Failed to copy the jar package; please check the code!"
            exit 1
        fi
    fi
}

if [ "$service" == "manage-front" ] || [ "$service" == "front" ]; then
    # Build the front end
    compile_front
fi

if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
    # Build the Java modules
    compile_java $service
fi

if [ "$service" == "all" ]; then
    # Build the front end
    compile_front

    # Build the Java modules
    compile_java "all"
fi

# Record the end time
end=$(date +%s)

# Compute the elapsed time
runtime=$((end-start))
echo "Build succeeded, elapsed time: $runtime seconds"

k8s/build_and_deploy.sh (+60, -0)

@@ -0,0 +1,60 @@
#!/bin/bash

# Record the start time
startTime=$(date +%s)

# Work from the project directory on the build host
baseDir="/home/somuns/ci4s"
cd ${baseDir}

# Default build parameters
branch="master"
service="manage-front"
env="dev"

show_help() {
    echo "Usage: $0 [-b branch] [-s service] [-e environment]"
    echo
    echo "Options:"
    echo "  -b  Branch to deploy, default: master"
    echo "  -s  Service to deploy (manage-front, manage, front, all; default: manage-front)"
    echo "  -e  Environment (e.g., dev, test; default: dev)"
    echo "  -h  Show this help message"
}

# Parse command-line options
while getopts "b:s:e:h" opt; do
    case $opt in
        b) branch=$OPTARG ;;
        s) service=$OPTARG ;;
        e) env=$OPTARG ;;
        h) show_help; exit 0 ;;
        \?) echo "Invalid option -$OPTARG" >&2; show_help; exit 1 ;;
    esac
done

# Build
echo "start build"
sh ${baseDir}/k8s/build.sh -b ${branch} -s ${service}
if [ $? -ne 0 ]; then
    echo "Build failed"
    exit 1
fi
echo "build success"

# Deploy
echo "start deploy"
sh ${baseDir}/k8s/deploy.sh -s ${service} -e ${env}
if [ $? -ne 0 ]; then
    echo "Deploy failed"
    exit 1
fi
echo "deploy success"

# Record the end time
endTime=$(date +%s)
# Compute the elapsed time
duration=$(( $endTime - $startTime ))
echo "Total build and deploy time: $duration seconds"

k8s/deploy.sh (+148, -0)

@@ -0,0 +1,148 @@
#!/bin/bash

# Record the start time
start=$(date +%s)
# Default parameters
service="manage-front"
env="dev"

show_help() {
    echo "Usage: $0 [-s service] [-e environment]"
    echo
    echo "Options:"
    echo "  -s  Service to deploy (manage-front, manage, front, all; default: manage-front)"
    echo "  -e  Environment (e.g., dev, test; default: dev)"
    echo "  -h  Show this help message"
}

# Parse command-line options
while getopts "s:e:h" opt; do
    case $opt in
        s) service=$OPTARG ;;
        e) env=$OPTARG ;;
        h) show_help; exit 0 ;;
        \?) echo "Invalid option -$OPTARG" >&2; exit 1 ;;
    esac
done

echo "Deploy service: $service, environment: $env"

# Choose the target IP address for the environment
if [ "$env" == "dev" ]; then
    remote_ip="172.20.32.181"
elif [ "$env" == "test" ]; then
    remote_ip="172.20.32.185"
else
    echo "Invalid environment - $env"
    exit 1
fi

baseDir=/home/somuns/ci4s
tag=$(date +'%Y%m%d%H%M')
remote_deploy_dir=/home/deploy/manage-platform

# Build and push a Docker image
build_image() {
    local dockerfile=$1
    local image=$2
    cd ${baseDir}/k8s/dockerfiles
    docker build -t ${image} -f ${dockerfile} .
    if [ $? -ne 0 ]; then
        echo "Failed to build ${image} image"
        exit 1
    fi
    docker push ${image}
}

# Copy the template YAML, substitute the image placeholder, and ship it to the target host
prepare_yaml() {
    local yaml_file=$1
    local image=$2

    placeholder="\${${yaml_file%.yaml}-image}"
    cd ${baseDir}/k8s/template-yaml
    cp -rf ${yaml_file} deploy/
    cd deploy/
    sed -i "s|${placeholder}|${image}|g" ${yaml_file}
    if [ $? -ne 0 ]; then
        echo "Failed to replace the image placeholder with ${image}"
        exit 1
    fi

    # Create the remote directory and back up any existing manifest
    ssh root@$remote_ip "mkdir -p ${remote_deploy_dir} && if [ -f ${remote_deploy_dir}/${yaml_file} ]; then mv ${remote_deploy_dir}/${yaml_file} ${remote_deploy_dir}/${yaml_file}.bak; fi"
    if [ $? -ne 0 ]; then
        echo "Failed to create remote directory or back up ${yaml_file}"
        exit 1
    else
        echo "Successfully created remote directory and backed up ${yaml_file}"
    fi

    scp ${baseDir}/k8s/template-yaml/deploy/${yaml_file} root@$remote_ip:${remote_deploy_dir}/${yaml_file}
    if [ $? -ne 0 ]; then
        echo "Failed to copy ${yaml_file}"
        exit 1
    else
        echo "Successfully copied ${yaml_file}"
    fi
}

# Apply the manifest on the target cluster
deploy_service() {
    local yaml_file=$1
    ssh root@$remote_ip "kubectl apply -n argo -f ${remote_deploy_dir}/${yaml_file}"
    if [ $? -ne 0 ]; then
        echo "Failed to deploy ${yaml_file}"
        exit 1
    else
        echo "Successfully deployed ${yaml_file}"
    fi
}

deploy_nacos() {
    local yaml_file=$1
    scp ${baseDir}/k8s/${yaml_file} root@$remote_ip:${remote_deploy_dir}/${yaml_file}
    deploy_service ${yaml_file}
}

build_and_deploy() {
    local dockerfile=$1
    local image=$2
    local yaml_file=$3

    build_image ${dockerfile} ${image}
    prepare_yaml ${yaml_file} ${image}
    deploy_service ${yaml_file}
}

# Build and deploy the management service
if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
    build_and_deploy "managent-dockerfile" "172.20.32.187/ci4s/ci4s-managent:${tag}" "k8s-7management.yaml"
fi

# Build and deploy the front-end service
if [ "$service" == "manage-front" ] || [ "$service" == "front" ]; then
    build_and_deploy "nginx-dockerfile" "172.20.32.187/ci4s/ci4s-front:${tag}" "k8s-12front.yaml"
fi

if [ "$service" == "all" ]; then
    # Deploy the front end
    build_and_deploy "nginx-dockerfile" "172.20.32.187/ci4s/ci4s-front:${tag}" "k8s-12front.yaml"
    # Deploy the management platform
    build_and_deploy "managent-dockerfile" "172.20.32.187/ci4s/ci4s-managent:${tag}" "k8s-7management.yaml"
    # Deploy the auth service
    build_and_deploy "auth-dockerfile" "172.20.32.187/ci4s/ci4s-auth:${tag}" "k8s-5auth.yaml"
    # Deploy the gateway
    build_and_deploy "gateway-dockerfile" "172.20.32.187/ci4s/ci4s-gateway:${tag}" "k8s-4gateway.yaml"
    # Deploy the system service
    build_and_deploy "system-dockerfile" "172.20.32.187/ci4s/ci4s-system:${tag}" "k8s-6system.yaml"
    # Deploy the Nacos config center
    deploy_nacos "k8s-3nacos.yaml"
fi

# Record the end time
end=$(date +%s)
echo "Deployment succeeded, elapsed time: $((end-start)) seconds"

k8s/k8s-3nacos.yaml (+1, -1)

@@ -24,7 +24,7 @@ spec:
             - name: MODE
               value: standalone
             - name: MYSQL_SERVICE_HOST
-              value: mysql2.argo.svc
+              value: mysql.argo.svc
             - name: MYSQL_SERVICE_PORT
               value: "3306"
             - name: MYSQL_SERVICE_DB_NAME


k8s/template-yaml/deploy/k8s-12front.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-front-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-front
  template:
    metadata:
      labels:
        app: ci4s-front
    spec:
      containers:
        - name: ci4s-front
          image: 172.20.32.187/ci4s/ci4s-front:202406120836
          ports:
            - containerPort: 8000

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-front-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 8000
      nodePort: 31213
      protocol: TCP
  selector:
    app: ci4s-front


k8s/template-yaml/deploy/k8s-7management.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-management-platform-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-management-platform
  template:
    metadata:
      labels:
        app: ci4s-management-platform
    spec:
      containers:
        - name: ci4s-management-platform
          image: 172.20.32.187/ci4s/managent:202406121003
          ports:
            - containerPort: 9213

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-management-platform-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9213
      nodePort: 31208
      protocol: TCP
  selector:
    app: ci4s-management-platform


k8s/template-yaml/k8s-10gen.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-gen-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-gen
  template:
    metadata:
      labels:
        app: ci4s-gen
    spec:
      containers:
        - name: ci4s-gen
          image: ${k8s-10gen-image}
          ports:
            - containerPort: 9202

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-gen-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9202
      nodePort: 31211
      protocol: TCP
  selector:
    app: ci4s-gen


k8s/template-yaml/k8s-11visual.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-visual-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-visual
  template:
    metadata:
      labels:
        app: ci4s-visual
    spec:
      containers:
        - name: ci4s-visual
          image: ${k8s-11visual-image}
          ports:
            - containerPort: 9100

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-visual-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9100
      nodePort: 31212
      protocol: TCP
  selector:
    app: ci4s-visual


k8s/template-yaml/k8s-12front.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-front-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-front
  template:
    metadata:
      labels:
        app: ci4s-front
    spec:
      containers:
        - name: ci4s-front
          image: ${k8s-12front-image}
          ports:
            - containerPort: 8000

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-front-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 8000
      nodePort: 31213
      protocol: TCP
  selector:
    app: ci4s-front


k8s/template-yaml/k8s-3nacos.yaml (+62, -0)

@@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: argo
  name: nacos-ci4s
  labels:
    app: nacos-ci4s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nacos-ci4s
  template:
    metadata:
      labels:
        app: nacos-ci4s
    spec:
      containers:
        - name: nacos-ci4s
          image: ${k8s-3nacos-image}
          env:
            - name: SPRING_DATASOURCE_PLATFORM
              value: mysql
            - name: MODE
              value: standalone
            - name: MYSQL_SERVICE_HOST
              value: mysql2.argo.svc
            - name: MYSQL_SERVICE_PORT
              value: "3306"
            - name: MYSQL_SERVICE_DB_NAME
              value: nacos-ci4s-config
            - name: MYSQL_SERVICE_USER
              value: root
            - name: MYSQL_SERVICE_PASSWORD
              value: qazxc123456.
          ports:
            - containerPort: 8848
            - containerPort: 9848
      restartPolicy: Always

---

apiVersion: v1
kind: Service
metadata:
  namespace: argo
  name: nacos-ci4s
  labels:
    app: nacos-ci4s
spec:
  type: NodePort
  selector:
    app: nacos-ci4s
  ports:
    - port: 8848
      targetPort: 8848
      nodePort: 31203
      name: web
    - port: 9848
      targetPort: 9848
      nodePort: 31204
      name: podsa

k8s/template-yaml/k8s-4gateway.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-gateway-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-gateway
  template:
    metadata:
      labels:
        app: ci4s-gateway
    spec:
      containers:
        - name: ci4s-gateway
          image: ${k8s-4gateway-image}
          ports:
            - containerPort: 8082

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-gateway-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 8082
      nodePort: 31205
      protocol: TCP
  selector:
    app: ci4s-gateway


k8s/template-yaml/k8s-5auth.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-auth-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-auth
  template:
    metadata:
      labels:
        app: ci4s-auth
    spec:
      containers:
        - name: ci4s-auth
          image: ${k8s-5auth-image}
          ports:
            - containerPort: 9200

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-auth-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9200
      nodePort: 31206
      protocol: TCP
  selector:
    app: ci4s-auth


k8s/template-yaml/k8s-6system.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-system-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-system
  template:
    metadata:
      labels:
        app: ci4s-system
    spec:
      containers:
        - name: ci4s-system
          image: ${k8s-6system-image}
          ports:
            - containerPort: 9201

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-system-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9201
      nodePort: 31207
      protocol: TCP
  selector:
    app: ci4s-system


k8s/template-yaml/k8s-7management.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-management-platform-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-management-platform
  template:
    metadata:
      labels:
        app: ci4s-management-platform
    spec:
      containers:
        - name: ci4s-management-platform
          image: ${k8s-7management-image}
          ports:
            - containerPort: 9213

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-management-platform-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9213
      nodePort: 31208
      protocol: TCP
  selector:
    app: ci4s-management-platform


k8s/template-yaml/k8s-8file.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-file-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-file
  template:
    metadata:
      labels:
        app: ci4s-file
    spec:
      containers:
        - name: ci4s-file
          image: ${k8s-8file-image}
          ports:
            - containerPort: 9300

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-file-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9300
      nodePort: 31209
      protocol: TCP
  selector:
    app: ci4s-file


k8s/template-yaml/k8s-9job.yaml (+36, -0)

@@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-job-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-job
  template:
    metadata:
      labels:
        app: ci4s-job
    spec:
      containers:
        - name: ci4s-job
          image: ${k8s-9job-image}
          ports:
            - containerPort: 9203

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-job-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9203
      nodePort: 31210
      protocol: TCP
  selector:
    app: ci4s-job

