Browse Source

重构部署脚本

pull/71/head
somunslotus 1 year ago
parent
commit
69a51b95a9
17 changed files with 697 additions and 44 deletions
  1. +142
    -0
      k8s/AAdeploy.sh
  2. +10
    -2
      k8s/build-java.sh
  3. +69
    -38
      k8s/build.sh
  4. +1
    -1
      k8s/build_and_deploy.sh
  5. +17
    -3
      k8s/deploy.sh
  6. +36
    -0
      k8s/template-yaml/deploy/k8s-12front.yaml
  7. +36
    -0
      k8s/template-yaml/deploy/k8s-7management.yaml
  8. +36
    -0
      k8s/template-yaml/k8s-10gen.yaml
  9. +36
    -0
      k8s/template-yaml/k8s-11visual.yaml
  10. +36
    -0
      k8s/template-yaml/k8s-12front.yaml
  11. +62
    -0
      k8s/template-yaml/k8s-3nacos.yaml
  12. +36
    -0
      k8s/template-yaml/k8s-4gateway.yaml
  13. +36
    -0
      k8s/template-yaml/k8s-5auth.yaml
  14. +36
    -0
      k8s/template-yaml/k8s-6system.yaml
  15. +36
    -0
      k8s/template-yaml/k8s-7management.yaml
  16. +36
    -0
      k8s/template-yaml/k8s-8file.yaml
  17. +36
    -0
      k8s/template-yaml/k8s-9job.yaml

+ 142
- 0
k8s/AAdeploy.sh View File

@@ -0,0 +1,142 @@
#!/bin/bash
# Deployment entry script: builds service images and deploys the selected
# service(s) to the target Kubernetes environment over SSH.

# Record start time (seconds since epoch) for the final duration report.
start=$(date +%s)

# Default parameters.
service="manage-front"
env="dev"

# Print usage information for this script.
show_help() {
  echo "Usage: $0 [-s service] [-e environment]"
  echo
  echo "Options:"
  echo " -s Service to deploy (manage-front, manage, front, all default: manage-front)"
  echo " -e Environment (e.g., dev, test, default: dev)"
  echo " -h Show this help message"
}

# Parse command-line options.
while getopts "s:e:h" opt; do
  case "$opt" in
    s) service=$OPTARG ;;
    e) env=$OPTARG ;;
    h) show_help; exit 0 ;;
    \?) echo "Invalid option -$OPTARG" >&2; exit 1 ;;
  esac
done

echo "Deploy service: $service, environment: $env"

# Map the environment name to the remote host IP address.
case "$env" in
  dev)  remote_ip="172.20.32.181" ;;
  test) remote_ip="172.20.32.185" ;;
  *)    echo "Invalid environment - $env"
        exit 1 ;;
esac

# Global paths and the image tag (timestamp) shared by all build steps.
baseDir=/home/somuns/ci4s
tag=$(date +'%Y%m%d%H%M')
remote_deploy_dir=/home/deploy/manage-platform

# Build a Docker image and push it to the registry.
# Arguments:
#   $1 - dockerfile name (relative to ${baseDir}/k8s/dockerfiles)
#   $2 - full image reference (registry/repo:tag)
# Uses globals: baseDir. Exits the whole script on any failure.
build_image() {
  local dockerfile=$1
  local image=$2
  # Fix: the cd result was unchecked — if the directory is missing, docker
  # build would silently run from whatever the current directory was.
  cd "${baseDir}/k8s/dockerfiles" || { echo "Cannot cd to ${baseDir}/k8s/dockerfiles" >&2; exit 1; }
  docker build -t "${image}" -f "${dockerfile}" .
  if [ $? -ne 0 ]; then
    echo "Build ${image} image fail"
    exit 1
  fi
  # Fix: push failures were previously ignored, letting a later kubectl apply
  # reference an image that never reached the registry. Check it like build.
  docker push "${image}"
  if [ $? -ne 0 ]; then
    echo "Push ${image} image fail"
    exit 1
  fi
}

# Render a template YAML and upload it to the remote host.
# Copies the template into deploy/, substitutes its image placeholder,
# backs up any previous remote copy, then scp's the rendered file over.
# Arguments:
#   $1 - template yaml file name, e.g. k8s-7management.yaml
#   $2 - full image reference to substitute
# Uses globals: baseDir, remote_ip, remote_deploy_dir. Exits on failure.
prepare_yaml() {
  local yaml_file=$1
  local image=$2

  # Placeholder convention: k8s-7management.yaml -> ${k8s-7management-image}
  local placeholder="\${${yaml_file%.yaml}-image}"
  # Fix: cd and cp results were unchecked; a missing template directory or
  # file would only surface later as a confusing sed/scp error.
  cd "${baseDir}/k8s/template-yaml" || { echo "Cannot cd to ${baseDir}/k8s/template-yaml" >&2; exit 1; }
  cp -rf "${yaml_file}" deploy/ || { echo "Copy ${yaml_file} to deploy/ fail" >&2; exit 1; }
  cd deploy/ || { echo "Cannot cd to deploy/" >&2; exit 1; }
  # '|' delimiter because the image reference contains '/' and ':'.
  sed -i "s|${placeholder}|${image}|g" "${yaml_file}"
  if [ $? -ne 0 ]; then
    echo "Replace ${image} image fail"
    exit 1
  fi

  # Create the remote directory and back up any existing YAML before upload.
  ssh root@"$remote_ip" "mkdir -p ${remote_deploy_dir} && if [ -f ${remote_deploy_dir}/${yaml_file} ]; then mv ${remote_deploy_dir}/${yaml_file} ${remote_deploy_dir}/${yaml_file}.bak; fi"
  if [ $? -ne 0 ]; then
    echo "Failed to create remote directory or backup ${yaml_file}"
    exit 1
  else
    echo "Successfully created remote directory and backup ${yaml_file}"
  fi

  scp "${baseDir}/k8s/template-yaml/deploy/${yaml_file}" root@"$remote_ip":"${remote_deploy_dir}/${yaml_file}"
  if [ $? -ne 0 ]; then
    echo "Failed to copy ${yaml_file}"
    exit 1
  else
    echo "Successfully copied ${yaml_file}"
  fi
}

# Apply a previously-uploaded YAML on the remote host (namespace 'argo').
# Arguments:
#   $1 - yaml file name under ${remote_deploy_dir}
# Uses globals: remote_ip, remote_deploy_dir. Exits on failure.
deploy_service() {
  local yaml_file=$1
  if ssh root@"$remote_ip" "kubectl apply -n argo -f ${remote_deploy_dir}/${yaml_file}"; then
    echo "Successfully deployed ${yaml_file}"
  else
    echo "Failed to deploy ${yaml_file}"
    exit 1
  fi
}

# Upload the nacos manifest to the remote host and apply it.
# Arguments:
#   $1 - nacos yaml file name, e.g. k8s-3nacos.yaml
# NOTE(review): the source path is ${baseDir}/k8s/${yaml_file}, while the
# other templates live under k8s/template-yaml/ — confirm this path exists.
deploy_nacos() {
  local yaml_file=$1
  # Fix: scp result was unchecked, unlike every other transfer in this script.
  scp "${baseDir}/k8s/${yaml_file}" root@"$remote_ip":"${remote_deploy_dir}/${yaml_file}"
  if [ $? -ne 0 ]; then
    echo "Failed to copy ${yaml_file}"
    exit 1
  fi
  # Fix: deploy_service was called with no argument, so kubectl apply was
  # given an empty file path and the nacos deployment could never succeed.
  deploy_service "${yaml_file}"
}

# End-to-end pipeline for one service: build+push the image, render the
# YAML template with that image, then apply it on the remote cluster.
# Arguments: $1 dockerfile name, $2 image reference, $3 yaml template name.
build_and_deploy() {
  local dockerfile=$1
  local image=$2
  local yaml_file=$3

  build_image "${dockerfile}" "${image}"
  prepare_yaml "${yaml_file}" "${image}"
  deploy_service "${yaml_file}"
}

# Dispatch: run the build/deploy pipeline for the requested service(s).
# 'deployed' tracks whether any branch matched the -s value.
deployed=0

# Build and deploy the manage (management backend) service.
if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
  build_and_deploy "managent-dockerfile" "172.20.32.187/ci4s/ci4s-managent:${tag}" "k8s-7management.yaml"
  deployed=1
fi

# Build and deploy the front (web frontend) service.
if [ "$service" == "manage-front" ] || [ "$service" == "front" ]; then
  build_and_deploy "nginx-dockerfile" "172.20.32.187/ci4s/ci4s-front:${tag}" "k8s-12front.yaml"
  deployed=1
fi

# 'all': every service plus nacos.
if [ "$service" == "all" ]; then
  build_and_deploy "nginx-dockerfile" "172.20.32.187/ci4s/ci4s-front:${tag}" "k8s-12front.yaml"
  build_and_deploy "managent-dockerfile" "172.20.32.187/ci4s/ci4s-managent:${tag}" "k8s-7management.yaml"
  build_and_deploy "auth-dockerfile" "172.20.32.187/ci4s/ci4s-auth:${tag}" "k8s-5auth.yaml"
  build_and_deploy "gateway-dockerfile" "172.20.32.187/ci4s/ci4s-gateway:${tag}" "k8s-4gateway.yaml"
  build_and_deploy "system-dockerfile" "172.20.32.187/ci4s/ci4s-system:${tag}" "k8s-6system.yaml"
  deploy_nacos "k8s-3nacos.yaml"
  deployed=1
fi

# Fix: an unknown -s value previously fell through every branch and the
# script still reported success without deploying anything.
if [ "$deployed" -ne 1 ]; then
  echo "Invalid service - $service" >&2
  show_help
  exit 1
fi

# Record end time and report total duration.
end=$(date +%s)
echo "部署成功, 耗时: $((end-start))秒"

+ 10
- 2
k8s/build-java.sh View File

@@ -1,9 +1,16 @@
#!/bin/bash #!/bin/bash


baseDir="/home/somuns/ci4s" baseDir="/home/somuns/ci4s"
cd $baseDir && mvn clean install
#判断$1是否为all,如果是,则编译所有模块,否则只编译management-platform模块
if [ "$1" == "all" ]; then
buildDir=$baseDir
else
buildDir="$baseDir/ruoyi-modules/management-platform"
fi

echo "Building $buildDir"
cd $buildDir && mvn clean install


#cd ${baseDir}/ruoyi-modules && mvn install
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Failed to build ruoyi-modules" echo "Failed to build ruoyi-modules"
exit 1 exit 1
@@ -15,3 +22,4 @@ fi








+ 69
- 38
k8s/build.sh View File

@@ -6,16 +6,14 @@ start=$(date +%s)
# 默认参数 # 默认参数
branch="master" branch="master"
service="manage-front" service="manage-front"
env="dev"


#
show_help() { show_help() {
echo "Usage: $0 [-b branch] [-s service] [-e environment]"
echo "Usage: $0 [-b branch] [-s service]"
echo echo
echo "Options:" echo "Options:"
echo " -b Branch to deploy"
echo " -s Service to deploy (manage-front, manage, front)"
echo " -e Environment (e.g., dev, prod)"
echo " -b Branch to deploy, default is master"
echo " -s Service to deploy (manage-front, manage, front, all, default is manage-front)"
echo " -h Show this help message" echo " -h Show this help message"
} }


@@ -24,7 +22,6 @@ while getopts "b:s:e:h" opt; do
case $opt in case $opt in
b) branch=$OPTARG ;; b) branch=$OPTARG ;;
s) service=$OPTARG ;; s) service=$OPTARG ;;
e) env=$OPTARG ;;
h) show_help; exit 0 ;; h) show_help; exit 0 ;;
\?) echo "Invalid option -$OPTARG" >&2; show_help; exit 1 ;; \?) echo "Invalid option -$OPTARG" >&2; show_help; exit 1 ;;
esac esac
@@ -32,7 +29,7 @@ done


echo "branch: $branch" echo "branch: $branch"
echo "service: $service" echo "service: $service"
echo "env: $env"


# 登录到目标环境 # 登录到目标环境
baseDir="/home/somuns/ci4s" baseDir="/home/somuns/ci4s"
@@ -56,39 +53,35 @@ fi
mkdir -p ${baseDir}/k8s/dockerfiles/jar mkdir -p ${baseDir}/k8s/dockerfiles/jar
mkdir -p ${baseDir}/k8s/dockerfiles/html mkdir -p ${baseDir}/k8s/dockerfiles/html


if [ "$service" == "manage-front" ] || [ "$service" == "front" ]; then
# 清理前端构建文件
if [ -d "${baseDir}/react-ui/dist" ]; then
rm -rf ${baseDir}/react-ui/dist
fi

# 编译前端
docker run -v ${baseDir}:${baseDir} \
-e http_proxy=http://172.20.32.253:3128 -e https_proxy=http://172.20.32.253:3128 \
172.20.32.187/ci4s/node:16.16.0 ${baseDir}/k8s/build-node.sh
if [ $? -ne 0 ]; then
echo "编译失败,请检查代码!"
exit 1
fi

# 复制前端文件
cp -rf ${baseDir}/react-ui/dist/ ${baseDir}/k8s/dockerfiles/html
if [ $? -ne 0 ]; then
echo "复制html文件失败,请检查代码!"
exit 1
fi
fi

if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
# 清理java构建文件
if [ -f "${baseDir}/ruoyi-modules/management-platform/target/management-platform.jar" ]; then
rm -rf ${baseDir}/ruoyi-modules/management-platform/target/management-platform.jar
fi
compile_front() {
# 清理前端构建文件
if [ -d "${baseDir}/react-ui/dist" ]; then
rm -rf ${baseDir}/react-ui/dist
fi

# 编译前端
docker run -v ${baseDir}:${baseDir} \
-e http_proxy=http://172.20.32.253:3128 -e https_proxy=http://172.20.32.253:3128 \
172.20.32.187/ci4s/node:16.16.0 ${baseDir}/k8s/build-node.sh
if [ $? -ne 0 ]; then
echo "编译失败,请检查代码!"
exit 1
fi

# 复制前端文件
cp -rf ${baseDir}/react-ui/dist/ ${baseDir}/k8s/dockerfiles/html
if [ $? -ne 0 ]; then
echo "复制html文件失败,请检查代码!"
exit 1
fi
}


# 编译java
compile_java() {
param=$1
# 编译java
docker run -v ${baseDir}:${baseDir} -v /home/maven:/home/maven \ docker run -v ${baseDir}:${baseDir} -v /home/maven:/home/maven \
-e http_proxy=http://172.20.32.253:3128 -e https_proxy=http://172.20.32.253:3128 \ -e http_proxy=http://172.20.32.253:3128 -e https_proxy=http://172.20.32.253:3128 \
172.20.32.187/ci4s/build:v1 ${baseDir}/k8s/build-java.sh
172.20.32.187/ci4s/build:v1 ${baseDir}/k8s/build-java.sh $param
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "编译失败,请检查代码!" echo "编译失败,请检查代码!"
exit 1 exit 1
@@ -100,9 +93,47 @@ if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
echo "复制jar包失败,请检查代码!" echo "复制jar包失败,请检查代码!"
exit 1 exit 1
fi fi

if [ "$param" == "all" ]; then
cp -rf ${baseDir}/ruoyi-modules/ruoyi-system/target/ruoyi-modules-system.jar ${baseDir}/k8s/dockerfiles/jar/ruoyi-modules-system.jar
if [ $? -ne 0 ]; then
echo "复制jar包失败,请检查代码!"
exit 1
fi

cp -rf ${baseDir}/ruoyi-auth/target/ruoyi-auth.jar ${baseDir}/k8s/dockerfiles/jar/ruoyi-auth.jar
if [ $? -ne 0 ]; then
echo "复制jar包失败,请检查代码!"
exit 1
fi

cp -rf ${baseDir}/ruoyi-gateway/target/ruoyi-gateway.jar ${baseDir}/k8s/dockerfiles/jar/ruoyi-gateway.jar
if [ $? -ne 0 ]; then
echo "复制jar包失败,请检查代码!"
exit 1
fi
fi
}

if [ "$service" == "manage-front" ] || [ "$service" == "front" ]; then
# 编译前端
compile_front
fi fi




if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
# 编译java
compile_java $service
fi

if [ "$service" == "all" ]; then
# 编译前端
compile_front

# 编译java
compile_java "all"
fi



# 记录结束时间 # 记录结束时间
end=$(date +%s) end=$(date +%s)


+ 1
- 1
k8s/build_and_deploy.sh View File

@@ -46,7 +46,7 @@ echo "build success"


# 部署 # 部署
echo "start deploy" echo "start deploy"
sh ${baseDir}/k8s/deploy.sh -s ${service} -e ${env}
sh ${baseDir}/k8s/AAdeploy.sh -s ${service} -e ${env}
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Deploy failed" echo "Deploy failed"
exit 1 exit 1


+ 17
- 3
k8s/deploy.sh View File

@@ -6,11 +6,21 @@ start=$(date +%s)
service="manage-front" service="manage-front"
env="dev" env="dev"


show_help() {
echo "Usage: $0 [-s service] [-e environment]"
echo
echo "Options:"
echo " -s Service to deploy (manage-front, manage, front, all ,default: manage-front)"
echo " -e Environment (e.g., dev, test, default: dev)"
echo " -h Show this help message"
}

# 解析命令行参数 # 解析命令行参数
while getopts "s:e:" opt; do
while getopts "s:e:h" opt; do
case $opt in case $opt in
s) service=$OPTARG ;; s) service=$OPTARG ;;
e) env=$OPTARG ;; e) env=$OPTARG ;;
h) show_help; exit 0 ;;
\?) echo "Invalid option -$OPTARG" >&2; exit 1 ;; \?) echo "Invalid option -$OPTARG" >&2; exit 1 ;;
esac esac
done done
@@ -52,7 +62,9 @@ if [ "$service" == "manage-front" ] || [ "$service" == "manage" ]; then
cp -rf ${manager_yaml} deploy/ cp -rf ${manager_yaml} deploy/
# 镜像替换 # 镜像替换
cd deploy/ cd deploy/
sed -i "s#managenent-image#${managent}#g" ${manager_yaml}
placeholder="\${k8s-7management-image}"
#sed -i "s#managenent-image#${managent}#g" ${manager_yaml}
sed -i "s#$placeholder#${managent}#g" ${manager_yaml}
if [ "$?" -ne "0" ];then if [ "$?" -ne "0" ];then
echo "replace ${managent} image fail" echo "replace ${managent} image fail"
exit 3 exit 3
@@ -93,7 +105,9 @@ if [ "$service" == "manage-front" ] || [ "$service" == "front" ]; then


# 镜像替换 # 镜像替换
cd deploy/ cd deploy/
sed -i "s#front-image#${front}#g" ${front_yaml}
placeholder="\${k8s-12front-image}"
#sed -i "s#front-image#${front}#g" ${front_yaml}
sed -i "s#$placeholder#${front}#g" ${front_yaml}
if [ "$?" -ne "0" ];then if [ "$?" -ne "0" ];then
echo "replace ${front} image fail" echo "replace ${front} image fail"
exit 4 exit 4


+ 36
- 0
k8s/template-yaml/deploy/k8s-12front.yaml View File

@@ -0,0 +1,36 @@
# Rendered manifest: ci4s-front Deployment + NodePort Service (namespace argo).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-front-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-front
  template:
    metadata:
      labels:
        app: ci4s-front
    spec:
      containers:
        - name: ci4s-front
          image: 172.20.32.187/ci4s/ci4s-front:202406120836
          ports:
            - containerPort: 8000

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-front-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 8000
      nodePort: 31213
      protocol: TCP
  selector:
    app: ci4s-front


+ 36
- 0
k8s/template-yaml/deploy/k8s-7management.yaml View File

@@ -0,0 +1,36 @@
# Rendered manifest: ci4s-management-platform Deployment + NodePort Service.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-management-platform-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-management-platform
  template:
    metadata:
      labels:
        app: ci4s-management-platform
    spec:
      containers:
        - name: ci4s-management-platform
          image: 172.20.32.187/ci4s/managent:202406121003
          ports:
            - containerPort: 9213

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-management-platform-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9213
      nodePort: 31208
      protocol: TCP
  selector:
    app: ci4s-management-platform


+ 36
- 0
k8s/template-yaml/k8s-10gen.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-gen Deployment + NodePort Service.
# ${k8s-10gen-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-gen-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-gen
  template:
    metadata:
      labels:
        app: ci4s-gen
    spec:
      containers:
        - name: ci4s-gen
          image: ${k8s-10gen-image}
          ports:
            - containerPort: 9202

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-gen-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9202
      nodePort: 31211
      protocol: TCP
  selector:
    app: ci4s-gen


+ 36
- 0
k8s/template-yaml/k8s-11visual.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-visual Deployment + NodePort Service.
# ${k8s-11visual-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-visual-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-visual
  template:
    metadata:
      labels:
        app: ci4s-visual
    spec:
      containers:
        - name: ci4s-visual
          image: ${k8s-11visual-image}
          ports:
            - containerPort: 9100

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-visual-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9100
      nodePort: 31212
      protocol: TCP
  selector:
    app: ci4s-visual


+ 36
- 0
k8s/template-yaml/k8s-12front.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-front Deployment + NodePort Service.
# ${k8s-12front-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-front-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-front
  template:
    metadata:
      labels:
        app: ci4s-front
    spec:
      containers:
        - name: ci4s-front
          image: ${k8s-12front-image}
          ports:
            - containerPort: 8000

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-front-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 8000
      nodePort: 31213
      protocol: TCP
  selector:
    app: ci4s-front


+ 62
- 0
k8s/template-yaml/k8s-3nacos.yaml View File

@@ -0,0 +1,62 @@
# Template: standalone nacos Deployment + NodePort Service backed by MySQL.
# ${k8s-3nacos-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: argo
  name: nacos-ci4s
  labels:
    app: nacos-ci4s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nacos-ci4s
  template:
    metadata:
      labels:
        app: nacos-ci4s
    spec:
      containers:
        - name: nacos-ci4s
          image: ${k8s-3nacos-image}
          env:
            - name: SPRING_DATASOURCE_PLATFORM
              value: mysql
            - name: MODE
              value: standalone
            - name: MYSQL_SERVICE_HOST
              value: mysql2.argo.svc
            - name: MYSQL_SERVICE_PORT
              value: "3306"
            - name: MYSQL_SERVICE_DB_NAME
              value: nacos-ci4s-config
            - name: MYSQL_SERVICE_USER
              value: root
            # NOTE(review): plaintext DB password committed in the manifest —
            # consider sourcing it from a Kubernetes Secret instead.
            - name: MYSQL_SERVICE_PASSWORD
              value: qazxc123456.
          ports:
            - containerPort: 8848
            - containerPort: 9848
      restartPolicy: Always

---
apiVersion: v1
kind: Service
metadata:
  namespace: argo
  name: nacos-ci4s
  labels:
    app: nacos-ci4s
spec:
  type: NodePort
  selector:
    app: nacos-ci4s
  ports:
    - port: 8848
      targetPort: 8848
      nodePort: 31203
      name: web
    - port: 9848
      targetPort: 9848
      nodePort: 31204
      name: podsa

+ 36
- 0
k8s/template-yaml/k8s-4gateway.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-gateway Deployment + NodePort Service.
# ${k8s-4gateway-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-gateway-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-gateway
  template:
    metadata:
      labels:
        app: ci4s-gateway
    spec:
      containers:
        - name: ci4s-gateway
          image: ${k8s-4gateway-image}
          ports:
            - containerPort: 8082

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-gateway-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 8082
      nodePort: 31205
      protocol: TCP
  selector:
    app: ci4s-gateway


+ 36
- 0
k8s/template-yaml/k8s-5auth.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-auth Deployment + NodePort Service.
# ${k8s-5auth-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-auth-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-auth
  template:
    metadata:
      labels:
        app: ci4s-auth
    spec:
      containers:
        - name: ci4s-auth
          image: ${k8s-5auth-image}
          ports:
            - containerPort: 9200

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-auth-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9200
      nodePort: 31206
      protocol: TCP
  selector:
    app: ci4s-auth


+ 36
- 0
k8s/template-yaml/k8s-6system.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-system Deployment + NodePort Service.
# ${k8s-6system-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-system-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-system
  template:
    metadata:
      labels:
        app: ci4s-system
    spec:
      containers:
        - name: ci4s-system
          image: ${k8s-6system-image}
          ports:
            - containerPort: 9201

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-system-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9201
      nodePort: 31207
      protocol: TCP
  selector:
    app: ci4s-system


+ 36
- 0
k8s/template-yaml/k8s-7management.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-management-platform Deployment + NodePort Service.
# ${k8s-7management-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-management-platform-deployment
  namespace: argo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-management-platform
  template:
    metadata:
      labels:
        app: ci4s-management-platform
    spec:
      containers:
        - name: ci4s-management-platform
          image: ${k8s-7management-image}
          ports:
            - containerPort: 9213

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-management-platform-service
  namespace: argo
spec:
  type: NodePort
  ports:
    - port: 9213
      nodePort: 31208
      protocol: TCP
  selector:
    app: ci4s-management-platform


+ 36
- 0
k8s/template-yaml/k8s-8file.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-file Deployment + NodePort Service.
# ${k8s-8file-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-file-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-file
  template:
    metadata:
      labels:
        app: ci4s-file
    spec:
      containers:
        - name: ci4s-file
          image: ${k8s-8file-image}
          ports:
            - containerPort: 9300

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-file-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9300
      nodePort: 31209
      protocol: TCP
  selector:
    app: ci4s-file


+ 36
- 0
k8s/template-yaml/k8s-9job.yaml View File

@@ -0,0 +1,36 @@
# Template: ci4s-job Deployment + NodePort Service.
# ${k8s-9job-image} is substituted by the deploy script.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-job-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-job
  template:
    metadata:
      labels:
        app: ci4s-job
    spec:
      containers:
        - name: ci4s-job
          image: ${k8s-9job-image}
          ports:
            - containerPort: 9203

---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-job-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
    - port: 9203
      nodePort: 31210
      protocol: TCP
  selector:
    app: ci4s-job


Loading…
Cancel
Save