Commit edb4c418 authored by admin's avatar admin
Browse files

环境代码变更

parent 9145b746
Showing with 802 additions and 5 deletions
+802 -5
# 5ea7e4-aliyun-cpg
# cloudiac-alicloud-cpg
......@@ -15,14 +15,14 @@ Already a pro? Just edit this README.md and make it your own. Want to make it ea
```
cd existing_repo
git remote add origin https://gitlab.yun.shop/eos-test/5ea7e4-aliyun-cpg.git
git branch -M master
git push -uf origin master
git remote add origin http://121.199.6.236/ydd-official-modules/cloudiac-alicloud-cpg.git
git branch -M main
git push -uf origin main
```
## Integrate with your tools
- [ ] [Set up project integrations](https://gitlab.yun.shop/eos-test/5ea7e4-aliyun-cpg/-/settings/integrations)
- [ ] [Set up project integrations](http://121.199.6.236/ydd-official-modules/cloudiac-alicloud-cpg/-/settings/integrations)
## Collaborate with your team
......
---
# Back up a cloudiac environment's MySQL database and Consul KV store to OSS.
# Runs only when the controller passes is_backup == "true"; only environments
# whose status is "active" are exported.
- hosts: cloudiac
  gather_facts: false
  tasks:
    - name: Wait connection
      # Retry the connection once per second until success or timeout.
      wait_for_connection:
        sleep: 1
        timeout: "{{ wait_connection_timeout }}"
    - name: export mysql data to oss
      shell: |
        # Only back up environments that are currently active.
        if [[ "{{ cloudiac_env_status }}" == "active" ]]; then
          # Bootstrap the ossutil client and its credentials file on first run.
          if [[ ! -f ossutil64 ]]; then
            curl -LO https://gosspublic.alicdn.com/ossutil/1.7.8/ossutil64
            chmod +x ossutil64
            cat > /root/.ossutilconfig <<EOF
        [Credentials]
        language=EN
        endpoint={{ oss_address }}
        accessKeyID={{ oss_ak }}
        accessKeySecret={{ oss_sk }}
        EOF
          fi
          # Dump the cloudiac database and copy it out of the mysql container.
          docker exec -t mysql bash -c "mysqldump -B -uroot -p{{ mysql_root_pw }} cloudiac > /tmp/{{ cloudiac_env_id }}.sql" && docker cp mysql:/tmp/{{ cloudiac_env_id }}.sql .
          # Export the Consul KV tree and copy it out of the consul container.
          docker exec -t consul sh -c "consul kv export --http-addr={{ consul_address }} '' > /tmp/{{ cloudiac_env_id }}.json" && docker cp consul:/tmp/{{ cloudiac_env_id }}.json .
          # Bundle both dumps and push the archive to the OSS bucket.
          tar czvf {{ cloudiac_env_id }}.tgz {{ cloudiac_env_id }}.sql {{ cloudiac_env_id }}.json
          ./ossutil64 cp -f {{ cloudiac_env_id }}.tgz oss://cloudcmp/
        else
          echo "Env id {{ cloudiac_env_id }} not active,skip export."
        fi
      args:
        # [[ ]] is a bashism; do not rely on the remote default shell.
        executable: /bin/bash
      when: is_backup == "true"
\ No newline at end of file
---
# Restore a cloudiac environment's MySQL database and Consul KV store from
# the backup archive previously pushed to OSS by the export playbook.
- hosts: cloudiac
  gather_facts: false
  tasks:
    - name: Wait connection
      # Retry the connection once per second until success or timeout.
      wait_for_connection:
        sleep: 1
        timeout: "{{ wait_connection_timeout }}"
    - name: import mysql data from oss
      shell: |
        echo "{{ cloudiac_env_status }} Start."
        sleep 30s
        # Only restore into environments that are not running
        # (inactive / destroyed / failed); never overwrite an active one.
        if [[ ( "{{ cloudiac_env_status }}" == inactive ) || ( "{{ cloudiac_env_status }}" == destroyed ) || ( "{{ cloudiac_env_status }}" == failed ) ]]; then
          # Bootstrap the ossutil client and its credentials file on first run.
          if [[ ! -f ossutil64 ]]; then
            curl -LO https://gosspublic.alicdn.com/ossutil/1.7.8/ossutil64
            chmod +x ossutil64
            cat > /root/.ossutilconfig <<EOF
        [Credentials]
        language=EN
        endpoint={{ oss_address }}
        accessKeyID={{ oss_ak }}
        accessKeySecret={{ oss_sk }}
        EOF
          fi
          # Fetch the backup archive; tolerate a missing object (first deploy).
          ./ossutil64 cp -f oss://cloudcmp/{{ cloudiac_env_id }}.tgz . || :
          if [[ -f {{ cloudiac_env_id }}.tgz ]]; then
            tar xzvf {{ cloudiac_env_id }}.tgz
          fi
          # Import the MySQL dump if present.
          if [[ -f {{ cloudiac_env_id }}.sql ]]; then
            docker cp {{ cloudiac_env_id }}.sql mysql:/tmp/
            docker exec -t mysql bash -c "mysql -uroot -p{{ mysql_root_pw }} < /tmp/{{ cloudiac_env_id }}.sql"
          fi
          # Import the Consul KV export if present.
          if [[ -f {{ cloudiac_env_id }}.json ]]; then
            docker cp {{ cloudiac_env_id }}.json consul:/tmp/
            docker exec -t consul sh -c "consul kv import --http-addr={{ consul_address }} @/tmp/{{ cloudiac_env_id }}.json"
          fi
        else
          echo "Env id {{ cloudiac_env_id }} is active,skip import."
        fi
      args:
        # [[ ]] is a bashism; do not rely on the remote default shell.
        executable: /bin/bash
      when: is_backup == "true"
\ No newline at end of file
#!/usr/bin/env bash
# copy_config.sh — copy /usr/yunji/conf.d out of the running container whose
# name matches the keyword in $1 (e.g. uc, cpm-core, catalog) into ./$1.
set -euo pipefail

if [[ -z "${1:-}" ]]; then
  echo "usage: $0 <container-name-keyword>" >&2
  exit 1
fi

# First matching container id; grep over names keeps the original behavior.
container_id=$(docker ps --format 'table {{.ID}} {{.Names}}' | grep "$1" | awk '{print $1}')
docker cp "${container_id}:/usr/yunji/conf.d" "$1"
#!/usr/bin/env bash
# repull.sh — remove the container and image matching keyword $1, then
# recreate the services from the compose file in /opt/idcos/cloudcmp so the
# image is pulled fresh.
set -euo pipefail

if [[ -z "${1:-}" ]]; then
  echo "usage: $0 <container-name-keyword>" >&2
  exit 1
fi

containerId=$(docker ps | grep "$1" | awk '{print $1}')
docker stop "$containerId"
docker rm "$containerId"
imageId=$(docker images | grep "$1" | awk '{print $3}')
docker image rm "$imageId"
cd /opt/idcos/cloudcmp
# NOTE(review): assumes exactly one *.yml compose file in this directory —
# with several files the glob would pass extra arguments after -f. Confirm.
docker-compose -f *.yml up -d
---
# Full deployment entry playbook: bootstrap docker on every host, bring up
# the calico/etcd network, the DB middleware, cloudiac + gitea (with optional
# restore), then the CMP controller / core / web / mid service tiers.
- hosts: all
  gather_facts: false
  remote_user: root
  any_errors_fatal: true
  tasks:
    - name: Wait connection
      # Retry once per second until the host is reachable or the timeout hits.
      wait_for_connection:
        sleep: 1
        timeout: "{{ wait_connection_timeout }}"
    - name: install docker
      # Only download and run the installer when docker is not already active.
      shell: |
        systemctl is-active docker || (wget -P /root {{ docker_installscript }} && bash /root/docker-deploy.sh)
    # - name: Copy Docker deploy script to /root
    #   copy:
    #     src: "{{ playbook_dir }}/pkg/docker/docker-deploy.sh"
    #     dest: "/root/docker-deploy.sh"
    #     mode: "0755"  # set execute permission
    #
    # - name: Install Docker
    #   shell: |
    #     systemctl is-active docker || (bash /root/docker-deploy.sh)
    #   args:
    #     executable: /bin/bash
    - name: docker login
      # NOTE(review): registry credentials are hardcoded in the playbook;
      # move them into vaulted variables. no_log keeps them out of task logs.
      shell: |
        docker login --username=idcos --password=Yunjikeji#123 registry-vpc.cn-hangzhou.aliyuncs.com
      no_log: true
    # - name: get base shell
    #   shell: |
    #     wget http://yum.idcos.com/cloudcmp/backup/repull.sh && \
    #     wget http://yum.idcos.com/cloudcmp/backup/copy_config.sh
    - name: Copy Docker repull script to /root
      copy:
        src: "{{ playbook_dir }}/pkg/docker/repull.sh"
        dest: "/root/repull.sh"
        mode: "0755"  # set execute permission
    - name: Copy Docker copy_config script to /root
      copy:
        src: "{{ playbook_dir }}/pkg/docker/copy_config.sh"
        dest: "/root/copy_config.sh"
        mode: "0755"  # set execute permission

# Container networking layer (calico over etcd).
- hosts: etcd
  remote_user: root
  any_errors_fatal: true
  roles:
    - calico

- hosts: db-middleware-server
  gather_facts: false
  remote_user: root
  any_errors_fatal: true
  roles:
    - role: db-middleware-server
      vars:
        workdir: "/usr/yunji/cloudiac/var"
        gitea_dir: "{{ workdir }}/gitea"
        dump_package_url: "{{ restore_package | default('') }}"
        gitea_vcs_addr: "http://{{ gitea_domain }}:3000"
        # Restore mode is enabled by supplying restore_package.
        restore_from_backup: "{{ (restore_package | default('')) != '' }}"

- hosts: cloudiac
  gather_facts: true
  remote_user: root
  any_errors_fatal: true
  # vars:
  #   cloudiac_db: "cloudiac"
  #   gitea_db: "gitea_db"
  roles:
    - role: "iac-gitea-restore"
      vars:
        # mysql_host: localhost
        # mysql_port: 3306
        # force_restore: false
        # mysql_user: root
        mysql_pass: "{{ mysql_password }}"
        workdir: "/usr/yunji/cloudiac/var"
        gitea_dir: "{{ workdir }}/gitea"
        dump_package_url: "{{ restore_package | default('') }}"
        gitea_vcs_addr: "http://{{ gitea_domain }}:3000"
      # Run the restore role only when a backup package URL was supplied.
      when: dump_package_url != ""
    - role: cloudiac
      vars:
        # Force the string flag into a real boolean.
        deploy_gitea: "{{ _deploy_gitea | bool }}"
        mysql_db: "{{ cloudiac_db }}"
        mysql_pw: "{{ mysql_password }}"

- hosts: cmp-controller-server
  gather_facts: false
  remote_user: root
  any_errors_fatal: true
  roles:
    - cmp-controller-server

- hosts: cmp-core-server
  gather_facts: false
  remote_user: root
  any_errors_fatal: true
  roles:
    - cmp-core-server

- hosts: web-server
  gather_facts: false
  remote_user: root
  any_errors_fatal: true
  roles:
    - web-server

- hosts: mid-server
  gather_facts: false
  remote_user: root
  any_errors_fatal: true
  roles:
    - mid-server

# Duplicate content (kept commented out for reference)
# - hosts: cloudiac
#   gather_facts: true
#   remote_user: root
#   any_errors_fatal: true
#   # vars:
#   #   cloudiac_db: "cloudiac"
#   #   gitea_db: "gitea_db"
#   roles:
#     - role: cloudiac
#       vars:
#         # force the string flag into a real boolean
#         deploy_gitea: "{{ _deploy_gitea | bool }}"
#         mysql_db: "{{ cloudiac_db }}"
#         mysql_pw: "{{ mysql_password }}"
#
#     - role: "iac-gitea-restore"
#       vars:
#         # mysql_host: localhost
#         # mysql_port: 3306
#         # force_restore: false
#         mysql_user: root
#         mysql_pass: "{{ mysql_root_pw }}"
#         workdir: "/usr/yunji/cloudiac/var"
#         gitea_dir: "{{ workdir }}/gitea"
#         dump_package_url: "{{ restore_package | default('') }}"
#         gitea_vcs_addr: "http://{{ gitea_domain }}:3000"
#       when: dump_package_url != ""
# Calico v2 resources applied via calicoctl: the container IP pool (with
# IP-in-IP encapsulation and outbound NAT) and an allow-all policy.
- apiVersion: v1
  kind: ipPool
  metadata:
    cidr: 10.88.0.0/24
  spec:
    ipip:
      enabled: true
      mode: always
    nat-outgoing: true
    disabled: false
- apiVersion: v1
  kind: policy
  metadata:
    name: allow-all-tcp
  spec:
    types:
      - ingress
      - egress
    # Permit all traffic in both directions.
    ingress:
      - action: allow
    egress:
      - action: allow
[Unit]
# Run the Calico node agent as a docker container supervised by systemd.
Description=calico-node
After=docker.service
Requires=docker.service
[Service]
# Node/etcd settings (IPs, endpoints, TLS file paths) are read from calico.env.
EnvironmentFile=/opt/platform/calico/calico.env
# Leading "-": ignore failure when there is no stale container to remove.
ExecStartPre=-/usr/bin/docker rm -f calico-node
# Foreground container with host networking and privileged mode; all CALICO_*
# and ETCD_* variables come from the EnvironmentFile above.
ExecStart=/usr/bin/docker run --net=host --privileged \
--name=calico-node \
-e NODENAME=${CALICO_NODENAME} \
-e IP=${CALICO_IP} \
-e IP6=${CALICO_IP6} \
-e CALICO_NETWORKING_BACKEND=${CALICO_NETWORKING_BACKEND} \
-e AS=${CALICO_AS} \
-e NO_DEFAULT_POOLS=${CALICO_NO_DEFAULT_POOLS} \
-e CALICO_LIBNETWORK_ENABLED=${CALICO_LIBNETWORK_ENABLED} \
-e ETCD_ENDPOINTS=${ETCD_ENDPOINTS} \
-e ETCD_CA_CERT_FILE=${ETCD_CA_CERT_FILE} \
-e ETCD_CERT_FILE=${ETCD_CERT_FILE} \
-e ETCD_KEY_FILE=${ETCD_KEY_FILE} \
-v /opt/ssl:/etc/calico/certs \
-v /opt/platform/calico/log:/var/log/calico \
-v /run/docker/plugins:/run/docker/plugins \
-v /lib/modules:/lib/modules \
-v /var/run/calico:/var/run/calico \
-v /var/run/docker.sock:/var/run/docker.sock \
quay.io/calico/node:v2.6.12
#calico/node:
# Leading "-": ignore failure if the container is already gone.
ExecStop=-/usr/bin/docker stop calico-node
Restart=on-failure
# Allow at most 3 restart attempts within a 60-second window.
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "87600h"
},
"server": {
"expiry": "1000000h",
"usages": ["signing", "key encipherment", "server auth", "client auth"]
},
"client": {
"expiry": "1000000h",
"usages": ["signing", "key encipherment", "client auth"]
},
"peer": {
"expiry": "43800h",
"usages": ["signing", "key encipherment", "server auth", "client auth"]
}
}
}
}
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Wuhan",
"L": "Hubei",
"O": "k8s",
"OU": "System"
}
]
}
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAxe7Jgynt1OUm77gXWrY+hCr4QyfZFOtBrGe4v1mMCBA+nD0U
N001XkpnDOHtbnXxQfCxskowJDGv0o4e1Mjijz1GeES0NFroepC6a2or0IjgvQZw
aZafYwmPgol5a8D62JSTuBVbLiDIWJ0lUcVrLYw9iGYIUx9zjfw9LuNO0xX3EIlY
BD/XGLeb728hDEuvzF21XHklOPdR8LQel3fThTNPK9pwfmKWZPquMG6Dl9SYAE7q
AP02ognftVDQdhL2/cKtmg+p1l62vuALnJcOCJCfjyP74BXUEP/BG871bXYgTnHu
Fax+ULy00pbA7fHn/ptwBx7xdG0XkKKQy/T2EwIDAQABAoIBAQCP+jqrJnzSOD4y
cf7LBXZEPd2JPBs0lEyZVHmW91RdKTPiAqwJ7ie8BvMe89j6WOC9zo/z2PNb4Ki8
yJS0w8vJ1hDEu3U6AEbLoSt5pyln76lK1rw+kkouiG3Tzpx8PUw67xjk9uP0kfQq
QWU8LhbgcvrvlQjJLj+oMKtwx/g2oqO3BDIYcfqjI0xk1GuAZnnVjUcrapW9HNNt
5dRiMXr8r98WOjEUwjbmxx749QFJrIqKOHj59YsvUcugX67ZhLjLLnR5w0jwO04E
SxJAp4mfcGdOKitWzGQNq+AmLPO5vY/hoyNwjwkHHnX52bYjafLtCd0rbsPgMR3G
/wstSksBAoGBAO2U8TWG8Mby2vOyvXSrBTu7skpx54M0ll+uC7O+ClGqfKk+gIoU
7O/vjLpGiEt8xZGBWHeKoX90yxWfQJ0gG2v1Set1MUIstbqi11rC8tK+QBUc5U2x
kdqk62+rHAMPa1UOJnJUHSSj7YHAt6ElCSra6C/Ml5Q4rxUTp4GpTdTRAoGBANVG
9+4YlpNHKs3pmsYWV4ogAtv731d7lF3nclF1r4JhNONT2O/pa57vcnnG/oXI7i5e
o/JBhm7vOhdGKTUiOUKESYED4GjYw8Ioh49oN4et17NPI29/Xj1qynsvwjr6JxiV
eAGDrxbhvTb6PsDxrYxtkOnvoMHlEIwW1p3/cGWjAoGAaMrYdf62N1qpXvKJBGvm
3AkXRCTntAS/CybuvHJnhMwJPnF2KvG97LshvIwd1AYN8vc1eUzpnRl79maPDn85
+S1xVG0VtBWadt8a1ErG5lPKWkWrktBvABzcSB4WNfdYJWUfajBVxnKT8TZ7CJYY
bBGwdlzswxEZkNdhfBycWvECgYEA1QN8vSFxwsdhySd4nj/STvKJ6I04FHgpR2DS
nOmd0eqkmCg2k+fexW8CNv65V4XH3ihINr2bN1FYA8bfgWIdLoJRp7nneNCHNHSm
cGT2kVzgAwcXrVTZGx59/WtF4wHV6ofexCRxy7jK8w4dfzfizk7gXcYoGYBfdouY
71UhPWECgYEAzayXYbFvxvV6fZWx1n4cle+MmqK7OACmlvfiQoUi05QyTqkgTDS6
G3iZOClYMIxldGNgxTlqTWg3ywC9rAs75PPOIKXCbCkjqf0an+IUBMzjrqCbJFVY
dfWEz0zBZQDQLFWbD7N+XmDLOfzRuNmwupTmPxIZK5jYRUaeUcIheNA=
-----END RSA PRIVATE KEY-----
\ No newline at end of file
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDF7smDKe3U5SbvuBdatj6EKvhDJ9kU60GsZ7i/WYwIED6cPRQ3TTVeSmcM4e1udfFB8LGySjAkMa/Sjh7UyOKPPUZ4RLQ0Wuh6kLpraivQiOC9BnBplp9jCY+CiXlrwPrYlJO4FVsuIMhYnSVRxWstjD2IZghTH3ON/D0u407TFfcQiVgEP9cYt5vvbyEMS6/MXbVceSU491HwtB6Xd9OFM08r2nB+YpZk+q4wboOX1JgATuoA/TaiCd+1UNB2Evb9wq2aD6nWXra+4Auclw4IkJ+PI/vgFdQQ/8EbzvVtdiBOce4VrH5QvLTSlsDt8ef+m3AHHvF0bReQopDL9PYT root@terraform-test
\ No newline at end of file
---
# Bootstrap the calico/etcd network layer on the etcd host group:
# enable IP forwarding, build a cfssl PKI, deploy a three-node etcd cluster,
# then install and start calico-node. Tasks guarded by
# `not calico_result.stat.exists` run only on first-time provisioning;
# `ansible_play_hosts.index(inventory_hostname) == 0` restricts a task to the
# first host of the play (the PKI "leader").
- name: 获取文件状态
  # Presence of /opt/platform/calico/ marks an already-provisioned host.
  stat:
    path: /opt/platform/calico/
  register: calico_result

- name: 开启转发
  # Enable IPv4 forwarding and disable IPv6 persistently.
  lineinfile:
    path: /etc/sysctl.conf
    line: "{{ item }}"
  loop:
    - "net.ipv4.ip_forward=1"
    - "net.ipv6.conf.all.disable_ipv6=1"
    - "net.ipv6.conf.default.disable_ipv6=1"
    - "net.ipv6.conf.lo.disable_ipv6=1"

- name: 使路由转发生效
  shell: sysctl -p

# - name: 生成证书命令
#   copy:
#     src: "{{ item }}"
#     dest: /usr/local/bin/
#     mode: +x
#   loop:
#     - cfssl
#     - cfssljson
#   when:
#     - ansible_play_hosts.index(inventory_hostname) == 0

- name: 生成证书命令
  # Fetch the cfssl toolchain used to mint the etcd TLS certificates.
  get_url:
    url: "{{ item.url }}"
    dest: "/usr/local/bin/"
    mode: "+x"
  loop:
    - { url: "http://yum.idcos.com/cloudcmp/install/cfssl" }
    - { url: "http://yum.idcos.com/cloudcmp/install/cfssljson" }

- name: 创建证书存放路径
  file:
    path: "{{ item }}"
    state: directory
    recurse: true
  loop:
    - /opt/platform/calico/
    - /etc/calico
    - /opt/ssl

- name: 创建证书存放路径
  # Ship the CA CSR and cfssl signing profile to the first play host only.
  copy:
    src: "{{ item }}"
    dest: /opt/ssl
  loop:
    - csr.json
    - config.json
  when:
    - ansible_play_hosts.index(inventory_hostname) == 0

- name: 创建证书存放路径
  # Render the etcd CSR (embeds the three member IPs as SANs).
  template:
    src: etcd-csr.json.j2
    dest: /opt/ssl/etcd-csr.json
  when:
    - ansible_play_hosts.index(inventory_hostname) == 0

- name: 证书签名
  # Generate the CA, then sign the etcd certificate against it.
  shell: |
    cfssl gencert -initca csr.json | cfssljson -bare ca
    cfssl gencert -ca=/opt/ssl/ca.pem -ca-key=/opt/ssl/ca-key.pem -config=/opt/ssl/config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
  args:
    chdir: /opt/ssl/
  when: ansible_play_hosts.index(inventory_hostname) == 0 and not calico_result.stat.exists

- name: 存放id_rsa文件
  # Private key on the leader so it can scp certs to the other members.
  copy:
    src: id_rsa
    dest: /root/.ssh/
    owner: root
    group: root
    mode: '0600'
  when:
    - ansible_play_hosts.index(inventory_hostname) == 0 and not calico_result.stat.exists

- name: 存放id_rsa.pub文件
  # Matching public key authorized on every member.
  lineinfile:
    path: /root/.ssh/authorized_keys
    line: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDF7smDKe3U5SbvuBdatj6EKvhDJ9kU60GsZ7i/WYwIED6cPRQ3TTVeSmcM4e1udfFB8LGySjAkMa/Sjh7UyOKPPUZ4RLQ0Wuh6kLpraivQiOC9BnBplp9jCY+CiXlrwPrYlJO4FVsuIMhYnSVRxWstjD2IZghTH3ON/D0u407TFfcQiVgEP9cYt5vvbyEMS6/MXbVceSU491HwtB6Xd9OFM08r2nB+YpZk+q4wboOX1JgATuoA/TaiCd+1UNB2Evb9wq2aD6nWXra+4Auclw4IkJ+PI/vgFdQQ/8EbzvVtdiBOce4VrH5QvLTSlsDt8ef+m3AHHvF0bReQopDL9PYT root@terraform-test"
  when: not calico_result.stat.exists

- name: scp 免应答
  # Disable host-key prompting so the scp below is non-interactive.
  lineinfile:
    path: /etc/ssh/ssh_config
    regexp: "# StrictHostKeyChecking ask"
    line: "    StrictHostKeyChecking no"
  when: ansible_play_hosts.index(inventory_hostname) == 0 and not calico_result.stat.exists

- name: 重启 sshd
  service:
    name: sshd
    enabled: true
    state: restarted

- name: 拷贝证书到其他服务器
  shell: scp -rp /opt/ssl/{ca,etcd,etcd-key}.pem root@"{{ item }}":/opt/ssl/
  loop:
    - "{{ etcdip1 }}"
    - "{{ etcdip2 }}"
    - "{{ etcdip3 }}"
  when: ansible_play_hosts.index(inventory_hostname) == 0 and not calico_result.stat.exists

- name: 存放脚本到本地
  template:
    src: etcd_cluster.sh.j2
    dest: /tmp/etcd_cluster.sh

- name: 部署 etcd 集群
  shell: bash /tmp/etcd_cluster.sh
  when: not calico_result.stat.exists

- name: 等待集群状态
  shell: sleep 10
  when: not calico_result.stat.exists

- name: 检查集群状态
  # No -it here: Ansible shell tasks have no TTY, so `docker exec -it` fails.
  shell: docker exec etcd sh -c "etcdctl --endpoint=https://127.0.0.1:2379 --cert-file=/opt/ssl/etcd.pem --key-file=/opt/ssl/etcd-key.pem --ca-file=/opt/ssl/ca.pem cluster-health"
  register: etcd_result

- name: 输出集群状态
  debug:
    msg: "{{ etcd_result.stdout }}"

# - name: 配置calico命令
#   copy:
#     src: calicoctl
#     dest: /usr/local/bin/
#     mode: +x

- name: 配置calico命令
  get_url:
    url: "http://yum.idcos.com/cloudcmp/install/calicoctl"
    dest: "/usr/local/bin/"
    mode: "+x"

- name: calico 默认配置
  template:
    src: calicoctl.cfg.j2
    dest: /etc/calico/calicoctl.cfg

- name: calico.env 配置
  template:
    src: calico.env.j2
    dest: /opt/platform/calico/calico.env

- name: 配置systemctl管理calico
  copy:
    src: calico-node.service
    dest: /usr/lib/systemd/system/calico-node.service

- name: 重新加载系统服务文件
  systemd:
    daemon_reload: true

- name: 生成 calico 网络文件
  copy:
    src: 2v-ipPool.yaml
    dest: /root/2v-ipPool.yaml
  when: ansible_play_hosts.index(inventory_hostname) == 0

- name: 创建 calico 地址池
  shell: calicoctl apply -f /root/2v-ipPool.yaml
  when: ansible_play_hosts.index(inventory_hostname) == 0 and not calico_result.stat.exists

- name: 启动 calico 网络
  service:
    name: calico-node
    enabled: true
    state: started
---
# calico role entry point: delegate to the etcd/calico bootstrap task list.
- include_tasks: etcd.yml
# Template for /opt/platform/calico/calico.env, loaded by calico-node.service
# as its EnvironmentFile. The etcdipN values are rendered in by Ansible.
ETCD_ENDPOINTS="https://{{ etcdip1 }}:2379,https://{{ etcdip2 }}:2379,https://{{ etcdip3 }}:2379"
# TLS material paths as seen inside the calico-node container
# (/opt/ssl on the host is mounted at /etc/calico/certs).
ETCD_CA_CERT_FILE="/etc/calico/certs/ca.pem"
ETCD_CERT_FILE="/etc/calico/certs/etcd.pem"
ETCD_KEY_FILE="/etc/calico/certs/etcd-key.pem"
# Left empty deliberately — NOTE(review): presumably calico-node falls back
# to its own defaults for these; confirm against the calico v2.6 docs.
CALICO_NODENAME=""
CALICO_NO_DEFAULT_POOLS=""
CALICO_IP="{{ ansible_default_ipv4.address }}"
CALICO_IP6=""
CALICO_AS=""
CALICO_LIBNETWORK_ENABLED=true
CALICO_NETWORKING_BACKEND=bird
# calicoctl client configuration (Ansible template): point the CLI at the
# TLS-secured etcd cluster used as the calico datastore.
apiVersion: v1
kind: calicoApiConfig
metadata:
spec:
  etcdEndpoints: https://{{ etcdip1 }}:2379,https://{{ etcdip2 }}:2379,https://{{ etcdip3 }}:2379
  etcdKeyFile: /opt/ssl/etcd-key.pem
  etcdCertFile: /opt/ssl/etcd.pem
  etcdCACertFile: /opt/ssl/ca.pem
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"{{ etcdip1 }}",
"{{ etcdip2 }}",
"{{ etcdip3 }}"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Shanghai",
"L": "Shanghai",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
#!/bin/bash
# etcd_cluster.sh (Ansible template) — start this host's member of a
# three-node etcd cluster in docker, then point the local docker daemon's
# cluster store at it and restart docker.
#
# The member IPs ({{ etcdip1..3 }}) are rendered in by Ansible; the script
# decides which member to start by matching the host's outbound IP.

# quay.mirrors.ustc.edu.cn/coreos/etcd
REGISTRY=registry-vpc.cn-hangzhou.aliyuncs.com/idcos-common/etcd
ETCD_VERSION=v3.3.22
TOKEN=9f6ff39d-2ff5-40f0-9216-47ab7c0d29e3
CLUSTER_STATE=new
NAME_1=etcd-1
NAME_2=etcd-2
NAME_3=etcd-3
# change this to real server ip
HOST_1={{ etcdip1 }}
HOST_2={{ etcdip2 }}
HOST_3={{ etcdip3 }}
CLUSTER=${NAME_1}=http://${HOST_1}:2380,${NAME_2}=http://${HOST_2}:2380,${NAME_3}=http://${HOST_3}:2380
DATA_DIR=etcd

# start_node NAME IP — run one etcd member as the docker container "etcd".
# Client traffic (2379) is TLS-secured with the certs in /opt/ssl; peer
# traffic (2380) stays plain HTTP, matching the ${CLUSTER} URLs.
# Replaces the three previously copy-pasted node1/node2/node3 functions.
start_node() {
  local THIS_NAME=$1
  local THIS_IP=$2
  docker run -d --name etcd \
    --hostname "${THIS_NAME}" \
    --volume /opt/ssl:/opt/ssl \
    --volume=${DATA_DIR}:/etcd-data \
    --publish 2379:2379 \
    --publish 2380:2380 \
    --restart=always \
    --env ETCDCTL_API=2 \
    --env ETCD_NAME="${THIS_NAME}" \
    --env ALLOW_NONE_AUTHENTICATION=yes \
    --env ETCD_TRUSTED_CA_FILE="/opt/ssl/ca.pem" \
    --env ETCD_CERT_FILE="/opt/ssl/etcd.pem" \
    --env ETCD_KEY_FILE="/opt/ssl/etcd-key.pem" \
    --env ETCD_CLIENT_CERT_AUTH="true" \
    --env ETCD_AUTO_TLS="true" \
    --env ETCD_ADVERTISE_CLIENT_URLS=https://${THIS_IP}:2379 \
    --env ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379 \
    --env ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 \
    --env ETCD_INITIAL_ADVERTISE_PEER_URLS=http://${THIS_IP}:2380 \
    --env ETCD_INITIAL_CLUSTER_TOKEN=${TOKEN} \
    --env ETCD_INITIAL_CLUSTER="${CLUSTER}" \
    --env ETCD_INITIAL_CLUSTER_STATE=${CLUSTER_STATE} \
    --env ETCD_ENABLE_V2=true \
    ${REGISTRY}:${ETCD_VERSION}
}

# self_ip=$(ip r | awk '/eth0.*src/ { print $NF }')
# Resolve this host's outbound IP via a route lookup (sends no traffic).
self_ip=$(ip r get 8.8.8.8 | awk '/src/ { print $NF }')
if [[ $self_ip == $HOST_1 ]]; then
  start_node "${NAME_1}" "${HOST_1}"
elif [[ $self_ip == $HOST_2 ]]; then
  start_node "${NAME_2}" "${HOST_2}"
elif [[ $self_ip == $HOST_3 ]]; then
  start_node "${NAME_3}" "${HOST_3}"
fi

# Point docker's cluster store at the local etcd member and restart docker
# so the new daemon.json takes effect.
cat > /etc/docker/daemon.json <<EOF
{
  "insecure-registries": ["registry.idcos.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ],
  "cluster-store":"etcd://$self_ip:2379",
  "cluster-store-opts": {
    "kv.cacertfile": "/opt/ssl/ca.pem",
    "kv.certfile": "/opt/ssl/etcd.pem",
    "kv.keyfile": "/opt/ssl/etcd-key.pem"
  },
  "cluster-advertise":"$self_ip:2376"
}
EOF
systemctl restart docker
---
# cloudiac deployment tasks: lay down configuration, create the databases,
# start the docker-compose stack, and wait until the portal answers.
- name: Wait connection
  wait_for_connection:
    sleep: 5
    timeout: "{{ wait_connection_timeout }}"

- name: Create app dirs
  shell: mkdir -p /usr/yunji/cloudiac/var/{consul,consulconfig,gitea} && mkdir -p /usr/yunji/cloudiac/var/gitea/gitea/conf
  args:
    # Brace expansion ({a,b,c}) is a bashism; don't rely on /bin/sh.
    executable: /bin/bash

- name: Copy config file
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: "0644"
  with_items:
    - { src: "dev.env", dest: "/usr/yunji/cloudiac/.env" }
    - { src: "acl.hcl", dest: "/usr/yunji/cloudiac/var/consulconfig/acl.hcl" }
    - { src: "docker-compose.yml", dest: "/usr/yunji/cloudiac/docker-compose.yml" }
    - { src: "app.ini", dest: "/usr/yunji/cloudiac/var/gitea/gitea/conf/app.ini" }

- name: Install mysql client
  package:
    name: mysql
    state: present

- name: Create cloudiac and gitea databases
  # If `show tables` against the database fails, it does not exist yet, so
  # create it. The CREATE command must NOT name the database on the command
  # line: selecting a nonexistent database fails before -e runs.
  shell: |
    mysql -h'{{ mysql_host }}' -u'{{ mysql_user }}' -p'{{ mysql_pw }}' '{{ item }}' -e 'show tables;' || \
    mysql -h'{{ mysql_host }}' -u'{{ mysql_user }}' -p'{{ mysql_pw }}' -e 'create database {{ item }} charset utf8mb4;'
  loop:
    - "{{ cloudiac_db }}"
    - "{{ gitea_db }}"

- name: Pull docker images
  shell: cd /usr/yunji/cloudiac/ && docker-compose -f docker-compose.yml pull

- name: Start services
  shell: |
    cd /usr/yunji/cloudiac/ && docker-compose -f docker-compose.yml up --force-recreate -d

- name: Check portal started
  # Poll the health endpoint up to 20 times, 3s apart.
  uri:
    url: "{{ portal_address }}:9030/api/v1/check"
  register: check_portal
  retries: 20
  delay: 3
  until: check_portal is success

- name: Install ansible
  package:
    name: ansible
    state: present
\ No newline at end of file
---
# cloudiac role entry point: run the deployment task list.
- include_tasks: cloudiac.yml
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment