Advanced Ansible: dynamically creating hosts to simplify deployment configuration files
The hosts inventory file is indispensable when using Ansible: every run depends on it. Yet this file often becomes the source of a project's maintenance pain, because it forces you to define hosts at too low a level, and real projects are far more complex than the textbook examples. If we manage the hosts file along object-oriented lines instead, life becomes much simpler and more controllable. After more than two years of real-world deployment, testing, and validation, I have worked out a practical approach, which I share here. The basic idea:

1. A single global input.yml configuration file is the entry point. In it, everything to the right of a colon may be changed; everything to the left of a colon must not be.
2. Different deployment projects share the same input.yml format, so with a source-control tool such as git you can conveniently switch between projects, or between development and test deployment environments.
main.yml reads as follows:
- name: "hadoop-cluster playbook: includes many sub-playbooks"
  hosts: localhost
  gather_facts: False
  # become: yes
  # become_method: sudo
  vars:
    projectinfo: "{{ lookup('file', 'input.yml') | from_yaml }}"
    vm_host_list: []
    domain_group_dict: {}
  pre_tasks:
  - set_fact:
      task_startup_timestamp: "{{ lookup('pipe', 'date \"+%Y-%m-%d %H:%M:%S\"') }}"
- name: "這個是在每個任務執行之前會執行的任務."
shell: echo "任務開始...,檢查依賴的檔案是否存在."; ./before-run.sh;
#  - name: "Check whether the files in the local project folder exist"
#    shell: ./check-file-exist-status.sh
#    register: files_status
#
#  - name: "If the stdout check fails, interrupt execution"
#    fail: msg="Error: a file link is broken and the file does not exist"
#    when: '"does not exist" in files_status.stdout'
- name: "檢查role依賴是否正常,版本是否正確" #todo
shell: ./check-role-dependency.sh
register: role_dependency_status
- name: "role依賴缺失"
fail: msg="role依賴存在問題"
when: '"role does not exist" in role_dependency_status.stdout'
- name: "set short hand hostdict"
set_fact: projectroot="{{projectinfo['project_root']}}"
- name: "set short hand vars"
set_fact: commonsetting="{{projectroot['common']}}"
- name: "set short hand vars"
set_fact: hostdict="{{projectroot['all_hosts']}}"
- name: "set hostconfig short hand vars"
set_fact: hostconfig="{{projectroot['host_config']}}"
- name: "set hostconfig short hand vars"
set_fact: hadoopconfig="{{projectroot['host_config']['hadoop_config']}}"
- name: "set hostconfig short hand vars"
set_fact: dnsconfig="{{projectroot['host_config']['dns_config']}}"
- name: "vcenterconfig"
set_fact: vcenterconfig="{{projectroot['vsphere_platform']['vmware_esxi']}}"
- name: "set fact"
set_fact: virtualbox_template_name="{{projectroot['host_config']['vagrant_config']['virtualbox_template_name']}}"
- name: "set fact"
set_fact: vm_bridge_nic_name="eth1"
- name: "批量合併列表合併物件"
set_fact: vm_host_list="{{ vm_host_list }} + {{ hostdict[item] }}"
with_items: "{{hostdict.keys()}}"
when: hostdict[item] is defined and hostdict[item][0].ismaster == true
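#A one-task equivalent on Ansible >= 2.5, sketched with the flatten filter; it skips
#the per-group ismaster guard, which holds for every entry in this input.yml anyway:
#  - set_fact: vm_host_list="{{ hostdict.values() | list | flatten }}"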
- name: "生成臨時group-domain-ip對映表文本檔案/tmp/group_domain_ip.txt"
template: src=templates/group_domain_ip_user_password.txt.j2 dest=/tmp/group_domain_ip_user_password.txt
- name: "把/tmp/group_domain_ip_user_password.txt內容放到註冊變數裡"
shell: cat /tmp/group_domain_ip_user_password.txt
register: group_domain_ip_user_password
#Note: usernames and passwords must not contain ':' or ','. Those characters are the field and record separators, so entries containing them cannot be split correctly.
#hadoop-namenode-hosts:hadoop-namenode1.ascs.tech:10.20.2.1:centos:YOUR_PASSWORD,hadoop-namenode-hosts:hadoop-namenode2.ascs.tech:10.20.2.2:centos:YOUR_PASSWORD,hadoop-....
  - set_fact: group_domain_ip_user_password_list="{{ group_domain_ip_user_password.stdout.split(',') }}"
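#A hedged guard for the separator caveat above: fail fast when an entry does not
#split into exactly 5 fields (group:domain:ip:user:password):
#  - fail: msg="bad entry, wrong field count: {{ item }}"
#    when: item.split(':') | length != 5
#    with_items: "{{ group_domain_ip_user_password_list }}"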
- add_host:
hostname: "{{item.split(':')[1]}}"
groups: "{{item.split(':')[0]}}"
ansible_host: "{{item.split(':')[2]}}"
# ansible_port: 22
ansible_user: "{{item.split(':')[3]}}"
ansible_ssh_pass: "{{item.split(':')[4]}}"
with_items: "{{group_domain_ip_user_password_list}}"
##Note: at this point every host is added as the root user; the hadoop user has not been created yet.
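Once add_host has run, every later play in the same ansible-playbook run can target the generated groups by name. A minimal sketch, assuming the zookeeper-hosts group name from the all_hosts section of input.yml below:

- name: smoke-test every dynamically added zookeeper host
  hosts: zookeeper-hosts
  gather_facts: False
  tasks:
    - name: verify SSH connectivity with the credentials registered by add_host
      ping: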
The core file, group_domain_ip_user_password.txt.j2, reads as follows:
{% set domain_group_dict={} %}
{%- for key,dict_item in hostdict.items() -%}
{%- for temphost in hostdict[key] -%}
{{key}}:{{temphost.name}}:{{temphost.ip}}:{{temphost.username}}:{{temphost.password}}{%- if not loop.last -%},{%- endif -%}
{%- endfor -%}{%- if not loop.last -%},{%- endif -%}
{%- endfor -%}
The configuration file format and content are as follows (input.yml):
---
#config file version-1.1.0 2018-08-22
project_root: #dict children are indented 2 spaces; list sub-items are indented 2 spaces.
  project_info:
    project_description: "Offline automated deployment of a Hadoop cluster"
    version: "1.0"
    source_code: ""
    created_date: "2017-02-01"
    author_list:
      - name: ""
        phone: ""
        email: ""
        weixin: ""
        QQ: ""
vsphere_platform:
virtualbox:
vagrant_offline_install_file: "vagrant_2.0.2_x86_64.rpm"
virtualbox_offline_install_file: "VirtualBox-5.2-5.2.6_120293_el7-1.x86_64.rpm"
vagrant_box_name: "centos1708-kernel4.4.116-docker-17.12.0-jre9-ce-go1.9"
vmware_esxi:
      vcenterhostname: "192.168.11.10" #vcenter.example.com; if the domain does not resolve, adding it to /etc/hosts on the control machine also works
vcenterusername: "[email protected]"
vcenterpassword: ""
datacenter: "hewutong"
default_datastore: "cw_m4_sas_datastore" #"cw_m4_pcie_datastore2 cw_m4_sas_datastore"
template: "centos1611_docker_jdk8_template"
virtual_machine_template_disk_size_in_gb: 30
resource_pool: "hadoopcluster"
folder: "/vm"
dnsserver1: "10.20.1.1" #這個是create-dns-record.yml 裡面要訪問到的IP,也是dns-host[0].ip
dnsserver2: "114.114.114.114"
state: "poweredon"
esxi_nic_network:
vlan: "VM Network" #"192.100.x.x"
gateway: "10.20.0.1" # sudo route add -net 11.23.3.0 -netmask 255.255.255.128 11.23.3.1
netmask: "255.255.0.0"
dnsserver1: "10.20.1.1"
dnsserver2: "114.114.114.114"
datastore:
rabbitmq_datastore: "cw_m4_sas_datastore"
vmware_workstation:
openstack:
huawei_fusion_vsphere:
common:
vm_platform: "vmware-vsphere" #vagrant, vmware-vsphere,huawei-vsphere
period_force_time_sync: "yes"
    nic_name: "ens160" #ens160 enp0s3
is_internet_up: false
rabbitmq_datastore: "cw_m4_sas_datastore"
    software_root_dir: "/var/server" #the directories below depend on this value; if you change it, update the related directories below as well.
citybox_work_network:
vlan: "10.20.0.0_10G-port" #"10.20.x.x"
gateway: "10.20.0.1" #10.20.1.1 to do
netmask: "255.255.0.0"
dnsserver1: "10.20.1.1"
dnsserver2: "114.114.114.114"
network: "10.20.0.0/16"
host_config:
java_config:
#app_home: "/var/server/jre" #jre-8u181-linux-x64.tar.gz
jre_targz: "jre-8u181-linux-x64.tar.gz" #jre-10.0.1_linux-x64_bin.tar.gz #tar -zxvf jre-9.0.4_linux-x64_bin.tar.gz -C jre9 --strip-components=1
jre_foldername: "jre"
jre_version: "1.8"
jdk_targz: "jdk-8u131-linux-x64.tar.gz"
jdk_foldername: "jdk"
jdk_version: "1.8"
go_config:
app_home: "/var/server/go"
app_foldername: "go"
install_filename: "go1.10.linux-amd64.tar.gz"
version: "1.10"
all_hosts:
zookeeper-hosts:
- name: "zka1.example.com"
uuid: "zka1.example.com"
ip: "10.20.2.51"
cpu: "1"
memory: "4096" # 600MB 以上
disk: 30
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore1"
host_machine: "192.168.3.11"
ismaster: true
- name: "zka2.example.com"
uuid: "zka2.example.com"
ip: "10.20.2.52"
cpu: "1"
memory: "4096"
disk: 30
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore2"
host_machine: "192.168.3.11"
ismaster: true
- name: "zka3.example.com"
uuid: "zka3.example.com"
ip: "10.20.2.53"
cpu: "1"
memory: "4096"
disk: 30
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore1"
host_machine: "192.168.3.11"
ismaster: true
- name: "zka4.example.com"
uuid: "zka4.example.com"
ip: "10.20.2.54"
cpu: "1"
memory: "4096"
disk: 30
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore2"
host_machine: "192.168.3.11"
ismaster: true
- name: "zka5.example.com"
uuid: "zka5.example.com"
ip: "10.20.2.55"
cpu: "1"
memory: "4096"
disk: 30
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore1"
host_machine: "192.168.3.11"
ismaster: true
hadoop-namenode-hosts:
- name: "hadoop-namenode1.example.com"
uuid: "hadoop-namenode1.example.com"
ip: "10.20.2.1"
cpu: "6"
memory: "20480"
disk: "100"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore1"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-namenode2.example.com"
uuid: "hadoop-namenode2.example.com"
ip: "10.20.2.2"
cpu: "6"
memory: "20480"
disk: "100"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore1"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-namenode3.example.com"
uuid: "hadoop-namenode3.example.com"
ip: "10.20.2.3"
cpu: "6"
memory: "20480"
disk: "100"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore2"
host_machine: "192.168.3.11"
ismaster: true
hadoop-datanode-hosts:
- name: "hadoop-datanode1.example.com"
uuid: "hadoop-datanode1.example.com"
ip: "10.20.2.11"
cpu: "6"
memory: "20480"
disk: "200"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore1"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-datanode2.example.com"
uuid: "hadoop-datanode2.example.com"
ip: "10.20.2.12"
cpu: "6"
memory: "20480"
disk: "200"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore2"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-datanode3.example.com"
uuid: "hadoop-datanode3.example.com"
ip: "10.20.2.13"
cpu: "6"
memory: "20480"
disk: "200"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_pcie_datastore2"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-datanode4.example.com"
uuid: "hadoop-datanode4.example.com"
ip: "10.20.2.14"
cpu: "6"
memory: "20480"
disk: "800"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_sas_datastore"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-datanode5.example.com"
uuid: "hadoop-datanode5.example.com"
ip: "10.20.2.15"
cpu: "6"
memory: "20480"
disk: "800"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_sas_datastore"
host_machine: "192.168.3.11"
ismaster: true
- name: "hadoop-datanode6.example.com"
uuid: "hadoop-datanode6.example.com"
ip: "10.20.2.16"
cpu: "6"
memory: "20480"
disk: "800"
username: "root"
password: "YOUR_PASSWORD"
datastore: "cw_m4_sas_datastore"
host_machine: "192.168.3.11"
ismaster: true
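Any other playbook can pull a single value back out of input.yml with the same lookup-plus-from_yaml chain used in main.yml. A minimal sketch; the path below points at the java_config block above:

- hosts: localhost
  gather_facts: False
  vars:
    projectinfo: "{{ lookup('file', 'input.yml') | from_yaml }}"
  tasks:
    - name: show which JRE tarball this deployment will install
      debug:
        msg: "JRE tarball: {{ projectinfo['project_root']['host_config']['java_config']['jre_targz'] }}"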