
Neutron Control Node Cluster


# Neutron control node cluster

OpenStack Pike deployment series index: http://www.cnblogs.com/elvi/p/7613861.html

#7. Neutron control node cluster

# Network setup used in this example: provider network (flat)
# Official reference: https://docs.openstack.org/neutron/pike/install/controller-install-rdo.html

# The Neutron database and user credentials were already created in an earlier step
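For reference, that earlier step would look roughly like the sketch below (following the official Pike install guide). It assumes the database and Keystone passwords are both "neutron", matching the configuration files written further down.

# Create the neutron database and grant access to the neutron user
mysql -u root -p -e "CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'neutron';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'neutron';"
# Create the neutron service user and grant it the admin role on the service project
openstack user create --domain default --password neutron neutron
openstack role add --project service --user neutron admin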
############ Run everything below on controller1


source ./admin-openstack.sh 
# Create the Neutron service entity and API endpoints
openstack service create --name neutron \
  --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
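Optionally, a quick sanity check that the service entity and all three endpoints were registered:

# List the network service and its public/internal/admin endpoints
openstack service list
openstack endpoint list --service network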
# Install packages
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge \
  python-neutronclient ebtables ipset

# Back up the Neutron configuration files
cp /etc/neutron/neutron.conf{,.bak2}
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
cp /etc/neutron/dhcp_agent.ini{,.bak}
cp /etc/neutron/metadata_agent.ini{,.bak}
cp /etc/neutron/l3_agent.ini{,.bak}

Netname=ens37  # name of the provider network interface

# Configuration
# Tell Nova how to reach Neutron and the metadata proxy
echo '#
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = metadata
#'>>/etc/nova/nova.conf
#
echo '[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = metadata
#'>/etc/neutron/metadata_agent.ini
#
echo '#
[ml2]
tenant_network_types =
type_drivers = vlan,flat
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[securitygroup]
enable_ipset = True
#vlan
# [ml2_type_vlan]
# network_vlan_ranges = provider:3001:4000
#'>/etc/neutron/plugins/ml2/ml2_conf.ini
#
# provider: maps to the physical NIC name (double quotes so $Netname expands)
echo "#
[linux_bridge]
physical_interface_mappings = provider:$Netname
[vxlan]
enable_vxlan = false
#local_ip = 10.2.1.20
#l2_population = true
[agent]
prevent_arp_spoofing = True
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
enable_security_group = True
#">/etc/neutron/plugins/ml2/linuxbridge_agent.ini
#
echo '#
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
#'>/etc/neutron/dhcp_agent.ini
#
echo '[DEFAULT]
bind_port = 9696
bind_host = controller1
core_plugin = ml2
service_plugins =
#service_plugins = trunk
#service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller1:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[nova]
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = nova

[database]
connection = mysql://neutron:neutron@controller:3306/neutron

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
#'>/etc/neutron/neutron.conf
#
echo '[DEFAULT]
interface_driver = linuxbridge
#'>/etc/neutron/l3_agent.ini
#

# Populate the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
# Check that the tables were created
mysql -h controller -u neutron -pneutron -e "use neutron;show tables;"

# HAProxy high-availability configuration
echo '
#Neutron_API
listen Neutron_API_cluster
  bind controller:9696
  balance source
  option tcpka
  option tcplog
  server controller1 controller1:9696 check inter 2000 rise 2 fall 5
  server controller2 controller2:9696 check inter 2000 rise 2 fall 5
  server controller3 controller3:9696 check inter 2000 rise 2 fall 5
'>>/etc/haproxy/haproxy.cfg
systemctl restart haproxy.service
netstat -antp|grep haproxy

# Restart related services
systemctl restart openstack-nova-api.service
# Enable and start Neutron
systemctl enable neutron-server.service neutron-linuxbridge-agent.service \
  neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service \
  neutron-dhcp-agent.service neutron-metadata-agent.service
#
# List the network agents; normally there are 3 agent IDs from the control nodes
# and 1 from the compute node:
# openstack network agent list
#
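Once the agents are up, the flat provider setup can optionally be exercised by creating a provider network and subnet, as in the official guide. This is only a sketch: the network/subnet names and the 10.2.1.0/24 addressing are placeholders and must match the physical segment behind $Netname (ens37).

# Create a flat provider network using the "provider" physical network mapping
openstack network create --share --external \
  --provider-physical-network provider \
  --provider-network-type flat provider-net
# Create a subnet on it; adjust pool, gateway, DNS and CIDR to the real network
openstack subnet create --network provider-net \
  --allocation-pool start=10.2.1.100,end=10.2.1.200 \
  --gateway 10.2.1.1 --subnet-range 10.2.1.0/24 \
  --dns-nameserver 114.114.114.114 provider-subnet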
############ Install and configure on controller2 ############
# Install packages
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge \
  python-neutronclient ebtables ipset

# Sync the configuration from controller1 and adjust it for this node
Node=controller2
rsync -avzP -e 'ssh -p 22' controller1:/etc/nova/* /etc/nova/
rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
#sed -i 's/controller1/'$Node'/' /etc/nova/nova.conf
sed -i 's/controller1/'$Node'/' /etc/neutron/neutron.conf
rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
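An optional quick check that the substitution took effect; neutron.conf is the file carrying the node-specific values (bind_host and memcached_servers):

# Both lines should now show controller2 instead of controller1
grep -E '^(bind_host|memcached_servers)' /etc/neutron/neutron.conf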


# Restart related services
systemctl restart haproxy openstack-nova-api.service
# Start Neutron
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl start neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
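Optionally verify on controller2 that neutron-server is listening and that the agents have registered; this assumes the same admin-openstack.sh credential script is also present on this node.

# neutron-server should be listening on 9696, and the agent list should grow
netstat -antp | grep 9696
source ./admin-openstack.sh
openstack network agent list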


######## Install and configure controller3 the same way as controller2 (a sketch follows below)
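A sketch of the controller3 steps, mirroring the controller2 block above:

# Install packages
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge \
  python-neutronclient ebtables ipset
# Sync the configuration from controller1 and adjust it for this node
Node=controller3
rsync -avzP -e 'ssh -p 22' controller1:/etc/nova/* /etc/nova/
rsync -avzP -e 'ssh -p 22' controller1:/etc/neutron/* /etc/neutron/
sed -i 's/controller1/'$Node'/' /etc/neutron/neutron.conf
rsync -avzP -e 'ssh -p 22' controller1:/etc/haproxy/* /etc/haproxy/
# Restart related services, then enable and start Neutron
systemctl restart haproxy openstack-nova-api.service
systemctl enable neutron-server.service neutron-linuxbridge-agent.service \
  neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service \
  neutron-dhcp-agent.service neutron-metadata-agent.service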
