三節點單控Pike按照官方安裝+ceph整合
阿新 • • 發佈:2019-01-09
####安照官網安裝
###主機資訊
### 網路 採用selfService
###### 環境準備
###在控制節點設定
###設定firewalld
### 設定主機名
### 安裝資料庫
##這是由於mariadb有預設開啟檔案數限制。可以通過配置/usr/lib/systemd/system/mariadb.service來調大開啟檔案數目。
###設定 最大連線數不生效 在 "/usr/lib/systemd/system/mariadb.service" 修改引數
yum install memcached python-memcached -y
## edit the memcached config to add the controller node info
## sed refresher — insert text before / after a match:
##sed -i 's/match/insert&/' file    (insert before the matched text)
##sed -i 's/match/&insert/' file    (insert after the matched text)
##############################################
############################################################################
### 安裝 keystone 元件
############################################################################
#設定資料庫
##### 安裝glance元件
############################################
##設定資料庫
##### 安裝nova元件
#####################################################################
##設定資料庫
#####安裝nova的計算節點#####
#################################################################################
##############安裝neutron ####################
##設定資料庫
##安裝neutron package
##### 同步資料庫
###設定 /etc/neutron/plugins/ml2/linuxbridge_agent.ini
##############################安裝dashboard 元件##################################################################
################################################################################################
##在控制節點上安裝
#如果登入提示錯誤,請修改/etc/httpd/conf.d/openstack-dashboard.conf 增加配置
#WSGIApplicationGroup %{GLOBAL}
################################################################################################
##############################安裝cinder元件##################################################################
################################################################################################
### 安裝儲存節點 13.13.59.24 ceph24
################################################################################################
### cinder 控制節點安裝################################################################################################
################################################################################################
##資料庫設定
##安裝cinder package
# BUG FIX: the original fused three commands and a comment onto one line;
# split back apart so each command actually runs.
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
### verify the configuration
egrep -v '#|^$' /etc/cinder/cinder.conf
### sync the database (load admin credentials first)
source .admin_openrc.sh
##建立 provider 網路
####成功建立
#### 開啟安全組中的ssh 和icmp 協議
### 可以平通
################################################################################################################
#######ceph整合到openstack ######################################################################################
################################################################################################################
### ceph 整合到glance節點
##在glance節點安裝 package
######################## cinder integration with ceph ########################
##############################################################################
### on the ceph admin node
### create
##############################################################################
######################## nova integration with ceph ##########################
##############################################################################
##ceph 節點建立
###安裝ceph元件
######本教程結束
### host information
# NOTE(review): the IPs listed here (.11/.12) disagree with the addresses the
# rest of the script actually uses (.10 controller / .18 compute1) — verify.
# These were bare lines the shell would try to execute; commented out.
#13.13.59.11 controller
#13.13.59.12 compute1
#13.13.59.24 ceph24
### all nodes use bonded NICs, bond mode bond0
### networking uses the self-service model
###### environment preparation
### run on the controller node
ssh-keygen
# push the controller's key to the other two nodes
# BUG FIX: the blog's e-mail obfuscation ate the user@host arguments; restored.
ssh-copy-id -i .ssh/id_rsa.pub root@13.13.59.18
ssh-copy-id -i .ssh/id_rsa.pub root@13.13.59.24
### set up /etc/hosts on every node
### set selinux
# BUG FIX: the original fused the comment with the command and collapsed the
# newlines inside the quoted payload — a hosts file needs one entry per line.
for i in 10 18 24;do ssh 13.13.59.$i "echo '
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
13.13.59.10 controller
13.13.59.18 compute1
13.13.59.24 ceph24' > /etc/hosts";done
### configure firewalld
### set the hostnames
hostnamectl set-hostname controller
ssh 13.13.59.18 "hostnamectl set-hostname compute1"
# BUG FIX: this originally ran locally and would have renamed the controller
# to ceph24; it must run on the ceph node.
ssh 13.13.59.24 "hostnamectl set-hostname ceph24"
####### set up the ntp service and check clocks
for i in 10 18 24;do ssh 13.13.59.$i "chronyc sources ";done
for i in 10 18 24;do ssh 13.13.59.$i "date ";done
### configure the openstack package repositories
######## run on the controller node ########
# BUG FIX: these three loops were fused onto one line with the comment; split.
for i in 10 18 24;do ssh 13.13.59.$i "yum install centos-release-openstack-pike epel-release -y ";done
for i in 10 18 24;do ssh 13.13.59.$i " yum upgrade -y && reboot ";done
for i in 10 18 24;do ssh 13.13.59.$i " yum install python-openstackclient openstack-selinux -y";done
### install the database
yum install mariadb mariadb-server python2-PyMySQL -y
## create an openstack.cnf
# BUG FIX: the original fused the comment with the echo and collapsed the
# newlines; an ini file needs one directive per line.
echo '
[mysqld]
bind-address = 13.13.59.10
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
' > /etc/my.cnf.d/openstack.cnf
### start the database
systemctl enable mariadb.service
systemctl start mariadb.service
systemctl status mariadb.service
## initialize the database non-interactively
[[ -f /usr/bin/expect ]] || { yum install expect -y; } # install expect if missing
# NOTE(review): the password sent below ('[email protected]') is what the blog's
# e-mail obfuscation left behind — replace it with the real root password.
/usr/bin/expect << EOF
set timeout 30
spawn mysql_secure_installation
expect {
"enter for none" { send "\r"; exp_continue}
"Y/n" { send "Y\r" ; exp_continue}
"password:" { send "[email protected]\r"; exp_continue}
"new password:" { send "[email protected]\r"; exp_continue}
"Y/n" { send "Y\r" ; exp_continue}
eof { exit }
}
EOF
### verify the settings took effect
# BUG FIX: the '-p' flag was lost to the obfuscation; prompt for the password.
mysql -uroot -p -e "show variables like 'max_connections';"
# expected output (note: still 214 here — fixed in the next section):
#+-----------------+-------+
#| Variable_name | Value |
#+-----------------+-------+
#| max_connections | 214 |
#+-----------------+-------+
#1 row in set (0.00 sec)
## max_connections stays low because mariadb hits its default open-files
## limit; raise it via /usr/lib/systemd/system/mariadb.service.
### if max_connections does not take effect, edit "/usr/lib/systemd/system/mariadb.service"
##############################################################################
# sed refresher — insert a line BEFORE a matching line:
##sed -i '/allow 361way.com/iallow www.361way.com' the.conf.file
# insert a line AFTER a matching line:
##sed -i '/allow 361way.com/aallow www.361way.com' the.conf.file
##############################################################################
### insert the two limit lines right after [Service]
sed -i '/\[Service\]/a\LimitNOFILE=10000\nLimitNPROC=10000' /usr/lib/systemd/system/mariadb.service
## reload units and restart the service
systemctl daemon-reload
systemctl restart mariadb
# BUG FIX: the '-p' flag was lost to the obfuscation; prompt for the password.
mysql -uroot -p -e "show variables like 'max_connections';"
# expected output:
#+-----------------+-------+
#| Variable_name | Value |
#+-----------------+-------+
#| max_connections | 4096 |
#+-----------------+-------+
############ install the rabbitmq-server message broker ############
yum install -y rabbitmq-server
for action in enable start; do
  systemctl "$action" rabbitmq-server.service
done
systemctl status rabbitmq-server
### create the openstack user and set its password
rabbitmqctl add_user openstack RABBIT_PASS
##Creating user "openstack" ...
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
###Setting permissions for user "openstack" in vhost "/" ...
### confirm the user was created
rabbitmqctl list_users
# the output below confirms success; if the user is missing, nova's
# openstack-nova-consoleauth.service and openstack-nova-scheduler.service fail.
#Listing users ...
#openstack []
#guest [administrator]
######## install memcached ############################
yum install -y memcached python-memcached
## append the controller hostname to memcached's listen OPTIONS
sed -i 's/\(OPTIONS="-l 127.0.0.1,::1\)/&,controller/' /etc/sysconfig/memcached
for action in enable start; do
  systemctl "$action" memcached.service
done
systemctl status memcached
##### tip: sed idioms for inserting text before / after a match
##sed -i 's/match/insert&/' file    (insert before)
##sed -i 's/match/&insert/' file    (insert after)
##############################################
############################################################################
### install the keystone identity service
############################################################################
# create the database
# BUG FIX: the '-p' flag was lost to the blog's e-mail obfuscation; prompt
# for the MariaDB root password.
mysql -u root -p -e "CREATE DATABASE keystone; \
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS'; \
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';"
#check setting
mysql -u keystone -pKEYSTONE_DBPASS -e "show databases;"
## install the keystone packages
yum install openstack-keystone httpd mod_wsgi openstack-utils -y
cp /etc/keystone/keystone.conf{,.bk}
### openstack-utils provides the openstack-config helper used below
# BUG FIX: restored 'KEYSTONE_DBPASS@controller' — the obfuscation had turned
# it into a literal e-mail address.
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet
### confirm the settings
egrep -v '#|^$' /etc/keystone/keystone.conf
### sync the database
su -s /bin/sh -c "keystone-manage db_sync" keystone
## initialize keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# BUG FIX: region id was 'RegionOn' — must be 'RegionOne' to match the
# endpoints created later with --region RegionOne.
keystone-manage bootstrap --bootstrap-password ADMIN_PASS \
--bootstrap-admin-url http://controller:35357/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
## configure httpd
echo '
ServerName controller
' >> /etc/httpd/conf/httpd.conf
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd.service
systemctl start httpd.service
## write the admin credentials file
printf '%s\n' '
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://13.13.59.10:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
' > .admin_openrc.sh
### create the service/demo projects and the demo user
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password=DEMO_PASS demo
openstack role create user
openstack role add --project demo --user demo user
### verify that both accounts can obtain tokens
unset OS_AUTH_URL
unset OS_PASSWORD
openstack --os-auth-url http://controller:35357/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name admin --os-username admin token issue
openstack --os-auth-url http://controller:5000/v3 \
--os-project-domain-name Default --os-user-domain-name Default \
--os-project-name demo --os-username demo token issue
### write the demo credentials file
printf '%s\n' '
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=DEMO_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
' > .demo_openrc.sh
## verify token issuing with the admin credentials
source .admin_openrc.sh
openstack token issue
##### install the glance image service
############################################
## set up the database
# BUG FIX: the '-p' flag was lost to the blog's e-mail obfuscation; prompt
# for the MariaDB root password.
mysql -u root -p -e "CREATE DATABASE glance; \
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS'; \
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';"
#check setting
mysql -u glance -pGLANCE_DBPASS -e "show databases;"
### set up identity (user, role, service, endpoints)
# BUG FIX: the credentials file is created as .admin_openrc.sh earlier
source .admin_openrc.sh
openstack user create --domain default --password=GLANCE_PASS glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
## install the glance package
yum install openstack-glance -y
## configure glance-api.conf
cp /etc/glance/glance-api.conf{,.bk}
#[database]
# BUG FIX: restored 'GLANCE_DBPASS@controller' (mangled by e-mail obfuscation)
openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
#[keystone_authtoken]
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password GLANCE_PASS
#[paste_deploy]
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
#[glance_store]
openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
## verify the configuration
egrep -v '#|^$' /etc/glance/glance-api.conf
## configure glance-registry.conf
# BUG FIX: cp has no '-y' option; removed
cp /etc/glance/glance-registry.conf{,.bk}
####[keystone_authtoken]
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password GLANCE_PASS
#[paste_deploy]
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
## verify the configuration
egrep -v '#|^$' /etc/glance/glance-registry.conf
### sync the database
su -s /bin/sh -c "glance-manage db_sync" glance
### start the services
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
systemctl status openstack-glance-api.service openstack-glance-registry.service
### verify the installation with a test image
yum install wget -y
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
# BUG FIX: the file is .admin_openrc.sh (was .admin-openrc.sh)
source .admin_openrc.sh
openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
openstack image list
####################################################################
##### install the nova compute service (controller side)
#####################################################################
## set up the databases
# BUG FIX: the '-p' flag was lost to the blog's e-mail obfuscation; prompt
# for the MariaDB root password.
mysql -u root -p -e "
CREATE DATABASE nova_api; \
CREATE DATABASE nova; \
CREATE DATABASE nova_cell0; \
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS'; \
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS'; \
"
#check setting
mysql -u nova -pNOVA_DBPASS -e "show databases;"
## set up identity for nova and placement
openstack user create --domain default --password=NOVA_PASS nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
openstack user create --domain default --password=PLACEMENT_PASS placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
### install the nova controller packages
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
### write the configuration
cp /etc/nova/nova.conf{,.bk}
###[DEFAULT]
# BUG FIX: this was a bare 'auth_strategy = keystone' line, which the shell
# would try to execute as a command; the setting is applied below instead.
#auth_strategy = keystone
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
# BUG FIX: restored 'RABBIT_PASS@controller' (mangled by e-mail obfuscation)
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 13.13.59.10
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
#####[api_database]
# BUG FIX: restored 'NOVA_DBPASS@controller' in both connection strings
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
######[database]
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:NOVA_DBPASS@controller/nova
####[keystone_authtoken]
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS
#####[vnc]
openstack-config --set /etc/nova/nova.conf vnc enabled true
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'
#####[glance]
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292
####[oslo_concurrency]
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
####[placement]
openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password PLACEMENT_PASS
####[scheduler]
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
egrep -v '#|^$' /etc/nova/nova.conf
### allow httpd access to the placement API
echo '
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
' >> /etc/httpd/conf.d/00-nova-placement-api.conf
systemctl restart httpd
systemctl status httpd
### sync the nova databases and register the cells
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
### verify the cell registration
nova-manage cell_v2 list_cells
### bring up the control-plane services
nova_units='openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service'
# intentionally unquoted: word-splitting expands the unit list
for action in enable start status restart; do
  systemctl $action $nova_units
done
#################################################################################
##### install the nova compute node #####
#################################################################################
ping compute1 -c 2
ssh compute1 "yum install openstack-nova-compute -y"
ssh compute1 "egrep -v '#|^$' /etc/nova/nova.conf"
ssh compute1 "cp /etc/nova/nova.conf{,.bk}"
##### write the configuration on the compute node
# BUG FIX (inside the remote script): restored 'RABBIT_PASS@controller' in
# transport_url, which the blog's e-mail obfuscation had mangled.
ssh compute1 "
###[DEFAULT]
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata;
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller;
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 13.13.59.18;
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True;
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver;
###[api]
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone;
####[keystone_authtoken]
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_uri http://controller:5000;
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://controller:35357;
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers controller:11211;
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password;
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default;
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default;
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service;
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova;
openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS;
#####[vnc]
openstack-config --set /etc/nova/nova.conf vnc enabled true;
openstack-config --set /etc/nova/nova.conf vnc vncserver_listen 0.0.0.0;
openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '\$my_ip';
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://controller:6080/vnc_auto.html;
#####[glance]
openstack-config --set /etc/nova/nova.conf glance api_servers http://controller:9292;
####[oslo_concurrency]
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp;
####[placement]
openstack-config --set /etc/nova/nova.conf placement os_region_name RegionOne;
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default;
openstack-config --set /etc/nova/nova.conf placement project_name service;
openstack-config --set /etc/nova/nova.conf placement auth_type password;
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default;
openstack-config --set /etc/nova/nova.conf placement auth_url http://controller:35357/v3;
openstack-config --set /etc/nova/nova.conf placement username placement;
openstack-config --set /etc/nova/nova.conf placement password PLACEMENT_PASS;
openstack-config --set /etc/nova/nova.conf libvirt virt_type kvm;
egrep -v '#|^$' /etc/nova/nova.conf "
##### start the services on the compute node
ssh compute1 "
systemctl enable libvirtd.service openstack-nova-compute.service;
systemctl start libvirtd.service openstack-nova-compute.service;
sleep 5;
systemctl status libvirtd.service openstack-nova-compute.service
"
### verify on the controller node and map the new host into its cell
source .admin_openrc.sh
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
########################################################################################
############## install neutron ####################
## set up the database
# BUG FIX: the '-p' flag was lost to the blog's e-mail obfuscation; prompt
# for the MariaDB root password.
mysql -u root -p -e "
CREATE DATABASE neutron; \
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS'; \
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';"
#check setting
mysql -u neutron -pNEUTRON_DBPASS -e "show databases;"
### set up identity (user, role, service, endpoints)
openstack user create --domain default --password=NEUTRON_PASS neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
### the network layout is self-service networks
## install the neutron packages
yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables -y
## configure neutron.conf
cp /etc/neutron/neutron.conf{,.bk}
#[database]
# BUG FIX: restored 'NEUTRON_DBPASS@controller' (mangled by e-mail obfuscation)
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
#[DEFAULT]
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips true
# BUG FIX: restored 'RABBIT_PASS@controller' (mangled by e-mail obfuscation)
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true
##[keystone_authtoken]
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS
##[nova]
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://controller:35357
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password NOVA_PASS
##[oslo_concurrency]
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
egrep -v '#|^$' /etc/neutron/neutron.conf
#### configure /etc/neutron/plugins/ml2/ml2_conf.ini
# BUG FIX: cp has no '-y' option; removed
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bk}
#####[ml2]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
##[ml2_type_flat]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
##[ml2_type_vxlan]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
##[securitygroup]
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true
egrep -v '#|^$' /etc/neutron/plugins/ml2/ml2_conf.ini
### configure /etc/neutron/plugins/ml2/linuxbridge_agent.ini
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bk}
##[linux_bridge]
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:bond1
##[vxlan]
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 13.13.59.10
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
##[securitygroup]
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
egrep -v '#|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
#### configure the L3 agent /etc/neutron/l3_agent.ini
cp /etc/neutron/l3_agent.ini{,.bk}
##[DEFAULT]
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge
egrep -v '#|^$' /etc/neutron/l3_agent.ini
#### configure the DHCP agent /etc/neutron/dhcp_agent.ini
cp /etc/neutron/dhcp_agent.ini{,.bk}
# BUG FIX: '[DEFAULT]' was a bare line the shell would try to execute; it is
# only a section marker, so it is commented out like its siblings.
#[DEFAULT]
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
egrep -v '#|^$' /etc/neutron/dhcp_agent.ini
### configure the metadata agent /etc/neutron/metadata_agent.ini
cp /etc/neutron/metadata_agent.ini{,.bk}
###[DEFAULT]
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host controller
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret METADATA_SECRET
egrep -v '#|^$' /etc/neutron/metadata_agent.ini
### append the [neutron] section to the controller's nova.conf
cp /etc/nova/nova.conf{,.add_neutron_before}
# helper: set one key in the [neutron] section of nova.conf
nova_neutron_set() {
  openstack-config --set /etc/nova/nova.conf neutron "$1" "$2"
}
nova_neutron_set url http://controller:9696
nova_neutron_set auth_url http://controller:35357
nova_neutron_set auth_type password
nova_neutron_set project_domain_name default
nova_neutron_set user_domain_name default
nova_neutron_set region_name RegionOne
nova_neutron_set project_name service
nova_neutron_set username neutron
nova_neutron_set password NEUTRON_PASS
nova_neutron_set service_metadata_proxy true
nova_neutron_set metadata_proxy_shared_secret METADATA_SECRET
egrep -v '#|^$' /etc/nova/nova.conf
### neutron configuration is complete — sync the database
##### database sync
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
## bring up the services
systemctl restart openstack-nova-api.service
net_units='neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service'
# intentionally unquoted: word-splitting expands the unit list
for act in enable start status; do
  systemctl $act $net_units
done
for act in enable start status; do
  systemctl $act neutron-l3-agent.service
done
#systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
#systemctl status neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
#### Set up neutron on the compute node (compute1)
ssh compute1 "yum install openstack-neutron-linuxbridge ebtables ipset -y "
### Configure /etc/neutron/neutron.conf on the compute node
# NOTE(review): the rabbit URL was mangled in the original paste
# ('openstack:[email protected]'); restored to the official-guide form --
# substitute the real RabbitMQ password for RABBIT_PASS.
ssh compute1 "
cp /etc/neutron/neutron.conf{,.bk};
##[DEFAULT];
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller;
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone;
##[keystone_authtoken];
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_uri http://controller:5000;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://controller:35357;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers controller:11211;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron;
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password NEUTRON_PASS;
##[oslo_concurrency];
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp;
egrep -v '#|^$' /etc/neutron/neutron.conf "
####
### Configure /etc/neutron/plugins/ml2/linuxbridge_agent.ini on the compute node
ssh compute1 "
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bk};
##[linux_bridge];
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:bond1;
##[vxlan];
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true;
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 13.13.59.18;
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true;
##[securitygroup];
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true;
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver;
egrep -v '#|^$' /etc/neutron/plugins/ml2/linuxbridge_agent.ini "
### Add the neutron settings to nova.conf on the compute node
ssh compute1 "cp /etc/nova/nova.conf{,.add_neutron_before};
####[neutron];
openstack-config --set /etc/nova/nova.conf neutron url http://controller:9696;
openstack-config --set /etc/nova/nova.conf neutron auth_url http://controller:35357;
openstack-config --set /etc/nova/nova.conf neutron auth_type password;
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default;
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default;
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne;
openstack-config --set /etc/nova/nova.conf neutron project_name service;
openstack-config --set /etc/nova/nova.conf neutron username neutron;
openstack-config --set /etc/nova/nova.conf neutron password NEUTRON_PASS;
egrep -v '#|^$' /etc/nova/nova.conf "
#### Start the compute-node services
ssh compute1 "
systemctl restart openstack-nova-compute.service;
sleep 5;
systemctl status openstack-nova-compute.service;
systemctl enable neutron-linuxbridge-agent.service;
systemctl start neutron-linuxbridge-agent.service;
sleep 5;
systemctl status neutron-linuxbridge-agent.service "
##### Verify from the controller node
### All agents should show up as alive
source .admin_openrc.sh
openstack network agent list
################################################################################################
############################## Install the dashboard (horizon) component ########################
################################################################################################
## Install on the controller node
yum install openstack-dashboard -y
#### Configure local_settings
cp /etc/openstack-dashboard/local_settings{,.bk}
sed -i 's#_member_#user#g' /etc/openstack-dashboard/local_settings
sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#' /etc/openstack-dashboard/local_settings
sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" /etc/openstack-dashboard/local_settings
sed -i 's#UTC#Asia/Shanghai#g' /etc/openstack-dashboard/local_settings
sed -i 's#%s:5000/v2.0#%s:5000/v3#' /etc/openstack-dashboard/local_settings
sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' /etc/openstack-dashboard/local_settings
# NOTE(review): one more sed was here in the original but its expression was
# corrupted in the paste ('[email protected]...'); it appears to uncomment a setting in
# local_settings. TODO: restore the real pattern, likely something like:
#   sed -i 's@^#OPENSTACK_API_VERSIONS@OPENSTACK_API_VERSIONS@' /etc/openstack-dashboard/local_settings
### Make sure the following settings end up present (uncommented) in
### local_settings -- shown for reference only, do not execute as shell:
# OPENSTACK_API_VERSIONS = {
#     "identity": 3,
#     "image": 2,
#     "volume": 2,
# }
# SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': 'controller:11211',
#     }
# }
## Start the services
systemctl restart httpd.service memcached.service
### Log in to the web UI at http://controller/dashboard
# If the login page reports an error, add the following to
# /etc/httpd/conf.d/openstack-dashboard.conf:
#WSGIApplicationGroup %{GLOBAL}
echo "
WSGIApplicationGroup %{GLOBAL}
" >> /etc/httpd/conf.d/openstack-dashboard.conf
## Then restart httpd and it should work
################################################################################################
############################## Install the cinder component #####################################
################################################################################################
### Storage node setup: 13.13.59.24 ceph24
yum install lvm2 device-mapper-persistent-data -y
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
## Configure the cinder-volume service on the storage node
cp /etc/cinder/cinder.conf{,.bk}
# NOTE(review): the DB and rabbit URLs were mangled in the original paste
# (redacted to '[email protected]'); restored to the official-guide form --
# substitute the real CINDER_DBPASS / RABBIT_PASS values.
##[database]
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
##[DEFAULT]
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 13.13.59.24
openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends lvm
openstack-config --set /etc/cinder/cinder.conf DEFAULT glance_api_servers http://controller:9292
##[keystone_authtoken]
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password CINDER_PASS
###[lvm]
openstack-config --set /etc/cinder/cinder.conf lvm volume_driver cinder.volume.drivers.lvm.LVMVolumeDriver
openstack-config --set /etc/cinder/cinder.conf lvm volume_group cinder-volumes
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_protocol iscsi
openstack-config --set /etc/cinder/cinder.conf lvm iscsi_helper lioadm
##[oslo_concurrency]
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
egrep -v '#|^$' /etc/cinder/cinder.conf
### Start the services
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service
################################################################################################
### cinder controller-node installation ########################################################
################################################################################################
## Database setup
# NOTE(review): the root password was redacted/mangled in the original paste;
# substitute the real MariaDB root password for ROOT_DBPASS.
mysql -uroot -pROOT_DBPASS -e "
CREATE DATABASE cinder; \
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'CINDER_DBPASS'; \
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'CINDER_DBPASS'; "
mysql -ucinder -pCINDER_DBPASS -e "show databases;"
### Create the keystone identities, services and endpoints
source .admin_openrc.sh
openstack user create --domain default --password=CINDER_PASS cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
## Install the cinder packages
yum install openstack-cinder -y
## Configure cinder
cp /etc/cinder/cinder.conf{,.bk}
# NOTE(review): DB/rabbit URLs were mangled in the original paste; restored to
# the official-guide form -- substitute the real passwords.
##[database]
openstack-config --set /etc/cinder/cinder.conf database connection mysql+pymysql://cinder:CINDER_DBPASS@controller/cinder
##[DEFAULT]
openstack-config --set /etc/cinder/cinder.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@controller
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT my_ip 13.13.59.10
##[keystone_authtoken]
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_url http://controller:35357
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken memcached_servers controller:11211
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_type password
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken project_name service
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken username cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken password CINDER_PASS
##[oslo_concurrency]
# NOTE(review): the original had three commands fused onto one line here and
# started the services before the database sync; the ordering below is fixed
# (configure -> verify -> db sync -> nova hookup -> start services).
openstack-config --set /etc/cinder/cinder.conf oslo_concurrency lock_path /var/lib/cinder/tmp
egrep -v '#|^$' /etc/cinder/cinder.conf
### Sync the database
su -s /bin/sh -c "cinder-manage db sync" cinder
### Point the nova component at the cinder service
cp /etc/nova/nova.conf{,.add_cinder_before}
openstack-config --set /etc/nova/nova.conf cinder os_region_name RegionOne
### Start the services
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service
### Create a test volume
openstack volume create --size 1 volume1
openstack volume list
#### Launch a first instance
source .admin_openrc.sh
## Create the provider network
openstack network create --share --external --provider-physical-network provider --provider-network-type flat provider
# NOTE(review): the original allocation pool ended at 113.13.60.250, which is
# outside the 13.13.59.0/21 subnet -- fixed to 13.13.60.250.
openstack subnet create --network provider --allocation-pool start=13.13.60.2,end=13.13.60.250 --dns-nameserver 202.106.0.20 --gateway 13.13.59.1 --subnet-range 13.13.59.0/21 provider
## Create the self-service network
openstack network create selfservice
openstack subnet create --network selfservice --dns-nameserver 202.106.0.20 --gateway 172.16.1.1 --subnet-range 172.16.1.0/24 selfservice
## Create the router
openstack router create router
neutron router-interface-add router selfservice
neutron router-gateway-set router provider
## Verify the setup
ip netns
neutron router-port-list router
# NOTE(review): the original 'ping -c 4' had no target; ping the provider
# gateway to verify connectivity -- adjust the address if needed.
ping -c 4 13.13.59.1
## Create a flavor, keypair and security-group rules
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
openstack keypair list
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default
### Create the instance from the dashboard web UI
#### Instance created successfully
#### Open ssh and icmp in the security group
### The instance's floating IP is then reachable:
ping -c 4 13.13.60.6
# Sample output (commented so it is not executed as shell):
# 64 bytes from 13.13.60.6: icmp_seq=1 ttl=63 time=0.540 ms
# 64 bytes from 13.13.60.6: icmp_seq=2 ttl=63 time=0.546 ms
# 64 bytes from 13.13.60.6: icmp_seq=3 ttl=63 time=0.596 ms
# 64 bytes from 13.13.60.6: icmp_seq=5 ttl=63 time=0.556 ms
# 64 bytes from 13.13.60.6: icmp_seq=6 ttl=63 time=0.549 ms
#### From the controller the instance's floating IP is reachable and the
#### instance itself can ping the external network
################################################################################################################
####### Integrate ceph with openstack ##########################################################################
################################################################################################################
### Integrate ceph with the glance node
## Install the package on the glance node
sudo yum install -y python-rbd
### Set up the deploy user
mkdir /etc/ceph
#### user Cephx, password: Cephx
sudo useradd Cephx
sudo passwd Cephx
### Grant sudo rights (run as root)
cat << EOF >/etc/sudoers.d/Cephx
Cephx ALL = (root) NOPASSWD:ALL
Defaults:Cephx !requiretty
EOF
### On the ceph admin node: create the images pool and the glance keyring
# NOTE(review): fixed 'rdb_children' -> 'rbd_children' and a doubled path in
# the second scp; the scp target hosts were redacted in the original paste --
# replace GLANCE_NODE with the glance host.
##sudo ceph osd pool create images 128
#sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
#scp /etc/ceph/ceph.conf root@GLANCE_NODE:/etc/ceph
#scp /etc/ceph/ceph.client.glance.keyring root@GLANCE_NODE:/etc/ceph
#sudo chgrp glance /etc/ceph/ceph.client.glance.keyring
#sudo chmod 0640 /etc/ceph/ceph.client.glance.keyring
## Back up the glance-api.conf configuration
egrep -v '#|^$' /etc/glance/glance-api.conf
cp /etc/glance/glance-api.conf{,.add_cephforglance_before}
### Remove the old file-backed glance_store settings
##[glance_store]
openstack-config --del /etc/glance/glance-api.conf glance_store stores
openstack-config --del /etc/glance/glance-api.conf glance_store default_store
openstack-config --del /etc/glance/glance-api.conf glance_store filesystem_store_datadir
### Add the ceph glance_store settings
##[DEFAULT]
sudo openstack-config --set /etc/glance/glance-api.conf DEFAULT show_image_direct_url True
###[glance_store]
sudo openstack-config --set /etc/glance/glance-api.conf glance_store stores rbd
sudo openstack-config --set /etc/glance/glance-api.conf glance_store default_store rbd
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_pool images
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_user glance
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_ceph_conf /etc/ceph/ceph.conf
sudo openstack-config --set /etc/glance/glance-api.conf glance_store rbd_store_chunk_size 8
### Verify the configuration
egrep -v '#|^$' /etc/glance/glance-api.conf
### Restart the glance services
systemctl restart openstack-glance-api openstack-glance-registry
systemctl status openstack-glance-api openstack-glance-registry
### Upload an image to confirm the ceph integration works
openstack image list
openstack image create --name "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public
##################################################################################################
############## Integrate cinder with ceph ########################################################
##################################################################################################
### On the ceph admin node
### Create the pools and keyrings
ceph osd pool create volumes 128
##ceph osd pool create vms 128
ceph osd pool create backups 128
sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.cinder.keyring
sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring
## On the cinder-volume node (the object/block node)
yum install ceph-common -y
#### user Cephx, password: Cephx
sudo useradd Cephx
sudo passwd Cephx
### Grant sudo rights (run as root)
cat << EOF >/etc/sudoers.d/Cephx
Cephx ALL = (root) NOPASSWD:ALL
Defaults:Cephx !requiretty
EOF
#### Copy the config and keyrings to the cinder node
# NOTE(review): the source host was redacted in the original paste
# ('{[email protected]}'); replace CEPH_ADMIN with the ceph admin node address.
scp CEPH_ADMIN:/etc/ceph/ceph.conf /etc/ceph
scp CEPH_ADMIN:/etc/ceph/ceph.client.cinder.keyring /etc/ceph
scp CEPH_ADMIN:/etc/ceph/ceph.client.cinder-backup.keyring /etc/ceph
## Fix the permissions
chgrp cinder /etc/ceph/ceph.client.cinder*
chmod 0640 /etc/ceph/ceph.client.cinder*
### Configure cinder-volume
cp /etc/cinder/cinder.conf{,.add_ceph_before}
### Remove the old lvm backend settings
##[DEFAULT]
openstack-config --del /etc/cinder/cinder.conf DEFAULT enabled_backends
##[lvm]
openstack-config --del /etc/cinder/cinder.conf lvm volume_driver
openstack-config --del /etc/cinder/cinder.conf lvm volume_group
openstack-config --del /etc/cinder/cinder.conf lvm iscsi_protocol
openstack-config --del /etc/cinder/cinder.conf lvm iscsi_helper
egrep -v '#|^$' /etc/cinder/cinder.conf
## Add the ceph backend settings
##[DEFAULT]
openstack-config --set /etc/cinder/cinder.conf DEFAULT enabled_backends ceph
#[ceph]
openstack-config --set /etc/cinder/cinder.conf ceph volume_driver cinder.volume.drivers.rbd.RBDDriver
# NOTE(review): 'backups' as the backend name for the volumes backend looks
# like a copy-paste slip ('ceph' would be conventional) -- confirm before use.
openstack-config --set /etc/cinder/cinder.conf ceph volume_backend_name backups
openstack-config --set /etc/cinder/cinder.conf ceph rbd_cluster_name ceph
openstack-config --set /etc/cinder/cinder.conf ceph rbd_pool volumes
openstack-config --set /etc/cinder/cinder.conf ceph rbd_user cinder
openstack-config --set /etc/cinder/cinder.conf ceph rbd_ceph_conf /etc/ceph/ceph.conf
openstack-config --set /etc/cinder/cinder.conf ceph rbd_flatten_volume_from_snapshot false
# The secret uuid must match the libvirt secret defined on the compute nodes.
openstack-config --set /etc/cinder/cinder.conf ceph rbd_secret_uuid ee403c0a-ee0d-4f05-a4f8-05ecd658db2c
openstack-config --set /etc/cinder/cinder.conf ceph rbd_max_clone_depth 5
# NOTE(review): the original set rbd_store_chunk_size twice; duplicate removed.
openstack-config --set /etc/cinder/cinder.conf ceph rbd_store_chunk_size 4
openstack-config --set /etc/cinder/cinder.conf ceph rados_connect_timeout -1
openstack-config --set /etc/cinder/cinder.conf ceph rados_connection_retries 3
openstack-config --set /etc/cinder/cinder.conf ceph rados_connection_interval 5
openstack-config --set /etc/cinder/cinder.conf ceph replication_connect_timeout 5
## Verify the configuration
egrep -v '#|^$' /etc/cinder/cinder.conf
# Restart the services
systemctl restart openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service
### Create a volume to test
#openstack volume create --size 1 volume1
#openstack volume list
#rbd ls volumes   # list the volume names in the ceph pool
######
##################################################################################################
############## Integrate nova with ceph ##########################################################
##################################################################################################
## On the ceph node: create the vms pool
ceph osd pool create vms 128
## Create the keyring
###ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.nova.keyring
## scp it to the compute node
### Install the ceph packages on the compute node
# NOTE(review): fixed 'yum installed' -> 'yum install'.
yum install python-rbd ceph-common -y
#### Copy the keyring and config to the nova compute node
# NOTE(review): the original ran 'ceph auth get-or-create client.client', which
# does not match the keyring file it writes; 'client.cinder' matches the file
# name and the libvirt secret below -- confirm against the ceph cluster.
ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring
# NOTE(review): source hosts were redacted in the original paste
# ('{[email protected]}'); replace CEPH_ADMIN with the ceph admin node address.
scp CEPH_ADMIN:/etc/ceph/ceph.conf /etc/ceph
scp CEPH_ADMIN:/etc/ceph/ceph.client.nova.keyring /etc/ceph
## Fix the permissions
chgrp nova /etc/ceph/ceph.client.cinder*
chmod 0640 /etc/ceph/ceph.client.cinder*
#### Get a uuid (reuse the cinder rbd_secret_uuid so both services share it)
uuidgen |tee /etc/ceph/nova.uuid.txt ### use the cinder uuid
cat > /etc/ceph/nova.xml <<EOF
<secret ephemeral="no" private="no">
<uuid>ee403c0a-ee0d-4f05-a4f8-05ecd658db2c</uuid>
<usage type="ceph">
<name>client.cinder secret</name>
</usage>
</secret>
EOF
virsh secret-define --file /etc/ceph/nova.xml
# virsh secret-set-value --secret 5020d7ca-14cc-4d84-b3ee-d945bf22d8eb --base64 $(cat /etc/ceph/client.nova.keying)
# NOTE(review): the base64 key below is a cluster secret hard-coded in the
# runbook -- rotate it and prefer reading it from the keyring file instead.
virsh secret-set-value --secret ee403c0a-ee0d-4f05-a4f8-05ecd658db2c --base64 AQDGC9NaOcOTMxAA3c78/w4rlUwYzMuSZbhhJw==
#### Configure the nova service
cp /etc/nova/nova.conf{,.add_ceph_before}
###[libvirt]
openstack-config --set /etc/nova/nova.conf libvirt hw_disk_discard unmap
openstack-config --set /etc/nova/nova.conf libvirt images_type rbd
openstack-config --set /etc/nova/nova.conf libvirt images_rbd_pool vms
openstack-config --set /etc/nova/nova.conf libvirt images_rbd_ceph_conf /etc/ceph/ceph.conf
# NOTE(review): rbd_user 'nova' while the libvirt secret holds the client.cinder
# key -- the upstream ceph guide uses rbd_user 'cinder' here; confirm.
openstack-config --set /etc/nova/nova.conf libvirt rbd_user nova
openstack-config --set /etc/nova/nova.conf libvirt rbd_secret_uuid ee403c0a-ee0d-4f05-a4f8-05ecd658db2c
openstack-config --set /etc/nova/nova.conf libvirt disk_cachemodes \"network=writeback\"
openstack-config --set /etc/nova/nova.conf libvirt inject_password false
openstack-config --set /etc/nova/nova.conf libvirt inject_key false
openstack-config --set /etc/nova/nova.conf libvirt inject_partition -2
openstack-config --set /etc/nova/nova.conf libvirt live_migration_flag \"VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED\"
## Check the configuration
egrep -v '#|^$' /etc/nova/nova.conf
### Restart the nova service
systemctl restart openstack-nova-compute
systemctl status openstack-nova-compute
#### Integration complete
###### End of this guide