Preface: This deployment runs on CentOS 8-Stream with the OpenStack Victoria repository. Apart from the basic setup, the time synchronization service (one of the five core services) and the nova, neutron and cinder components (of the seven core components) must be configured on both nodes; all other services are configured on the controller node only. For neutron, pick either the provider (public) network or the self-service (private) network configuration; in most cases the provider network configuration is used. All passwords in this deployment are 111111; change them to suit your own needs.
Installation environment:
Aliyun mirror repository address: https://mirrors.aliyun.com. Configure it yourself if needed; here it is only used as the baseurl in the repo files below.
#Edit the CentOS-Stream-AppStream.repo file and change the address in the baseurl parameter to https://mirrors.aliyun.com
[root@localhost ~]# cd /etc/yum.repos.d/
[root@localhost yum.repos.d]# vi CentOS-Stream-AppStream.repo
[appstream]
name=CentOS Stream $releasever - AppStream
#mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo=AppStream&infra=$infra
baseurl=https://mirrors.aliyun.com/$contentdir/$stream/AppStream/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
#Edit the CentOS-Stream-BaseOS.repo file and change the address in the baseurl parameter to https://mirrors.aliyun.com
[root@localhost yum.repos.d]# vi CentOS-Stream-BaseOS.repo
[baseos]
name=CentOS Stream $releasever - BaseOS
#mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo=BaseOS&infra=$infra
baseurl=https://mirrors.aliyun.com/$contentdir/$stream/BaseOS/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
#Edit the CentOS-Stream-Extras.repo file and change the address in the baseurl parameter to https://mirrors.aliyun.com
[root@localhost yum.repos.d]# vi CentOS-Stream-Extras.repo
[extras]
name=CentOS Stream $releasever - Extras
#mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo=extras&infra=$infra
baseurl=https://mirrors.aliyun.com/$contentdir/$stream/extras/$basearch/os/
gpgcheck=1
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-centosofficial
#Create the openstack-victoria.repo file under the yum repository directory
[root@localhost ~]# vi /etc/yum.repos.d/openstack-victoria.repo
#Write the following content
[victoria]
name=victoria
baseurl=https://mirrors.aliyun.com/centos/8-stream/cloud/x86_64/openstack-victoria/
gpgcheck=0
enabled=1
[root@controller ~]# yum clean all
[root@controller ~]# yum makecache
#Install network-scripts. CentOS 8 uses NetworkManager by default, which conflicts with the neutron service, so install the legacy network service, then stop and disable NetworkManager
[root@localhost ~]# dnf -y install network-scripts
[root@localhost ~]# systemctl disable --now NetworkManager
#Start the network service and enable it at boot
[root@localhost ~]# systemctl enable --now network
#ens33, using the controller node as the example
[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens33
BOOTPROTO=static #modify
ONBOOT=yes #modify
IPADDR=10.10.10.10 #add
NETMASK=255.255.255.0 #add
#ens34, using the controller node as the example
[root@localhost ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens34
BOOTPROTO=static #modify
ONBOOT=yes #modify
IPADDR=10.10.20.10 #add
NETMASK=255.255.255.0 #add
GATEWAY=10.10.20.2 #add
DNS1=8.8.8.8 #add
DNS2=114.114.114.114 #add
[root@localhost ~]# systemctl restart network
[root@localhost ~]# ping -c 3 www.baidu.com
#Controller node
[root@localhost ~]# hostnamectl set-hostname controller
[root@localhost ~]# bash
[root@controller ~]#
#Compute node
[root@localhost ~]# hostnamectl set-hostname compute
[root@localhost ~]# bash
[root@compute ~]#
#Stop the firewall and disable it at boot
[root@controller ~]# systemctl disable --now firewalld
#Set SELinux to disabled so it stays off after boot
[root@controller ~]# vi /etc/selinux/config
SELINUX=disabled
#You can check the SELinux status with the getenforce command (the config change takes effect after a reboot)
[root@controller ~]# getenforce
Disabled
#Controller node
[root@controller ~]# cat >> /etc/hosts <<EOF
> 10.10.10.10 controller
> 10.10.10.20 compute
> EOF
#Compute node
[root@compute ~]# cat >> /etc/hosts <<EOF
> 10.10.10.10 controller
> 10.10.10.20 compute
> EOF
#Install the OpenStack Victoria repository
[root@controller ~]# dnf -y install centos-release-openstack-victoria
#Upgrade all installed packages on the node
[root@controller ~]# dnf -y upgrade
#Install the OpenStack client and openstack-selinux
[root@controller ~]# dnf -y install python3-openstackclient openstack-selinux
[root@controller ~]# rpm -qa |grep chrony
#Install it if it is not present
[root@controller ~]# dnf -y install chrony
#Controller node
[root@controller ~]# vim /etc/chrony.conf
server ntp6.aliyun.com iburst #add; sync time with Aliyun
allow 10.10.10.0/24 #add
#Compute node
[root@compute ~]# vim /etc/chrony.conf
server controller iburst #add; sync time with the controller node
[root@controller ~]# systemctl restart chronyd && systemctl enable chronyd
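#Optionally verify time synchronization on each node with chronyc:
[root@controller ~]# chronyc sources
[root@compute ~]# chronyc sources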
[root@controller ~]# dnf -y install mariadb mariadb-server python3-PyMySQL
#Start the mariadb database
[root@controller ~]# systemctl start mariadb
[root@controller ~]# vim /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 10.10.10.10 #bind IP; if the IP changes later, this line can be removed
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
[root@controller ~]# mysql_secure_installation
Enter current password for root (enter for none): #Enter the current root password; press Enter if it is empty
OK, successfully used password, moving on...
Set root password? [Y/n] y # Set a root password?
New password: # Enter the new password
Re-enter new password: # Enter the new password again
Remove anonymous users? [Y/n] y # Remove anonymous users?
Disallow root login remotely? [Y/n] n # Disallow remote root login?
Remove test database and access to it? [Y/n] y # Remove the test database and access to it?
Reload privilege tables now? [Y/n] y # Reload the privilege tables?
[root@controller ~]# systemctl restart mariadb && systemctl enable mariadb
Note: installing rabbitmq-server may fail because libSDL is missing from the repositories; download the required package first and then install rabbitmq-server
Download command: wget http://rpmfind.net/linux/centos/8-stream/PowerTools/x86_64/os/Packages/SDL2-2.0.10-2.el8.x86_64.rpm
Install command: dnf -y install SDL2-2.0.10-2.el8.x86_64.rpm
[root@controller ~]# dnf -y install rabbitmq-server
[root@controller ~]# systemctl start rabbitmq-server && systemctl enable rabbitmq-server
[root@controller ~]# rabbitmqctl add_user openstack 111111
[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
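#Optionally confirm the openstack user and its permissions with standard rabbitmqctl queries:
[root@controller ~]# rabbitmqctl list_users
[root@controller ~]# rabbitmqctl list_permissions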
[root@controller ~]# rabbitmq-plugins enable rabbitmq_management
#After this step, ss -antlu shows port 15672 listening. The RabbitMQ web UI can then be reached at http://10.10.10.10:15672; the default user and password are both guest
[root@controller ~]# dnf -y install memcached python3-memcached
[root@controller ~]# vim /etc/sysconfig/memcached
..........
OPTIONS="-l 127.0.0.1,::1,controller" #modify this line
[root@controller ~]# systemctl start memcached && systemctl enable memcached
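#Optionally confirm memcached is listening on port 11211:
[root@controller ~]# ss -tnlp | grep 11211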
[root@controller ~]# dnf -y install etcd
[root@controller ~]# vim /etc/etcd/etcd.conf
#Modify as follows
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://10.10.10.10:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.10.10.10:2379"
ETCD_NAME="controller"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.10.10.10:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.10.10.10:2379"
ETCD_INITIAL_CLUSTER="controller=http://10.10.10.10:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
[root@controller ~]# systemctl start etcd && systemctl enable etcd
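#Optionally check etcd health; a quick sketch using the client endpoint configured above:
[root@controller ~]# ETCDCTL_API=3 etcdctl --endpoints=http://10.10.10.10:2379 endpoint health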
#Log in to the database
[root@controller ~]# mysql -u root -p111111
#Create the keystone database
MariaDB [(none)]> CREATE DATABASE keystone;
#Grant privileges
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '111111';
[root@controller ~]# dnf -y install openstack-keystone httpd python3-mod_wsgi
#Back up the configuration file and strip comments and blank lines
[root@controller ~]# cp /etc/keystone/keystone.conf /etc/keystone/keystone.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/keystone/keystone.conf.bak >/etc/keystone/keystone.conf
#Edit
[root@controller ~]# vim /etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:111111@controller/keystone
[token]
provider = fernet
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@controller ~]# mysql -uroot -p111111
MariaDB [(none)]> use keystone;
MariaDB [keystone]> show tables;
MariaDB [keystone]> quit
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
[root@controller ~]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
[root@controller ~]# keystone-manage bootstrap --bootstrap-password 111111 \
--bootstrap-admin-url http://controller:5000/v3/ \
--bootstrap-internal-url http://controller:5000/v3/ \
--bootstrap-public-url http://controller:5000/v3/ \
--bootstrap-region-id RegionOne
#Edit the httpd.conf file
[root@controller ~]# vim /etc/httpd/conf/httpd.conf
ServerName controller #add this line
<Directory />
AllowOverride none
Require all granted #change this line to this
</Directory>
#Create a symlink to the wsgi-keystone.conf file
[root@controller ~]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
[root@controller ~]# systemctl restart httpd && systemctl enable httpd
[root@controller ~]# vim /admin-openrc.sh
export OS_USERNAME=admin
export OS_PASSWORD=111111
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
#Load the environment variables with source /admin-openrc.sh (or . /admin-openrc.sh). If you do not want to do this manually every time, add it to .bashrc so it is loaded automatically on login
[root@controller ~]# vim .bashrc
source /admin-openrc.sh #add this line
#Create a domain; a default domain named default already exists, so this one is just an example
[root@controller ~]# openstack domain create --description "An Example Domain" example
#Create the service project
[root@controller ~]# openstack project create --domain default --description "Service Project" service
#Create a demo project
[root@controller ~]# openstack project create --domain default --description "Demo Project" myproject
#Create a user; this command prompts for the password, which must be entered twice
[root@controller ~]# openstack user create --domain default --password-prompt myuser
#Create a role
[root@controller ~]# openstack role create myrole
#Bind the role to the project and user
[root@controller ~]# openstack role add --project myproject --user myuser myrole
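#Optionally create a demo-openrc.sh for the myuser account; a sketch, where MYUSER_PASS stands for the password entered at the prompt above:
[root@controller ~]# vim /demo-openrc.sh
export OS_USERNAME=myuser
export OS_PASSWORD=MYUSER_PASS
export OS_PROJECT_NAME=myproject
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2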
[root@controller ~]# openstack token issue
#Log in to the database
[root@controller ~]# mysql -u root -p111111
#Create the glance database
MariaDB [(none)]> CREATE DATABASE glance;
#Grant privileges
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '111111';
Note: if the installation fails, set enabled=1 in the CentOS-Stream-PowerTools.repo source and install again
[root@controller ~]# dnf install -y openstack-glance
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/glance/glance-api.conf.bak >/etc/glance/glance-api.conf
#Edit
[root@controller ~]# vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:111111@controller/glance
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = 111111
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
[root@controller ~]# mysql -uroot -p111111
MariaDB [(none)]> use glance;
MariaDB [glance]> show tables;
MariaDB [glance]> quit
#Create the glance user
[root@controller ~]# openstack user create --domain default --password 111111 glance
#Grant the admin role
[root@controller ~]# openstack role add --project service --user glance admin
#Create the glance service
[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
#public
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
#internal
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
#admin
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
[root@controller ~]# openstack endpoint list
[root@controller ~]# systemctl start openstack-glance-api && systemctl enable openstack-glance-api
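#If the cirros image is not already on the controller node, it can be downloaded first (assuming internet access):
[root@controller ~]# wget http://download.cirros-cloud.net/0.5.1/cirros-0.5.1-x86_64-disk.img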
#The image used here is cirros-0.5.1-x86_64-disk.img; the create command is as follows
[root@controller ~]# openstack image create "cirros" --file cirros-0.5.1-x86_64-disk.img --disk-format qcow2 --container-format bare --public
#After creation, check with the openstack command
[root@controller ~]# openstack image list
#Check in the glance database; images are recorded in the images table
[root@controller ~]# mysql -uroot -p111111
MariaDB [(none)]> use glance;
MariaDB [glance]> select * from images\G
#The image file is under /var/lib/glance/images/. To delete the image, delete the database record first and then the image file
[root@controller ~]# ls /var/lib/glance/images/
#Log in to the database
[root@controller ~]# mysql -u root -p111111
#Create the placement database
MariaDB [(none)]> CREATE DATABASE placement;
#Grant privileges
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY '111111';
[root@controller ~]# dnf install -y openstack-placement-api
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/placement/placement.conf /etc/placement/placement.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/placement/placement.conf.bak >/etc/placement/placement.conf
#Edit
[root@controller ~]# vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:111111@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = 111111
[root@controller ~]# su -s /bin/sh -c "placement-manage db sync" placement
[root@controller ~]# mysql -uroot -p111111
MariaDB [(none)]> use placement;
MariaDB [placement]> show tables;
MariaDB [placement]> quit
#Create the placement user
[root@controller ~]# openstack user create --domain default --password 111111 placement
#Grant the admin role
[root@controller ~]# openstack role add --project service --user placement admin
#Create the placement service
[root@controller ~]# openstack service create --name placement --description "Placement API" placement
#public
[root@controller ~]# openstack endpoint create --region RegionOne placement public http://controller:8778
#internal
[root@controller ~]# openstack endpoint create --region RegionOne placement internal http://controller:8778
#admin
[root@controller ~]# openstack endpoint create --region RegionOne placement admin http://controller:8778
[root@controller ~]# openstack endpoint list
[root@controller ~]# systemctl restart httpd
[root@controller ~]# placement-status upgrade check
#Log in to the database
[root@controller ~]# mysql -u root -p111111
#Create the nova_api, nova and nova_cell0 databases
MariaDB [(none)]> CREATE DATABASE nova_api;
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> CREATE DATABASE nova_cell0;
#Grant privileges
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '111111';
[root@controller ~]# dnf install -y openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/nova/nova.conf.bak >/etc/nova/nova.conf
#Edit
[root@controller ~]# vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:111111@controller:5672/
my_ip = 10.10.10.10 #IP of this node; if the IP changes later, this must be updated
[api_database]
connection = mysql+pymysql://nova:111111@controller/nova_api
[database]
connection = mysql+pymysql://nova:111111@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 111111
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 111111
# Populate the nova_api database
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
# Register the cell0 database
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
# Create the cell1 cell
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# Populate the nova database
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova
#Create the nova user
[root@controller ~]# openstack user create --domain default --password 111111 nova
#Grant the admin role
[root@controller ~]# openstack role add --project service --user nova admin
#Create the nova compute service
[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
#public
[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
#internal
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
#admin
[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
[root@controller ~]# openstack endpoint list
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
[root@controller ~]# systemctl enable --now openstack-nova-api openstack-nova-scheduler openstack-nova-conductor openstack-nova-novncproxy
[root@controller ~]# nova service-list
#Usually only two services are listed here: nova-scheduler and nova-conductor. This command is answered by nova-api, which manages nova-scheduler and nova-conductor; if nova-api were not running, those two services would show as down as well. The nova-novncproxy service is checked by looking at its port instead, for example:
[root@controller ~]# netstat -lntup | grep 6080
tcp 0 0 0.0.0.0:6080 0.0.0.0:* LISTEN 1456/python3
[root@controller ~]# ps -ef | grep 1456
nova 1456 1 0 18:29 ? 00:00:05 /usr/bin/python3 /usr/bin/nova-novncproxy --web /usr/share/novnc/
root 27724 26054 0 20:51 pts/0 00:00:00 grep --color=auto 1456
#If name resolution is not configured, use the IP directly
http://10.10.10.10:6080
#To use name resolution, add the following to the hosts file under C:\Windows\System32\drivers\etc on your PC
10.10.10.10 controller
10.10.10.20 compute
#Then access
http://controller:6080
[root@compute ~]# dnf install -y openstack-nova-compute
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
[root@compute ~]# grep -Ev '^$|#' /etc/nova/nova.conf.bak >/etc/nova/nova.conf
#Edit
[root@compute ~]# vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:111111@controller
my_ip = 10.10.10.20 #IP of this node; if the IP changes later, this must be updated
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = 111111
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = 111111
#If this command returns a value other than 0, the compute node supports hardware acceleration; if it returns 0, it does not, and the [libvirt] section must be configured
[root@compute ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
#Configure [libvirt]
[root@compute ~]# vim /etc/nova/nova.conf
[libvirt]
virt_type = qemu
[root@compute ~]# systemctl enable --now libvirtd.service openstack-nova-compute.service
#Confirm the compute host is present in the database
[root@controller ~]# openstack compute service list --service nova-compute
#Discover the compute node from the controller node
[root@controller ~]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
[root@controller ~]# vim /etc/nova/nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
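#Restart the scheduler so the automatic discovery interval takes effect:
[root@controller ~]# systemctl restart openstack-nova-scheduler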
#Log in to the database
[root@controller ~]# mysql -u root -p111111
#Create the neutron database
MariaDB [(none)]> CREATE DATABASE neutron;
#Grant privileges
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '111111';
#Create the neutron user
[root@controller ~]# openstack user create --domain default --password 111111 neutron
#Grant the admin role
[root@controller ~]# openstack role add --project service --user neutron admin
#Create the neutron service
[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
#public
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
#internal
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
#admin
[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
[root@controller ~]# openstack endpoint list
#Networking option 1: provider (public) network, controller node
[root@controller ~]# dnf -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf
#Edit
[root@controller ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:111111@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:111111@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 111111
[nova] #if this section is not in the configuration file, just add it
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 111111
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/plugins/ml2/ml2_conf.ini.bak >/etc/neutron/plugins/ml2/ml2_conf.ini
#Edit
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[securitygroup]
enable_ipset = true
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34 #the NIC that carries the provider network for instances (the NAT NIC here)
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/dhcp_agent.ini.bak >/etc/neutron/dhcp_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
#Modify the kernel parameter configuration file
[root@controller ~]# echo 'net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
#Load the br_netfilter module
[root@controller ~]# modprobe br_netfilter
#Check
[root@controller ~]# sysctl -p
net.bridge.bridge-nf-call-iptables = 1 #this output means the setting took effect
net.bridge.bridge-nf-call-ip6tables = 1 #this output means the setting took effect
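#Optionally have br_netfilter loaded automatically at boot via the standard systemd modules-load mechanism:
[root@controller ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf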
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/metadata_agent.ini.bak >/etc/neutron/metadata_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
#'METADATA_SECRET' is a shared secret; you can define your own, but it must match the metadata secret configured in nova below
#In the [neutron] section, configure the access parameters and enable the metadata proxy
[root@controller ~]# vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 111111
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET #must match the secret above
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable --now neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service
#Networking option 1: provider network, compute node
[root@compute ~]# dnf install -y openstack-neutron-linuxbridge ebtables ipset
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
[root@compute ~]# grep -Ev '^$|#' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf
#Edit
[root@compute ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:111111@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 111111
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
[root@compute ~]# grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
#Edit
[root@compute ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34 #the NIC that carries the provider network for instances (the NAT NIC here)
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#Modify the kernel parameter configuration file
[root@compute ~]# echo 'net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
#Load the br_netfilter module
[root@compute ~]# modprobe br_netfilter
#Check
[root@compute ~]# sysctl -p
net.bridge.bridge-nf-call-iptables = 1 #this output means the setting took effect
net.bridge.bridge-nf-call-ip6tables = 1 #this output means the setting took effect
#In the [neutron] section, configure the access parameters
[root@compute ~]# vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 111111
[root@compute ~]# systemctl restart openstack-nova-compute.service
[root@compute ~]# systemctl enable --now neutron-linuxbridge-agent.service
#Check the list of network agents from the controller node
[root@controller ~]# openstack network agent list
#If everything succeeded there are normally four agents: a Metadata agent, a DHCP agent and two Linux bridge agents, one Linux bridge agent on controller and the other on compute
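#With the agents up, a flat provider network and subnet can be created for instances; a sketch assuming the ens34 subnet used earlier (10.10.20.0/24, gateway 10.10.20.2), so adjust the allocation pool and DNS to your environment:
[root@controller ~]# openstack network create --share --external \
  --provider-physical-network provider \
  --provider-network-type flat provider
[root@controller ~]# openstack subnet create --network provider \
  --allocation-pool start=10.10.20.100,end=10.10.20.200 \
  --dns-nameserver 114.114.114.114 --gateway 10.10.20.2 \
  --subnet-range 10.10.20.0/24 provider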
#Networking option 2: self-service (private) network, controller node
[root@controller ~]# dnf -y install openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf
#Edit
[root@controller ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:111111@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[database]
connection = mysql+pymysql://neutron:111111@controller/neutron
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 111111
[nova] #if this section is not in the configuration file, just add it
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = 111111
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugins/ml2/ml2_conf.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/plugins/ml2/ml2_conf.ini.bak >/etc/neutron/plugins/ml2/ml2_conf.ini
#Edit
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34 #the NIC that carries the provider network for instances (the NAT NIC here)
[vxlan]
enable_vxlan = true
local_ip = 10.10.10.10
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#Modify the kernel parameter configuration file
[root@controller ~]# echo 'net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
#Load the br_netfilter module
[root@controller ~]# modprobe br_netfilter
#Check
[root@controller ~]# sysctl -p
net.bridge.bridge-nf-call-iptables = 1 #this output means the setting took effect
net.bridge.bridge-nf-call-ip6tables = 1 #this output means the setting took effect
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/dhcp_agent.ini /etc/neutron/dhcp_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/dhcp_agent.ini.bak >/etc/neutron/dhcp_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/l3_agent.ini /etc/neutron/l3_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/l3_agent.ini.bak > /etc/neutron/l3_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/neutron/metadata_agent.ini /etc/neutron/metadata_agent.ini.bak
[root@controller ~]# grep -Ev '^$|#' /etc/neutron/metadata_agent.ini.bak >/etc/neutron/metadata_agent.ini
#Edit
[root@controller ~]# vim /etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = METADATA_SECRET
#'METADATA_SECRET' is a shared secret; you can define your own, but it must match the metadata secret configured in nova below
#In the [neutron] section, configure the access parameters and enable the metadata proxy
[root@controller ~]# vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 111111
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET #must match the secret above
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable --now neutron-server.service \
neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
neutron-metadata-agent.service neutron-l3-agent.service
#Networking option 2: self-service network, compute node
[root@compute ~]# dnf install -y openstack-neutron-linuxbridge ebtables ipset
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/neutron/neutron.conf /etc/neutron/neutron.conf.bak
[root@compute ~]# grep -Ev '^$|#' /etc/neutron/neutron.conf.bak >/etc/neutron/neutron.conf
#Edit
[root@compute ~]# vim /etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:111111@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = 111111
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak
[root@compute ~]# grep -Ev '^$|#' /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
#Edit
[root@compute ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34 #the NIC that carries the provider network for instances (the NAT NIC here)
[vxlan]
enable_vxlan = true
local_ip = 10.10.10.20
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#Modify the kernel parameter configuration file
[root@compute ~]# echo 'net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1' >> /etc/sysctl.conf
#Load the br_netfilter module
[root@compute ~]# modprobe br_netfilter
#Check
[root@compute ~]# sysctl -p
net.bridge.bridge-nf-call-iptables = 1 #this output means the setting took effect
net.bridge.bridge-nf-call-ip6tables = 1 #this output means the setting took effect
#In the [neutron] section, configure the access parameters
[root@compute ~]# vim /etc/nova/nova.conf
[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = 111111
[root@compute ~]# systemctl restart openstack-nova-compute.service
[root@compute ~]# systemctl enable --now neutron-linuxbridge-agent.service
#Check the list of network agents from the controller node
[root@controller ~]# openstack network agent list
#If everything succeeded there are normally five agents: a Metadata agent, a DHCP agent, an L3 agent and two Linux bridge agents, one on controller and the other on compute
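#With the L3 agent up, a self-service network and router can be created; a sketch in which the names selfservice and router and the 192.168.1.0/24 range are only examples, and the external provider network must already exist:
[root@controller ~]# openstack network create selfservice
[root@controller ~]# openstack subnet create --network selfservice \
  --dns-nameserver 114.114.114.114 --gateway 192.168.1.1 \
  --subnet-range 192.168.1.0/24 selfservice
[root@controller ~]# openstack router create router
[root@controller ~]# openstack router add subnet router selfservice
[root@controller ~]# openstack router set router --external-gateway provider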
[root@controller ~]# dnf install -y openstack-dashboard
#For every option and parameter below, search for it in this file; modify it if it exists, add it if it does not
[root@controller ~]# vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
#If name resolution is not configured, the IPs must be listed here
ALLOWED_HOSTS = ['controller','compute','10.10.10.10','10.10.10.20']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
},
}
OPENSTACK_KEYSTONE_URL = "http://%s/identity/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 3,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': False,
'enable_quotas': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
[root@controller ~]# vi /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL} #add this line
#Edit the dashboard configuration file
[root@controller ~]# vim /etc/openstack-dashboard/local_settings
WEBROOT = '/dashboard/' #add this line
[root@controller ~]# systemctl restart httpd.service memcached.service
#If name resolution is not configured, use the IP directly
http://10.10.10.10/dashboard
#To use name resolution, add the following to the hosts file under C:\Windows\System32\drivers\etc on your PC
10.10.10.10 controller
10.10.10.20 compute
#Then access
http://controller/dashboard
#Log in to the database
[root@controller ~]# mysql -u root -p111111
#Create the cinder database
MariaDB [(none)]> CREATE DATABASE cinder;
#Grant privileges
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '111111';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '111111';
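#Install the cinder packages on the controller node before editing its configuration (from the same Victoria repository set up earlier):
[root@controller ~]# dnf -y install openstack-cinder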
#Back up the configuration file and strip comments
[root@controller ~]# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
[root@controller ~]# grep -Ev '^$|#' /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
#Edit
[root@controller ~]# vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:111111@controller
auth_strategy = keystone
my_ip = 10.10.10.10
[database]
connection = mysql+pymysql://cinder:111111@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 111111
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[root@controller ~]# su -s /bin/sh -c "cinder-manage db sync" cinder
[root@controller ~]# mysql -uroot -p111111
MariaDB [(none)]> use cinder;
MariaDB [cinder]> show tables;
MariaDB [cinder]> quit
#Create the cinder user
[root@controller ~]# openstack user create --domain default --password 111111 cinder
#Grant the admin role
[root@controller ~]# openstack role add --project service --user cinder admin
#Create the cinderv2 and cinderv3 services
[root@controller ~]# openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
[root@controller ~]# openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
#public
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev2 public http://controller:8776/v2/%\(project_id\)s
#internal
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev2 internal http://controller:8776/v2/%\(project_id\)s
#admin
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev2 admin http://controller:8776/v2/%\(project_id\)s
#public
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev3 public http://controller:8776/v3/%\(project_id\)s
#internal
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev3 internal http://controller:8776/v3/%\(project_id\)s
#admin
[root@controller ~]# openstack endpoint create --region RegionOne \
> volumev3 admin http://controller:8776/v3/%\(project_id\)s
[root@controller ~]# openstack endpoint list
#Edit the nova configuration file
[root@controller cinder]# vi /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
#Restart nova
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable --now openstack-cinder-api.service openstack-cinder-scheduler.service
#Check that the second disk (/dev/sdb) is present on the compute node
[root@compute ~]# fdisk --list
[root@compute ~]# dnf -y install lvm2 device-mapper-persistent-data
[root@compute ~]# pvcreate /dev/sdb
[root@compute ~]# vgcreate cinder-volumes /dev/sdb
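#Optionally confirm the physical volume and volume group were created:
[root@compute ~]# pvs
[root@compute ~]# vgs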
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/lvm/lvm.conf /etc/lvm/lvm.conf.bak
[root@compute ~]# grep -Ev '^$|#' /etc/lvm/lvm.conf.bak > /etc/lvm/lvm.conf
#Edit
[root@compute ~]# vi /etc/lvm/lvm.conf
devices {
        filter = [ "a/sda/", "a/sdb/", "r/.*/" ]
}
[root@compute ~]# dnf install -y openstack-cinder targetcli python3-keystone
#Back up the configuration file and strip comments
[root@compute ~]# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
[root@compute ~]# grep -Ev '^$|#' /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf
#Edit
[root@compute ~]# vim /etc/cinder/cinder.conf
[DEFAULT]
transport_url = rabbit://openstack:111111@controller
auth_strategy = keystone
my_ip = 10.10.10.20
enabled_backends = lvm
glance_api_servers = http://controller:9292
[database]
connection = mysql+pymysql://cinder:111111@controller/cinder
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = 111111
[lvm] #add this section if it does not exist
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes #must match the name of the volume group created above
target_protocol = iscsi
target_helper = lioadm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[root@compute ~]# systemctl enable --now openstack-cinder-volume.service target.service
[root@controller ~]# openstack volume service list
#Output like the following means everything is up
+------------------+-------------+------+---------+-------+----------------------------+
| Binary | Host | Zone | Status | State | Updated At |
+------------------+-------------+------+---------+-------+----------------------------+
| cinder-scheduler | controller | nova | enabled | up | 2023-05-11T08:12:03.000000 |
| cinder-volume | compute@lvm | nova | enabled | up | 2023-05-11T08:12:02.000000 |
+------------------+-------------+------+---------+-------+----------------------------+
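#As a final check, a test volume can be created and listed; the name test-volume is only an example:
[root@controller ~]# openstack volume create --size 1 test-volume
[root@controller ~]# openstack volume list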
This completes the deployment of the OpenStack Victoria cloud platform.