Install OpenStack Pike

#######################################################################################################################################
#  Environment initialization  #
################################

# Management Network   10.0.0.0/24   (private network, management traffic)
# Provider   Network   203.0.113.0/24 (public network, external access to instances)


# controller network settings
hostnamectl set-hostname controller
NetName=ens32
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet ip4 10.0.0.11/24 ipv4.dns "202.103.24.68" ipv4.gateway "10.0.0.2"
nmcli connection reload

# compute1 network settings
hostnamectl set-hostname compute1
NetName=ens32
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet ip4 10.0.0.31/24 ipv4.dns "202.103.24.68" ipv4.gateway "10.0.0.2"
nmcli connection reload

# block1 network settings
hostnamectl set-hostname block1
NetName=ens32
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet ip4 10.0.0.41/24 ipv4.dns "202.103.24.68" ipv4.gateway "10.0.0.2"
nmcli connection reload

# object1 network settings
hostnamectl set-hostname object1
NetName=ens32
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet ip4 10.0.0.51/24 ipv4.dns "202.103.24.68" ipv4.gateway "10.0.0.2"
nmcli connection reload

# object2 network settings
hostnamectl set-hostname object2
NetName=ens32
rm -f /etc/sysconfig/network-scripts/ifcfg-$NetName
nmcli con add con-name $NetName ifname $NetName autoconnect yes type ethernet ip4 10.0.0.52/24 ipv4.dns "202.103.24.68" ipv4.gateway "10.0.0.2"
nmcli connection reload
# Configure /etc/hosts name resolution
if [ -z "$(grep 'OpenStack_0' /etc/hosts)" ];then
cat <<EOF  >>/etc/hosts

# OpenStack_0   $(date +'%F %T')
10.0.0.11       controller
10.0.0.31       compute1
10.0.0.41       block1
10.0.0.51       object1
10.0.0.52       object2
EOF
fi

# Passwordless SSH between nodes (key-based login)
curl http://home.onlycloud.xin/code/SSH_KEY -o ~/SSH_KEY
. ~/SSH_KEY
SSH_KEY controller
SSH_KEY compute1
SSH_KEY block1
SSH_KEY object1
SSH_KEY object2
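
# The SSH_KEY helper above comes from an external URL and is not shown in this
# post. A minimal equivalent sketch (an assumption, not the author's script):
# generate a key once, then push it to a node with ssh-copy-id.
SSH_KEY_sketch() {
    # $1 = target host; the target must still allow password authentication
    [ -f ~/.ssh/id_rsa ] || ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
    ssh-copy-id -i ~/.ssh/id_rsa.pub "root@$1"
}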

# Sync hosts; disable firewalld and SELinux; set up yum repos
for node in controller compute1 block1 object1 object2; do
    echo "--------------- $node ---------------"
    scp /etc/hosts $node:/etc/hosts
    ssh $node "
    # Stop and disable firewalld
    systemctl stop firewalld
    systemctl disable firewalld
    firewall-cmd --state

    # Disable SELinux
    setenforce 0
    sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
    grep --color=auto '^SELINUX' /etc/selinux/config

    # Download the Aliyun yum repo file
    rm -f /etc/yum.repos.d/*
    curl -so /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo
    sed -i '/aliyuncs.com/d' /etc/yum.repos.d/Centos-7.repo

    # Bring up the second (provider) NIC ens34 with a static config
    sed -i 's/ONBOOT=.*/ONBOOT=yes/' /etc/sysconfig/network-scripts/ifcfg-ens34
    sed -i 's/BOOTPROTO=.*/BOOTPROTO=none/' /etc/sysconfig/network-scripts/ifcfg-ens34"
done

# Network connectivity test
for node in controller compute1 block1 object1 object2; do
    echo "--------------- $node ---------------"
    ssh $node "
    ping -c 3 openstack.org
    ping -c 3 controller
    ping -c 3 compute1"
done

#######################################################################################################################################
#  Configure a local YUM repository  #
######################################
# Mount the CIFS share
mkdir /mnt/yum_pike
echo '//192.168.0.50/openstack /mnt cifs username=admin,vers=2.0,rw' >>/etc/fstab
mount -a
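
# Optional check: confirm the share is actually mounted before using it
mount | grep cifs
df -h /mnt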

# Set the yum package cache directory
mkdir -p /www/share/yum
cp /etc/yum.conf{,.bak}
sed -i 's#^keepcache=0#keepcache=1#' /etc/yum.conf
sed -i 's/^cachedir/#cachedir/' /etc/yum.conf
sed -i '3i cachedir=/www/share/yum/$basearch/$releasever' /etc/yum.conf
head /etc/yum.conf

# Move the cached packages to the new repo path
find /www/share/yum/ -name '*.rpm' | sed -r 's#.*#mv & /mnt/tmp/#' | bash
yum install -y createrepo
createrepo -pdo  /mnt/tmp/ /mnt/tmp/
mv /mnt/tmp/* /mnt/yum_pike

# Create the repo file
cat <<EOF >/mnt/yum/openstack-pike.repo
[My_share]
name=My_Source
baseurl=http://192.168.0.2/yum_pike/
gpgcheck=0
enabled=1
cost=88
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7
EOF

rm -f /etc/yum.repos.d/*
curl http://192.168.0.2/yum/openstack-pike.repo -o /etc/yum.repos.d/openstack-pike.repo
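
# Optional check: rebuild the yum cache and confirm the My_share repo is visible
yum clean all
yum repolist enabled | grep My_share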

#######################################################################################################################################
#  Generate OpenStack passwords; chrony time sync  #
####################################################
# Generate a file with all the passwords OpenStack needs
if [ ! -f /etc/PASS ];then
list='ROOT_DBPASS ADMIN_PASS CINDER_DBPASS CINDER_PASS DASH_DBPASS DEMO_PASS GLANCE_DBPASS GLANCE_PASS KEYSTONE_DBPASS
  METADATA_SECRET NEUTRON_DBPASS NEUTRON_PASS NOVA_DBPASS NOVA_PASS PLACEMENT_PASS RABBIT_PASS'
for user in $list; do
    echo "# $user" | tee -a PASS
    echo -e "$user=$(openssl rand -hex 10)\n" | tee -a ~/PASS
done
cp ~/PASS /etc/PASS
cat ~/PASS
. ~/PASS
fi

# Copy the password file to the other nodes
scp ~/PASS compute1:~/PASS
scp ~/PASS block1:~/PASS
scp ~/PASS object1:~/PASS
scp ~/PASS object2:~/PASS
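
# Optional check: every node should now have the file, with one NAME=value line
# per password (16 with the list above)
for node in compute1 block1 object1 object2; do
    echo "--------------- $node ---------------"
    ssh $node "grep -c '=' ~/PASS"
done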

# Configure NTP (server side)
# Install chrony
yum install -y chrony

# chrony server configuration file
cat <<EOF  >/etc/chrony.conf
# net server $(date +'%F %T')
server ntp.aliyun.com iburst

driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony

# Subnet that is allowed to synchronize from this server
allow 10.0.0.0/24
EOF

# Enable at boot and start the service
systemctl enable chronyd
systemctl start chronyd

# Verify
chronyc sources

# Configure NTP (clients)
for node in controller compute1 block1 object1 object2; do
    echo "--------------- $node ---------------"
    ssh -T $node <<EOF
    # Install chrony
    yum install -y chrony

    echo "# ntp client $(date +'%F %T')" >/etc/chrony.conf
    echo 'server controller iburst' >>/etc/chrony.conf
    echo 'driftfile /var/lib/chrony/drift' >>/etc/chrony.conf
    echo 'makestep 1.0 3' >>/etc/chrony.conf
    echo 'rtcsync' >>/etc/chrony.conf
    echo 'logdir /var/log/chrony' >>/etc/chrony.conf

    # Enable at boot and start the service
    systemctl enable chronyd
    systemctl restart chronyd

    # Verify
    chronyc sources
EOF
done

# Configure the OpenStack repository, the openstack client, and openstack-selinux
for node in compute1 block1 object1 object2; do
    echo "--------------- $node ---------------"
    ssh -T $node <<'EOF'
    # Enable the OpenStack repository
    yum install -y centos-release-openstack-pike

    # Fix the QEMU repo URL (replace $contentdir)
    sed -i 's/$contentdir/centos/' /etc/yum.repos.d/CentOS-QEMU-EV.repo
    yum install -y https://rdoproject.org/repos/rdo-release.rpm

    # Install the openstack client and openstack-selinux
    # yum upgrade
    yum install -y python-openstackclient openstack-selinux
EOF
done
#######################################################################################################################################
#  Install the MariaDB database (controller node)  #
####################################################
# Install MariaDB
yum install -y mariadb mariadb-server python2-PyMySQL

# Create the OpenStack database configuration
cat <<EOF  >/etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 10.0.0.11

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF

# Raise the file limits so the database can reach max_connections
cat <<EOF  >>/etc/security/limits.conf
mysql soft nofile 65535
mysql hard nofile 65535
EOF

mkdir /etc/systemd/system/mariadb.service.d
cat <<EOF  >/etc/systemd/system/mariadb.service.d/filelimit.conf
[Service]
LimitNOFILE=infinity
EOF

# Enable at boot and start the database service
systemctl enable mariadb
systemctl start mariadb

# Secure the database (the password is read from ~/PASS)
. ~/PASS
/usr/bin/expect <<EOF
set timeout 30
spawn mysql_secure_installation
expect {
    "enter for none" { send "\r"; exp_continue}
    "Y/n" { send "Y\r" ; exp_continue}
    "password:" { send "$ROOT_DBPASS\r"; exp_continue}
    "new password:" { send "$ROOT_DBPASS\r"; exp_continue}
    "Y/n" { send "Y\r" ; exp_continue}
    eof { exit }
}
EOF
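
# Optional check: confirm the new root password works before continuing
mysql -u root -p$ROOT_DBPASS -e "SELECT VERSION();"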

#######################################################################################################################################
#  Install the RabbitMQ message queue, Memcached, and Etcd (controller node)  #
###############################################################################
# Install RabbitMQ
yum install -y rabbitmq-server

# Enable at boot and start the service
systemctl enable rabbitmq-server
systemctl start rabbitmq-server

# Enable the management web plugin (WebUI: http://IP:15672)
rabbitmq-plugins enable rabbitmq_management

# Add the openstack user and grant permissions (password from ~/PASS)
rabbitmqctl add_user openstack $RABBIT_PASS
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
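
# Optional check: confirm the openstack user exists with full permissions
rabbitmqctl list_users
rabbitmqctl list_permissions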


# Install Memcached (controller node)
yum install -y memcached python-memcached

# Also listen on the controller node's management address
sed -i 's/^OPTIONS/# OPTIONS/' /etc/sysconfig/memcached
sed -i '$a OPTIONS="-l 127.0.0.1,::1,controller"' /etc/sysconfig/memcached

# Enable at boot and start the service
systemctl enable memcached
systemctl start memcached
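
# Optional check: memcached-tool ships with the memcached package; any stats
# output means the daemon is answering on the controller address
memcached-tool controller:11211 stats | head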

# Install etcd, a distributed reliable key-value store (controller node)
yum install -y etcd

# Configure
cat <<EOF  >/etc/etcd/etcd.conf
#[Member]
ETCD_NAME="controller"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://10.0.0.11:2380"
ETCD_LISTEN_CLIENT_URLS="http://10.0.0.11:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.0.0.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://10.0.0.11:2379"
ETCD_INITIAL_CLUSTER="controller=http://10.0.0.11:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# Enable at boot and start the service
systemctl enable etcd
systemctl start etcd
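
# Optional check: write and read back a key (etcdctl v2 syntax, the default
# with the CentOS 7 etcd package)
etcdctl --endpoints http://10.0.0.11:2379 set /ping ok
etcdctl --endpoints http://10.0.0.11:2379 get /ping
etcdctl --endpoints http://10.0.0.11:2379 cluster-health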

#######################################################################################################################################
#  Install and configure the Keystone service and httpd (controller node)  #
#############################################################################
[Keystone install guide](https://docs.openstack.org/keystone/pike/install/index.html)

# Create the database
. ~/PASS
mysql -u root -p$ROOT_DBPASS -te "
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '$KEYSTONE_DBPASS';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '$KEYSTONE_DBPASS';
select user,host from mysql.user;
show databases;"

# Install httpd and keystone
yum install -y openstack-keystone httpd mod_wsgi

# Keystone configuration
cp /etc/keystone/keystone.conf{,.bak}
cat <<EOF  >/etc/keystone/keystone.conf
[database]
connection = mysql+pymysql://keystone:$KEYSTONE_DBPASS@controller/keystone

[token]
provider = fernet
EOF

# Populate the keystone database
su -s /bin/sh -c "keystone-manage db_sync" keystone

# Initialize the Fernet key repositories
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone

# Bootstrap the Identity service APIs
keystone-manage bootstrap --bootstrap-password $ADMIN_PASS \
  --bootstrap-admin-url http://controller:35357/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne

# Configure httpd
cp /etc/httpd/conf/httpd.conf{,.bak}
echo 'ServerName controller' | tee -a /etc/httpd/conf/httpd.conf

# Link the keystone WSGI config into httpd
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

# Enable at boot and start the service
systemctl enable httpd
systemctl start httpd

# Set admin credentials as environment variables
export OS_USERNAME=admin
export OS_PASSWORD=$ADMIN_PASS
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3

# Create the service project
openstack project create --domain default --description "Service Project" service

# Create the demo project and user
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password=$DEMO_PASS demo

# Create the user role and add it to the demo project and user
openstack role create user
openstack role add --project demo --user demo user

# Unset the temporary variables
unset OS_AUTH_URL OS_PASSWORD

# Request an authentication token as admin
openstack --os-auth-url http://controller:35357/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name admin --os-username admin --os-password $ADMIN_PASS token issue

# Request an authentication token as demo
openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name demo --os-username demo --os-password $DEMO_PASS token issue

# Create the admin environment script
cat <<EOF  >~/admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=$ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF

# Create the demo environment script
cat <<EOF  >~/demo-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=$DEMO_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF

# Verify the credentials
. ~/admin-openrc
openstack token issue

. ~/demo-openrc
openstack token issue

#######################################################################################################################################
#  Install and configure Glance (controller node)  #
####################################################
[Glance documentation](https://docs.openstack.org/glance/pike/user/index.html)

# Create the glance database and grant privileges
mysql -u root -p$ROOT_DBPASS -te "
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '$GLANCE_DBPASS';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '$GLANCE_DBPASS';
select user,host from mysql.user;
show databases;"

# Create the glance user and grant the admin role
. ~/admin-openrc
openstack user create --domain default --password=$GLANCE_PASS glance
openstack role add --project service --user glance admin

# Create the glance service and API endpoints
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292

# Install glance
yum install -y openstack-glance

# Configure glance-api
cp /etc/glance/glance-api.conf{,.bak}
cat <<EOF  >/etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:$GLANCE_DBPASS@controller/glance

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = $GLANCE_PASS

[paste_deploy]
flavor = keystone

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
EOF

# Configure glance-registry
cp /etc/glance/glance-registry.conf{,.bak}
cat <<EOF  >/etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:$GLANCE_DBPASS@controller/glance

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = $GLANCE_PASS

[paste_deploy]
flavor = keystone
EOF

# Populate the glance database
su -s /bin/sh -c "glance-manage db_sync" glance

# Enable at boot and start the services
systemctl enable openstack-glance-api openstack-glance-registry
systemctl start openstack-glance-api openstack-glance-registry

# Verify (upload an image)
yum install -y wget
. ~/admin-openrc
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
# wget http://192.168.0.2/img/cirros-0.3.5-x86_64-disk.img
# wget http://192.168.0.2/img/cirros-0.4.0-x86_64-disk.img
# wget http://192.168.0.2/img/CentOS-7-x86_64-GenericCloud-1805.qcow2c

# Upload the image to the Image service with the qcow2 disk format, bare container format, and public visibility so all projects can use it
openstack image create "cirros-3.5" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
# openstack image create "cirros-4.0" --file cirros-0.4.0-x86_64-disk.img --disk-format qcow2 --container-format bare --public
# openstack image create "CentOS-7.5" --file CentOS-7-x86_64-GenericCloud-1805.qcow2c --disk-format qcow2 --container-format bare --public

#######################################################################################################################################
#  Install and configure the Nova compute service (controller node)  #
#######################################################################
[Nova install guide](https://docs.openstack.org/nova/pike/install/)
[Nova admin guide](https://docs.openstack.org/nova/pike/admin/index.html)

# Create the nova databases and grant privileges
. ~/PASS
mysql -u root -p$ROOT_DBPASS -te "
CREATE DATABASE nova_api;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '$NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_DBPASS';
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '$NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_DBPASS';
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '$NOVA_DBPASS';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '$NOVA_DBPASS';
select user,host from mysql.user;
show databases;"

# Create the nova user and grant the admin role
. ~/admin-openrc
openstack user create --domain default --password=$NOVA_PASS nova
openstack role add --project service --user nova admin

# Create the compute service and API endpoints
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1

# Create the placement service user and grant the admin role
openstack user create --domain default --password=$PLACEMENT_PASS placement
openstack role add --project service --user placement admin

# Create the Placement service and API endpoints
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778

# Install nova
yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console \
  openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api

# Nova configuration (single-quoted heredoc: the $...PASS placeholders are filled in by the sed commands below; $my_ip is resolved by oslo.config at runtime)
cp /etc/nova/nova.conf{,.bak}
cat <<'EOF'  >/etc/nova/nova.conf
[DEFAULT]
my_ip = 10.0.0.11
use_neutron = True
enabled_apis = osapi_compute,metadata
firewall_driver = nova.virt.firewall.NoopFirewallDriver
transport_url = rabbit://openstack:$RABBIT_PASS@controller:5672

[api_database]
connection = mysql+pymysql://nova:$NOVA_DBPASS@controller/nova_api

[database]
connection = mysql+pymysql://nova:$NOVA_DBPASS@controller/nova

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = $NOVA_PASS

[vnc]
enabled = true
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = $PLACEMENT_PASS
EOF

sed -i "s/\$RABBIT_PASS/$RABBIT_PASS/" /etc/nova/nova.conf
sed -i "s/\$NOVA_DBPASS/$NOVA_DBPASS/" /etc/nova/nova.conf
sed -i "s/\$NOVA_PASS/$NOVA_PASS/" /etc/nova/nova.conf
sed -i "s/\$PLACEMENT_PASS/$PLACEMENT_PASS/" /etc/nova/nova.conf



# Configure httpd access for the Placement API
cp /etc/httpd/conf.d/00-nova-placement-api.conf{,.bak}
cat <<EOF  >>/etc/httpd/conf.d/00-nova-placement-api.conf

# nova-placement-api
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
EOF

# Restart httpd
systemctl restart httpd

# Populate the nova_api, nova, and nova_cell0 databases
su -s /bin/sh -c "nova-manage api_db sync" nova      # this step prints a deprecation warning (the docs say to ignore it)
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova

# Verify
nova-manage cell_v2 list_cells
mysql -h controller -u nova -p$NOVA_DBPASS -te "use nova_api;show tables;"
mysql -h controller -u nova -p$NOVA_DBPASS -te "use nova;show tables;"
mysql -h controller -u nova -p$NOVA_DBPASS -te "use nova_cell0;show tables;"

# Enable at boot and start the services
systemctl enable openstack-nova-api openstack-nova-consoleauth openstack-nova-scheduler \
  openstack-nova-conductor openstack-nova-novncproxy

systemctl start openstack-nova-api openstack-nova-consoleauth openstack-nova-scheduler \
  openstack-nova-conductor openstack-nova-novncproxy

systemctl status openstack-nova-api openstack-nova-consoleauth openstack-nova-scheduler \
  openstack-nova-conductor openstack-nova-novncproxy

# Verify
openstack compute service list      # list nova service components and their status
openstack compute service list --service nova-compute      # list compute nodes
openstack catalog list      # list all registered API endpoints
openstack image list      # list images
nova-status upgrade check      # check Cells v2 and API status (every check should show Result: Success)
openstack host list      # list hosts and the nova-related services running on them
openstack host show compute1      # show vCPU, memory, and disk usage of instances on the host


#######################################################################################################################################
#  Install and configure the Neutron networking service (controller node)  #
#############################################################################
# Choose ONE of the two network options below; after changing the option, re-sync the database and restart the services
[Official documentation](https://docs.openstack.org/neutron/pike/install/overview.html)

# Install and configure the controller node
# Here the provider network (203.0.113.0) is public and the management network (10.0.0.0) is private
# Create the neutron database and grant privileges
. ~/PASS
mysql -u root -p$ROOT_DBPASS -te "
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '$NEUTRON_DBPASS';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '$NEUTRON_DBPASS';
select user,host from mysql.user;
show databases;"

# Create the neutron user and grant the admin role
. ~/admin-openrc
openstack user create --domain default --password=$NEUTRON_PASS neutron
openstack role add --project service --user neutron admin

# Create the neutron service and API endpoints
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
openstack endpoint list      # list all registered API endpoints (-f chooses the output format: 'csv', 'json', 'table', 'value', 'yaml')
openstack catalog list      # list all registered API endpoints


############################################################
# Option 1: Provider networks (choose one of the two)  layer 2 only
# Install neutron and the networking plugins
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

# Create the neutron.conf configuration file
cp /etc/neutron/neutron.conf{,.bak}
cat <<EOF   >/etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:$NEUTRON_DBPASS@controller/neutron

[DEFAULT]
core_plugin = ml2
service_plugins =
auth_strategy = keystone
notify_nova_on_port_data_changes = true
notify_nova_on_port_status_changes = true
transport_url = rabbit://openstack:$RABBIT_PASS@controller


[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = $NEUTRON_PASS

[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = $NOVA_PASS

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
EOF

# Create the ML2 plugin configuration (layer 2: enable the flat and vlan type drivers; the provider network maps to "provider")
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
cat <<EOF   >/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
tenant_network_types =
type_drivers = flat,vlan
mechanism_drivers = linuxbridge
extension_drivers = port_security

[ml2_type_flat]
flat_networks = provider

[securitygroup]
enable_ipset = true
EOF

# Configure the Linux bridge agent ("provider" maps to the provider/public NIC)
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF



# Configure the DHCP agent
cp /etc/neutron/dhcp_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
enable_isolated_metadata = true
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
EOF


############################################################
# Option 2: Self-service networks (choose one of the two)  layer 2 + layer 3

# Install neutron and the networking plugins
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables

# Create the neutron.conf configuration file
cp /etc/neutron/neutron.conf{,.bak}
cat <<EOF   >/etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:$NEUTRON_DBPASS@controller/neutron

[DEFAULT]
core_plugin = ml2
auth_strategy = keystone
service_plugins = router
allow_overlapping_ips = true
notify_nova_on_port_data_changes = true
notify_nova_on_port_status_changes = true
transport_url = rabbit://openstack:$RABBIT_PASS@controller

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = $NEUTRON_PASS

[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = $NOVA_PASS

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
EOF

# Create the ML2 plugin configuration (enable the flat, vlan, and vxlan type drivers; the provider network maps to "provider")
cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
cat <<EOF   >/etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
tenant_network_types = vxlan
type_drivers = flat,vlan,vxlan
extension_drivers = port_security
mechanism_drivers = linuxbridge,l2population

[ml2_type_flat]
flat_networks = provider

[ml2_type_vxlan]
vni_ranges = 1:1000

[securitygroup]
enable_ipset = true
EOF

# Configure the Linux bridge agent ("provider" maps to the provider/public NIC; local_ip is this node's management IP, 10.0.0.11 on the controller)
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34

[vxlan]
enable_vxlan = true
l2_population = true
local_ip = 10.0.0.11

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF

# Configure the layer-3 agent
cp /etc/neutron/l3_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/l3_agent.ini
[DEFAULT]
interface_driver = linuxbridge
EOF

# Configure the DHCP agent
cp /etc/neutron/dhcp_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/dhcp_agent.ini
[DEFAULT]
interface_driver = linuxbridge
enable_isolated_metadata = true
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
EOF

#########
#  END  #
############################################################

# Configure the metadata agent
cp /etc/neutron/metadata_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/metadata_agent.ini
[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = $METADATA_SECRET
EOF

# Configure the Compute service to use the Networking service
cat <<EOF   >>/etc/nova/nova.conf

[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = $NEUTRON_PASS
service_metadata_proxy = true
metadata_proxy_shared_secret = $METADATA_SECRET
EOF

# Link the plugin configuration file
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

# Populate the database
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

# Restart the Compute API service
systemctl restart openstack-nova-api

# Enable at boot and start the networking services
systemctl enable neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent
systemctl start neutron-server neutron-linuxbridge-agent neutron-dhcp-agent neutron-metadata-agent

# Enable at boot and start the layer-3 agent (only for network option 2: self-service networks)
systemctl enable neutron-l3-agent
systemctl start neutron-l3-agent

# Verify the service and agent status (sample agent list output below)
openstack service list
openstack network agent list
#+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
#| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
#+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
#| 0256098d-80e6-41a1-8ff8-726c715e0bc1 | Linux bridge agent | controller | None              | :-)   | UP    | neutron-linuxbridge-agent |
#| 16a3eaee-8ba1-4b4d-8908-f61908dda6bc | DHCP agent         | controller | nova              | :-)   | UP    | neutron-dhcp-agent        |
#| adcee175-91a1-4ca9-909d-39bda3c01152 | Metadata agent     | controller | None              | :-)   | UP    | neutron-metadata-agent    |
#+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
#########
#  END  #
#######################################################################################################################################


#######################################################################################################################################
#  Dashboard (controller node)  #
#################################
[Installation guide](https://docs.openstack.org/horizon/pike/install/index.html)
[Customizing Horizon](https://docs.openstack.org/horizon/pike/configuration/customizing.html)
[Customize and configure the Dashboard](https://docs.openstack.org/horizon/pike/admin/customize-configure.html)
[OpenStack Dashboard user documentation](https://docs.openstack.org/horizon/pike/user/index.html)

# Install the dashboard
yum install -y openstack-dashboard

# Back up the default configuration file
cp /etc/openstack-dashboard/local_settings{,.bak}
Setfiles='/etc/openstack-dashboard/local_settings'

# Configure the dashboard to use the OpenStack services on the controller node
sed -i 's#OPENSTACK_HOST = "127.0.0.1"#OPENSTACK_HOST = "controller"#' $Setfiles

# Allow access from all hosts
sed -i "/ALLOWED_HOSTS/cALLOWED_HOSTS = ['*', ]" $Setfiles

# Enable memcached session caching
sed -i '158,163s/#//' $Setfiles
sed -i '165,169s/.*/#&/' $Setfiles
sed -i "157a SESSION_ENGINE = 'django.contrib.sessions.backends.cache'" $Setfiles
sed -i "s/127.0.0.1:11211/controller:11211/" $Setfiles

# Enable multi-domain support and configure the default domain (uncomment and set the value to True)
sed -i '/ULTIDOMAIN_SUPPORT/cOPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True' $Setfiles
sed -i "s@^#OPENSTACK_KEYSTONE_DEFAULT@OPENSTACK_KEYSTONE_DEFAULT@" $Setfiles

# Configure "user" as the default role for users created through the dashboard
sed -i 's#_member_#user#g' $Setfiles

# Configure the API versions
cat <<EOF   >>$Setfiles

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}
EOF

# Set the time zone
sed -i 's#UTC#Asia/Shanghai#g' $Setfiles

# If you chose network option 1, disable support for layer-3 networking services
sed -i "324,331 s/True/False/" $Setfiles

# If you chose network option 2, enable support for layer-3 networking services
sed -i "324,331 s/False/True/" $Setfiles

# Fix for an "internal server error" when opening the dashboard in a browser
sed -i '3a WSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf

# Restart httpd and memcached
systemctl restart httpd memcached
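
# Optional check: the dashboard should answer at /dashboard (the default URL
# path for the RDO horizon package); expect HTTP 200 or a 302 to the login page
curl -s -o /dev/null -w '%{http_code}\n' http://controller/dashboard/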

#######################################################################################################################################
#  Storage  #
#############
[Manila storage configuration](https://docs.openstack.org/manila/queens/install/)





#######################################################################################################################################
#  Compute node installation and configuration [compute1: 10.0.0.31]  #
########################################################################
# Enable the OpenStack repository
yum install -y centos-release-openstack-pike

# Fix the QEMU repo URL (replace $contentdir)
sed -i 's/$contentdir/centos/' /etc/yum.repos.d/CentOS-QEMU-EV.repo
yum install -y https://rdoproject.org/repos/rdo-release.rpm

# Install the openstack client and openstack-selinux
# yum upgrade
yum install -y python-openstackclient openstack-selinux

# Install nova-compute
yum install -y openstack-nova-compute

# Install the compute node networking packages
yum install -y openstack-neutron-linuxbridge ebtables ipset

# Configure nova (single-quoted heredoc: the $...PASS placeholders are filled in by the sed commands below)
. ~/PASS
cp /etc/nova/nova.conf{,.bak}
cat <<'EOF' >/etc/nova/nova.conf
[DEFAULT]
my_ip = 10.0.0.31
use_neutron = True
enabled_apis = osapi_compute,metadata
firewall_driver = nova.virt.firewall.NoopFirewallDriver
transport_url = rabbit://openstack:$RABBIT_PASS@controller

[api]
auth_strategy = keystone

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = $NOVA_PASS

[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html

[glance]
api_servers = http://controller:9292

[oslo_concurrency]
lock_path = /var/lib/nova/tmp

[placement]
os_region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:35357/v3
username = placement
password = $PLACEMENT_PASS

# On a virtual machine virt_type must be qemu; on bare metal set it to kvm
# (check hardware support with: egrep -c '(vmx|svm)' /proc/cpuinfo)
[libvirt]
virt_type = qemu

[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = $NEUTRON_PASS
EOF

. ~/PASS
sed -i "s/\$RABBIT_PASS/$RABBIT_PASS/" /etc/nova/nova.conf
sed -i "s/\$NOVA_PASS/$NOVA_PASS/" /etc/nova/nova.conf
sed -i "s/\$PLACEMENT_PASS/$PLACEMENT_PASS/" /etc/nova/nova.conf
sed -i "s/\$NEUTRON_PASS/$NEUTRON_PASS/" /etc/nova/nova.conf

# Configure neutron
cp /etc/neutron/neutron.conf{,.bak}
cat <<EOF   >/etc/neutron/neutron.conf
[DEFAULT]
auth_strategy = keystone
transport_url = rabbit://openstack:$RABBIT_PASS@controller

[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = $NEUTRON_PASS

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
EOF

##################################################################
#  Choose the same network option you chose on the controller node
#  Network option 1: provider networks

# Configure the Linux bridge agent ("provider" maps to the provider/public NIC)
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34

[vxlan]
enable_vxlan = false

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF

##################################################################
#  Choose the same network option you chose on the controller node
#  Network option 2: self-service networks


# Configure the Linux bridge agent ("provider" maps to the provider/public NIC; local_ip is this node's own management IP)
cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
cat <<EOF   >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens34

[vxlan]
enable_vxlan = true
l2_population = true
local_ip = 10.0.0.31

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
EOF
#########
#  END  #
##################################################################


# Enable at boot and start the services
systemctl enable libvirtd openstack-nova-compute neutron-linuxbridge-agent
systemctl start libvirtd openstack-nova-compute neutron-linuxbridge-agent

##################################################################
# Discover compute nodes (run the following commands on the controller node)

# Manually discover and register compute nodes
. ~/admin-openrc      # load the admin credentials
openstack compute service list --service nova-compute      # check whether the new compute node shows up
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova      # register the compute node

# Automatic discovery of compute nodes: add the following to the controller's nova.conf
[scheduler]
discover_hosts_in_cells_interval = 300
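
# A minimal sketch of applying the same setting without editing the file by
# hand (assumes openstack-utils is installed; see the VLAN section below):
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
systemctl restart openstack-nova-scheduler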

# Verify
openstack compute service list      # list nova service components and their status
openstack compute service list --service nova-compute      # list compute nodes
openstack catalog list      # list all registered API endpoints
openstack image list      # list images
nova-status upgrade check      # check Cells v2 and API status (every check should show Result: Success)
openstack host list      # list hosts and the nova-related services running on them
openstack host show compute1      # show vCPU, memory, and disk usage of instances on the host




#######################################################################################################################################
#  Post-installation initialization  #
######################################
# [Launch an instance](https://docs.openstack.org/install-guide/launch-instance.html)

# Create a key pair (select it when launching an instance for passwordless login; key pair name: mykey)
ssh-keygen -t dsa -P '' -f ~/.ssh/id_dsa      # generate a key pair
nova keypair-add --pub-key ~/.ssh/id_dsa.pub mykey      # upload the local public key
nova keypair-list      # list key pairs

# Create an availability zone (host aggregate)
nova aggregate-create NOVA-Cluster_01 NOVA-Cluster_01      # create the NOVA-Cluster_01 aggregate
nova aggregate-add-host NOVA-Cluster_01 compute1      # add host compute1 to the NOVA-Cluster_01 aggregate
nova aggregate-list      # list the aggregates


# Create security group rules [allow icmp and ssh (ping, ssh)]
openstack security group rule create --proto icmp default
openstack security group rule create --proto tcp --dst-port 22 default


# Create flavors
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano      # flavor: 1 vCPU, 64 MB RAM, 1 GB disk
openstack flavor create --id 1 --vcpus 1 --ram 512 --disk 12 m2.nano      # flavor: 1 vCPU, 512 MB RAM, 12 GB disk
openstack flavor list      # list all flavors

#############################################################################################################################
#  Create a network [network option 1: provider network]  #
############################################################
# 1. [Provider network](https://docs.openstack.org/install-guide/launch-instance-networks-provider.html)
# 2. [Self-service network](https://docs.openstack.org/install-guide/launch-instance-networks-selfservice.html)
# Without VLAN-type networks enabled, only one network can be created [provider: the public network through which clients reach the instances]
# The --share option allows all projects to use the virtual network
# The --external option defines the virtual network as external; use --internal for an internal network (the default is internal)
# Management Network   10.0.0.0/24   (private network, management traffic)
# Provider   Network   203.0.113.0/24 (public network, external access to instances)
NETWORK='203.0.113'
openstack network create --share --external --provider-physical-network provider \
  --provider-network-type flat Provider_$NETWORK

## Create a subnet on the Provider_$NETWORK network
openstack subnet create --network Provider_$NETWORK --allocation-pool start=$NETWORK.70,end=$NETWORK.100 \
  --dns-nameserver 202.103.24.68 --gateway $NETWORK.2 --subnet-range $NETWORK.0/24 Provider_sub_$NETWORK

#############################################################################################################################
#  Enable VLAN networks  #
##########################
# Connect the physical NIC to a switch trunk port and create the VLANs on the switch; VLAN networks can then be created in OpenStack
# Controller node ("provider" is the NIC label mapped to a physical NIC in linuxbridge_agent.ini)
# Configure type_drivers and ml2_type_vlan (the openstack-config command is provided by the openstack-utils package)
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vlan network_vlan_ranges provider:1001:2000
systemctl restart neutron-server

# Compute node
systemctl restart neutron-linuxbridge-agent

# Create a VLAN network
openstack network create --share --provider-segment=8 --provider-network-type=vlan --provider-physical-network provider vlan_192.168.4

# Create a subnet
openstack subnet create --network vlan_192.168.4 --allocation-pool start=192.168.4.100,end=192.168.4.200 --dns-nameserver 119.29.29.29 \
  --dns-nameserver 114.114.114.114 --gateway 192.168.4.1 --subnet-range 192.168.4.0/24 net_192.168.4

# Example: boot an instance on the VLAN network
nova boot --flavor m1.nano --image cirros-0.4.0 --nic net-name=vlan_192.168.4 --security-group default --key-name mykey vm01
#########
#  END  #
#############################################################################################################################

#########
#  END  #
#############################################################################################################################

#############################################################################################################################
#  Create an instance (download an image, upload it, boot an instance)  #
##########################################################################
# Download images
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img

# Upload the images
source ~/admin-openrc
openstack image create "cirros-0.3.5" --file cirros-0.3.5-x86_64-disk.img \
  --disk-format qcow2 --container-format bare --public

openstack image create "cirros-0.4.0" --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare --public

openstack image list

# Boot the instances
nova boot --flavor m1.nano --image cirros-0.3.5 --nic net-name=Provider_$NETWORK \
  --security-group default --key-name mykey VM01

nova boot --flavor m1.nano --image cirros-0.4.0 --nic net-name=Provider_$NETWORK \
  --security-group default --key-name mykey VM02

openstack server list
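
# Optional: fetch the noVNC console URL of an instance to log in from a browser
openstack console url show VM01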
#########
#  END  #
#############################################################################################################################

#############################################################################################################################
#  Enable the RabbitMQ web management UI (http://IP:15672)  #
#############################################################
# Enable the plugin
rabbitmq-plugins enable rabbitmq_management

# Create user admin with password admin
rabbitmqctl add_user admin admin

# Grant the admin user the administrator tag
rabbitmqctl set_user_tags admin administrator

# Web UI: http://IP:15672
#########
#  END  #
#############################################################################################################################

Title: Install OpenStack Pike

Author: 亦 漩

Published: 2018-08-04 21:08

Last updated: 2018-09-27 20:09

Original link: https://home.onlycloud.xin/posts/pike.html

License: CC BY 4.0 (Attribution 4.0 International). Please keep the original link and author attribution when republishing.