In a real production environment each service module would most likely be a cluster of its own, but this walkthrough only builds a lab environment, so keystone, nova, neutron, glance, and dashboard are all installed on the controller node.
Controller node base configuration
[root@controller ~]# hostname
controller
[root@controller ~]# lscpu
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 4
On-line CPU(s) list: 0-3
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s): 4
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 44
Model name: Westmere E56xx/L56xx/X56xx (Nehalem-C)
Stepping: 1
CPU MHz: 2400.084
BogoMIPS: 4800.16
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32K
L1i cache: 32K
L2 cache: 4096K
NUMA node0 CPU(s): 0-3
[root@controller ~]# free -h
total used free shared buff/cache available
Mem: 7.8G 108M 7.6G 8.3M 98M 7.6G
Swap: 0B 0B 0B
[root@controller ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 11:0 1 1024M 0 rom
vda 252:0 0 400G 0 disk
├─vda1 252:1 0 500M 0 part /boot
└─vda2 252:2 0 399.5G 0 part
├─centos-root 253:0 0 50G 0 lvm /
├─centos-swap 253:1 0 3.9G 0 lvm
└─centos-data 253:2 0 345.6G 0 lvm /data
[root@controller ~]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.10.10 netmask 255.255.255.0 broadcast 192.168.10.255
inet6 fe80::5054:ff:fef1:33de prefixlen 64 scopeid 0x20<link>
ether 52:54:00:f1:33:de txqueuelen 1000 (Ethernet)
RX packets 3892 bytes 336939 (329.0 KiB)
RX errors 0 dropped 341 overruns 0 frame 0
TX packets 11 bytes 746 (746.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.0.0.10 netmask 255.255.0.0 broadcast 10.0.255.255
inet6 fe80::5054:ff:fe53:7f28 prefixlen 64 scopeid 0x20<link>
ether 52:54:00:53:7f:28 txqueuelen 1000 (Ethernet)
RX packets 3745 bytes 322672 (315.1 KiB)
RX errors 0 dropped 326 overruns 0 frame 0
TX packets 11 bytes 746 (746.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
eth2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 111.40.215.8 netmask 255.255.255.240 broadcast 111.40.215.15
inet6 fe80::5054:ff:fe53:7f82 prefixlen 64 scopeid 0x20<link>
ether 52:54:00:53:7f:82 txqueuelen 1000 (Ethernet)
RX packets 82 bytes 9291 (9.0 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 82 bytes 9653 (9.4 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 0 (Local Loopback)
RX packets 2718 bytes 387704 (378.6 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 2718 bytes 387704 (378.6 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@controller ~]# getenforce
Disabled
[root@controller ~]# iptables -vnL
Chain INPUT (policy ACCEPT 10697 packets, 37M bytes)
pkts bytes target prot opt in out source destination
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
Chain OUTPUT (policy ACCEPT 9283 packets, 865K bytes)
pkts bytes target prot opt in out source destination
[root@controller ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.10.10 controller
192.168.10.20 block1
192.168.10.31 compute1
192.168.10.32 compute2
[root@controller ~]#
Configure the time synchronization service
[root@controller ~]# yum list | grep chrony
chrony.x86_64 2.1.1-4.el7.centos updates
[root@controller ~]# yum install -y chrony
[root@controller ~]# vim /etc/chrony.conf
[root@controller ~]# grep -v ^# /etc/chrony.conf | tr -s [[:space:]]
server 0.centos.pool.ntp.org iburst
server 1.centos.pool.ntp.org iburst
server 2.centos.pool.ntp.org iburst
server 3.centos.pool.ntp.org iburst
stratumweight 0
driftfile /var/lib/chrony/drift
rtcsync
makestep 10 3
allow 192.168.10/24
bindcmdaddress 127.0.0.1
bindcmdaddress ::1
keyfile /etc/chrony.keys
commandkey 1
generatecommandkey
noclientlog
logchange 0.5
logdir /var/log/chrony
[root@controller ~]# systemctl enable chronyd.service
[root@controller ~]# systemctl start chronyd.service
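An optional check (not part of the original transcript) that chrony is actually syncing against its upstream servers:
[root@controller ~]# chronyc sources //a line beginning with ^* marks the source currently selected for synchronization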
Enable the OpenStack package repository
[root@controller ~]# yum install -y centos-release-openstack-mitaka
Upgrade the system and kernel
[root@controller ~]# yum upgrade //this step is not recommended: after a kernel upgrade, "openstack service create" returns an HTTP 400 error
Since the kernel was upgraded, a reboot is required for the new kernel to take effect //not needed if the kernel was not upgraded
[root@controller ~]# reboot
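If you still want the rest of the system updated while avoiding the kernel-related HTTP 400 issue mentioned above, one option is to exclude the kernel packages from the upgrade; a sketch, assuming the stock yum on CentOS 7:
[root@controller ~]# yum upgrade --exclude=kernel* //upgrades userspace packages only, so no reboot is required afterwards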
Install the OpenStack client
[root@controller ~]# yum install -y python-openstackclient
Install and configure MySQL (MariaDB)
[root@controller ~]# yum install -y mariadb-server python2-PyMySQL
[root@controller ~]# cat /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.10.10
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
[root@controller ~]# systemctl enable mariadb.service
Created symlink from /etc/systemd/system/multi-user.target.wants/mariadb.service to /usr/lib/systemd/system/mariadb.service.
[root@controller ~]# systemctl start mariadb.service
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 :::22022 :::*
[root@controller ~]# mysql_secure_installation
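mysql_secure_installation runs interactively; it sets the MariaDB root password and removes the anonymous accounts and the test database. A minimal sanity check afterwards (you will be prompted for the root password you just set):
[root@controller ~]# mysql -u root -p -e "SHOW DATABASES;"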
Install and configure the RabbitMQ message queue service
[root@controller ~]# yum install -y rabbitmq-server
[root@controller ~]# systemctl enable rabbitmq-server.service
Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
[root@controller ~]# systemctl start rabbitmq-server.service
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 128 :::22022 :::*
LISTEN 0 128 :::5672 :::*
[root@controller ~]# rabbitmqctl add_user openstack RABBIT_PASS
Creating user "openstack" ...
[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/" ...
[root@controller ~]#
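As an optional sanity check (not shown in the original output), the new account and its permissions can be listed with:
[root@controller ~]# rabbitmqctl list_users //should include the openstack user
[root@controller ~]# rabbitmqctl list_permissions //should show ".*" ".*" ".*" for openstack in vhost /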
Install and configure memcached for caching tokens
[root@controller ~]# yum install -y memcached python-memcached
[root@controller ~]# systemctl enable memcached.service
Created symlink from /etc/systemd/system/multi-user.target.wants/memcached.service to /usr/lib/systemd/system/memcached.service.
[root@controller ~]# systemctl start memcached.service
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 128 :::22022 :::*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 ::1:11211 :::*
[root@controller ~]#
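Note that in the ss output above memcached only listens on 127.0.0.1 and ::1, which is fine for this all-in-one lab. If other nodes ever need to reach it, the listen address is set via OPTIONS in /etc/sysconfig/memcached; the address below is only an example for this environment:
[root@controller ~]# vim /etc/sysconfig/memcached //e.g. OPTIONS="-l 192.168.10.10,::1", then systemctl restart memcached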
Now begin installing and configuring keystone
Database preparation
[root@controller ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 4
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE keystone;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \
-> IDENTIFIED BY 'KEYSTONE_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \
-> IDENTIFIED BY 'KEYSTONE_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> quit
Bye
Install the packages and edit the configuration file
[root@controller ~]# yum install -y openstack-keystone httpd mod_wsgi
[root@controller ~]# cp /etc/keystone/keystone.conf{,.bak}
[root@controller ~]# vim /etc/keystone/keystone.conf
[root@controller ~]# grep -v ^# /etc/keystone/keystone.conf | tr -s [[:space:]]
[DEFAULT]
admin_token = ADMIN_TOKEN
[assignment]
[auth]
[cache]
[catalog]
[cors]
[cors.subdomain]
[credential]
[database]
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[eventlet_server_ssl]
[federation]
[fernet_tokens]
[identity]
[identity_mapping]
[kvs]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[os_inherit]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_middleware]
[oslo_policy]
[paste_deploy]
[policy]
[resource]
[revoke]
[role]
[saml]
[shadow_users]
[signing]
[ssl]
[token]
provider = fernet
[tokenless_auth]
[trust]
Initialize the keystone identity service database
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
The MySQL configuration file from the official documentation is missing the skip_name_resolve line, which causes the database initialization to fail:
[root@controller ~]# mysql -e "show tables from keystone"
[root@controller ~]# mysql -hcontroller -ukeystone -pKEYSTONE_DBPASS
ERROR 1045 (28000): Access denied for user 'keystone'@'controller' (using password: YES)
[root@controller ~]# vim /etc/my.cnf.d/openstack.cnf
[root@controller ~]# cat /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 192.168.10.10
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
skip_name_resolve
[root@controller ~]# systemctl restart mariadb
[root@controller ~]# mysql -hcontroller -ukeystone -pKEYSTONE_DBPASS
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 2
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> quit
Bye
Initialize the keystone service database again
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
[root@controller ~]# mysql -e "show tables from keystone"
+------------------------+
| Tables_in_keystone |
+------------------------+
| access_token |
| assignment |
| config_register |
| consumer |
| credential |
| domain |
| endpoint |
| endpoint_group |
| federated_user |
| federation_protocol |
| group |
| id_mapping |
| identity_provider |
| idp_remote_ids |
| implied_role |
| local_user |
| mapping |
| migrate_version |
| password |
| policy |
| policy_association |
| project |
| project_endpoint |
| project_endpoint_group |
| region |
| request_token |
| revocation_event |
| role |
| sensitive_config |
| service |
| service_provider |
| token |
| trust |
| trust_role |
| user |
| user_group_membership |
| whitelisted_config |
+------------------------+
[root@controller ~]#
Initialize the Fernet keys
[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
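fernet_setup creates the key repository under /etc/keystone/fernet-keys/ (keys 0 and 1 after initial setup); an optional look confirms it exists and is owned by the keystone user:
[root@controller ~]# ls -l /etc/keystone/fernet-keys/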
Configure the Apache HTTP server for keystone
[root@controller ~]# cp /etc/httpd/conf/httpd.conf{,.bak}
[root@controller ~]# vim /etc/httpd/conf/httpd.conf
[root@controller ~]# grep ^ServerName /etc/httpd/conf/httpd.conf //just set the ServerName line to the hostname
ServerName controller
[root@controller ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
[root@controller ~]# cat /etc/httpd/conf.d/wsgi-keystone.conf
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
[root@controller ~]# httpd -t
Syntax OK
[root@controller ~]# systemctl enable httpd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/httpd.service to /usr/lib/systemd/system/httpd.service.
[root@controller ~]# systemctl start httpd.service
[root@controller ~]# ss -tnl //ports 5000 and 35357 are now listening
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 128 :::22022 :::*
LISTEN 0 128 :::5000 :::*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 ::1:11211 :::*
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::35357 :::*
[root@controller ~]#
Create the keystone service entity and API endpoints
[root@controller ~]# export OS_TOKEN=ADMIN_TOKEN
[root@controller ~]# export OS_URL=http://controller:35357/v3
[root@controller ~]# export OS_IDENTITY_API_VERSION=3
[root@controller ~]# openstack service create \ //create the identity service entity
> --name keystone --description "OpenStack Identity" identity //this step returns an HTTP 400 error if the kernel was upgraded
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Identity |
| enabled | True |
| id | cc3d2bc9ba464f99afa0c931e8c130a0 |
| name | keystone |
| type | identity |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \ //create the public identity endpoint
> identity public http://controller:5000/v3
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 8a6d73a17c23411590c40fab1513bcc8 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | cc3d2bc9ba464f99afa0c931e8c130a0 |
| service_name | keystone |
| service_type | identity |
| url | http://controller:5000/v3 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \ //create the internal identity endpoint
> identity internal http://controller:5000/v3
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 844a6d59ef464971a5b1d227caa11de1 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | cc3d2bc9ba464f99afa0c931e8c130a0 |
| service_name | keystone |
| service_type | identity |
| url | http://controller:5000/v3 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \ //create the admin identity endpoint
> identity admin http://controller:35357/v3
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | cae3be719b9f4e9ca0413064ac5ed693 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | cc3d2bc9ba464f99afa0c931e8c130a0 |
| service_name | keystone |
| service_type | identity |
| url | http://controller:35357/v3 |
+--------------+----------------------------------+
[root@controller ~]#
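An optional check while OS_TOKEN and OS_URL are still exported: listing the endpoints should show the three identity interfaces just created.
[root@controller ~]# openstack endpoint list //expect public, internal and admin endpoints for keystone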
[root@controller ~]# openstack domain create --description "Default Domain" default //create the default domain
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Default Domain |
| enabled | True |
| id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| name | default |
+-------------+----------------------------------+
[root@controller ~]# openstack project create --domain default \ //create the admin project
> --description "Admin Project" admin
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Admin Project |
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | 9b07e2a368214247bb3051e806f94f9b |
| is_domain | False |
| name | admin |
| parent_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
+-------------+----------------------------------+
[root@controller ~]# openstack user create --domain default \ //create the admin user
> --password-prompt admin
User Password:
Repeat User Password:
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | 5ba61aad8d0d4da081d78c849b392549 |
| name | admin |
+-----------+----------------------------------+
[root@controller ~]# openstack role create admin //create the admin role
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | None |
| id | 3eb429c5cf384d50b2686f2ea4d8b28a |
| name | admin |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project admin --user admin admin //grant the admin role to the admin user on the admin project
[root@controller ~]#
[root@controller ~]# openstack project create --domain default \ //create the service project
> --description "Service Project" service
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Service Project |
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | f69c48a81f38460ab603ade2a3e44826 |
| is_domain | False |
| name | service |
| parent_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
+-------------+----------------------------------+
[root@controller ~]# openstack project create --domain default \ //create the demo project
> --description "Demo Project" demo
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Demo Project |
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | 0200f6457da84abd9055a5c192386747 |
| is_domain | False |
| name | demo |
| parent_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
+-------------+----------------------------------+
[root@controller ~]# openstack user create --domain default \ //create the demo user
> --password-prompt demo
User Password:
Repeat User Password:
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | deb3adea97e34fee9161a47940762a53 |
| name | demo |
+-----------+----------------------------------+
[root@controller ~]# openstack role create user //create the user role
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | None |
| id | f190788929f9490fbfa25437a4958868 |
| name | user |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project demo --user demo user //grant the user role to the demo user on the demo project
[root@controller ~]#
[root@controller ~]# cp /etc/keystone/keystone-paste.ini{,.bak}
[root@controller ~]# vim /etc/keystone/keystone-paste.ini
[root@controller ~]# grep "pipeline =" /etc/keystone/keystone-paste.ini
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension public_service
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension s3_extension admin_service
pipeline = cors sizelimit url_normalize request_id build_auth_context token_auth json_body ec2_extension_v3 s3_extension service_v3
pipeline = cors sizelimit url_normalize public_version_service
pipeline = cors sizelimit url_normalize admin_version_service
[root@controller ~]# unset OS_TOKEN OS_URL //unset the temporary environment variables
[root@controller ~]# openstack --os-auth-url http://controller:35357/v3 \ //request an authentication token as the admin user
> --os-project-domain-name default --os-user-domain-name default \
> --os-project-name admin --os-username admin token issue
Password:
+------------+-------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-------------------------------------------------------------------------------------------------------------------+
| expires | 2017-07-15T15:38:12.000000Z |
| id | gAAAAABZaijUXthvg1i_ttx6r_h3lUAAEWtQ4DWantpcOtBHkmyIJETUkURiHO2GBaVQirpU-vGdFfdrZz_iRbnP- |
| | 8mTcZUgsZco5PhwG7ZnuBAaDx37JdlY9IqHXdiSo8x9dTa4R9lVTl_KwTtTrp1EpBIRv6sOqEhN4L0eH4HqGIyeiQj9X34 |
| project_id | 9b07e2a368214247bb3051e806f94f9b |
| user_id | 5ba61aad8d0d4da081d78c849b392549 |
+------------+-------------------------------------------------------------------------------------------------------------------+
[root@controller ~]# openstack --os-auth-url http://controller:5000/v3 \ //request an authentication token as the demo user
> --os-project-domain-name default --os-user-domain-name default \ //using the demo user's password and API port 5000 only allows regular (non-admin) access to the identity service API
> --os-project-name demo --os-username demo token issue
Password:
+------------+-------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-------------------------------------------------------------------------------------------------------------------------+
| expires | 2017-07-15T15:45:40.000000Z |
| id | gAAAAABZaiqUFOQRqC9UYyq6LxagUXzm7jtuNz4co8DlDHue-RXuwoF042NuMnYDe20Bk5WvXBDzUYkTnfMTlsTpORxpx0hSfp3u6F6fsJdv- |
| | l7pAS1s0BmrK9lN_hmQF52Fnc9Ql27HSw3_TSMHSW0IltLk_s5ES3Cn9yyH4eVmE059xXcfkmE |
| project_id | 0200f6457da84abd9055a5c192386747 |
| user_id | deb3adea97e34fee9161a47940762a53 |
+------------+-------------------------------------------------------------------------------------------------------------------------+
[root@controller ~]#
Create client environment variable scripts for the admin and demo projects and users
[root@controller ~]# vim admin-openrc
[root@controller ~]# cat admin-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@controller ~]# vim demo-openrc
[root@controller ~]# cat demo-openrc
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=DEMO_PASS
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@controller ~]#
Verify the keystone service
Request authentication tokens by loading the client environment variable scripts
[root@controller ~]# source admin-openrc
[root@controller ~]# openstack token issue
+------------+-------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-------------------------------------------------------------------------------------------------------------------------+
| expires | 2017-07-15T15:51:36.000000Z |
| id | gAAAAABZaiv4Gracm7030LSRHtVaDkAaJccGLgdxL0sDOzj4ChC9STe8YvKeuCEafbZnG4o3QZKQIx4jg- |
| | C88nRFs4KhkhmNhT6Xxqak4DAln90Mmb4Dz8LHMdgKCukwGIojAsGvChQjT6tyfQdXM4RjasCLQoVO6x3dcPFAAcTdJx6BDzcTcAg |
| project_id | 9b07e2a368214247bb3051e806f94f9b |
| user_id | 5ba61aad8d0d4da081d78c849b392549 |
+------------+-------------------------------------------------------------------------------------------------------------------------+
[root@controller ~]# source demo-openrc
[root@controller ~]# openstack token issue
+------------+-------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-------------------------------------------------------------------------------------------------------------------------+
| expires | 2017-07-15T15:53:44.000000Z |
| id | gAAAAABZaix4nu_KL0oYBTtCgdpXKwVFS1BoQAA8DM6xONR9Ur3wrU1GqcJeOrPlmbVCH5ES0kjTTYnnnv99cfs6K30tZpS5batAmxLu7tYHVKb1cmf1zAU |
| | 7BInZr9B5QwWrHYw1W8nMFnw7H3_sbDvcXJs4MJ1V-dXRBoYDhPxM1wKY4Fs8M3E |
| project_id | 0200f6457da84abd9055a5c192386747 |
| user_id | deb3adea97e34fee9161a47940762a53 |
+------------+-------------------------------------------------------------------------------------------------------------------------+
[root@controller ~]#
Install and configure the glance service
Database preparation
[root@controller ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 13
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE glance;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' \
-> IDENTIFIED BY 'GLANCE_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
-> IDENTIFIED BY 'GLANCE_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> quit
Bye
Create the glance service entity and API endpoints
[root@controller ~]# . admin-openrc
[root@controller ~]# openstack user create --domain default --password-prompt glance
User Password:
Repeat User Password:
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | e80ec7311d4341568d1f118b59720565 |
| name | glance |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project service --user glance admin
[root@controller ~]# openstack service create --name glance \
> --description "OpenStack Image" image
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Image |
| enabled | True |
| id | 2e7d8b2f950b4f8e80fb42876f9bad91 |
| name | glance |
| type | image |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> image public http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 1594c895113547c0850a57d92fd67bc2 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 2e7d8b2f950b4f8e80fb42876f9bad91 |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> image internal http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 6271f4201c10451d97209e54042cb995 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 2e7d8b2f950b4f8e80fb42876f9bad91 |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> image admin http://controller:9292
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | abd592a97be54cacbe946f394be98699 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 2e7d8b2f950b4f8e80fb42876f9bad91 |
| service_name | glance |
| service_type | image |
| url | http://controller:9292 |
+--------------+----------------------------------+
[root@controller ~]#
Install the packages and edit the configuration files
[root@controller ~]# yum install -y openstack-glance
[root@controller ~]# cp /etc/glance/glance-api.conf{,.bak}
[root@controller ~]# vim /etc/glance/glance-api.conf
[root@controller ~]# grep -v ^# /etc/glance/glance-api.conf | tr -s [[:space:]]
[DEFAULT]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[image_format]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = GLANCE_PASS
[matchmaker_redis]
[oslo_concurrency]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[store_type_location_strategy]
[task]
[taskflow_executor]
[root@controller ~]#
[root@controller ~]# cp /etc/glance/glance-registry.conf{,.bak}
[root@controller ~]# vim /etc/glance/glance-registry.conf
[root@controller ~]# grep -v ^# /etc/glance/glance-registry.conf | tr -s [[:space:]]
[DEFAULT]
[database]
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[glance_store]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = GLANCE_PASS
[matchmaker_redis]
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_policy]
[paste_deploy]
flavor = keystone
[profiler]
[root@controller ~]#
Initialize the glance service database
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
Option "verbose" from group "DEFAULT" is deprecated for removal. Its value may be silently ignored in the future.
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1056: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
expire_on_commit=expire_on_commit, _conf=conf)
/usr/lib/python2.7/site-packages/pymysql/cursors.py:166: Warning: (1831, u'Duplicate index `ix_image_properties_image_id_name`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
[root@controller ~]# mysql -e "show tables from glance"
+----------------------------------+
| Tables_in_glance |
+----------------------------------+
| artifact_blob_locations |
| artifact_blobs |
| artifact_dependencies |
| artifact_properties |
| artifact_tags |
| artifacts |
| image_locations |
| image_members |
| image_properties |
| image_tags |
| images |
| metadef_namespace_resource_types |
| metadef_namespaces |
| metadef_objects |
| metadef_properties |
| metadef_resource_types |
| metadef_tags |
| migrate_version |
| task_info |
| tasks |
+----------------------------------+
[root@controller ~]# systemctl enable openstack-glance-api.service \
> openstack-glance-registry.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service to /usr/lib/systemd/system/openstack-glance-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service to /usr/lib/systemd/system/openstack-glance-registry.service.
[root@controller ~]# systemctl start openstack-glance-api.service \
> openstack-glance-registry.service //ports 9292 and 9191 are added after startup
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:9292 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:9191 *:*
LISTEN 0 128 :::5000 :::*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 ::1:11211 :::*
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::35357 :::*
LISTEN 0 128 :::22022 :::*
[root@controller ~]#
Verify the glance service
[root@controller ~]# . admin-openrc
[root@controller ~]# wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
--2017-07-15 23:43:29-- http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
Resolving download.cirros-cloud.net (download.cirros-cloud.net)... 64.90.42.85, 2607:f298:6:a036::bd6:a72a
Connecting to download.cirros-cloud.net (download.cirros-cloud.net)|64.90.42.85|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 13287936 (13M) [text/plain]
Saving to: ‘cirros-0.3.4-x86_64-disk.img’
100%[==============================================================================================>] 13,287,936 579KB/s in 26s
2017-07-15 23:43:55 (507 KB/s) - ‘cirros-0.3.4-x86_64-disk.img’ saved [13287936/13287936]
[root@controller ~]# openstack image list
[root@controller ~]# openstack image create "cirros" \
> --file cirros-0.3.4-x86_64-disk.img \
> --disk-format qcow2 --container-format bare \
> --public
+------------------+------------------------------------------------------+
| Field | Value |
+------------------+------------------------------------------------------+
| checksum | ee1eca47dc88f4879d8a229cc70a07c6 |
| container_format | bare |
| created_at | 2017-07-16T06:55:44Z |
| disk_format | qcow2 |
| file | /v2/images/9b0a7de0-6ff5-488b-9067-813e8a88de98/file |
| id | 9b0a7de0-6ff5-488b-9067-813e8a88de98 |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros |
| owner | 9b07e2a368214247bb3051e806f94f9b |
| protected | False |
| schema | /v2/schemas/image |
| size | 13287936 |
| status | active |
| tags | |
| updated_at | 2017-07-16T06:55:44Z |
| virtual_size | None |
| visibility | public |
+------------------+------------------------------------------------------+
[root@controller ~]# openstack image list
+--------------------------------------+--------+--------+
| ID | Name | Status |
+--------------------------------------+--------+--------+
| 9b0a7de0-6ff5-488b-9067-813e8a88de98 | cirros | active |
+--------------------------------------+--------+--------+
[root@controller ~]#
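Because glance-api was configured with filesystem_store_datadir = /var/lib/glance/images/, the uploaded CirrOS image should also be visible on disk as a file named after its UUID; an optional check:
[root@controller ~]# ls -lh /var/lib/glance/images/ //expect one ~13 MB file named 9b0a7de0-6ff5-488b-9067-813e8a88de98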
Install and configure the compute service
Create and configure the nova databases
[root@controller ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 23
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE nova_api;
Query OK, 1 row affected (0.01 sec)
MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' \
-> IDENTIFIED BY 'NOVA_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
-> IDENTIFIED BY 'NOVA_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' \
-> IDENTIFIED BY 'NOVA_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
-> IDENTIFIED BY 'NOVA_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> quit
Bye
[root@controller ~]#
Create the nova service entity and API endpoints
[root@controller ~]# . admin-openrc
[root@controller ~]# openstack user create --domain default \
> --password-prompt nova
User Password:
Repeat User Password:
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | 482ba5f11f9d4c9eaebc1318fe7a2084 |
| name | nova |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project service --user nova admin
[root@controller ~]# openstack service create --name nova \
> --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Compute |
| enabled | True |
| id | e0b01cdd88d84a5eb28bd28a63d3f268 |
| name | nova |
| type | compute |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> compute public http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------+
| enabled | True |
| id | 240f7e0efeb64386943b7852ba5628d0 |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | e0b01cdd88d84a5eb28bd28a63d3f268 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> compute internal http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------+
| enabled | True |
| id | d9acbd2cbd454b9c9c936cff43306550 |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | e0b01cdd88d84a5eb28bd28a63d3f268 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> compute admin http://controller:8774/v2.1/%\(tenant_id\)s
+--------------+-------------------------------------------+
| Field | Value |
+--------------+-------------------------------------------+
| enabled | True |
| id | f1d1d402625a4714952d72bc8b141c2e |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | e0b01cdd88d84a5eb28bd28a63d3f268 |
| service_name | nova |
| service_type | compute |
| url | http://controller:8774/v2.1/%(tenant_id)s |
+--------------+-------------------------------------------+
[root@controller ~]#
Install the packages and edit the configuration file
[root@controller ~]# yum install openstack-nova-api openstack-nova-conductor \
> openstack-nova-console openstack-nova-novncproxy \
> openstack-nova-scheduler
[root@controller ~]# cp /etc/nova/nova.conf{,.bak}
[root@controller ~]# vim /etc/nova/nova.conf
[root@controller ~]# grep -v ^# /etc/nova/nova.conf | tr -s [[:space:]]
[DEFAULT]
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.10.10
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[barbican]
[cache]
[cells]
[cinder]
[conductor]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[ephemeral_storage_encryption]
[glance]
api_servers = http://controller:9292
[guestfs]
[hyperv]
[image_file_url]
[ironic]
[keymgr]
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS
[libvirt]
[matchmaker_redis]
[metrics]
[neutron]
[osapi_v21]
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[oslo_middleware]
[oslo_policy]
[rdp]
[serial_console]
[spice]
[ssl]
[trusted_computing]
[upgrade_levels]
[vmware]
[vnc]
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[workarounds]
[xenserver]
[root@controller ~]#
Initialize the nova service databases
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller ~]# mysql -e "show tables from nova_api"
+--------------------+
| Tables_in_nova_api |
+--------------------+
| build_requests |
| cell_mappings |
| flavor_extra_specs |
| flavor_projects |
| flavors |
| host_mappings |
| instance_mappings |
| migrate_version |
| request_specs |
+--------------------+
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova
/usr/lib/python2.7/site-packages/pymysql/cursors.py:166: Warning: (1831, u'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
/usr/lib/python2.7/site-packages/pymysql/cursors.py:166: Warning: (1831, u'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
[root@controller ~]# mysql -e "show tables from nova"
+--------------------------------------------+
| Tables_in_nova |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
| block_device_mapping |
| bw_usage_cache |
| cells |
| certificates |
| compute_nodes |
| console_pools |
| consoles |
| dns_domains |
| fixed_ips |
| floating_ips |
| instance_actions |
| instance_actions_events |
| instance_extra |
| instance_faults |
| instance_group_member |
| instance_group_policy |
| instance_groups |
| instance_id_mappings |
| instance_info_caches |
| instance_metadata |
| instance_system_metadata |
| instance_type_extra_specs |
| instance_type_projects |
| instance_types |
| instances |
| inventories |
| key_pairs |
| migrate_version |
| migrations |
| networks |
| pci_devices |
| project_user_quotas |
| provider_fw_rules |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| resource_provider_aggregates |
| resource_providers |
| s3_images |
| security_group_default_rules |
| security_group_instance_association |
| security_group_rules |
| security_groups |
| services |
| shadow_agent_builds |
| shadow_aggregate_hosts |
| shadow_aggregate_metadata |
| shadow_aggregates |
| shadow_block_device_mapping |
| shadow_bw_usage_cache |
| shadow_cells |
| shadow_certificates |
| shadow_compute_nodes |
| shadow_console_pools |
| shadow_consoles |
| shadow_dns_domains |
| shadow_fixed_ips |
| shadow_floating_ips |
| shadow_instance_actions |
| shadow_instance_actions_events |
| shadow_instance_extra |
| shadow_instance_faults |
| shadow_instance_group_member |
| shadow_instance_group_policy |
| shadow_instance_groups |
| shadow_instance_id_mappings |
| shadow_instance_info_caches |
| shadow_instance_metadata |
| shadow_instance_system_metadata |
| shadow_instance_type_extra_specs |
| shadow_instance_type_projects |
| shadow_instance_types |
| shadow_instances |
| shadow_key_pairs |
| shadow_migrate_version |
| shadow_migrations |
| shadow_networks |
| shadow_pci_devices |
| shadow_project_user_quotas |
| shadow_provider_fw_rules |
| shadow_quota_classes |
| shadow_quota_usages |
| shadow_quotas |
| shadow_reservations |
| shadow_s3_images |
| shadow_security_group_default_rules |
| shadow_security_group_instance_association |
| shadow_security_group_rules |
| shadow_security_groups |
| shadow_services |
| shadow_snapshot_id_mappings |
| shadow_snapshots |
| shadow_task_log |
| shadow_virtual_interfaces |
| shadow_volume_id_mappings |
| shadow_volume_usage_cache |
| snapshot_id_mappings |
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
[root@controller ~]#
Enable and start the nova services
[root@controller ~]# systemctl enable openstack-nova-api.service \
> openstack-nova-consoleauth.service openstack-nova-scheduler.service \
> openstack-nova-conductor.service openstack-nova-novncproxy.service
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-api.service to /usr/lib/systemd/system/openstack-nova-api.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-consoleauth.service to /usr/lib/systemd/system/openstack-nova-consoleauth.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-scheduler.service to /usr/lib/systemd/system/openstack-nova-scheduler.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-conductor.service to /usr/lib/systemd/system/openstack-nova-conductor.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service to /usr/lib/systemd/system/openstack-nova-novncproxy.service.
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:9292 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:9191 *:*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 :::5000 :::*
LISTEN 0 128 ::1:11211 :::*
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::35357 :::*
LISTEN 0 128 :::22022 :::*
[root@controller ~]# systemctl start openstack-nova-api.service \ //ports 8774 and 8775 are added after the nova services start
> openstack-nova-consoleauth.service openstack-nova-scheduler.service \
> openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:9292 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 100 *:6080 *:*
LISTEN 0 128 *:8774 *:*
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:8775 *:*
LISTEN 0 128 *:9191 *:*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 :::5000 :::*
LISTEN 0 128 ::1:11211 :::*
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::35357 :::*
LISTEN 0 128 :::22022 :::*
[root@controller ~]#
After the compute node has been brought up successfully, run the following to verify
[root@controller ~]# . admin-openrc
[root@controller ~]# openstack compute service list //the output should show three service components enabled on the controller node and one on the compute node
+----+------------------+------------+----------+---------+-------+----------------------------+
| Id | Binary | Host | Zone | Status | State | Updated At |
+----+------------------+------------+----------+---------+-------+----------------------------+
| 1 | nova-conductor | controller | internal | enabled | up | 2017-07-16T11:28:19.000000 |
| 2 | nova-scheduler | controller | internal | enabled | up | 2017-07-16T11:28:19.000000 |
| 3 | nova-consoleauth | controller | internal | enabled | up | 2017-07-16T11:28:19.000000 |
| 12 | nova-compute | compute1 | nova | enabled | up | 2017-07-16T11:28:27.000000 |
+----+------------------+------------+----------+---------+-------+----------------------------+
[root@controller ~]#
Install and configure the Networking service (self-service/private network model, which also includes the provider/public network)
Networking service database preparation
[root@controller ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 32
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' \
-> IDENTIFIED BY 'NEUTRON_DBPASS';
Query OK, 0 rows affected (0.01 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
-> IDENTIFIED BY 'NEUTRON_DBPASS';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> quit
Bye
[root@controller ~]#
Create the neutron service entity and API endpoints
[root@controller ~]# . admin-openrc
[root@controller ~]# openstack user create --domain default --password-prompt neutron
User Password:
Repeat User Password:
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | 3ad6ac5f704c494e9f16b9e04ef745fe |
| enabled | True |
| id | b457d498a26849a987abaf206445cee2 |
| name | neutron |
+-----------+----------------------------------+
[root@controller ~]# openstack role add --project service --user neutron admin
[root@controller ~]# openstack service create --name neutron \
> --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Networking |
| enabled | True |
| id | 8baf1525da9043e6986382c930ff568c |
| name | neutron |
| type | network |
+-------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> network public http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 6957aef00e7d40699cd90c76325fa94e |
| interface | public |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 8baf1525da9043e6986382c930ff568c |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> network internal http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 8975831926f64c3cbf3c667b383d133a |
| interface | internal |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 8baf1525da9043e6986382c930ff568c |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
[root@controller ~]# openstack endpoint create --region RegionOne \
> network admin http://controller:9696
+--------------+----------------------------------+
| Field | Value |
+--------------+----------------------------------+
| enabled | True |
| id | 60068eea4be246a7bd8880540faed006 |
| interface | admin |
| region | RegionOne |
| region_id | RegionOne |
| service_id | 8baf1525da9043e6986382c930ff568c |
| service_name | neutron |
| service_type | network |
| url | http://controller:9696 |
+--------------+----------------------------------+
[root@controller ~]#
Install the packages and edit the configuration files
[root@controller ~]# yum install -y openstack-neutron openstack-neutron-ml2 \
> openstack-neutron-linuxbridge ebtables
[root@controller ~]# cp /etc/neutron/neutron.conf{,.bak}
[root@controller ~]# vim /etc/neutron/neutron.conf
[root@controller ~]# grep -v ^# /etc/neutron/neutron.conf | tr -s [[:space:]]
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[agent]
[cors]
[cors.subdomain]
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[matchmaker_redis]
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_amqp]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[oslo_policy]
[qos]
[quotas]
[ssl]
[root@controller ~]#
Configure the ML2 (layer-2) plug-in //in production, more complex network environments require the Open vSwitch component
[root@controller ~]# cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}
[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller ~]# grep -v ^# /etc/neutron/plugins/ml2/ml2_conf.ini | tr -s [[:space:]]
[DEFAULT]
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[ml2_type_geneve]
[ml2_type_gre]
[ml2_type_vlan]
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = True
[root@controller ~]#
Configure the Linux bridge agent
[root@controller ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}
[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[root@controller ~]# grep -v ^# /etc/neutron/plugins/ml2/linuxbridge_agent.ini | tr -s [[:space:]]
[DEFAULT]
[agent]
[linux_bridge]
physical_interface_mappings = provider:eth1
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[vxlan]
enable_vxlan = True
local_ip = 192.168.10.10
l2_population = True
[root@controller ~]#
Configure the layer-3 (routing/NAT) agent
[root@controller ~]# cp /etc/neutron/l3_agent.ini{,.bak}
[root@controller ~]# vim /etc/neutron/l3_agent.ini
[root@controller ~]# grep -v ^# /etc/neutron/l3_agent.ini | tr -s [[:space:]]
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
external_network_bridge =
[AGENT]
[root@controller ~]#
Configure the DHCP agent
[root@controller ~]# cp /etc/neutron/dhcp_agent.ini{,.bak}
[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
[root@controller ~]# grep -v ^# /etc/neutron/dhcp_agent.ini | tr -s [[:space:]]
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
[AGENT]
[root@controller ~]#
Configure the metadata agent
[root@controller ~]# cp /etc/neutron/metadata_agent.ini{,.bak}
[root@controller ~]# vim /etc/neutron/metadata_agent.ini
[root@controller ~]# grep -v ^# /etc/neutron/metadata_agent.ini | tr -s [[:space:]]
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET
[AGENT]
[root@controller ~]#
After the networking configuration is complete, edit the compute service configuration file again
[root@controller ~]# vim /etc/nova/nova.conf
Append the following under the [neutron] section
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = True
metadata_proxy_shared_secret = METADATA_SECRET
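Once the remaining Networking setup below (the plug-in symlink and database initialization) is finished, the install guide has you restart the Compute API service so it picks up these [neutron] settings:
[root@controller ~]# systemctl restart openstack-nova-api.service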
Create the plug-in symlink
[root@controller ~]# ll /etc/neutron/plugin.ini
ls: cannot access /etc/neutron/plugin.ini: No such file or directory
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@controller ~]# ll /etc/neutron/plugin.ini
lrwxrwxrwx 1 root root 37 Jul 16 22:07 /etc/neutron/plugin.ini -> /etc/neutron/plugins/ml2/ml2_conf.ini
[root@controller ~]#
Initialize the neutron database
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
> --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
No handlers could be found for logger "oslo_config.cfg"
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Running upgrade for neutron ...
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> kilo, kilo_initial
INFO [alembic.runtime.migration] Running upgrade kilo -> 354db87e3225, nsxv_vdr_metadata.py
INFO [alembic.runtime.migration] Running upgrade 354db87e3225 -> 599c6a226151, neutrodb_ipam
INFO [alembic.runtime.migration] Running upgrade 599c6a226151 -> 52c5312f6baf, Initial operations in support of address scopes
INFO [alembic.runtime.migration] Running upgrade 52c5312f6baf -> 313373c0ffee, Flavor framework
INFO [alembic.runtime.migration] Running upgrade 313373c0ffee -> 8675309a5c4f, network_rbac
INFO [alembic.runtime.migration] Running upgrade 8675309a5c4f -> 45f955889773, quota_usage
INFO [alembic.runtime.migration] Running upgrade 45f955889773 -> 26c371498592, subnetpool hash
INFO [alembic.runtime.migration] Running upgrade 26c371498592 -> 1c844d1677f7, add order to dnsnameservers
INFO [alembic.runtime.migration] Running upgrade 1c844d1677f7 -> 1b4c6e320f79, address scope support in subnetpool
INFO [alembic.runtime.migration] Running upgrade 1b4c6e320f79 -> 48153cb5f051, qos db changes
INFO [alembic.runtime.migration] Running upgrade 48153cb5f051 -> 9859ac9c136, quota_reservations
INFO [alembic.runtime.migration] Running upgrade 9859ac9c136 -> 34af2b5c5a59, Add dns_name to Port
INFO [alembic.runtime.migration] Running upgrade 34af2b5c5a59 -> 59cb5b6cf4d, Add availability zone
INFO [alembic.runtime.migration] Running upgrade 59cb5b6cf4d -> 13cfb89f881a, add is_default to subnetpool
INFO [alembic.runtime.migration] Running upgrade 13cfb89f881a -> 32e5974ada25, Add standard attribute table
INFO [alembic.runtime.migration] Running upgrade 32e5974ada25 -> ec7fcfbf72ee, Add network availability zone
INFO [alembic.runtime.migration] Running upgrade ec7fcfbf72ee -> dce3ec7a25c9, Add router availability zone
INFO [alembic.runtime.migration] Running upgrade dce3ec7a25c9 -> c3a73f615e4, Add ip_version to AddressScope
INFO [alembic.runtime.migration] Running upgrade c3a73f615e4 -> 659bf3d90664, Add tables and attributes to support external DNS integration
INFO [alembic.runtime.migration] Running upgrade 659bf3d90664 -> 1df244e556f5, add_unique_ha_router_agent_port_bindings
INFO [alembic.runtime.migration] Running upgrade 1df244e556f5 -> 19f26505c74f, Auto Allocated Topology - aka Get-Me-A-Network
INFO [alembic.runtime.migration] Running upgrade 19f26505c74f -> 15be73214821, add dynamic routing model data
INFO [alembic.runtime.migration] Running upgrade 15be73214821 -> b4caf27aae4, add_bgp_dragent_model_data
INFO [alembic.runtime.migration] Running upgrade b4caf27aae4 -> 15e43b934f81, rbac_qos_policy
INFO [alembic.runtime.migration] Running upgrade 15e43b934f81 -> 31ed664953e6, Add resource_versions row to agent table
INFO [alembic.runtime.migration] Running upgrade 31ed664953e6 -> 2f9e956e7532, tag support
INFO [alembic.runtime.migration] Running upgrade 2f9e956e7532 -> 3894bccad37f, add_timestamp_to_base_resources
INFO [alembic.runtime.migration] Running upgrade 3894bccad37f -> 0e66c5227a8a, Add desc to standard attr table
INFO [alembic.runtime.migration] Running upgrade kilo -> 30018084ec99, Initial no-op Liberty contract rule.
INFO [alembic.runtime.migration] Running upgrade 30018084ec99 -> 4ffceebfada, network_rbac
INFO [alembic.runtime.migration] Running upgrade 4ffceebfada -> 5498d17be016, Drop legacy OVS and LB plugin tables
INFO [alembic.runtime.migration] Running upgrade 5498d17be016 -> 2a16083502f3, Metaplugin removal
INFO [alembic.runtime.migration] Running upgrade 2a16083502f3 -> 2e5352a0ad4d, Add missing foreign keys
INFO [alembic.runtime.migration] Running upgrade 2e5352a0ad4d -> 11926bcfe72d, add geneve ml2 type driver
INFO [alembic.runtime.migration] Running upgrade 11926bcfe72d -> 4af11ca47297, Drop cisco monolithic tables
INFO [alembic.runtime.migration] Running upgrade 4af11ca47297 -> 1b294093239c, Drop embrane plugin table
INFO [alembic.runtime.migration] Running upgrade 1b294093239c -> 8a6d8bdae39, standardattributes migration
INFO [alembic.runtime.migration] Running upgrade 8a6d8bdae39 -> 2b4c2465d44b, DVR sheduling refactoring
INFO [alembic.runtime.migration] Running upgrade 2b4c2465d44b -> e3278ee65050, Drop NEC plugin tables
INFO [alembic.runtime.migration] Running upgrade e3278ee65050 -> c6c112992c9, rbac_qos_policy
INFO [alembic.runtime.migration] Running upgrade c6c112992c9 -> 5ffceebfada, network_rbac_external
INFO [alembic.runtime.migration] Running upgrade 5ffceebfada -> 4ffceebfcdc, standard_desc
OK
[root@controller ~]# mysql -e "show tables from neutron"
+-----------------------------------------+
| Tables_in_neutron |
+-----------------------------------------+
| address_scopes |
| agents |
| alembic_version |
| allowedaddresspairs |
| arista_provisioned_nets |
| arista_provisioned_tenants |
| arista_provisioned_vms |
| auto_allocated_topologies |
| bgp_peers |
| bgp_speaker_dragent_bindings |
| bgp_speaker_network_bindings |
| bgp_speaker_peer_bindings |
| bgp_speakers |
| brocadenetworks |
| brocadeports |
| cisco_csr_identifier_map |
| cisco_hosting_devices |
| cisco_ml2_apic_contracts |
| cisco_ml2_apic_host_links |
| cisco_ml2_apic_names |
| cisco_ml2_n1kv_network_bindings |
| cisco_ml2_n1kv_network_profiles |
| cisco_ml2_n1kv_policy_profiles |
| cisco_ml2_n1kv_port_bindings |
| cisco_ml2_n1kv_profile_bindings |
| cisco_ml2_n1kv_vlan_allocations |
| cisco_ml2_n1kv_vxlan_allocations |
| cisco_ml2_nexus_nve |
| cisco_ml2_nexusport_bindings |
| cisco_port_mappings |
| cisco_router_mappings |
| consistencyhashes |
| default_security_group |
| dnsnameservers |
| dvr_host_macs |
| externalnetworks |
| extradhcpopts |
| firewall_policies |
| firewall_rules |
| firewalls |
| flavors |
| flavorserviceprofilebindings |
| floatingipdnses |
| floatingips |
| ha_router_agent_port_bindings |
| ha_router_networks |
| ha_router_vrid_allocations |
| healthmonitors |
| ikepolicies |
| ipallocationpools |
| ipallocations |
| ipamallocationpools |
| ipamallocations |
| ipamavailabilityranges |
| ipamsubnets |
| ipavailabilityranges |
| ipsec_site_connections |
| ipsecpeercidrs |
| ipsecpolicies |
| lsn |
| lsn_port |
| maclearningstates |
| members |
| meteringlabelrules |
| meteringlabels |
| ml2_brocadenetworks |
| ml2_brocadeports |
| ml2_dvr_port_bindings |
| ml2_flat_allocations |
| ml2_geneve_allocations |
| ml2_geneve_endpoints |
| ml2_gre_allocations |
| ml2_gre_endpoints |
| ml2_network_segments |
| ml2_nexus_vxlan_allocations |
| ml2_nexus_vxlan_mcast_groups |
| ml2_port_binding_levels |
| ml2_port_bindings |
| ml2_ucsm_port_profiles |
| ml2_vlan_allocations |
| ml2_vxlan_allocations |
| ml2_vxlan_endpoints |
| multi_provider_networks |
| networkconnections |
| networkdhcpagentbindings |
| networkdnsdomains |
| networkgatewaydevicereferences |
| networkgatewaydevices |
| networkgateways |
| networkqueuemappings |
| networkrbacs |
| networks |
| networksecuritybindings |
| neutron_nsx_network_mappings |
| neutron_nsx_port_mappings |
| neutron_nsx_router_mappings |
| neutron_nsx_security_group_mappings |
| nexthops |
| nsxv_edge_dhcp_static_bindings |
| nsxv_edge_vnic_bindings |
| nsxv_firewall_rule_bindings |
| nsxv_internal_edges |
| nsxv_internal_networks |
| nsxv_port_index_mappings |
| nsxv_port_vnic_mappings |
| nsxv_router_bindings |
| nsxv_router_ext_attributes |
| nsxv_rule_mappings |
| nsxv_security_group_section_mappings |
| nsxv_spoofguard_policy_network_mappings |
| nsxv_tz_network_bindings |
| nsxv_vdr_dhcp_bindings |
| nuage_net_partition_router_mapping |
| nuage_net_partitions |
| nuage_provider_net_bindings |
| nuage_subnet_l2dom_mapping |
| poolloadbalanceragentbindings |
| poolmonitorassociations |
| pools |
| poolstatisticss |
| portbindingports |
| portdnses |
| portqueuemappings |
| ports |
| portsecuritybindings |
| providerresourceassociations |
| qos_bandwidth_limit_rules |
| qos_network_policy_bindings |
| qos_policies |
| qos_port_policy_bindings |
| qospolicyrbacs |
| qosqueues |
| quotas |
| quotausages |
| reservations |
| resourcedeltas |
| router_extra_attributes |
| routerl3agentbindings |
| routerports |
| routerroutes |
| routerrules |
| routers |
| securitygroupportbindings |
| securitygrouprules |
| securitygroups |
| serviceprofiles |
| sessionpersistences |
| standardattributes |
| subnetpoolprefixes |
| subnetpools |
| subnetroutes |
| subnets |
| tags |
| tz_network_bindings |
| vcns_router_bindings |
| vips |
| vpnservices |
+-----------------------------------------+
[root@controller ~]#
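To double-check which migration revisions the database has reached, the alembic_version table listed above can be queried directly (a minimal sketch; the revisions it reports should correspond to the final revisions shown in the upgrade log):
mysql -e "select * from neutron.alembic_version"   # shows the current alembic revision(s) of the neutron database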
Restart the nova-api service
[root@controller ~]# systemctl restart openstack-nova-api.service
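Optionally confirm the restart succeeded before continuing (a minimal sketch):
systemctl status openstack-nova-api.service   # should report active (running)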
Enable and start all services for the self-service (private) network option
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:9292 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 100 *:6080 *:*
LISTEN 0 128 *:8774 *:*
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:8775 *:*
LISTEN 0 128 *:9191 *:*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 :::5000 :::*
LISTEN 0 128 ::1:11211 :::*
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::35357 :::*
LISTEN 0 128 :::22022 :::*
[root@controller ~]# systemctl enable neutron-server.service \
> neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
> neutron-metadata-agent.service neutron-l3-agent.service
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-server.service to /usr/lib/systemd/system/neutron-server.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-dhcp-agent.service to /usr/lib/systemd/system/neutron-dhcp-agent.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-metadata-agent.service to /usr/lib/systemd/system/neutron-metadata-agent.service.
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-l3-agent.service to /usr/lib/systemd/system/neutron-l3-agent.service.
[root@controller ~]# systemctl start neutron-server.service \
> neutron-linuxbridge-agent.service neutron-dhcp-agent.service \
> neutron-metadata-agent.service neutron-l3-agent.service //once the services start successfully, port 9696 will show up as listening
[root@controller ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:25672 *:*
LISTEN 0 128 192.168.10.10:3306 *:*
LISTEN 0 128 127.0.0.1:11211 *:*
LISTEN 0 128 *:9292 *:*
LISTEN 0 128 *:4369 *:*
LISTEN 0 128 *:9696 *:*
LISTEN 0 100 *:6080 *:*
LISTEN 0 128 *:8774 *:*
LISTEN 0 128 *:22022 *:*
LISTEN 0 128 *:8775 *:*
LISTEN 0 128 *:9191 *:*
LISTEN 0 128 :::5672 :::*
LISTEN 0 128 :::5000 :::*
LISTEN 0 128 ::1:11211 :::*
LISTEN 0 128 :::80 :::*
LISTEN 0 128 :::35357 :::*
LISTEN 0 128 :::22022 :::*
[root@controller ~]#
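If port 9696 does not show up after starting the services, the neutron-server logs usually explain why (a minimal sketch; the log path is the package default):
systemctl status neutron-server.service    # confirm the service is active
tail -n 50 /var/log/neutron/server.log     # look for configuration or database errors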
Proceed to the compute nodes to configure networking
After the compute node networking has been configured, continue with the following steps to verify the networking service configuration
[root@controller ~]# . admin-openrc
[root@controller ~]# neutron ext-list //list the loaded extensions to verify that the neutron-server process started successfully
+---------------------------+-----------------------------------------------+
| alias | name |
+---------------------------+-----------------------------------------------+
| default-subnetpools | Default Subnetpools |
| network-ip-availability | Network IP Availability |
| network_availability_zone | Network Availability Zone |
| auto-allocated-topology | Auto Allocated Topology Services |
| ext-gw-mode | Neutron L3 Configurable external gateway mode |
| binding | Port Binding |
| agent | agent |
| subnet_allocation | Subnet Allocation |
| l3_agent_scheduler | L3 Agent Scheduler |
| tag | Tag support |
| external-net | Neutron external network |
| net-mtu | Network MTU |
| availability_zone | Availability Zone |
| quotas | Quota management support |
| l3-ha | HA Router extension |
| provider | Provider Network |
| multi-provider | Multi Provider Network |
| address-scope | Address scope |
| extraroute | Neutron Extra Route |
| timestamp_core | Time Stamp Fields addition for core resources |
| router | Neutron L3 Router |
| extra_dhcp_opt | Neutron Extra DHCP opts |
| security-group | security-group |
| dhcp_agent_scheduler | DHCP Agent Scheduler |
| router_availability_zone | Router Availability Zone |
| rbac-policies | RBAC Policies |
| standard-attr-description | standard-attr-description |
| port-security | Port Security |
| allowed-address-pairs | Allowed Address Pairs |
| dvr | Distributed Virtual Router |
+---------------------------+-----------------------------------------------+
[root@controller ~]# neutron agent-list //list the agents to verify that the neutron agents started successfully (with the provider network option only, the L3 agent entry will be absent)
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| id | agent_type | host | availability_zone | alive | admin_state_up | binary |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
| a86bbd06-5b67-44c7-b9a0-819fc4b7c9bc | Linux bridge agent | controller | | :-) | True | neutron-linuxbridge-agent |
| cdd100fc-ee7d-4821-beb1-4cbd0fc9bb15 | L3 agent | controller | nova | :-) | True | neutron-l3-agent |
| e0ba9b79-76db-4539-b980-1652411580a1 | Linux bridge agent | compute1 | | :-) | True | neutron-linuxbridge-agent |
| e146eeb9-07b4-4ec2-9312-c522e714629e | Metadata agent | controller | | :-) | True | neutron-metadata-agent |
| f7dde89e-d6f5-4b8c-8ce2-88a2ad218f32 | DHCP agent | controller | nova | :-) | True | neutron-dhcp-agent |
+--------------------------------------+--------------------+------------+-------------------+-------+----------------+---------------------------+
[root@controller ~]#
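Every agent should show :-) in the alive column. If one shows xxx instead, restarting it on its host and re-checking from the controller is a reasonable first step (a minimal sketch):
systemctl restart neutron-linuxbridge-agent.service   # run on the host whose agent is not alive
neutron agent-list                                    # re-run on the controller and confirm alive is :-)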
The nova and neutron services still require quite a bit of configuration on the compute nodes. Before instances can be launched, the controller has to wait until the compute nodes are fully installed and configured; only then do we come back and create the network environment that instances need.
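As a preview of that later step, with the Mitaka self-service layout the provider (external) network is typically created with commands along these lines (a sketch only; the physical_network name provider, the 203.0.113.0/24 range, and its gateway are placeholder values, not taken from this environment):
. admin-openrc
neutron net-create --shared --provider:physical_network provider \
  --provider:network_type flat provider
neutron subnet-create --name provider \
  --allocation-pool start=203.0.113.101,end=203.0.113.250 \
  --dns-nameserver 8.8.8.8 --gateway 203.0.113.1 \
  provider 203.0.113.0/24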