# OpenStack HA cluster setup: Pacemaker + HAProxy
## OpenStack cluster: Pacemaker + HAProxy

######## Disable SELinux and firewalld
systemctl stop firewalld.service
systemctl disable firewalld.service
firewall-cmd --state
sed -i '/^SELINUX=.*/c SELINUX=disabled' /etc/selinux/config
grep --color=auto '^SELINUX' /etc/selinux/config
setenforce 0

######## Time sync and hostname (set on every node)
yum install -y ntp
systemctl enable ntpd && systemctl restart ntpd
timedatectl set-timezone Asia/Shanghai
/usr/sbin/ntpdate ntp6.aliyun.com
echo "*/3 * * * * /usr/sbin/ntpdate ntp6.aliyun.com &> /dev/null" > /tmp/crontab
crontab /tmp/crontab
hostnamectl --static set-hostname ops$(ip addr |grep brd |grep global |head -n1 |cut -d '/' -f1 |cut -d '.' -f4)

########### Add hosts entries
cat >>/etc/hosts <<EOF
192.168.0.171 ops171
192.168.0.172 ops172
EOF
[ `grep -c ' controller$' /etc/hosts` -eq 0 ] && echo '192.168.0.173 v.meilele.com controller' >>/etc/hosts
tail /etc/hosts

######## Yum repository
echo '
[centos-openstack-liberty]
name=CentOS-7 - OpenStack liberty
baseurl=http://vault.centos.org/centos/7.3.1611/cloud/x86_64/openstack-liberty/
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Centos-7
' >/etc/yum.repos.d/CentOS-OpenStack-liberty.repo
tail /etc/yum.repos.d/CentOS-OpenStack-liberty.repo

###########
yum install -y qemu-kvm libvirt virt-install
systemctl enable libvirtd && systemctl restart libvirtd

######## Passwordless SSH between the nodes
wget -q http://indoor.meilele.com/download/centos/script/sshkey_tool.sh -O sshkey_tool.sh
bash sshkey_tool.sh 192.168.0.171 root ess.com1
bash sshkey_tool.sh 192.168.0.172 root ess.com1

######## HTTP high availability + load balancing: Pacemaker + HAProxy
# All controller nodes
# Install Pacemaker and Corosync
yum install corosync pacemaker pcs fence-agents resource-agents -y
# Start pcsd
systemctl enable pcsd.service
systemctl start pcsd.service
# Set the password of the cluster user hacluster
echo 123456 | passwd hacluster --stdin

# httpd settings (test page on port 8080)
cp /etc/httpd/conf/httpd.conf{,.bak}
sed -i 's#^Listen 80#Listen 8080#' /etc/httpd/conf/httpd.conf
echo "ServerName `hostname`:8080" >>/etc/httpd/conf/httpd.conf
tail -1 /etc/httpd/conf/httpd.conf
systemctl start httpd.service
netstat -antp|grep httpd
echo `hostname`>/var/www/html/index.html    # test page

##############################################
##################### Run on one controller node only
# Authenticate the nodes, then create and start the my_cluster cluster
pcs cluster auth -u hacluster -p 123456 ops171 ops172
pcs cluster setup --start --name my_cluster ops171 ops172
# Start the cluster at boot
pcs cluster enable --all
# Start the cluster
#pcs cluster start --all
pcs cluster status    # cluster state

#### Checks
# Verify corosync
corosync-cfgtool -s
# List members
corosync-cmapctl| grep members
# Corosync status
pcs status corosync
# Validate the configuration
crm_verify -L -V
# Disable STONITH
pcs property set stonith-enabled=false
# Ignore loss of quorum (two-node cluster)
pcs property set no-quorum-policy=ignore

# Create the VIP resource
pcs resource create vip ocf:heartbeat:IPaddr2 ip=192.168.0.173 cidr_netmask=24 op monitor interval=28s
# pcs resource defaults resource-stickiness=100
#
### Optional, for reference
# pcs resource create haproxy systemd:haproxy op monitor interval=5s
# pcs constraint colocation add vip haproxy INFINITY    # HAProxy and the VIP must run on the same node
# pcs constraint order vip then haproxy                 # start the VIP first, then HAProxy
# Add httpd to the cluster
#pcs resource create WEB apache configfile="/etc/httpd/conf/httpd.conf" statusurl="http://127.0.0.1/server-status"
#
# Create a group so the resources move as one unit
# pcs resource group add MyGroup vip
# pcs resource group add MyGroup WEB
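# Optional sanity check (a sketch, not part of the original procedure): confirm that the VIP
# actually fails over between ops171 and ops172 before building services on top of it.
ip addr show | grep 192.168.0.173     # the VIP should be bound on the active node
pcs status resources
pcs cluster standby ops171            # push resources off ops171
pcs status resources                  # vip should now be running on ops172
pcs cluster unstandby ops171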
############ HAProxy installation and configuration
yum install haproxy -y
# Allow haproxy to start even when the VIP is not local yet
echo "net.ipv4.ip_nonlocal_bind = 1" >>/etc/sysctl.conf
sysctl -p

# HAProxy logging via rsyslog
echo '$ModLoad imudp
$UDPServerRun 514
$template Haproxy,"%rawmsg% \n"
local0.=info -/var/log/haproxy.log;Haproxy
local0.notice -/var/log/haproxy-status.log;Haproxy
'>/etc/rsyslog.d/haproxy.conf
systemctl status rsyslog.service
systemctl restart rsyslog.service

cp /etc/haproxy/haproxy.cfg{,.bak}
############## Default configuration
echo '
########### Global settings #########
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
daemon
nbproc 1                     # number of processes
maxconn 4096                 # maximum connections
user haproxy                 # run as user
group haproxy                # run as group
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid

######## Defaults ############
defaults
log global
mode http                    # default mode { tcp|http|health }
option httplog               # log format: httplog
option dontlognull           # do not log health-check requests
retries 2                    # mark a server unavailable after 2 failed connections
option forwardfor            # pass the real client IP to the backends
option httpclose             # close the HTTP channel after each request
option abortonclose          # drop long-running connections when the server is overloaded
maxconn 4096                 # maximum connections
timeout connect 5m           # connect timeout
timeout client 1m            # client timeout
timeout server 31m           # server timeout
timeout check 10s            # health-check timeout
balance roundrobin           # load-balancing algorithm: round robin

######## Statistics page ########
listen stats
bind 0.0.0.0:1080
mode http
option httplog
log 127.0.0.1 local0 err
maxconn 10                   # maximum connections
stats refresh 30s
stats uri /admin             # status page at http://ip:1080/admin
stats realm Haproxy\ Statistics
stats auth admin:admin       # username:password = admin:admin
stats hide-version           # hide the version string
stats admin if TRUE          # allow enabling/disabling servers from the page
'>/etc/haproxy/haproxy.cfg

# HAProxy web proxy configuration (disabled): forward port 80 to the httpd test pages on both nodes
#####echo '
###############WEB############
#####listen dashboard_cluster
#####  bind :80
#####  balance roundrobin
#####  option tcpka
#####  option httpchk
#####  option tcplog
#####  server ops171 ops171:8080 check port 8080 inter 2000 rise 2 fall 5
#####  server ops172 ops172:8080 check port 8080 inter 2000 rise 2 fall 5
#####'>>/etc/haproxy/haproxy.cfg

###### Restart haproxy
systemctl restart haproxy.service
systemctl status haproxy.service
#####systemctl disable haproxy.service
# Log in to the statistics page at http://ip:1080/admin to verify
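# Optional (a sketch): after any later edit to /etc/haproxy/haproxy.cfg, validate the syntax
# before reloading so a typo cannot take the proxy down.
haproxy -c -f /etc/haproxy/haproxy.cfg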
##### MariaDB Galera Cluster installation and deployment
######################################################
# # Optional kernel / limits tuning
# echo '
# * soft nofile 65536
# * hard nofile 65536
# '>>/etc/security/limits.conf
#
# echo '
# fs.file-max=655350
# net.ipv4.ip_local_port_range = 1025 65000
# net.ipv4.tcp_tw_recycle = 1
# '>>/etc/sysctl.conf
# sysctl -p
###########################
yum install -y mariadb mariadb-server mariadb-galera-server
yum install expect -y

# Database configuration
echo "#
[mysqld]
bind-address = 0.0.0.0
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
#">/etc/my.cnf.d/openstack.cnf

# Start the database service
systemctl enable mariadb.service
systemctl start mariadb.service

#mysql_secure_installation    # interactive initial password setup; automated below with expect
#### Initialize the database service on one node only
yum install expect -y
/usr/bin/expect << EOF
set timeout 30
spawn mysql_secure_installation
expect {
    "enter for none" { send "\r"; exp_continue}
    "Y/n" { send "Y\r" ; exp_continue}
    "password:" { send "123456\r"; exp_continue}
    "new password:" { send "123456\r"; exp_continue}
    "Y/n" { send "Y\r" ; exp_continue}
    eof { exit }
}
EOF
########
mysql -u root -p123456 -e "show databases;"

######### Galera configuration
cp /etc/my.cnf.d/galera.cnf{,.bak}
egrep -v "#|^$" /etc/my.cnf.d/galera.cnf.bak >/etc/my.cnf.d/galera.cnf
sed -i 's/wsrep_on=1/wsrep_on=ON/' /etc/my.cnf.d/galera.cnf
sed -i 's/wsrep_sst_auth=root:/wsrep_sst_auth=root:123456/' /etc/my.cnf.d/galera.cnf

#################
###### Node ops171
#sed -i "s/bind-address = 0.0.0.0/bind-address = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)/" /etc/my.cnf.d/openstack.cnf
echo "
wsrep_cluster_address=\"gcomm://ops171,ops172\"
wsrep_node_address=$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
">>/etc/my.cnf.d/galera.cnf
cat /etc/my.cnf.d/galera.cnf
systemctl daemon-reload
systemctl stop mariadb.service
# Bootstrap the first node
galera_new_cluster

###########################
###### The other controller nodes
#sed -i "s/bind-address = 0.0.0.0/bind-address = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)/" /etc/my.cnf.d/openstack.cnf
echo "
wsrep_cluster_address=\"gcomm://ops171,ops172\"
wsrep_node_address=$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
">>/etc/my.cnf.d/galera.cnf
cat /etc/my.cnf.d/galera.cnf
systemctl restart mariadb.service

###########################
##### Once the other nodes are up, restart the first node (ops171)
systemctl restart mariadb.service

##### Checks
netstat -antp|grep mysqld
mysql -u root -p123456 -e "show status like 'wsrep_cluster_size';"
mysql -u root -p123456 -e "show status like 'wsrep_incoming_addresses';"
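# Recovery note (a sketch, not part of the original procedure): if the whole cluster is ever
# shut down, bootstrap again from the node with the highest seqno in its grastate.dat, then
# restart the remaining nodes normally.
cat /var/lib/mysql/grastate.dat
galera_new_cluster                    # only on the most advanced node
# systemctl restart mariadb.service   # afterwards, on the other nodes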
##### RabbitMQ cluster installation and configuration
##############################
####### Run on all nodes
yum install -y rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl restart rabbitmq-server.service
rabbitmqctl add_user admin admin
rabbitmqctl set_user_tags admin administrator
rabbitmqctl add_user openstack 123456
rabbitmqctl change_password openstack 123456
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
rabbitmqctl set_user_tags openstack administrator
rabbitmq-plugins list
rabbitmq-plugins enable rabbitmq_management
netstat -tnlp|grep beam

#### Run on all nodes: /var/lib/rabbitmq/.erlang.cookie must be identical on every cluster member
echo $(echo 123456 |md5sum |cut -d ' ' -f1) >/var/lib/rabbitmq/.erlang.cookie
chown rabbitmq:rabbitmq /var/lib/rabbitmq/.erlang.cookie && chmod 400 /var/lib/rabbitmq/.erlang.cookie   # the cookie must stay owned by rabbitmq and readable by it only
systemctl restart rabbitmq-server.service
netstat -tnlp|grep beam

###### Run on the other nodes only, not on the primary node
rabbitmqctl stop_app
rabbitmqctl join_cluster rabbit@ops171
rabbitmqctl start_app
rabbitmqctl cluster_status
#### node2 and node3 will also connect to each other automatically; to join as a RAM node use:
####rabbitmqctl join_cluster --ram rabbit@ops232
# Rename the cluster
###rabbitmqctl set_cluster_name RabbitMQ-Cluster
# Check the cluster status
rabbitmqctl cluster_status
### The RabbitMQ management UI is at http://ip:15672
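# Optional (a sketch, not in the original notes): clustering alone does not mirror queues
# between the nodes; a classic mirrored-queue policy does.
rabbitmqctl set_policy ha-all '^' '{"ha-mode":"all"}'
rabbitmqctl list_policies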
# Create the OpenStack databases and grant privileges
# Run on one controller node only
#mysql -u root -p
mysql -uroot -p123456 -e "CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
CREATE DATABASE nova;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456';
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY '123456';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY '123456';
flush privileges;
show databases;"

### Test the MySQL accounts
# Cleanup only when re-initializing (one DROP per database):
#mysql -u root -p123456 -e "DROP DATABASE glance; DROP DATABASE nova; DROP DATABASE neutron;"
mysql -ukeystone -p123456 -e "show databases;"
mysql -u root -p123456 -e "show databases;"
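# Optional check (a sketch): confirm the '%' grants work through the cluster name that the
# services will later use in their connection strings.
mysql -h v.meilele.com -ukeystone -p123456 -e "show databases;"
mysql -h v.meilele.com -unova -p123456 -e "show databases;"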
######## OpenStack package installation (yum)
############ Controller (server side)
yum install -y python-openstackclient openstack-selinux
## MySQL
yum install -y mariadb mariadb-server MySQL-python
## RabbitMQ
yum install -y rabbitmq-server
## Keystone
yum install -y openstack-keystone httpd mod_wsgi memcached python-memcached
## Glance
yum install -y openstack-glance python-glance python-glanceclient
## Nova
yum install -y openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient
## Neutron  linux-node1.example.com
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge python-neutronclient ebtables ipset
## Dashboard
yum install -y openstack-dashboard
## Cinder
yum install -y openstack-cinder python-cinderclient

############ Compute / storage nodes (client side)
## Nova  linux-node2.openstack
yum install -y openstack-nova-compute sysfsutils
## Neutron  linux-node2.openstack
yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset
## Cinder
yum install -y openstack-cinder python-cinderclient targetcli python-oslo-policy

######## Keystone installation
#############
# Run on one controller node only
# Configure and start memcached
\cp -f /etc/sysconfig/memcached{,.bak}
sed -i 's/127.0.0.1/0.0.0.0/' /etc/sysconfig/memcached
systemctl enable memcached.service
systemctl start memcached.service
netstat -antp|grep 11211

#export OS_MASTERIP=192.168.0.173
#export OS_MASTERNAME=v.meilele.com
#export OS_PWDSTR='123456'
export OS_TOKEN=$(echo 123456 |md5sum |cut -d ' ' -f1)
env|grep ^OS

\cp -f /etc/keystone/keystone.conf{,.bak}
echo "
[DEFAULT]
admin_token = $(echo 123456 |md5sum |cut -d ' ' -f1)
[database]
connection = mysql://keystone:123456@v.meilele.com/keystone
[memcache]
servers = v.meilele.com:11211
[revoke]
driver = sql
[token]
provider = uuid
driver = memcache
" >/etc/keystone/keystone.conf
grep admin_token /etc/keystone/keystone.conf

su -s /bin/sh -c "keystone-manage db_sync" keystone
tail /var/log/keystone/keystone.log

######## Apache HTTP (WSGI front end for Keystone)
echo '
Listen 5000
Listen 35357

<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>

<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    <IfVersion >= 2.4>
      ErrorLogFormat "%{cu}t %M"
    </IfVersion>
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
    </Directory>
</VirtualHost>
' >/etc/httpd/conf.d/wsgi-keystone.conf
systemctl enable httpd.service && systemctl restart httpd.service
netstat -tnlp|grep httpd

###### Service and API endpoints
export OS_URL=http://v.meilele.com:35357/v3
export OS_IDENTITY_API_VERSION=3
env|grep ^OS
openstack service create --name keystone --description "OpenStack Identity" identity
openstack endpoint create --region RegionOne identity public http://v.meilele.com:5000/v2.0
openstack endpoint create --region RegionOne identity internal http://v.meilele.com:5000/v2.0
openstack endpoint create --region RegionOne identity admin http://v.meilele.com:35357/v2.0

### admin
openstack project create --domain default --description "Admin Project" admin
openstack user create --domain default --password=123456 admin
openstack role create admin
openstack role add --project admin --user admin admin
#
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password=123456 demo
openstack role create user
openstack role add --project demo --user demo user

############### Verification
\cp -f /usr/share/keystone/keystone-dist-paste.ini{,.bak}
sed -i 's#admin_token_auth##g' /usr/share/keystone/keystone-dist-paste.ini
unset OS_TOKEN OS_URL
#
openstack --os-auth-url http://v.meilele.com:35357/v3 --os-project-domain-id default --os-user-domain-id default --os-project-name admin --os-username admin --os-password=123456 --os-auth-type password token issue
openstack --os-auth-url http://v.meilele.com:5000/v3 --os-project-domain-id default --os-user-domain-id default --os-project-name demo --os-username demo --os-password=123456 --os-auth-type password token issue

echo '
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=admin
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=123456
export OS_AUTH_URL=http://v.meilele.com:35357/v3
export OS_IDENTITY_API_VERSION=3
' >admin-openrc.sh
source admin-openrc.sh
openstack token issue

echo '
export OS_PROJECT_DOMAIN_ID=default
export OS_USER_DOMAIN_ID=default
export OS_PROJECT_NAME=demo
export OS_TENANT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=123456
export OS_AUTH_URL=http://v.meilele.com:5000/v3
export OS_IDENTITY_API_VERSION=3
' >demo-openrc.sh
source demo-openrc.sh
openstack token issue
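# Optional sanity check (a sketch): with admin-openrc.sh sourced, list what was just registered.
source admin-openrc.sh
openstack user list
openstack project list
openstack endpoint list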
######## install_glance
########## Glance image service: centralized storage for VM images
source admin-openrc.sh
openstack user create --domain default --password=123456 glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image service" image
openstack endpoint create --region RegionOne image public http://v.meilele.com:9292
openstack endpoint create --region RegionOne image internal http://v.meilele.com:9292
openstack endpoint create --region RegionOne image admin http://v.meilele.com:9292

###### Glance configuration
\cp -f /etc/glance/glance-api.conf{,.bak}
\cp -f /etc/glance/glance-registry.conf{,.bak}
echo '
[DEFAULT]
notification_driver = noop
verbose = True
[database]
connection = mysql://glance:123456@v.meilele.com/glance
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = 123456
[paste_deploy]
flavor = keystone
' >/etc/glance/glance-api.conf
##########################
echo '
[DEFAULT]
notification_driver = noop
verbose = True
[database]
connection = mysql://glance:123456@v.meilele.com/glance
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = 123456
[paste_deploy]
flavor = keystone
' >/etc/glance/glance-registry.conf
#####################################
su -s /bin/sh -c "glance-manage db_sync" glance
tail /var/log/glance/api.log
#
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl restart openstack-glance-api.service openstack-glance-registry.service
netstat -tnlp|grep python

### Verification
echo "export OS_IMAGE_API_VERSION=2" | tee -a admin-openrc.sh demo-openrc.sh
export OS_IMAGE_API_VERSION=2
source admin-openrc.sh
[ ! -e cirros-0.3.4-x86_64-disk.img ] && wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
glance image-create --name "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public --progress
#[ -e /root/CentOS-7-x86_64-GenericCloud.qcow2 ] && glance image-create --name "CentOS-7-x86_64-GenericCloud" --file /root/CentOS-7-x86_64-GenericCloud.qcow2 --disk-format qcow2 --container-format bare --visibility public --progress
glance image-list
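# Optional check (a sketch): the uploaded image should now sit under the
# filesystem_store_datadir configured above.
ls -lh /var/lib/glance/images/
openstack image show cirros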
######## install_nova
#### Nova configuration (controller)
source admin-openrc.sh
openstack user create --domain default --password=123456 nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://v.meilele.com:8774/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute internal http://v.meilele.com:8774/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne compute admin http://v.meilele.com:8774/v2/%\(tenant_id\)s

echo "
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
enabled_apis=osapi_compute,metadata
verbose = True
[database]
connection = mysql://nova:123456@v.meilele.com/nova
[glance]
host = v.meilele.com
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = 123456
[libvirt]
virt_type = kvm
[neutron]
url = http://v.meilele.com:9696
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
service_metadata_proxy = True
metadata_proxy_shared_secret = 123456
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_rabbit]
rabbit_host = v.meilele.com
rabbit_userid = openstack
rabbit_password = 123456
[vnc]
vncserver_listen = \$my_ip
vncserver_proxyclient_address = \$my_ip
novncproxy_base_url = http://v.meilele.com:6080/vnc_auto.html
" >/etc/nova/nova.conf

# Use KVM when the CPU exposes VT extensions, otherwise fall back to QEMU
[ $(egrep -c '(vmx|svm)' /proc/cpuinfo) -eq 0 ] && sed -i 's#virt_type.*#virt_type=qemu#g' /etc/nova/nova.conf
[ $(egrep -c '(vmx|svm)' /proc/cpuinfo) -ge 1 ] && sed -i 's#virt_type.*#virt_type=kvm#g' /etc/nova/nova.conf
grep virt_type /etc/nova/nova.conf

###############################
su -s /bin/sh -c "nova-manage db sync" nova
tail /var/log/nova/nova-manage.log
systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl restart openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

### On the controller:
source admin-openrc.sh
nova service-list
openstack host list
nova endpoints
glance image-list
######## install_neutron
########## Networking service installation and configuration
source admin-openrc.sh
openstack user create --domain default --password=123456 neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://v.meilele.com:9696
openstack endpoint create --region RegionOne network internal http://v.meilele.com:9696
openstack endpoint create --region RegionOne network admin http://v.meilele.com:9696

echo "
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = True
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://v.meilele.com:8774/v2
verbose = True
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = 123456
[database]
connection = mysql://neutron:123456@v.meilele.com/neutron
[nova]
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = nova
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_rabbit]
rabbit_host = v.meilele.com
rabbit_userid = openstack
rabbit_password = 123456
" >/etc/neutron/neutron.conf

echo "
[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = public
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = True
" >/etc/neutron/plugins/ml2/ml2_conf.ini

echo "
[linux_bridge]
physical_interface_mappings = public:$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $NF}')
[vxlan]
enable_vxlan = True
local_ip = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
l2_population = True
[agent]
prevent_arp_spoofing = True
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
" >/etc/neutron/plugins/ml2/linuxbridge_agent.ini

echo "
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
external_network_bridge =
verbose = True
" >/etc/neutron/l3_agent.ini

echo "
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
verbose = True
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
" >/etc/neutron/dhcp_agent.ini
echo 'dhcp-option-force=26,1450' >/etc/neutron/dnsmasq-neutron.conf

echo "
[DEFAULT]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_region = RegionOne
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = 123456
nova_metadata_ip = v.meilele.com
metadata_proxy_shared_secret = 123456
verbose = True
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
" >/etc/neutron/metadata_agent.ini
#sed -i 's#physical_interface_mappings =.*#physical_interface_mappings = public:bond0#g' /etc/neutron/plugins/ml2/linuxbridge_agent.ini

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
################################
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service
systemctl restart openstack-nova-api.service
systemctl restart neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service

##### Verification (on the controller):
source admin-openrc.sh
neutron ext-list
neutron agent-list

###################### Create the virtual networks (controller)
grep physical_interface_mappings /etc/neutron/plugins/ml2/linuxbridge_agent.ini
grep flat_networks /etc/neutron/plugins/ml2/ml2_conf.ini
echo "
DEVICE=$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $NF}'):1
ONBOOT=yes
IPADDR=172.16.30.$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1 |cut -d '.' -f4)
PREFIX=24
" >/etc/sysconfig/network-scripts/ifcfg-$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $NF}'):1
#ifconfig $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $NF}'):1 172.16.30.1 netmask 255.255.255.0 up
#ip addr add 172.16.30.$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1 |cut -d '.' -f4)/24 dev $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $NF}')

source admin-openrc.sh
neutron net-create public --shared --router:external --provider:physical_network public --provider:network_type flat
neutron subnet-create public 172.16.30.0/24 --name public --allocation-pool start=172.16.30.220,end=172.16.30.250 --dns-nameserver 172.16.30.1 --gateway 172.16.30.1 --disable-dhcp
#neutron subnet-create public 172.16.30.0/24 --name public --allocation-pool start=172.16.30.100,end=172.16.30.250 --dns-nameserver 172.16.30.1 --gateway 172.16.30.1 --disable-dhcp
### Router
#source admin-openrc.sh
#neutron net-update public --router:external
#source demo-openrc.sh
neutron net-create private
neutron subnet-create private 172.16.100.0/24 --name private --dns-nameserver 172.16.100.1 --gateway 172.16.100.1
neutron router-create router
neutron router-interface-add router private
neutron router-gateway-set router public
neutron net-list
######## Teardown, for reference
##source admin-openrc.sh
##neutron router-interface-delete router private
##neutron router-gateway-clear router public
##neutron router-delete router
##neutron router-list
##neutron net-delete public
##neutron net-delete private
##neutron net-list

### Verification
source admin-openrc.sh
ip netns
neutron router-port-list router
[ ! -e /root/.ssh/id_rsa_admin ] && ssh-keygen -q -N '' -f /root/.ssh/id_rsa_admin
nova keypair-add --pub-key ~/.ssh/id_rsa_admin.pub adminkey
source demo-openrc.sh
[ ! -e /root/.ssh/id_rsa ] && ssh-keygen -q -N '' -f /root/.ssh/id_rsa
nova keypair-add --pub-key ~/.ssh/id_rsa.pub mykey
nova keypair-list
nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
nova secgroup-add-rule default tcp 22 22 0.0.0.0/0

source demo-openrc.sh
nova flavor-list
glance image-list
neutron net-list
nova secgroup-list
nova list
#nova boot --flavor m1.tiny --image cirros --nic net-id=$(neutron net-list |grep private |awk '{print $2}') --security-group default --key-name mykey private-instance
nova list
#nova get-vnc-console private-instance novnc
## Console URL: http://v.meilele.com:6080/vnc_auto.html?token=ffec3792-a83a-4c2e-a138-bac3f8c7595d
### Default CirrOS login: user "cirros", password "cubswin:)"
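# Optional (a sketch): once private-instance has been booted, a floating IP from the public
# pool makes it reachable from outside the tenant network. Liberty-era client syntax assumed.
source demo-openrc.sh
neutron floatingip-create public
nova floating-ip-associate private-instance <floating_ip_from_previous_output>
nova list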
######## install_dashboard
env|grep ^OS
\cp -f /etc/openstack-dashboard/local_settings{,.bak}
sed -i "s#^OPENSTACK_HOST =.*#OPENSTACK_HOST = 'controller' #g" /etc/openstack-dashboard/local_settings
sed -i 's#^ALLOWED_HOSTS =.*#ALLOWED_HOSTS = \["\*"\, \] #g' /etc/openstack-dashboard/local_settings
sed -i "s#^ 'BACKEND':.*# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',\n 'LOCATION': 'controller:11211',#g" /etc/openstack-dashboard/local_settings
sed -i 's#^OPENSTACK_KEYSTONE_DEFAULT_ROLE =.*#OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"#g' /etc/openstack-dashboard/local_settings
sed -i 's#^TIME_ZONE =.*#TIME_ZONE = "Asia/Shanghai"#g' /etc/openstack-dashboard/local_settings
###########
#[ $(grep -c "^OPENSTACK_API_VERSIONS = {" /etc/openstack-dashboard/local_settings ) -eq 0 ] && sed -i "/#OPENSTACK_API_VERSIONS/ i OPENSTACK_API_VERSIONS = { \n \"identity\": 3,\n \"volume\": 2,\n}" /etc/openstack-dashboard/local_settings
#
#sed -i 's#^OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT =.*#OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True#g' /etc/openstack-dashboard/local_settings
#
#sed -i "s#'enable_router': .*#'enable_router': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_quotas': .*#'enable_quotas': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_distributed_router': .*#'enable_distributed_router': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_ha_router': .*#'enable_ha_router': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_lb': .*#'enable_lb': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_firewall': .*#'enable_firewall': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_vpn': .*#'enable_vpn': False,#g" /etc/openstack-dashboard/local_settings
#sed -i "s#'enable_fip_topology_check': .*#'enable_fip_topology_check': False,#g" /etc/openstack-dashboard/local_settings

systemctl enable httpd.service memcached.service
systemctl restart httpd.service memcached.service
unset OS_MASTERIP OS_MASTERNAME
# If the dashboard returns HTTP 500:
#sed -i '/WSGISocketPrefix run\/wsgi/ a WSGIApplicationGroup %{GLOBAL}' /etc/httpd/conf.d/openstack-dashboard.conf
# Open http://controller/dashboard in a browser
## Log in as "admin" or "demo" with password 123456
####### References: https://www.cnblogs.com/panwenbin-logs/p/8410551.html
####### https://www.cnblogs.com/kevingrace/p/5707003.html
systemctl restart httpd
systemctl restart openstack-nova-api.service neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-l3-agent.service

#### Enable the compute service on the controller
#### Fix for the instance-creation error: "No valid host was found. There are not enough hosts available"
yum install openstack-nova-compute -y
systemctl status openstack-nova-compute.service
service openstack-nova-compute restart
systemctl status openstack-nova-compute.service

####### Fix for the error: "Failed to allocate the network(s), not rescheduling."
echo -e "vif_plugging_timeout = 0 \nvif_plugging_is_fatal = False" >>/etc/nova/nova.conf
grep vif_plugging /etc/nova/nova.conf
systemctl restart openstack-nova-compute
######## install_cinder
# Create the cinder user, service and API endpoints in Keystone
yum install -y openstack-cinder python-cinderclient
source admin-openrc.sh
openstack user create --domain default --password=123456 cinder
openstack role add --project service --user cinder admin
openstack service create --name cinder --description "OpenStack Block Storage" volume
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack endpoint create --region RegionOne volume public http://v.meilele.com:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne volume internal http://v.meilele.com:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne volume admin http://v.meilele.com:8776/v1/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev2 public http://v.meilele.com:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://v.meilele.com:8776/v2/%\(tenant_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://v.meilele.com:8776/v2/%\(tenant_id\)s

\cp -f /etc/cinder/cinder.conf{,.bak}
echo "
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
verbose = True
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[cors]
[cors.subdomain]
[database]
connection = mysql://cinder:123456@v.meilele.com/cinder
[fc-zone-manager]
[keymgr]
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 123456
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = v.meilele.com
rabbit_userid = openstack
rabbit_password = 123456
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[profiler]
" >/etc/cinder/cinder.conf

################### Enable cinder storage on the controller
sed -i '/\[DEFAULT\]/ a enabled_backends = cinder_volumes\nglance_host = v.meilele.com' /etc/cinder/cinder.conf
echo "
[cinder_volumes]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
volume_backend_name = cinder_volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
" >>/etc/cinder/cinder.conf
head /etc/cinder/cinder.conf

echo '
[cinder]
os_region_name = RegionOne
'>>/etc/nova/nova.conf

########################
su -s /bin/sh -c "cinder-manage db sync" cinder
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl restart openstack-nova-api.service openstack-cinder-api.service openstack-cinder-scheduler.service
source admin-openrc.sh
cinder service-list

# Create volume types and bind them to backends
# (volume_backend_name must match the value set in cinder.conf)
# LVM
cinder type-create lvm
cinder type-key lvm set volume_backend_name=lvm
# NFS
cinder type-create nfs
cinder type-key nfs set volume_backend_name=nfs
# Check
cinder extra-specs-list
#cinder type-list
#cinder type-delete nfs

# Create volumes (size in GB)
openstack volume create --size 1 --type lvm disk01    # lvm type
openstack volume create --size 1 --type nfs disk02    # nfs type
openstack volume list

#########################################################################################
######## Node installation (compute node)
#########################################################################################
sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
setenforce 0
systemctl stop firewalld.service
systemctl disable firewalld.service
hostnamectl --static set-hostname ops$(ip addr |grep brd |grep global |head -n1 |cut -d '/' -f1 |cut -d '.' -f4)

#export OS_MASTERIP=$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
export OS_MASTERIP=172.16.3.228
export OS_MASTERNAME=v.meilele.com
[ `grep -c "${OS_MASTERIP} v.meilele.com" /etc/hosts` -eq 0 ] && echo "${OS_MASTERIP} v.meilele.com controller" >>/etc/hosts
tail /etc/hosts

echo '
[centos-openstack-liberty]
name=CentOS-7 - OpenStack liberty
baseurl=http://vault.centos.org/centos/7.3.1611/cloud/x86_64/openstack-liberty/
gpgcheck=0
enabled=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-Centos-7
' >/etc/yum.repos.d/CentOS-OpenStack-liberty.repo
yum install -y qemu-kvm libvirt virt-install
systemctl enable libvirtd && systemctl restart libvirtd

## Time sync on the node
yum install -y ntp
timedatectl set-timezone Asia/Shanghai
echo "* * * * * /usr/sbin/ntpdate ${OS_MASTERIP}" >>/var/spool/cron/root

## Nova  linux-node2.openstack
yum install -y openstack-nova-compute sysfsutils
## Neutron  linux-node2.openstack
yum install -y openstack-neutron openstack-neutron-linuxbridge ebtables ipset

# compute1 Nova
\cp -f /etc/nova/nova.conf /etc/nova/nova.conf.bak
echo "
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
network_api_class = nova.network.neutronv2.api.API
security_group_api = neutron
linuxnet_interface_driver = nova.network.linux_net.NeutronLinuxBridgeInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
verbose = True
[glance]
host = v.meilele.com
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = nova
password = 123456
[libvirt]
virt_type = kvm
[neutron]
url = http://v.meilele.com:9696
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
region_name = RegionOne
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[oslo_messaging_rabbit]
rabbit_host = v.meilele.com
rabbit_userid = openstack
rabbit_password = 123456
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = \$my_ip
novncproxy_base_url = http://v.meilele.com:6080/vnc_auto.html
" >/etc/nova/nova.conf
# Use KVM when the CPU exposes VT extensions, otherwise fall back to QEMU
[ $(egrep -c '(vmx|svm)' /proc/cpuinfo) -eq 0 ] && sed -i 's#virt_type.*#virt_type=qemu#g' /etc/nova/nova.conf
[ $(egrep -c '(vmx|svm)' /proc/cpuinfo) -ge 1 ] && sed -i 's#virt_type.*#virt_type=kvm#g' /etc/nova/nova.conf
grep virt_type /etc/nova/nova.conf
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service openstack-nova-compute.service
unset OS_MASTERIP OS_MASTERNAME
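# Note (a sketch, not in the original notes): openstack-neutron-linuxbridge was installed on the
# compute node above but never configured or started. The Liberty install guide also runs the
# Linux bridge agent on compute nodes; a minimal version mirroring the controller-side settings
# and the credentials used throughout this document would be:
echo "
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
verbose = True
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = neutron
password = 123456
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[oslo_messaging_rabbit]
rabbit_host = v.meilele.com
rabbit_userid = openstack
rabbit_password = 123456
" >/etc/neutron/neutron.conf
echo "
[linux_bridge]
physical_interface_mappings = public:$(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $NF}')
[vxlan]
enable_vxlan = True
local_ip = $(ip addr |grep global |grep $(route |grep default |awk '{print $NF}') |head -n1 |awk '{print $2}' |cut -d '/' -f1)
l2_population = True
[agent]
prevent_arp_spoofing = True
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
" >/etc/neutron/plugins/ml2/linuxbridge_agent.ini
systemctl enable neutron-linuxbridge-agent.service
systemctl restart openstack-nova-compute.service neutron-linuxbridge-agent.service
# Back on the controller, the new node should now appear:
# nova service-list
# neutron agent-list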
### Cinder storage node installation
yum install lvm2 -y
systemctl enable lvm2-lvmetad.service
systemctl restart lvm2-lvmetad.service
yes |pvcreate /dev/sdb
yes |vgcreate cinder-volumes /dev/sdb

\cp -f /etc/lvm/lvm.conf{,.bak}
## On the cinder node, the disk used by cinder-volume (/dev/sdb) must be accepted by the
## LVM filter in /etc/lvm/lvm.conf:
sed -i '/devices {/ a filter = [ "a/sdb/", "r/.*/"]' /etc/lvm/lvm.conf
## If the cinder node's operating system also lives on LVM, accept the system disk as well
## (run on the cinder node instead of the line above):
#sed -i '/devices {/ a filter = [ "a/sda/", "a/sdb/", "r/.*/"]' /etc/lvm/lvm.conf
## If a compute node's operating system lives on LVM, it needs a filter for its OS disk only
## (run on the compute node):
#sed -i '/devices {/ a filter = [ "a/sda/", "r/.*/"]' /etc/lvm/lvm.conf
grep 'devices {' -C 3 /etc/lvm/lvm.conf

yum install -y openstack-cinder targetcli python-oslo-policy
yum install -y openstack-cinder targetcli python-keystone
systemctl enable openstack-cinder-volume.service target.service
systemctl restart openstack-cinder-volume.service target.service

echo "
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
my_ip = v.meilele.com
enabled_backends = cinder_volumes
glance_host = v.meilele.com
[BRCD_FABRIC_EXAMPLE]
[CISCO_FABRIC_EXAMPLE]
[cors]
[cors.subdomain]
[database]
connection = mysql://cinder:123456@v.meilele.com/cinder
[fc-zone-manager]
[keymgr]
[keystone_authtoken]
auth_uri = http://v.meilele.com:5000
auth_url = http://v.meilele.com:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = 123456
[matchmaker_redis]
[matchmaker_ring]
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
[oslo_messaging_amqp]
[oslo_messaging_qpid]
[oslo_messaging_rabbit]
rabbit_host = v.meilele.com
rabbit_userid = openstack
rabbit_password = 123456
[oslo_middleware]
[oslo_policy]
[oslo_reports]
[profiler]
[cinder_volumes]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
volume_backend_name = lvm
" >/etc/cinder/cinder.conf

systemctl enable openstack-cinder-volume.service target.service
systemctl restart openstack-cinder-volume.service target.service
systemctl restart openstack-nova-api.service openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl restart lvm2-lvmetad.service openstack-cinder-volume.service target.service openstack-nova-api.service openstack-cinder-api.service openstack-cinder-scheduler.service
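# Optional end-to-end check (a sketch): once "cinder service-list" on the controller shows this
# node's cinder-volume as up, create a volume as the demo user and attach it to the
# private-instance test VM (assuming it was booted in the Neutron section). The volume name
# demo-disk is illustrative.
source demo-openrc.sh
openstack volume create --size 1 demo-disk
nova volume-attach private-instance $(openstack volume list | awk '/demo-disk/ {print $2}') auto
openstack volume list    # demo-disk should change to "in-use"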