Create the Ceph storage pools
root@ceph1:~# ceph osd pool create volumes 512
root@ceph1:~# ceph osd pool create vms 512
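If you want to confirm that both pools were created, they can be listed from any monitor node (an optional check):
root@ceph1:~# ceph osd lspools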
Distribute the Ceph configuration file to the other nodes (the network node does not need it)
root@ceph1:~# ssh controller sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
root@ceph1:~# ssh compute1 sudo tee /etc/ceph/ceph.conf < /etc/ceph/ceph.conf
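As an optional sanity check, compare checksums to make sure the copies on each node match the original:
root@ceph1:~# md5sum /etc/ceph/ceph.conf
root@ceph1:~# ssh controller md5sum /etc/ceph/ceph.conf
root@ceph1:~# ssh compute1 md5sum /etc/ceph/ceph.conf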
Install the Ceph client packages on the controller and compute nodes
root@controller:~# apt-get install python-ceph ceph-common libvirt-bin -y
root@compute1:~# apt-get install ceph-common -y
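To verify the client packages installed correctly, the ceph CLI version can be checked on each node (this does not contact the cluster, so it works before any keyrings are in place):
root@controller:~# ceph --version
root@compute1:~# ceph --version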
Authorize Cinder to access the Ceph volumes
root@ceph1:~# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
root@ceph1:~# ceph auth get-key client.cinder | ssh compute1 tee client.cinder.key
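The capabilities just granted can be reviewed with ceph auth get. Note that cinder-volume on the controller also needs to authenticate as client.cinder; the upstream Ceph/OpenStack guide copies the keyring there as well, roughly like this (assuming the default /etc/ceph/ceph.client.cinder.keyring path):
root@ceph1:~# ceph auth get client.cinder
root@ceph1:~# ceph auth get-or-create client.cinder | ssh controller sudo tee /etc/ceph/ceph.client.cinder.keyring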
Configure libvirt on the compute node
root@compute1:~# cat > secret.xml <<EOF
> <secret ephemeral='no' private='no'>
>   <uuid>51f3b880-d821-41a9-a415-f33e1e88a795</uuid>
>   <usage type='ceph'>
>     <name>client.cinder secret</name>
>   </usage>
> </secret>
> EOF
root@compute1:~# virsh secret-define --file secret.xml
root@compute1:~# virsh secret-set-value --secret 51f3b880-d821-41a9-a415-f33e1e88a795 --base64 $(cat client.cinder.key)
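To confirm that libvirt stored the secret, list the defined secrets and read the value back (an optional check):
root@compute1:~# virsh secret-list
root@compute1:~# virsh secret-get-value 51f3b880-d821-41a9-a415-f33e1e88a795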
Note: only one UUID is used throughout, 51f3b880-d821-41a9-a415-f33e1e88a795 in this example; the compute node and the controller node must reference the same value.
Configure Cinder on the controller node
root@controller:~# vi /etc/cinder/cinder.conf
[DEFAULT]
enabled_backends = ceph

[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 51f3b880-d821-41a9-a415-f33e1e88a795

root@controller:~# service cinder-volume restart
root@controller:~# cinder service-list
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
|      Binary      |       Host      | Zone |  Status | State |         Updated_at         | Disabled Reason |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
| cinder-scheduler |    controller   | nova | enabled |   up  | 2019-10-13T09:43:46.000000 |       None      |
|  cinder-volume   | controller@ceph | nova | enabled |   up  | 2019-10-13T09:43:40.000000 |       None      |
+------------------+-----------------+------+---------+-------+----------------------------+-----------------+
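If cinder-volume shows up as down after the restart, the volume service log (by default /var/log/cinder/cinder-volume.log on Ubuntu) is the first place to look for RBD or keyring errors:
root@controller:~# tail -n 50 /var/log/cinder/cinder-volume.log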
Configure Nova on the compute node
root@compute1:~# vi /etc/nova/nova.conf
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 51f3b880-d821-41a9-a415-f33e1e88a795
inject_password = false
inject_key = false
inject_partition = -2
block_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC, VIR_MIGRATE_PERSIST_DEST
live_migration_bandwidth = 0
live_migration_flag = VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_PERSIST_DEST
live_migration_uri = qemu+tcp://%s/system
hw_disk_discard = unmap
disk_cachemodes = "network=writeback"
cpu_mode = host-passthrough
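After editing nova.conf, restart nova-compute so the [libvirt] settings take effect (command assumes Ubuntu's service manager, matching the rest of this setup):
root@compute1:~# service nova-compute restart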
Create a 10 GB volume
root@controller:~# cinder create --name demo-volume1 10
root@controller:~# cinder list
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
|                  ID                  |   Status  |     Name     | Size | Volume Type | Bootable | Attached to |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
| 91841049-0d9d-434f-a8ce-917c3776d7f8 | available | demo-volume1 |  10  |     None    |  false   |             |
+--------------------------------------+-----------+--------------+------+-------------+----------+-------------+
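Because the ceph backend stores every Cinder volume as an RBD image in the volumes pool (by default the image is named volume-<volume ID>), the new volume can also be inspected from the Ceph side (optional check; the image name below assumes the default naming scheme):
root@ceph1:~# rbd -p volumes ls
root@ceph1:~# rbd -p volumes info volume-91841049-0d9d-434f-a8ce-917c3776d7f8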
Attach the volume to an instance
root@controller:~# nova volume-attach demo1 91841049-0d9d-434f-a8ce-917c3776d7f8
+----------+--------------------------------------+
| Property | Value                                |
+----------+--------------------------------------+
| device   | /dev/vdc                             |
| id       | 91841049-0d9d-434f-a8ce-917c3776d7f8 |
| serverId | 0a54786a-3795-4126-a72b-30a58c1ef68a |
| volumeId | 91841049-0d9d-434f-a8ce-917c3776d7f8 |
+----------+--------------------------------------+
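As a final check, cinder list should now report demo-volume1 as in-use and attached to the instance, and inside the demo1 guest the new disk should appear at the device path reported above (/dev/vdc):
root@controller:~# cinder list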