liwya 发表于 2019-2-2 06:31:39

openstack K版本和ceph对接

  本次环境:

  openstack(K版本):控制和计算各一台,并且安装到dashboard,可以正常创建虚拟机(搭建过程见官方文档 http://docs.openstack.org/kilo/install-guide/install/yum/content/)
  ceph: 共3台,两台节点一台deploy部署机(搭建过程见官方文档 http://ceph.com/)
  下面在控制节点安装cinder,在控制节点上操作:
  ##创建数据库并且授权
# mysql
Welcome to the MariaDB monitor.Commands end with ; or \g.
Your MariaDB connection id is 2439
Server version: 5.5.47-MariaDB MariaDB Server
Copyright (c) 2000, 2015, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE cinder;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' \
    ->   IDENTIFIED BY 'awcloud';
Query OK, 0 rows affected (0.15 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
    ->   IDENTIFIED BY 'awcloud';
Query OK, 0 rows affected (0.01 sec)
  ##创建用户、端点等信息
# source admin-openrc.sh
# openstack user create --password-prompt cinder
# openstack role add --project service --user cinder admin
# openstack service create --name cinder \
>   --description "OpenStack Block Storage" volume
# openstack service create --name cinderv2 \
>   --description "OpenStack Block Storage" volumev2
# openstack endpoint create \
>   --publicurl http://controller:8776/v2/%\(tenant_id\)s \
>   --internalurl http://controller:8776/v2/%\(tenant_id\)s \
>   --adminurl http://controller:8776/v2/%\(tenant_id\)s \
>   --region RegionOne \
>   volume
# openstack endpoint create \
>   --publicurl http://controller:8776/v2/%\(tenant_id\)s \
>   --internalurl http://controller:8776/v2/%\(tenant_id\)s \
>   --adminurl http://controller:8776/v2/%\(tenant_id\)s \
>   --region RegionOne \
>   volumev2
  ##安装cinder服务
# yum install openstack-cinder python-cinderclient python-oslo-db -y
  ##修改配置文件
# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bk
# vim /etc/cinder/cinder.conf
# egrep -v "^#|^$" /etc/cinder/cinder.conf

rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.8.199
verbose = True



connection = mysql://cinder:awcloud@controller/cinder



auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = cinder
password = awcloud





rabbit_host = controller
rabbit_userid = guest
rabbit_password = guest


lock_path = /var/lock/cinder
  ##重启服务
# systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
# systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
  ###为controller节点配置实现接管ceph
# yum install python-rbd ceph-common -y
  ##把验证文件和ceph的配置文件拷贝到控制节点(在ceph的deploy节点上执行)
# scp ceph.client.admin.keyring ceph.conf 192.168.8.199:/etc/ceph/
  ##此时在controller节点执行ceph命令,验证是否成功
# ceph -s
    cluster 3155ed83-9e92-43da-90f1-c7715148f48f
   health HEALTH_OK
   monmap e1: 1 mons at {node1=192.168.8.35:6789/0}
            election epoch 2, quorum 0 node1
   osdmap e47: 2 osds: 2 up, 2 in
      pgmap v1325: 64 pgs, 1 pools, 0 bytes data, 0 objects
            80896 kB used, 389 GB / 389 GB avail
                  64 active+clean
  ##为cinder、nova、glance创建存储池(pool)
# ceph osd pool create vms    ##此命令缺少pg_num参数,执行无效;正确命令见下文
# ceph osd pool create volumes 50
pool 'volumes' created
# ceph osd pool create images 50
pool 'images' created
# ceph osd pool create backups 50
pool 'backups' created
# ceph osd pool create vms 50
pool 'vms' created
#  为ceph客户端做认证
# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'
# ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'
# ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'
#  ##创建用户的认证文件
# ceph auth get-or-create client.glance|tee /etc/ceph/ceph.client.glance.keyring

key = AQANyXRXb5l7CRAA2yVyM92BIm+U3QDseZGqow==
# chown glance:glance /etc/ceph/ceph.client.glance.keyring
# ceph auth get-or-create client.cinder | sudo tee /etc/ceph/ceph.client.cinder.keyring

key = AQDkyHRXvOTwARAAbRha/MtmqPcJm0RF9jcrsQ==
# sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
# ceph auth get-or-create client.cinder-backup |sudo tee /etc/ceph/ceph.client.cinder-backup.keyring

key = AQAVyXRXQDKFBRAAtY9DuiGGRSTBDu0MRckXbA==
# chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring
#
#  ##把/etc/ceph/ceph.client.cinder.keyring用户认证文件拷贝到计算节点
# scp /etc/ceph/ceph.client.cinder.keyring compute:/etc/ceph/
##在compute节点创建libvirt的key
# uuidgen
457eb676-33da-42ec-9a8c-9293d545c337
cat > secret.xml
页: [1]
查看完整版本: openstack K版本和ceph对接