Ceph Cluster Installation
1. Environment Overview
(environment diagram: http://s3.运维网.com/wyfs02/M02/75/88/wKiom1Y7UaOSj9FHAAClt46daRU173.jpg)
2. Installation Steps
2.1 Install the Ceph packages (required on every node)
yum install ceph-deploy ceph python-ceph nodejs-argparse redhat-lsb xfsdump qemu-kvm qemu-kvm-tools qemu-img qemu-guest-agent libvirt -y
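Since the packages must be present on every node, the install can be fanned out from a single host over SSH. This is a hedged convenience, not part of the original procedure; it assumes passwordless root SSH to the three node names used throughout this guide:

# for node in bgw-os-node151 bgw-os-node152 bgw-os-node153; do
    ssh $node "yum install -y ceph-deploy ceph python-ceph nodejs-argparse redhat-lsb xfsdump \
      qemu-kvm qemu-kvm-tools qemu-img qemu-guest-agent libvirt"
  done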
2.2 Generate the fsid
# uuidgen
2.3 Configure Ceph (run on bgw-os-node151)
# cat > /etc/ceph/ceph.conf
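The body written to ceph.conf is not shown above. A minimal sketch of what it might contain, built from the fsid generated in 2.2 and the three monitors visible in the ceph -s output further down (settings such as osd pool default size are assumptions, not the author's confirmed values):

[global]
fsid = 0071bd6f-849c-433a-8051-2e553df49aea
mon initial members = bgw-os-node151, bgw-os-node152, bgw-os-node153
mon host = 10.240.216.151,10.240.216.152,10.240.216.153
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd pool default size = 3          # assumption: one replica per rack
filestore xattr use omap = true    # assumption: commonly set for filestore on XFS

With the config in place, the first OSD disk (/dev/sdb) is mounted at /var/lib/ceph/osd/ceph-0 and made persistent in /etc/fstab: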
# echo "/dev/sdb /var/lib/ceph/osd/ceph-0 xfs defaults 0 0" >> /etc/fstab
# echo "/dev/sdb /var/lib/ceph/osd/ceph-0 xfs remount,user_xattr 0 0" >> /etc/fstab
# ceph-osd -i 0 --mkfs --mkkey
2015-03-19 13:38:14.363503 7f3fe46e77a0 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2015-03-19 13:38:14.369579 7f3fe46e77a0 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2015-03-19 13:38:14.370140 7f3fe46e77a0 -1 filestore(/var/lib/ceph/osd/ceph-0) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-19 13:38:14.377213 7f3fe46e77a0 -1 created object store /var/lib/ceph/osd/ceph-0 journal /var/lib/ceph/osd/ceph-0/journal for osd.0 fsid 0071bd6f-849c-433a-8051-2e553df49aea
2015-03-19 13:38:14.377265 7f3fe46e77a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-0/keyring: can't open /var/lib/ceph/osd/ceph-0/keyring: (2) No such file or directory
2015-03-19 13:38:14.377364 7f3fe46e77a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-0/keyring
# ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
added key for osd.0
2.10 Add a rack rule for the first OSD
# ceph osd crush add-bucket rack1 rack
added bucket rack1 type rack to crush map
# ceph osd crush move bgw-os-node151 rack=rack1
Error ENOENT: item bgw-os-node151 does not exist    -- the host bucket has to be created first; run the commands below
# ceph osd crush add-bucket bgw-os-node151 host
added bucket bgw-os-node151 type host to crush map
# ceph osd crush move bgw-os-node151 rack=rack1
moved item id -3 name 'bgw-os-node151' to location {rack=rack1} in crush map
# ceph osd crush move rack1 root=default
moved item id -2 name 'rack1' to location {root=default} in crush map
# ceph osd crush add osd.0 1.0 host=bgw-os-node151
add item id 0 name 'osd.0' weight 1 at location {host=bgw-os-node151} to crush map
# touch /var/lib/ceph/osd/ceph-0/sysvinit
# /etc/init.d/ceph start osd.0
=== osd.0 ===
create-or-move updated item name 'osd.0' weight 0.27 at location {host=bgw-os-node151,root=default} to crush map
Starting Ceph osd.0 on bgw-os-node151...
starting osd.0 at :/0 osd_data /var/lib/ceph/osd/ceph-0 /var/lib/ceph/osd/ceph-0/journal
# ps aux | grep osd
root     25090  5.7  0.0 504804 27836 ?      Ssl  13:44  0:00 /usr/bin/ceph-osd -i 0 --pid-file /var/run/ceph/osd.0.pid -c /etc/ceph/ceph.conf --cluster ceph
root     25154  0.0  0.0 103304  2028 pts/0  S+   13:44  0:00 grep osd
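To double-check the hierarchy built in this section, the compiled CRUSH map can be exported and decompiled at any time. These are standard commands; the /tmp paths are arbitrary choices:

# ceph osd tree                                      # quick textual view of buckets and OSDs
# ceph osd getcrushmap -o /tmp/crushmap              # export the binary CRUSH map
# crushtool -d /tmp/crushmap -o /tmp/crushmap.txt    # decompile it into readable/editable text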
2.11 Add the remaining OSDs on bgw-os-node151
# ceph osd create
1
# mkdir -p /var/lib/ceph/osd/ceph-1
# mkfs.xfs -f /dev/sdc
meta-data=/dev/sdc               isize=256    agcount=4, agsize=18308499 blks
         =                       sectsz=512   attr=2, projid32bit=0
data     =                       bsize=4096   blocks=73233995, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal log           bsize=4096   blocks=35758, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
# mount /dev/sdc /var/lib/ceph/osd/ceph-1
# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-1
# echo "/dev/sdc /var/lib/ceph/osd/ceph-1 xfs defaults 0 0" >> /etc/fstab
# echo "/dev/sdc /var/lib/ceph/osd/ceph-1 xfs remount,user_xattr 0 0" >> /etc/fstab
# ceph-osd -i 1 --mkfs --mkkey
2015-03-19 13:56:58.131623 7f209809e7a0 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2015-03-19 13:56:58.137304 7f209809e7a0 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2015-03-19 13:56:58.137875 7f209809e7a0 -1 filestore(/var/lib/ceph/osd/ceph-1) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-19 13:56:58.145813 7f209809e7a0 -1 created object store /var/lib/ceph/osd/ceph-1 journal /var/lib/ceph/osd/ceph-1/journal for osd.1 fsid 0071bd6f-849c-433a-8051-2e553df49aea
2015-03-19 13:56:58.145862 7f209809e7a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-1/keyring: can't open /var/lib/ceph/osd/ceph-1/keyring: (2) No such file or directory
2015-03-19 13:56:58.145958 7f209809e7a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-1/keyring
# ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
added key for osd.1
# ceph osd crush add osd.1 1.0 host=bgw-os-node151
add item id 1 name 'osd.1' weight 1 at location {host=bgw-os-node151} to crush map
# touch /var/lib/ceph/osd/ceph-1/sysvinit
# /etc/init.d/ceph start osd.1
=== osd.1 ===
create-or-move updated item name 'osd.1' weight 0.27 at location {host=bgw-os-node151,root=default} to crush map
Starting Ceph osd.1 on bgw-os-node151...
starting osd.1 at :/0 osd_data /var/lib/ceph/osd/ceph-1 /var/lib/ceph/osd/ceph-1/journal
# ceph -s
cluster 0071bd6f-849c-433a-8051-2e553df49aea
health HEALTH_WARN 192 pgs degraded; 192 pgs stuck unclean; clock skew detected on mon.bgw-os-node153
monmap e2: 3 mons at {bgw-os-node151=10.240.216.151:6789/0,bgw-os-node152=10.240.216.152:6789/0,bgw-os-node153=10.240.216.153:6789/0}, election epoch 8, quorum 0,1,2 bgw-os-node151,bgw-os-node152,bgw-os-node153
osdmap e15: 2 osds: 2 up, 2 in
pgmap v22: 192 pgs, 3 pools, 0 bytes data, 0 objects
1058 MB used, 278 GB / 279 GB avail
192 active+degraded
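Two notes on this HEALTH_WARN. The 192 active+degraded PGs are expected at this point: the default pools want multiple replicas on distinct hosts, and only bgw-os-node151 holds OSDs so far. The clock skew on mon.bgw-os-node153, however, should be fixed, since monitors need closely synchronized clocks. A hedged fix on that node, assuming an NTP server is reachable (the server name below is a placeholder, not from the original):

# service ntpd stop
# ntpdate pool.ntp.org      # placeholder NTP source -- substitute your site's server
# service ntpd start
# ceph health detail        # re-check; the skew warning should clear once clocks agree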
# ceph osd tree
# id    weight  type name                       up/down reweight
-1      2       root default
-2      2               rack rack1
-3      2                       host bgw-os-node151
0       1                               osd.0   up      1
1       1                               osd.1   up      1
2.12 Create and start OSDs on bgw-os-node152
# ceph osd create    # OSD ids are allocated cumulatively across the whole cluster, starting from 0
4
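The id 4 returned here implies osd.2 and osd.3 were already created on bgw-os-node151 in the same way as osd.1 (they appear under rack1 in the tree at the end of this section). The allocated ids can be listed at any time:

# ceph osd ls    # prints the currently allocated OSD ids, one per line (0 through 3 at this point)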
# mkdir -p /var/lib/ceph/osd/ceph-4
# mkfs.xfs -f /dev/sdb
meta-data=/dev/sdb               isize=256    agcount=4, agsize=18308499 blks
         =                       sectsz=512   attr=2, projid32bit=0
data     =                       bsize=4096   blocks=73233995, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0
log      =internal log           bsize=4096   blocks=35758, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
# mount /dev/sdb /var/lib/ceph/osd/ceph-4
# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-4
# echo "/dev/sdb /var/lib/ceph/osd/ceph-4 xfs defaults 0 0" >> /etc/fstab
# echo "/dev/sdb /var/lib/ceph/osd/ceph-4 xfs remount,user_xattr 0 0" >> /etc/fstab
# ceph-osd -i 4 --mkfs --mkkey
2015-03-19 14:23:57.488335 7f474bc637a0 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2015-03-19 14:23:57.494038 7f474bc637a0 -1 journal FileJournal::_open: disabling aio for non-block journal.  Use journal_force_aio to force use of aio anyway
2015-03-19 14:23:57.494475 7f474bc637a0 -1 filestore(/var/lib/ceph/osd/ceph-4) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-19 14:23:57.502901 7f474bc637a0 -1 created object store /var/lib/ceph/osd/ceph-4 journal /var/lib/ceph/osd/ceph-4/journal for osd.4 fsid 0071bd6f-849c-433a-8051-2e553df49aea
2015-03-19 14:23:57.502952 7f474bc637a0 -1 auth: error reading file: /var/lib/ceph/osd/ceph-4/keyring: can't open /var/lib/ceph/osd/ceph-4/keyring: (2) No such file or directory
2015-03-19 14:23:57.503040 7f474bc637a0 -1 created new key in keyring /var/lib/ceph/osd/ceph-4/keyring
# ceph auth add osd.4 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-4/keyring
added key for osd.4
# ceph osd crush add-bucket rack2 rack    # add the rack bucket rack2
added bucket rack2 type rack to crush map
# ceph osd crush add-bucket bgw-os-node152 host    # add the host bucket, then move it into the rack
added bucket bgw-os-node152 type host to crush map
# ceph osd crush move bgw-os-node152 rack=rack2
moved item id -5 name 'bgw-os-node152' to location {rack=rack2} in crush map
# ceph osd crush move rack2 root=default
moved item id -4 name 'rack2' to location {root=default} in crush map
# ceph osd crush add osd.4 1.0 host=bgw-os-node152
add item id 4 name 'osd.4' weight 1 at location {host=bgw-os-node152} to crush map
# touch /var/lib/ceph/osd/ceph-4/sysvinit
# /etc/init.d/ceph start osd.4
=== osd.4 ===
create-or-move updated item name 'osd.4' weight 0.27 at location {host=bgw-os-node152,root=default} to crush map
Starting Ceph osd.4 on bgw-os-node152...
starting osd.4 at :/0 osd_data /var/lib/ceph/osd/ceph-4 /var/lib/ceph/osd/ceph-4/journal
# ceph osd tree
# id    weight  type name                       up/down reweight
-1      5       root default
-2      4               rack rack1
-3      4                       host bgw-os-node151
0       1                               osd.0   up      1
1       1                               osd.1   up      1
2       1                               osd.2   up      1
3       1                               osd.3   up      1
-4      1               rack rack2
-5      1                       host bgw-os-node152
4       1                               osd.4   up      1
2.13 Add the remaining OSDs on bgw-os-node152 and place them in rack2
# ceph osd create
# mkdir -p /var/lib/ceph/osd/ceph-5
# mkfs.xfs -f /dev/sdc
# mount /dev/sdc /var/lib/ceph/osd/ceph-5
# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-5
# echo "/dev/sdc /var/lib/ceph/osd/ceph-5 xfs defaults 0 0" >> /etc/fstab
# echo "/dev/sdc /var/lib/ceph/osd/ceph-5 xfs remount,user_xattr 0 0" >> /etc/fstab
# ceph-osd -i 5 --mkfs --mkkey
# ceph auth add osd.5 osd 'allow *' mon 'allow profile osd'-i /var/lib/ceph/osd/ceph-5/keyring
# ceph osd crush add osd.5 1.0 host=bgw-os-node152
# touch /var/lib/ceph/osd/ceph-5/sysvinit
# /etc/init.d/ceph start osd.5
# ceph osd create
# mkdir -p /var/lib/ceph/osd/ceph-6
# mkfs.xfs -f /dev/sdd
# mount /dev/sdd /var/lib/ceph/osd/ceph-6
# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-6
# echo "/dev/sdd /var/lib/ceph/osd/ceph-6 xfs defaults 0 0" >> /etc/fstab
# echo "/dev/sdd /var/lib/ceph/osd/ceph-6 xfs remount,user_xattr 0 0" >> /etc/fstab
# ceph-osd -i 6 --mkfs --mkkey
# ceph auth add osd.6 osd 'allow *' mon 'allow profile osd'-i /var/lib/ceph/osd/ceph-6/keyring
# ceph osd crush add osd.6 1.0 host=bgw-os-node152
# touch /var/lib/ceph/osd/ceph-6/sysvinit
# /etc/init.d/ceph start osd.6
# ceph osd create
# mkdir -p /var/lib/ceph/osd/ceph-7
# mkfs.xfs -f /dev/sde
# mount /dev/sde /var/lib/ceph/osd/ceph-7
# mount -o remount,user_xattr /var/lib/ceph/osd/ceph-7
# echo "/dev/sde /var/lib/ceph/osd/ceph-7 xfs defaults 0 0" >> /etc/fstab
# echo "/dev/sde /var/lib/ceph/osd/ceph-7 xfs remount,user_xattr 0 0" >> /etc/fstab
# ceph-osd -i 7 --mkfs --mkkey
# ceph auth add osd.7 osd 'allow *' mon 'allow profile osd'-i /var/lib/ceph/osd/ceph-7/keyring
# ceph osd crush add osd.7 1.0 host=bgw-os-node152
# touch /var/lib/ceph/osd/ceph-7/sysvinit
# /etc/init.d/ceph start osd.7
# ceph osd tree
# id    weight  type name                       up/down reweight
-1      8       root default
-2      4               rack rack1
-3      4                       host bgw-os-node151
0       1                               osd.0   up      1
1       1                               osd.1   up      1
2       1                               osd.2   up      1
3       1                               osd.3   up      1
-4      4               rack rack2
-5      4                       host bgw-os-node152
4       1                               osd.4   up      1
5       1                               osd.5   up      1
6       1                               osd.6   up      1
7       1                               osd.7   up      1
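Sections 2.11 through 2.13 repeat the same eight-step ritual for every OSD. A hypothetical wrapper collecting exactly those steps (the script name add_osd.sh and the fixed CRUSH weight 1.0 mirror the commands above; this sketch is not part of the original procedure):

#!/bin/bash
# add_osd.sh -- prepare, register and start one filestore OSD (sketch)
set -e
DEV=$1                               # block device, e.g. /dev/sdc
HOST=$2                              # CRUSH host bucket, e.g. bgw-os-node152
ID=$(ceph osd create)                # allocate the next OSD id cluster-wide
DIR=/var/lib/ceph/osd/ceph-$ID
mkdir -p $DIR
mkfs.xfs -f $DEV
mount $DEV $DIR
mount -o remount,user_xattr $DIR
echo "$DEV $DIR xfs defaults 0 0" >> /etc/fstab
echo "$DEV $DIR xfs remount,user_xattr 0 0" >> /etc/fstab
ceph-osd -i $ID --mkfs --mkkey       # create the data store and a fresh keyring
ceph auth add osd.$ID osd 'allow *' mon 'allow profile osd' -i $DIR/keyring
ceph osd crush add osd.$ID 1.0 host=$HOST
touch $DIR/sysvinit                  # let the sysvinit-style init script manage this OSD
/etc/init.d/ceph start osd.$ID

Usage, e.g. for the next disk on bgw-os-node152:

# ./add_osd.sh /dev/sdc bgw-os-node152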
2.14 Add OSDs on bgw-os-node153 and place them in rack3 (same procedure as on node152 -- omitted)
# ceph osd tree
# id    weight  type name                       up/down reweight
-1      12      root default
-2      4               rack rack1
-3      4                       host bgw-os-node151
0       1                               osd.0   up      1
1       1                               osd.1   up      1
2       1                               osd.2   up      1
3       1                               osd.3   up      1
-4      4               rack rack2
-5      4                       host bgw-os-node152
4       1                               osd.4   up      1
5       1                               osd.5   up      1
6       1                               osd.6   up      1
7       1                               osd.7   up      1
-6      4               rack rack3
-7      4                       host bgw-os-node153
8       1                               osd.8   up      1
9       1                               osd.9   up      1
10      1                               osd.10  up      1
11      1                               osd.11  up      1
3. Add Metadata Servers (MDS)
3.1 Create an MDS on bgw-os-node151
# mkdir -p /var/lib/ceph/mds/ceph-bgw-os-node151
# ceph-authtool --create-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring --gen-key -n client.bootstrap-mds    # only needs to run once for the whole cluster
# ceph auth list    # if client.bootstrap-mds already exists in the output, the next command can be skipped
# ceph auth add client.bootstrap-mds mon 'allow profile bootstrap-mds' -i /var/lib/ceph/bootstrap-mds/ceph.keyring
# touch /root/ceph.bootstrap-mds.keyring
# ceph-authtool --import-keyring /var/lib/ceph/bootstrap-mds/ceph.keyring ceph.bootstrap-mds.keyring
# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.bgw-os-node151 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-bgw-os-node151/keyring
# touch /var/lib/ceph/mds/ceph-bgw-os-node151/sysvinit
# touch /var/lib/ceph/mds/ceph-bgw-os-node151/done
# service ceph start mds.bgw-os-node151
# ceph -s
cluster 0071bd6f-849c-433a-8051-2e553df49aea
health HEALTH_WARN too few pgs per osd (16 < min 20); clock skew detected on mon.bgw-os-node153
monmap e2: 3 mons at {bgw-os-node151=10.240.216.151:6789/0,bgw-os-node152=10.240.216.152:6789/0,bgw-os-node153=10.240.216.153:6789/0}, election epoch 8, quorum 0,1,2 bgw-os-node151,bgw-os-node152,bgw-os-node153
mdsmap e4: 1/1/1 up {0=bgw-os-node151=up:active}
osdmap e81: 12 osds: 12 up, 12 in
pgmap v224: 192 pgs, 3 pools, 1884 bytes data, 20 objects
12703 MB used, 3338 GB / 3350 GB avail
192 active+clean
client io 0 B/s wr, 0 op/s
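The remaining warning, too few pgs per osd (16 < min 20), follows from 192 PGs spread over 12 OSDs (192 / 12 = 16). Raising pg_num on the pools clears it; the target value below is illustrative only and should be sized to the OSD count:

# ceph osd pool get data pg_num
# ceph osd pool set data pg_num 128       # pg_num can only be increased, never decreased
# ceph osd pool set data pgp_num 128      # pgp_num must be raised to match before rebalancing happens

(repeat for the metadata and rbd pools as needed)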
3.2 Create an MDS on bgw-os-node152
# mkdir -p /var/lib/ceph/mds/ceph-bgw-os-node152
# mkdir -p /var/lib/ceph/bootstrap-mds/
# scp /var/lib/ceph/bootstrap-mds/ceph.keyring bgw-os-node152:/var/lib/ceph/bootstrap-mds/
# scp /root/ceph.bootstrap-mds.keyring bgw-os-node152:/root
# scp /var/lib/ceph/mds/ceph-bgw-os-node151/sysvinit bgw-os-node152:/var/lib/ceph/mds/ceph-bgw-os-node152/
# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.bgw-os-node152 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-bgw-os-node152/keyring
# touch /var/lib/ceph/mds/ceph-bgw-os-node152/done
# service ceph start mds.bgw-os-node152
# ceph -s
cluster 0071bd6f-849c-433a-8051-2e553df49aea
health HEALTH_WARN too few pgs per osd (16 < min 20); clock skew detected on mon.bgw-os-node153
monmap e2: 3 mons at {bgw-os-node151=10.240.216.151:6789/0,bgw-os-node152=10.240.216.152:6789/0,bgw-os-node153=10.240.216.153:6789/0}, election epoch 8, quorum 0,1,2 bgw-os-node151,bgw-os-node152,bgw-os-node153
mdsmap e5: 1/1/1 up {0=bgw-os-node151=up:active}, 1 up:standby    # note: one standby MDS now
osdmap e81: 12 osds: 12 up, 12 in
pgmap v229: 192 pgs, 3 pools, 1884 bytes data, 20 objects
12702 MB used, 3338 GB / 3350 GB avail
192 active+clean
3.3 Create an MDS on bgw-os-node153
# mkdir -p /var/lib/ceph/mds/ceph-bgw-os-node153
# mkdir -p /var/lib/ceph/bootstrap-mds/
# scp /var/lib/ceph/bootstrap-mds/ceph.keyring bgw-os-node153:/var/lib/ceph/bootstrap-mds/
# scp /root/ceph.bootstrap-mds.keyring bgw-os-node153:/root
# scp /var/lib/ceph/mds/ceph-bgw-os-node151/sysvinit bgw-os-node153:/var/lib/ceph/mds/ceph-bgw-os-node153/
# ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.bgw-os-node153 osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-bgw-os-node153/keyring
# touch /var/lib/ceph/mds/ceph-bgw-os-node153/done
# service ceph start mds.bgw-os-node153
=== mds.bgw-os-node153 ===
Starting Ceph mds.bgw-os-node153 on bgw-os-node153...
starting mds.bgw-os-node153 at :/0
# ceph -s
cluster 0071bd6f-849c-433a-8051-2e553df49aea
health HEALTH_WARN too few pgs per osd (16 < min 20); clock skew detected on mon.bgw-os-node153
monmap e2: 3 mons at {bgw-os-node151=10.240.216.151:6789/0,bgw-os-node152=10.240.216.152:6789/0,bgw-os-node153=10.240.216.153:6789/0}, election epoch 8, quorum 0,1,2 bgw-os-node151,bgw-os-node152,bgw-os-node153
mdsmap e8: 1/1/1 up {0=bgw-os-node151=up:active}, 2 up:standby    # note: two standby MDS daemons now
osdmap e81: 12 osds: 12 up, 12 in
pgmap v229: 192 pgs, 3 pools, 1884 bytes data, 20 objects
12702 MB used, 3338 GB / 3350 GB avail
192 active+clean
4. Add CRUSH Rules
Run the following commands on bgw-os-node151:
# ceph osd crush rule create-simple jiayuan-replicated-ruleset default rack
# ceph osd pool set data crush_ruleset 1
# ceph osd pool set metadata crush_ruleset 1
# ceph osd pool set rbd crush_ruleset 1
# ceph osd pool set images crush_ruleset 1
# ceph osd pool set volumes crush_ruleset 1
# ceph osd pool set compute crush_ruleset 1
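create-simple ... default rack builds a replicated rule that starts at the default root and places each replica under a distinct rack bucket, so the three racks built in section 2 each hold one copy of the data. To confirm the pools picked the rule up (its ruleset id, 1 here, matches the value set above):

# ceph osd crush rule dump                # the jiayuan-replicated-ruleset should be listed
# ceph osd dump | grep pool               # each pool line should now show crush_ruleset 1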