Redhat gluster storage(一)
Fedora中文社区https://www.fdzh.org/
存储官网
https://www.gluster.org/
Google repo
# vi /etc/yum.repos.d/google-chrome-mirrors.repo
[google-chrome-mirrors]
name=Google Chrome mirrors
#baseurl=https://dl.google.com/linux/chrome/rpm/stable/$basearch
#gpgkey=https://dl.google.com/linux/linux_signing_key.pub
baseurl=https://repo.fdzh.org/chrome/rpm/$basearch
gpgkey=https://repo.fdzh.org/chrome/linux_signing_key.pub
gpgcheck=1
enabled=1
skip_if_unavailable=1
Fedora repo
# vi /etc/yum.repos.d/FZUG.repo
[fzug-free]
name=FZUG fc$releasever - Free
baseurl=https://repo.fdzh.org/FZUG/free/$releasever/$basearch/
skip_if_unavailable=True
metadata_expire=1d
gpgcheck=0
enabled=1
[fzug-nonfree]
name=FZUG fc$releasever - Nonfree
baseurl=https://repo.fdzh.org/FZUG/nonfree/$releasever/$basearch/
skip_if_unavailable=True
metadata_expire=1d
gpgcheck=0
enabled=1
[fzug-testing]
name=FZUG fc$releasever - Testing
baseurl=https://repo.fdzh.org/FZUG/testing/$releasever/$basearch/
skip_if_unavailable=True
metadata_expire=1d
gpgcheck=0
enabled=1
[fzug-free-source]
name=FZUG fc$releasever - Free - Source
baseurl=https://repo.fdzh.org/FZUG/free/$releasever/source/SRPMS/
skip_if_unavailable=True
metadata_expire=1d
gpgcheck=0
enabled=0
[fzug-nonfree-source]
name=FZUG fc$releasever - Nonfree - Source
baseurl=https://repo.fdzh.org/FZUG/nonfree/$releasever/source/SRPMS/
skip_if_unavailable=True
metadata_expire=1d
gpgcheck=0
enabled=0
# dnf install -y glusterfs-server
# systemctl start glusterd
# systemctl enable glusterd
# systemctl status tuned
# cd /usr/lib/tuned/
# tuned-adm list
# tuned-adm profile rhgs-random-io
# pwd
/usr/lib/tuned/throughput-performance
# vim tuned.conf
# firewall-cmd --permanent --add-service=glusterfs
# firewall-cmd --reload
# gluster peer probe serverb
# gluster peer status
# gluster pool list
# gluster peer detach servera
# gluster peer probe servera
# gluster pool list
# cd /var/log/glusterfs/
# tail -n 20 cli.log
# tail -n 20 etc-glusterfs-glusterd.vol.log
servera
# vgs
# lvcreate -L 10G -T vg_bricks/thinpool
# vgs
# lvs
create brick
# lvcreate -V 2G -T vg_bricks/thinpool -n brick-a1
# lvs
# mkfs.xfs -i size=512 /dev/vg_bricks/brick-a1
# mkdir -p /bricks/brick-a1
# vim /etc/fstab
/dev/vg_bricks/brick-a1 /bricks/brick-a1 xfs defaults 1 2
# mount -a
# mkdir /bricks/brick-a1/brick
serverb
# lvcreate -L 10G -T vg_bricks/thinpool
# lvcreate -V 2G -T vg_bricks/thinpool -n brick-b1
# mkfs.xfs -i size=512 /dev/vg_bricks/brick-b1
# mkdir -p /bricks/brick-b1
# vim /etc/fstab
/dev/vg_bricks/brick-b1 /bricks/brick-b1 xfs defaults 1 2
# mount -a
# mkdir /bricks/brick-b1/brick
selinux
# df -Th
# chcon -Rt glusterd_brick_t /bricks/
# cd /bricks/brick-b1/brick/
# touch a
# ls -Z a
check lab
# lab setup-bricks grade
create volume
# gluster volume create replvol serverb:/bricks/brick-b1/brick servera:/bricks/brick-a1/brick
# gluster volume list
# gluster volume info replvol
# gluster volume start replvol
# gluster volume info replvol
workstation
# yum install -y glusterfs-fuse
# mount -t glusterfs servera:/replvol /mnt
# cd /mnt/
# touch file{00..99}
view servera and serverb
# cd /bricks/brick-a1/brick/
# ls | wc -l
# cd /bricks/brick-b1/brick/
# ls | wc -l
chapter 4
$ rht-vmctl reset all
# lab createvolumes setup
create 1*2
# gluster volume create replvol replica 2 servera:/bricks/brick-a1/brick serverb:/bricks/brick-b1/brick
# gluster volume start replvol
# gluster volume status replvol
# gluster volume info replvol
Test and mount volume
# yum install -y glusterfs-fuse
# mkdir /mnt/replvol
# mount -t glusterfs servera:/replvol /mnt/replvol/
# cd /mnt/replvol/
# touch file{00..09}
create 1*(4+2)
# gluster volume create dispersevol disperse-data 4 redundancy 2 \
> serverc:/bricks/brick-c1/brick/ \
> serverd:/bricks/brick-d1/brick/ \
> servera:/bricks/brick-a2/brick/ \
> serverb:/bricks/brick-b2/brick/ \
> serverc:/bricks/brick-c2/brick/ \
> serverd:/bricks/brick-d2/brick/ force
start volume
# gluster volume start dispersevol
# gluster volume info dispersevol
Test and mount volume
# mkdir /mnt/dispersevol
# mount -t glusterfs servera:/dispersevol /mnt/dispersevol/
# cp -a /boot/ /mnt/dispersevol/
# du -sh boot
# pwd
/bricks/brick-b2/brick
# du -sh boot
Check and reset lab
# lab createvolumes grade
$ rht-vmctl reset all
chapter 5
# lab native-client setup
# gluster volume list
# gluster volume info custdata
# gluster volume info mediadata
Test and mount volume
# yum install -y glusterfs-fuse
# mkdir /mnt/custdata
# vim /etc/fstab
servera:/custdata /mnt/custdata glusterfs defaults,_netdev,acl,backup-volfile-servers=serverb:serverc:serverd 0 0
# mount -a
# cd /mnt/custdata/
# touch file{00..39}
# gluster volume list
# gluster volume info custdata
Shut down servera, then view the data from the client
# init 0
# df -Th
# lab native-client grade
NFS mount:
# showmount -e localhost
# lab nfs-client setup
# gluster volume list
# firewall-cmd --permanent --add-service=rpc-bind --add-service=nfs
# firewall-cmd --reload
Test and mount nfs
# showmount -e servera
# mkdir /mnt/mediadata
# vim /etc/fstab
servera:/mediadata /mnt/mediadata nfs defaults,rw,vers=3
# touch file{000..100}
# lab nfs-client grade
samba mount:
1. Cancel the existing NFS mount
# lab smb-client setup
# umount /mnt/mediadata/
# vim /etc/fstab
#servera:/mediadata /mnt/mediadata nfs defaults,rw,vers=3
2.allow firewall and install soft pack
# firewall-cmd --permanent --add-service=samba
# firewall-cmd --reload
# yum -y install samba
# systemctl start smb.service
# systemctl enable smb.service
3. Add a Samba user (adduser is equivalent to useradd)
# which adduser
# which useradd
# rpm -qf /usr/sbin/adduser
# rpm -qf /usr/sbin/useradd
# useradd smbuser
# smbpasswd -a smbuser
# pdbedit -L smbuser
# gluster volume set mediadata stat-prefetch off
# gluster volume set mediadata server.allow-insecure on
# gluster volume set mediadata storage.batch-fsync-delay-usec 0
# vim /etc/glusterfs/glusterd.vol
option rpc-auth-allow-insecure on
4.restart gluster and volume
# systemctl restart glusterd
# gluster volume stop mediadata
# gluster volume start mediadata
# yum -y install samba-client
# smbclient -L servera -U smbuser
# vim /etc/samba/smb.conf
5.mount gluster volume on the workstation client
# mkdir /mnt/smbdata
# vim /etc/fstab
//servera/gluster-mediadata /mnt/smbdata cifs defaults,username=smbuser,password=redhat 0 0
# yum install cifs-utils
# id smbuser
# useradd smbuser
# mount -a
# df -Th
6. If ACLs are not set, the workstation client cannot write files
# setfacl -Rm u:smbuser:rwX /bricks/brick-a1
# setfacl -Rm d:u:smbuser:rwX /bricks/brick-a1
# useradd smbuser
# setfacl -Rm u:smbuser:rwX /bricks/brick-b1/
# setfacl -Rm d:u:smbuser:rwX /bricks/brick-b1/
# setfacl -Rm u:smbuser:rwX /bricks/brick-c1
# setfacl -Rm d:u:smbuser:rwX /bricks/brick-c1
# setfacl -Rm u:smbuser:rwX /bricks/brick-d1/
# setfacl -Rm d:u:smbuser:rwX /bricks/brick-d1/
# gluster volume info mediadata
chapter 6
# lab volopts setup
# gluster volume list
# gluster volume info galactica
# gluster volume set galactica server.root-squash on
# gluster volume info galactica
# mkdir /mnt/galactica
# vim /etc/fstab
servera:/galactica /mnt/galactica nfs defaults,rw,vers=3
# mount -a
# setfacl -Rm u:nfsnobody:rwX /bricks/brick-a4
# setfacl -Rm d:u:nfsnobody:rwX /bricks/brick-a4
# setfacl -Rm u:nobody:rwX /bricks/brick-b4/
# setfacl -Rm d:u:nobody:rwX /bricks/brick-b4/
# setfacl -Rm u:nobody:rwX /bricks/brick-c4
# setfacl -Rm d:u:nobody:rwX /bricks/brick-c4
# setfacl -Rm u:nobody:rwX /bricks/brick-d4/
# setfacl -Rm d:u:nobody:rwX /bricks/brick-d4/
# touch test
# gluster volume list
# gluster volume stop galactica
# gluster volume start galactica
页:
[1]