刚过元旦,由于最近比较忙,还木有时间给各位小伙伴们拜年,在此祝各位小伙伴们新年新气象,在新的一年内财源广进,身体健康。

什么是分布式文件系统:

分布式文件系统(Distributed File System)是指文件系统管理的物理存储资源不一定直接连接在本地节点上,而是通过计算机网络与节点相连。分布式文件系统的设计基于客户机/服务器模式。一个典型的网络可能包括多个供多用户访问的服务器。另外,对等特性允许一些系统扮演客户机和服务器的双重角色。例如,用户可以“发表”一个允许其他客户机访问的目录,一旦被访问,这个目录对客户机来说就像使用本地驱动器一样。

分区磁盘
[root@localhost src]# fdisk -l
[root@localhost src]# mkfs.ext4 /dev/sdb

1、下载glusterfs源码包到/usr/local/src
http://www.gluster.org/download/

2、解压
[root@localhost src]# tar zvxf glusterfs-3.6.6.tar.gz

3、编译安装

[root@localhost glusterfs-3.6.6]# ./configure --prefix=/usr/local/glusterfs --enable-systemtap
--------------------------------------------------------------------------------------------------------------------
# 如果出现下面错误
checking for flex... no
checking for lex... no
configure: error: Flex or lex required to build glusterfs.
解决方法 [root@localhost glusterfs-3.6.6]# yum -y install flex
# 如果出现下面错误
configure: error: GNU Bison required to build glusterfs.
解决方法 [root@localhost glusterfs-3.6.6]# yum -y install bison
# 如果出现下面错误
configure: error: OpenSSL crypto library is required to build glusterfs
解决方法 [root@localhost glusterfs-3.6.6]# yum -y install openssl-devel

configure: error: libxml2 devel libraries not found
解决方法:[root@localhost glusterfs-3.6.6]# yum install libxml2 libxml2-devel
--------------------------------------------------------------------------------------------------------------------
[root@localhost glusterfs-3.6.6]# make && make install

4、查看版本信息
[root@localhost sbin]# ./gluster --version
glusterfs 3.6.6 built on Dec 27 2015 03:48:07
Repository revision: git://git.gluster.com/glusterfs.git
Copyright (c) 2006-2011 Gluster Inc. http://www.gluster.com
GlusterFS comes with ABSOLUTELY NO WARRANTY.
You may redistribute copies of GlusterFS under the terms of the GNU General Public License.

5、glusterfs 启动
[root@localhost sbin]# service glusterd start
Starting glusterd: [确定]

[root@localhost sbin]# ps -ef |grep glusterd
root 49226 1 0 03:56 ? 00:00:00 /usr/local/glusterfs/sbin/glusterd --pid-file=/var/run/glusterd.pid
root 49363 6751 0 03:57 pts/2 00:00:00 grep glusterd

6、gluster基本操作
6.1、创建并启动volume
[root@localhost sbin]# ./gluster volume create help
Usage: volume create [stripe ] [replica ] [disperse []] [redundancy ] [transport ] … [force]
[root@localhost sbin]# ./gluster volume create testvol 192.168.108.141:/data/brick1 192.168.108.141:/data/brick2
volume create: testvol: failed: The brick 192.168.108.141:/data/brick1 is being created in the root partition. It is recommended that you don't use the system's root partition for storage backend. Or use 'force' at the end of the command if you want to override this behavior.
提示:建议不要使用系统的根分区作为存储后端;如果确实要这样做,可在命令末尾加上 force 参数强制执行。
[root@localhost sbin]# ./gluster volume create testvol 192.168.108.141:/data/brick1 192.168.108.141:/data/brick2 force
volume create: testvol: success: please start the volume to access data
[root@localhost sbin]# ./gluster volume info
Volume Name: testvol
Type: Distribute
Volume ID: 1b1c1b4d-cd7e-4770-beb4-8dfa1499d723
Status: Created
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick1
Brick2: 192.168.108.141:/data/brick2

[root@localhost sbin]# ./gluster volume start testvol    # testvol 是指创建 volume 时指定的名字
volume start: testvol: success
[root@localhost sbin]# ./gluster volume info

Volume Name: testvol
Type: Distribute
Volume ID: 1b1c1b4d-cd7e-4770-beb4-8dfa1499d723
Status: Started
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick1
Brick2: 192.168.108.141:/data/brick2

[root@localhost sbin]# mount -t glusterfs 192.168.108.141:/testvol /mnt
[root@localhost sbin]# mount
/dev/mapper/VolGroup-lv_root on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
gvfs-fuse-daemon on /root/.gvfs type fuse.gvfs-fuse-daemon (rw,nosuid,nodev)
/dev/sr0 on /media/CentOS_6.5_Final type iso9660 (ro,nosuid,nodev,uhelper=udisks,uid=0,gid=0,iocharset=utf8,mode=0400,dmode=0500)
192.168.108.141:/testvol on /mnt type fuse.glusterfs (rw,default_permissions,allow_other,max_read=131072)
[root@localhost sbin]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 18G 8.0G 8.6G 49% /
tmpfs 491M 228K 491M 1% /dev/shm
/dev/sda1 485M 35M 426M 8% /boot
/dev/sr0 4.2G 4.2G 0 100% /media/CentOS_6.5_Final
192.168.108.141:/testvol 35G 16G 18G 49% /mnt

[root@localhost /]# cd /mnt/
[root@localhost mnt]# touch file.{1..10}
[root@localhost mnt]# ll
总用量 8
-rw-r--r-- 1 root root 0 12月 27 23:05 file.1
-rw-r--r-- 1 root root 0 12月 27 23:05 file.10
-rw-r--r-- 1 root root 0 12月 27 23:05 file.2
-rw-r--r-- 1 root root 0 12月 27 23:05 file.3
-rw-r--r-- 1 root root 0 12月 27 23:05 file.4
-rw-r--r-- 1 root root 0 12月 27 23:05 file.5
-rw-r--r-- 1 root root 0 12月 27 23:05 file.6
-rw-r--r-- 1 root root 0 12月 27 23:05 file.7
-rw-r--r-- 1 root root 0 12月 27 23:05 file.8
-rw-r--r-- 1 root root 0 12月 27 23:05 file.9

drwxr-xr-x 2 root root 8192 12月 27 22:53 test11
[root@localhost /]# cd /mnt/
[root@localhost mnt]# ll
总用量 8
drwxr-xr-x 2 root root 8192 12月 27 22:53 test11
[root@localhost mnt]# touch file.{1..10}
[root@localhost mnt]# ll
总用量 8
-rw-r--r-- 1 root root 0 12月 27 23:05 file.1
-rw-r--r-- 1 root root 0 12月 27 23:05 file.10
-rw-r--r-- 1 root root 0 12月 27 23:05 file.2
-rw-r--r-- 1 root root 0 12月 27 23:05 file.3
-rw-r--r-- 1 root root 0 12月 27 23:05 file.4
-rw-r--r-- 1 root root 0 12月 27 23:05 file.5
-rw-r--r-- 1 root root 0 12月 27 23:05 file.6
-rw-r--r-- 1 root root 0 12月 27 23:05 file.7
-rw-r--r-- 1 root root 0 12月 27 23:05 file.8
-rw-r--r-- 1 root root 0 12月 27 23:05 file.9
drwxr-xr-x 2 root root 8192 12月 27 22:53 test11
[root@localhost /]# ll /data/brick1/
总用量 4
-rw-r--r-- 2 root root 0 12月 27 23:05 file.6
-rw-r--r-- 2 root root 0 12月 27 23:05 file.7
-rw-r--r-- 2 root root 0 12月 27 23:05 file.8
drwxr-xr-x 2 root root 4096 12月 27 22:53 test11
[root@localhost /]# ll /data/brick2/
总用量 4
-rw-r--r-- 2 root root 0 12月 27 23:05 file.1
-rw-r--r-- 2 root root 0 12月 27 23:05 file.10
-rw-r--r-- 2 root root 0 12月 27 23:05 file.2
-rw-r--r-- 2 root root 0 12月 27 23:05 file.3
-rw-r--r-- 2 root root 0 12月 27 23:05 file.4
-rw-r--r-- 2 root root 0 12月 27 23:05 file.5
-rw-r--r-- 2 root root 0 12月 27 23:05 file.9
drwxr-xr-x 2 root root 4096 12月 27 22:53 test11
提示:当我们创建了testvol之后,在挂载点上创建的10个文件被自动分配给两个节点存储。
分配4个节点实战:
[root@localhost /]# /usr/local/glusterfs/sbin/gluster volume create testvol2 192.168.108.141:/data/brick3 192.168.108.141:/data/brick4 192.168.108.141:/data/brick5 192.168.108.141:/data/brick6 force
volume create: testvol2: success: please start the volume to access data
[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume info

Volume Name: testvol
Type: Distribute
Volume ID: 1b1c1b4d-cd7e-4770-beb4-8dfa1499d723
Status: Started
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick1
Brick2: 192.168.108.141:/data/brick2

Volume Name: testvol2
Type: Distribute
Volume ID: 1aa17053-51d7-4d9d-bb5f-3b6ed4ddc559
Status: Created
Number of Bricks: 4
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick3
Brick2: 192.168.108.141:/data/brick4
Brick3: 192.168.108.141:/data/brick5
Brick4: 192.168.108.141:/data/brick6
[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume stop testvol2    # 停止 testvol2 卷
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: testvol2: failed: Volume testvol2 is not in the started state
[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume delete testvol2    # 删除 testvol2 卷
Deleting volume will erase all information about the volume. Do you want to continue? (y/n) y
volume delete: testvol2: success
[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume info

Volume Name: testvol
Type: Distribute
Volume ID: 1b1c1b4d-cd7e-4770-beb4-8dfa1499d723
Status: Started
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick1
Brick2: 192.168.108.141:/data/brick2

[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume create test2vol replica 2 192.168.108.141:/data/brick3 192.168.108.141:/data/brick4 192.168.108.141:/data/brick5 192.168.108.141:/data/brick6 force
Multiple bricks of a replicate volume are present on the same server. This setup is not optimal.
Do you still want to continue creating the volume? (y/n) y
volume create: test2vol: success: please start the volume to access data

[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume info

Volume Name: test2vol
Type: Distributed-Replicate
Volume ID: 581ca5a1-2651-47cc-a35f-0a018b164871
Status: Created
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick3
Brick2: 192.168.108.141:/data/brick4
Brick3: 192.168.108.141:/data/brick5
Brick4: 192.168.108.141:/data/brick6

Volume Name: testvol
Type: Distribute
Volume ID: 1b1c1b4d-cd7e-4770-beb4-8dfa1499d723
Status: Started
Number of Bricks: 2
Transport-type: tcp
Bricks:
Brick1: 192.168.108.141:/data/brick1
Brick2: 192.168.108.141:/data/brick2

[root@localhost data]# mount -t glusterfs 192.168.108.141:/test2vol /mnt/test2vol/    # 挂载 test2vol 卷
Mount failed. Please check the log file for more details.
[root@localhost data]# df -h    # 由于卷没有启动,所以挂载失败
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 18G 7.9G 8.6G 48% /
tmpfs 491M 224K 491M 1% /dev/shm
/dev/sda1 485M 35M 426M 8% /boot
192.168.108.141:/testvol 35G 16G 18G 48% /mnt
[root@localhost data]# /usr/local/glusterfs/sbin/gluster volume start test2vol
volume start: test2vol: success
[root@localhost data]# mount -t glusterfs 192.168.108.141:/test2vol /mnt/test2vol/
[root@localhost data]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root 18G 7.9G 8.6G 48% /
tmpfs 491M 224K 491M 1% /dev/shm
/dev/sda1 485M 35M 426M 8% /boot
192.168.108.141:/testvol 35G 16G 18G 48% /mnt
192.168.108.141:/test2vol 35G 16G 18G 48% /mnt/test2vol

[root@localhost data]# cd /mnt/test2vol/
[root@localhost test2vol]# ll
总用量 0
[root@localhost test2vol]# touch file.{20..30}
[root@localhost test2vol]# ll
总用量 0
-rw-r--r-- 1 root root 0 12月 27 23:29 file.20
-rw-r--r-- 1 root root 0 12月 27 23:29 file.21
-rw-r--r-- 1 root root 0 12月 27 23:29 file.22
-rw-r--r-- 1 root root 0 12月 27 23:29 file.23
-rw-r--r-- 1 root root 0 12月 27 23:29 file.24
-rw-r--r-- 1 root root 0 12月 27 23:29 file.25
-rw-r--r-- 1 root root 0 12月 27 23:29 file.26
-rw-r--r-- 1 root root 0 12月 27 23:29 file.27
-rw-r--r-- 1 root root 0 12月 27 23:29 file.28
-rw-r--r-- 1 root root 0 12月 27 23:29 file.29
-rw-r--r-- 1 root root 0 12月 27 23:29 file.30
[root@localhost test2vol]# ll /data/brick
brick1/ brick2/ brick3/ brick4/ brick5/ brick6/
[root@localhost test2vol]# ll /data/brick3/
总用量 0
-rw-r--r-- 2 root root 0 12月 27 23:29 file.20
-rw-r--r-- 2 root root 0 12月 27 23:29 file.21
-rw-r--r-- 2 root root 0 12月 27 23:29 file.22
-rw-r--r-- 2 root root 0 12月 27 23:29 file.28
-rw-r--r-- 2 root root 0 12月 27 23:29 file.30
[root@localhost test2vol]# ll /data/brick4
总用量 0
-rw-r--r-- 2 root root 0 12月 27 23:29 file.20
-rw-r--r-- 2 root root 0 12月 27 23:29 file.21
-rw-r--r-- 2 root root 0 12月 27 23:29 file.22
-rw-r--r-- 2 root root 0 12月 27 23:29 file.28
-rw-r--r-- 2 root root 0 12月 27 23:29 file.30
[root@localhost test2vol]# ll /data/brick5
总用量 0
-rw-r--r-- 2 root root 0 12月 27 23:29 file.23
-rw-r--r-- 2 root root 0 12月 27 23:29 file.24
-rw-r--r-- 2 root root 0 12月 27 23:29 file.25
-rw-r--r-- 2 root root 0 12月 27 23:29 file.26
-rw-r--r-- 2 root root 0 12月 27 23:29 file.27
-rw-r--r-- 2 root root 0 12月 27 23:29 file.29
[root@localhost test2vol]# ll /data/brick6
总用量 0
-rw-r--r-- 2 root root 0 12月 27 23:29 file.23
-rw-r--r-- 2 root root 0 12月 27 23:29 file.24
-rw-r--r-- 2 root root 0 12月 27 23:29 file.25
-rw-r--r-- 2 root root 0 12月 27 23:29 file.26
-rw-r--r-- 2 root root 0 12月 27 23:29 file.27
-rw-r--r-- 2 root root 0 12月 27 23:29 file.29
由上可以得出:当写入数据时候、数据结构是以下图:

把节点3的数据删除,能不能恢复:

[root@localhost test2vol]# ll /data/brick3
总用量 0
-rw-r--r-- 2 root root 0 12月 27 23:29 file.20
-rw-r--r-- 2 root root 0 12月 27 23:29 file.21
-rw-r--r-- 2 root root 0 12月 27 23:29 file.22
-rw-r--r-- 2 root root 0 12月 27 23:29 file.28
-rw-r--r-- 2 root root 0 12月 27 23:29 file.30
[root@localhost test2vol]# rm -rf /data/brick3/*
[root@localhost test2vol]# ll /data/brick3
总用量 0
[root@localhost test2vol]# ll /mnt/test2vol
总用量 0
-rw-r--r-- 1 root root 0 12月 27 23:29 file.20
-rw-r--r-- 1 root root 0 12月 27 23:29 file.21
-rw-r--r-- 1 root root 0 12月 27 23:29 file.22
-rw-r--r-- 1 root root 0 12月 27 23:29 file.23
-rw-r--r-- 1 root root 0 12月 27 23:29 file.24
-rw-r--r-- 1 root root 0 12月 27 23:29 file.25
-rw-r--r-- 1 root root 0 12月 27 23:29 file.26
-rw-r--r-- 1 root root 0 12月 27 23:29 file.27
-rw-r--r-- 1 root root 0 12月 27 23:29 file.28
-rw-r--r-- 1 root root 0 12月 27 23:29 file.29
-rw-r--r-- 1 root root 0 12月 27 23:29 file.30

当节点3数据丢失后,由于 brick4 上保留有副本,通过挂载点看到的卷数据仍然是完整的。

均衡下:

[root@localhost test2vol]# /usr/local/glusterfs/sbin/gluster volume rebalance test2vol start
volume rebalance: test2vol: success: Initiated rebalance on volume test2vol.
Execute “gluster volume rebalance status” to check status.
ID: adb5c963-10a5-463c-adac-f6a4f0b3ce5e

[root@localhost test2vol]# ll /data/brick3    # 数据已经恢复
总用量 0
-rw-r--r-- 2 root root 0 12月 27 23:29 file.20
-rw-r--r-- 2 root root 0 12月 27 23:29 file.21
-rw-r--r-- 2 root root 0 12月 27 23:29 file.22
-rw-r--r-- 2 root root 0 12月 27 23:29 file.28
-rw-r--r-- 2 root root 0 12月 27 23:29 file.30

Comments

2016-01-05