Lustre Intel: Notes from Installing Plymouth IEEL 3.0 Non HA ZFS


System Setup

Server Installation

cd ee-3.0.0.0
./create_installer zfs
tar zxvf lustre-zfs-el7-installer.tar.gz
cd lustre-zfs/
./install
# reboot into the Lustre kernel, then make sure the following modules are loaded
modprobe spl
modprobe zfs
# repeat the above on all nodes (MDS and OSS servers)
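
A quick sanity check at this point (a sketch only - module naming and LNET configuration may differ on your install): confirm the modules are present and note the NID each server advertises, since that address is what --mgsnode and the client mount will use later.

# confirm the ZFS and Lustre modules are loaded
lsmod | grep -E 'zfs|spl|lnet|lustre'
# bring LNET up if the installer has not already done so, then list this node's NIDs
modprobe lnet
lctl network up
lctl list_nids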

Create ZFS Pools on the servers

# let's create the ZFS pools (in this instance the /dev/sdX devices were hardware RAID6 arrays; proper instructions for creating RAIDZ2 pools still need writing - please fill them in if you set one up; a rough RAIDZ2 example appears after this block)

# mds1
zpool create -f mgt /dev/sda5
zpool create -f mdt0000 /dev/sda6
zpool list
zpool status

# oss1
zpool create -f ost0000 /dev/sda
zpool create -f ost0001 /dev/sdb
zpool create -f ost0002 /dev/sdc

# oss2
zpool create -f ost0003 /dev/sda
zpool create -f ost0004 /dev/sdb
zpool create -f ost0005 /dev/sdc
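
If you are building RAIDZ2 pools from JBOD disks instead of hardware RAID6 LUNs, the create command looks roughly like the following (a sketch only - the device names /dev/sdb through /dev/sdg and the six-disk layout are placeholders, adjust to your hardware):

# example: one RAIDZ2 vdev from six disks, ashift=12 for 4K-sector drives
zpool create -f -o ashift=12 ost0000 raidz2 /dev/sdb /dev/sdc /dev/sdd /dev/sde /dev/sdf /dev/sdg
zpool status ost0000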

Create the Lustre FS

# create the lustre fs

# mds1
mkfs.lustre --mgs --backfstype=zfs  --reformat mgt/mgt
mkfs.lustre --mdt --backfstype=zfs --fsname=lzfs --index=0 --mgsnode=172.28.1.12@tcp --reformat mdt0000/mdt0000
mkdir /zfs
mkdir /zfs/mgt
mkdir /zfs/mdt0000
mount -t lustre mgt/mgt /zfs/mgt
mount -t lustre mdt0000/mdt0000 /zfs/mdt0000/
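
Before moving on to the OSS nodes it is worth confirming the datasets exist and picked up their Lustre settings (a sketch; the ZFS OSD stores target configuration as lustre:-prefixed dataset properties, so the grep may need adjusting on other versions):

zfs list
# mkfs.lustre records its settings as lustre:-prefixed dataset properties
zfs get all mdt0000/mdt0000 | grep lustre:
# list currently mounted lustre targets on this node
mount -t lustre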

# oss1
mkfs.lustre --ost --backfstype=zfs --fsname=lzfs --index=0 --mgsnode=172.28.1.12@tcp --reformat ost0000/ost0000
mkfs.lustre --ost --backfstype=zfs --fsname=lzfs --index=1 --mgsnode=172.28.1.12@tcp --reformat ost0001/ost0001
mkfs.lustre --ost --backfstype=zfs --fsname=lzfs --index=2 --mgsnode=172.28.1.12@tcp --reformat ost0002/ost0002
mkdir /zfs
mkdir /zfs/ost0000
mkdir /zfs/ost0001
mkdir /zfs/ost0002
mount -t lustre ost0000/ost0000 /zfs/ost0000
mount -t lustre ost0001/ost0001 /zfs/ost0001
mount -t lustre ost0002/ost0002 /zfs/ost0002

# oss2
mkfs.lustre --ost --backfstype=zfs --fsname=lzfs --index=3 --mgsnode=172.28.1.12@tcp --reformat ost0003/ost0003
mkfs.lustre --ost --backfstype=zfs --fsname=lzfs --index=4 --mgsnode=172.28.1.12@tcp --reformat ost0004/ost0004
mkfs.lustre --ost --backfstype=zfs --fsname=lzfs --index=5 --mgsnode=172.28.1.12@tcp --reformat ost0005/ost0005
mkdir /zfs
mkdir /zfs/ost0003
mkdir /zfs/ost0004
mkdir /zfs/ost0005
mount -t lustre ost0003/ost0003 /zfs/ost0003
mount -t lustre ost0004/ost0004 /zfs/ost0004
mount -t lustre ost0005/ost0005 /zfs/ost0005
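
On each OSS, a quick check that the pools are healthy and all targets are mounted (sketch only):

zpool status
df -h -t lustre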

Verify

# note: as you mount each local target, check on the MDS that it appears and reports as UP
[root@mds1 lustre-zfs]# lctl dl
  0 UP osd-zfs MGS-osd MGS-osd_UUID 5
  1 UP mgs MGS MGS 7
  2 UP mgc MGC172.28.1.12@tcp 0df590f0-ea83-c81f-e135-abaa1e0f740d 5
  3 UP osd-zfs lzfs-MDT0000-osd lzfs-MDT0000-osd_UUID 10
  4 UP mds MDS MDS_uuid 3
  5 UP lod lzfs-MDT0000-mdtlov lzfs-MDT0000-mdtlov_UUID 4
  6 UP mdt lzfs-MDT0000 lzfs-MDT0000_UUID 11
  7 UP mdd lzfs-MDD0000 lzfs-MDD0000_UUID 4
  8 UP qmt lzfs-QMT0000 lzfs-QMT0000_UUID 4
  9 UP lwp lzfs-MDT0000-lwp-MDT0000 lzfs-MDT0000-lwp-MDT0000_UUID 5
 10 UP osp lzfs-OST0000-osc-MDT0000 lzfs-MDT0000-mdtlov_UUID 5
 11 UP osp lzfs-OST0001-osc-MDT0000 lzfs-MDT0000-mdtlov_UUID 5
 12 UP osp lzfs-OST0002-osc-MDT0000 lzfs-MDT0000-mdtlov_UUID 5

# client (didn't add any RPMs)
[root@head ~]# mkdir /mnt/lzfs
[root@head ~]# mount -t lustre 172.28.1.12@tcp:/lzfs /mnt/lzfs
[root@head ~]# df -h
Filesystem               Size  Used Avail Use% Mounted on
/dev/mapper/centos-root   50G   45G  5.8G  89% /
devtmpfs                  63G     0   63G   0% /dev
tmpfs                     63G     0   63G   0% /dev/shm
tmpfs                     63G   18M   63G   1% /run
tmpfs                     63G     0   63G   0% /sys/fs/cgroup
/dev/mapper/centos-home  184G  182G  2.7G  99% /home
/dev/sda1                497M  250M  248M  51% /boot
tmpfs                     13G     0   13G   0% /run/user/0
172.28.1.12@tcp:/lzfs    188T   18M  188T   1% /mnt/lzfs
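
As a final sanity check from the client (a sketch - the test file path is arbitrary), confirm all six OSTs report space and that a file actually stripes onto an OST:

lfs df -h
# write a small test file and see which OST(s) it landed on
dd if=/dev/zero of=/mnt/lzfs/stripe_test bs=1M count=100
lfs getstripe /mnt/lzfs/stripe_test
rm /mnt/lzfs/stripe_test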