1. Add an 80 GB SCSI disk to the host
[root@localhost ~]# fdisk -l /dev/sdb
Disk /dev/sdb: 85.9 GB, 85899345920 bytes, 167772160 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
2. Create three 20 GB primary partitions
[root@localhost ~]# parted /dev/sdb
GNU Parted 3.1
Using /dev/sdb
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel
New disk label type? gpt
(parted) mkpart
Partition name? []? sdb1
File system type? [ext2]? ext4
Start? 1
End? 20G
(parted) mkpart
Partition name? []? sdb2
File system type? [ext2]? ext4
Start? 20G
End? 40G
(parted) mkpart
Partition name? []? sdb3
File system type? [ext2]? ext4
Start? 40G
End? 60G
(parted) p
Model: VMware, VMware Virtual S (scsi)
Disk /dev/sdb: 85.9GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:
Number Start End Size File system Name Flags
1 1049kB 20.0GB 20.0GB sdb1
2 20.0GB 40.0GB 20.0GB sdb2
3 40.0GB 60.0GB 20.0GB sdb3
(parted) q
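For reference, the same layout can be produced non-interactively with parted in script mode. This is only a sketch of an equivalent invocation (it assumes the same blank /dev/sdb and uses MiB-aligned boundaries), not part of the session above:
parted -s /dev/sdb mklabel gpt
parted -s /dev/sdb mkpart sdb1 ext4 1MiB 20GiB
parted -s /dev/sdb mkpart sdb2 ext4 20GiB 40GiB
parted -s /dev/sdb mkpart sdb3 ext4 40GiB 60GiB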
3. Convert the three partitions into physical volumes (pvcreate) and scan the physical volumes on the system
[root@localhost ~]# pvcreate /dev/sdb[123] && pvscan
Physical volume "/dev/sdb1" successfully created
Physical volume "/dev/sdb2" successfully created
Physical volume "/dev/sdb3" successfully created
PV /dev/sda2 VG centos lvm2 [39.51 GiB / 44.00 MiB free]
PV /dev/sdb3 lvm2 [18.63 GiB]
PV /dev/sdb1 lvm2 [18.62 GiB]
PV /dev/sdb2 lvm2 [18.63 GiB]
Total: 4 [95.39 GiB] / in use: 1 [39.51 GiB] / in no VG: 3 [55.88 GiB]
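If a more compact listing is wanted, pvs prints one line per physical volume; note that /dev/sdb3 is initialized here but will be left out of the volume group in the next step:
pvs /dev/sdb1 /dev/sdb2 /dev/sdb3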
4. Use two of the physical volumes to create a volume group named myvg, then view the volume group size
[root@localhost ~]# vgcreate myvg /dev/sdb[12] && vgdisplay myvg
Volume group "myvg" successfully created
--- Volume group ---
VG Name myvg
System ID
Format lvm2
Metadata Areas 2
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 2
Act PV 2
VG Size 37.25 GiB
PE Size 4.00 MiB
Total PE 9535
Alloc PE / Size 0 / 0
Free PE / Size 9535 / 37.25 GiB
VG UUID 3jCBJr-GfnR-XJW3-4hOV-URK2-LbLu-h7judn
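vgs gives the same key figures on a single line, which is handy for quick checks (standard lvm2 column names):
vgs -o vg_name,pv_count,vg_size,vg_free myvg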
5. Create a logical volume named mylv with a size of 30 GB
[root@localhost ~]# lvcreate -L 30G -n mylv myvg && lvdisplay /dev/myvg/mylv
Logical volume "mylv" created.
--- Logical volume ---
LV Path /dev/myvg/mylv
LV Name mylv
VG Name myvg
LV UUID 9BZvSc-X4NN-LQU0-540r-sP6T-tYfk-1KSWEj
LV Write Access read/write
LV Creation host, time localhost.localdomain, 2019-08-01 19:15:52 +0800
LV Status available
# open 0
LV Size 30.00 GiB
Current LE 7680
Segments 2
Allocation inherit
Read ahead sectors auto
- currently set to 8192
Block device 253:2
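The logical volume is also reachable through the device-mapper name /dev/mapper/myvg-mylv; both paths point at the same device, as a quick check shows:
lvs myvg
ls -l /dev/myvg/mylv /dev/mapper/myvg-mylv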
6. Format the logical volume with the xfs file system, mount it on the /data directory, and create a file to test
[root@localhost ~]# mkfs.xfs /dev/myvg/mylv && mkdir /data && mount /dev/myvg/mylv /data && touch /data/1.txt
meta-data=/dev/myvg/mylv isize=256 agcount=4, agsize=1966080 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=7864320, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=3840, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# ls /
bin boot ceshi data dev etc home lib lib64 media mnt opt proc root run sbin srv sys tmp usr var web
[root@localhost ~]# ls /data
1.txt
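To double-check that /data is really backed by the new logical volume rather than the root file system, something like the following can be run (not part of the session above):
df -hT /data
lsblk -f /dev/sdb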
7. Grow the logical volume to 35 GB
[root@localhost ~]# lvextend -L +5G /dev/myvg/mylv
Size of logical volume myvg/mylv changed from 30.00 GiB (7680 extents) to 35.00 GiB (8960 extents).
Logical volume mylv successfully resized
[root@localhost ~]# xfs_growfs /dev/myvg/mylv
meta-data=/dev/mapper/myvg-mylv isize=256 agcount=4, agsize=1966080 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0 finobt=0
data = bsize=4096 blocks=7864320, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal bsize=4096 blocks=3840, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
data blocks changed from 7864320 to 9175040
[root@localhost ~]# df -hT
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/centos-root xfs 38G 4.8G 33G 13% /
devtmpfs devtmpfs 481M 0 481M 0% /dev
tmpfs tmpfs 490M 80K 490M 1% /dev/shm
tmpfs tmpfs 490M 7.1M 483M 2% /run
tmpfs tmpfs 490M 0 490M 0% /sys/fs/cgroup
/dev/sda1 xfs 497M 107M 391M 22% /boot
/dev/mapper/myvg-mylv xfs 35G 33M 35G 1% /data
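On current lvm2 releases the two commands can usually be combined: lvextend with the -r/--resizefs flag calls the appropriate grow tool (xfs_growfs for a mounted XFS) automatically. A sketch, assuming the same volume:
lvextend -r -L 35G /dev/myvg/mylv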
8. Edit /etc/fstab to mount the logical volume with disk quota options enabled
[root@localhost ~]# echo "/dev/myvg/mylv /data xfs defaults,usrquota,grpquota 0 0">> /etc/fstab && tail -1 /etc/fstab
/dev/myvg/mylv /data xfs defaults,usrquota,grpquota 0 0
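XFS only honours quota mount options at mount time, so after the remount in the next step the options actually in effect can be confirmed with findmnt, e.g.:
findmnt -o TARGET,SOURCE,FSTYPE,OPTIONS /data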
9. Set up disk quotas: for user crushlinux in /data, a soft limit of 80 MB and a hard limit of 100 MB on disk usage, and a soft limit of 80 and a hard limit of 100 on the number of files.
[root@localhost ~]# umount /data
[root@localhost ~]# mount -a
[root@localhost ~]# quotacheck -auvg
quotacheck: Skipping /dev/mapper/myvg-mylv [/data]
quotacheck: Cannot find filesystem to check or filesystem not mounted with quota option.
[root@localhost ~]# quotaon -auvg
[root@localhost ~]# edquota -u crushlinux
Disk quotas for user crushlinux (uid 1002):
Filesystem blocks soft hard inodes soft hard
/dev/mapper/myvg-mylv 0 81920 102400 0 80 100
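The same limits can also be set non-interactively, which is easier to script; either setquota (block values in 1 KiB units) or the XFS-native xfs_quota should achieve the same result here:
setquota -u crushlinux 81920 102400 80 100 /data
xfs_quota -x -c 'limit -u bsoft=80m bhard=100m isoft=80 ihard=100 crushlinux' /data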
10. Test in the /data directory with touch and dd
[crushlinux@localhost data]$ dd if=/dev/zero of=/data/1.txt bs=1M count=110
dd: error writing '/data/1.txt': Disk quota exceeded
[crushlinux@localhost data]$ touch {1..101}.txt
touch: cannot touch '101.txt': Disk quota exceeded
11. View quota usage: from the user's perspective
[root@localhost ~]# quota -uvs
Disk quotas for user root (uid 0):
Filesystem space quota limit grace files quota limit grace
/dev/mapper/myvg-mylv
0K 0K 0K 3 0 0
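Run by root with no username, quota reports on root itself; to see the test user's usage and limits, pass the username explicitly:
quota -uvs crushlinux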
12. View quota usage: from the file system's perspective
[root@localhost ~]# repquota -auvs
*** Report for user quotas on device /dev/mapper/myvg-mylv
Block grace time: 7days; Inode grace time: 7days
Space limits File limits
User used soft hard grace used soft hard grace
----------------------------------------------------------------------
root -- 0K 0K 0K 3 0 0
crushlinux -- 0K 80M 100M 0 80 100
*** Status for user quotas on device /dev/mapper/myvg-mylv
Accounting: ON; Enforcement: ON
Inode: #132 (2 blocks, 2 extents)
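Because the file system is XFS, the same report is also available through xfs_quota (-b and -i select block and inode quotas, -h prints human-readable sizes):
xfs_quota -x -c 'report -bih' /data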
[root@localhost ~]# cat peie.sh
#!/bin/bash
echo "n
p


+20G
n
p


+20G
n
p


+20G
p
w" | fdisk /dev/sdb
pvcreate /dev/sdb[12]
pvscan
vgcreate myvg /dev/sdb[12]
vgdisplay
lvcreate -y -L 30G -n mylv myvg
lvdisplay /dev/myvg/mylv
mkfs.xfs /dev/myvg/mylv
if [ ! -d "/data" ];then
    mkdir /data
    mount /dev/myvg/mylv /data
else
    echo "The /data directory already exists, mounting it."
    mount /dev/myvg/mylv /data
fi
touch /data/1.txt
if [ -f "/data/1.txt" ];then
    echo "Mount succeeded!"
else
    echo "Mount failed!"
fi
lvextend -L +5G /dev/myvg/mylv
xfs_growfs /dev/myvg/mylv
df -hT
echo "/dev/myvg/mylv /data xfs defaults,usrquota,grpquota 1 0">> /etc/fstab
tail -1 /etc/fstab
[root@localhost ~]# cat deljuan.sh
#!/bin/bash
umount /data
lvremove -y /dev/myvg/mylv
vgremove -y myvg
pvremove -y /dev/sdb[12]
echo "d
d
d
p
w" | fdisk /dev/sdb
if [ -d "/data" ];then
    rm -fr /data
else
    echo "The /data directory does not exist!"
fi