## Let us now experiment with ZFS
## here is my general environment:
##
MySolaris10Host# date
Wed Feb 20 16:05:17 EST 2008
MySolaris10Host# uname -a
SunOS MySolaris10Host 5.10 Generic_118833-24 sun4u sparc SUNW,Ultra-80
MySolaris10Host# psrinfo
0 on-line since 02/16/2008 10:22:04
1 on-line since 02/16/2008 10:22:23
2 on-line since 02/16/2008 10:22:23
3 on-line since 02/16/2008 10:22:23
MySolaris10Host# prtconf -v | grep Memory
Memory size: 4096 Megabytes
MySolaris10Host#
##
## my current SAN disks
##
MySolaris10Host# vxdisk -o alldgs list
DEVICE TYPE DISK GROUP STATUS
c0t0d0s2 auto:none - - online invalid
c0t1d0s2 auto:none - - online invalid
emcpower0s2 auto:simple - - online
emcpower1s2 auto:simple - (someothersdg) online
MySolaris10Host# echo | format
Searching for disks...done
AVAILABLE DISK SELECTIONS:
0. c0t0d0
/pci@1f,4000/scsi@3/sd@0,0
1. c0t1d0
/pci@1f,4000/scsi@3/sd@1,0
2. c4t28d53
/pci@1f,4000/lpfc@2/sd@1c,35
3. c4t28d54
/pci@1f,4000/lpfc@2/sd@1c,36
4. c5t72d53
/pci@1f,4000/lpfc@4/sd@48,35
5. c5t72d54
/pci@1f,4000/lpfc@4/sd@48,36
6. emcpower0a
/pseudo/emcp@0
7. emcpower1a
/pseudo/emcp@1
Specify disk (enter its number): Specify disk (enter its number):
MySolaris10Host#
MySolaris10Host# inq
Inquiry utility, Version V7.3-648 (Rev 2.0) (SIL Version V6.0.2.0 (Edit Level 648))
Copyright (C) by EMC Corporation, all rights reserved.
For help type inq -h.
.....................
---------------------------------------------------------------------------------------------
DEVICE :VEND :PROD :REV :SER NUM :CAP(kb)
---------------------------------------------------------------------------------------------
/dev/rdsk/c0t0d0s2 :SEAGATE :ST318404LSUN18G :4207 : : 17689266
/dev/rdsk/c0t1d0s2 :SEAGATE :ST318404LSUN18G :4207 : : 17689266
/dev/rdsk/c4t28d53s2 :EMC :SYMMETRIX :5771 :5902adf000 : 17679360
/dev/rdsk/c4t28d54s2 :EMC :SYMMETRIX :5771 :5902ae1000 : 17679360
/dev/rdsk/c5t72d53s2 :EMC :SYMMETRIX :5771 :5902adf000 : 17679360
/dev/rdsk/c5t72d54s2 :EMC :SYMMETRIX :5771 :5902ae1000 : 17679360
/dev/rdsk/emcpower0c :EMC :SYMMETRIX :5771 :5902adf000 : 17679360
/dev/rdsk/emcpower1c :EMC :SYMMETRIX :5771 :5902ae1000 : 17679360
/dev/vx/rdmp/c0t0d0s2 :SEAGATE :ST318404LSUN18G :4207 : : 17689266
/dev/vx/rdmp/c0t1d0s2 :SEAGATE :ST318404LSUN18G :4207 : : 17689266
/dev/vx/rdmp/emcpower0s2 :EMC :SYMMETRIX :5771 :5902adf000 : 17679360
/dev/vx/rdmp/emcpower1s2 :EMC :SYMMETRIX :5771 :5902ae1000 : 17679360
##
## I will use emcpower0s2 which has "SER NUM" at 5902adf000
## which corresponds to c5t72d53s2
## you may create pool on either disk names
## the powerpath will work regardless
## I prefer to use cXtXdXs2 because it is easier to identify
##
MySolaris10Host# zpool create -f testpool c5t72d53s2
##
## to verify the disk has changed
##
MySolaris10Host# format c5t72d53
selecting c5t72d53
[disk formatted]
/dev/dsk/c5t72d53s2 is part of active ZFS pool testpool. Please see zpool(1M).
FORMAT MENU:
disk - select a disk
type - select (define) a disk type
partition - select (define) a partition table
current - describe the current disk
format - format and analyze the disk
repair - repair a defective sector
label - write label to the disk
analyze - surface analysis
defect - defect list management
backup - search for backup labels
verify - read and display labels
save - save new disk/partition definitions
inquiry - show vendor, product and revision
volname - set 8-character volume name
!
quit
format> p
PARTITION MENU:
0 - change `0' partition
1 - change `1' partition
2 - change `2' partition
3 - change `3' partition
4 - change `4' partition
5 - change `5' partition
6 - change `6' partition
7 - change `7' partition
select - select a predefined table
modify - modify a predefined partition table
name - name the current table
print - display the current table
label - write partition map and label to the disk
!
quit
partition> p
Current partition table (original):
Total disk cylinders available: 18414 + 2 (reserved cylinders)
Part Tag Flag Cylinders Size Blocks
0 unassigned wm 0 0 (0/0/0) 0
1 unassigned wm 0 0 (0/0/0) 0
2 backup wu 0 - 18413 16.86GB (18414/0/0) 35354880
3 - wu 1 - 18413 16.86GB (18413/0/0) 35352960
4 unassigned wm 0 0 (0/0/0) 0
5 unassigned wm 0 0 (0/0/0) 0
6 unassigned wm 0 0 (0/0/0) 0
7 unassigned wm 0 0 (0/0/0) 0
partition> ^D
# by default /testpool entry point has been created.
# note, you may alter this default with -m option
##
## check pool status
##
MySolaris10Host# zpool status
pool: testpool
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
testpool ONLINE 0 0 0
c5t72d53s2 ONLINE 0 0 0
errors: No known data errors
MySolaris10Host# zfs get all testpool
NAME PROPERTY VALUE SOURCE
testpool type filesystem -
testpool creation Wed Feb 20 15:59 2008 -
testpool used 34.1M -
testpool available 16.5G -
testpool referenced 24.5K -
testpool compressratio 1.00x -
testpool mounted yes -
testpool quota none default
testpool reservation none default
testpool recordsize 128K default
testpool mountpoint /testpool default
testpool sharenfs off default
testpool checksum on default
testpool compression off default
testpool atime on default
testpool devices on default
testpool exec on default
testpool setuid on default
testpool readonly off default
testpool zoned off default
testpool snapdir hidden default
testpool aclmode groupmask default
testpool aclinherit secure default
MySolaris10Host#
MySolaris10Host# df -h
Filesystem size used avail capacity Mounted on
... # cut some lines here
testpool 16G 24K 16G 1% /testpool
##
## now, create file system "ZFS"
##
MySolaris10Host# zfs create testpool/testfs
MySolaris10Host# df | grep testfs
/testpool/testfs (testpool/testfs ):34578223 blocks 34578223 files
MySolaris10Host# zfs set mountpoint=/opt/testfs testpool/testfs
MySolaris10Host# zfs get all testpool/testfs
NAME PROPERTY VALUE SOURCE
testpool/testfs type filesystem -
testpool/testfs creation Wed Feb 20 16:34 2008 -
testpool/testfs used 34.0M -
testpool/testfs available 16.5G -
testpool/testfs referenced 34.0M -
testpool/testfs compressratio 1.00x -
testpool/testfs mounted yes -
testpool/testfs quota none default
testpool/testfs reservation none default
testpool/testfs recordsize 128K default
testpool/testfs mountpoint /opt/testfs local
testpool/testfs sharenfs off default
testpool/testfs checksum on default
testpool/testfs compression off default
testpool/testfs atime on default
testpool/testfs devices on default
testpool/testfs exec on default
testpool/testfs setuid on default
testpool/testfs readonly off default
testpool/testfs zoned off default
testpool/testfs snapdir visible local
testpool/testfs aclmode groupmask default
testpool/testfs aclinherit secure default
MySolaris10Host# df -h | grep testfs
testpool/testfs 16G 24K 16G 1% /opt/testfs
##
## create and destroy snapshots
##
MySolaris10Host# zpool create pool_on_pool /dev/zvol/dsk/testpool/testvol
warning: device in use checking failed: No such device
MySolaris10Host# zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
pool_on_pool 195M 80K 195M 0% ONLINE -
testpool 16.8G 35.3M 16.7G 0% ONLINE -
MySolaris10Host# zfs get all pool_on_pool
NAME PROPERTY VALUE SOURCE
pool_on_pool type filesystem -
pool_on_pool creation Wed Feb 20 17:30 2008 -
pool_on_pool used 77K -
pool_on_pool available 163M -
pool_on_pool referenced 24.5K -
pool_on_pool compressratio 1.00x -
pool_on_pool mounted yes -
pool_on_pool quota none default
pool_on_pool reservation none default
pool_on_pool recordsize 128K default
pool_on_pool mountpoint /pool_on_pool default
pool_on_pool sharenfs off default
pool_on_pool checksum on default
pool_on_pool compression off default
pool_on_pool atime on default
pool_on_pool devices on default
pool_on_pool exec on default
pool_on_pool setuid on default
pool_on_pool readonly off default
pool_on_pool zoned off default
pool_on_pool snapdir hidden default
pool_on_pool aclmode groupmask default
pool_on_pool aclinherit secure default
MySolaris10Host# cd /opt/testfs
MySolaris10Host# ls
MySolaris10Host# pwd
/opt/testfs
MySolaris10Host# find /etc | cpio -pdm ./
cpio: Cannot open "/etc/vx/vxesd/vxesd.socket", skipped, errno 122, Operation not supported on transport endpoint
76880 blocks
1 error(s)
MySolaris10Host# ls
etc
MySolaris10Host# df -k .
Filesystem kbytes used avail capacity Mounted on
testpool/testfs 17289216 34818 17254310 1% /opt/testfs
MySolaris10Host# zfs snapshot testpool/testfs@testsnap
MySolaris10Host# zfs set snapdir=visible testpool/testfs
MySolaris10Host# ls -al
total 22
drwxr-xr-x 3 root sys 3 Feb 20 16:59 .
drwxr-xr-x 27 bin bin 1024 Feb 20 16:34 ..
dr-xr-xr-x 3 root root 3 Feb 20 16:34 .zfs
drwxr-xr-x 70 root sys 264 Feb 20 16:59 etc
MySolaris10Host# cd .zfs/snapshot/testsnap
MySolaris10Host# ls
etc
MySolaris10Host#
MySolaris10Host# cd /opt/testfs
MySolaris10Host# df -k
Filesystem kbytes used avail capacity Mounted on
/dev/md/dsk/d0 6196234 5925575 208697 97% /
/devices 0 0 0 0% /devices
ctfs 0 0 0 0% /system/contract
proc 0 0 0 0% /proc
mnttab 0 0 0 0% /etc/mnttab
swap 7306320 1080 7305240 1% /etc/svc/volatile
objfs 0 0 0 0% /system/object
fd 0 0 0 0% /dev/fd
swap 7340680 35440 7305240 1% /tmp
swap 7305280 40 7305240 1% /var/run
/dev/md/dsk/d3 7070981 1469832 5530440 21% /opt
testpool 17289216 24 17254309 1% /testpool
testpool/testfs 17289216 34818 17254309 1% /opt/testfs
MySolaris10Host# pwd
/opt/testfs
MySolaris10Host# ls
etc
MySolaris10Host# rm -rf etc
MySolaris10Host# ls
MySolaris10Host# cd
MySolaris10Host# pwd
/
MySolaris10Host# zfs rollback testpool/testfs@testsnap
MySolaris10Host# cd /opt/testfs
MySolaris10Host# ls
etc
MySolaris10Host# zfs destroy testpool/testfs@testsnap
##
## create a volume on an existing pool
##
MySolaris10Host# df -h -F zfs
Filesystem size used avail capacity Mounted on
testpool 16G 24K 16G 1% /testpool
testpool/testfs 16G 34M 16G 1% /opt/testfs
MySolaris10Host# zfs create -V 200m testpool/testvol
MySolaris10Host# ls -lL /dev/zvol/*dsk/testpool
/dev/zvol/dsk/testpool:
total 0
brw------- 1 root sys 256, 1 Feb 20 17:29 testvol
/dev/zvol/rdsk/testpool:
total 0
crw------- 1 root sys 256, 1 Feb 20 17:29 testvol
##
## create a pool on pool
##
MySolaris10Host# zpool create pool_on_pool /dev/zvol/dsk/testpool/testvol
warning: device in use checking failed: No such device
MySolaris10Host# zpool list
NAME SIZE USED AVAIL CAP HEALTH ALTROOT
pool_on_pool 195M 80K 195M 0% ONLINE -
testpool 16.8G 35.3M 16.7G 0% ONLINE -
MySolaris10Host# zfs get all pool_on_pool
NAME PROPERTY VALUE SOURCE
pool_on_pool type filesystem -
pool_on_pool creation Wed Feb 20 17:30 2008 -
pool_on_pool used 77K -
pool_on_pool available 163M -
pool_on_pool referenced 24.5K -
pool_on_pool compressratio 1.00x -
pool_on_pool mounted yes -
pool_on_pool quota none default
pool_on_pool reservation none default
pool_on_pool recordsize 128K default
pool_on_pool mountpoint /pool_on_pool default
pool_on_pool sharenfs off default
pool_on_pool checksum on default
pool_on_pool compression off default
pool_on_pool atime on default
pool_on_pool devices on default
pool_on_pool exec on default
pool_on_pool setuid on default
pool_on_pool readonly off default
pool_on_pool zoned off default
pool_on_pool snapdir hidden default
pool_on_pool aclmode groupmask default
pool_on_pool aclinherit secure default
MySolaris10Host# df -F zfs
/testpool (testpool ):34099012 blocks 34099012 files
/opt/testfs (testpool/testfs ):34099012 blocks 34099012 files
/pool_on_pool (pool_on_pool ): 333670 blocks 333670 files
MySolaris10Host# zfs create pool_on_pool/poolonpool_fs1
MySolaris10Host# zfs set mountpoint=/opt/poolonpool_fs1 pool_on_pool/poolonpool_fs1
MySolaris10Host# df -h -F zfs
Filesystem size used avail capacity Mounted on
testpool 16G 24K 16G 1% /testpool
testpool/testfs 16G 34M 16G 1% /opt/testfs
pool_on_pool 163M 26K 163M 1% /pool_on_pool
pool_on_pool/poolonpool_fs1
163M 24K 163M 1% /opt/poolonpool_fs1
##
## destroy everything
##
MySolaris10Host# zfs umount -a
MySolaris10Host# df -F zfs
MySolaris10Host#
MySolaris10Host# zfs destroy pool_on_pool/poolonpool_fs1
MySolaris10Host# zfs destroy testpool/testfs
MySolaris10Host# zpool destroy pool_on_pool
MySolaris10Host# zpool destroy testpool
MySolaris10Host# format c5t72d53
selecting c5t72d53
[disk formatted]
FORMAT MENU:
disk - select a disk
type - select (define) a disk type
partition - select (define) a partition table
current - describe the current disk
format - format and analyze the disk
repair - repair a defective sector
label - write label to the disk
analyze - surface analysis
defect - defect list management
backup - search for backup labels
verify - read and display labels
save - save new disk/partition definitions
inquiry - show vendor, product and revision
volname - set 8-character volume name
!
quit
format> p
PARTITION MENU:
0 - change `0' partition
1 - change `1' partition
2 - change `2' partition
3 - change `3' partition
4 - change `4' partition
5 - change `5' partition
6 - change `6' partition
7 - change `7' partition
select - select a predefined table
modify - modify a predefined partition table
name - name the current table
print - display the current table
label - write partition map and label to the disk
!
quit
partition> p
Current partition table (original):
Total disk cylinders available: 18414 + 2 (reserved cylinders)
Part Tag Flag Cylinders Size Blocks
0 unassigned wm 0 0 (0/0/0) 0
1 unassigned wm 0 0 (0/0/0) 0
2 backup wu 0 - 18413 16.86GB (18414/0/0) 35354880
3 - wu 1 - 18413 16.86GB (18413/0/0) 35352960
4 unassigned wm 0 0 (0/0/0) 0
5 unassigned wm 0 0 (0/0/0) 0
6 unassigned wm 0 0 (0/0/0) 0
7 unassigned wm 0 0 (0/0/0) 0
partition> ^D
MySolaris10Host# date
Wed Feb 20 17:49:53 EST 2008
MySolaris10Host#
No comments:
Post a Comment