ZFS raidz2 성능

   조회 12265   추천 0

현재 ZFS 볼륨으로 만든 스토리지의 속도를 체크 해봤습니다.

약 50% 정도의 데이타가 들어 있는 시스템 입니다.

[root@BACKUP ~]# zpool status
pool: DATA
state: ONLINE
scan: scrub repaired 0 in 74h7m with 0 errors on Sun Dec 18 13:33:10 2016
config:

NAME                                         STATE     READ WRITE CKSUM
DATA                                         ONLINE       0     0     0
raidz2-0                                   ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJZAU9X  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK4PB6X  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJWMYSS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJUUDEX  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK4HSKS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK2VXUS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK4M1RS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK5AP8X  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK3UAUS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJYHW3S  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK5WK1X  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK1E43S  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK2338P4H7UPVC  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK2338P4H6ZKWC  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK36N4X  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK4K6YS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK2381PCKYXZNB  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJWPAYS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK2VK8S  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJVDL4S  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCK5A1PX  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJWP9BS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJV5DWS  ONLINE       0     0     0
ata-HGST_HDN724040ALE640_PK1334PCJWSPJS  ONLINE       0     0     0

errors: No known data errors
[root@BACKUP ~]#
[root@BACKUP ~]# zpool list
NAME   SIZE  ALLOC   FREE  EXPANDSZ   FRAG    CAP  DEDUP  HEALTH  ALTROOT
DATA    87T  43.3T  43.7T         -    14%    49%  1.00x  ONLINE  -
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=1k count=1000000 && sync
1000000+0 records in
1000000+0 records out
1024000000 bytes (1.0 GB) copied, 4.99862 s, 205 MB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=1k count=10000000 && sync
10000000+0 records in
10000000+0 records out
10240000000 bytes (10 GB) copied, 48.8692 s, 210 MB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=4k count=1000000 && sync
1000000+0 records in
1000000+0 records out
4096000000 bytes (4.1 GB) copied, 5.63363 s, 727 MB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=8k count=1000000 && sync
1000000+0 records in
1000000+0 records out
8192000000 bytes (8.2 GB) copied, 6.59761 s, 1.2 GB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=16k count=1000000 && sync
1000000+0 records in
1000000+0 records out
16384000000 bytes (16 GB) copied, 8.70386 s, 1.9 GB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=256k count=100000 && sync
100000+0 records in
100000+0 records out
26214400000 bytes (26 GB) copied, 6.15736 s, 4.3 GB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=512k count=100000 && sync
100000+0 records in
100000+0 records out
52428800000 bytes (52 GB) copied, 12.3036 s, 4.3 GB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test bs=1024k count=100000 && sync
100000+0 records in
100000+0 records out
104857600000 bytes (105 GB) copied, 24.5744 s, 4.3 GB/s
[root@BACKUP ~]#


혹시 hba만 쓰셔서 이정도 속도가 나오신건지요? 9211 IT mode로 raidz2에 l2arc까지 올려 보았는데, 생각보다 빠르지가 않더라구요.. 캐쉬있는 하드웨어 레이드위에 복잡한 옵션(raidz 및 캐쉬등) 안쓰고 zfs를 올려쓰는게 훨씬 더 빠른거 같아서.. hba를 바꿔야되는지 raid를 올려야 하는지 고민이라 질문드립니다
저도 지금 ZFS 스토리지 만들어서 테스트 중인데 생각보다 성능이
안 나오더라구요. 그런데 이 테스트에서는 엄청 잘 나오는군요.
사용하신 명령어중에...
sync && dd if=/dev/zero of=/DATA/dd_test bs=1k count=1000000 && sync
... 요게 없는 파일을 임의로 만들어서 테스트 하는 건가요?
맞다면 저도 이 명령어로 내부 테스트를 좀 해봐야 할 것 같습니다.
유사용 2018-03
현재 HBA 입니다. 보드 내장 LSI SAS3 입니다.
CentOS6 에 ZFS 올린거고요.
mpt3sas_cm0: sending message unit reset !!
mpt3sas_cm0: message unit reset: SUCCESS
Refined TSC clocksource calibration: 3499.997 MHz.
Switching to clocksource tsc
mpt3sas_cm0: Allocated physical memory: size(17971 kB)
mpt3sas_cm0: Current Controller Queue Depth(9979),Max Controller Queue Depth(10240)
mpt3sas_cm0: Scatter Gather Elements per IO(128)
mpt3sas_cm0: LSISAS3008: FWVersion(10.00.00.00), ChipRevision(0x02), BiosVersion(08.25.00.00)
mpt3sas_cm0: Protocol=(
Initiator
,Target
),
Capabilities=(
TLR
,EEDP
,Snapshot Buffer
,Diag Trace Buffer
,Task Set Full
,NCQ
)
scsi0 : Fusion MPT SAS Host
mpt3sas_cm0: sending port enable !!
mpt3sas_cm0: host_add: handle(0x0001), sas_addr(0x5003048016a8b500), phys(8)
mpt3sas_cm0: expander_add: handle(0x0009), parent(0x0001), sas_addr(0x50030480178130bf), phys(38)
mpt3sas_cm0: port enable: SUCCESS

dd 명령어로 파일 쓰기하는겁니다.
순차쓰기 ... 랜덤 쓰기는 다시 올려 드릴께요. ㅎ

친절한 답변 고맙습니다.
시간나는대로 테스트 해봐야겠습니다. ^0^)/
유사용 2018-03
랜덤쓰기는 잘안되네요.. 이상하게..
ZFS 옵션을 몇가지 쓰긴하는데..

ZFS ARC CACHE Min 1G / Max 8G Limit
compressratio                2.37x
recordsize                  128K
compression                  on
sync                        disabled

autoexpand                                    on
autoexpand                                    on

[root@BACKUP DATA]# sync && dd if=/dev/zero of=/DATA/dd_test bs=16k count=1000000 && sync
1000000+0 records in
1000000+0 records out
16384000000 bytes (16 GB) copied, 8.65717 s, 1.9 GB/s
[root@BACKUP DATA]#
[root@BACKUP DATA]# sync && dd if=/dev/zero of=/DATA/dd_test bs=32k count=1000000 && sync
1000000+0 records in
1000000+0 records out
32768000000 bytes (33 GB) copied, 12.6994 s, 2.6 GB/s
[root@BACKUP DATA]#
[root@BACKUP DATA]# sync && dd if=/dev/zero of=/DATA/dd_test bs=64k count=1000000 && sync
1000000+0 records in
1000000+0 records out
65536000000 bytes (66 GB) copied, 20.6471 s, 3.2 GB/s
[root@BACKUP DATA]#
[root@BACKUP DATA]# sync && dd if=/dev/zero of=/DATA/dd_test bs=128k count=1000000 && sync
1000000+0 records in
1000000+0 records out
131072000000 bytes (131 GB) copied, 28.5942 s, 4.6 GB/s
[root@BACKUP DATA]#
[root@BACKUP DATA]# sync && dd if=/dev/zero of=/DATA/dd_test bs=256k count=1000000 && sync
1000000+0 records in
1000000+0 records out
262144000000 bytes (262 GB) copied, 59.8449 s, 4.4 GB/s
[root@BACKUP DATA]#
[root@BACKUP DATA]# sync && dd if=/dev/zero of=/DATA/dd_test bs=512k count=1000000 && sync
1000000+0 records in
1000000+0 records out
524288000000 bytes (524 GB) copied, 119.561 s, 4.4 GB/s
[root@BACKUP DATA]#
     
sync가 disable인걸 보니 ZIL은 안쓰시는군요..벤치 정보 감사합니다~ 캐쉬있는 하드웨어 RAID위에 올린 ZFS가 개인적으로는 L2ARC나 ZIL 설정까지 했을때 보다 더 빠른거 같아서, 하드웨어 RAID위에 패리티 충돌 같은게 안일어나게 raid0로 ZFS를 올려 써야겠습니다..
epowergate 2018-03
지금 테스트 하시는건 단일 Thread 테스트 입니다.  filesystem/storage가 가장좋아하고 최적의 성능을 낼 수 있는 테스트 입니다.
동일한 방식으로 DD를 10개, 20개, 50개, 100개, 1000개를 동시에 수행시켜보시면 좀더 현실적인 테스트를 하실 수 있을겁니다.
아시겠지만 ZFS는 DBMS를 올리기에 적당한 파일시스템은 아닙니다.  만약에 DB용 스토리지 테스트를 하실 수 있으시면 놀라실 겁니다.

그리고, 마지막에 sync는 필요없습니다.  보시는 결과/시간은 "dd"에 대한 결과이지 마지막 "sync" 의 시간이 포함되어 있지 않습니다.
그렇기 때문에 많은 데이터가 여전히 메모리에 있을겁니다.  아시겠지만 ZFS가 cacheing 무지 잘합니다.
마지막에 "SYNC"를 넣으신 이유가 모든 데이터가 DISK까지 들어가는 시간을 확인하시는 거라면 "time"을 DD와 SYNC를 모두 넣는 문장으로 바꾸셔야 합니다.

ZFS는 COW 파일시스템이기 때문에 영향이 적을 순 있겠습니다.
     
유사용 2018-03
한꺼번에 10개를 돌려 봤습니다.
ARC 캐쉬는 MAX 8G 입니다.

ARC Summary: (HEALTHY)
        Memory Throttle Count:                  0

ARC Misc:
        Deleted:                                11.76b
        Mutex Misses:                          31.54k
        Evict Skips:                            31.54k

ARC Size:                              12.68%  1.01    GiB
        Target Size: (Adaptive)        26.20%  2.10    GiB
        Min Size (Hard Limit):          12.50%  1.00    GiB
        Max Size (High Water):          8:1    8.00    GiB

ARC Size Breakdown:
        Recently Used Cache Size:      97.45%  2.04    GiB
        Frequently Used Cache Size:    2.55%  54.71  MiB

ARC Hash Breakdown:
        Elements Max:                          610.25k
        Elements Current:              10.25%  62.58k
        Collisions:                            2.04b
        Chain Max:                              8
        Chains:                                925

ARC Total accesses:                                    3.16b
        Cache Hit Ratio:                57.50%  1.82b
        Cache Miss Ratio:              42.50%  1.34b
        Actual Hit Ratio:              57.50%  1.82b

        Data Demand Efficiency:        57.99%  357.59m

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test2 bs=128k count=1000000 oflag=sync
1000000+0 records in
1000000+0 records out
131072000000 bytes (131 GB) copied, 422.089 s, 311 MB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test4 bs=128k count=1000000 oflag=sync
1000000+0 records in
1000000+0 records out
131072000000 bytes (131 GB) copied, 458.118 s, 286 MB/s
[root@BACKUP ~]#

[root@BACKUP ~]# sync && dd if=/dev/zero of=/DATA/dd_test0 bs=128k count=1000000 oflag=sync
1000000+0 records in
1000000+0 records out
131072000000 bytes (131 GB) copied, 455.995 s, 287 MB/s
[root@BACKUP ~]#

약 300 MB/s 정도 나오네요.
          
epowergate 2018-03
이런거 번거러워서 저희는 보통 VDBENCH로 합니다.
정확한 변수는 기억이 없는데 대략 다음과 같습니다.

생성
DEPTH=3, WIDTH=100, FILES =1000, FILESIZE=16MB
RD는
FILEIO/FILESELECTION RANDOM
xfersize=16KB
THREAD=10,20,50,100,1000
READ:WRITE = 7:3 (각 THREAD가 READ 7, WRITE 3)
INTERVAL=100 (중요)
으로 합니다.

R/W는 생각하시는 사용자의 환경에 맞춰서 만드시면 되겠지요.

한번 만들어 놓으면 그냥 수시로 돌리면 됩니다.


제목 Page 4/55
2014-05   4960274   정은준1
2015-12   1496826   백메가
2023-01   33278   sudosu
2023-07   49011   김민수2
2015-01   46893   nova
2018-03   12266   빡시다
2020-02   4213   mahu
2021-04   5568   perls
2013-05   34465   이갑부
2014-01   47419   윤민수
2022-07   10952   thisway
2015-11   11840   김현린
2019-06   6347   딥마인드
2021-07   3949   anti2cpu
2013-10   14814   김상민
2016-02   20136   캔위드
2018-10   10357   천둥이
2019-03   8369   까르
2020-09   5443   초보신입
2023-01   23822   psj284
2017-09   12089   Sakura24
2018-10   10245   chotws