diff options
| author | Bill Pijewski <wdp@joyent.com> | 2011-03-31 09:33:01 -0700 |
|---|---|---|
| committer | Bill Pijewski <wdp@joyent.com> | 2011-03-31 09:33:01 -0700 |
| commit | 4e79c50cac072a506defdd893c6182af93e5b895 (patch) | |
| tree | a20f5a3d9175539096742d6126f45d69ba572d64 /usr/src | |
| parent | 0f6619823b71f98873243768b8f0cf072bfc5fb5 (diff) | |
| download | illumos-joyent-4e79c50cac072a506defdd893c6182af93e5b895.tar.gz | |
OS-172 ziostat's wsvc_t column is unimplemented
OS-339 vfsstat should show I/O throttle statistics
OS-355 ziostat doesn't need columns for w/s and kw/s
Diffstat (limited to 'usr/src')
| -rw-r--r-- | usr/src/cmd/stat/vfsstat/vfsstat.pl | 31 | ||||
| -rwxr-xr-x | usr/src/cmd/stat/ziostat/ziostat.pl | 52 | ||||
| -rw-r--r-- | usr/src/uts/common/fs/zfs/sys/zio.h | 6 | ||||
| -rw-r--r-- | usr/src/uts/common/fs/zfs/zfs_zone.c | 53 | ||||
| -rw-r--r-- | usr/src/uts/common/fs/zfs/zio.c | 11 | ||||
| -rw-r--r-- | usr/src/uts/common/os/zone.c | 70 | ||||
| -rw-r--r-- | usr/src/uts/common/sys/zone.h | 21 |
7 files changed, 138 insertions(+), 106 deletions(-)
diff --git a/usr/src/cmd/stat/vfsstat/vfsstat.pl b/usr/src/cmd/stat/vfsstat/vfsstat.pl index ff02ef1d70..7da629a3f5 100644 --- a/usr/src/cmd/stat/vfsstat/vfsstat.pl +++ b/usr/src/cmd/stat/vfsstat/vfsstat.pl @@ -83,12 +83,14 @@ if ( defined($ARGV[0]) ) { } my $HEADER_FMT = $USE_COMMA ? - "r/%s,w/%s,%sr/%s,%sw/%s,wait_t,ractv,wactv,read_t,writ_t,%%r,%%w,zone\n" : - " r/%s w/%s %sr/%s %sw/%s wait_t ractv wactv " . - "read_t writ_t %%r %%w zone\n"; + "r/%s,w/%s,%sr/%s,%sw/%s,ractv,wactv,read_t,writ_t,%%r,%%w," . + "d/%s,del_t,zone\n" : + " r/%s w/%s %sr/%s %sw/%s ractv wactv read_t writ_t " . + "%%r %%w d/%s del_t zone\n"; my $DATA_FMT = $USE_COMMA ? - "%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%d,%d,%s,%d\n" : - "%5.1f %5.1f %5.1f %5.1f %6.1f %5.1f %5.1f %6.1f %6.1f %3d %3d %s (%d)\n"; + "%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%d,%d,%.1f,%.1f,%s,%d\n" : + "%5.1f %5.1f %5.1f %5.1f %5.1f %5.1f %6.1f %6.1f %3d %3d " . + "%5.1f %6.1f %s (%d)\n"; my $BYTES_PREFIX = $USE_MB ? "M" : "k"; my $BYTES_DIVISOR = $USE_MB ? 1024 * 1024 : 1024; @@ -96,7 +98,7 @@ my $INTERVAL_SUFFIX = $USE_INTERVAL ? "i" : "s"; my $NANOSEC = 1000000000; my @fields = ( 'reads', 'writes', 'nread', 'nwritten', 'rtime', 'wtime', - 'rlentime', 'wlentime', 'snaptime' ); + 'rlentime', 'wlentime', 'delay_cnt', 'delay_time', 'snaptime' ); chomp(my $curzone = (`/sbin/zonename`)); @@ -121,7 +123,7 @@ for (my $ii = 0; $ii < $count; $ii++) { if ($rows_printed == 0 || $ALL_ZONES) { printf($HEADER_FMT, $INTERVAL_SUFFIX, $INTERVAL_SUFFIX, $BYTES_PREFIX, $INTERVAL_SUFFIX, $BYTES_PREFIX, - $INTERVAL_SUFFIX); + $INTERVAL_SUFFIX, $INTERVAL_SUFFIX); } $rows_printed = $rows_printed >= 20 ? 
0 : $rows_printed + 1; @@ -172,9 +174,6 @@ sub print_stats { my $nwritten = ($data->{'nwritten'} - $old->{'nwritten'}) / $rate_divisor / $BYTES_DIVISOR; - # XXX Need to investigate how to calculate this - my $wait_t = 0.0; - # Calculate transactions per second my $r_tps = ($data->{'reads'} - $old->{'reads'}) / $etime; my $w_tps = ($data->{'writes'} - $old->{'writes'}) / $etime; @@ -189,6 +188,12 @@ sub print_stats { my $read_t = $r_tps > 0 ? $r_actv * (1000 / $r_tps) : 0.0; my $writ_t = $w_tps > 0 ? $w_actv * (1000 / $w_tps) : 0.0; + # Calculate I/O throttle delay metrics + my $delays = $data->{'delay_cnt'} - $old->{'delay_cnt'}; + my $d_tps = $delays / $etime; + my $del_t = $delays > 0 ? + ($data->{'delay_time'} - $old->{'delay_time'}) / $delays : 0.0; + # Calculate the % time the VFS layer is active my $r_b_pct = ((($data->{'rtime'} - $old->{'rtime'}) / $NANOSEC) / $etime) * 100; @@ -197,9 +202,9 @@ sub print_stats { if (! $HIDE_ZEROES || $reads != 0.0 || $writes != 0.0 || $nread != 0.0 || $nwritten != 0.0) { - printf($DATA_FMT, $reads, $writes, $nread, $nwritten, - $wait_t, $r_actv, $w_actv, $read_t, $writ_t, - $r_b_pct, $w_b_pct, substr($zone, 0, 8), $zoneid); + printf($DATA_FMT, $reads, $writes, $nread, $nwritten, $r_actv, + $w_actv, $read_t, $writ_t, $r_b_pct, $w_b_pct, + $d_tps, $del_t, substr($zone, 0, 8), $zoneid); } # Save current calculations for next loop diff --git a/usr/src/cmd/stat/ziostat/ziostat.pl b/usr/src/cmd/stat/ziostat/ziostat.pl index b8628096cd..cf95d2f5a5 100755 --- a/usr/src/cmd/stat/ziostat/ziostat.pl +++ b/usr/src/cmd/stat/ziostat/ziostat.pl @@ -81,19 +81,18 @@ if ( defined($ARGV[0]) ) { } my $HEADER_FMT = $USE_COMMA ? - "r/%s,w/%s,%sr/%s,%sw/%s,wait,actv,wsvc_t,asvc_t,%%w,%%b,zone\n" : - " r/%s w/%s %sr/%s %sw/%s wait actv wsvc_t asvc_t " . - "%%w %%b zone\n"; + "r/%s,%sr/%s,actv,wsvc_t,asvc_t,%%b,zone\n" : + " r/%s %sr/%s actv wsvc_t asvc_t %%b zone\n"; my $DATA_FMT = $USE_COMMA ? 
- "%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%d,%d,%s,%d\n" : - " %6.1f %6.1f %6.1f %6.1f %4.1f %4.1f %6.1f %6.1f %3d %3d %s (%d)\n"; + "%.1f,%.1f,%.1f,%.1f,%.1f,%d,%s,%d\n" : + " %6.1f %6.1f %6.1f %6.1f %6.1f %3d %s (%d)\n"; my $BYTES_PREFIX = $USE_MB ? "M" : "k"; my $BYTES_DIVISOR = $USE_MB ? 1024 * 1024 : 1024; my $INTERVAL_SUFFIX = $USE_INTERVAL ? "i" : "s"; +my $NANOSEC = 1000000000; -my @fields = ( 'reads', 'writes', 'nread', 'nwritten', 'rtime', 'wtime', - 'rlentime', 'wlentime', 'snaptime' ); +my @fields = ( 'reads', 'nread', 'waittime', 'rtime', 'rlentime', 'snaptime' ); chomp(my $curzone = (`/sbin/zonename`)); @@ -116,9 +115,8 @@ $Kstat->update(); for (my $ii = 0; $ii < $count; $ii++) { # Print the column header every 20 rows if ($rows_printed == 0 || $ALL_ZONES) { - printf($HEADER_FMT, $INTERVAL_SUFFIX, $INTERVAL_SUFFIX, - $BYTES_PREFIX, $INTERVAL_SUFFIX, $BYTES_PREFIX, - $INTERVAL_SUFFIX); + printf($HEADER_FMT, $INTERVAL_SUFFIX, $BYTES_PREFIX, + $INTERVAL_SUFFIX, $INTERVAL_SUFFIX); } $rows_printed = $rows_printed >= 20 ? 0 : $rows_printed + 1; @@ -142,7 +140,7 @@ for (my $ii = 0; $ii < $count; $ii++) { my $zoneid = $zoneids->{$zone}; print_stats($zone, $zoneid, - $Kstat->{'zone_io'}{$zoneid}{$trimmed_zone}, $old->{$zone}); + $Kstat->{'zone_zfs'}{$zoneid}{$trimmed_zone}, $old->{$zone}); } sleep ($interval); @@ -161,33 +159,29 @@ sub print_stats { # Calculate basic statistics my $rate_divisor = $USE_INTERVAL ? 
1 : $etime; my $reads = ($data->{'reads'} - $old->{'reads'}) / $rate_divisor; - my $writes = ($data->{'writes'} - $old->{'writes'}) / $rate_divisor; my $nread = ($data->{'nread'} - $old->{'nread'}) / $rate_divisor / $BYTES_DIVISOR; - my $nwritten = ($data->{'nwritten'} - $old->{'nwritten'}) / - $rate_divisor / $BYTES_DIVISOR; # Calculate overall transactions per second - my $tps = ($data->{'reads'} - $old->{'reads'} + - $data->{'writes'} - $old->{'writes'}) / $etime; + my $ops = $data->{'reads'} - $old->{'reads'}; + my $tps = $ops / $etime; - # Calculate average length of wait and run queues - my $wait = ($data->{'wlentime'} - $old->{'wlentime'}) / $etime; - my $actv = ($data->{'rlentime'} - $old->{'rlentime'}) / $etime; + # Calculate average length of disk run queue + my $actv = (($data->{'rlentime'} - $old->{'rlentime'}) / $NANOSEC) / + $etime; - # Calculate average wait and run times - my $wsvc = $tps > 0 ? $wait * (1000 / $tps) : 0.0; + # Calculate average disk wait and service times + my $wsvc = $ops > 0 ? (($data->{'waittime'} - $old->{'waittime'}) / + 1000000) / $ops : 0.0; my $asvc = $tps > 0 ? $actv * (1000 / $tps) : 0.0; - # Calculate the % time the wait queue and disk are active - my $w_pct = (($data->{'wtime'} - $old->{'wtime'}) / $etime) * 100; - my $b_pct = (($data->{'rtime'} - $old->{'rtime'}) / $etime) * 100; + # Calculate the % time the disk run queue is active + my $b_pct = ((($data->{'rtime'} - $old->{'rtime'}) / $NANOSEC) / + $etime) * 100; - if (! $HIDE_ZEROES || $reads != 0.0 || $writes != 0.0 || - $nread != 0.0 || $nwritten != 0.0) { - printf($DATA_FMT, $reads, $writes, $nread, $nwritten, - $wait, $actv, $wsvc, $asvc, $w_pct, $b_pct, - substr($zone, 0, 8), $zoneid); + if (! 
$HIDE_ZEROES || $reads != 0.0 || $nread != 0.0 ) { + printf($DATA_FMT, $reads, $nread, $actv, $wsvc, $asvc, + $b_pct, substr($zone, 0, 8), $zoneid); } # Save current calculations for next loop diff --git a/usr/src/uts/common/fs/zfs/sys/zio.h b/usr/src/uts/common/fs/zfs/sys/zio.h index 3c792e34cb..75f1b96972 100644 --- a/usr/src/uts/common/fs/zfs/sys/zio.h +++ b/usr/src/uts/common/fs/zfs/sys/zio.h @@ -418,9 +418,9 @@ struct zio { zio_cksum_report_t *io_cksum_report; uint64_t io_ena; - /* Zone which originated this IO */ - zoneid_t io_zoneid; - hrtime_t io_start; /* time IO was dispatched */ + zoneid_t io_zoneid; /* zone which originated this I/O */ + hrtime_t io_start; /* time I/O entered zio pipeline */ + hrtime_t io_dispatched; /* time I/O was dispatched to disk */ }; extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, diff --git a/usr/src/uts/common/fs/zfs/zfs_zone.c b/usr/src/uts/common/fs/zfs/zfs_zone.c index 9cb3907f1f..8f90e8158b 100644 --- a/usr/src/uts/common/fs/zfs/zfs_zone.c +++ b/usr/src/uts/common/fs/zfs/zfs_zone.c @@ -875,7 +875,6 @@ void zfs_zone_io_throttle(zfs_zone_iop_type_t type, uint64_t size) { zone_t *zonep = curzone; - zone_zfs_kstat_t *zzp; hrtime_t unow; uint16_t wait; @@ -929,9 +928,11 @@ zfs_zone_io_throttle(zfs_zone_iop_type_t type, uint64_t size) drv_usecwait(wait); - if ((zzp = zonep->zone_zfs_stats) != NULL) { - atomic_inc_64(&zzp->zz_throttle_cnt.value.ui64); - atomic_add_64(&zzp->zz_throttle_time.value.ui64, wait); + if (zonep->zone_vfs_stats != NULL) { + atomic_inc_64(&zonep->zone_vfs_stats-> + zv_delay_cnt.value.ui64); + atomic_add_64(&zonep->zone_vfs_stats-> + zv_delay_time.value.ui64, wait); } } } @@ -1007,17 +1008,18 @@ zfs_zone_zio_start(zio_t *zp) if ((zonep = zone_find_by_id(zp->io_zoneid)) == NULL) return; - mutex_enter(&zonep->zone_io_lock); - kstat_runq_enter(zonep->zone_io_kiop); + mutex_enter(&zonep->zone_zfs_lock); + if (zp->io_type == ZIO_TYPE_READ) + kstat_runq_enter(&zonep->zone_zfs_rwstats); 
zonep->zone_zfs_weight = 0; - mutex_exit(&zonep->zone_io_lock); + mutex_exit(&zonep->zone_zfs_lock); mutex_enter(&zfs_disk_lock); - zp->io_start = gethrtime(); + zp->io_dispatched = gethrtime(); if (zfs_disk_rcnt++ != 0) - zfs_disk_rtime += (zp->io_start - zfs_disk_rlastupdate); - zfs_disk_rlastupdate = zp->io_start; + zfs_disk_rtime += (zp->io_dispatched - zfs_disk_rlastupdate); + zfs_disk_rlastupdate = zp->io_dispatched; mutex_exit(&zfs_disk_lock); zone_rele(zonep); @@ -1040,26 +1042,33 @@ zfs_zone_zio_done(zio_t *zp) if ((zonep = zone_find_by_id(zp->io_zoneid)) == NULL) return; - mutex_enter(&zonep->zone_io_lock); + now = gethrtime(); + unow = NANO_TO_MICRO(now); + udelta = unow - NANO_TO_MICRO(zp->io_dispatched); - kstat_runq_exit(zonep->zone_io_kiop); + mutex_enter(&zonep->zone_zfs_lock); + /* + * To calculate the wsvc_t average, keep a cumulative sum of all the + * wait time before each I/O was dispatched. Since most writes are + * asynchronous, only track the wait time for read I/Os. 
+ */ if (zp->io_type == ZIO_TYPE_READ) { - zonep->zone_io_kiop->reads++; - zonep->zone_io_kiop->nread += zp->io_size; + zonep->zone_zfs_rwstats.reads++; + zonep->zone_zfs_rwstats.nread += zp->io_size; + + zonep->zone_zfs_stats->zz_waittime.value.ui64 += + zp->io_dispatched - zp->io_start; + + kstat_runq_exit(&zonep->zone_zfs_rwstats); } else { - zonep->zone_io_kiop->writes++; - zonep->zone_io_kiop->nwritten += zp->io_size; + zonep->zone_zfs_rwstats.writes++; + zonep->zone_zfs_rwstats.nwritten += zp->io_size; } - mutex_exit(&zonep->zone_io_lock); + mutex_exit(&zonep->zone_zfs_lock); mutex_enter(&zfs_disk_lock); - - now = gethrtime(); - unow = NANO_TO_MICRO(now); - udelta = unow - NANO_TO_MICRO(zp->io_start); - zfs_disk_rcnt--; zfs_disk_rtime += (now - zfs_disk_rlastupdate); zfs_disk_rlastupdate = now; diff --git a/usr/src/uts/common/fs/zfs/zio.c b/usr/src/uts/common/fs/zfs/zio.c index c063e26d71..a3159b91b8 100644 --- a/usr/src/uts/common/fs/zfs/zio.c +++ b/usr/src/uts/common/fs/zfs/zio.c @@ -493,6 +493,8 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, zio = kmem_cache_alloc(zio_cache, KM_SLEEP); bzero(zio, sizeof (zio_t)); + zio->io_start = gethrtime(); + mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL); @@ -550,10 +552,10 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp, if (zio->io_child_type == ZIO_CHILD_GANG) zio->io_gang_leader = pio->io_gang_leader; zio_add_child(pio, zio); + } else { + zfs_zone_zio_init(zio); } - zfs_zone_zio_init(zio); - return (zio); } @@ -889,6 +891,8 @@ zio_read_bp_init(zio_t *zio) { blkptr_t *bp = zio->io_bp; + zio->io_start = gethrtime(); + if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF && zio->io_child_type == ZIO_CHILD_LOGICAL && !(zio->io_flags & ZIO_FLAG_RAW)) { @@ -2246,6 +2250,9 @@ zio_vdev_io_start(zio_t *zio) ASSERT(zio->io_error == 0); ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0); + if (zio->io_type == ZIO_TYPE_WRITE) + 
zio->io_start = gethrtime(); + if (vd == NULL) { if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER)) spa_config_enter(spa, SCL_ZIO, zio, RW_READER); diff --git a/usr/src/uts/common/os/zone.c b/usr/src/uts/common/os/zone.c index ca02229671..cde6677f00 100644 --- a/usr/src/uts/common/os/zone.c +++ b/usr/src/uts/common/os/zone.c @@ -1927,6 +1927,8 @@ zone_vfs_kstat_create(zone_t *zone) kstat_named_init(&zvp->zv_10ms_ops, "10ms_ops", KSTAT_DATA_UINT64); kstat_named_init(&zvp->zv_100ms_ops, "100ms_ops", KSTAT_DATA_UINT64); kstat_named_init(&zvp->zv_1s_ops, "1s_ops", KSTAT_DATA_UINT64); + kstat_named_init(&zvp->zv_delay_cnt, "delay_cnt", KSTAT_DATA_UINT64); + kstat_named_init(&zvp->zv_delay_time, "delay_time", KSTAT_DATA_UINT64); ksp->ks_update = zone_vfs_kstat_update; ksp->ks_private = zone; @@ -1935,6 +1937,38 @@ zone_vfs_kstat_create(zone_t *zone) return (ksp); } +static int +zone_zfs_kstat_update(kstat_t *ksp, int rw) +{ + zone_t *zone = ksp->ks_private; + zone_zfs_kstat_t *zzp = ksp->ks_data; + kstat_io_t *kiop = &zone->zone_zfs_rwstats; + + if (rw == KSTAT_WRITE) + return (EACCES); + + /* + * Extract the ZFS statistics from the kstat_io_t structure used by + * kstat_runq_enter() and related functions. Since the I/O throttle + * counters are updated directly by the ZFS layer, there's no need to + * copy those statistics here. + * + * Note that kstat_runq_enter() and the related functions use + * gethrtime_unscaled(), so scale the time here. 
+ */ + zzp->zz_nread.value.ui64 = kiop->nread; + zzp->zz_reads.value.ui64 = kiop->reads; + zzp->zz_rtime.value.ui64 = kiop->rtime; + zzp->zz_rlentime.value.ui64 = kiop->rlentime; + zzp->zz_nwritten.value.ui64 = kiop->nwritten; + zzp->zz_writes.value.ui64 = kiop->writes; + + scalehrtime((hrtime_t *)&zzp->zz_rtime.value.ui64); + scalehrtime((hrtime_t *)&zzp->zz_rlentime.value.ui64); + + return (0); +} + static kstat_t * zone_zfs_kstat_create(zone_t *zone) { @@ -1954,12 +1988,17 @@ zone_zfs_kstat_create(zone_t *zone) ksp->ks_lock = &zone->zone_zfs_lock; zone->zone_zfs_stats = zzp; - kstat_named_init(&zzp->zz_throttle_cnt, - "throttle_cnt", KSTAT_DATA_UINT64); - kstat_named_init(&zzp->zz_throttle_time, - "throttle_time", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_nread, "nread", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_reads, "reads", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_rtime, "rtime", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_rlentime, "rlentime", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_nwritten, "nwritten", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_writes, "writes", KSTAT_DATA_UINT64); + kstat_named_init(&zzp->zz_waittime, "waittime", KSTAT_DATA_UINT64); + ksp->ks_update = zone_zfs_kstat_update; ksp->ks_private = zone; + kstat_install(ksp); return (ksp); } @@ -1974,22 +2013,6 @@ zone_kstat_create(zone_t *zone) zone->zone_nprocs_kstat = zone_rctl_kstat_create_common(zone, "nprocs", zone_nprocs_kstat_update); - zone->zone_io_ksp = kstat_create_zone("zone_io", zone->zone_id, - zone->zone_name, "zone_io", KSTAT_TYPE_IO, 1, - KSTAT_FLAG_PERSISTENT, zone->zone_id); - - if (zone->zone_io_ksp != NULL) { - if (zone->zone_id != GLOBAL_ZONEID) - kstat_zone_add(zone->zone_io_ksp, GLOBAL_ZONEID); - - zone->zone_io_ksp->ks_lock = &zone->zone_io_lock; - kstat_install(zone->zone_io_ksp); - zone->zone_io_kiop = zone->zone_io_ksp->ks_data; - } else { - zone->zone_io_kiop = kmem_zalloc( - sizeof (kstat_io_t), KM_SLEEP); - } - if 
((zone->zone_vfs_ksp = zone_vfs_kstat_create(zone)) == NULL) { zone->zone_vfs_stats = kmem_zalloc( sizeof (zone_vfs_kstat_t), KM_SLEEP); @@ -2024,13 +2047,6 @@ zone_kstat_delete(zone_t *zone) zone_kstat_delete_common(&zone->zone_nprocs_kstat, sizeof (zone_kstat_t)); - if (zone->zone_io_ksp != NULL) { - kstat_delete(zone->zone_io_ksp); - zone->zone_io_ksp = NULL; - } else { - kmem_free(zone->zone_io_kiop, sizeof (kstat_io_t)); - } - zone_kstat_delete_common(&zone->zone_vfs_ksp, sizeof (zone_vfs_kstat_t)); zone_kstat_delete_common(&zone->zone_zfs_ksp, diff --git a/usr/src/uts/common/sys/zone.h b/usr/src/uts/common/sys/zone.h index 4ab70fe624..74132c2fd3 100644 --- a/usr/src/uts/common/sys/zone.h +++ b/usr/src/uts/common/sys/zone.h @@ -399,11 +399,18 @@ typedef struct { kstat_named_t zv_10ms_ops; kstat_named_t zv_100ms_ops; kstat_named_t zv_1s_ops; + kstat_named_t zv_delay_cnt; + kstat_named_t zv_delay_time; } zone_vfs_kstat_t; typedef struct { - kstat_named_t zz_throttle_cnt; - kstat_named_t zz_throttle_time; + kstat_named_t zz_nread; + kstat_named_t zz_reads; + kstat_named_t zz_rtime; + kstat_named_t zz_rlentime; + kstat_named_t zz_nwritten; + kstat_named_t zz_writes; + kstat_named_t zz_waittime; } zone_zfs_kstat_t; typedef struct zone { @@ -563,13 +570,6 @@ typedef struct zone { sys_zio_cntr_t zone_lwr_ops; /* - * kstats and counters for I/O ops and bytes. - */ - kmutex_t zone_io_lock; /* protects I/O statistics */ - kstat_t *zone_io_ksp; - kstat_io_t *zone_io_kiop; - - /* * kstats and counters for VFS ops and bytes. */ kmutex_t zone_vfs_lock; /* protects VFS statistics */ @@ -578,10 +578,11 @@ typedef struct zone { zone_vfs_kstat_t *zone_vfs_stats; /* - * kstats for ZFS observability. + * kstats for ZFS I/O ops and bytes. */ kmutex_t zone_zfs_lock; /* protects ZFS statistics */ kstat_t *zone_zfs_ksp; + kstat_io_t zone_zfs_rwstats; zone_zfs_kstat_t *zone_zfs_stats; /* |
