Diffstat (limited to 'usr/src/uts/common/fs/zfs/arc.c')
-rw-r--r--	usr/src/uts/common/fs/zfs/arc.c	213
1 file changed, 122 insertions(+), 91 deletions(-)
diff --git a/usr/src/uts/common/fs/zfs/arc.c b/usr/src/uts/common/fs/zfs/arc.c
index 844abbcd5d..8cd31e7c7c 100644
--- a/usr/src/uts/common/fs/zfs/arc.c
+++ b/usr/src/uts/common/fs/zfs/arc.c
@@ -348,7 +348,8 @@ int arc_no_grow_shift = 5;
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
-static int arc_min_prefetch_lifespan;
+static int zfs_arc_min_prefetch_ms = 1;
+static int zfs_arc_min_prescient_prefetch_ms = 6;
/*
* If this percent of memory is free, don't throttle.
@@ -695,8 +696,9 @@ typedef struct arc_stats {
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_meta_max;
kstat_named_t arcstat_meta_min;
- kstat_named_t arcstat_sync_wait_for_async;
+ kstat_named_t arcstat_async_upgrade_sync;
kstat_named_t arcstat_demand_hit_predictive_prefetch;
+ kstat_named_t arcstat_demand_hit_prescient_prefetch;
} arc_stats_t;
static arc_stats_t arc_stats = {
@@ -780,8 +782,9 @@ static arc_stats_t arc_stats = {
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 },
- { "sync_wait_for_async", KSTAT_DATA_UINT64 },
+ { "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
+ { "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
};
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
@@ -878,22 +881,23 @@ typedef struct arc_callback arc_callback_t;
struct arc_callback {
void *acb_private;
- arc_done_func_t *acb_done;
+ arc_read_done_func_t *acb_done;
arc_buf_t *acb_buf;
boolean_t acb_compressed;
zio_t *acb_zio_dummy;
+ zio_t *acb_zio_head;
arc_callback_t *acb_next;
};
typedef struct arc_write_callback arc_write_callback_t;
struct arc_write_callback {
- void *awcb_private;
- arc_done_func_t *awcb_ready;
- arc_done_func_t *awcb_children_ready;
- arc_done_func_t *awcb_physdone;
- arc_done_func_t *awcb_done;
- arc_buf_t *awcb_buf;
+ void *awcb_private;
+ arc_write_done_func_t *awcb_ready;
+ arc_write_done_func_t *awcb_children_ready;
+ arc_write_done_func_t *awcb_physdone;
+ arc_write_done_func_t *awcb_done;
+ arc_buf_t *awcb_buf;
};
/*
@@ -1019,6 +1023,8 @@ struct arc_buf_hdr {
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
+#define HDR_PRESCIENT_PREFETCH(hdr) \
+ ((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
@@ -3249,6 +3255,8 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
+ int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
+ zfs_arc_min_prescient_prefetch_ms : zfs_arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
@@ -3301,8 +3309,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
- ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
- arc_min_prefetch_lifespan)) {
+ ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < min_lifetime * hz)) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
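Despite the _ms suffix on the new tunables, the test above scales them by hz, a full second of clock ticks per unit, so the defaults give a predictive prefetch a one-second grace period and a prescient prefetch six seconds before either becomes evictable. A minimal sketch of the arithmetic, with the kernel clock symbols declared as assumptions:

#include <sys/types.h>

extern clock_t ddi_get_lbolt(void);	/* ticks since boot */
extern int hz;				/* clock ticks per second */

static boolean_t
prefetch_in_grace_period(clock_t b_arc_access, int min_lifetime)
{
	/* min_lifetime defaults to 1 (predictive) or 6 (prescient) */
	return (ddi_get_lbolt() - b_arc_access < min_lifetime * hz);
}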
@@ -4613,13 +4620,15 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
- if (HDR_PREFETCH(hdr)) {
+ if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
- arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
+ arc_hdr_clear_flags(hdr,
+ ARC_FLAG_PREFETCH |
+ ARC_FLAG_PRESCIENT_PREFETCH);
ARCSTAT_BUMP(arcstat_mru_hits);
}
hdr->b_l1hdr.b_arc_access = now;
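This hunk and the three that follow apply one uniform rule in arc_access(): a prefetch flag of either kind survives only while the buffer has no active references, and the first hit from a real reader clears both flags so the buffer ages like any demand hit. A hypothetical condensation of that rule:

static void
clear_prefetch_on_real_access(arc_buf_hdr_t *hdr)
{
	if (!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr))
		return;
	if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) != 0) {
		/*
		 * An active reader holds the buffer; it is no longer
		 * merely prefetched.
		 */
		arc_hdr_clear_flags(hdr,
		    ARC_FLAG_PREFETCH | ARC_FLAG_PRESCIENT_PREFETCH);
	}
}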
@@ -4650,10 +4659,13 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
* MFU state.
*/
- if (HDR_PREFETCH(hdr)) {
+ if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
- if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
- arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
+ if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
+ arc_hdr_clear_flags(hdr,
+ ARC_FLAG_PREFETCH |
+ ARC_FLAG_PRESCIENT_PREFETCH);
+ }
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
@@ -4674,11 +4686,6 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
- if ((HDR_PREFETCH(hdr)) != 0) {
- ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
- /* link protected by hash_lock */
- ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
- }
ARCSTAT_BUMP(arcstat_mfu_hits);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
@@ -4689,12 +4696,11 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
* MFU state.
*/
- if (HDR_PREFETCH(hdr)) {
+ if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
- ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
new_state = arc_mru;
}
@@ -4760,21 +4766,26 @@ arc_buf_access(arc_buf_t *buf)
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
-/* a generic arc_done_func_t which you can use */
+/* a generic arc_read_done_func_t which you can use */
/* ARGSUSED */
void
-arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
+arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
+ arc_buf_t *buf, void *arg)
{
- if (zio == NULL || zio->io_error == 0)
- bcopy(buf->b_data, arg, arc_buf_size(buf));
+ if (buf == NULL)
+ return;
+
+ bcopy(buf->b_data, arg, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
-/* a generic arc_done_func_t */
+/* a generic arc_read_done_func_t */
void
-arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
+arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
+ arc_buf_t *buf, void *arg)
{
arc_buf_t **bufp = arg;
+
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
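Read callbacks now follow the arc_read_done_func_t shape shown above: the bookmark and block pointer travel with the buffer, and failure is signalled by buf == NULL rather than by each callback re-deriving it from the zio. A minimal conforming callback, where my_read_done and my_read_ctx are hypothetical names:

struct my_read_ctx {
	void	*mc_dest;	/* caller-supplied destination */
	int	mc_error;
};

/* ARGSUSED */
static void
my_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *arg)
{
	struct my_read_ctx *ctx = arg;

	if (buf == NULL) {
		/* the read failed; the zio, when present, has the error */
		ctx->mc_error = (zio != NULL) ? zio->io_error : EIO;
		return;
	}
	bcopy(buf->b_data, ctx->mc_dest, arc_buf_size(buf));
	arc_buf_destroy(buf, arg);
	ctx->mc_error = 0;
}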
@@ -4809,7 +4820,6 @@ arc_read_done(zio_t *zio)
arc_callback_t *callback_list;
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
- boolean_t no_zio_error = (zio->io_error == 0);
/*
* The hdr was inserted into hash-table and removed from lists
@@ -4835,7 +4845,7 @@ arc_read_done(zio_t *zio)
ASSERT3P(hash_lock, !=, NULL);
}
- if (no_zio_error) {
+ if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
@@ -4856,7 +4866,8 @@ arc_read_done(zio_t *zio)
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
- if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) {
+ if (hash_lock && zio->io_error == 0 &&
+ hdr->b_l1hdr.b_state == arc_anon) {
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
@@ -4877,30 +4888,29 @@ arc_read_done(zio_t *zio)
if (!acb->acb_done)
continue;
- /* This is a demand read since prefetches don't use callbacks */
callback_cnt++;
- if (no_zio_error) {
- int error = arc_buf_alloc_impl(hdr, acb->acb_private,
- acb->acb_compressed, zio->io_error == 0,
- &acb->acb_buf);
- if (error != 0) {
- /*
- * Decompression failed. Set io_error
- * so that when we call acb_done (below),
- * we will indicate that the read failed.
- * Note that in the unusual case where one
- * callback is compressed and another
- * uncompressed, we will mark all of them
- * as failed, even though the uncompressed
- * one can't actually fail. In this case,
- * the hdr will not be anonymous, because
- * if there are multiple callbacks, it's
- * because multiple threads found the same
- * arc buf in the hash table.
- */
- zio->io_error = error;
- }
+ if (zio->io_error != 0)
+ continue;
+
+ int error = arc_buf_alloc_impl(hdr, acb->acb_private,
+ acb->acb_compressed, B_TRUE, &acb->acb_buf);
+ if (error != 0) {
+ /*
+ * Decompression failed. Set io_error
+ * so that when we call acb_done (below),
+ * we will indicate that the read failed.
+ * Note that in the unusual case where one
+ * callback is compressed and another
+ * uncompressed, we will mark all of them
+ * as failed, even though the uncompressed
+ * one can't actually fail. In this case,
+ * the hdr will not be anonymous, because
+ * if there are multiple callbacks, it's
+ * because multiple threads found the same
+ * arc buf in the hash table.
+ */
+ zio->io_error = error;
}
}
/*
@@ -4923,7 +4933,7 @@ arc_read_done(zio_t *zio)
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
- if (no_zio_error) {
+ if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
@@ -4966,7 +4976,8 @@ arc_read_done(zio_t *zio)
arc_buf_destroy(acb->acb_buf, acb->acb_private);
acb->acb_buf = NULL;
}
- acb->acb_done(zio, acb->acb_buf, acb->acb_private);
+ acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
+ acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
@@ -5001,7 +5012,7 @@ arc_read_done(zio_t *zio)
* for readers of this block.
*/
int
-arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
+arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_read_done_func_t *done,
void *private, zio_priority_t priority, int zio_flags,
arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
@@ -5010,6 +5021,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0;
+ int rc = 0;
ASSERT(!BP_IS_EMBEDDED(bp) ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
@@ -5028,32 +5040,20 @@ top:
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
+ zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
+ ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
- * This sync read must wait for an
- * in-progress async read (e.g. a predictive
- * prefetch). Async reads are queued
- * separately at the vdev_queue layer, so
- * this is a form of priority inversion.
- * Ideally, we would "inherit" the demand
- * i/o's priority by moving the i/o from
- * the async queue to the synchronous queue,
- * but there is currently no mechanism to do
- * so. Track this so that we can evaluate
- * the magnitude of this potential performance
- * problem.
- *
- * Note that if the prefetch i/o is already
- * active (has been issued to the device),
- * the prefetch improved performance, because
- * we issued it sooner than we would have
- * without the prefetch.
+ * This is a sync read that needs to wait for
+ * an in-flight async read. Request that the
+ * zio have its priority upgraded.
*/
- DTRACE_PROBE1(arc__sync__wait__for__async,
+ zio_change_priority(head_zio, priority);
+ DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
- ARCSTAT_BUMP(arcstat_sync_wait_for_async);
+ ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
arc_hdr_clear_flags(hdr,
@@ -5080,6 +5080,7 @@ top:
spa, NULL, NULL, NULL, zio_flags);
ASSERT3P(acb->acb_done, !=, NULL);
+ acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb = acb;
mutex_exit(hash_lock);
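The in-progress branch can act on the priority inversion because every callback now records the zio at the head of the outstanding read in acb_zio_head; where the deleted comment could only count the event, the code now calls zio_change_priority() to move the queued I/O to the synchronous vdev queue. A hypothetical condensation of the branch:

static void
maybe_upgrade_in_flight_read(arc_buf_hdr_t *hdr, zio_priority_t priority)
{
	zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;

	ASSERT3P(head_zio, !=, NULL);
	if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
	    priority == ZIO_PRIORITY_SYNC_READ) {
		/* requeue from the async to the sync vdev queue */
		zio_change_priority(head_zio, ZIO_PRIORITY_SYNC_READ);
	}
}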
@@ -5107,17 +5108,33 @@ top:
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
+
+ if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
+ ARCSTAT_BUMP(
+ arcstat_demand_hit_prescient_prefetch);
+ arc_hdr_clear_flags(hdr,
+ ARC_FLAG_PRESCIENT_PREFETCH);
+ }
+
ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
- VERIFY0(arc_buf_alloc_impl(hdr, private,
- compressed_read, B_TRUE, &buf));
+ rc = arc_buf_alloc_impl(hdr, private,
+ compressed_read, B_TRUE, &buf);
+ if (rc != 0) {
+ arc_buf_destroy(buf, private);
+ buf = NULL;
+ }
+ ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
+ rc == 0 || rc != ENOENT);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
+ if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
+ arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
@@ -5127,7 +5144,7 @@ top:
data, metadata, hits);
if (done)
- done(NULL, buf, private);
+ done(NULL, zb, bp, buf, private);
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
@@ -5201,6 +5218,9 @@ top:
if (*arc_flags & ARC_FLAG_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
+ if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
+ arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
+
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (BP_GET_LEVEL(bp) > 0)
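With ARC_FLAG_PRESCIENT_PREFETCH handled on both the hit path and the miss path, a caller that knows a block will be read soon, rather than merely guessing, can issue a longer-lived prefetch. A hedged sketch of such a call; the flag and priority combination here is an assumption for illustration, not taken from this diff:

static void
prescient_prefetch_block(spa_t *spa, const blkptr_t *bp,
    const zbookmark_phys_t *zb)
{
	arc_flags_t aflags = ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
	    ARC_FLAG_PRESCIENT_PREFETCH;

	/* no done callback: this read exists only to warm the cache */
	(void) arc_read(NULL, spa, bp, NULL, NULL, ZIO_PRIORITY_ASYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE, &aflags, zb);
}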
@@ -5230,14 +5250,17 @@ top:
vd = NULL;
}
- if (priority == ZIO_PRIORITY_ASYNC_READ)
+ /*
+ * We count both async reads and scrub IOs as asynchronous so
+ * that both can be upgraded in the event of a cache hit while
+ * the read IO is still in-flight.
+ */
+ if (priority == ZIO_PRIORITY_ASYNC_READ ||
+ priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
- if (hash_lock != NULL)
- mutex_exit(hash_lock);
-
/*
* At this point, we have a level 1 cache miss. Try again in
* L2ARC if possible.
@@ -5307,6 +5330,11 @@ top:
ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
+ acb->acb_zio_head = rzio;
+
+ if (hash_lock != NULL)
+ mutex_exit(hash_lock);
+
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes, size);
@@ -5321,6 +5349,8 @@ top:
return (0);
/* l2arc read error; goto zio_read() */
+ if (hash_lock != NULL)
+ mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
@@ -5341,6 +5371,10 @@ top:
rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pabd, size,
arc_read_done, hdr, priority, zio_flags, zb);
+ acb->acb_zio_head = rzio;
+
+ if (hash_lock != NULL)
+ mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT)
return (zio_wait(rzio));
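Note the ordering both issue paths now share: acb_zio_head is published while the hash lock is still held, and only then is the lock dropped (the L2ARC error path re-enters it before falling through to zio_read()). Without that, a concurrent arc_read() taking the in-progress branch could observe a NULL acb_zio_head and trip the ASSERT3P added earlier. The invariant in miniature, as a hypothetical helper:

static void
publish_head_zio(arc_callback_t *acb, zio_t *rzio, kmutex_t *hash_lock)
{
	acb->acb_zio_head = rzio;	/* readers load this under the lock */
	if (hash_lock != NULL)
		mutex_exit(hash_lock);
	/* only after the lock is dropped may the caller block on rzio */
}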
@@ -5828,9 +5862,9 @@ arc_write_done(zio_t *zio)
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
- boolean_t l2arc, const zio_prop_t *zp, arc_done_func_t *ready,
- arc_done_func_t *children_ready, arc_done_func_t *physdone,
- arc_done_func_t *done, void *private, zio_priority_t priority,
+ boolean_t l2arc, const zio_prop_t *zp, arc_write_done_func_t *ready,
+ arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
+ arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
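Write callbacks keep the old three-argument shape, now under the distinct arc_write_done_func_t typedef so read and write callbacks can no longer be interchanged silently. A minimal conforming done callback, with hypothetical names:

struct my_write_ctx {
	int	mw_error;
};

/* ARGSUSED */
static void
my_write_done(zio_t *zio, arc_buf_t *buf, void *arg)
{
	struct my_write_ctx *ctx = arg;

	/* zio->io_error carries the result of the write */
	ctx->mw_error = zio->io_error;
}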
@@ -6242,9 +6276,6 @@ arc_init(void)
mutex_init(&arc_adjust_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&arc_adjust_waiters_cv, NULL, CV_DEFAULT, NULL);
- /* Convert seconds to clock ticks */
- arc_min_prefetch_lifespan = 1 * hz;
-
/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
arc_c_min = MAX(allmem / 32, 64 << 20);
/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
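The renamed and added arcstats can be watched from userland to judge how the new prefetch handling behaves. A small libkstat reader, assuming the usual zfs:0:arcstats kstat on illumos (compile with -lkstat):

#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	const char *names[] = { "async_upgrade_sync",
	    "demand_hit_predictive_prefetch",
	    "demand_hit_prescient_prefetch" };
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;

	if (kc == NULL)
		return (1);
	if ((ksp = kstat_lookup(kc, "zfs", 0, "arcstats")) == NULL ||
	    kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (1);
	}
	for (int i = 0; i < 3; i++) {
		kstat_named_t *kn = kstat_data_lookup(ksp, (char *)names[i]);
		if (kn != NULL)
			(void) printf("%-30s %llu\n", names[i],
			    (unsigned long long)kn->value.ui64);
	}
	(void) kstat_close(kc);
	return (0);
}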