author    Toomas Soome <tsoome@me.com>    2019-04-05 12:19:59 +0300
committer Toomas Soome <tsoome@me.com>    2019-05-08 16:16:07 +0300
commit    c28006dedbb1a63ed6cd12fc2ec1c2be15b425d1 (patch)
tree      a18a3e57789c62883a4bf24f0c26ad88ee6bc6c5 /usr/src
parent    0eb3364f7d9157494d68a9f390c120ca86e03be1 (diff)
download  illumos-joyent-c28006dedbb1a63ed6cd12fc2ec1c2be15b425d1.tar.gz
10838 loader: bcache.c cstyle cleanup
Reviewed by: Gergő Doma <domag02@gmail.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Diffstat (limited to 'usr/src')
-rw-r--r--  usr/src/boot/sys/boot/common/bcache.c | 617
1 file changed, 312 insertions, 305 deletions
diff --git a/usr/src/boot/sys/boot/common/bcache.c b/usr/src/boot/sys/boot/common/bcache.c
index fd43ebd68e..9d8e385ab4 100644
--- a/usr/src/boot/sys/boot/common/bcache.c
+++ b/usr/src/boot/sys/boot/common/bcache.c
@@ -1,4 +1,4 @@
-/*-
+/*
* Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
* Copyright 2015 Toomas Soome <tsoome@me.com>
* All rights reserved.
@@ -43,15 +43,15 @@
/* #define BCACHE_DEBUG */
#ifdef BCACHE_DEBUG
-# define DEBUG(fmt, args...) printf("%s: " fmt "\n" , __func__ , ## args)
+#define DEBUG(fmt, args...) printf("%s: " fmt "\n", __func__, ## args)
#else
-# define DEBUG(fmt, args...)
+#define DEBUG(fmt, args...) ((void)0)
#endif
struct bcachectl
{
- daddr_t bc_blkno;
- int bc_count;
+ daddr_t bc_blkno;
+ int bc_count;
};
/*
@@ -61,24 +61,24 @@ struct bcachectl
* to boot from, but this has changed with zfs.
*/
struct bcache {
- struct bcachectl *bcache_ctl;
- caddr_t bcache_data;
- size_t bcache_nblks;
- size_t ra;
+ struct bcachectl *bcache_ctl;
+ caddr_t bcache_data;
+ size_t bcache_nblks;
+ size_t ra;
};
-static u_int bcache_total_nblks; /* set by bcache_init */
-static u_int bcache_blksize; /* set by bcache_init */
-static u_int bcache_numdev; /* set by bcache_add_dev */
+static uint_t bcache_total_nblks; /* set by bcache_init */
+static uint_t bcache_blksize; /* set by bcache_init */
+static uint_t bcache_numdev; /* set by bcache_add_dev */
/* statistics */
-static u_int bcache_units; /* number of devices with cache */
-static u_int bcache_unit_nblks; /* nblocks per unit */
-static u_int bcache_hits;
-static u_int bcache_misses;
-static u_int bcache_ops;
-static u_int bcache_bypasses;
-static u_int bcache_bcount;
-static u_int bcache_rablks;
+static uint_t bcache_units; /* number of devices with cache */
+static uint_t bcache_unit_nblks; /* nblocks per unit */
+static uint_t bcache_hits;
+static uint_t bcache_misses;
+static uint_t bcache_ops;
+static uint_t bcache_bypasses;
+static uint_t bcache_bcount;
+static uint_t bcache_rablks;
#define BHASH(bc, blkno) ((blkno) & ((bc)->bcache_nblks - 1))
#define BCACHE_LOOKUP(bc, blkno) \
@@ -96,9 +96,9 @@ static void bcache_free_instance(struct bcache *bc);
void
bcache_init(size_t nblks, size_t bsize)
{
- /* set up control data */
- bcache_total_nblks = nblks;
- bcache_blksize = bsize;
+ /* set up control data */
+ bcache_total_nblks = nblks;
+ bcache_blksize = bsize;
}
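
Aside (not part of the patch): the BHASH macro above depends on bcache_nblks
being a power of two, so the bitwise AND acts as a cheap modulus when mapping
a block number to a cache slot. A minimal standalone C sketch, with example
sizes assumed for illustration:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t
	bhash(uint64_t blkno, uint64_t nblks)
	{
		/* nblks must be a power of two for the mask to be a modulus */
		assert(nblks != 0 && (nblks & (nblks - 1)) == 0);
		return (blkno & (nblks - 1));
	}

	int
	main(void)
	{
		assert(bhash(37, 32) == 37 % 32);	/* slot 5 */
		assert(bhash(64, 32) == 0);		/* wraps to slot 0 */
		return (0);
	}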
/*
@@ -112,69 +112,69 @@ bcache_init(size_t nblks, size_t bsize)
void
bcache_add_dev(int devices)
{
- bcache_numdev += devices;
+ bcache_numdev += devices;
}
void *
bcache_allocate(void)
{
- u_int i;
- struct bcache *bc = malloc(sizeof (struct bcache));
- int disks = bcache_numdev;
+ uint_t i;
+ struct bcache *bc = malloc(sizeof (struct bcache));
+ int disks = bcache_numdev;
+
+ if (disks == 0)
+ disks = 1; /* safe guard */
+
+ if (bc == NULL) {
+ errno = ENOMEM;
+ return (bc);
+ }
+
+ /*
+ * the bcache block count must be power of 2 for hash function
+ */
+ i = fls(disks) - 1; /* highbit - 1 */
+ if (disks > (1 << i)) /* next power of 2 */
+ i++;
+
+ bc->bcache_nblks = bcache_total_nblks >> i;
+ bcache_unit_nblks = bc->bcache_nblks;
+ bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize);
+ if (bc->bcache_data == NULL) {
+ /* dont error out yet. fall back to 32 blocks and try again */
+ bc->bcache_nblks = 32;
+ bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize +
+ sizeof (uint32_t));
+ }
- if (disks == 0)
- disks = 1; /* safe guard */
+ bc->bcache_ctl = malloc(bc->bcache_nblks * sizeof (struct bcachectl));
- if (bc == NULL) {
- errno = ENOMEM;
+ if ((bc->bcache_data == NULL) || (bc->bcache_ctl == NULL)) {
+ bcache_free_instance(bc);
+ errno = ENOMEM;
+ return (NULL);
+ }
+
+ /* Flush the cache */
+ for (i = 0; i < bc->bcache_nblks; i++) {
+ bc->bcache_ctl[i].bc_count = -1;
+ bc->bcache_ctl[i].bc_blkno = -1;
+ }
+ bcache_units++;
+ bc->ra = BCACHE_READAHEAD; /* optimistic read ahead */
return (bc);
- }
-
- /*
- * the bcache block count must be power of 2 for hash function
- */
- i = fls(disks) - 1; /* highbit - 1 */
- if (disks > (1 << i)) /* next power of 2 */
- i++;
-
- bc->bcache_nblks = bcache_total_nblks >> i;
- bcache_unit_nblks = bc->bcache_nblks;
- bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize);
- if (bc->bcache_data == NULL) {
- /* dont error out yet. fall back to 32 blocks and try again */
- bc->bcache_nblks = 32;
- bc->bcache_data = malloc(bc->bcache_nblks * bcache_blksize +
- sizeof (uint32_t));
- }
-
- bc->bcache_ctl = malloc(bc->bcache_nblks * sizeof(struct bcachectl));
-
- if ((bc->bcache_data == NULL) || (bc->bcache_ctl == NULL)) {
- bcache_free_instance(bc);
- errno = ENOMEM;
- return (NULL);
- }
-
- /* Flush the cache */
- for (i = 0; i < bc->bcache_nblks; i++) {
- bc->bcache_ctl[i].bc_count = -1;
- bc->bcache_ctl[i].bc_blkno = -1;
- }
- bcache_units++;
- bc->ra = BCACHE_READAHEAD; /* optimistic read ahead */
- return (bc);
}
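
Aside (not part of the patch): the fls() arithmetic in bcache_allocate()
above rounds the disk count up to a power of two so the global block pool
divides evenly among units. A standalone sketch; my_fls() is a portable
stand-in for fls(), and the block counts are made-up examples:

	#include <stdio.h>

	static int
	my_fls(int v)		/* index of highest set bit, 1-based */
	{
		int bit = 0;

		while (v != 0) {
			bit++;
			v >>= 1;
		}
		return (bit);
	}

	int
	main(void)
	{
		int total = 512;	/* e.g. bcache_total_nblks */
		int disks = 3;
		int i = my_fls(disks) - 1;	/* highbit - 1: floor(log2(3)) == 1 */

		if (disks > (1 << i))		/* 3 > 2, so round up */
			i++;
		printf("blocks per unit: %d\n", total >> i);	/* 512 >> 2 == 128 */
		return (0);
	}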
void
bcache_free(void *cache)
{
- struct bcache *bc = cache;
+ struct bcache *bc = cache;
- if (bc == NULL)
- return;
+ if (bc == NULL)
+ return;
- bcache_free_instance(bc);
- bcache_units--;
+ bcache_free_instance(bc);
+ bcache_units--;
}
/*
@@ -185,19 +185,19 @@ static int
write_strategy(void *devdata, int rw, daddr_t blk, size_t size,
char *buf, size_t *rsize)
{
- struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
- struct bcache *bc = dd->dv_cache;
- daddr_t i, nblk;
+ struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
+ struct bcache *bc = dd->dv_cache;
+ daddr_t i, nblk;
- nblk = size / bcache_blksize;
+ nblk = size / bcache_blksize;
- /* Invalidate the blocks being written */
- for (i = 0; i < nblk; i++) {
- bcache_invalidate(bc, blk + i);
- }
+ /* Invalidate the blocks being written */
+ for (i = 0; i < nblk; i++) {
+ bcache_invalidate(bc, blk + i);
+ }
- /* Write the blocks */
- return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
+ /* Write the blocks */
+ return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
}
/*
@@ -209,141 +209,145 @@ static int
read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
char *buf, size_t *rsize)
{
- struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
- struct bcache *bc = dd->dv_cache;
- size_t i, nblk, p_size, r_size, complete, ra;
- int result;
- daddr_t p_blk;
- caddr_t p_buf;
-
- if (bc == NULL) {
- errno = ENODEV;
- return (-1);
- }
-
- if (rsize != NULL)
- *rsize = 0;
-
- nblk = size / bcache_blksize;
- if (nblk == 0 && size != 0)
- nblk++;
- result = 0;
- complete = 1;
-
- /* Satisfy any cache hits up front, break on first miss */
- for (i = 0; i < nblk; i++) {
- if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) {
- bcache_misses += (nblk - i);
- complete = 0;
- if (nblk - i > BCACHE_MINREADAHEAD && bc->ra > BCACHE_MINREADAHEAD)
- bc->ra >>= 1; /* reduce read ahead */
- break;
- } else {
- bcache_hits++;
+ struct bcache_devdata *dd = devdata;
+ struct bcache *bc = dd->dv_cache;
+ size_t i, nblk, p_size, r_size, complete, ra;
+ int result;
+ daddr_t p_blk;
+ caddr_t p_buf;
+
+ if (bc == NULL) {
+ errno = ENODEV;
+ return (-1);
}
- }
-
- if (complete) { /* whole set was in cache, return it */
- if (bc->ra < BCACHE_READAHEAD)
- bc->ra <<= 1; /* increase read ahead */
- bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
- goto done;
- }
-
- /*
- * Fill in any misses. From check we have i pointing to first missing
- * block, read in all remaining blocks + readahead.
- * We have space at least for nblk - i before bcache wraps.
- */
- p_blk = blk + i;
- p_buf = bc->bcache_data + (bcache_blksize * BHASH(bc, p_blk));
- r_size = bc->bcache_nblks - BHASH(bc, p_blk); /* remaining blocks */
-
- p_size = MIN(r_size, nblk - i); /* read at least those blocks */
-
- /*
- * The read ahead size setup.
- * While the read ahead can save us IO, it also can complicate things:
- * 1. We do not want to read ahead by wrapping around the
- * bcache end - this would complicate the cache management.
- * 2. We are using bc->ra as dynamic hint for read ahead size,
- * detected cache hits will increase the read-ahead block count, and
- * misses will decrease, see the code above.
- * 3. The bcache is sized by 512B blocks, however, the underlying device
- * may have a larger sector size, and we should perform the IO by
- * taking into account these larger sector sizes. We could solve this by
- * passing the sector size to bcache_allocate(), or by using ioctl(), but
- * in this version we are using the constant, 16 blocks, and are rounding
- * read ahead block count down to multiple of 16.
- * Using the constant has two reasons, we are not entirely sure if the
- * BIOS disk interface is providing the correct value for sector size.
- * And secondly, this way we get the most conservative setup for the ra.
- *
- * The selection of multiple of 16 blocks (8KB) is quite arbitrary, however,
- * we want to cover CDs (2K) and 4K disks.
- * bcache_allocate() will always fall back to a minimum of 32 blocks.
- * Our choice of 16 read ahead blocks will always fit inside the bcache.
- */
-
- if ((rw & F_NORA) == F_NORA)
- ra = 0;
- else
- ra = bc->bcache_nblks - BHASH(bc, p_blk + p_size);
-
- if (ra != 0 && ra != bc->bcache_nblks) { /* do we have RA space? */
- ra = MIN(bc->ra, ra - 1);
- ra = rounddown(ra, 16); /* multiple of 16 blocks */
- p_size += ra;
- }
-
- /* invalidate bcache */
- for (i = 0; i < p_size; i++) {
- bcache_invalidate(bc, p_blk + i);
- }
-
- r_size = 0;
- /*
- * with read-ahead, it may happen we are attempting to read past
- * disk end, as bcache has no information about disk size.
- * in such case we should get partial read if some blocks can be
- * read or error, if no blocks can be read.
- * in either case we should return the data in bcache and only
- * return error if there is no data.
- */
- rw &= F_MASK;
- result = dd->dv_strategy(dd->dv_devdata, rw, p_blk,
- p_size * bcache_blksize, p_buf, &r_size);
-
- r_size /= bcache_blksize;
- for (i = 0; i < r_size; i++)
- bcache_insert(bc, p_blk + i);
-
- /* update ra statistics */
- if (r_size != 0) {
- if (r_size < p_size)
- bcache_rablks += (p_size - r_size);
+
+ if (rsize != NULL)
+ *rsize = 0;
+
+ nblk = size / bcache_blksize;
+ if (nblk == 0 && size != 0)
+ nblk++;
+ result = 0;
+ complete = 1;
+
+ /* Satisfy any cache hits up front, break on first miss */
+ for (i = 0; i < nblk; i++) {
+ if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) {
+ bcache_misses += (nblk - i);
+ complete = 0;
+ if (nblk - i > BCACHE_MINREADAHEAD &&
+ bc->ra > BCACHE_MINREADAHEAD)
+ bc->ra >>= 1; /* reduce read ahead */
+ break;
+ } else {
+ bcache_hits++;
+ }
+ }
+
+ if (complete) { /* whole set was in cache, return it */
+ if (bc->ra < BCACHE_READAHEAD)
+ bc->ra <<= 1; /* increase read ahead */
+ bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)),
+ buf, size);
+ goto done;
+ }
+
+ /*
+ * Fill in any misses. From check we have i pointing to first missing
+ * block, read in all remaining blocks + readahead.
+ * We have space at least for nblk - i before bcache wraps.
+ */
+ p_blk = blk + i;
+ p_buf = bc->bcache_data + (bcache_blksize * BHASH(bc, p_blk));
+ r_size = bc->bcache_nblks - BHASH(bc, p_blk); /* remaining blocks */
+
+ p_size = MIN(r_size, nblk - i); /* read at least those blocks */
+
+ /*
+ * The read ahead size setup.
+ * While the read ahead can save us IO, it also can complicate things:
+ * 1. We do not want to read ahead by wrapping around the
+ * bcache end - this would complicate the cache management.
+ * 2. We are using bc->ra as dynamic hint for read ahead size,
+ * detected cache hits will increase the read-ahead block count,
+ * and misses will decrease, see the code above.
+ * 3. The bcache is sized by 512B blocks, however, the underlying device
+ * may have a larger sector size, and we should perform the IO by
+ * taking into account these larger sector sizes. We could solve
+ * this by passing the sector size to bcache_allocate(), or by
+ * using ioctl(), but in this version we are using the constant,
+ * 16 blocks, and are rounding read ahead block count down to
+ * multiple of 16. Using the constant has two reasons, we are not
+ * entirely sure if the BIOS disk interface is providing the
+ * correct value for sector size. And secondly, this way we get
+ * the most conservative setup for the ra.
+ *
+ * The selection of multiple of 16 blocks (8KB) is quite arbitrary,
+ * however, we want to cover CDs (2K) and 4K disks.
+ * bcache_allocate() will always fall back to a minimum of 32 blocks.
+ * Our choice of 16 read ahead blocks will always fit inside the bcache.
+ */
+
+ if ((rw & F_NORA) == F_NORA)
+ ra = 0;
else
- bcache_rablks += ra;
- }
+ ra = bc->bcache_nblks - BHASH(bc, p_blk + p_size);
- /* check how much data can we copy */
- for (i = 0; i < nblk; i++) {
- if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i)))
- break;
- }
+ if (ra != 0 && ra != bc->bcache_nblks) { /* do we have RA space? */
+ ra = MIN(bc->ra, ra - 1);
+ ra = rounddown(ra, 16); /* multiple of 16 blocks */
+ p_size += ra;
+ }
- if (size > i * bcache_blksize)
- size = i * bcache_blksize;
+ /* invalidate bcache */
+ for (i = 0; i < p_size; i++) {
+ bcache_invalidate(bc, p_blk + i);
+ }
- if (size != 0) {
- bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
- result = 0;
- }
+ r_size = 0;
+ /*
+ * with read-ahead, it may happen we are attempting to read past
+ * disk end, as bcache has no information about disk size.
+ * in such case we should get partial read if some blocks can be
+ * read or error, if no blocks can be read.
+ * in either case we should return the data in bcache and only
+ * return error if there is no data.
+ */
+ rw &= F_MASK;
+ result = dd->dv_strategy(dd->dv_devdata, rw, p_blk,
+ p_size * bcache_blksize, p_buf, &r_size);
+
+ r_size /= bcache_blksize;
+ for (i = 0; i < r_size; i++)
+ bcache_insert(bc, p_blk + i);
+
+ /* update ra statistics */
+ if (r_size != 0) {
+ if (r_size < p_size)
+ bcache_rablks += (p_size - r_size);
+ else
+ bcache_rablks += ra;
+ }
- done:
- if ((result == 0) && (rsize != NULL))
- *rsize = size;
- return(result);
+ /* check how much data can we copy */
+ for (i = 0; i < nblk; i++) {
+ if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i)))
+ break;
+ }
+
+ if (size > i * bcache_blksize)
+ size = i * bcache_blksize;
+
+ if (size != 0) {
+ bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)),
+ buf, size);
+ result = 0;
+ }
+
+done:
+ if ((result == 0) && (rsize != NULL))
+ *rsize = size;
+ return (result);
}
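
Aside (not part of the patch): the read-ahead clamp in read_strategy() above
never wraps past the cache end and rounds the count down to a multiple of 16
blocks. A standalone sketch of that arithmetic; the MIN/rounddown macros and
the sample sizes are assumptions for illustration:

	#include <stdio.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))
	#define	rounddown(x, y)	(((x) / (y)) * (y))

	int
	main(void)
	{
		size_t nblks = 128;	/* bc->bcache_nblks */
		size_t hash = 100;	/* BHASH(bc, p_blk + p_size) */
		size_t dyn_ra = 32;	/* bc->ra, the dynamic hint */
		size_t ra = nblks - hash;	/* space before the cache wraps */

		if (ra != 0 && ra != nblks) {
			ra = MIN(dyn_ra, ra - 1);	/* never wrap past the end */
			ra = rounddown(ra, 16);		/* multiple of 16 blocks */
		}
		printf("read ahead: %zu blocks\n", ra);	/* prints 16 */
		return (0);
	}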
/*
@@ -354,68 +358,72 @@ int
bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
char *buf, size_t *rsize)
{
- struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
- struct bcache *bc = dd->dv_cache;
- u_int bcache_nblks = 0;
- int nblk, cblk, ret;
- size_t csize, isize, total;
-
- bcache_ops++;
-
- if (bc != NULL)
- bcache_nblks = bc->bcache_nblks;
-
- /* bypass large requests, or when the cache is inactive */
- if (bc == NULL ||
- ((size * 2 / bcache_blksize) > bcache_nblks)) {
- DEBUG("bypass %zu from %qu", size / bcache_blksize, blk);
- bcache_bypasses++;
- rw &= F_MASK;
- return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf, rsize));
- }
-
- switch (rw & F_MASK) {
- case F_READ:
- nblk = size / bcache_blksize;
- if (size != 0 && nblk == 0)
- nblk++; /* read at least one block */
-
- ret = 0;
- total = 0;
- while(size) {
- cblk = bcache_nblks - BHASH(bc, blk); /* # of blocks left */
- cblk = MIN(cblk, nblk);
-
- if (size <= bcache_blksize)
- csize = size;
- else
- csize = cblk * bcache_blksize;
-
- ret = read_strategy(devdata, rw, blk, csize, buf+total, &isize);
-
- /*
- * we may have error from read ahead, if we have read some data
- * return partial read.
- */
- if (ret != 0 || isize == 0) {
- if (total != 0)
- ret = 0;
- break;
- }
- blk += isize / bcache_blksize;
- total += isize;
- size -= isize;
- nblk = size / bcache_blksize;
+ struct bcache_devdata *dd = (struct bcache_devdata *)devdata;
+ struct bcache *bc = dd->dv_cache;
+ uint_t bcache_nblks = 0;
+ int nblk, cblk, ret;
+ size_t csize, isize, total;
+
+ bcache_ops++;
+
+ if (bc != NULL)
+ bcache_nblks = bc->bcache_nblks;
+
+ /* bypass large requests, or when the cache is inactive */
+ if (bc == NULL ||
+ ((size * 2 / bcache_blksize) > bcache_nblks)) {
+ DEBUG("bypass %zu from %qu", size / bcache_blksize, blk);
+ bcache_bypasses++;
+ rw &= F_MASK;
+ return (dd->dv_strategy(dd->dv_devdata, rw, blk, size, buf,
+ rsize));
}
- if (rsize)
- *rsize = total;
-
- return (ret);
- case F_WRITE:
- return write_strategy(devdata, F_WRITE, blk, size, buf, rsize);
- }
- return -1;
+ switch (rw & F_MASK) {
+ case F_READ:
+ nblk = size / bcache_blksize;
+ if (size != 0 && nblk == 0)
+ nblk++; /* read at least one block */
+
+ ret = 0;
+ total = 0;
+ while (size) {
+ /* # of blocks left */
+ cblk = bcache_nblks - BHASH(bc, blk);
+ cblk = MIN(cblk, nblk);
+
+ if (size <= bcache_blksize)
+ csize = size;
+ else
+ csize = cblk * bcache_blksize;
+
+ ret = read_strategy(devdata, rw, blk, csize,
+ buf + total, &isize);
+
+ /*
+ * we may have error from read ahead, if we have read
+ * some data return partial read.
+ */
+ if (ret != 0 || isize == 0) {
+ if (total != 0)
+ ret = 0;
+ break;
+ }
+ blk += isize / bcache_blksize;
+ total += isize;
+ size -= isize;
+ nblk = size / bcache_blksize;
+ }
+
+ if (rsize)
+ *rsize = total;
+
+ return (ret);
+ case F_WRITE:
+ return (write_strategy(devdata, F_WRITE, blk, size, buf,
+ rsize));
+ }
+ return (-1);
}
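
Aside (not part of the patch): the F_READ loop in bcache_strategy() above
chops a request into chunks that each fit before the cache index wraps. A
standalone sketch of the chunking, with an assumed request and cache size:

	#include <stdio.h>

	#define	MIN(a, b)	((a) < (b) ? (a) : (b))

	int
	main(void)
	{
		long nblks = 32;	/* cache size in blocks, power of two */
		long blk = 20;		/* starting block of the request */
		long nblk = 40;		/* request length in blocks */

		while (nblk > 0) {
			/* blocks left before the hash wraps back to slot 0 */
			long cblk = nblks - (blk & (nblks - 1));

			cblk = MIN(cblk, nblk);
			printf("read %ld blocks at %ld\n", cblk, blk);
			blk += cblk;
			nblk -= cblk;
		}
		/* prints: read 12 blocks at 20, then read 28 blocks at 32 */
		return (0);
	}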
/*
@@ -424,13 +432,11 @@ bcache_strategy(void *devdata, int rw, daddr_t blk, size_t size,
static void
bcache_free_instance(struct bcache *bc)
{
- if (bc != NULL) {
- if (bc->bcache_ctl)
- free(bc->bcache_ctl);
- if (bc->bcache_data)
- free(bc->bcache_data);
- free(bc);
- }
+ if (bc != NULL) {
+ free(bc->bcache_ctl);
+ free(bc->bcache_data);
+ free(bc);
+ }
}
/*
@@ -439,13 +445,13 @@ bcache_free_instance(struct bcache *bc)
static void
bcache_insert(struct bcache *bc, daddr_t blkno)
{
- u_int cand;
+ uint_t cand;
- cand = BHASH(bc, blkno);
+ cand = BHASH(bc, blkno);
- DEBUG("insert blk %llu -> %u # %d", blkno, cand, bcache_bcount);
- bc->bcache_ctl[cand].bc_blkno = blkno;
- bc->bcache_ctl[cand].bc_count = bcache_bcount++;
+ DEBUG("insert blk %llu -> %u # %d", blkno, cand, bcache_bcount);
+ bc->bcache_ctl[cand].bc_blkno = blkno;
+ bc->bcache_ctl[cand].bc_count = bcache_bcount++;
}
/*
@@ -454,32 +460,33 @@ bcache_insert(struct bcache *bc, daddr_t blkno)
static void
bcache_invalidate(struct bcache *bc, daddr_t blkno)
{
- u_int i;
-
- i = BHASH(bc, blkno);
- if (bc->bcache_ctl[i].bc_blkno == blkno) {
- bc->bcache_ctl[i].bc_count = -1;
- bc->bcache_ctl[i].bc_blkno = -1;
- DEBUG("invalidate blk %llu", blkno);
- }
+ uint_t i;
+
+ i = BHASH(bc, blkno);
+ if (bc->bcache_ctl[i].bc_blkno == blkno) {
+ bc->bcache_ctl[i].bc_count = -1;
+ bc->bcache_ctl[i].bc_blkno = -1;
+ DEBUG("invalidate blk %llu", blkno);
+ }
}
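
Aside (not part of the patch): bcache_insert() and bcache_invalidate() above
implement a direct-mapped cache, so invalidation only clears a slot that
still names the target block; a colliding insert may already have evicted
it. A standalone sketch with assumed values:

	#include <stdio.h>

	struct slot {
		long	blkno;
		int	count;
	};

	int
	main(void)
	{
		struct slot cache[32];
		long blkno = 100;
		long i = blkno & 31;		/* BHASH with nblks == 32 */

		cache[i].blkno = blkno;		/* bcache_insert(100) */
		cache[i].count = 0;

		cache[i].blkno = 132;		/* 132 collides and evicts 100 */

		if (cache[blkno & 31].blkno == blkno) {	/* invalidate(100) */
			cache[blkno & 31].blkno = -1;
			cache[blkno & 31].count = -1;
		} else {
			printf("slot holds %ld; 100 already evicted\n",
			    cache[blkno & 31].blkno);
		}
		return (0);
	}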
-COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats", command_bcache);
+COMMAND_SET(bcachestat, "bcachestat", "get disk block cache stats",
+ command_bcache);
static int
-command_bcache(int argc, char *argv[] __attribute((unused)))
+command_bcache(int argc, char *argv[] __unused)
{
- if (argc != 1) {
- command_errmsg = "wrong number of arguments";
- return(CMD_ERROR);
- }
-
- printf("\ncache blocks: %u\n", bcache_total_nblks);
- printf("cache blocksz: %u\n", bcache_blksize);
- printf("cache readahead: %u\n", bcache_rablks);
- printf("unit cache blocks: %u\n", bcache_unit_nblks);
- printf("cached units: %u\n", bcache_units);
- printf("%u ops %u bypasses %u hits %u misses\n", bcache_ops,
- bcache_bypasses, bcache_hits, bcache_misses);
- return(CMD_OK);
+ if (argc != 1) {
+ command_errmsg = "wrong number of arguments";
+ return (CMD_ERROR);
+ }
+
+ printf("\ncache blocks: %u\n", bcache_total_nblks);
+ printf("cache blocksz: %u\n", bcache_blksize);
+ printf("cache readahead: %u\n", bcache_rablks);
+ printf("unit cache blocks: %u\n", bcache_unit_nblks);
+ printf("cached units: %u\n", bcache_units);
+ printf("%u ops %u bypasses %u hits %u misses\n", bcache_ops,
+ bcache_bypasses, bcache_hits, bcache_misses);
+ return (CMD_OK);
}