path: root/src/pmdas/solaris
Diffstat (limited to 'src/pmdas/solaris')
-rw-r--r--  src/pmdas/solaris/GNUmakefile         |   82
-rw-r--r--  src/pmdas/solaris/arcstats.c          |   55
-rw-r--r--  src/pmdas/solaris/clusters.h          |   20
-rw-r--r--  src/pmdas/solaris/common.h            |  137
-rw-r--r--  src/pmdas/solaris/data.c              | 1462
-rw-r--r--  src/pmdas/solaris/disk.c              |  420
-rw-r--r--  src/pmdas/solaris/help                |  729
-rw-r--r--  src/pmdas/solaris/kvm.c               |   59
-rw-r--r--  src/pmdas/solaris/netlink.c           |  125
-rw-r--r--  src/pmdas/solaris/netmib2.c           |  329
-rw-r--r--  src/pmdas/solaris/netmib2.h           |   54
-rw-r--r--  src/pmdas/solaris/pmns.disk           |   58
-rw-r--r--  src/pmdas/solaris/pmns.hinv           |   34
-rw-r--r--  src/pmdas/solaris/pmns.kernel         |  200
-rw-r--r--  src/pmdas/solaris/pmns.mem            |   88
-rw-r--r--  src/pmdas/solaris/pmns.network        |  302
-rw-r--r--  src/pmdas/solaris/pmns.zfs            |   60
-rw-r--r--  src/pmdas/solaris/pmns.zpool          |   31
-rw-r--r--  src/pmdas/solaris/pmns.zpool_perdisk  |   16
-rw-r--r--  src/pmdas/solaris/root                |   42
-rw-r--r--  src/pmdas/solaris/solaris.c           |  216
-rw-r--r--  src/pmdas/solaris/sysinfo.c           |  376
-rw-r--r--  src/pmdas/solaris/vnops.c             |  221
-rw-r--r--  src/pmdas/solaris/zfs.c               |  171
-rw-r--r--  src/pmdas/solaris/zpool.c             |  154
-rw-r--r--  src/pmdas/solaris/zpool_perdisk.c     |  289
26 files changed, 5730 insertions(+), 0 deletions(-)
diff --git a/src/pmdas/solaris/GNUmakefile b/src/pmdas/solaris/GNUmakefile
new file mode 100644
index 0000000..e5f858c
--- /dev/null
+++ b/src/pmdas/solaris/GNUmakefile
@@ -0,0 +1,82 @@
+#
+# Copyright (c) 2013 Red Hat.
+# Copyright (c) 2000,2003,2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+
+TOPDIR = ../../..
+include $(TOPDIR)/src/include/builddefs
+
+IAM = solaris
+DOMAIN = SOLARIS
+PMDADIR = $(PCP_PMDAS_DIR)/$(IAM)
+PMDAINIT = solaris_init
+CMDTARGET = pmdasolaris
+LIBTARGET = pmda_solaris.so
+CONF_LINE = "solaris 75 dso $(PMDAINIT) $(PMDADIR)/$(LIBTARGET)"
+
+CFILES = solaris.c data.c sysinfo.c disk.c zpool.c zfs.c \
+ zpool_perdisk.c netmib2.c netlink.c kvm.c arcstats.c vnops.c
+
+BARE_NS = disk kernel mem network hinv zpool zfs zpool_perdisk
+PMNS = $(BARE_NS:%=pmns.%)
+
+LSRCFILES = $(PMNS) help root common.h clusters.h netmib2.h
+HELPTARGETS = help.dir help.pag
+VERSION_SCRIPT = exports
+
+LDIRT = domain.h *.log $(HELPTARGETS) root_solaris
+
+LLDLIBS = $(PCP_PMDALIB) -lkstat -lzfs -lnvpair -lkvm -ldevinfo
+
+default: build-me
+
+include $(BUILDRULES)
+
+ifeq "$(TARGET_OS)" "solaris"
+build-me: root_solaris domain.h $(LIBTARGET) $(CMDTARGET) $(HELPTARGETS)
+ @if [ `grep -c $(CONF_LINE) ../pmcd.conf` -eq 0 ]; then \
+ echo $(CONF_LINE) >> ../pmcd.conf ; \
+ fi
+
+install: build-me
+ $(INSTALL) -m 755 -d $(PMDADIR)
+ $(INSTALL) -m 644 domain.h $(HELPTARGETS) $(PMDADIR)
+ $(INSTALL) -m 755 $(LIBTARGET) $(CMDTARGET) $(PMDADIR)
+ $(INSTALL) -m 644 root_solaris $(PCP_VAR_DIR)/pmns/root_solaris
+else
+build-me:
+install:
+endif
+
+default_pcp : default
+
+install_pcp : install
+
+$(OBJECTS): common.h
+
+$(HELPTARGETS): help root_solaris
+ $(RUN_IN_BUILD_ENV) $(TOPDIR)/src/newhelp/newhelp -n root_solaris -v 2 -o help < help
+
+root_solaris: ../../pmns/stdpmid $(PMNS) root
+ rm -f root_solaris
+ sed -e 's;<stdpmid>;"../../pmns/stdpmid";' <root \
+ | $(RUN_IN_BUILD_ENV) $(TOPDIR)/src/pmcpp/pmcpp \
+ | sed -e '/^#/d' -e '/^$$/d' >root_solaris
+
+domain.h: ../../pmns/stdpmid
+ $(DOMAIN_MAKERULE)
+
+$(VERSION_SCRIPT):
+ $(VERSION_SCRIPT_MAKERULE)
+
+$(LIBTARGET): $(VERSION_SCRIPT)
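On Solaris builds, the build-me target above appends CONF_LINE to pmcd.conf so that pmcd loads the PMDA as a DSO. As an illustration of the variable substitution only — PCP_PMDAS_DIR is platform-configured, and /var/lib/pcp/pmdas is an assumed value here — the appended line would read:

    solaris 75 dso solaris_init /var/lib/pcp/pmdas/solaris/pmda_solaris.so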
diff --git a/src/pmdas/solaris/arcstats.c b/src/pmdas/solaris/arcstats.c
new file mode 100644
index 0000000..75cd17c
--- /dev/null
+++ b/src/pmdas/solaris/arcstats.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2010 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Extract information about ZFS' Adjustable Replacement Cache
+ *
+ * The stats are in the kstat named "arcstats" of the "zfs" module
+ */
+
+#include <kstat.h>
+#include "common.h"
+
+static kstat_t *arcstats;
+int arcstats_fresh;
+
+void
+arcstats_refresh(void)
+{
+ kstat_ctl_t *kc;
+ arcstats_fresh = 0;
+ if ((kc = kstat_ctl_update()) == NULL)
+ return;
+ if ((arcstats = kstat_lookup(kc, "zfs", -1, "arcstats")) != NULL)
+ arcstats_fresh = kstat_read(kc, arcstats, NULL) != -1;
+}
+
+int
+arcstats_fetch(pmdaMetric *pm, int inst, pmAtomValue *av)
+{
+ metricdesc_t *md = pm->m_user;
+ char *metric = (char *)md->md_offset;
+ kstat_named_t *kn;
+
+ if (!arcstats_fresh)
+ return 0;
+
+ if ((kn = kstat_data_lookup(arcstats, metric)) != NULL)
+ return kstat_named_to_pmAtom(kn, av);
+
+ return 0;
+}
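arcstats.c above is a thin wrapper around the Solaris libkstat API: look up the "arcstats" kstat in the "zfs" module, read it, then pull named values out on demand. A minimal standalone sketch of the same lookup/read/extract pattern — assuming a Solaris system and linking with -lkstat; the "size" statistic name matches the zfs.arc.size entry in data.c below:

    #include <stdio.h>
    #include <kstat.h>

    int
    main(void)
    {
        kstat_ctl_t *kc;
        kstat_t *ksp;
        kstat_named_t *kn;

        if ((kc = kstat_open()) == NULL)
            return 1;

        /* Same lookup/read sequence as arcstats_refresh() */
        if ((ksp = kstat_lookup(kc, "zfs", -1, "arcstats")) == NULL ||
            kstat_read(kc, ksp, NULL) == -1) {
            kstat_close(kc);
            return 1;
        }

        /* Same per-metric extraction as arcstats_fetch() */
        if ((kn = kstat_data_lookup(ksp, "size")) != NULL &&
            kn->data_type == KSTAT_DATA_UINT64)
            printf("ARC size: %llu bytes\n",
                   (unsigned long long)kn->value.ui64);

        kstat_close(kc);
        return 0;
    }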
diff --git a/src/pmdas/solaris/clusters.h b/src/pmdas/solaris/clusters.h
new file mode 100644
index 0000000..9e44ccb
--- /dev/null
+++ b/src/pmdas/solaris/clusters.h
@@ -0,0 +1,20 @@
+#ifndef __PMDA_SOLARIS_CLUSTERS_H
+#define __PMDA_SOLARIS_CLUSTERS_H
+
+/*
+ * PMID cluster numbers
+ *
+ * Clusters are used to index the methodtab[] table and must be contiguous
+ */
+#define SCLR_SYSINFO 0
+#define SCLR_DISK 1
+#define SCLR_NETIF 2
+#define SCLR_ZPOOL 3
+#define SCLR_ZFS 4
+#define SCLR_ZPOOL_PERDISK 5
+#define SCLR_NETLINK 6
+#define SCLR_FSFLUSH 7
+#define SCLR_ARCSTATS 8
+#define SCLR_FILESYS 9
+
+#endif
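Each SCLR_* value doubles as an index into methodtab[] in data.c, which is why the comment insists the numbers stay contiguous. A hypothetical sketch of the dispatch this layout enables — solaris_fetch_callback is an illustrative name, and pmid_cluster() stands for the libpcp accessor that extracts the cluster field from a pmID; neither is confirmed by this diff:

    /* Sketch only: route a fetch request to the handler for the
     * metric's cluster by indexing methodtab[] with the cluster
     * number encoded in the PMID. */
    static int
    solaris_fetch_callback(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *av)
    {
        int cluster = pmid_cluster(mdesc->m_desc.pmid);  /* assumed accessor */

        if (cluster < 0 || cluster >= methodtab_sz)
            return PM_ERR_PMID;
        return methodtab[cluster].m_fetch(mdesc, inst, av);
    }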
diff --git a/src/pmdas/solaris/common.h b/src/pmdas/solaris/common.h
new file mode 100644
index 0000000..469725e
--- /dev/null
+++ b/src/pmdas/solaris/common.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __PMDASOLARIS_COMMON_H
+#define __PMDASOLARIS_COMMON_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "pmapi.h"
+#include "impl.h"
+#include "pmda.h"
+#include "domain.h"
+#include "clusters.h"
+
+#include <kstat.h>
+#include <sys/sysinfo.h>
+
+typedef struct {
+ const char *m_name;
+ void (*m_init)(int);
+ void (*m_prefetch)(void);
+ int (*m_fetch)(pmdaMetric *, int, pmAtomValue *);
+ int m_fetched;
+ uint64_t m_elapsed;
+ uint64_t m_hits;
+} method_t;
+
+extern method_t methodtab[];
+extern const int methodtab_sz;
+
+extern void init_data(int);
+
+extern void sysinfo_init(int);
+extern void sysinfo_prefetch(void);
+extern int sysinfo_fetch(pmdaMetric *, int, pmAtomValue *);
+
+extern void disk_init(int);
+extern void disk_prefetch(void);
+extern int disk_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void zpool_init(int);
+void zpool_refresh(void);
+int zpool_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void zfs_init(int);
+void zfs_refresh(void);
+int zfs_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void zpool_perdisk_init(int);
+void zpool_perdisk_refresh(void);
+int zpool_perdisk_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void netlink_init(int);
+void netlink_refresh(void);
+int netlink_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void kvm_init(int);
+void kvm_refresh(void);
+int kvm_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void arcstats_refresh(void);
+int arcstats_fetch(pmdaMetric *, int, pmAtomValue *);
+
+void vnops_init(int);
+void vnops_refresh(void);
+int vnops_fetch(pmdaMetric *, int, pmAtomValue *);
+
+/*
+ * metric descriptions
+ */
+typedef struct {
+ const char *md_name;
+ pmDesc md_desc; // PMDA's idea of the semantics
+ ptrdiff_t md_offset; // offset into kstat stats structure
+ uint64_t md_elapsed;
+ uint64_t md_hits;
+} metricdesc_t;
+
+extern metricdesc_t metricdesc[];
+extern pmdaMetric *metrictab;
+extern int metrictab_sz;
+
+#define DISK_INDOM 0
+#define CPU_INDOM 1
+#define NETIF_INDOM 2
+#define ZPOOL_INDOM 3
+#define ZFS_INDOM 4
+#define ZPOOL_PERDISK_INDOM 5
+#define NETLINK_INDOM 6
+#define ZFS_SNAP_INDOM 7
+#define LOADAVG_INDOM 8
+#define PREFETCH_INDOM 9
+#define METRIC_INDOM 10
+#define FILESYS_INDOM 11
+#define FSTYPE_INDOM 12
+
+extern pmdaIndom indomtab[];
+extern int indomtab_sz;
+
+/*
+ * kstat() control
+ */
+kstat_ctl_t *kstat_ctl_update(void);
+void kstat_ctl_needs_update(void);
+int kstat_named_to_pmAtom(const kstat_named_t *, pmAtomValue *);
+int kstat_named_to_typed_atom(const kstat_named_t *, int, pmAtomValue *);
+
+/* Snarfed from usr/src/uts/common/fs/fsflush.c in OpenSolaris source tree */
+typedef struct {
+ ulong_t fsf_scan; /* number of pages scanned */
+ ulong_t fsf_examined; /* number of page_t's actually examined, can */
+ /* be less than fsf_scan due to large pages */
+ ulong_t fsf_locked; /* pages we actually page_lock()ed */
+ ulong_t fsf_modified; /* number of modified pages found */
+ ulong_t fsf_coalesce; /* number of page coalesces done */
+ ulong_t fsf_time; /* nanoseconds of run time */
+ ulong_t fsf_releases; /* number of page_release() done */
+} fsf_stat_t;
+
+#endif
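A note on md_offset: for the kstat-backed clusters it records the byte offset of a field inside the raw kstat data structure (the SYSINFO_OFF/KSTAT_IO_OFF macros in data.c below compute it via the classic null-pointer spelling of offsetof()), so a single generic fetch routine can serve many metrics. A minimal sketch of how such an offset might be applied; fetch_u64_at is a hypothetical helper, not a function from this diff:

    #include <stdint.h>
    #include "common.h"

    /* Hypothetical helper: read a uint64_t field out of a kstat data
     * buffer at the byte offset recorded in the metric table. */
    static uint64_t
    fetch_u64_at(const void *ksdata, const metricdesc_t *md)
    {
        return *(const uint64_t *)((const char *)ksdata + md->md_offset);
    }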
diff --git a/src/pmdas/solaris/data.c b/src/pmdas/solaris/data.c
new file mode 100644
index 0000000..6d09a0e
--- /dev/null
+++ b/src/pmdas/solaris/data.c
@@ -0,0 +1,1462 @@
+/*
+ * Data structures that define metrics and control the Solaris PMDA
+ *
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2010 Max Matveev. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "common.h"
+#include "netmib2.h"
+#include <ctype.h>
+#include <libzfs.h>
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+#endif
+
+method_t methodtab[] = {
+ { "sysinfo", sysinfo_init, sysinfo_prefetch, sysinfo_fetch },
+ { "disk", disk_init, disk_prefetch, disk_fetch },
+ { "netmib2", netmib2_init, netmib2_refresh, netmib2_fetch },
+ { "zpool", zpool_init, zpool_refresh, zpool_fetch },
+ { "zfs", zfs_init, zfs_refresh, zfs_fetch },
+ { "zpool_vdev", zpool_perdisk_init, zpool_perdisk_refresh, zpool_perdisk_fetch },
+ { "netlink", netlink_init, netlink_refresh, netlink_fetch },
+ { "kvm", kvm_init, kvm_refresh, kvm_fetch },
+ { "zfs_arc", NULL, arcstats_refresh, arcstats_fetch },
+ { "filesystem", vnops_init, vnops_refresh, vnops_fetch }
+};
+
+const int methodtab_sz = ARRAY_SIZE(methodtab);
+static pmdaInstid prefetch_insts[ARRAY_SIZE(methodtab)];
+
+static pmdaInstid loadavg_insts[] = {
+ {1, "1 minute"},
+ {5, "5 minute"},
+ {15, "15 minute"}
+};
+
+pmdaMetric *metrictab;
+
+#define SYSINFO_OFF(field) ((ptrdiff_t)&((cpu_stat_t *)0)->cpu_sysinfo.field)
+#define KSTAT_IO_OFF(field) ((ptrdiff_t)&((kstat_io_t *)0)->field)
+#define VDEV_OFFSET(field) ((ptrdiff_t)&((vdev_stat_t *)0)->field)
+#define NM2_UDP_OFFSET(field) ((ptrdiff_t)&(nm2_udp.field))
+#define NM2_NETIF_OFFSET(field) ((ptrdiff_t)&((nm2_netif_stats_t *)0)->field)
+#define FSF_STAT_OFFSET(field) ((ptrdiff_t)&((fsf_stat_t *)0)->field)
+
+/*
+ * all metrics supported in this PMDA - one table entry for each metric
+ */
+metricdesc_t metricdesc[] = {
+
+ { "kernel.all.cpu.idle",
+ { PMDA_PMID(SCLR_SYSINFO,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_IDLE]) },
+
+ { "kernel.all.cpu.user",
+ { PMDA_PMID(SCLR_SYSINFO,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_USER]) },
+
+ { "kernel.all.cpu.sys",
+ { PMDA_PMID(SCLR_SYSINFO,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_KERNEL]) },
+
+ { "kernel.all.cpu.wait.total",
+ { PMDA_PMID(SCLR_SYSINFO,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_WAIT]) },
+
+ { "kernel.percpu.cpu.idle",
+ { PMDA_PMID(SCLR_SYSINFO,4), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_IDLE]) },
+
+ { "kernel.percpu.cpu.user",
+ { PMDA_PMID(SCLR_SYSINFO,5), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_USER]) },
+
+ { "kernel.percpu.cpu.sys",
+ { PMDA_PMID(SCLR_SYSINFO,6), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_KERNEL]) },
+
+ { "kernel.percpu.cpu.wait.total",
+ { PMDA_PMID(SCLR_SYSINFO,7), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(cpu[CPU_WAIT]) },
+
+ { "kernel.all.cpu.wait.io",
+ { PMDA_PMID(SCLR_SYSINFO,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(wait[W_IO]) },
+
+ { "kernel.all.cpu.wait.pio",
+ { PMDA_PMID(SCLR_SYSINFO,9), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(wait[W_PIO]) },
+
+ { "kernel.all.cpu.wait.swap",
+ { PMDA_PMID(SCLR_SYSINFO,10), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(wait[W_SWAP]) },
+
+ { "kernel.percpu.cpu.wait.io",
+ { PMDA_PMID(SCLR_SYSINFO,11), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(wait[W_IO]) },
+
+ { "kernel.percpu.cpu.wait.pio",
+ { PMDA_PMID(SCLR_SYSINFO,12), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(wait[W_PIO]) },
+
+ { "kernel.percpu.cpu.wait.swap",
+ { PMDA_PMID(SCLR_SYSINFO,13), PM_TYPE_U64, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_MSEC, 0)
+ }, SYSINFO_OFF(wait[W_SWAP]) },
+
+ { "kernel.all.io.bread",
+ { PMDA_PMID(SCLR_SYSINFO,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(bread) },
+
+ { "kernel.all.io.bwrite",
+ { PMDA_PMID(SCLR_SYSINFO,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(bwrite) },
+
+ { "kernel.all.io.lread",
+ { PMDA_PMID(SCLR_SYSINFO,16), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(lread) },
+
+ { "kernel.all.io.lwrite",
+ { PMDA_PMID(SCLR_SYSINFO,17), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(lwrite) },
+
+ { "kernel.percpu.io.bread",
+ { PMDA_PMID(SCLR_SYSINFO,18), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(bread) },
+
+ { "kernel.percpu.io.bwrite",
+ { PMDA_PMID(SCLR_SYSINFO,19), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(bwrite) },
+
+ { "kernel.percpu.io.lread",
+ { PMDA_PMID(SCLR_SYSINFO,20), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(lread) },
+
+ { "kernel.percpu.io.lwrite",
+ { PMDA_PMID(SCLR_SYSINFO,21), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(lwrite) },
+
+ { "kernel.all.syscall",
+ { PMDA_PMID(SCLR_SYSINFO,22), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(syscall) },
+
+ { "kernel.all.pswitch",
+ { PMDA_PMID(SCLR_SYSINFO,23), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(pswitch) },
+
+ { "kernel.percpu.syscall",
+ { PMDA_PMID(SCLR_SYSINFO,24), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(syscall) },
+
+ { "kernel.percpu.pswitch",
+ { PMDA_PMID(SCLR_SYSINFO,25), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(pswitch) },
+
+ { "kernel.all.io.phread",
+ { PMDA_PMID(SCLR_SYSINFO,26), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(phread) },
+
+ { "kernel.all.io.phwrite",
+ { PMDA_PMID(SCLR_SYSINFO,27), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(phwrite) },
+
+ { "kernel.all.io.intr",
+ { PMDA_PMID(SCLR_SYSINFO,28), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(intr) },
+
+ { "kernel.percpu.io.phread",
+ { PMDA_PMID(SCLR_SYSINFO,29), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(phread) },
+
+ { "kernel.percpu.io.phwrite",
+ { PMDA_PMID(SCLR_SYSINFO,30), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(phwrite) },
+
+ { "kernel.percpu.io.intr",
+ { PMDA_PMID(SCLR_SYSINFO,31), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(intr) },
+
+ { "kernel.all.trap",
+ { PMDA_PMID(SCLR_SYSINFO,32), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(trap) },
+
+ { "kernel.all.sysexec",
+ { PMDA_PMID(SCLR_SYSINFO,33), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysexec) },
+
+ { "kernel.all.sysfork",
+ { PMDA_PMID(SCLR_SYSINFO,34), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysfork) },
+
+ { "kernel.all.sysvfork",
+ { PMDA_PMID(SCLR_SYSINFO,35), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysvfork) },
+
+ { "kernel.all.sysread",
+ { PMDA_PMID(SCLR_SYSINFO,36), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysread) },
+
+ { "kernel.all.syswrite",
+ { PMDA_PMID(SCLR_SYSINFO,37), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(syswrite) },
+
+ { "kernel.percpu.trap",
+ { PMDA_PMID(SCLR_SYSINFO,38), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(trap) },
+
+ { "kernel.percpu.sysexec",
+ { PMDA_PMID(SCLR_SYSINFO,39), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysexec) },
+
+ { "kernel.percpu.sysfork",
+ { PMDA_PMID(SCLR_SYSINFO,40), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysfork) },
+
+ { "kernel.percpu.sysvfork",
+ { PMDA_PMID(SCLR_SYSINFO,41), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysvfork) },
+
+ { "kernel.percpu.sysread",
+ { PMDA_PMID(SCLR_SYSINFO,42), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(sysread) },
+
+ { "kernel.percpu.syswrite",
+ { PMDA_PMID(SCLR_SYSINFO,43), PM_TYPE_U32, CPU_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, SYSINFO_OFF(syswrite) },
+
+ { "disk.all.read",
+ { PMDA_PMID(SCLR_DISK,0), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, KSTAT_IO_OFF(reads) },
+
+ { "disk.all.write",
+ { PMDA_PMID(SCLR_DISK,1), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, KSTAT_IO_OFF(writes) },
+
+ { "disk.all.total",
+ { PMDA_PMID(SCLR_DISK,2), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, -1},
+
+ { "disk.all.read_bytes",
+ { PMDA_PMID(SCLR_DISK,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, KSTAT_IO_OFF(nread) },
+
+ { "disk.all.write_bytes",
+ { PMDA_PMID(SCLR_DISK,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, KSTAT_IO_OFF(nwritten) },
+
+ { "disk.all.total_bytes",
+ { PMDA_PMID(SCLR_DISK,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, -1},
+
+ { "disk.dev.read",
+ { PMDA_PMID(SCLR_DISK,10), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, KSTAT_IO_OFF(reads) },
+
+
+ { "disk.dev.write",
+ { PMDA_PMID(SCLR_DISK,11), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, KSTAT_IO_OFF(writes) },
+
+ { "disk.dev.total",
+ { PMDA_PMID(SCLR_DISK,12), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, -1},
+
+ { "disk.dev.read_bytes",
+ { PMDA_PMID(SCLR_DISK,13), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, KSTAT_IO_OFF(nread) },
+
+ { "disk.dev.write_bytes",
+ { PMDA_PMID(SCLR_DISK,14), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, KSTAT_IO_OFF(nwritten) },
+
+ { "disk.dev.total_bytes",
+ { PMDA_PMID(SCLR_DISK,15), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, -1},
+
+ { "hinv.ncpu",
+ { PMDA_PMID(SCLR_SYSINFO,56), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1},
+
+ { "hinv.ndisk",
+ { PMDA_PMID(SCLR_DISK,20), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1},
+
+ { "hinv.nfilesys",
+ { PMDA_PMID(SCLR_FILESYS,1023), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1},
+
+ { "pmda.uname",
+ { PMDA_PMID(SCLR_SYSINFO,107), PM_TYPE_STRING, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1 },
+
+ { "hinv.pagesize",
+ { PMDA_PMID(SCLR_SYSINFO,108), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, -1 },
+
+ { "hinv.physmem",
+ { PMDA_PMID(SCLR_SYSINFO,109), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_MBYTE, 0, 0)
+ }, -1 },
+
+ { "zpool.capacity",
+ { PMDA_PMID(SCLR_ZPOOL,2), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, VDEV_OFFSET(vs_space) },
+ { "zpool.used",
+ { PMDA_PMID(SCLR_ZPOOL,3), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, VDEV_OFFSET(vs_alloc) },
+ { "zpool.checksum_errors",
+ { PMDA_PMID(SCLR_ZPOOL,4), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_checksum_errors) },
+ { "zpool.self_healed",
+ { PMDA_PMID(SCLR_ZPOOL,5), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, VDEV_OFFSET(vs_self_healed) },
+ { "zpool.in.bytes",
+ { PMDA_PMID(SCLR_ZPOOL,6), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, VDEV_OFFSET(vs_bytes[ZIO_TYPE_READ]) },
+ { "zpool.in.ops",
+ { PMDA_PMID(SCLR_ZPOOL,7), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_ops[ZIO_TYPE_READ]) },
+ { "zpool.in.errors",
+ { PMDA_PMID(SCLR_ZPOOL,8), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_read_errors) },
+ { "zpool.out.bytes",
+ { PMDA_PMID(SCLR_ZPOOL,9), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, VDEV_OFFSET(vs_bytes[ZIO_TYPE_WRITE]) },
+ { "zpool.out.ops",
+ { PMDA_PMID(SCLR_ZPOOL,10), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_ops[ZIO_TYPE_WRITE]) },
+ { "zpool.out.errors",
+ { PMDA_PMID(SCLR_ZPOOL,11), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_write_errors) },
+ { "zpool.ops.noops",
+ { PMDA_PMID(SCLR_ZPOOL,12), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_ops[ZIO_TYPE_NULL]) },
+ { "zpool.ops.ioctls",
+ { PMDA_PMID(SCLR_ZPOOL,13), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_ops[ZIO_TYPE_IOCTL]) },
+ { "zpool.ops.claims",
+ { PMDA_PMID(SCLR_ZPOOL,14), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_ops[ZIO_TYPE_CLAIM]) },
+ { "zpool.ops.frees",
+ { PMDA_PMID(SCLR_ZPOOL,15), PM_TYPE_U64, ZPOOL_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_ops[ZIO_TYPE_FREE]) },
+ { "zfs.used.total",
+ { PMDA_PMID(SCLR_ZFS,10), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_USED },
+ { "zfs.available",
+ { PMDA_PMID(SCLR_ZFS,0), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_AVAILABLE },
+ { "zfs.quota",
+ { PMDA_PMID(SCLR_ZFS,1), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_QUOTA },
+ { "zfs.reservation",
+ { PMDA_PMID(SCLR_ZFS,2), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_RESERVATION },
+ { "zfs.compression",
+ { PMDA_PMID(SCLR_ZFS,3), PM_TYPE_DOUBLE, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, ZFS_PROP_COMPRESSRATIO },
+ { "zfs.copies",
+ { PMDA_PMID(SCLR_ZFS,4), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, ZFS_PROP_COPIES },
+ { "zfs.used.byme",
+ { PMDA_PMID(SCLR_ZFS,11), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_USEDDS },
+ { "zfs.used.bysnapshots",
+ { PMDA_PMID(SCLR_ZFS,12), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_USEDSNAP },
+ { "zfs.used.bychildren",
+ { PMDA_PMID(SCLR_ZFS,13), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_USEDCHILD },
+
+ { "network.udp.ipackets",
+ { PMDA_PMID(SCLR_NETIF,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_UDP_OFFSET(ipackets) },
+ { "network.udp.opackets",
+ { PMDA_PMID(SCLR_NETIF,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_UDP_OFFSET(opackets) },
+ { "network.udp.ierrors",
+ { PMDA_PMID(SCLR_NETIF,16), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_UDP_OFFSET(ierrors) },
+ { "network.udp.oerrors",
+ { PMDA_PMID(SCLR_NETIF,17), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_UDP_OFFSET(oerrors) },
+
+ { "network.interface.mtu",
+ { PMDA_PMID(SCLR_NETIF,0), PM_TYPE_U32, NETIF_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, NM2_NETIF_OFFSET(mtu) },
+ { "network.interface.in.packets",
+ { PMDA_PMID(SCLR_NETIF,2), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(ipackets) },
+ { "network.interface.in.bytes",
+ { PMDA_PMID(SCLR_NETIF,3), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, NM2_NETIF_OFFSET(ibytes) },
+ { "network.interface.in.bcasts",
+ { PMDA_PMID(SCLR_NETIF,4), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(ibcast) },
+ { "network.interface.in.mcasts",
+ { PMDA_PMID(SCLR_NETIF,5), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(imcast) },
+ { "network.interface.out.packets",
+ { PMDA_PMID(SCLR_NETIF,9), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(opackets) },
+ { "network.interface.out.bytes",
+ { PMDA_PMID(SCLR_NETIF,10), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, NM2_NETIF_OFFSET(obytes) },
+ { "network.interface.out.bcasts",
+ { PMDA_PMID(SCLR_NETIF,11), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(obcast) },
+ { "network.interface.out.mcasts",
+ { PMDA_PMID(SCLR_NETIF,12), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(omcast) },
+ { "network.interface.in.errors",
+ { PMDA_PMID(SCLR_NETIF,1), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(ierrors) },
+ { "network.interface.out.errors",
+ { PMDA_PMID(SCLR_NETIF,8), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(oerrors) },
+ { "network.interface.in.drops",
+ { PMDA_PMID(SCLR_NETIF,6), PM_TYPE_U32, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(idrops) },
+ { "network.interface.out.drops",
+ { PMDA_PMID(SCLR_NETIF,13), PM_TYPE_U32, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(odrops) },
+ { "network.interface.in.delivers",
+ { PMDA_PMID(SCLR_NETIF,7), PM_TYPE_U64, NETIF_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_NETIF_OFFSET(delivered) },
+ { "network.udp.noports",
+ { PMDA_PMID(SCLR_NETIF,18), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_UDP_OFFSET(noports) },
+ { "network.udp.overflows",
+ { PMDA_PMID(SCLR_NETIF,19), PM_TYPE_U32, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, NM2_UDP_OFFSET(overflows) },
+
+ { "zpool.state",
+ { PMDA_PMID(SCLR_ZPOOL,0), PM_TYPE_STRING, ZPOOL_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, 0 },
+
+ { "zpool.state_int",
+ { PMDA_PMID(SCLR_ZPOOL,1), PM_TYPE_U32, ZPOOL_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, 0 },
+ { "zpool.perdisk.state",
+ { PMDA_PMID(SCLR_ZPOOL_PERDISK,0), PM_TYPE_STRING, ZPOOL_PERDISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, VDEV_OFFSET(vs_state) },
+ { "zpool.perdisk.state_int",
+ { PMDA_PMID(SCLR_ZPOOL_PERDISK,1), PM_TYPE_U32, ZPOOL_PERDISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, 0 },
+ { "zpool.perdisk.checksum_errors",
+ { PMDA_PMID(SCLR_ZPOOL_PERDISK,2), PM_TYPE_U64, ZPOOL_PERDISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_checksum_errors) },
+ { "zpool.perdisk.self_healed",
+ { PMDA_PMID(SCLR_ZPOOL_PERDISK,3), PM_TYPE_U64, ZPOOL_PERDISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, VDEV_OFFSET(vs_self_healed) },
+ { "zpool.perdisk.in.errors",
+ { PMDA_PMID(SCLR_ZPOOL_PERDISK,4), PM_TYPE_U64, ZPOOL_PERDISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_read_errors) },
+ { "zpool.perdisk.out.errors",
+ { PMDA_PMID(SCLR_ZPOOL_PERDISK,5), PM_TYPE_U64, ZPOOL_PERDISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, VDEV_OFFSET(vs_write_errors) },
+
+ { "network.link.in.errors",
+ { PMDA_PMID(SCLR_NETLINK,4), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ierrors" },
+ { "network.link.in.packets",
+ { PMDA_PMID(SCLR_NETLINK,5), PM_TYPE_U64, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ipackets64" },
+ { "network.link.in.bytes",
+ { PMDA_PMID(SCLR_NETLINK,6), PM_TYPE_U64, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"rbytes64" },
+ { "network.link.in.bcasts",
+ { PMDA_PMID(SCLR_NETLINK,7), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"brdcstrcv" },
+ { "network.link.in.mcasts",
+ { PMDA_PMID(SCLR_NETLINK,8), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"multircv" },
+ { "network.link.in.nobufs",
+ { PMDA_PMID(SCLR_NETLINK,9), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"norcvbuf" },
+ { "network.link.out.errors",
+ { PMDA_PMID(SCLR_NETLINK,10), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"oerrors" },
+ { "network.link.out.packets",
+ { PMDA_PMID(SCLR_NETLINK,11), PM_TYPE_U64, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"opackets64" },
+ { "network.link.out.bytes",
+ { PMDA_PMID(SCLR_NETLINK,12), PM_TYPE_U64, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"obytes64" },
+ { "network.link.out.bcasts",
+ { PMDA_PMID(SCLR_NETLINK,13), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"brdcstxmt" },
+ { "network.link.out.mcasts",
+ { PMDA_PMID(SCLR_NETLINK,14), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"multixmt" },
+ { "network.link.out.nobufs",
+ { PMDA_PMID(SCLR_NETLINK,15), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"noxmtbuf" },
+ { "network.link.collisions",
+ { PMDA_PMID(SCLR_NETLINK,0), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"collisions" },
+ { "network.link.state",
+ { PMDA_PMID(SCLR_NETLINK,1), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"link_state" },
+ { "network.link.duplex",
+ { PMDA_PMID(SCLR_NETLINK,2), PM_TYPE_U32, NETLINK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"link_duplex" },
+ { "network.link.speed",
+ { PMDA_PMID(SCLR_NETLINK,3), PM_TYPE_U64, NETLINK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ifspeed" },
+
+ { "zfs.recordsize",
+ { PMDA_PMID(SCLR_ZFS,5), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_RECORDSIZE },
+ { "zfs.refquota",
+ { PMDA_PMID(SCLR_ZFS,6), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_REFQUOTA },
+ { "zfs.refreservation",
+ { PMDA_PMID(SCLR_ZFS,7), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_REFRESERVATION },
+ { "zfs.used.byrefreservation",
+ { PMDA_PMID(SCLR_ZFS,14), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_USEDREFRESERV },
+ { "zfs.referenced",
+ { PMDA_PMID(SCLR_ZFS,8), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_REFERENCED },
+ { "zfs.nsnapshots",
+ { PMDA_PMID(SCLR_ZFS,9), PM_TYPE_U64, ZFS_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, -1 },
+ { "zfs.snapshot.used",
+ { PMDA_PMID(SCLR_ZFS,15), PM_TYPE_U64, ZFS_SNAP_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_USED },
+ { "zfs.snapshot.referenced",
+ { PMDA_PMID(SCLR_ZFS,16), PM_TYPE_U64, ZFS_SNAP_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, ZFS_PROP_REFERENCED },
+ { "zfs.snapshot.compression",
+ { PMDA_PMID(SCLR_ZFS,17), PM_TYPE_DOUBLE, ZFS_SNAP_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, ZFS_PROP_COMPRESSRATIO },
+ { "kernel.all.load",
+ { PMDA_PMID(SCLR_SYSINFO,135), PM_TYPE_FLOAT, LOADAVG_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, 0 },
+
+ { "kernel.fsflush.scanned",
+ { PMDA_PMID(SCLR_FSFLUSH,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, FSF_STAT_OFFSET(fsf_scan) },
+ { "kernel.fsflush.examined",
+ { PMDA_PMID(SCLR_FSFLUSH,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, FSF_STAT_OFFSET(fsf_examined) },
+ { "kernel.fsflush.locked",
+ { PMDA_PMID(SCLR_FSFLUSH,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, FSF_STAT_OFFSET(fsf_locked) },
+ { "kernel.fsflush.modified",
+ { PMDA_PMID(SCLR_FSFLUSH,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, FSF_STAT_OFFSET(fsf_modified) },
+ { "kernel.fsflush.coalesced",
+ { PMDA_PMID(SCLR_FSFLUSH,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, FSF_STAT_OFFSET(fsf_coalesce) },
+ { "kernel.fsflush.released",
+ { PMDA_PMID(SCLR_FSFLUSH,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, FSF_STAT_OFFSET(fsf_releases) },
+ { "kernel.fsflush.time",
+ { PMDA_PMID(SCLR_FSFLUSH,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, FSF_STAT_OFFSET(fsf_time) },
+
+ { "mem.physmem",
+ { PMDA_PMID(SCLR_SYSINFO,136), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_KBYTE, 0, 0)
+ }, -1},
+ { "mem.freemem",
+ { PMDA_PMID(SCLR_SYSINFO,137), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_KBYTE, 0, 0)
+ }, -1},
+ { "mem.lotsfree",
+ { PMDA_PMID(SCLR_SYSINFO,138), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_KBYTE, 0, 0)
+ }, -1},
+ { "mem.availrmem",
+ { PMDA_PMID(SCLR_SYSINFO,139), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_KBYTE, 0, 0)
+ }, -1},
+
+ { "zfs.arc.size",
+ { PMDA_PMID(SCLR_ARCSTATS,0), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"size"},
+ { "zfs.arc.min_size",
+ { PMDA_PMID(SCLR_ARCSTATS,1), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"c_min"},
+ { "zfs.arc.max_size",
+ { PMDA_PMID(SCLR_ARCSTATS,2), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"c_max"},
+ { "zfs.arc.mru_size",
+ { PMDA_PMID(SCLR_ARCSTATS,3), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"p"},
+ { "zfs.arc.target_size",
+ { PMDA_PMID(SCLR_ARCSTATS,4), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"c"},
+ { "zfs.arc.misses.total",
+ { PMDA_PMID(SCLR_ARCSTATS,5), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"misses"},
+ { "zfs.arc.misses.demand_data",
+ { PMDA_PMID(SCLR_ARCSTATS,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"demand_data_misses"},
+ { "zfs.arc.misses.demand_metadata",
+ { PMDA_PMID(SCLR_ARCSTATS,7), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"demand_metadata_misses"},
+ { "zfs.arc.misses.prefetch_data",
+ { PMDA_PMID(SCLR_ARCSTATS,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"prefetch_data_misses"},
+ { "zfs.arc.misses.prefetch_metadata",
+ { PMDA_PMID(SCLR_ARCSTATS,9), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"prefetch_metadata_misses"},
+ { "zfs.arc.hits.total",
+ { PMDA_PMID(SCLR_ARCSTATS,10), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"hits"},
+ { "zfs.arc.hits.mfu",
+ { PMDA_PMID(SCLR_ARCSTATS,11), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"mfu_hits"},
+ { "zfs.arc.hits.mru",
+ { PMDA_PMID(SCLR_ARCSTATS,12), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"mru_hits"},
+ { "zfs.arc.hits.mfu_ghost",
+ { PMDA_PMID(SCLR_ARCSTATS,13), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"mfu_ghost_hits"},
+ { "zfs.arc.hits.mru_ghost",
+ { PMDA_PMID(SCLR_ARCSTATS,14), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"mru_ghost_hits"},
+ { "zfs.arc.hits.demand_data",
+ { PMDA_PMID(SCLR_ARCSTATS,15), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"demand_data_hits"},
+ { "zfs.arc.hits.demand_metadata",
+ { PMDA_PMID(SCLR_ARCSTATS,16), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"demand_metadata_hits"},
+ { "zfs.arc.hits.prefetch_data",
+ { PMDA_PMID(SCLR_ARCSTATS,17), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"prefetch_data_hits"},
+ { "zfs.arc.hits.prefetch_metadata",
+ { PMDA_PMID(SCLR_ARCSTATS,18), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"prefetch_metadata_hits"},
+ { "pmda.prefetch.time",
+ { PMDA_PMID(4095,0), PM_TYPE_U64, PREFETCH_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, -1 },
+ { "pmda.prefetch.count",
+ { PMDA_PMID(4095,1), PM_TYPE_U64, PREFETCH_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1 },
+ { "pmda.metric.time",
+ { PMDA_PMID(4095,2), PM_TYPE_U64, METRIC_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, -1 },
+ { "pmda.metric.count",
+ { PMDA_PMID(4095,3), PM_TYPE_U64, METRIC_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1 },
+ { "disk.dev.wait.time",
+ { PMDA_PMID(SCLR_DISK,16), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, KSTAT_IO_OFF(wtime)},
+ { "disk.dev.wait.count",
+ { PMDA_PMID(SCLR_DISK,17), PM_TYPE_U32, DISK_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, KSTAT_IO_OFF(wcnt)},
+ { "disk.dev.run.time",
+ { PMDA_PMID(SCLR_DISK,18), PM_TYPE_U64, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, KSTAT_IO_OFF(rtime)},
+ { "disk.dev.run.count",
+ { PMDA_PMID(SCLR_DISK,19), PM_TYPE_U32, DISK_INDOM, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, KSTAT_IO_OFF(rcnt)},
+
+ { "disk.all.wait.time",
+ { PMDA_PMID(SCLR_DISK,6), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, KSTAT_IO_OFF(wtime)},
+ { "disk.all.wait.count",
+ { PMDA_PMID(SCLR_DISK,7), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, -1},
+ { "disk.all.run.time",
+ { PMDA_PMID(SCLR_DISK,8), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 1, 0, 0, PM_TIME_NSEC, 0)
+ }, KSTAT_IO_OFF(rtime)},
+ { "disk.all.run.count",
+ { PMDA_PMID(SCLR_DISK,9), PM_TYPE_U64, PM_INDOM_NULL, PM_SEM_INSTANT,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, -1},
+
+ { "kernel.fs.read_bytes",
+ { PMDA_PMID(SCLR_FILESYS,0), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"read_bytes"},
+ { "kernel.fs.readdir_bytes",
+ { PMDA_PMID(SCLR_FILESYS,1), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"readdir_bytes"},
+ { "kernel.fs.write_bytes",
+ { PMDA_PMID(SCLR_FILESYS,2), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"write_bytes"},
+ { "kernel.fs.vnops.access",
+ { PMDA_PMID(SCLR_FILESYS,3), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"naccess"},
+ { "kernel.fs.vnops.addmap",
+ {PMDA_PMID(SCLR_FILESYS,4), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"naddmap"},
+ { "kernel.fs.vnops.close",
+ {PMDA_PMID(SCLR_FILESYS,5), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nclose"},
+ { "kernel.fs.vnops.cmp",
+ {PMDA_PMID(SCLR_FILESYS,6), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ncmp"},
+ { "kernel.fs.vnops.create",
+ {PMDA_PMID(SCLR_FILESYS,7), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ncreate"},
+ { "kernel.fs.vnops.delmap",
+ {PMDA_PMID(SCLR_FILESYS,8), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndelmap"},
+ { "kernel.fs.vnops.dispose",
+ {PMDA_PMID(SCLR_FILESYS,9), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndispose"},
+ { "kernel.fs.vnops.dump",
+ {PMDA_PMID(SCLR_FILESYS,10), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndump"},
+ { "kernel.fs.vnops.dumpctl",
+ {PMDA_PMID(SCLR_FILESYS,11), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndumpctl"},
+ { "kernel.fs.vnops.fid",
+ {PMDA_PMID(SCLR_FILESYS,12), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nfid"},
+ { "kernel.fs.vnops.frlock",
+ {PMDA_PMID(SCLR_FILESYS,13), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nfrlock"},
+ { "kernel.fs.vnops.fsync",
+ {PMDA_PMID(SCLR_FILESYS,14), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nfsync"},
+ { "kernel.fs.vnops.getattr",
+ {PMDA_PMID(SCLR_FILESYS,15), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ngetattr"},
+ { "kernel.fs.vnops.getpage",
+ {PMDA_PMID(SCLR_FILESYS,16), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ngetpage"},
+ { "kernel.fs.vnops.getsecattr",
+ {PMDA_PMID(SCLR_FILESYS,17), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ngetsecattr"},
+ { "kernel.fs.vnops.inactive",
+ {PMDA_PMID(SCLR_FILESYS,18), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ninactive"},
+ { "kernel.fs.vnops.ioctl",
+ {PMDA_PMID(SCLR_FILESYS,19), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nioctl"},
+ { "kernel.fs.vnops.link",
+ {PMDA_PMID(SCLR_FILESYS,20), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nlink"},
+ { "kernel.fs.vnops.lookup",
+ {PMDA_PMID(SCLR_FILESYS,21), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nlookup"},
+ { "kernel.fs.vnops.map",
+ {PMDA_PMID(SCLR_FILESYS,22), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nmap"},
+ { "kernel.fs.vnops.mkdir",
+ {PMDA_PMID(SCLR_FILESYS,23), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nmkdir"},
+ { "kernel.fs.vnops.open",
+ {PMDA_PMID(SCLR_FILESYS,24), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nopen"},
+ { "kernel.fs.vnops.pageio",
+ {PMDA_PMID(SCLR_FILESYS,25), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"npageio"},
+ { "kernel.fs.vnops.pathconf",
+ {PMDA_PMID(SCLR_FILESYS,26), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"npathconf"},
+ { "kernel.fs.vnops.poll",
+ {PMDA_PMID(SCLR_FILESYS,27), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"npoll"},
+ { "kernel.fs.vnops.putpage",
+ {PMDA_PMID(SCLR_FILESYS,28), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nputpage"},
+ { "kernel.fs.vnops.read",
+ {PMDA_PMID(SCLR_FILESYS,29), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nread"},
+ { "kernel.fs.vnops.readdir",
+ {PMDA_PMID(SCLR_FILESYS,30), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nreaddir"},
+ { "kernel.fs.vnops.readlink",
+ {PMDA_PMID(SCLR_FILESYS,31), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nreadlink"},
+ { "kernel.fs.vnops.realvp",
+ {PMDA_PMID(SCLR_FILESYS,32), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrealvp"},
+ { "kernel.fs.vnops.remove",
+ {PMDA_PMID(SCLR_FILESYS,33), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nremove"},
+ { "kernel.fs.vnops.rename",
+ {PMDA_PMID(SCLR_FILESYS,34), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrename"},
+ { "kernel.fs.vnops.rmdir",
+ {PMDA_PMID(SCLR_FILESYS,35), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrmdir"},
+ { "kernel.fs.vnops.rwlock",
+ {PMDA_PMID(SCLR_FILESYS,36), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrwlock"},
+ { "kernel.fs.vnops.rwunlock",
+ {PMDA_PMID(SCLR_FILESYS,37), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrwunlock"},
+ { "kernel.fs.vnops.seek",
+ {PMDA_PMID(SCLR_FILESYS,38), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nseek"},
+ { "kernel.fs.vnops.setattr",
+ {PMDA_PMID(SCLR_FILESYS,39), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsetattr"},
+ { "kernel.fs.vnops.setfl",
+ {PMDA_PMID(SCLR_FILESYS,40), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsetfl"},
+ { "kernel.fs.vnops.setsecattr",
+ {PMDA_PMID(SCLR_FILESYS,41), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsetsecattr"},
+ { "kernel.fs.vnops.shrlock",
+ {PMDA_PMID(SCLR_FILESYS,42), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nshrlock"},
+ { "kernel.fs.vnops.space",
+ {PMDA_PMID(SCLR_FILESYS,43), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nspace"},
+ { "kernel.fs.vnops.symlink",
+ {PMDA_PMID(SCLR_FILESYS,44), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsymlink"},
+ { "kernel.fs.vnops.vnevent",
+ {PMDA_PMID(SCLR_FILESYS,45), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nvnevent"},
+ { "kernel.fs.vnops.write",
+ {PMDA_PMID(SCLR_FILESYS,46), PM_TYPE_U64, FILESYS_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nwrite"},
+
+ { "kernel.fstype.read_bytes",
+ { PMDA_PMID(SCLR_FILESYS,47), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"read_bytes"},
+ { "kernel.fstype.readdir_bytes",
+ { PMDA_PMID(SCLR_FILESYS,48), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"readdir_bytes"},
+ { "kernel.fstype.write_bytes",
+ { PMDA_PMID(SCLR_FILESYS,49), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"write_bytes"},
+ { "kernel.fstype.vnops.access",
+ { PMDA_PMID(SCLR_FILESYS,50), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"naccess"},
+ { "kernel.fstype.vnops.addmap",
+ {PMDA_PMID(SCLR_FILESYS,51), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"naddmap"},
+ { "kernel.fstype.vnops.close",
+ {PMDA_PMID(SCLR_FILESYS,52), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nclose"},
+ { "kernel.fstype.vnops.cmp",
+ {PMDA_PMID(SCLR_FILESYS,53), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ncmp"},
+ { "kernel.fstype.vnops.create",
+ {PMDA_PMID(SCLR_FILESYS,54), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ncreate"},
+ { "kernel.fstype.vnops.delmap",
+ {PMDA_PMID(SCLR_FILESYS,55), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndelmap"},
+ { "kernel.fstype.vnops.dispose",
+ {PMDA_PMID(SCLR_FILESYS,56), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndispose"},
+ { "kernel.fstype.vnops.dump",
+ {PMDA_PMID(SCLR_FILESYS,57), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndump"},
+ { "kernel.fstype.vnops.dumpctl",
+ {PMDA_PMID(SCLR_FILESYS,58), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ndumpctl"},
+ { "kernel.fstype.vnops.fid",
+ {PMDA_PMID(SCLR_FILESYS,59), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nfid"},
+ { "kernel.fstype.vnops.frlock",
+ {PMDA_PMID(SCLR_FILESYS,60), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nfrlock"},
+ { "kernel.fstype.vnops.fsync",
+ {PMDA_PMID(SCLR_FILESYS,61), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nfsync"},
+ { "kernel.fstype.vnops.getattr",
+ {PMDA_PMID(SCLR_FILESYS,62), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ngetattr"},
+ { "kernel.fstype.vnops.getpage",
+ {PMDA_PMID(SCLR_FILESYS,63), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ngetpage"},
+ { "kernel.fstype.vnops.getsecattr",
+ {PMDA_PMID(SCLR_FILESYS,64), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ngetsecattr"},
+ { "kernel.fstype.vnops.inactive",
+ {PMDA_PMID(SCLR_FILESYS,65), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"ninactive"},
+ { "kernel.fstype.vnops.ioctl",
+ {PMDA_PMID(SCLR_FILESYS,66), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nioctl"},
+ { "kernel.fstype.vnops.link",
+ {PMDA_PMID(SCLR_FILESYS,67), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nlink"},
+ { "kernel.fstype.vnops.lookup",
+ {PMDA_PMID(SCLR_FILESYS,68), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nlookup"},
+ { "kernel.fstype.vnops.map",
+ {PMDA_PMID(SCLR_FILESYS,69), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nmap"},
+ { "kernel.fstype.vnops.mkdir",
+ {PMDA_PMID(SCLR_FILESYS,70), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nmkdir"},
+ { "kernel.fstype.vnops.open",
+ {PMDA_PMID(SCLR_FILESYS,71), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nopen"},
+ { "kernel.fstype.vnops.pageio",
+ {PMDA_PMID(SCLR_FILESYS,72), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"npageio"},
+ { "kernel.fstype.vnops.pathconf",
+ {PMDA_PMID(SCLR_FILESYS,73), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"npathconf"},
+ { "kernel.fstype.vnops.poll",
+ {PMDA_PMID(SCLR_FILESYS,74), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"npoll"},
+ { "kernel.fstype.vnops.putpage",
+ {PMDA_PMID(SCLR_FILESYS,75), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nputpage"},
+ { "kernel.fstype.vnops.read",
+ {PMDA_PMID(SCLR_FILESYS,76), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nread"},
+ { "kernel.fstype.vnops.readdir",
+ {PMDA_PMID(SCLR_FILESYS,77), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nreaddir"},
+ { "kernel.fstype.vnops.readlink",
+ {PMDA_PMID(SCLR_FILESYS,78), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nreadlink"},
+ { "kernel.fstype.vnops.realvp",
+ {PMDA_PMID(SCLR_FILESYS,79), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrealvp"},
+ { "kernel.fstype.vnops.remove",
+ {PMDA_PMID(SCLR_FILESYS,80), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nremove"},
+ { "kernel.fstype.vnops.rename",
+ {PMDA_PMID(SCLR_FILESYS,81), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrename"},
+ { "kernel.fstype.vnops.rmdir",
+ {PMDA_PMID(SCLR_FILESYS,82), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrmdir"},
+ { "kernel.fstype.vnops.rwlock",
+ {PMDA_PMID(SCLR_FILESYS,83), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrwlock"},
+ { "kernel.fstype.vnops.rwunlock",
+ {PMDA_PMID(SCLR_FILESYS,84), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nrwunlock"},
+ { "kernel.fstype.vnops.seek",
+ {PMDA_PMID(SCLR_FILESYS,85), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nseek"},
+ { "kernel.fstype.vnops.setattr",
+ {PMDA_PMID(SCLR_FILESYS,86), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsetattr"},
+ { "kernel.fstype.vnops.setfl",
+ {PMDA_PMID(SCLR_FILESYS,87), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsetfl"},
+ { "kernel.fstype.vnops.setsecattr",
+ {PMDA_PMID(SCLR_FILESYS,88), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsetsecattr"},
+ { "kernel.fstype.vnops.shrlock",
+ {PMDA_PMID(SCLR_FILESYS,89), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nshrlock"},
+ { "kernel.fstype.vnops.space",
+ {PMDA_PMID(SCLR_FILESYS,90), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nspace"},
+ { "kernel.fstype.vnops.symlink",
+ {PMDA_PMID(SCLR_FILESYS,91), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nsymlink"},
+ { "kernel.fstype.vnops.vnevent",
+ {PMDA_PMID(SCLR_FILESYS,92), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nvnevent"},
+ { "kernel.fstype.vnops.write",
+ {PMDA_PMID(SCLR_FILESYS,93), PM_TYPE_U64, FSTYPE_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE)
+ }, (ptrdiff_t)"nwrite"},
+
+ { "hinv.cpu.maxclock",
+ {PMDA_PMID(SCLR_SYSINFO,147), PM_TYPE_64, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, -1, 1, 0, PM_TIME_SEC, 6)
+ }, (ptrdiff_t)"clock_MHz"},
+ { "hinv.cpu.clock",
+ {PMDA_PMID(SCLR_SYSINFO,148), PM_TYPE_U64, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, -1, 1, 0, PM_TIME_SEC, 0)
+ }, (ptrdiff_t)"current_clock_Hz"},
+ { "hinv.cpu.brand",
+ {PMDA_PMID(SCLR_SYSINFO, 149), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"brand"},
+ { "hinv.cpu.frequencies",
+ {PMDA_PMID(SCLR_SYSINFO, 150), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"supported_frequencies_Hz"},
+ { "hinv.cpu.implementation",
+ {PMDA_PMID(SCLR_SYSINFO, 151), PM_TYPE_STRING, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"implementation"},
+ { "hinv.cpu.chip_id",
+ {PMDA_PMID(SCLR_SYSINFO, 152), PM_TYPE_64, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"chip_id"},
+ { "hinv.cpu.clog_id",
+ {PMDA_PMID(SCLR_SYSINFO, 153), PM_TYPE_32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"clog_id"},
+ { "hinv.cpu.core_id",
+ {PMDA_PMID(SCLR_SYSINFO, 154), PM_TYPE_64, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"core_id"},
+ { "hinv.cpu.pkg_core_id",
+ {PMDA_PMID(SCLR_SYSINFO, 155), PM_TYPE_64, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"pkg_core_id"},
+ { "hinv.cpu.cstate",
+ {PMDA_PMID(SCLR_SYSINFO, 156), PM_TYPE_32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"current_cstate"},
+ { "hinv.cpu.maxcstates",
+ {PMDA_PMID(SCLR_SYSINFO, 157), PM_TYPE_32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"supported_max_cstates"},
+ { "hinv.cpu.ncores",
+ {PMDA_PMID(SCLR_SYSINFO, 158), PM_TYPE_32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"ncore_per_chip"},
+ { "hinv.cpu.ncpus",
+ {PMDA_PMID(SCLR_SYSINFO, 159), PM_TYPE_32, CPU_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"ncpu_per_chip"},
+
+ { "disk.dev.errors.soft",
+ {PMDA_PMID(SCLR_DISK, 21), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Soft Errors"},
+ { "disk.dev.errors.hard",
+ {PMDA_PMID(SCLR_DISK, 22), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Hard Errors"},
+ { "disk.dev.errors.transport",
+ {PMDA_PMID(SCLR_DISK, 23), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Transport Errors"},
+ { "disk.dev.errors.media",
+ {PMDA_PMID(SCLR_DISK, 24), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Media Error"},
+ { "disk.dev.errors.recoverable",
+ {PMDA_PMID(SCLR_DISK, 25), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Recoverable"},
+ { "disk.dev.errors.notready",
+ {PMDA_PMID(SCLR_DISK, 26), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Device Not Ready"},
+ { "disk.dev.errors.nodevice",
+ {PMDA_PMID(SCLR_DISK, 27), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"No Device"},
+ { "disk.dev.errors.badrequest",
+ {PMDA_PMID(SCLR_DISK, 28), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Illegal Request"},
+ { "disk.dev.errors.pfa",
+ {PMDA_PMID(SCLR_DISK, 29), PM_TYPE_U32, DISK_INDOM, PM_SEM_COUNTER,
+ PMDA_PMUNITS(0, 0, 1, 0, 0, PM_COUNT_ONE),
+ }, (ptrdiff_t)"Predictive Failure Analysis"},
+ { "hinv.disk.vendor",
+ {PMDA_PMID(SCLR_DISK, 30), PM_TYPE_STRING, DISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"Vendor"},
+ { "hinv.disk.product",
+ {PMDA_PMID(SCLR_DISK, 31), PM_TYPE_STRING, DISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"Product"},
+ { "hinv.disk.revision",
+ {PMDA_PMID(SCLR_DISK, 32), PM_TYPE_STRING, DISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"Revision"},
+ { "hinv.disk.serial",
+ {PMDA_PMID(SCLR_DISK, 33), PM_TYPE_STRING, DISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, (ptrdiff_t)"Serial No"},
+ { "hinv.disk.capacity",
+ { PMDA_PMID(SCLR_DISK,34), PM_TYPE_U64, DISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(1, 0, 0, PM_SPACE_BYTE, 0, 0)
+ }, (ptrdiff_t)"Size" },
+ { "hinv.disk.devlink",
+ {PMDA_PMID(SCLR_DISK, 35), PM_TYPE_STRING, DISK_INDOM, PM_SEM_DISCRETE,
+ PMDA_PMUNITS(0, 0, 0, 0, 0, 0)
+ }, -1}
+
+ /* remember to add trailing comma before adding more entries ... */
+};
+int metrictab_sz = ARRAY_SIZE(metricdesc);
+
+pmdaInstid metric_insts[ARRAY_SIZE(metricdesc)];
+
+/*
+ * List of instance domains ... we expect the *_INDOM macros
+ * to index into this table.
+ */
+pmdaIndom indomtab[] = {
+ { DISK_INDOM, 0, NULL },
+ { CPU_INDOM, 0, NULL },
+ { NETIF_INDOM, 0, NULL },
+ { ZPOOL_INDOM, 0, NULL },
+ { ZFS_INDOM, 0, NULL },
+ { ZPOOL_PERDISK_INDOM, 0, NULL },
+ { NETLINK_INDOM},
+ { ZFS_SNAP_INDOM },
+ { LOADAVG_INDOM, ARRAY_SIZE(loadavg_insts), loadavg_insts},
+ { PREFETCH_INDOM, ARRAY_SIZE(prefetch_insts), prefetch_insts},
+ { METRIC_INDOM, ARRAY_SIZE(metric_insts), metric_insts},
+ { FILESYS_INDOM },
+ { FSTYPE_INDOM }
+};
+
+int indomtab_sz = sizeof(indomtab) / sizeof(indomtab[0]);
+
+static kstat_ctl_t *kc;
+static int kstat_chains_updated;
+
+kstat_ctl_t *
+kstat_ctl_update(void)
+{
+ if (!kstat_chains_updated) {
+ if (kstat_chain_update(kc) == -1) {
+ kstat_chains_updated = 0;
+ return NULL;
+ }
+ kstat_chains_updated = 1;
+ }
+ return kc;
+}
+
+void
+kstat_ctl_needs_update(void)
+{
+ kstat_chains_updated = 0;
+}
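The two functions above implement a lazy, once-per-fetch refresh of the kstat chain: a caller marks the chain stale, and every consumer revalidates it on first use. A minimal sketch of the intended calling pattern follows; the function name is illustrative and not part of this patch.

    static void
    example_prefetch(void)
    {
        kstat_ctl_t *kc;

        kstat_ctl_needs_update();          /* invalidate cached chain state */
        if ((kc = kstat_ctl_update()) == NULL)
            return;                        /* kstat_chain_update() failed */
        /* walk kc->kc_chain here, as disk_walk_chains() in disk.c does */
    }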
+
+void
+init_data(int domain)
+{
+ int i;
+ int serial;
+ __pmID_int *ip;
+
+ /*
+ * set up kstat() handle ... failure is fatal
+ */
+ if ((kc = kstat_open()) == NULL) {
+ fprintf(stderr, "init_data: kstat_open failed: %s\n", osstrerror());
+ exit(1);
+ }
+
+ /*
+ * Create the PMDA's metrictab[] version of the per-metric table.
+ *
+ * Also do domain initialization for each pmid and indom element of
+ * the metricdesc[] table ... the PMDA table is fixed up in
+ * libpcp_pmda
+ */
+ if ((metrictab = (pmdaMetric *)malloc(metrictab_sz * sizeof(pmdaMetric))) == NULL) {
+ fprintf(stderr, "init_data: Error: malloc metrictab [%d] failed: %s\n",
+ (int)(metrictab_sz * sizeof(pmdaMetric)), osstrerror());
+ exit(1);
+ }
+ for (i = 0; i < metrictab_sz; i++) {
+ metrictab[i].m_user = &metricdesc[i];
+ metrictab[i].m_desc = metricdesc[i].md_desc;
+ ip = (__pmID_int *)&metricdesc[i].md_desc.pmid;
+ ip->domain = domain;
+
+ if (metricdesc[i].md_desc.indom != PM_INDOM_NULL) {
+ serial = metricdesc[i].md_desc.indom;
+ metricdesc[i].md_desc.indom = pmInDom_build(domain, serial);
+ }
+ metric_insts[i].i_inst = i+1;
+ metric_insts[i].i_name = (char *)metricdesc[i].md_name;
+ }
+
+    /* Bless indoms with our own domain - usually pmdaInit would do this
+     * for us, but pmdaCache needs properly set up indoms, which means
+     * we have to do it ourselves */
+ for (i = 0; i < indomtab_sz; i++) {
+ __pmindom_int(&indomtab[i].it_indom)->domain = domain;
+ }
+
+ /*
+ * initialize each of the methods
+ */
+ for (i = 0; i < methodtab_sz; i++) {
+ if (methodtab[i].m_init) {
+ methodtab[i].m_init(1);
+ }
+
+ prefetch_insts[i].i_inst = i + 1;
+ prefetch_insts[i].i_name = (char *)methodtab[i].m_name;
+ }
+}
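For reference, the __pmID_int cast in the loop above is just a way of rewriting the domain field of each PMID in place. A sketch of the equivalent fix-up, assuming the pmid_build(), pmid_cluster() and pmid_item() accessor macros from the PCP headers:

    /* equivalent domain fix-up, without poking at __pmID_int directly */
    static pmID
    set_pmid_domain(pmID pmid, int domain)
    {
        return pmid_build(domain, pmid_cluster(pmid), pmid_item(pmid));
    }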
diff --git a/src/pmdas/solaris/disk.c b/src/pmdas/solaris/disk.c
new file mode 100644
index 0000000..70261c2
--- /dev/null
+++ b/src/pmdas/solaris/disk.c
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2010 Max Matveev. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "common.h"
+#include <libdevinfo.h>
+
+#define SOLARIS_PMDA_TRACE (DBG_TRACE_APPL0|DBG_TRACE_APPL2)
+
+typedef struct {
+ int fetched;
+ int err;
+ kstat_t *ksp;
+ kstat_io_t iostat;
+ kstat_t *sderr;
+ int sderr_fresh;
+} ctl_t;
+
+static di_devlink_handle_t devlink_hndl = DI_LINK_NIL;
+static di_node_t di_root = DI_NODE_NIL;
+
+static ctl_t *
+getDiskCtl(pmInDom dindom, const char *name)
+{
+ ctl_t *ctl = NULL;
+ int inst;
+ int rv = pmdaCacheLookupName(dindom,name, &inst, (void **)&ctl);
+
+ if (rv == PMDA_CACHE_ACTIVE)
+ return ctl;
+
+ if ((rv == PMDA_CACHE_INACTIVE) && ctl) {
+ rv = pmdaCacheStore(dindom, PMDA_CACHE_ADD, name, ctl);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot reactivate cached data for disk '%s': %s\n",
+ name, pmErrStr(rv));
+ return NULL;
+ }
+ } else {
+ if ((ctl = (ctl_t *)calloc(1, sizeof(ctl_t))) == NULL) {
+ __pmNotifyErr(LOG_WARNING,
+ "Out of memory to keep state for disk '%s'\n",
+ name);
+ return NULL;
+ }
+
+ rv = pmdaCacheStore(dindom, PMDA_CACHE_ADD, name, ctl);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot cache data for disk '%s': %s\n",
+ name, pmErrStr(rv));
+ free(ctl);
+ return NULL;
+ }
+ }
+ return ctl;
+}
+
+static void
+disk_walk_chains(pmInDom dindom)
+{
+ kstat_t *ksp;
+ kstat_ctl_t *kc;
+
+ if ((kc = kstat_ctl_update()) == NULL)
+ return;
+
+ for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
+ ctl_t *ctl;
+
+ if ((strcmp(ksp->ks_class, "disk") == 0) &&
+ (ksp->ks_type == KSTAT_TYPE_IO)) {
+ if ((ctl = getDiskCtl(dindom, ksp->ks_name)) == NULL)
+ continue;
+
+ ctl->ksp = ksp;
+ ctl->fetched = 0;
+ } else if (strcmp(ksp->ks_class, "device_error") == 0) {
+ char *comma;
+ char modname[KSTAT_STRLEN];
+
+ strcpy(modname, ksp->ks_name);
+ if ((comma = strchr(modname, ',')) == NULL)
+ continue;
+
+ *comma = '\0';
+ if ((ctl = getDiskCtl(dindom, modname)) == NULL)
+ continue;
+ ctl->sderr = ksp;
+ ctl->sderr_fresh = 0;
+ }
+ }
+}
+
+void
+disk_init(int first)
+{
+ pmInDom dindom = indomtab[DISK_INDOM].it_indom;
+
+ if (!first)
+ /* TODO ... not sure if/when we'll use this re-init hook */
+ return;
+
+ pmdaCacheOp(dindom, PMDA_CACHE_LOAD);
+ disk_walk_chains(dindom);
+ pmdaCacheOp(dindom, PMDA_CACHE_SAVE);
+}
+
+void
+disk_prefetch(void)
+{
+ if (di_root != DI_NODE_NIL) {
+ di_fini(di_root);
+ di_root = DI_NODE_NIL;
+ }
+
+ if (devlink_hndl != DI_LINK_NIL) {
+ di_devlink_fini(&devlink_hndl);
+ devlink_hndl = DI_LINK_NIL;
+ }
+ pmdaCacheOp(indomtab[DISK_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ disk_walk_chains(indomtab[DISK_INDOM].it_indom);
+ pmdaCacheOp(indomtab[DISK_INDOM].it_indom, PMDA_CACHE_SAVE);
+}
+
+static __uint64_t
+disk_derived(pmdaMetric *mdesc, int inst, const kstat_io_t *iostat)
+{
+ pmID pmid;
+ __pmID_int *ip = (__pmID_int *)&pmid;
+ __uint64_t val;
+
+ pmid = mdesc->m_desc.pmid;
+ ip->domain = 0;
+
+// from kstat_io_t ...
+//
+// u_longlong_t nread; /* number of bytes read */
+// u_longlong_t nwritten; /* number of bytes written */
+// uint_t reads; /* number of read operations */
+// uint_t writes; /* number of write operations */
+//
+
+ switch (pmid) {
+ case PMDA_PMID(SCLR_DISK,2): /* disk.all.total */
+ case PMDA_PMID(SCLR_DISK,12): /* disk.dev.total */
+ val = iostat->reads + iostat->writes;
+ break;
+
+ case PMDA_PMID(SCLR_DISK,5): /* disk.all.total_bytes */
+ case PMDA_PMID(SCLR_DISK,15): /* disk.dev.total_bytes */
+ val = iostat->nread + iostat->nwritten;
+ break;
+
+    /* iostat->wcnt and iostat->rcnt are 32 bit integers;
+     * these two metrics must be derived because the exported
+     * metrics use 64 bit integers to avoid overflows during
+     * accumulation */
+ case PMDA_PMID(SCLR_DISK,7): /* disk.all.wait.count */
+ val = iostat->wcnt;
+ break;
+    case PMDA_PMID(SCLR_DISK,9):	/* disk.all.run.count */
+ val = iostat->rcnt;
+ break;
+
+ default:
+ fprintf(stderr, "disk_derived: Botch: no method for pmid %s\n",
+ pmIDStr(mdesc->m_desc.pmid));
+ val = 0;
+ break;
+ }
+
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE) {
+ /* desperate */
+ fprintf(stderr, "disk_derived: pmid %s inst %d val %llu\n",
+ pmIDStr(mdesc->m_desc.pmid), inst, (unsigned long long)val);
+ }
+#endif
+
+ return val;
+}
+
+static int
+fetch_disk_data(kstat_ctl_t *kc, const pmdaMetric *mdesc, ctl_t *ctl,
+ const char *diskname)
+{
+ if (ctl->fetched == 1)
+ return 1;
+
+ if (ctl->ksp == NULL)
+ return 0;
+
+    if (kstat_read(kc, ctl->ksp, &ctl->iostat) == -1) {
+ if (ctl->err == 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Error: disk_fetch(pmid=%s disk=%s ...) - "
+ "kstat_read(kc=%p, ksp=%p, ...) failed: %s\n",
+ pmIDStr(mdesc->m_desc.pmid), diskname,
+ kc, ctl->ksp, osstrerror());
+ }
+ ctl->err++;
+ ctl->fetched = -1;
+ return 0;
+ }
+
+ ctl->fetched = 1;
+ if (ctl->err != 0) {
+ __pmNotifyErr(LOG_INFO,
+ "Success: disk_fetch(pmid=%s disk=%s ...) "
+ "after %d errors as previously reported\n",
+ pmIDStr(mdesc->m_desc.pmid), diskname, ctl->err);
+ ctl->err = 0;
+ }
+
+ return 1;
+}
+
+static int
+get_devlink_path(di_devlink_t devlink, void *arg)
+{
+ const char **p = arg;
+ *p = di_devlink_path(devlink);
+ return DI_WALK_TERMINATE;
+}
+
+static int
+fetch_disk_devlink(const kstat_t *ksp, pmAtomValue *atom)
+{
+ di_node_t n;
+
+ if (di_root == DI_NODE_NIL) {
+ if ((di_root = di_init("/", DINFOCPYALL)) == DI_NODE_NIL)
+ return 0;
+ }
+
+ if (devlink_hndl == DI_LINK_NIL) {
+ if ((devlink_hndl = di_devlink_init(NULL, DI_MAKE_LINK)) == DI_LINK_NIL)
+ return 0;
+ }
+
+ if ((n = di_drv_first_node(ksp->ks_module, di_root)) == DI_NODE_NIL) {
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE) {
+ fprintf(stderr,"No nodes for %s: %s\n",
+ ksp->ks_name, osstrerror());
+ }
+#endif
+ return 0;
+ }
+
+ do {
+ if (di_instance(n) == ksp->ks_instance) {
+ di_minor_t minor = di_minor_next(n, DI_MINOR_NIL);
+ char *path;
+ char *devlink = NULL;
+
+ if (minor == DI_MINOR_NIL) {
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE) {
+ fprintf (stderr, "No minors of %s: %s\n",
+ ksp->ks_name, osstrerror());
+ }
+#endif
+ return 0;
+ }
+ path = di_devfs_minor_path(minor);
+ di_devlink_walk(devlink_hndl, NULL, path, 0, &devlink,
+ get_devlink_path);
+ di_devfs_path_free(path);
+
+ if (devlink) {
+ atom->cp = devlink;
+ return 1;
+ }
+ return 0;
+ }
+ n = di_drv_next_node(n);
+ } while (n != DI_NODE_NIL);
+ return 0;
+}
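The devlink lookup above chains several libdevinfo calls; in outline (a summary of the code above, not new behaviour):

    /* outline of fetch_disk_devlink():
     *   di_init("/", DINFOCPYALL)           - snapshot the device tree (cached)
     *   di_devlink_init(NULL, DI_MAKE_LINK) - open the /dev link database
     *   di_drv_first_node()/di_drv_next_node() - find the node whose driver
     *                                         instance matches the kstat
     *   di_devfs_minor_path()               - /devices path of the first minor
     *   di_devlink_walk()                   - map it to a public /dev link
     */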
+
+static int
+get_instance_value(pmdaMetric *mdesc, pmInDom dindom, int inst,
+ pmAtomValue *atom)
+{
+ ctl_t *ctl;
+ char *diskname;
+ uint64_t ull;
+ ptrdiff_t offset = ((metricdesc_t *)mdesc->m_user)->md_offset;
+ kstat_ctl_t *kc;
+
+ if ((kc = kstat_ctl_update()) == NULL)
+ return 0;
+
+ if (pmdaCacheLookup(dindom, inst, &diskname,
+ (void **)&ctl) != PMDA_CACHE_ACTIVE) {
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE) {
+ fprintf(stderr,
+ "Unexpected cache result - instance %d "
+ "is not active in disk indom cache\n",
+ inst);
+ }
+#endif
+ return 0;
+ }
+
+ if (offset == -1) {
+ if (pmid_item(mdesc->m_desc.pmid) == 35) { /* hinv.disk.devlink */
+ return fetch_disk_devlink(ctl->ksp, atom);
+ }
+ if (!fetch_disk_data(kc, mdesc, ctl, diskname))
+ return 0;
+ ull = disk_derived(mdesc, inst, &ctl->iostat);
+ } else if (offset > sizeof(ctl->iostat)) { /* device_error */
+ if (ctl->sderr) {
+ kstat_named_t *kn;
+ char * m = (char *)offset;
+
+ if (!ctl->sderr_fresh) {
+ ctl->sderr_fresh = (kstat_read(kc, ctl->sderr, NULL) != -1);
+
+ if (!ctl->sderr_fresh)
+ return 0;
+ }
+
+ if ((kn = kstat_data_lookup(ctl->sderr, m)) == NULL) {
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE)
+ fprintf(stderr, "No %s in %s\n", m, diskname);
+#endif
+ return 0;
+ }
+
+ return kstat_named_to_pmAtom(kn, atom);
+ }
+ return 0;
+ } else {
+ char *iop = ((char *)&ctl->iostat) + offset;
+ if (!fetch_disk_data(kc, mdesc, ctl, diskname))
+ return 0;
+ if (mdesc->m_desc.type == PM_TYPE_U64) {
+ __uint64_t *ullp = (__uint64_t *)iop;
+ ull = *ullp;
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE) {
+ /* desperate */
+ fprintf(stderr, "disk_fetch: pmid %s inst %d val %llu\n",
+ pmIDStr(mdesc->m_desc.pmid), inst,
+ (unsigned long long)*ullp);
+ }
+#endif
+ }
+ else {
+ __uint32_t *ulp = (__uint32_t *)iop;
+ ull = *ulp;
+#ifdef PCP_DEBUG
+ if ((pmDebug & SOLARIS_PMDA_TRACE) == SOLARIS_PMDA_TRACE) {
+ /* desperate */
+ fprintf(stderr, "disk_fetch: pmid %s inst %d val %u\n",
+ pmIDStr(mdesc->m_desc.pmid), inst, *ulp);
+ }
+#endif
+ }
+ }
+
+    if (mdesc->m_desc.type == PM_TYPE_U64) {
+	/* export as a 64-bit value; "+=" accumulates across instances
+	 * when disk_fetch() walks every disk for the disk.all metrics */
+	atom->ull += ull;
+    }
+    else {
+	/* else export as a 32-bit value, accumulated the same way */
+	atom->ul += (__uint32_t)ull;
+    }
+
+ return 1;
+}
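To summarise the dispatch above: each metricdesc entry encodes md_offset in one of three ways, and get_instance_value() distinguishes them purely by value range. A comment-style summary, with illustrative example values:

    /* How md_offset is interpreted above (example values are illustrative):
     *
     *   offsetof(kstat_io_t, nread)   small value: byte offset into the
     *                                 cached kstat_io_t snapshot
     *   (ptrdiff_t)"Soft Errors"      pointer value, necessarily larger than
     *                                 sizeof(kstat_io_t): a statistic name
     *                                 for kstat_data_lookup() on the
     *                                 device_error kstat
     *   -1                            derived metric (disk_derived) or the
     *                                 hinv.disk.devlink special case
     */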
+
+int
+disk_fetch(pmdaMetric *mdesc, int inst, pmAtomValue *atom)
+{
+ int i;
+ pmInDom dindom = indomtab[DISK_INDOM].it_indom;
+
+ if (pmid_item(mdesc->m_desc.pmid) == 20) { /* hinv.ndisk */
+ i = pmdaCacheOp(dindom, PMDA_CACHE_SIZE_ACTIVE);
+ if (i < 0) {
+ return 0;
+ } else {
+ atom->ul = i;
+ return 1;
+ }
+ }
+
+ memset(atom, 0, sizeof(*atom));
+
+ if (inst == PM_IN_NULL) {
+ pmdaCacheOp(dindom,PMDA_CACHE_WALK_REWIND);
+ while ((i = pmdaCacheOp(dindom, PMDA_CACHE_WALK_NEXT)) != -1) {
+ if (get_instance_value(mdesc, dindom, i, atom) == 0)
+ return 0;
+ }
+ return 1;
+ }
+
+ return get_instance_value(mdesc, dindom, inst, atom);
+}
diff --git a/src/pmdas/solaris/help b/src/pmdas/solaris/help
new file mode 100644
index 0000000..4940247
--- /dev/null
+++ b/src/pmdas/solaris/help
@@ -0,0 +1,729 @@
+#
+# Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Solaris PMDA help file in the ASCII format
+#
+# lines beginning with a # are ignored
+# lines beginning @ introduce a new entry of the form
+# @ metric_name oneline-text
+# help text goes
+# here over multiple lines
+# ...
+#
+# the metric_name is decoded against the default PMNS -- as a special case,
+# a name of the form NNN.MM (for numeric NNN and MM) is interpreted as an
+# instance domain identification, and the text describes the instance domain
+#
+# blank lines before the @ line are ignored
+#
+
+@ kernel.all.cpu.idle Amount of time CPUs were idle
+@ kernel.all.cpu.user Amount of time spent executing userspace tasks
+@ kernel.all.cpu.sys Amount of time spent executing kernel code
+@ kernel.all.cpu.wait.total Amount of time CPU spent waiting for events
+@ kernel.percpu.cpu.user Amount of time spent executing userspace tasks by each CPU
+@ kernel.percpu.cpu.idle Amount of time each CPU was idle
+@ kernel.percpu.cpu.sys Amount of time each CPU spent executing kernel code
+@ kernel.percpu.cpu.wait.total Amount of time each CPU spent waiting for events
+@ disk.all.read Number of read requests aggregated across all disks
+@ disk.all.write Number of write requests aggregated across all disks
+@ disk.all.total Number of IO requests aggregated across all disks
+@ disk.all.read_bytes Number of bytes read from all disks
+@ disk.all.write_bytes Number of bytes written to all disks
+@ disk.all.total_bytes Number of bytes transferred to and from all disks
+@ disk.dev.read Number of read requests for each individual disk
+@ disk.dev.write Number of write requests for each individual disk
+@ disk.dev.total Number of IO requests for each individual disk
+@ disk.dev.read_bytes Number of bytes read from each individual disk
+@ disk.dev.write_bytes Number of bytes written to each individual disk
+@ disk.dev.total_bytes Number of bytes transferred to and from each individual disk
+@ network.interface.mtu Maximum Transmission Unit of a network interface
+Maximum Transmission Unit is the largest IP datagram size which can be
+transferred over the data link.
+@ network.interface.in.bytes Number of bytes received by a network interface
+@ network.interface.in.errors Number of receive errors per network interface
+Number of receive errors per network interface. The errors counted towards
+this metric are: IP header errors, packets larger than the link MTU, packets
+delivered to an unknown address, packets sent to an unknown IP protocol,
+truncated packets, and packets discarded because no route to the destination
+was available.
+@ network.interface.in.drops Number of packets dropped by a network interface
+Number of packets discarded due to lack of space during input processing.
+@ network.interface.in.delivers Number of packets delivered to ULPs
+Number of packets delivered for further processing by the upper-layer
+protocols.
+@ network.interface.in.bcasts Number of broadcast packets received by a network interface
+@ network.interface.in.packets Number of IP packets received by a network interface
+@ network.interface.in.mcasts Number of multicast packets received by a network interface
+@ network.interface.out.packets Number of packets sent by a network interface
+@ network.interface.out.bytes Number of bytes sent by a network interface
+@ network.interface.out.errors Number of send errors per network interface
+@ network.interface.out.bcasts Number of broadcast packets sent by a network interface
+@ network.interface.out.mcasts Number of multicast packets sent by a network interface
+@ network.interface.out.drops Number of packets discarded by a network interface
+Number of packets discarded due to lack of space during output processing.
+@ network.udp.ipackets Number of UDP packets received
+@ network.udp.opackets Number of UDP packets sent
+@ network.udp.ierrors Number of receive errors in UDP processing
+@ network.udp.oerrors Number of send errors in UDP processing
+@ network.udp.noports Number of UDP packets received on an unknown UDP port
+Number of UDP packets received for which no listening port could be found.
+This counter is reported on a per-interface basis and aggregated by the PMDA.
+
+@ network.udp.overflows Number of UDP packets dropped due to queue overflow
+Number of UDP packets dropped due to queue overflow.
+This counter is reported on a per-interface basis and aggregated by the PMDA.
+
+@zpool.capacity Total capacity of a zpool in bytes
+@zpool.used Total space used on a pool
+@zpool.checksum_errors Number of checksum errors per zpool
+@zpool.self_healed Number of bytes healed
+@zpool.in.bytes Counter of bytes read from a zpool
+@zpool.out.bytes Counter of bytes written to a zpool
+@zpool.in.ops Counter of reads per zpool
+@zpool.out.ops Counter of writes per zpool
+@zpool.in.errors Counter of read errors per zpool
+@zpool.out.errors Counter of write errors per zpool
+@zpool.state Current state of zpool
+@zpool.state_int vs_aux << 8 | vs_state
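The packed value above can be split back into its components with a shift and a mask; a short sketch (the vs_state/vs_aux names follow the vdev status fields the formula refers to):

    /* sketch: decode zpool.state_int per the formula above */
    static void
    decode_state_int(unsigned int state_int,
                     unsigned int *vs_state, unsigned int *vs_aux)
    {
        *vs_state = state_int & 0xff;   /* vdev state, low 8 bits */
        *vs_aux = state_int >> 8;       /* auxiliary status */
    }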
+
+@zfs.available Amount of space available to the dataset
+The amount of space available to the dataset (a filesystem,
+a snapshot or a volume) and all its children. This is usually
+the amount of space available in the zpool which houses the
+dataset.
+
+@zfs.used.total Amount of space used by the dataset and its children
+The amount of space consumed by the filesystem, snapshot or
+volume and all its children. This amount does not include
+any reservation made by the dataset itself but does include
+the reservations of its children.
+
+@zfs.used.byme Amount of space used by the dataset itself
+This amount excludes any space used by the children of this dataset
+or any of its snapshots.
+
+@zfs.used.bysnapshots Amount of space used by the snapshots of the dataset
+The amount of space consumed by the snapshots of this dataset.
+
+@zfs.used.bychildren Amount of space used by descendants of the dataset
+The amount of space consumed by all the descendants of this dataset.
+
+@zfs.quota Maximum amount of space a dataset can use
+Quotas are used to restrict the growth of the datasets. If
+the quota is set to 0 then the size of the dataset is limited only
+by the size of the pool which houses this dataset.
+
+@zfs.reservation Minimum amount of space guaranteed to a dataset
+The amount of space which a dataset and its descendants are guaranteed
+to have available to them. This amount is taken off the quota of the
+dataset's parent.
+
+@zfs.compression Compression ratio of the dataset
+The compression ratio is expressed as a multiplier: to estimate how much
+space the data would occupy uncompressed, multiply the amount of space
+used by the dataset by the compression ratio (e.g. 10GB stored at a ratio
+of 2.5 represents roughly 25GB of uncompressed data).
+
+@zfs.copies Number of redundant copies of data
+The number of redundant copies does not include any copies made as
+part of the pool redundancy.
+
+@zfs.recordsize Recommended block size for files in filesystems
+By using the recommended block size, applications which deal with
+fixed-size records can improve I/O performance.
+
+@zfs.used.byrefreservation Space used by refreservation
+The amount of space used by a refreservation set on this
+filesystem, which would be freed if the refreservation was
+removed.
+
+@zfs.refreservation Minimum amount of space guaranteed to a filesystem
+The minimum amount of space guaranteed to a dataset, not
+including its descendants. Unlike reservation, refreservation is
+counted towards the total used space of a dataset.
+
+@zfs.refquota Amount of space a filesystem can consume
+The hard limit on the amount of space a filesystem, but not its
+descendants, can consume from the pool.
+
+@zfs.referenced Amount of space referenced by the filesystem
+The amount of data that is accessible by the filesystem. The data
+may be shared with other datasets in the pool.
+
+@zfs.nsnapshots Number of snapshots in the filesystem
+
+@zfs.snapshot.compression Compression ratio of the data in the snapshot
+The compression ratio is expressed as a multiplier: to estimate how much
+space the data would occupy uncompressed, multiply the amount of space
+used by the snapshot by the compression ratio.
+
+@zfs.snapshot.used Amount of space used by the snapshot
+
+@zfs.snapshot.referenced Amount of space referenced by the snapshot
+The amount of data that is accessible by the snapshot. The data
+may be shared with other datasets in the filesystem.
+
+
+@zpool.perdisk.state Current state per disk in zpool
+@zpool.perdisk.state_int vs_aux << 8 | vs_state
+@zpool.perdisk.checksum_errors Number of checksum errors per disk in zpool
+@zpool.perdisk.self_healed Number of bytes healed per disk in zpool
+@zpool.perdisk.in.errors Counter of read errors per disk in zpool
+@zpool.perdisk.out.errors Counter of write errors per disk in zpool
+
+@network.link.in.errors Number of input errors per link
+Counts input errors per link
+@network.link.in.packets Number of datagrams received by a link
+@network.link.in.bytes Number of bytes received by a link
+Counts the number of bytes received by a link. For physical links
+this is the raw counter of bytes received; for aggregated links
+it is the number of bytes received by all links in the aggregation
+group.
+@network.link.in.bcasts Number of broadcast datagrams received by a link
+@network.link.in.mcasts Number of multicast datagrams received by a link
+Counts multicast datagrams received by a link.
+@network.link.in.nobufs Number of input packets discarded
+Counts the number of packets discarded because of failure to allocate buffers
+@network.link.out.errors Number of output errors per link
+@network.link.out.packets Number of packets sent from a link
+@network.link.out.bytes Number of bytes sent from a link
+@network.link.out.bcasts Number of broadcast datagrams sent from a link
+@network.link.out.mcasts Number of multicast datagrams sent from a link
+@network.link.out.nobufs Number of output packets discarded
+Counts the number of packets discarded because of failure to allocate buffers
+@network.link.collisions Number of collisions detected per link
+@network.link.state Link state
+1 - Link is up, 2 - Link is down, 0 - unknown state
+@network.link.duplex Link duplex
+1 - Half duplex, 2 - Full duplex
+@network.link.speed Link speed in bytes per second
+@hinv.pagesize Memory page size
+The memory page size of the running kernel in bytes.
+@hinv.physmem Total physical system memory
+Total physical system memory size rounded down to the nearest page size
+boundary
+@pmda.uname identity and type of current system
+Identity and type of current system. The concatenation of the values
+returned from utsname(2), also similar to uname -a.
+@kernel.fsflush.scanned Number of pages scanned by fsflush daemon
+@kernel.fsflush.examined Number of pages examined by fsflush daemon
+@kernel.fsflush.coalesced Number of pages coalesced into larger page
+@kernel.fsflush.modified Number of modified pages written to disk
+@kernel.fsflush.locked Number of pages locked by fsflush daemon
+Pages which were considered to be of interest for further examination
+are locked before deciding if they could be coalesced, released or flushed
+to disk.
+@kernel.fsflush.released Number of free pages released by fsflush daemon
+@kernel.fsflush.time Amount of time fsflush daemon spent doing its work
+@mem.physmem Total physical system memory
+Total physical system memory size rounded down to the nearest page size
+boundary. This metric is the same as hinv.physmem but uses different
+units.
+@mem.freemem Amount of free memory in the system
+@mem.lotsfree Paging threshold
+If freemem falls below the lotsfree threshold then the pageout daemon
+starts its activity. The default value for lotsfree is 1/64 of physical
+memory or 512K, whichever is larger.
+@mem.availrmem Amount of resident memory in the system
+
+@kernel.all.io.bread Physical block reads across all CPUs
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.all.io.bwrite Physical block writes across all CPUs
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.all.io.lread Logical block reads across all CPUs
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.all.io.lwrite Logical block writes across all CPUs
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.all.io.phread Raw I/O reads across all CPUs
+@kernel.all.io.phwrite Raw I/O writes across all CPUs
+@kernel.all.io.intr Device interrupts across all CPUs
+
+@kernel.percpu.io.bread Physical block reads
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.percpu.io.bwrite Physical block writes
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.percpu.io.lread Logical block reads
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.percpu.io.lwrite Logical block writes
+This metric is only updated by reads and writes to UFS mounted filesystems;
+reads and writes to ZFS do not update this metric.
+@kernel.percpu.io.phread Raw I/O reads
+@kernel.percpu.io.phwrite Raw I/O writes
+@kernel.percpu.io.intr Device interrupts
+
+@hinv.ncpu Number of CPUs in the system
+@hinv.ndisk Number of disks in the system
+
+@kernel.all.trap Traps across all CPUs
+@kernel.all.pswitch Context switches across all CPUs
+@kernel.all.syscall Total number of system calls across all CPUs
+@kernel.all.sysexec Total number of calls from exec(2) family across all CPUs
+@kernel.all.sysfork Total number of new processes created across all CPUs
+@kernel.all.sysvfork Total number of new processes created across all CPUs
+Unlike fork, vfork does not copy all the virtual memory of the parent
+process into the child process and is mostly used to create a new system
+context for execve(2). vfork(2) calls are not counted towards kernel.all.sysfork.
+@kernel.all.sysread Total number of system calls from read(2) family across all CPUs
+@ kernel.all.syswrite Total number of system calls from write(2) family across all CPUs
+
+@kernel.percpu.trap Traps on each CPU
+@kernel.percpu.pswitch Context switches on each CPU
+@kernel.percpu.syscall Total number of system calls on each CPU
+@kernel.percpu.sysexec Total number of calls from exec(2) family on each CPU
+@kernel.percpu.sysfork Total number of new processes created on each CPU
+@kernel.percpu.sysvfork Total number of new processes created on each CPU
+Unlike fork, vfork does not copy all the virtual memory of the parent
+process into the child process and is mostly used to create a new system
+context for execve(2). vfork(2) calls are not counted towards kernel.percpu.sysfork.
+@kernel.percpu.sysread Total number of system calls from read(2) family on each CPU
+@kernel.percpu.syswrite Total number of system calls from write(2) family on each CPU
+
+@kernel.all.load Classic load average for 1, 5 and 15 minute intervals
+
+@kernel.all.cpu.wait.io Time spent waiting for I/O across all CPUs
+This metric is not updated by the OpenSolaris kernel.
+@kernel.all.cpu.wait.pio Time spent waiting for polled I/O across all CPUs
+This metric is not updated by the OpenSolaris kernel.
+@kernel.all.cpu.wait.swap Time spent waiting for swap across all CPUs
+This metric is not updated by the OpenSolaris kernel.
+@kernel.percpu.cpu.wait.io Time spent waiting for I/O on per-CPU basis
+This metric is not updated by the OpenSolaris kernel.
+@kernel.percpu.cpu.wait.pio Time spent waiting for polled I/O on per-CPU basis
+This metric is not updated by the OpenSolaris kernel.
+@kernel.percpu.cpu.wait.swap Time spent waiting for swap on per-CPU basis
+This metric is not updated by the OpenSolaris kernel.
+
+@zfs.arc.size Total amount of memory used by ZFS ARC
+@zfs.arc.min_size Lower limit of the amount of memory for ZFS ARC
+@zfs.arc.max_size Upper limit of the amount of memory for ZFS ARC
+The default is to use 7/8 of total physical memory.
+@zfs.arc.mru_size Amount of memory used by the most recently used pages
+@zfs.arc.target_size "Ideal" size of the cache based on aging
+@zfs.arc.hits.total Number of times data is found in the cache
+@zfs.arc.hits.mfu Number of times data is found in the most frequently used buffers
+@zfs.arc.hits.mru Number of times data is found in the most recently used buffers
+@zfs.arc.hits.mfu_ghost Number of times MFU ghost buffer is accessed
+A ghost buffer is a buffer which is no longer cached but is still
+linked into the hash.
+@zfs.arc.hits.mru_ghost Number of times MRU ghost buffer is accessed
+A ghost buffer is a buffer which is no longer cached but is still
+linked into the hash.
+@zfs.arc.hits.demand_data Number of times file data is found in the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.hits.demand_metadata Number of times filesystem metadata is found in the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.hits.prefetch_data Number of times speculative request for data is satisfied from the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.hits.prefetch_metadata Number of times speculative request for metadata is satisfied from the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.misses.total Number of times the data is not found in the cache
+@zfs.arc.misses.demand_data Number of times file data is not found in the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.misses.demand_metadata Number of times filesystem metadata is not found in the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.misses.prefetch_data Number of times speculatively accessed file data is not found in the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@zfs.arc.misses.prefetch_metadata Number of times speculatively accessed filesystem metadata is not found in the cache
+ARC statistics provide separate counters for demand vs prefetch
+and data vs metadata accesses: demand is a result of the direct
+request for a particular data, prefetch is a result of speculative
+request for a particular data.
+@pmda.prefetch.time Amount of time spent extracting information about a group of metrics
+Each metric belongs to a prefetch group. When a client asks for the metric
+to be fetched the information for the group must be extracted from the kernel.
+@pmda.prefetch.count Number of times each group of metrics was updated
+
+@pmda.metric.time Amount of time spent extracting information about individual metrics
+Requesting multiple instances of the same metrics counts against the metric
+itself and not against the individual instances
+@pmda.metric.count Number of times individual metrics have been fetched
+Requesting multiple instances of the same metrics counts as multiple hits
+against the metric itself
+
+@disk.all.wait.time Amount of time IO requests spent waiting for service
+Amount of time IO transactions spent waiting to be serviced, i.e. the
+transaction has been accepted for processing but processing has not yet
+begun. Each waiting transaction adds to the total time, which means that
+if multiple transactions are waiting then the total time for a sampling
+interval may be larger than the interval itself.
+
+@disk.dev.wait.time Amount of time IO requests spent waiting for service
+Amount of time IO transactions spent waiting to be serviced, i.e. the
+transaction has been accepted for processing but processing has not yet
+begun. Each waiting transaction adds to the total time, which means that
+if multiple transactions are waiting then the total time for a sampling
+interval may be larger than the interval itself.
+
+@disk.all.wait.count Number of transactions waiting to be serviced
+Number of transactions accepted for processing but for which the processing
+has not yet begun.
+@disk.dev.wait.count Number of transactions waiting to be serviced
+Number of transactions accepted for processing but for which the processing
+has not yet begun.
+
+@disk.all.run.time Amount of time spent processing IO requests
+@disk.dev.run.time Amount of time spent processing IO requests
+@disk.all.run.count Number of transactions being processed
+@disk.dev.run.count Number of transactions being processed
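Because each waiting (or running) transaction accumulates time in parallel, dividing the growth of one of these time counters by the length of the sampling interval yields the average queue depth over that interval. A sketch of the derivation, with illustrative names and both arguments assumed to be in the same time units:

    /* average number of transactions waiting during a sampling interval */
    static double
    avg_wait_depth(unsigned long long wait_time_prev,
                   unsigned long long wait_time_now,
                   double interval)
    {
        return (double)(wait_time_now - wait_time_prev) / interval;
    }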
+
+
+# from i86pc/os/cpuid.c
+# /*
+# * 8bit APIC IDs on dual core Pentiums
+# * look like this:
+# *
+# * +-----------------------+------+------+
+# * | Physical Package ID | MC | HT |
+# * +-----------------------+------+------+
+# * <------- chipid -------->
+# * <------- coreid --------------->
+# * <--- clogid -->
+# * <------>
+# * pkgcoreid
+# *
+# * Where the number of bits necessary to
+# * represent MC and HT fields together equals
+# * to the minimum number of bits necessary to
+# * store the value of cpi->cpi_ncpu_per_chip.
+# * Of those bits, the MC part uses the number
+# * of bits necessary to store the value of
+# * cpi->cpi_ncore_per_chip.
+# */
+#
+@hinv.cpu.brand Marketing name of CPU
+@hinv.cpu.clock Current CPU clock frequency
+On CPUs which support dynamic clock frequency changes, the current clock
+frequency may differ from the nominal ("maximum") clock frequency specified
+by the manufacturer.
+@hinv.cpu.maxclock Maximum clock frequency supported by CPU
+Nominal CPU clock frequency as specified by the manufacturer.
+@hinv.cpu.frequencies List of clock frequencies supported by CPU
+@hinv.cpu.implementation Details of CPU implementation
+@hinv.cpu.chip_id Chip or Socket identifier of the CPU
+Logical CPUs can share a single chip identifier.
+@hinv.cpu.clog_id Logical core identifier
+The logical core identifier combines the CPU core identifier with the
+virtual CPU (aka hyperthread) identifier.
+@hinv.cpu.core_id CPU core identifier
+The CPU core identifier combines the chip identifier and the per-chip
+core identifier. If cores support more than one virtual CPU per core
+then the same core identifier is shared across several virtual CPUs.
+@hinv.cpu.pkg_core_id Per-chip core identifier
+This identifier is used to identify individual cores within the
+package. If a core supports more than one virtual CPU per core then the
+same core identifier is shared across several virtual CPUs.
+@hinv.cpu.cstate Current CPU idle state
+@hinv.cpu.maxcstates Maximum number of idle states supported by the CPU
+Information about cstates is available via kstat(1M).
+@hinv.cpu.ncores Number of CPU cores per physical chip
+@hinv.cpu.ncpus Number of virtual CPUs per physical chip
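Following the cpuid.c layout quoted above, the identifiers can be recovered from an 8-bit APIC ID given the per-chip counts exported as hinv.cpu.ncpus and hinv.cpu.ncores. A sketch, with every function and variable name illustrative:

    /* minimum number of bits needed to represent values 0..n-1 */
    static int
    bits_for(int n)
    {
        int b = 0;
        while ((1 << b) < n)
            b++;
        return b;
    }

    static void
    decode_apic_id(int apicid, int ncpu_per_chip, int ncore_per_chip,
                   int *chipid, int *coreid, int *clogid, int *pkgcoreid)
    {
        int clog_bits = bits_for(ncpu_per_chip);   /* MC + HT fields */
        int ht_bits = clog_bits - bits_for(ncore_per_chip);

        *clogid = apicid & ((1 << clog_bits) - 1); /* MC | HT */
        *chipid = apicid >> clog_bits;             /* physical package */
        *coreid = apicid >> ht_bits;               /* chipid | MC */
        *pkgcoreid = *clogid >> ht_bits;           /* MC alone */
    }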
+
+@disk.dev.errors.soft Number of soft errors per device
+@disk.dev.errors.hard Number of hard errors per device
+@disk.dev.errors.transport Number of transport errors per device
+@disk.dev.errors.media Number of media errors per device
+@disk.dev.errors.recoverable Number of recoverable errors per device
+@disk.dev.errors.notready Number of times device reported as not ready
+@disk.dev.errors.nodevice Number of times device was found missing
+@disk.dev.errors.badrequest Number of illegal requests per device
+@disk.dev.errors.pfa Number of times failure prediction threshold has been exceeded
+@hinv.disk.vendor Device Vendor
+Can be reported as ATA if a SATA device is behind a SAS expander
+@hinv.disk.product Device name
+Vendor's device name (up to 16 characters long)
+@hinv.disk.revision Device Revision
+@hinv.disk.serial Device Serial Number
+@hinv.disk.capacity Device Capacity
+For removable devices capacity of the media is reported.
+
+@kernel.fs.vnops.access Number of VOP_ACCESS calls per filesystem
+VOP_ACCESS is used by access(2) system call.
+@kernel.fs.vnops.addmap Number of VOP_ADDMAP calls per filesystem
+VOP_ADDMAP is used to manage reference counting of the vnode used by
+mmap(2) operations.
+@kernel.fs.vnops.close Number of VOP_CLOSE calls per filesystem
+VOP_CLOSE is called every time a close(2) system call is called
+@kernel.fs.vnops.cmp Number of VOP_CMP calls per filesystem
+VOP_CMP is used to check if two vnodes are "equal" to each other, i.e.
+both refer to the same filesystem object.
+@kernel.fs.vnops.create Number of VOP_CREATE calls per filesystem
+VOP_CREATE is used to create regular files and device or FIFO nodes.
+@kernel.fs.vnops.delmap Number of VOP_DELMAP calls
+VOP_DELMAP is used to destroy a previously created memory-mapped region
+of a file.
+@kernel.fs.vnops.dispose Number of VOP_DISPOSE calls per filesystem
+VOP_DISPOSE is used to dispose of (free or invalidate) a page associated
+with a file.
+@kernel.fs.vnops.dump Number of VOP_DUMP calls per filesystem
+VOP_DUMP is used to transfer data from the frozen kernel directly
+to the dump device
+@kernel.fs.vnops.dumpctl Number of VOP_DUMPCTL calls per filesystem
+VOP_DUMPCTL sets up context used by VOP_DUMP call. It is used to
+allocate, free or search for data blocks on the dump device.
+@kernel.fs.vnops.fid Number of VOP_FID calls per filesystem
+VOP_FID is used to get file identifier which can be used instead of the
+file name in some operations. NFS server is one known user of this vnode
+operation.
+@kernel.fs.vnops.frlock Number of VOP_FRLOCK calls per filesystem
+VOP_FRLOCK is used to implement file record locking used by flock(2)
+@kernel.fs.vnops.fsync Number of VOP_FSYNC calls per filesystem
+VOP_FSYNC is used to implement fsync(2) system call which flushes
+data for a specific file to disk.
+@kernel.fs.vnops.getattr Number of VOP_GETATTR calls per filesystem
+VOP_GETATTR is used to extract vnode attributes. It is used as part of
+many system calls which manipulate file attributes, e.g. chmod(2), stat(2),
+utimes(2) etc.
+@kernel.fs.vnops.getpage Number of VOP_GETPAGE calls per filesystem
+VOP_GETPAGE is used to allocate pages (could be several at a time) to cover
+a region in a file.
+@kernel.fs.vnops.getsecattr Number of VOP_GETSECATTR calls per filesystem
+VOP_GETSECATTR is used to extract ACL entries associated with a file.
+@kernel.fs.vnops.inactive Number of VOP_INACTIVE calls per filesystem
+VOP_INACTIVE is used to destroy vnode before it is removed from the
+cache or reused.
+@kernel.fs.vnops.ioctl Number of VOP_IOCTL calls per filesystem
+VOP_IOCTL is used to implement ioctl(2) system call.
+@kernel.fs.vnops.link Number of VOP_LINK calls per filesystem
+VOP_LINK is used to implement support for hard links
+@kernel.fs.vnops.lookup Number of VOP_LOOKUP calls per filesystem
+VOP_LOOKUP is used to translate filename to vnode.
+@kernel.fs.vnops.map Number of VOP_MAP calls per filesystem
+VOP_MAP is used to create a new memory-mapped region of a file
+@kernel.fs.vnops.mkdir Number of VOP_MKDIR calls per filesystem
+VOP_MKDIR is used to create directories
+@kernel.fs.vnops.open Number of VOP_OPEN calls per filesystem
+VOP_OPEN is called every time open(2) system call is called.
+@kernel.fs.vnops.pageio Number of VOP_PAGEIO calls per filesystem
+VOP_PAGEIO is similar to VOP_GETPAGE and VOP_PUTPAGE and can be used when
+either of the other two are less efficient, e.g. in the case when pages
+will be reused after the IO is done.
+@kernel.fs.vnops.pathconf Number of VOP_PATHCONF calls per filesystem
+VOP_PATHCONF is used to obtain information about filesystem's parameters
+reported by pathconf(2) system call
+@kernel.fs.vnops.poll Number of VOP_POLL calls per filesystem
+VOP_POLL is used to implement the poll(2) system call
+@kernel.fs.vnops.putpage Number of VOP_PUTPAGE calls per filesystem
+VOP_PUTPAGE is used to release pages which have been used to hold
+data from a file
+@kernel.fs.vnops.read Number of VOP_READ calls per filesystem
+VOP_READ is used to implement read(2) system call
+@kernel.fs.vnops.readdir Number of VOP_READDIR calls per filesystem
+VOP_READDIR is used to read directory entries
+@kernel.fs.vnops.readlink Number of VOP_READLINK calls per filesystem
+VOP_READLINK is used to read the information about the target of the symbolic
+link
+@kernel.fs.vnops.realvp Number of VOP_REALVP calls per filesystem
+VOP_REALVP is used to traverse stacking filesystems and extract information
+about the vnode which refers to the "real" filesystem object.
+@kernel.fs.vnops.remove Number of VOP_REMOVE calls per filesystem
+VOP_REMOVE is used to remove entries from a directory.
+@kernel.fs.vnops.rename Number of VOP_RENAME calls per filesystem
+VOP_RENAME is used to implement rename(2) system call
+@kernel.fs.vnops.rmdir Number of VOP_RMDIR calls per filesystem
+VOP_RMDIR is used to implement rmdir(2) system call
+@kernel.fs.vnops.rwlock Number of VOP_RWLOCK calls per filesystem
+VOP_RWLOCK and VOP_RWUNLOCK are used to protect access to vnode data.
+@kernel.fs.vnops.rwunlock Number of VOP_RWUNLOCK calls per filesystem
+VOP_RWLOCK and VOP_RWUNLOCK are used to protect access to vnode data.
+@kernel.fs.vnops.seek Number of VOP_SEEK calls per filesystem
+VOP_SEEK is used by lseek(2). Because vnodes can be shared across multiple
+open file instances, VOP_SEEK does not usually change the position of the
+file pointer; it is instead used to verify the offset before it is changed.
+@kernel.fs.vnops.setattr Number of VOP_SETATTR calls per filesystem
+VOP_SETATTR is used to change vnode attributes which are modified by system
+calls like chmod(2), chown(2), utimes(2) etc.
+@kernel.fs.vnops.setfl Number of VOP_SETFL calls per filesystem
+VOP_SETFL is used to implement the fcntl(2) F_SETFL option.
+Currently only the sockfs pseudo-filesystem implements this vnode operation.
+@kernel.fs.vnops.setsecattr Number of VOP_SETSECATTR calls per filesystem
+VOP_SETSECATTR is used to change ACL entries
+@kernel.fs.vnops.shrlock Number of VOP_SHRLOCK calls per filesystem
+VOP_SHRLOCK is usually used to implement CIFS and NLMv3 shared reservations.
+@kernel.fs.vnops.space Number of VOP_SPACE calls per filesystem
+VOP_SPACE is used to provide optimized support for growing and shrinking files.
+The F_FREESP option of fcntl(2) uses this vnode operation to implement the
+ftruncate(3C) function.
+@kernel.fs.vnops.symlink Number of VOP_SYMLINK calls per filesystem
+VOP_SYMLINK is used to create symbolic links.
+@kernel.fs.vnops.vnevent Number of VOP_VNEVENT calls per filesystem
+VOP_VNEVENT is used to check if a filesystem supports vnode event
+notifications for operations which change the names of files.
+@kernel.fs.vnops.write Number of VOP_WRITE calls per filesystem
+VOP_WRITE is used to implement write(2) system call
+@kernel.fs.read_bytes Number of bytes read from a specific filesystem
+@kernel.fs.readdir_bytes Number of bytes containing directory entries read from a specific filesystem
+@kernel.fs.write_bytes Number of bytes written to a specific filesystem
+
+@kernel.fstype.vnops.access Number of VOP_ACCESS calls for all filesystems of a given type
+VOP_ACCESS is used by access(2) system call.
+@kernel.fstype.vnops.addmap Number of VOP_ADDMAP calls for all filesystems of a given type
+VOP_ADDMAP is used to manage reference counting of the vnode used by
+mmap(2) operations.
+@kernel.fstype.vnops.close Number of VOP_CLOSE calls for all filesystems of a given type
+VOP_CLOSE is called every time a close(2) system call is called
+@kernel.fstype.vnops.cmp Number of VOP_CMP calls for all filesystems of a given type
+VOP_CMP is used to check if two vnodes are "equal" to each other, i.e.
+both refer to the same filesystem object.
+@kernel.fstype.vnops.create Number of VOP_CREATE calls for all filesystems of a given type
+VOP_CREATE is used to create regular files and device or FIFO nodes.
+@kernel.fstype.vnops.delmap Number of VOP_DELMAP calls for all filesystems of a given type
+VOP_DELMAP is used to destroy a previously created memory-mapped region
+of a file.
+@kernel.fstype.vnops.dispose Number of VOP_DISPOSE calls for all filesystems of a given type
+VOP_DISPOSE is used to dispose of (free or invalidate) a page associated
+with a file.
+@kernel.fstype.vnops.dump Number of VOP_DUMP calls for all filesystems of a given type
+VOP_DUMP is used to transfer data from the frozen kernel directly
+to the dump device
+@kernel.fstype.vnops.dumpctl Number of VOP_DUMPCTL calls for all filesystems of a given type
+VOP_DUMPCTL sets up context used by VOP_DUMP call. It is used to
+allocate, free or search for data blocks on the dump device.
+@kernel.fstype.vnops.fid Number of VOP_FID calls for all filesystems of a given type
+VOP_FID is used to get file identifier which can be used instead of the
+file name in some operations. NFS server is one known user of this vnode
+operation.
+@kernel.fstype.vnops.frlock Number of VOP_FRLOCK calls for all filesystems of a given type
+VOP_FRLOCK is used to implement file record locking used by flock(2)
+@kernel.fstype.vnops.fsync Number of VOP_FSYNC calls for all filesystems of a given type
+VOP_FSYNC is used to implement fsync(2) system call which flushes
+data for a specific file to disk.
+@kernel.fstype.vnops.getattr Number of VOP_GETATTR calls for all filesystems of a given type
+VOP_GETATTR is used to extract vnode attributes. It is used as part of
+many system calls which manipulate file attributes, e.g. chmod(2), stat(2),
+utimes(2) etc.
+@kernel.fstype.vnops.getpage Number of VOP_GETPAGE calls for all filesystems of a given type
+VOP_GETPAGE is used to allocate pages (could be several at a time) to cover
+a region in a file.
+@kernel.fstype.vnops.getsecattr Number of VOP_GETSECATTR calls for all filesystems of a given type
+VOP_GETSECATTR is used to extract ACL entries associated with a file.
+@kernel.fstype.vnops.inactive Number of VOP_INACTIVE calls for all filesystems of a given type
+VOP_INACTIVE is used to destroy a vnode before it is removed from the
+cache or reused.
+@kernel.fstype.vnops.ioctl Number of VOP_IOCTL calls for all filesystems of a given type
+VOP_IOCTL is used to implement ioctl(2) system call.
+@kernel.fstype.vnops.link Number of VOP_LINK calls for all filesystems of a given type
+VOP_LINK is used to implement support for hard links
+@kernel.fstype.vnops.lookup Number of VOP_LOOKUP calls for all filesystems of a given type
+VOP_LOOKUP is used to translate filename to vnode.
+@kernel.fstype.vnops.map Number of VOP_MAP calls for all filesystems of a given type
+VOP_MAP is used to create a new memory-mapped region of a file
+@kernel.fstype.vnops.mkdir Number of VOP_MKDIR calls for all filesystems of a given type
+VOP_MKDIR is used to create directories
+@kernel.fstype.vnops.open Number of VOP_OPEN calls for all filesystems of a given type
+VOP_OPEN is called every time open(2) system call is called.
+@kernel.fstype.vnops.pageio Number of VOP_PAGEIO calls for all filesystems of a given type
+VOP_PAGEIO is similar to VOP_GETPAGE and VOP_PUTPAGE and can be used when
+either of the other two would be less efficient, e.g. when pages
+will be reused after the IO is done.
+@kernel.fstype.vnops.pathconf Number of VOP_PATHCONF calls for all filesystems of a given type
+VOP_PATHCONF is used to obtain information about a filesystem's parameters
+as reported by the pathconf(2) system call.
+@kernel.fstype.vnops.poll Number of VOP_POLL calls for all filesystems of a given type
+VOP_POLL is used to implement the poll(2) system call.
+@kernel.fstype.vnops.putpage Number of VOP_PUTPAGE calls for all filesystems of a given type
+VOP_PUTPAGE is used to release pages which have been used to hold
+data from a file
+@kernel.fstype.vnops.read Number of VOP_READ calls for all filesystems of a given type
+VOP_READ is used to implement the read(2) system call.
+@kernel.fstype.vnops.readdir Number of VOP_READDIR calls for all filesystems of a given type
+VOP_READDIR is used to read directory entries
+@kernel.fstype.vnops.readlink Number of VOP_READLINK calls for all filesystems of a given type
+VOP_READLINK is used to read the information about the target of the symbolic
+link
+@kernel.fstype.vnops.realvp Number of VOP_REALVP calls for all filesystems of a given type
+VOP_REALVP is used to traverse stacking filesystems and extract information
+about the vnode which refers to the "real" filesystem object.
+@kernel.fstype.vnops.remove Number of VOP_REMOVE calls for all filesystems of a given type
+VOP_REMOVE is used to remove entries from a directory.
+@kernel.fstype.vnops.rename Number of VOP_RENAME calls for all filesystems of a given type
+VOP_RENAME is used to implement the rename(2) system call.
+@kernel.fstype.vnops.rmdir Number of VOP_RMDIR calls for all filesystems of a given type
+VOP_RMDIR is used to implement the rmdir(2) system call.
+@kernel.fstype.vnops.rwlock Number of VOP_RWLOCK calls for all filesystems of a given type
+VOP_RWLOCK and VOP_RWUNLOCK are used to protect access to vnode data.
+@kernel.fstype.vnops.rwunlock Number of VOP_RWUNLOCK calls for all filesystems of a given type
+VOP_RWLOCK and VOP_RWUNLOCK are used to protect access to vnode data.
+@kernel.fstype.vnops.seek Number of VOP_SEEK calls for all filesystems of a given type
+VOP_SEEK is used by lseek(2). Because vnodes can be shared across multiple
+instances of vfile, VOP_SEEK does not usually change the position of the
+file pointer; it is instead used to validate the offset before it is changed.
+@kernel.fstype.vnops.setattr Number of VOP_SETATTR calls for all filesystems of a given type
+VOP_SETATTR is used to change vnode attributes which are modified by system
+calls like chmod(2), chown(2), utimes(2) etc.
+@kernel.fstype.vnops.setfl Number of VOP_SETFL calls for all filesystems of a given type
+VOP_SETFL is used to implement fcntl(2) F_SETFL option.
+Currently only the sockfs pseudo-filesystem implements this vnode operation.
+@kernel.fstype.vnops.setsecattr Number of VOP_SETSECATTR calls for all filesystems of a given type
+VOP_SETSECATTR is used to change ACL entries
+@kernel.fstype.vnops.shrlock Number of VOP_SHRLOCK calls for all filesystems of a given type
+VOP_SHRLOCK is usually used to implement CIFS and NLMv3 shared reservations.
+@kernel.fstype.vnops.space Number of VOP_SPACE calls for all filesystems of a given type
+VOP_SPACE is used to provide optimized support for growing and shrinking files.
+The F_FREESP option of fcntl(2) uses this vnode operation to implement the
+ftruncate(3C) function.
+@kernel.fstype.vnops.symlink Number of VOP_SYMLINK calls for all filesystems of a given type
+VOP_SYMLINK is used to create symbolic links.
+@kernel.fstype.vnops.vnevent Number of VOP_VNEVENT calls for all filesystems of a given type
+VOP_VNEVENT is used to check whether a filesystem supports vnode event
+notifications for operations which change the names of files.
+@kernel.fstype.vnops.write Number of VOP_WRITE calls for all filesystems of a given type
+VOP_WRITE is used to implement the write(2) system call.
+@kernel.fstype.read_bytes Bytes read from all filesystems of a given type
+@kernel.fstype.readdir_bytes Bytes read for directory entries from all filesystems of a given type
+@kernel.fstype.write_bytes Bytes written to all filesystems of a given type
+
+@hinv.disk.devlink Disk name in the descriptive format
+Solaris uses symbolic links under /dev to provide access to device nodes via
+"descriptive" names like /dev/dsk/cXtYdZsN. This metrics provides a
+translation from a "descriptive" name to instances in the disk instance
+domain.
+
+The name is always the name of the first minor device for a particular disk
+and includes the slice information.
+
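+For example, a disk instance such as sd0 would typically translate to a
+devlink name like /dev/dsk/c0t0d0s0 (an illustrative name; the actual
+controller/target/disk numbers depend on the hardware).
+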
+NOTE! Fetching this metric is expensive - several system calls are made
+ to fetch each instance.
diff --git a/src/pmdas/solaris/kvm.c b/src/pmdas/solaris/kvm.c
new file mode 100644
index 0000000..5aa674a
--- /dev/null
+++ b/src/pmdas/solaris/kvm.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2009 Max Matveev. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <kvm.h>
+#include <nlist.h>
+
+#include "common.h"
+
+
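+/*
+ * The fsflush totals live in the kernel symbol "fsf_total": kvm_nlist()
+ * resolves its address once at startup and kvm_kread() copies the whole
+ * fsf_stat_t structure out of the live kernel on every refresh.
+ */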
+static kvm_t *kvm;
+struct nlist kvm_names[] = {
+ {.n_name = "fsf_total"},
+ {.n_name = NULL}
+};
+
+fsf_stat_t s = {0};
+static int fresh;
+
+void
+kvm_init(int ignore)
+{
+ kvm = kvm_open(NULL, NULL, NULL, O_RDONLY, "pmdasolaris");
+ if (kvm && kvm_nlist(kvm, kvm_names))
+ fprintf(stderr, "Cannot extract addresses\n");
+}
+
+void
+kvm_refresh(void)
+{
+ fresh = kvm &&
+ (kvm_kread(kvm, kvm_names[0].n_value, &s, sizeof(s)) == sizeof(s));
+}
+
+int
+kvm_fetch(pmdaMetric *pm, int inst, pmAtomValue *v)
+{
+ metricdesc_t *md = pm->m_user;
+ char *p = (char *)&s;
+
+ if (!fresh)
+ return 0;
+
+ memcpy(&v->ull, p + md->md_offset, sizeof(v->ull));
+ return 1;
+}
diff --git a/src/pmdas/solaris/netlink.c b/src/pmdas/solaris/netlink.c
new file mode 100644
index 0000000..d3b61bf
--- /dev/null
+++ b/src/pmdas/solaris/netlink.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2010 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* Extract per-link network information via kstat.
+ *
+ * Link stats are in the kstat module called "link" and the kstat name
+ * is the same as the link name. */
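+/*
+ * For reference, the same counters can be inspected from the shell with
+ * kstat(1M), e.g. "kstat -m link -n e1000g0" (the link name here is
+ * purely illustrative and depends on the configured datalinks).
+ */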
+
+#include <kstat.h>
+#include "common.h"
+
+int
+netlink_fetch(pmdaMetric *pm, int inst, pmAtomValue *av)
+{
+ char *lname;
+ metricdesc_t *md = pm->m_user;
+ kstat_t *k;
+ char *stat = (char *)md->md_offset;
+
+ if (pmdaCacheLookup(indomtab[NETLINK_INDOM].it_indom, inst, &lname,
+ (void **)&k) != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ if (k) {
+ kstat_named_t *kn = kstat_data_lookup(k, stat);
+
+ if (kn == NULL) {
+ fprintf(stderr, "No kstat called %s for %s\n", stat, lname);
+ return 0;
+ }
+
+ switch (pm->m_desc.type) {
+ case PM_TYPE_32:
+ if (kn->data_type == KSTAT_DATA_INT32) {
+ av->l = kn->value.i32;
+ return 1;
+ }
+ break;
+ case PM_TYPE_U32:
+ if (kn->data_type == KSTAT_DATA_UINT32) {
+ av->ul = kn->value.ui32;
+ return 1;
+ }
+ break;
+ case PM_TYPE_64:
+ if (kn->data_type == KSTAT_DATA_INT64) {
+ av->ll = kn->value.i64;
+ return 1;
+ }
+ break;
+ case PM_TYPE_U64:
+ if (kn->data_type == KSTAT_DATA_UINT64) {
+ av->ull = kn->value.ui64;
+ return 1;
+ }
+ break;
+ }
+ }
+
+ return 0;
+}
+
+void
+netlink_update_stats(int fetch)
+{
+ kstat_t *k;
+ kstat_ctl_t *kc;
+ pmInDom indom = indomtab[NETLINK_INDOM].it_indom;
+
+ if ((kc = kstat_ctl_update()) == NULL)
+ return;
+
+ for (k = kc->kc_chain; k != NULL; k = k->ks_next) {
+ if (strcmp(k->ks_module, "link") == 0) {
+ int rv;
+ kstat_t *cached;
+
+ if (pmdaCacheLookupName(indom, k->ks_name, &rv,
+ (void **)&cached) != PMDA_CACHE_ACTIVE) {
+ rv = pmdaCacheStore(indom, PMDA_CACHE_ADD, k->ks_name, k);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot create instance for "
+ "network link '%s': %s\n",
+ k->ks_name, pmErrStr(rv));
+ continue;
+ }
+ }
+
+ if (fetch)
+ kstat_read(kc, k, NULL);
+ }
+ }
+}
+
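+/*
+ * Standard pmdaCache refresh pattern: mark every instance inactive,
+ * reactivate the links still present in the kstat chain, then save the
+ * indom so instance numbers stay stable across restarts.
+ */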
+void
+netlink_refresh(void)
+{
+ pmdaCacheOp(indomtab[NETLINK_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ netlink_update_stats(1);
+ pmdaCacheOp(indomtab[NETLINK_INDOM].it_indom, PMDA_CACHE_SAVE);
+}
+
+void
+netlink_init(int first)
+{
+ pmdaCacheOp(indomtab[NETLINK_INDOM].it_indom, PMDA_CACHE_LOAD);
+ netlink_update_stats(0);
+ pmdaCacheOp(indomtab[NETLINK_INDOM].it_indom, PMDA_CACHE_SAVE);
+}
diff --git a/src/pmdas/solaris/netmib2.c b/src/pmdas/solaris/netmib2.c
new file mode 100644
index 0000000..aab417a
--- /dev/null
+++ b/src/pmdas/solaris/netmib2.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2009 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+/* Extract network-related information from the kernel using MIB2
+ * interfaces. MIB2 structures are described by RFC 4113, 4293,
+ * 4001. IPv6 specific MIB structures are described in RFC 2465, 2466.
+ */
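+/*
+ * The data is collected with the classic Solaris MIB walk: the tcp, udp
+ * and icmp STREAMS modules are pushed onto an arp stream, a single
+ * T_SVR4_OPTMGMT_REQ is sent down with putmsg(), and getmsg() then
+ * returns one T_OPTMGMT_ACK per MIB group until an ACK with an empty
+ * option payload terminates the walk.
+ */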
+
+#include <fcntl.h>
+#include <stropts.h>
+#include <inet/mib2.h>
+#include <sys/tihdr.h>
+#include <sys/socket.h>
+#include <net/if.h>
+
+#include "common.h"
+#include "netmib2.h"
+
+static int afd = -1;
+static int data_valid;
+static int netif_added;
+
+nm2_udp_stats_t nm2_udp;
+
+static nm2_netif_stats_t *
+netif_cache_inst(const char *ifname)
+{
+ pmInDom indom = indomtab[NETIF_INDOM].it_indom;
+ nm2_netif_stats_t *ist;
+ int rv;
+
+ if (pmdaCacheLookupName(indom, ifname, &rv,
+ (void **)&ist) != PMDA_CACHE_ACTIVE) {
+ ist = malloc(sizeof(*ist));
+ if (ist == NULL) {
+ __pmNotifyErr(LOG_WARNING,
+ "Out of memory for stats on network interface '%s'\n",
+ ifname);
+ return NULL;
+ }
+
+ rv = pmdaCacheStore(indom, PMDA_CACHE_ADD, ifname, ist);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot create instance for '%s': %s\n",
+ ifname, pmErrStr(rv));
+ free(ist);
+ return NULL;
+ }
+ netif_added++;
+ }
+
+ return ist;
+}
+
+static void
+ipv4_stats (const void *data, int sz)
+{
+ const mib2_ipAddrEntry_t *ipa = data;
+
+ while (sz > 0) {
+ nm2_netif_stats_t *ist = netif_cache_inst(ipa->ipAdEntIfIndex.o_bytes);
+
+ if (ist) {
+ ist->mtu = ipa->ipAdEntInfo.ae_mtu;
+ /* We get byte count and other stuff from Traffic stats */
+ }
+ sz -= sizeof(*ipa);
+ ipa++;
+ }
+}
+
+static void
+ipv4_ifstats(const void *data, int sz)
+{
+ const mib2_ipIfStatsEntry_t *ips = data;
+
+ nm2_udp.noports = 0;
+ nm2_udp.overflows = 0;
+
+ while (sz > 0) {
+ /* index 0 is a pseudo-interface */
+ if (ips->ipIfStatsIfIndex) {
+ nm2_netif_stats_t *ist;
+ char name[64];
+
+ if ((if_indextoname(ips->ipIfStatsIfIndex, name) != NULL) &&
+ ((ist = netif_cache_inst(name)) != NULL)) {
+
+ ist->ibytes = ips->ipIfStatsHCInOctets;
+ ist->obytes = ips->ipIfStatsHCOutOctets;
+ ist->ipackets = ips->ipIfStatsHCInReceives;
+ ist->opackets = ips->ipIfStatsHCOutTransmits;
+ ist->imcast = ips->ipIfStatsHCInMcastPkts;
+ ist->omcast = ips->ipIfStatsHCOutMcastPkts;
+ ist->ibcast = ips->ipIfStatsHCInBcastPkts;
+ ist->obcast = ips->ipIfStatsHCOutBcastPkts;
+ ist->delivered = ips->ipIfStatsHCInDelivers;
+ ist->idrops = ips->ipIfStatsInDiscards;
+ ist->odrops = ips->ipIfStatsOutDiscards;
+ ist->ierrors =
+ + (uint64_t)ips->ipIfStatsInHdrErrors
+ + ips->ipIfStatsInTooBigErrors
+ + ips->ipIfStatsInNoRoutes
+ + ips->ipIfStatsInAddrErrors
+ + ips->ipIfStatsInUnknownProtos
+ + ips->ipIfStatsInTruncatedPkts;
+
+ ist->oerrors = ips->ipIfStatsOutFragFails;
+ }
+ }
+
+ nm2_udp.noports += ips->udpNoPorts;
+ nm2_udp.overflows += ips->udpInOverflows;
+
+ sz -= sizeof(*ips);
+ ips++;
+ }
+}
+
+void
+netmib2_refresh(void)
+{
+ struct strbuf ctrl;
+ struct opthdr *oh;
+ uint64_t buf[64]; /* Arbitrary size, just large enough to fit req + opthdr */
+ struct T_optmgmt_req *omreq = (struct T_optmgmt_req *)buf;
+ struct T_optmgmt_ack *omack = (struct T_optmgmt_ack *)buf;
+
+ omreq->PRIM_type = T_SVR4_OPTMGMT_REQ;
+ omreq->OPT_offset = sizeof (*omreq);
+ omreq->OPT_length = sizeof (*oh);
+ omreq->MGMT_flags = T_CURRENT;
+
+ oh = (struct opthdr *)(omreq + 1);
+ oh->level = /*EXPER_IP_AND_TESTHIDDEN*/MIB2_IP;
+ oh->name = 0;
+ oh->len = 0;
+
+ ctrl.buf = (char *)buf;
+ ctrl.len = omreq->OPT_length + omreq->OPT_offset;
+
+ data_valid = 0;
+
+ if (putmsg(afd, &ctrl, NULL, 0) == -1) {
+ __pmNotifyErr(LOG_ERR, "Failed to push message down stream: %s\n",
+ osstrerror());
+ return;
+ }
+
+ oh = (struct opthdr *)(omack + 1);
+ ctrl.maxlen = sizeof(buf);
+
+ netif_added = 0;
+
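+    /* One T_OPTMGMT_ACK per MIB group: rv == 0 with oh->len == 0 marks
+     * the end of the walk, while MOREDATA means a data part follows and
+     * is fetched with a second getmsg() into a separately allocated buffer. */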
+ for (;;) {
+ int flags = 0;
+ struct strbuf data;
+ int rv;
+
+ rv = getmsg(afd, &ctrl, NULL, &flags);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_ERR, "netmib2: failed to get a response: %s\n",
+ osstrerror());
+ break;
+ }
+
+ if ((rv == 0) && (ctrl.len >= sizeof(*omack)) &&
+ (omack->PRIM_type == T_OPTMGMT_ACK) &&
+ (omack->MGMT_flags == T_SUCCESS) && (oh->len == 0)) {
+ data_valid = 1;
+ break;
+ }
+
+ if ((rv != MOREDATA) || (ctrl.len < sizeof(*omack)) ||
+ (omack->PRIM_type != T_OPTMGMT_ACK) ||
+ (omack->MGMT_flags != T_SUCCESS)) {
+ __pmNotifyErr(LOG_ERR, "netmib2: Unexpected message received\n");
+ break;
+ }
+
+ memset(&data, 0, sizeof(data));
+ data.buf = malloc(oh->len);
+ if (data.buf == NULL) {
+ __pmNotifyErr(LOG_ERR, "netmib2: Out of memory\n");
+ break;
+ }
+
+ data.maxlen = oh->len;
+ flags = 0;
+
+ rv = getmsg(afd, NULL, &data, &flags);
+ if (rv) {
+ __pmNotifyErr(LOG_ERR,
+			  "netmib2: Failed to get additional data: %s\n",
+ osstrerror());
+ break;
+ }
+
+ switch (oh->level) {
+ case MIB2_IP:
+ switch(oh->name) {
+	    case 0: /* Overall statistics */
+ break;
+
+ case MIB2_IP_ADDR:
+ ipv4_stats(data.buf, data.len);
+ break;
+
+ case MIB2_IP_TRAFFIC_STATS:
+ ipv4_ifstats(data.buf, data.len);
+ break;
+ }
+ break;
+
+ case MIB2_IP6:
+ break;
+
+ case MIB2_UDP:
+ if (oh->name == 0) {
+ mib2_udp_t *m2u = (mib2_udp_t *)data.buf;
+
+#ifdef EXPER_IP_AND_TESTHIDDEN
+ nm2_udp.ipackets = m2u->udpHCInDatagrams;
+ nm2_udp.opackets = m2u->udpHCOutDatagrams;
+#else
+ nm2_udp.ipackets = m2u->udpInDatagrams;
+ nm2_udp.opackets = m2u->udpOutDatagrams;
+#endif
+ nm2_udp.ierrors = m2u->udpInErrors;
+ nm2_udp.oerrors = m2u->udpOutErrors;
+ }
+ break;
+
+ case MIB2_TCP:
+ break;
+ }
+
+ free(data.buf);
+ }
+
+ if (netif_added) {
+ pmdaCacheOp(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}
+
+int
+netmib2_fetch(pmdaMetric *pm, int inst, pmAtomValue *av)
+{
+ char *fsname;
+ metricdesc_t *md = pm->m_user;
+ char *ist;
+
+ if (pm->m_desc.indom == PM_INDOM_NULL) {
+ switch (pm->m_desc.type) {
+ case PM_TYPE_U32:
+ av->ul = *(uint32_t *)md->md_offset;
+ return 1;
+
+ case PM_TYPE_U64:
+ av->ull = *(uint64_t *)md->md_offset;
+ return 1;
+ }
+
+ return PM_ERR_APPVERSION;
+ }
+
+ if (pmdaCacheLookup(indomtab[NETIF_INDOM].it_indom, inst, &fsname,
+ (void **)&ist) != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ if (ist) {
+ switch (pm->m_desc.type) {
+ case PM_TYPE_U32:
+ av->ul = *(uint32_t *)(ist + md->md_offset);
+ return 1;
+
+ case PM_TYPE_U64:
+ av->ull = *(uint64_t *)(ist + md->md_offset);
+ return 1;
+ }
+
+ return PM_ERR_APPVERSION;
+ }
+
+    /* Even if we've copied the values, don't admit they're good unless
+ * the update was problem-free. */
+ return data_valid;
+}
+
+void
+netmib2_init(int first)
+{
+ char *mods[] = {"tcp", "udp", "icmp"};
+ int i;
+
+ if (afd >= 0)
+ return;
+
+ afd = open("/dev/arp", O_RDWR);
+ if (afd < 0) {
+ __pmNotifyErr(LOG_ERR, "Cannot open /dev/arp: %s\n", osstrerror());
+ return;
+ }
+
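+    /* Push the tcp, udp and icmp STREAMS modules onto the arp stream so
+     * that their MIB groups are included in subsequent MIB walks. */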
+ for (i = 0; i < 3; i++ ) {
+ if (ioctl(afd, I_PUSH, mods[i]) < 0) {
+ __pmNotifyErr(LOG_ERR, "Cannot push %s into /dev/arp: %s\n",
+ mods[i], osstrerror());
+ close(afd);
+ afd = -1;
+ return;
+ }
+ }
+
+ pmdaCacheOp(indomtab[NETIF_INDOM].it_indom, PMDA_CACHE_LOAD);
+ netmib2_refresh();
+}
diff --git a/src/pmdas/solaris/netmib2.h b/src/pmdas/solaris/netmib2.h
new file mode 100644
index 0000000..e506c28
--- /dev/null
+++ b/src/pmdas/solaris/netmib2.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2009 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef __PMDA_SOLARIS_NETMIB2_H
+#define __PMDA_SOLARIS_NETMIB2_H
+
+typedef struct nm2_udp_stats {
+ uint64_t ipackets;
+ uint64_t opackets;
+ int32_t ierrors;
+ int32_t oerrors;
+ uint32_t noports;
+ uint32_t overflows;
+} nm2_udp_stats_t;
+
+extern nm2_udp_stats_t nm2_udp;
+
+typedef struct nm2_netif_stats {
+ uint64_t ipackets;
+ uint64_t opackets;
+ uint64_t ibytes;
+ uint64_t obytes;
+ uint64_t delivered;
+ uint64_t imcast;
+ uint64_t omcast;
+ uint64_t ibcast;
+ uint64_t obcast;
+ uint64_t ierrors;
+ uint64_t oerrors;
+ int32_t idrops;
+ int32_t odrops;
+ int mtu;
+} nm2_netif_stats_t;
+
+void netmib2_init(int);
+void netmib2_refresh(void);
+int netmib2_fetch(pmdaMetric *, int, pmAtomValue *);
+
+#endif
diff --git a/src/pmdas/solaris/pmns.disk b/src/pmdas/solaris/pmns.disk
new file mode 100644
index 0000000..1875a25
--- /dev/null
+++ b/src/pmdas/solaris/pmns.disk
@@ -0,0 +1,58 @@
+disk {
+ all
+ dev
+}
+
+disk.all {
+ read SOLARIS:SCLR_DISK:0
+ write SOLARIS:SCLR_DISK:1
+ total SOLARIS:SCLR_DISK:2
+ read_bytes SOLARIS:SCLR_DISK:3
+ write_bytes SOLARIS:SCLR_DISK:4
+ total_bytes SOLARIS:SCLR_DISK:5
+ wait
+ run
+}
+
+disk.all.wait {
+ time SOLARIS:SCLR_DISK:6
+ count SOLARIS:SCLR_DISK:7
+}
+
+disk.all.run {
+ time SOLARIS:SCLR_DISK:8
+ count SOLARIS:SCLR_DISK:9
+}
+
+disk.dev {
+ read SOLARIS:SCLR_DISK:10
+ write SOLARIS:SCLR_DISK:11
+ total SOLARIS:SCLR_DISK:12
+ read_bytes SOLARIS:SCLR_DISK:13
+ write_bytes SOLARIS:SCLR_DISK:14
+ total_bytes SOLARIS:SCLR_DISK:15
+ wait
+ run
+ errors
+}
+
+disk.dev.wait {
+ time SOLARIS:SCLR_DISK:16
+ count SOLARIS:SCLR_DISK:17
+}
+disk.dev.run {
+ time SOLARIS:SCLR_DISK:18
+ count SOLARIS:SCLR_DISK:19
+}
+
+disk.dev.errors {
+ soft SOLARIS:SCLR_DISK:21
+ hard SOLARIS:SCLR_DISK:22
+ transport SOLARIS:SCLR_DISK:23
+ media SOLARIS:SCLR_DISK:24
+ recoverable SOLARIS:SCLR_DISK:25
+ notready SOLARIS:SCLR_DISK:26
+ nodevice SOLARIS:SCLR_DISK:27
+ badrequest SOLARIS:SCLR_DISK:28
+ pfa SOLARIS:SCLR_DISK:29
+}
diff --git a/src/pmdas/solaris/pmns.hinv b/src/pmdas/solaris/pmns.hinv
new file mode 100644
index 0000000..964e166
--- /dev/null
+++ b/src/pmdas/solaris/pmns.hinv
@@ -0,0 +1,34 @@
+hinv {
+ ncpu SOLARIS:SCLR_SYSINFO:56
+ ndisk SOLARIS:SCLR_DISK:20
+ nfilesys SOLARIS:SCLR_FILESYS:1023
+ pagesize SOLARIS:SCLR_SYSINFO:108
+ physmem SOLARIS:SCLR_SYSINFO:109
+ cpu
+ disk
+}
+
+hinv.cpu {
+ maxclock SOLARIS:SCLR_SYSINFO:147
+ clock SOLARIS:SCLR_SYSINFO:148
+ brand SOLARIS:SCLR_SYSINFO:149
+ frequencies SOLARIS:SCLR_SYSINFO:150
+ implementation SOLARIS:SCLR_SYSINFO:151
+ chip_id SOLARIS:SCLR_SYSINFO:152
+ clog_id SOLARIS:SCLR_SYSINFO:153
+ core_id SOLARIS:SCLR_SYSINFO:154
+ pkg_core_id SOLARIS:SCLR_SYSINFO:155
+ cstate SOLARIS:SCLR_SYSINFO:156
+ maxcstates SOLARIS:SCLR_SYSINFO:157
+ ncores SOLARIS:SCLR_SYSINFO:158
+ ncpus SOLARIS:SCLR_SYSINFO:159
+}
+
+hinv.disk {
+ vendor SOLARIS:SCLR_DISK:30
+ product SOLARIS:SCLR_DISK:31
+ revision SOLARIS:SCLR_DISK:32
+ serial SOLARIS:SCLR_DISK:33
+ capacity SOLARIS:SCLR_DISK:34
+ devlink SOLARIS:SCLR_DISK:35
+}
diff --git a/src/pmdas/solaris/pmns.kernel b/src/pmdas/solaris/pmns.kernel
new file mode 100644
index 0000000..d6722ae
--- /dev/null
+++ b/src/pmdas/solaris/pmns.kernel
@@ -0,0 +1,200 @@
+kernel {
+ all
+ percpu
+ fsflush
+ fs
+ fstype
+}
+
+kernel.all {
+ cpu
+ io
+ trap SOLARIS:SCLR_SYSINFO:32
+ pswitch SOLARIS:SCLR_SYSINFO:23
+ syscall SOLARIS:SCLR_SYSINFO:22
+ sysexec SOLARIS:SCLR_SYSINFO:33
+ sysfork SOLARIS:SCLR_SYSINFO:34
+ sysvfork SOLARIS:SCLR_SYSINFO:35
+ sysread SOLARIS:SCLR_SYSINFO:36
+ syswrite SOLARIS:SCLR_SYSINFO:37
+ load SOLARIS:SCLR_SYSINFO:135
+}
+
+kernel.all.cpu {
+ idle SOLARIS:SCLR_SYSINFO:0
+ user SOLARIS:SCLR_SYSINFO:1
+ sys SOLARIS:SCLR_SYSINFO:2
+ wait
+}
+
+kernel.all.cpu.wait {
+ total SOLARIS:SCLR_SYSINFO:3
+ io SOLARIS:SCLR_SYSINFO:8
+ pio SOLARIS:SCLR_SYSINFO:9
+ swap SOLARIS:SCLR_SYSINFO:10
+}
+
+kernel.all.io {
+ bread SOLARIS:SCLR_SYSINFO:14
+ bwrite SOLARIS:SCLR_SYSINFO:15
+ lread SOLARIS:SCLR_SYSINFO:16
+ lwrite SOLARIS:SCLR_SYSINFO:17
+ phread SOLARIS:SCLR_SYSINFO:26
+ phwrite SOLARIS:SCLR_SYSINFO:27
+ intr SOLARIS:SCLR_SYSINFO:28
+}
+
+kernel.percpu {
+ cpu
+ io
+ trap SOLARIS:SCLR_SYSINFO:38
+ pswitch SOLARIS:SCLR_SYSINFO:25
+ syscall SOLARIS:SCLR_SYSINFO:24
+ sysexec SOLARIS:SCLR_SYSINFO:39
+ sysfork SOLARIS:SCLR_SYSINFO:40
+ sysvfork SOLARIS:SCLR_SYSINFO:41
+ sysread SOLARIS:SCLR_SYSINFO:42
+ syswrite SOLARIS:SCLR_SYSINFO:43
+}
+
+kernel.percpu.cpu {
+ idle SOLARIS:SCLR_SYSINFO:4
+ user SOLARIS:SCLR_SYSINFO:5
+ sys SOLARIS:SCLR_SYSINFO:6
+ wait
+}
+
+kernel.percpu.cpu.wait {
+ total SOLARIS:SCLR_SYSINFO:7
+ io SOLARIS:SCLR_SYSINFO:11
+ pio SOLARIS:SCLR_SYSINFO:12
+ swap SOLARIS:SCLR_SYSINFO:13
+}
+
+kernel.percpu.io {
+ bread SOLARIS:SCLR_SYSINFO:18
+ bwrite SOLARIS:SCLR_SYSINFO:19
+ lread SOLARIS:SCLR_SYSINFO:20
+ lwrite SOLARIS:SCLR_SYSINFO:21
+ phread SOLARIS:SCLR_SYSINFO:29
+ phwrite SOLARIS:SCLR_SYSINFO:30
+ intr SOLARIS:SCLR_SYSINFO:31
+}
+
+kernel.fsflush {
+ scanned SOLARIS:SCLR_FSFLUSH:0
+ examined SOLARIS:SCLR_FSFLUSH:1
+ locked SOLARIS:SCLR_FSFLUSH:2
+ modified SOLARIS:SCLR_FSFLUSH:3
+ coalesced SOLARIS:SCLR_FSFLUSH:4
+ released SOLARIS:SCLR_FSFLUSH:5
+ time SOLARIS:SCLR_FSFLUSH:6
+}
+
+kernel.fs {
+ vnops
+ read_bytes SOLARIS:SCLR_FILESYS:0
+ readdir_bytes SOLARIS:SCLR_FILESYS:1
+ write_bytes SOLARIS:SCLR_FILESYS:2
+}
+
+kernel.fs.vnops {
+ access SOLARIS:SCLR_FILESYS:3
+ addmap SOLARIS:SCLR_FILESYS:4
+ close SOLARIS:SCLR_FILESYS:5
+ cmp SOLARIS:SCLR_FILESYS:6
+ create SOLARIS:SCLR_FILESYS:7
+ delmap SOLARIS:SCLR_FILESYS:8
+ dispose SOLARIS:SCLR_FILESYS:9
+ dump SOLARIS:SCLR_FILESYS:10
+ dumpctl SOLARIS:SCLR_FILESYS:11
+ fid SOLARIS:SCLR_FILESYS:12
+ frlock SOLARIS:SCLR_FILESYS:13
+ fsync SOLARIS:SCLR_FILESYS:14
+ getattr SOLARIS:SCLR_FILESYS:15
+ getpage SOLARIS:SCLR_FILESYS:16
+ getsecattr SOLARIS:SCLR_FILESYS:17
+ inactive SOLARIS:SCLR_FILESYS:18
+ ioctl SOLARIS:SCLR_FILESYS:19
+ link SOLARIS:SCLR_FILESYS:20
+ lookup SOLARIS:SCLR_FILESYS:21
+ map SOLARIS:SCLR_FILESYS:22
+ mkdir SOLARIS:SCLR_FILESYS:23
+ open SOLARIS:SCLR_FILESYS:24
+ pageio SOLARIS:SCLR_FILESYS:25
+ pathconf SOLARIS:SCLR_FILESYS:26
+ poll SOLARIS:SCLR_FILESYS:27
+ putpage SOLARIS:SCLR_FILESYS:28
+ read SOLARIS:SCLR_FILESYS:29
+ readdir SOLARIS:SCLR_FILESYS:30
+ readlink SOLARIS:SCLR_FILESYS:31
+ realvp SOLARIS:SCLR_FILESYS:32
+ remove SOLARIS:SCLR_FILESYS:33
+ rename SOLARIS:SCLR_FILESYS:34
+ rmdir SOLARIS:SCLR_FILESYS:35
+ rwlock SOLARIS:SCLR_FILESYS:36
+ rwunlock SOLARIS:SCLR_FILESYS:37
+ seek SOLARIS:SCLR_FILESYS:38
+ setattr SOLARIS:SCLR_FILESYS:39
+ setfl SOLARIS:SCLR_FILESYS:40
+ setsecattr SOLARIS:SCLR_FILESYS:41
+ shrlock SOLARIS:SCLR_FILESYS:42
+ space SOLARIS:SCLR_FILESYS:43
+ symlink SOLARIS:SCLR_FILESYS:44
+ vnevent SOLARIS:SCLR_FILESYS:45
+ write SOLARIS:SCLR_FILESYS:46
+}
+
+kernel.fstype {
+ vnops
+ read_bytes SOLARIS:SCLR_FILESYS:47
+ readdir_bytes SOLARIS:SCLR_FILESYS:48
+ write_bytes SOLARIS:SCLR_FILESYS:49
+}
+
+kernel.fstype.vnops {
+ access SOLARIS:SCLR_FILESYS:50
+ addmap SOLARIS:SCLR_FILESYS:51
+ close SOLARIS:SCLR_FILESYS:52
+ cmp SOLARIS:SCLR_FILESYS:53
+ create SOLARIS:SCLR_FILESYS:54
+ delmap SOLARIS:SCLR_FILESYS:55
+ dispose SOLARIS:SCLR_FILESYS:56
+ dump SOLARIS:SCLR_FILESYS:57
+ dumpctl SOLARIS:SCLR_FILESYS:58
+ fid SOLARIS:SCLR_FILESYS:59
+ frlock SOLARIS:SCLR_FILESYS:60
+ fsync SOLARIS:SCLR_FILESYS:61
+ getattr SOLARIS:SCLR_FILESYS:62
+ getpage SOLARIS:SCLR_FILESYS:63
+ getsecattr SOLARIS:SCLR_FILESYS:64
+ inactive SOLARIS:SCLR_FILESYS:65
+ ioctl SOLARIS:SCLR_FILESYS:66
+ link SOLARIS:SCLR_FILESYS:67
+ lookup SOLARIS:SCLR_FILESYS:68
+ map SOLARIS:SCLR_FILESYS:69
+ mkdir SOLARIS:SCLR_FILESYS:70
+ open SOLARIS:SCLR_FILESYS:71
+ pageio SOLARIS:SCLR_FILESYS:72
+ pathconf SOLARIS:SCLR_FILESYS:73
+ poll SOLARIS:SCLR_FILESYS:74
+ putpage SOLARIS:SCLR_FILESYS:75
+ read SOLARIS:SCLR_FILESYS:76
+ readdir SOLARIS:SCLR_FILESYS:77
+ readlink SOLARIS:SCLR_FILESYS:78
+ realvp SOLARIS:SCLR_FILESYS:79
+ remove SOLARIS:SCLR_FILESYS:80
+ rename SOLARIS:SCLR_FILESYS:81
+ rmdir SOLARIS:SCLR_FILESYS:82
+ rwlock SOLARIS:SCLR_FILESYS:83
+ rwunlock SOLARIS:SCLR_FILESYS:84
+ seek SOLARIS:SCLR_FILESYS:85
+ setattr SOLARIS:SCLR_FILESYS:86
+ setfl SOLARIS:SCLR_FILESYS:87
+ setsecattr SOLARIS:SCLR_FILESYS:88
+ shrlock SOLARIS:SCLR_FILESYS:89
+ space SOLARIS:SCLR_FILESYS:90
+ symlink SOLARIS:SCLR_FILESYS:91
+ vnevent SOLARIS:SCLR_FILESYS:92
+ write SOLARIS:SCLR_FILESYS:93
+}
diff --git a/src/pmdas/solaris/pmns.mem b/src/pmdas/solaris/pmns.mem
new file mode 100644
index 0000000..04fb6f1
--- /dev/null
+++ b/src/pmdas/solaris/pmns.mem
@@ -0,0 +1,88 @@
+/*
+ * TODO
+ *
+ * These are the IRIX names, for reference
+ * mem.freemem
+ * mem.availsmem
+ * mem.availrmem
+ * mem.ravailrmem
+ * mem.bufmem
+ * mem.physmem
+ * mem.dchunkpages
+ * mem.pmapmem
+ * mem.strmem
+ * mem.chunkpages
+ * mem.dpages
+ * mem.emptymem
+ * mem.freeswap
+ * mem.halloc
+ * mem.heapmem
+ * mem.hfree
+ * mem.hovhd
+ * mem.hunused
+ * mem.zfree
+ * mem.zonemem
+ * mem.zreq
+ * mem.iclean
+ * mem.bsdnet
+ * mem.palloc
+ * mem.unmodfl
+ * mem.unmodsw
+ * mem.min_file_pages
+ * mem.min_free_pages
+ * mem.bufs.fs_metadata
+ * mem.bufs.fs_data
+ * mem.bufs.empty
+ * mem.bufs.inact
+ * mem.fault.prot.total
+ * mem.fault.prot.cow
+ * mem.fault.prot.steal
+ * mem.fault.addr.total
+ * mem.fault.addr.cache
+ * mem.fault.addr.demand
+ * mem.fault.addr.file
+ * mem.fault.addr.swap
+ * mem.tlb.flush
+ * mem.tlb.invalid
+ * mem.tlb.rfault
+ * mem.tlb.sync
+ * mem.tlb.tfault
+ * mem.tlb.purge
+ * mem.tlb.idnew
+ * mem.tlb.idwrap
+ * mem.tlb.kvmwrap
+ * mem.paging.reclaim
+ * mem.system.sptalloc
+ * mem.system.sptfree
+ * mem.system.sptclean
+ * mem.system.sptdirty
+ * mem.system.sptintrans
+ * mem.system.sptaged
+ * mem.system.sptbp
+ * mem.system.sptheap
+ * mem.system.sptzone
+ * mem.system.sptpt
+ * mem.lpage.faults
+ * mem.lpage.allocs
+ * mem.lpage.downgrade
+ * mem.lpage.page_splits
+ * mem.lpage.basesize
+ * mem.lpage.maxsize
+ * mem.lpage.maxenabled
+ * mem.lpage.enabled
+ * mem.lpage.coalesce.scans
+ * mem.lpage.coalesce.success
+ * mem.util.kernel
+ * mem.util.fs_ctl
+ * mem.util.fs_dirty
+ * mem.util.fs_clean
+ * mem.util.free
+ * mem.util.user
+ */
+
+mem {
+ physmem SOLARIS:SCLR_SYSINFO:136
+ freemem SOLARIS:SCLR_SYSINFO:137
+ lotsfree SOLARIS:SCLR_SYSINFO:138
+ availrmem SOLARIS:SCLR_SYSINFO:139
+}
diff --git a/src/pmdas/solaris/pmns.network b/src/pmdas/solaris/pmns.network
new file mode 100644
index 0000000..e9fe9f8
--- /dev/null
+++ b/src/pmdas/solaris/pmns.network
@@ -0,0 +1,302 @@
+/*
+ * TODO
+ *
+ * These are the IRIX names, for reference
+ * network.icmp.error
+ * network.icmp.oldshort
+ * network.icmp.oldicmp
+ * network.icmp.badcode
+ * network.icmp.tooshort
+ * network.icmp.checksum
+ * network.icmp.badlen
+ * network.icmp.reflect
+ * network.icmp.inhist.echoreply
+ * network.icmp.inhist.unreach
+ * network.icmp.inhist.sourcequench
+ * network.icmp.inhist.redirect
+ * network.icmp.inhist.echo
+ * network.icmp.inhist.routeradvert
+ * network.icmp.inhist.routersolicit
+ * network.icmp.inhist.timxceed
+ * network.icmp.inhist.paramprob
+ * network.icmp.inhist.tstamp
+ * network.icmp.inhist.tstampreply
+ * network.icmp.inhist.ireq
+ * network.icmp.inhist.ireqreply
+ * network.icmp.inhist.maskreq
+ * network.icmp.inhist.maskreply
+ * network.icmp.outhist.echoreply
+ * network.icmp.outhist.unreach
+ * network.icmp.outhist.sourcequench
+ * network.icmp.outhist.redirect
+ * network.icmp.outhist.echo
+ * network.icmp.outhist.routeradvert
+ * network.icmp.outhist.routersolicit
+ * network.icmp.outhist.timxceed
+ * network.icmp.outhist.paramprob
+ * network.icmp.outhist.tstamp
+ * network.icmp.outhist.tstampreply
+ * network.icmp.outhist.ireq
+ * network.icmp.outhist.ireqreply
+ * network.icmp.outhist.maskreq
+ * network.icmp.outhist.maskreply
+ * network.igmp.rcv_total
+ * network.igmp.rcv_tooshort
+ * network.igmp.rcv_badsum
+ * network.igmp.rcv_queries
+ * network.igmp.rcv_badqueries
+ * network.igmp.rcv_reports
+ * network.igmp.rcv_badreports
+ * network.igmp.rcv_ourreports
+ * network.igmp.snd_reports
+ * network.ip.badhlen
+ * network.ip.badlen
+ * network.ip.badoptions
+ * network.ip.badsum
+ * network.ip.cantforward
+ * network.ip.cantfrag
+ * network.ip.delivered
+ * network.ip.forward
+ * network.ip.fragdropped
+ * network.ip.fragmented
+ * network.ip.fragments
+ * network.ip.fragtimeout
+ * network.ip.localout
+ * network.ip.noproto
+ * network.ip.noroute
+ * network.ip.odropped
+ * network.ip.ofragments
+ * network.ip.reassembled
+ * network.ip.redirect
+ * network.ip.tooshort
+ * network.ip.toosmall
+ * network.ip.badvers
+ * network.ip.rawout
+ * network.ip.strictreassoverlapfrags
+ * network.ip.strictreassgapfrags
+ * network.ip.total
+ * network.tcp.connattempt
+ * network.tcp.accepts
+ * network.tcp.connects
+ * network.tcp.drops
+ * network.tcp.conndrops
+ * network.tcp.closed
+ * network.tcp.segstimed
+ * network.tcp.rttupdated
+ * network.tcp.delack
+ * network.tcp.timeoutdrop
+ * network.tcp.rexmttimeo
+ * network.tcp.persisttimeo
+ * network.tcp.keeptimeo
+ * network.tcp.keepprobe
+ * network.tcp.keepdrops
+ * network.tcp.sndtotal
+ * network.tcp.sndpack
+ * network.tcp.sndbyte
+ * network.tcp.sndrexmitpack
+ * network.tcp.sndrexmitbyte
+ * network.tcp.sndacks
+ * network.tcp.sndprobe
+ * network.tcp.sndurg
+ * network.tcp.sndwinup
+ * network.tcp.sndctrl
+ * network.tcp.sndrst
+ * network.tcp.rcvtotal
+ * network.tcp.rcvpack
+ * network.tcp.rcvbyte
+ * network.tcp.rcvbadsum
+ * network.tcp.rcvbadoff
+ * network.tcp.rcvshort
+ * network.tcp.rcvduppack
+ * network.tcp.rcvdupbyte
+ * network.tcp.rcvpartduppack
+ * network.tcp.rcvpartdupbyte
+ * network.tcp.rcvoopack
+ * network.tcp.rcvoobyte
+ * network.tcp.rcvpackafterwin
+ * network.tcp.rcvbyteafterwin
+ * network.tcp.rcvafterclose
+ * network.tcp.rcvwinprobe
+ * network.tcp.rcvdupack
+ * network.tcp.rcvacktoomuch
+ * network.tcp.rcvackpack
+ * network.tcp.rcvackbyte
+ * network.tcp.rcvwinupd
+ * network.tcp.pcbcachemiss
+ * network.tcp.predack
+ * network.tcp.preddat
+ * network.tcp.pawsdrop
+ * network.tcp.badsyn
+ * network.tcp.listendrop
+ * network.tcp.persistdrop
+ * network.tcp.synpurge
+ * network.udp.ipackets
+ * network.udp.hdrops
+ * network.udp.badsum
+ * network.udp.badlen
+ * network.udp.noport
+ * network.udp.noportbcast
+ * network.udp.fullsock
+ * network.udp.opackets
+ * network.udp.pcbcachemiss
+
+ * network.interface.collisions
+ * network.interface.mtu
+ * network.interface.noproto
+ * network.interface.baudrate
+ * network.interface.in.errors
+ * network.interface.in.packets
+ * network.interface.in.bytes
+ * network.interface.in.mcasts
+ * network.interface.in.drops
+
+ * network.interface.out.errors
+ * network.interface.out.packets
+ * network.interface.out.bytes
+ * network.interface.out.mcasts
+ * network.interface.out.drops
+ * network.interface.out.qdrops
+ * network.interface.out.qlength
+ * network.interface.out.qmax
+ * network.interface.total.errors
+ * network.interface.total.packets
+ * network.interface.total.bytes
+ * network.interface.total.mcasts
+ * network.interface.total.drops
+ * network.mbuf.alloc
+ * network.mbuf.typealloc
+ * network.mbuf.clustalloc
+ * network.mbuf.clustfree
+ * network.mbuf.failed
+ * network.mbuf.waited
+ * network.mbuf.drained
+ * network.mbuf.pcb.total
+ * network.mbuf.pcb.bytes
+ * network.mbuf.mcb.total
+ * network.mbuf.mcb.bytes
+ * network.mbuf.mcb.fail
+ * network.mcr.mfc_lookups
+ * network.mcr.mfc_misses
+ * network.mcr.upcalls
+ * network.mcr.no_route
+ * network.mcr.bad_tunnel
+ * network.mcr.cant_tunnel
+ * network.mcr.wrong_if
+ * network.mcr.upq_ovflw
+ * network.mcr.cache_cleanups
+ * network.mcr.drop_sel
+ * network.mcr.q_overflow
+ * network.mcr.pkt2large
+ * network.mcr.upq_sockfull
+ * network.socket.type
+ * network.socket.state
+ * network.st.connattempt
+ * network.st.accepts
+ * network.st.connects
+ * network.st.drops
+ * network.st.connfails
+ * network.st.closed
+ * network.st.txtotal
+ * network.st.datatxtotal
+ * network.st.rxtotal
+ * network.st.datarxtotal
+ * network.st.cksumbad
+ * network.st.oototal
+ * network.st.keyrejects
+ * network.st.txrejects
+ * network.st.rxrejects
+ * network.st.slotdrops
+ * network.is.in_window
+ * network.is.in_underflow
+ * network.is.in_overlap
+ * network.is.up_disordered
+ * network.is.up_ordered
+ * network.is.outq_full
+ * network.is.outq_wakeups
+ * network.is.outq_drains
+ * network.is.reorder_wakeups
+ * network.is.reorder_drains
+ * network.is.drain_underflow
+ * network.is.drain_loop
+ * network.is.drain_empty
+ * network.is.window_stalls
+ * network.is.window_flush_null
+ * network.is.window_seqno_fixup
+ * network.is.window_flush_skipped
+ * network.is.window_flush_nlinks
+ * network.is.link_quota_oflows
+ * network.is.link_empty_headers
+ * network.is.link_header_allocs
+ * network.is.link_soft_cksums
+ * network.is.link_sync_seqno
+ * network.is.err_bad_version
+ * network.is.err_input_no_link
+ */
+
+network {
+ interface
+ link
+ udp
+}
+
+network.interface {
+ in
+ out
+ mtu SOLARIS:SCLR_NETIF:0
+}
+
+network.interface.in {
+ errors SOLARIS:SCLR_NETIF:1
+ packets SOLARIS:SCLR_NETIF:2
+ bytes SOLARIS:SCLR_NETIF:3
+ bcasts SOLARIS:SCLR_NETIF:4
+ mcasts SOLARIS:SCLR_NETIF:5
+ drops SOLARIS:SCLR_NETIF:6
+ delivers SOLARIS:SCLR_NETIF:7
+}
+
+network.interface.out {
+ errors SOLARIS:SCLR_NETIF:8
+ packets SOLARIS:SCLR_NETIF:9
+ bytes SOLARIS:SCLR_NETIF:10
+ bcasts SOLARIS:SCLR_NETIF:11
+ mcasts SOLARIS:SCLR_NETIF:12
+ drops SOLARIS:SCLR_NETIF:13
+}
+
+network.udp {
+ ipackets SOLARIS:SCLR_NETIF:14
+ opackets SOLARIS:SCLR_NETIF:15
+ ierrors SOLARIS:SCLR_NETIF:16
+ oerrors SOLARIS:SCLR_NETIF:17
+ noports SOLARIS:SCLR_NETIF:18
+ overflows SOLARIS:SCLR_NETIF:19
+}
+
+network.link {
+ in
+ out
+ collisions SOLARIS:SCLR_NETLINK:0
+ state SOLARIS:SCLR_NETLINK:1
+ duplex SOLARIS:SCLR_NETLINK:2
+ speed SOLARIS:SCLR_NETLINK:3
+}
+
+network.link.in {
+ errors SOLARIS:SCLR_NETLINK:4
+ packets SOLARIS:SCLR_NETLINK:5
+ bytes SOLARIS:SCLR_NETLINK:6
+ bcasts SOLARIS:SCLR_NETLINK:7
+ mcasts SOLARIS:SCLR_NETLINK:8
+ nobufs SOLARIS:SCLR_NETLINK:9
+}
+
+network.link.out {
+ errors SOLARIS:SCLR_NETLINK:10
+ packets SOLARIS:SCLR_NETLINK:11
+ bytes SOLARIS:SCLR_NETLINK:12
+ bcasts SOLARIS:SCLR_NETLINK:13
+ mcasts SOLARIS:SCLR_NETLINK:14
+ nobufs SOLARIS:SCLR_NETLINK:15
+}
+
diff --git a/src/pmdas/solaris/pmns.zfs b/src/pmdas/solaris/pmns.zfs
new file mode 100644
index 0000000..6686d80
--- /dev/null
+++ b/src/pmdas/solaris/pmns.zfs
@@ -0,0 +1,60 @@
+zfs {
+ arc
+ used
+ snapshot
+ available SOLARIS:SCLR_ZFS:0
+ quota SOLARIS:SCLR_ZFS:1
+ reservation SOLARIS:SCLR_ZFS:2
+ compression SOLARIS:SCLR_ZFS:3
+ copies SOLARIS:SCLR_ZFS:4
+ recordsize SOLARIS:SCLR_ZFS:5
+ refquota SOLARIS:SCLR_ZFS:6
+ refreservation SOLARIS:SCLR_ZFS:7
+ referenced SOLARIS:SCLR_ZFS:8
+ nsnapshots SOLARIS:SCLR_ZFS:9
+}
+
+zfs.used {
+ total SOLARIS:SCLR_ZFS:10
+ byme SOLARIS:SCLR_ZFS:11
+ bysnapshots SOLARIS:SCLR_ZFS:12
+ bychildren SOLARIS:SCLR_ZFS:13
+ byrefreservation SOLARIS:SCLR_ZFS:14
+}
+
+zfs.snapshot {
+ used SOLARIS:SCLR_ZFS:15
+ referenced SOLARIS:SCLR_ZFS:16
+ compression SOLARIS:SCLR_ZFS:17
+}
+
+zfs.arc {
+ size SOLARIS:SCLR_ARCSTATS:0
+ min_size SOLARIS:SCLR_ARCSTATS:1
+ max_size SOLARIS:SCLR_ARCSTATS:2
+ mru_size SOLARIS:SCLR_ARCSTATS:3
+ target_size SOLARIS:SCLR_ARCSTATS:4
+ hits
+ misses
+
+}
+
+zfs.arc.misses {
+ total SOLARIS:SCLR_ARCSTATS:5
+ demand_data SOLARIS:SCLR_ARCSTATS:6
+ demand_metadata SOLARIS:SCLR_ARCSTATS:7
+ prefetch_data SOLARIS:SCLR_ARCSTATS:8
+ prefetch_metadata SOLARIS:SCLR_ARCSTATS:9
+}
+
+zfs.arc.hits {
+ total SOLARIS:SCLR_ARCSTATS:10
+ mfu SOLARIS:SCLR_ARCSTATS:11
+ mru SOLARIS:SCLR_ARCSTATS:12
+ mfu_ghost SOLARIS:SCLR_ARCSTATS:13
+ mru_ghost SOLARIS:SCLR_ARCSTATS:14
+ demand_data SOLARIS:SCLR_ARCSTATS:15
+ demand_metadata SOLARIS:SCLR_ARCSTATS:16
+ prefetch_data SOLARIS:SCLR_ARCSTATS:17
+ prefetch_metadata SOLARIS:SCLR_ARCSTATS:18
+}
diff --git a/src/pmdas/solaris/pmns.zpool b/src/pmdas/solaris/pmns.zpool
new file mode 100644
index 0000000..2fcae14
--- /dev/null
+++ b/src/pmdas/solaris/pmns.zpool
@@ -0,0 +1,31 @@
+zpool {
+ state SOLARIS:SCLR_ZPOOL:0
+ state_int SOLARIS:SCLR_ZPOOL:1
+ capacity SOLARIS:SCLR_ZPOOL:2
+ used SOLARIS:SCLR_ZPOOL:3
+ checksum_errors SOLARIS:SCLR_ZPOOL:4
+ self_healed SOLARIS:SCLR_ZPOOL:5
+ perdisk
+ in
+ out
+ ops
+}
+
+zpool.in {
+ bytes SOLARIS:SCLR_ZPOOL:6
+ ops SOLARIS:SCLR_ZPOOL:7
+ errors SOLARIS:SCLR_ZPOOL:8
+}
+
+zpool.out {
+ bytes SOLARIS:SCLR_ZPOOL:9
+ ops SOLARIS:SCLR_ZPOOL:10
+ errors SOLARIS:SCLR_ZPOOL:11
+}
+
+zpool.ops {
+ noops SOLARIS:SCLR_ZPOOL:12
+ ioctls SOLARIS:SCLR_ZPOOL:13
+ claims SOLARIS:SCLR_ZPOOL:14
+ frees SOLARIS:SCLR_ZPOOL:15
+}
diff --git a/src/pmdas/solaris/pmns.zpool_perdisk b/src/pmdas/solaris/pmns.zpool_perdisk
new file mode 100644
index 0000000..5c6dfa1
--- /dev/null
+++ b/src/pmdas/solaris/pmns.zpool_perdisk
@@ -0,0 +1,16 @@
+zpool.perdisk {
+ state SOLARIS:SCLR_ZPOOL_PERDISK:0
+ state_int SOLARIS:SCLR_ZPOOL_PERDISK:1
+ checksum_errors SOLARIS:SCLR_ZPOOL_PERDISK:2
+ self_healed SOLARIS:SCLR_ZPOOL_PERDISK:3
+ in
+ out
+}
+
+zpool.perdisk.in {
+ errors SOLARIS:SCLR_ZPOOL_PERDISK:4
+}
+
+zpool.perdisk.out {
+ errors SOLARIS:SCLR_ZPOOL_PERDISK:5
+}
diff --git a/src/pmdas/solaris/root b/src/pmdas/solaris/root
new file mode 100644
index 0000000..7131985
--- /dev/null
+++ b/src/pmdas/solaris/root
@@ -0,0 +1,42 @@
+/*
+ * fake "root" for validating the local PMNS subtree
+ */
+
+#include <stdpmid>
+#include "clusters.h"
+
+root {
+ kernel
+ disk
+ mem
+ network
+ hinv
+ zpool
+ zfs
+ pmda
+}
+
+pmda {
+ uname SOLARIS:0:107
+ prefetch
+ metric
+}
+
+pmda.prefetch {
+ time SOLARIS:4095:0
+ count SOLARIS:4095:1
+}
+
+pmda.metric {
+ time SOLARIS:4095:2
+ count SOLARIS:4095:3
+}
+
+#include "pmns.kernel"
+#include "pmns.disk"
+#include "pmns.mem"
+#include "pmns.network"
+#include "pmns.hinv"
+#include "pmns.zpool"
+#include "pmns.zfs"
+#include "pmns.zpool_perdisk"
diff --git a/src/pmdas/solaris/solaris.c b/src/pmdas/solaris/solaris.c
new file mode 100644
index 0000000..2deb6fd
--- /dev/null
+++ b/src/pmdas/solaris/solaris.c
@@ -0,0 +1,216 @@
+/*
+ * Solaris PMDA
+ *
+ * Collect performance data from the Solaris kernel using kstat() for
+ * the most part.
+ *
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2010 Max Matveev. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <time.h>
+#include <sys/time.h>
+#include "common.h"
+
+static int _isDSO = 1;
+static char mypath[MAXPATHLEN];
+
+/*
+ * wrapper for pmdaFetch which primes the methods ready for
+ * the next fetch
+ * ... real callback is fetch_callback()
+ */
+static int
+solaris_fetch(int numpmid, pmID pmidlist[], pmResult **resp, pmdaExt *pmda)
+{
+ int i;
+
+ kstat_ctl_needs_update();
+
+ for (i = 0; i < methodtab_sz; i++) {
+ methodtab[i].m_fetched = 0;
+ }
+
+ return pmdaFetch(numpmid, pmidlist, resp, pmda);
+}
+
+/*
+ * callback provided to pmdaFetch
+ */
+static int
+solaris_fetch_callback(pmdaMetric *mdesc, unsigned int inst, pmAtomValue *atom)
+{
+ metricdesc_t *mdp = (metricdesc_t *)mdesc->m_user;
+ int cluster = pmid_cluster(mdesc->m_desc.pmid);
+ method_t *m = methodtab + cluster;
+ hrtime_t start;
+ int rv;
+ __pmID_int *id = __pmid_int(&mdesc->m_desc.pmid);
+
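+    /* Cluster 4095 is reserved for the PMDA's self-instrumentation
+     * (pmda.prefetch.* and pmda.metric.*); instances are 1-based
+     * indices into methodtab[] and metricdesc[] respectively. */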
+ if (cluster == 4095) {
+ switch (id->item) {
+ case 0: /* pmda.prefetch.time */
+	    if ((inst <= 0) || (inst > methodtab_sz))
+ return PM_ERR_INST;
+ atom->ull = methodtab[inst-1].m_elapsed;
+ return 1;
+ case 1: /* pmda.prefetch.count */
+	    if ((inst <= 0) || (inst > methodtab_sz))
+ return PM_ERR_INST;
+ atom->ull = methodtab[inst-1].m_hits;
+ return 1;
+ case 2: /* pmda.metric.time */
+	    if ((inst <= 0) || (inst > metrictab_sz))
+ return PM_ERR_INST;
+ atom->ull = metricdesc[inst-1].md_elapsed;
+ return 1;
+ case 3: /* pmda.metric.count */
+	    if ((inst <= 0) || (inst > metrictab_sz))
+ return PM_ERR_INST;
+ atom->ull = metricdesc[inst-1].md_hits;
+ return 1;
+ default:
+ return PM_ERR_PMID;
+ }
+ } else if (cluster >= methodtab_sz) {
+ return PM_ERR_PMID;
+ }
+
+ if (!m->m_fetched && m->m_prefetch) {
+ start = gethrtime();
+ m->m_prefetch();
+ m->m_elapsed = gethrtime() - start;
+ m->m_hits++;
+ m->m_fetched = 1;
+ }
+ start = gethrtime();
+ rv = m->m_fetch(mdesc, inst, atom);
+ mdp->md_elapsed = gethrtime() - start;
+ mdp->md_hits++;
+ return rv;
+}
+
+/*
+ * Initialise the agent (both daemon and DSO).
+ */
+void
+__PMDA_INIT_CALL
+solaris_init(pmdaInterface *dp)
+{
+ if (_isDSO) {
+ int sep = __pmPathSeparator();
+ snprintf(mypath, sizeof(mypath), "%s%c" "solaris" "%c" "help",
+ pmGetConfig("PCP_PMDAS_DIR"), sep, sep);
+ pmdaDSO(dp, PMDA_INTERFACE_3, "Solaris DSO", mypath);
+ }
+
+ if (dp->status != 0)
+ return;
+
+ dp->version.two.fetch = solaris_fetch;
+ pmdaSetFetchCallBack(dp, solaris_fetch_callback);
+ init_data(dp->domain);
+ pmdaInit(dp, indomtab, indomtab_sz, metrictab, metrictab_sz);
+}
+
+static void
+usage(void)
+{
+ fprintf(stderr, "Usage: %s [options]\n\n", pmProgname);
+ fputs("Options:\n"
+ " -d domain use domain (numeric) for metrics domain of PMDA\n"
+ " -l logfile write log into logfile rather than using default log name\n"
+ " -N namespace verify consistency of internal metrics with the namespace\n",
+ stderr);
+ exit(1);
+}
+
+static void
+checkname(const char *mname)
+{
+ int i;
+ for (i = 0; i < metrictab_sz; i++) {
+ if (strcmp(mname, metricdesc[i].md_name) == 0)
+ return;
+ }
+ printf ("Cannot find %s in the code\n", mname);
+}
+
+/*
+ * Set up the agent if running as a daemon.
+ */
+int
+main(int argc, char **argv)
+{
+ int err = 0;
+ int sep = __pmPathSeparator();
+ pmdaInterface desc;
+ int c;
+ char *namespace = NULL;
+
+ _isDSO = 0;
+ __pmSetProgname(argv[0]);
+
+ snprintf(mypath, sizeof(mypath), "%s%c" "solaris" "%c" "help",
+ pmGetConfig("PCP_PMDAS_DIR"), sep, sep);
+ pmdaDaemon(&desc, PMDA_INTERFACE_3, pmProgname, SOLARIS,
+ "solaris.log", mypath);
+
+ while ((c = pmdaGetOpt(argc, argv, "N:D:d:l:?", &desc, &err)) != EOF) {
+ switch (c) {
+ case 'N':
+ namespace = optarg;
+ break;
+ default:
+ err++;
+ break;
+ }
+ }
+ if (err)
+ usage();
+
+ if (namespace) {
+ if (pmLoadNameSpace(namespace))
+ exit(1);
+
+ for (c = 0; c < metrictab_sz; c++) {
+ char *name;
+ int e;
+ __pmID_int *id = __pmid_int(&metricdesc[c].md_desc.pmid);
+ id->domain = desc.domain;
+
+ if ((e = pmNameID(metricdesc[c].md_desc.pmid, &name)) != 0) {
+ printf ("Cannot find %s(%s) in %s: %s\n",
+ metricdesc[c].md_name,
+ pmIDStr(metricdesc[c].md_desc.pmid),
+ namespace, pmErrStr(e));
+ } else {
+ if (strcmp(name, metricdesc[c].md_name)) {
+ printf ("%s is %s in the %s but %s in code\n",
+ pmIDStr(metricdesc[c].md_desc.pmid),
+ name, namespace,metricdesc[c].md_name);
+ }
+ }
+ }
+
+ pmTraversePMNS("", checkname);
+ exit (0);
+ }
+
+ pmdaOpenLog(&desc);
+ solaris_init(&desc);
+ pmdaConnect(&desc);
+ pmdaMain(&desc);
+
+ exit(0);
+}
diff --git a/src/pmdas/solaris/sysinfo.c b/src/pmdas/solaris/sysinfo.c
new file mode 100644
index 0000000..1937eed
--- /dev/null
+++ b/src/pmdas/solaris/sysinfo.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2010 Max Matveev. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include "common.h"
+#include <sys/utsname.h>
+#include <sys/loadavg.h>
+
+typedef struct {
+ int fetched;
+ int err;
+ kstat_t *ksp;
+ cpu_stat_t cpustat;
+ kstat_t *info;
+ int info_is_good;
+} ctl_t;
+
+static int ncpu;
+static int hz;
+static long pagesize;
+static ctl_t *ctl;
+static char uname_full[SYS_NMLN * 5];
+static int nloadavgs;
+static double loadavgs[3];
+
+void
+sysinfo_init(int first)
+{
+ kstat_t *ksp;
+ int i;
+ char buf[10]; /* cpuXXXXX */
+ kstat_ctl_t *kc;
+
+ if (!first)
+ /* TODO ... not sure if/when we'll use this re-init hook */
+ return;
+
+ if ((kc = kstat_ctl_update()) == NULL)
+ return;
+
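+    /* Count CPUs by probing cpu_stat:<instance> kstats until the lookup
+     * fails, growing the per-CPU control array one entry at a time. */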
+ for (ncpu = 0; ; ncpu++) {
+ ksp = kstat_lookup(kc, "cpu_stat", ncpu, NULL);
+ if (ksp == NULL) break;
+ if ((ctl = (ctl_t *)realloc(ctl, (ncpu+1) * sizeof(ctl_t))) == NULL) {
+ fprintf(stderr, "sysinfo_init: ctl realloc[%d] @ cpu=%d failed: %s\n",
+ (int)((ncpu+1) * sizeof(ctl_t)), ncpu, osstrerror());
+ exit(1);
+ }
+ ctl[ncpu].info = kstat_lookup(kc, "cpu_info", ncpu, NULL);
+ ctl[ncpu].ksp = ksp;
+ ctl[ncpu].err = 0;
+ }
+
+ indomtab[CPU_INDOM].it_numinst = ncpu;
+ indomtab[CPU_INDOM].it_set = (pmdaInstid *)malloc(ncpu * sizeof(pmdaInstid));
+ /* TODO check? */
+
+ for (i = 0; i < ncpu; i++) {
+ indomtab[CPU_INDOM].it_set[i].i_inst = i;
+ snprintf(buf, sizeof(buf), "cpu%d", i);
+ indomtab[CPU_INDOM].it_set[i].i_name = strdup(buf);
+ /* TODO check? */
+ }
+
+ hz = (int)sysconf(_SC_CLK_TCK);
+ pagesize = sysconf(_SC_PAGESIZE);
+
+#ifdef PCP_DEBUG
+ if ((pmDebug & (DBG_TRACE_APPL0|DBG_TRACE_APPL2)) == (DBG_TRACE_APPL0|DBG_TRACE_APPL2)) {
+ /* desperate */
+ fprintf(stderr, "sysinfo: ncpu=%d hz=%d\n", ncpu, hz);
+ }
+#endif
+}
+
+static __uint32_t
+sysinfo_derived(pmdaMetric *mdesc, int inst)
+{
+ pmID pmid = mdesc->m_desc.pmid;
+ __pmID_int *ip = (__pmID_int *)&pmid;
+ __uint32_t val;
+
+ ip->domain = 0;
+
+ switch (pmid) {
+
+ case PMDA_PMID(SCLR_SYSINFO,56): /* hinv.ncpu */
+ if (inst == 0)
+ val = ncpu;
+ else
+ val = 0;
+ break;
+
+ default:
+ fprintf(stderr, "cpu_derived: Botch: no method for pmid %s\n",
+ pmIDStr(mdesc->m_desc.pmid));
+ val = 0;
+ break;
+ }
+
+#ifdef PCP_DEBUG
+ if ((pmDebug & (DBG_TRACE_APPL0|DBG_TRACE_APPL2)) == (DBG_TRACE_APPL0|DBG_TRACE_APPL2)) {
+ /* desperate */
+ fprintf(stderr, "cpu_derived: pmid %s inst %d val %d\n",
+ pmIDStr(mdesc->m_desc.pmid), inst, val);
+ }
+#endif
+
+ return val;
+}
+
+void
+sysinfo_prefetch(void)
+{
+ int i;
+
+ nloadavgs = -1;
+ for (i = 0; i < ncpu; i++) {
+ ctl[i].fetched = 0;
+ ctl[i].info_is_good = 0;
+ }
+}
+
+int
+kstat_named_to_pmAtom(const kstat_named_t *kn, pmAtomValue *atom)
+{
+ static char chardat[sizeof(kn->value.c) + 1];
+
+ switch (kn->data_type) {
+ case KSTAT_DATA_UINT64:
+ atom->ull = kn->value.ui64;
+ return 1;
+ case KSTAT_DATA_INT64:
+ atom->ull = kn->value.i64;
+ return 1;
+ case KSTAT_DATA_UINT32:
+ atom->ull = kn->value.ui32;
+ return 1;
+ case KSTAT_DATA_INT32:
+ atom->ull = kn->value.i32;
+ return 1;
+ case KSTAT_DATA_STRING:
+ atom->cp = kn->value.str.addr.ptr;
+ return 1;
+ case KSTAT_DATA_CHAR:
+ memcpy(chardat, kn->value.c, sizeof(kn->value.c));
+ chardat[sizeof(chardat)-1] = '\0';
+ atom->cp = chardat;
+ return 1;
+ default:
+ return 0;
+ }
+}
+
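+/*
+ * Fetch a named value from the unix:system_pages kstat and convert it
+ * from pages to a byte-derived unit: a shift_bits of 10 yields Kbytes,
+ * 20 yields Mbytes.
+ */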
+static int
+kstat_fetch_named(kstat_ctl_t *kc, pmAtomValue *atom, char *metric,
+ int shift_bits)
+{
+ kstat_t *ks;
+
+ if ((ks = kstat_lookup(kc, "unix", -1, "system_pages")) != NULL) {
+ kstat_named_t *kn;
+
+ if (kstat_read(kc, ks, NULL) == -1)
+ return 0;
+
+ if (((kn = kstat_data_lookup(ks, metric)) != NULL) &&
+ kstat_named_to_pmAtom(kn, atom)) {
+ atom->ull = (atom->ull * pagesize) >> shift_bits;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int
+kstat_named_to_typed_atom(const kstat_named_t *kn, int pmtype,
+ pmAtomValue *atom)
+{
+ static char chardat[sizeof(kn->value.c) + 1];
+
+ switch (pmtype) {
+ case PM_TYPE_32:
+ if (kn->data_type == KSTAT_DATA_INT32) {
+ atom->l = kn->value.i32;
+ return 1;
+ }
+ break;
+ case PM_TYPE_U32:
+ if (kn->data_type == KSTAT_DATA_UINT32) {
+ atom->ul = kn->value.ui32;
+ return 1;
+ }
+ break;
+ case PM_TYPE_64:
+ if (kn->data_type == KSTAT_DATA_INT64) {
+ atom->ll = kn->value.i64;
+ return 1;
+ }
+ break;
+ case PM_TYPE_U64:
+ if (kn->data_type == KSTAT_DATA_UINT64) {
+ atom->ull = kn->value.ui64;
+ return 1;
+ }
+ break;
+ case PM_TYPE_STRING:
+ switch(kn->data_type) {
+ case KSTAT_DATA_STRING:
+ atom->cp = kn->value.str.addr.ptr;
+ return 1;
+ case KSTAT_DATA_CHAR:
+ memcpy(chardat, kn->value.c, sizeof(kn->value.c));
+ chardat[sizeof(chardat)-1] = '\0';
+ atom->cp = chardat;
+ return 1;
+ }
+ break;
+ }
+ return 0;
+}
+
+int
+sysinfo_fetch(pmdaMetric *mdesc, int inst, pmAtomValue *atom)
+{
+ __uint64_t ull;
+ int i;
+ int ok;
+ ptrdiff_t offset;
+ struct utsname u;
+ kstat_ctl_t *kc;
+
+ if ((kc = kstat_ctl_update()) == NULL)
+ return 0;
+
+ /* Special processing of metrics which notionally belong
+ * to sysinfo category */
+ switch (pmid_item(mdesc->m_desc.pmid)) {
+ case 109: /* hinv.physmem */
+ return kstat_fetch_named(kc, atom, "physmem", 20);
+ case 136: /* mem.physmem */
+ return kstat_fetch_named(kc, atom, "physmem", 10);
+ case 137: /* mem.freemem */
+ return kstat_fetch_named(kc, atom, "freemem", 10);
+ case 138: /* mem.lotsfree */
+ return kstat_fetch_named(kc, atom, "lotsfree", 10);
+ case 139: /* mem.availrmem */
+ return kstat_fetch_named(kc, atom, "availrmem", 10);
+
+ case 108: /* hinv.pagesize */
+ atom->ul = pagesize;
+ return 1;
+
+ case 107: /* pmda.uname */
+ if (uname(&u) < 0)
+ return 0;
+
+ snprintf(uname_full, sizeof(uname_full), "%s %s %s %s %s",
+ u.sysname, u.nodename, u.release, u.version, u.machine);
+ atom->cp = uname_full;
+ return 1;
+ case 135: /* kernel.all.load */
+ if (nloadavgs < 0) {
+ if ((nloadavgs = getloadavg(loadavgs, 3)) < 0)
+ return 0;
+ }
+
+ switch (inst) {
+ case 1:
+ atom->f = (float)loadavgs[LOADAVG_1MIN];
+ return nloadavgs > LOADAVG_1MIN;
+ case 5:
+ atom->f = (float)loadavgs[LOADAVG_5MIN];
+ return nloadavgs > LOADAVG_5MIN;
+ case 15:
+ atom->f = (float)loadavgs[LOADAVG_15MIN];
+ return nloadavgs > LOADAVG_15MIN;
+ }
+ return PM_ERR_INST;
+ }
+
+ ok = 1;
+ for (i = 0; i < ncpu; i++) {
+ if (inst == PM_IN_NULL || inst == i) {
+ if (!ctl[i].info_is_good) {
+ ctl[i].info_is_good = (ctl[i].info &&
+ (kstat_read(kc, ctl[i].info,
+ NULL) != -1));
+ }
+ if (ctl[i].fetched == 1)
+ continue;
+ if (kstat_read(kc, ctl[i].ksp, &ctl[i].cpustat) == -1) {
+ if (ctl[i].err == 0) {
+ fprintf(stderr, "Error: sysinfo_fetch(pmid=%s cpu=%d ...)\n", pmIDStr(mdesc->m_desc.pmid), i);
+ fprintf(stderr, "kstat_read(kc=%p, ksp=%p, ...) failed: %s\n", kc, ctl[i].ksp, osstrerror());
+ }
+ ctl[i].err++;
+ ctl[i].fetched = -1;
+ ok = 0;
+ }
+ else {
+ ctl[i].fetched = 1;
+ if (ctl[i].err != 0) {
+ fprintf(stderr, "Success: sysinfo_fetch(pmid=%s cpu=%d ...) after %d errors as previously reported\n",
+ pmIDStr(mdesc->m_desc.pmid), i, ctl[i].err);
+ ctl[i].err = 0;
+ }
+ }
+ }
+ }
+
+ if (!ok)
+ return 0;
+
+ ull = 0;
+ for (i = 0; i < ncpu; i++) {
+ if (inst == PM_IN_NULL || inst == i) {
+ offset = ((metricdesc_t *)mdesc->m_user)->md_offset;
+
+ if (offset == -1) {
+ ull += sysinfo_derived(mdesc, i);
+ } else if (offset > sizeof(ctl[i].cpustat)) {
+ char *stat = (char *)offset;
+ kstat_named_t *kn;
+
+ if (!ctl[i].info_is_good)
+ return 0;
+
+ if ((kn = kstat_data_lookup(ctl[i].info, stat)) == NULL) {
+ fprintf(stderr, "No kstat called %s for CPU %d\n", stat, i);
+ return 0;
+ }
+
+ return kstat_named_to_typed_atom(kn, mdesc->m_desc.type, atom);
+ } else {
+ /* all the kstat fields are 32-bit unsigned */
+ __uint32_t *ulp;
+ ulp = (__uint32_t *)&((char *)&ctl[i].cpustat)[offset];
+ ull += *ulp;
+#ifdef PCP_DEBUG
+ if ((pmDebug & (DBG_TRACE_APPL0|DBG_TRACE_APPL2)) == (DBG_TRACE_APPL0|DBG_TRACE_APPL2)) {
+ /* desperate */
+ fprintf(stderr, "sysinfo_fetch: pmid %s inst %d val %u\n",
+ pmIDStr(mdesc->m_desc.pmid), i, *ulp);
+ }
+#endif
+ }
+ }
+ }
+
+ if (mdesc->m_desc.units.dimTime == 1) {
+ /* sysinfo times are in ticks, and we export as 64-bit msec */
+ atom->ull = ull * 1000 / hz;
+ }
+ else if (mdesc->m_desc.type == PM_TYPE_U64) {
+ /* export as 64-bit value */
+ atom->ull = ull;
+ }
+ else {
+ /* else export as a 32-bit */
+ atom->ul = (__uint32_t)ull;
+ }
+
+ return 1;
+}
diff --git a/src/pmdas/solaris/vnops.c b/src/pmdas/solaris/vnops.c
new file mode 100644
index 0000000..69054d8
--- /dev/null
+++ b/src/pmdas/solaris/vnops.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2010 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* kstat has counters for vnode operations for each filesystem.
+ *
+ * Unfortunately the counters for mounted filesystems are mixed with the
+ * counters for the filesystem types and there is no obvious way to
+ * distinguish between the two except by trying to convert the kstat's
+ * name to a device number and seeing if that works */
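+
+/* For example (suffix values illustrative only): "vopstats_2d90002"
+ * carries the counters for a mounted filesystem, the suffix being its
+ * device number in hex, while "vopstats_zfs" carries the aggregate
+ * counters for the zfs filesystem type. */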
+
+#include <stdio.h>
+#include <kstat.h>
+#include <sys/mnttab.h>
+#include <sys/stat.h>
+#include "common.h"
+
+struct mountpoint {
+ struct mountpoint *next;
+ dev_t dev;
+ char mountpoint[];
+};
+
+static struct mountpoint *mountpoints;
+static struct timespec mtime;
+
+/* NB! The order of entries in the mountpoints list is important:
+ * lofs mounts use the same device number but appear later
+ * in /etc/mnttab than the target filesystem - keeping the
+ * order the same as /etc/mnttab means that more "logical"
+ * mountpoints are reported, in particular the counters
+ * for "/" are not reported as /lib/libc.so.1 */
+static void
+cache_mnttab(void)
+{
+ FILE *f;
+ struct mnttab m;
+ struct mountpoint *mp;
+ struct stat sb;
+ struct mountpoint **tail = &mountpoints;
+
+ if (stat("/etc/mnttab", &sb) < 0)
+ return;
+
+ if (mountpoints &&
+ (sb.st_mtim.tv_sec == mtime.tv_sec) &&
+ (sb.st_mtim.tv_nsec == mtime.tv_nsec))
+ return;
+
+ if ((f = fopen("/etc/mnttab", "r")) == NULL)
+ return;
+
+ for (mp = mountpoints; mp; mp = mountpoints) {
+ mountpoints = mp->next;
+ free(mp);
+ }
+
+ while (getmntent(f, &m) == 0) {
+ char *devop = hasmntopt(&m, "dev");
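+ /* devop points at "dev=<hex device number>"; the +4 below
+ * steps over the "dev=" prefix */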
+ if (devop) {
+ char *end;
+ dev_t d = strtoul(devop+4, &end, 16);
+
+ if ((end == devop+4) || (*end != '\0')) {
+ fprintf(stderr, "Bogus device number %s for filesystem %s\n",
+ devop+4, m.mnt_mountp);
+ continue;
+ }
+
+ mp = malloc(sizeof(*mp) + strlen(m.mnt_mountp) + 1);
+ if (mp == NULL) {
+ fprintf(stderr,
+ "Cannot allocate memory for cache entry of %s\n",
+ m.mnt_mountp);
+ continue;
+ }
+ mp->next = NULL;
+ mp->dev = d;
+ strcpy(mp->mountpoint, m.mnt_mountp);
+ *tail = mp;
+ tail = &mp->next;
+ }
+ }
+ fclose(f);
+ mtime = sb.st_mtim;
+}
+
+static const char *
+mountpoint_bydev(dev_t dev)
+{
+ int i;
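+
+ /* search the cached list first; if the device is not found,
+ * rebuild the cache from /etc/mnttab and try once more */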
+ for (i = 0; i < 2; i++) {
+ struct mountpoint *mp = mountpoints;
+ while (mp) {
+ if (mp->dev == dev)
+ return mp->mountpoint;
+ mp = mp->next;
+ }
+ cache_mnttab();
+ }
+ return NULL;
+}
+
+int
+vnops_fetch(pmdaMetric *pm, int inst, pmAtomValue *av)
+{
+ char *fsname;
+ metricdesc_t *md = pm->m_user;
+ kstat_t *k;
+ char *stat = (char *)md->md_offset;
+
+ if (pmid_item(pm->m_desc.pmid) == 1023) { /* hinv.nfilesys */
+ int sts;
+ sts = pmdaCacheOp(indomtab[FILESYS_INDOM].it_indom, PMDA_CACHE_SIZE_ACTIVE);
+ if (sts < 0)
+ return 0;
+ else {
+ av->ul = sts;
+ return 1;
+ }
+ }
+
+ if (pmdaCacheLookup(pm->m_desc.indom, inst, &fsname,
+ (void **)&k) != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ if (k) {
+ kstat_named_t *kn = kstat_data_lookup(k, stat);
+
+ if (kn == NULL) {
+ fprintf(stderr, "No kstat called %s for %s\n", stat, fsname);
+ return 0;
+ }
+
+ return kstat_named_to_typed_atom(kn, pm->m_desc.type, av);
+ }
+
+ return 0;
+}
+
+static void
+vnops_update_stats(int fetch)
+{
+ kstat_t *k;
+ kstat_ctl_t *kc = kstat_ctl_update();
+
+ if (kc == NULL)
+ return;
+
+ for (k = kc->kc_chain; k != NULL; k = k->ks_next) {
+ int rv;
+ kstat_t *cached;
+ const char *key;
+ dev_t dev;
+ char *end;
+ pmInDom indom;
+
+ if (strcmp(k->ks_module, "unix") ||
+ strncmp(k->ks_name, "vopstats_", sizeof("vopstats_")-1))
+ continue;
+
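+ /* step over the 9-character "vopstats_" prefix */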
+ key = k->ks_name + 9;
+ dev = strtoul(key, &end, 16);
+ if ((end != key) && (*end == '\0')) {
+ indom = indomtab[FILESYS_INDOM].it_indom;
+ if ((key = mountpoint_bydev(dev)) == NULL)
+ continue;
+ } else {
+ indom = indomtab[FSTYPE_INDOM].it_indom;
+ }
+
+ if (pmdaCacheLookupName(indom, key, &rv,
+ (void **)&cached) != PMDA_CACHE_ACTIVE) {
+ rv = pmdaCacheStore(indom, PMDA_CACHE_ADD, key, k);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot create instance for "
+ "filesystem '%s': %s\n",
+ key, pmErrStr(rv));
+ continue;
+ }
+ }
+
+ if (fetch)
+ kstat_read(kc, k, NULL);
+ }
+}
+
+void
+vnops_refresh(void)
+{
+ pmdaCacheOp(indomtab[FILESYS_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indomtab[FSTYPE_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ vnops_update_stats(1);
+ pmdaCacheOp(indomtab[FILESYS_INDOM].it_indom, PMDA_CACHE_SAVE);
+ pmdaCacheOp(indomtab[FSTYPE_INDOM].it_indom, PMDA_CACHE_SAVE);
+}
+
+void
+vnops_init(int first)
+{
+ pmdaCacheOp(indomtab[FILESYS_INDOM].it_indom, PMDA_CACHE_LOAD);
+ pmdaCacheOp(indomtab[FSTYPE_INDOM].it_indom, PMDA_CACHE_LOAD);
+ vnops_update_stats(0);
+ pmdaCacheOp(indomtab[FILESYS_INDOM].it_indom, PMDA_CACHE_SAVE);
+ pmdaCacheOp(indomtab[FSTYPE_INDOM].it_indom, PMDA_CACHE_SAVE);
+}
diff --git a/src/pmdas/solaris/zfs.c b/src/pmdas/solaris/zfs.c
new file mode 100644
index 0000000..9f4bc55
--- /dev/null
+++ b/src/pmdas/solaris/zfs.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2009 Max Matveev. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <libzfs.h>
+
+#include "common.h"
+
+static libzfs_handle_t *zh;
+static int zf_added;
+
+struct zfs_data {
+ zfs_handle_t *zh;
+ uint64_t nsnaps;
+};
+
+/*
+ * For each filesystem or snapshot, check if the name is in the
+ * corresponding instance cache, and add it if it's not there. If we've
+ * cached the new instance then we keep the zfs_handle which we've
+ * received in the argument; otherwise we need to close it -
+ * zfs_iter_root() expects that from us.
+ *
+ * For filesystems, iterate over their snapshots and update the snapshot
+ * count stored in the cached data for the instances in the ZFS_INDOM
+ * domain.
+ */
+static int
+zfs_cache_inst(zfs_handle_t *zf, void *arg)
+{
+ const char *fsname = zfs_get_name(zf);
+ pmInDom zfindom;
+ int inst, rv;
+ struct zfs_data *zdata = NULL;
+ uint64_t *snapcnt = arg;
+
+ switch (zfs_get_type(zf)) {
+ case ZFS_TYPE_FILESYSTEM:
+ zfindom = indomtab[ZFS_INDOM].it_indom;
+ break;
+ case ZFS_TYPE_SNAPSHOT:
+ (*snapcnt)++;
+ zfindom = indomtab[ZFS_SNAP_INDOM].it_indom;
+ break;
+ default:
+ zfs_close(zf);
+ return 0;
+ }
+
+ if ((rv = pmdaCacheLookupName(zfindom, fsname, &inst,
+ (void **)&zdata)) == PMDA_CACHE_ACTIVE) {
+ zfs_close(zf);
+ zfs_refresh_properties(zdata->zh);
+ zf = zdata->zh;
+ } else if ((rv == PMDA_CACHE_INACTIVE) && zdata) {
+ rv = pmdaCacheStore(zfindom, PMDA_CACHE_ADD, fsname, zdata);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot reactivate cached data for '%s': %s\n",
+ fsname, pmErrStr(rv));
+ zfs_close(zf);
+ return 0;
+ }
+ zfs_close(zf);
+ zfs_refresh_properties(zdata->zh);
+ zf = zdata->zh;
+ } else {
+ if ((zdata = calloc(1, sizeof(*zdata))) == NULL) {
+ __pmNotifyErr(LOG_WARNING,
+ "Out of memory for data of %s\n", fsname);
+ zfs_close(zf);
+ return 0;
+ }
+ zdata->zh = zf;
+ rv = pmdaCacheStore(zfindom, PMDA_CACHE_ADD, fsname, zdata);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot cache data for '%s': %s\n",
+ fsname, pmErrStr(rv));
+ zfs_close(zf);
+ return 0;
+ }
+ zf_added++;
+ }
+
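+ /* descend into child filesystems; for a filesystem, also recount
+ * its snapshots via the same callback - arg points at the cached
+ * snapshot counter */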
+ zfs_iter_filesystems(zf, zfs_cache_inst, NULL);
+ if (zfs_get_type(zf) == ZFS_TYPE_FILESYSTEM) {
+ zdata->nsnaps = 0;
+ zfs_iter_snapshots(zf, zfs_cache_inst, &zdata->nsnaps);
+ }
+
+ return 0;
+}
+
+void
+zfs_refresh(void)
+{
+ zf_added = 0;
+
+ pmdaCacheOp(indomtab[ZFS_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ pmdaCacheOp(indomtab[ZFS_SNAP_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ zfs_iter_root(zh, zfs_cache_inst, NULL);
+
+ if (zf_added) {
+ pmdaCacheOp(indomtab[ZFS_INDOM].it_indom, PMDA_CACHE_SAVE);
+ pmdaCacheOp(indomtab[ZFS_SNAP_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}
+
+int
+zfs_fetch(pmdaMetric *pm, int inst, pmAtomValue *atom)
+{
+ char *fsname;
+ metricdesc_t *md = pm->m_user;
+ struct zfs_data *zdata;
+ uint64_t v;
+
+ if (pmdaCacheLookup(pm->m_desc.indom, inst, &fsname,
+ (void **)&zdata) != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ if (md->md_offset == -1) { /* nsnapshot */
+ atom->ull = zdata->nsnaps;
+ return 1;
+ }
+
+ v = zfs_prop_get_int(zdata->zh, md->md_offset);
+
+ /* Special processing - the compression ratio is reported in percent;
+ * we export it as a multiplier, e.g. a value of 150 becomes 1.50 */
+ switch (md->md_offset) {
+ case ZFS_PROP_COMPRESSRATIO:
+ atom->d = v / 100.0;
+ break;
+ default:
+ atom->ull = v;
+ break;
+ }
+
+ return 1;
+}
+
+void
+zfs_init(int first)
+{
+ if (zh)
+ return;
+
+ zh = libzfs_init();
+ if (zh) {
+ pmdaCacheOp(indomtab[ZFS_INDOM].it_indom, PMDA_CACHE_LOAD);
+ pmdaCacheOp(indomtab[ZFS_SNAP_INDOM].it_indom, PMDA_CACHE_LOAD);
+ zfs_iter_root(zh, zfs_cache_inst, &first);
+ pmdaCacheOp(indomtab[ZFS_INDOM].it_indom, PMDA_CACHE_SAVE);
+ pmdaCacheOp(indomtab[ZFS_SNAP_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}
diff --git a/src/pmdas/solaris/zpool.c b/src/pmdas/solaris/zpool.c
new file mode 100644
index 0000000..2f402f2
--- /dev/null
+++ b/src/pmdas/solaris/zpool.c
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2009 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <libzfs.h>
+
+#include "common.h"
+
+struct zpool_stats {
+ int vdev_stats_fresh;
+ vdev_stat_t vds;
+};
+
+static libzfs_handle_t *zh;
+static int zp_added;
+
+/*
+ * For each zpool check whether the name is in the instance cache, and
+ * add it if it's not there. Regardless of whether this is the first time
+ * we've seen the pool or it was cached before, refresh the stats
+ */
+static int
+zp_cache_pool(zpool_handle_t *zp, void *arg)
+{
+ nvlist_t *cfg = zpool_get_config(zp, NULL);
+ char *zpname = (char *)zpool_get_name(zp);
+ struct zpool_stats *zps = NULL;
+ pmInDom zpindom = indomtab[ZPOOL_INDOM].it_indom;
+ uint_t cnt = 0;
+ vdev_stat_t *vds;
+ int rv;
+ int inst;
+ nvlist_t *vdt;
+
+ if ((rv = pmdaCacheLookupName(zpindom, zpname, &inst,
+ (void **)&zps)) != PMDA_CACHE_ACTIVE) {
+ int newpool = (zps == NULL);
+
+ if (rv != PMDA_CACHE_INACTIVE || zps == NULL) {
+ zps = malloc(sizeof(*zps));
+ if (zps == NULL) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot allocate memory to hold stats for "
+ "zpool '%s'\n",
+ zpname);
+ goto done;
+ }
+ }
+
+ rv = pmdaCacheStore(zpindom, PMDA_CACHE_ADD, zpname, zps);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot add '%s' to the cache "
+ "for instance domain %s: %s\n",
+ zpname, pmInDomStr(zpindom), pmErrStr(rv));
+ free(zps);
+ goto done;
+ }
+ zp_added += newpool;
+ }
+
+ rv = nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdt);
+ if (rv != 0) {
+ __pmNotifyErr(LOG_ERR, "Cannot get vdev tree for '%s': %d %d\n",
+ zpname, rv, oserror());
+ zps->vdev_stats_fresh = 0;
+ } else {
+ /* accommodate zpool api changes ... */
+#ifdef ZPOOL_CONFIG_VDEV_STATS
+ rv = nvlist_lookup_uint64_array(vdt, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&vds, &cnt);
+#else
+ rv = nvlist_lookup_uint64_array(vdt, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&vds, &cnt);
+#endif
+ if (rv == 0) {
+ memcpy(&zps->vds, vds, sizeof(zps->vds));
+ zps->vdev_stats_fresh = 1;
+ } else {
+ __pmNotifyErr(LOG_ERR,
+ "Cannot get zpool stats for '%s': %d %d\n",
+ zpname, rv, oserror());
+ zps->vdev_stats_fresh = 0;
+ }
+ }
+
+done:
+ zpool_close(zp);
+ return 0;
+}
+
+void
+zpool_refresh(void)
+{
+ zp_added = 0;
+
+ pmdaCacheOp(indomtab[ZPOOL_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ zpool_iter(zh, zp_cache_pool, NULL);
+
+ if (zp_added) {
+ pmdaCacheOp(indomtab[ZPOOL_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}
+
+int
+zpool_fetch(pmdaMetric *pm, int inst, pmAtomValue *atom)
+{
+ struct zpool_stats *zps;
+ char *zpname;
+ metricdesc_t *md = pm->m_user;
+
+ if (pmdaCacheLookup(indomtab[ZPOOL_INDOM].it_indom, inst, &zpname,
+ (void **)&zps) != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ if (zps->vdev_stats_fresh) {
+ switch (pmid_item(md->md_desc.pmid)) {
+ case 0: /* zpool.state */
+ atom->cp = zpool_state_to_name(zps->vds.vs_state, zps->vds.vs_aux);
+ break;
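+ /* pack the auxiliary state into the high bits and the
+ * vdev state into the low byte */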
+ case 1: /* zpool.state_int */
+ atom->ul = (zps->vds.vs_aux << 8) | zps->vds.vs_state;
+ break;
+ default:
+ memcpy(&atom->ull, ((char *)&zps->vds) + md->md_offset,
+ sizeof(atom->ull));
+ }
+ }
+ return zps->vdev_stats_fresh;
+}
+
+void
+zpool_init(int first)
+{
+ if (zh)
+ return;
+
+ zh = libzfs_init();
+ if (zh) {
+ pmdaCacheOp(indomtab[ZPOOL_INDOM].it_indom, PMDA_CACHE_LOAD);
+ zpool_iter(zh, zp_cache_pool, NULL);
+ pmdaCacheOp(indomtab[ZPOOL_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}
diff --git a/src/pmdas/solaris/zpool_perdisk.c b/src/pmdas/solaris/zpool_perdisk.c
new file mode 100644
index 0000000..f9646e9
--- /dev/null
+++ b/src/pmdas/solaris/zpool_perdisk.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2009 Max Matveev. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#include <libzfs.h>
+
+#include "common.h"
+
+struct vdev_stats {
+ int vdev_stats_fresh;
+ vdev_stat_t vds;
+};
+
+static libzfs_handle_t *zh;
+static int vdev_added;
+
+static char *
+make_vdev_name(zpool_handle_t *zp, const char *pname, nvlist_t *child)
+{
+ char *name = NULL;
+ char *cname = zpool_vdev_name(zh, zp, child, B_FALSE);
+ uint_t size;
+
+ if (cname == NULL) {
+ __pmNotifyErr(LOG_WARNING, "Cannot get the name of %s\'s "
+ "child\n", pname);
+ goto out;
+ }
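+ /* the +2 below covers the '.' separator and the terminating NUL */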
+ size = strlen(pname) + strlen(cname) + 2;
+ name = malloc(size);
+ if (name == NULL) {
+ __pmNotifyErr(LOG_WARNING, "Cannot allocate memory for %s.%s\n",
+ pname, cname);
+ goto free_out;
+ }
+ snprintf(name, size, "%s.%s", pname, cname);
+free_out:
+ free(cname);
+out:
+ return name;
+}
+
+/*
+ * get the names and stats of those vdevs in the pool which are disks and
+ * are either children, cache devices or spares
+ */
+static int
+zp_get_vdevs(zpool_handle_t *zp, char *zpname, nvlist_t *vdt,
+ char ***vdev_names,
+ vdev_stat_t ***vds, int *num)
+{
+ int rv = 0;
+ uint_t cnt;
+ char **new_vdev_names;
+ vdev_stat_t **new_vds;
+ int nelem = *num;
+
+ char *name;
+ vdev_stat_t *stats;
+ nvlist_t **children;
+ uint_t nchildren;
+
+ static const char *prop[] = {
+ ZPOOL_CONFIG_CHILDREN,
+ ZPOOL_CONFIG_L2CACHE,
+ ZPOOL_CONFIG_SPARES
+ };
+ int i;
+ int j;
+ char *vdev_type;
+
+ rv = nvlist_lookup_string(vdt, ZPOOL_CONFIG_TYPE, &vdev_type);
+ /* we've found a disk - look no further */
+ if (rv == 0 && strcmp(vdev_type, "disk") == 0) {
+
+ /* accommodate zpool api changes ... */
+#ifdef ZPOOL_CONFIG_VDEV_STATS
+ rv = nvlist_lookup_uint64_array(vdt, ZPOOL_CONFIG_VDEV_STATS,
+ (uint64_t **)&stats, &cnt);
+#else
+ rv = nvlist_lookup_uint64_array(vdt, ZPOOL_CONFIG_STATS,
+ (uint64_t **)&stats, &cnt);
+#endif
+ if (rv != 0) {
+ __pmNotifyErr(LOG_WARNING, "Cannot get the stats of %s\'s "
+ "child\n", zpname);
+ goto out;
+ }
+ name = make_vdev_name(zp, zpname, vdt);
+ if (name == NULL) {
+ __pmNotifyErr(LOG_WARNING, "Cannot get the name of a %s\'s "
+ "disk\n", zpname);
+ goto out;
+ }
+ nelem++;
+ new_vdev_names = realloc(*vdev_names, nelem *
+ sizeof(*new_vdev_names));
+ if (new_vdev_names == NULL) {
+ __pmNotifyErr(LOG_WARNING, "Cannot realloc memory for %s\n",
+ name);
+ goto free_out;
+ }
+ new_vdev_names[nelem - 1] = NULL;
+ *vdev_names = new_vdev_names;
+
+ new_vds = realloc(*vds, nelem * sizeof(*new_vds));
+ if (new_vds == NULL) {
+ __pmNotifyErr(LOG_WARNING, "Cannot realloc memory for vds %s\n",
+ name);
+ goto free_out;
+ }
+ new_vds[nelem - 1] = stats;
+ new_vdev_names[nelem - 1] = name;
+
+ *vds = new_vds;
+ *num = nelem;
+ goto out;
+ }
+
+ /* not a disk - traverse the children until we find all the disks */
+ for (i = 0; i < sizeof(prop) / sizeof(prop[0]); i++) {
+ rv = nvlist_lookup_nvlist_array(vdt, prop[i], &children,
+ &nchildren);
+ if (rv != 0)
+ nchildren = 0;
+ for (j = 0; j < nchildren; j++) {
+ zp_get_vdevs(zp, zpname, children[j], vdev_names, vds, num);
+ }
+ }
+ return 0;
+out:
+ return rv;
+free_out:
+ free(name);
+ return rv;
+}
+
+/*
+ * For each zpool, check the leaf vdev names that are disks against the
+ * instance cache, and add any that are missing. Regardless of whether
+ * it's the first time we've seen a vdev or it was cached before, refresh
+ * the stats
+ */
+static int
+zp_cache_vdevs(zpool_handle_t *zp, void *arg)
+{
+ nvlist_t *cfg = zpool_get_config(zp, NULL);
+ char *zpname = (char *)zpool_get_name(zp);
+ struct vdev_stats *zps = NULL;
+ pmInDom zpindom = indomtab[ZPOOL_PERDISK_INDOM].it_indom;
+ int rv;
+ int inst;
+ nvlist_t *vdt;
+
+ int i;
+ char **vdev_names = NULL;
+ vdev_stat_t **vds = NULL;
+ int num = 0;
+
+ rv = nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdt);
+ if (rv != 0) {
+ __pmNotifyErr(LOG_ERR, "Cannot get vdev tree for '%s': %d %d\n",
+ zpname, rv, oserror());
+ goto done;
+ }
+
+ rv = zp_get_vdevs(zp, zpname, vdt, &vdev_names, &vds, &num);
+ if (rv != 0) {
+ __pmNotifyErr(LOG_WARNING, "Cannot get vdevs for zpool '%s'\n",
+ zpname);
+ goto free_done;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (vdev_names[i] == NULL)
+ continue;
+ rv = pmdaCacheLookupName(zpindom, vdev_names[i], &inst,
+ (void **)&zps);
+ if (rv != PMDA_CACHE_ACTIVE) {
+ int new_vdev = (zps == NULL);
+
+ if (rv != PMDA_CACHE_INACTIVE || new_vdev) {
+ zps = malloc(sizeof(*zps));
+ if (zps == NULL) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot allocate memory to hold stats for "
+ "vdev '%s'\n", vdev_names[i]);
+ goto free_done;
+ }
+ }
+
+ rv = pmdaCacheStore(zpindom, PMDA_CACHE_ADD, vdev_names[i],
+ zps);
+ if (rv < 0) {
+ __pmNotifyErr(LOG_WARNING,
+ "Cannot add '%s' to the cache "
+ "for instance domain %s: %s\n",
+ vdev_names[i], pmInDomStr(zpindom),
+ pmErrStr(rv));
+ free(zps);
+ goto free_done;
+ }
+ vdev_added += new_vdev;
+ }
+
+ if (rv >= 0) {
+ memcpy(&zps->vds, vds[i], sizeof(zps->vds));
+ zps->vdev_stats_fresh = 1;
+ } else {
+ __pmNotifyErr(LOG_ERR,
+ "Cannot get stats for '%s': %d %d\n",
+ vdev_names[i], rv, oserror());
+ zps->vdev_stats_fresh = 0;
+ }
+ }
+free_done:
+ for (i = 0; i < num; i++)
+ free(vdev_names[i]);
+ free(vdev_names);
+ free(vds);
+done:
+ zpool_close(zp);
+ return 0;
+}
+
+void
+zpool_perdisk_refresh(void)
+{
+ vdev_added = 0;
+
+ pmdaCacheOp(indomtab[ZPOOL_PERDISK_INDOM].it_indom, PMDA_CACHE_INACTIVE);
+ zpool_iter(zh, zp_cache_vdevs, NULL);
+
+ if (vdev_added) {
+ pmdaCacheOp(indomtab[ZPOOL_PERDISK_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}
+
+int
+zpool_perdisk_fetch(pmdaMetric *pm, int inst, pmAtomValue *atom)
+{
+ struct vdev_stats *stats;
+ char *vdev_name;
+ metricdesc_t *md = pm->m_user;
+
+ if (pmdaCacheLookup(indomtab[ZPOOL_PERDISK_INDOM].it_indom, inst,
+ &vdev_name, (void **)&stats) != PMDA_CACHE_ACTIVE)
+ return PM_ERR_INST;
+
+ if (stats->vdev_stats_fresh) {
+ switch (pmid_item(md->md_desc.pmid)) {
+ case 0: /* zpool.perdisk.state */
+ atom->cp = zpool_state_to_name(stats->vds.vs_state,
+ stats->vds.vs_aux);
+ break;
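+ /* same packing as zpool.state_int: auxiliary state in the
+ * high bits, vdev state in the low byte */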
+ case 1: /* zpool.perdisk.state_int */
+ atom->ul = (stats->vds.vs_aux << 8) | stats->vds.vs_state;
+ break;
+ default:
+ memcpy(&atom->ull, ((char *)&stats->vds) + md->md_offset,
+ sizeof(atom->ull));
+ }
+ }
+
+ return stats->vdev_stats_fresh;
+}
+
+void
+zpool_perdisk_init(int first)
+{
+ if (zh)
+ return;
+
+ zh = libzfs_init();
+ if (zh) {
+ pmdaCacheOp(indomtab[ZPOOL_PERDISK_INDOM].it_indom, PMDA_CACHE_LOAD);
+ zpool_iter(zh, zp_cache_vdevs, NULL);
+ pmdaCacheOp(indomtab[ZPOOL_PERDISK_INDOM].it_indom, PMDA_CACHE_SAVE);
+ }
+}